Diffstat (limited to 'crypto')
-rw-r--r--  crypto/Kconfig                        |  18
-rw-r--r--  crypto/Makefile                       |   3
-rw-r--r--  crypto/asymmetric_keys/x509_parser.h  |   1
-rw-r--r--  crypto/async_tx/async_memcpy.c        |  37
-rw-r--r--  crypto/async_tx/async_pq.c            | 174
-rw-r--r--  crypto/async_tx/async_raid6_recov.c   |  61
-rw-r--r--  crypto/async_tx/async_tx.c            |   4
-rw-r--r--  crypto/async_tx/async_xor.c           | 123
-rw-r--r--  crypto/async_tx/raid6test.c           |  10
-rw-r--r--  crypto/authenc.c                      |  48
-rw-r--r--  crypto/crypto_null.c                  |   6
-rw-r--r--  crypto/pkc.c                          |  67
-rw-r--r--  crypto/tcrypt.c                       |  16
-rw-r--r--  crypto/testmgr.c                      | 283
-rw-r--r--  crypto/testmgr.h                      | 412
-rw-r--r--  crypto/tls.c                          | 582
16 files changed, 1638 insertions, 207 deletions
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 69ce573..2c6ef51 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -212,6 +212,24 @@ config CRYPTO_SEQIV
This IV generator generates an IV based on a sequence number by
xoring it with a salt. This algorithm is mainly useful for CTR
+config CRYPTO_TLS
+ tristate "TLS support"
+ select CRYPTO_AEAD
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_MANAGER
+ select CRYPTO_HASH
+ help
+ Support for TLS 1.0 record encryption and decryption
+
+ This module adds support for encryption/decryption of TLS 1.0 frames
+ using blockcipher algorithms. The name of the resulting algorithm is
+ "tls10(hmac(<digest>),cbc(<cipher>))". By default, the generic base
+ algorithms are used (e.g. aes-generic, sha1-generic), but hardware
+ accelerated versions will be used automatically if available.
+
+ User-space applications (OpenSSL, GnuTLS) can offload TLS 1.0
+ operations through AF_ALG or cryptodev interfaces.
+
comment "Block modes"
config CRYPTO_CBC
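As a minimal user-space sketch of the AF_ALG offload mentioned in the help text above (not part of this patch, and assuming the running kernel exposes AEAD transforms through AF_ALG), an application would bind the algorithm by name and then program the authenc-style key blob with ALG_SET_KEY:

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

#ifndef AF_ALG
#define AF_ALG 38
#endif
#ifndef SOL_ALG
#define SOL_ALG 279
#endif

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "aead",
		.salg_name   = "tls10(hmac(sha1),cbc(aes))",
	};
	int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);

	if (tfmfd < 0 || bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa))) {
		perror("tls10(hmac(sha1),cbc(aes)) not usable");
		return 1;
	}
	/* setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, keyblob, keylen) programs
	 * the rtattr-wrapped authentication+encryption key; accept(tfmfd)
	 * then yields an operation fd driven with sendmsg()/recvmsg(). */
	close(tfmfd);
	return 0;
}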
diff --git a/crypto/Makefile b/crypto/Makefile
index 80019ba..ab20a2e 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -15,7 +15,7 @@ obj-$(CONFIG_CRYPTO_ALGAPI2) += crypto_algapi.o
obj-$(CONFIG_CRYPTO_AEAD2) += aead.o
-crypto_blkcipher-y := ablkcipher.o
+crypto_blkcipher-y := ablkcipher.o pkc.o
crypto_blkcipher-y += blkcipher.o
obj-$(CONFIG_CRYPTO_BLKCIPHER2) += crypto_blkcipher.o
obj-$(CONFIG_CRYPTO_BLKCIPHER2) += chainiv.o
@@ -85,6 +85,7 @@ obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o
obj-$(CONFIG_CRYPTO_CRC32) += crc32.o
obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_common.o crct10dif_generic.o
obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
+obj-$(CONFIG_CRYPTO_TLS) += tls.o
obj-$(CONFIG_CRYPTO_LZO) += lzo.o
obj-$(CONFIG_CRYPTO_LZ4) += lz4.o
obj-$(CONFIG_CRYPTO_LZ4HC) += lz4hc.o
diff --git a/crypto/asymmetric_keys/x509_parser.h b/crypto/asymmetric_keys/x509_parser.h
index f86dc5f..d9351ee 100644
--- a/crypto/asymmetric_keys/x509_parser.h
+++ b/crypto/asymmetric_keys/x509_parser.h
@@ -9,6 +9,7 @@
* 2 of the Licence, or (at your option) any later version.
*/
+#include <linux/time.h>
#include <crypto/public_key.h>
struct x509_certificate {
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index 9e62fef..f8c0b8d 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -50,33 +50,36 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
&dest, 1, &src, 1, len);
struct dma_device *device = chan ? chan->device : NULL;
struct dma_async_tx_descriptor *tx = NULL;
+ struct dmaengine_unmap_data *unmap = NULL;
- if (device && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
- dma_addr_t dma_dest, dma_src;
+ if (device)
+ unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOIO);
+
+ if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
unsigned long dma_prep_flags = 0;
if (submit->cb_fn)
dma_prep_flags |= DMA_PREP_INTERRUPT;
if (submit->flags & ASYNC_TX_FENCE)
dma_prep_flags |= DMA_PREP_FENCE;
- dma_dest = dma_map_page(device->dev, dest, dest_offset, len,
- DMA_FROM_DEVICE);
-
- dma_src = dma_map_page(device->dev, src, src_offset, len,
- DMA_TO_DEVICE);
-
- tx = device->device_prep_dma_memcpy(chan, dma_dest, dma_src,
- len, dma_prep_flags);
- if (!tx) {
- dma_unmap_page(device->dev, dma_dest, len,
- DMA_FROM_DEVICE);
- dma_unmap_page(device->dev, dma_src, len,
- DMA_TO_DEVICE);
- }
+
+ unmap->to_cnt = 1;
+ unmap->addr[0] = dma_map_page(device->dev, src, src_offset, len,
+ DMA_TO_DEVICE);
+ unmap->from_cnt = 1;
+ unmap->addr[1] = dma_map_page(device->dev, dest, dest_offset, len,
+ DMA_FROM_DEVICE);
+ unmap->len = len;
+
+ tx = device->device_prep_dma_memcpy(chan, unmap->addr[1],
+ unmap->addr[0], len,
+ dma_prep_flags);
}
if (tx) {
pr_debug("%s: (async) len: %zu\n", __func__, len);
+
+ dma_set_unmap(tx, unmap);
async_tx_submit(chan, tx, submit);
} else {
void *dest_buf, *src_buf;
@@ -96,6 +99,8 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
async_tx_sync_epilog(submit);
}
+ dmaengine_unmap_put(unmap);
+
return tx;
}
EXPORT_SYMBOL_GPL(async_memcpy);
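The async_memcpy() conversion above is the template for the rest of this series: DMA mappings previously torn down by hand are now tracked in a reference-counted dmaengine_unmap_data object that the dmaengine core unmaps once the descriptor completes. A distilled sketch of that lifecycle (illustrative variable names, not code from the patch):

	struct dmaengine_unmap_data *unmap = NULL;
	struct dma_async_tx_descriptor *tx = NULL;

	if (device)
		unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOIO);
	if (unmap) {
		unmap->to_cnt = 1;	/* addr[0] mapped DMA_TO_DEVICE */
		unmap->addr[0] = dma_map_page(device->dev, src, src_off,
					      len, DMA_TO_DEVICE);
		unmap->from_cnt = 1;	/* addr[1] mapped DMA_FROM_DEVICE */
		unmap->addr[1] = dma_map_page(device->dev, dest, dest_off,
					      len, DMA_FROM_DEVICE);
		unmap->len = len;

		tx = device->device_prep_dma_memcpy(chan, unmap->addr[1],
						    unmap->addr[0], len, 0);
		if (tx)
			dma_set_unmap(tx, unmap); /* descriptor takes a ref */
	}
	dmaengine_unmap_put(unmap);		  /* drop the caller's ref */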
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index 91d5d38..d05327c 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -46,49 +46,24 @@ static struct page *pq_scribble_page;
* do_async_gen_syndrome - asynchronously calculate P and/or Q
*/
static __async_inline struct dma_async_tx_descriptor *
-do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
- const unsigned char *scfs, unsigned int offset, int disks,
- size_t len, dma_addr_t *dma_src,
+do_async_gen_syndrome(struct dma_chan *chan,
+ const unsigned char *scfs, int disks,
+ struct dmaengine_unmap_data *unmap,
+ enum dma_ctrl_flags dma_flags,
struct async_submit_ctl *submit)
{
struct dma_async_tx_descriptor *tx = NULL;
struct dma_device *dma = chan->device;
- enum dma_ctrl_flags dma_flags = 0;
enum async_tx_flags flags_orig = submit->flags;
dma_async_tx_callback cb_fn_orig = submit->cb_fn;
dma_async_tx_callback cb_param_orig = submit->cb_param;
int src_cnt = disks - 2;
- unsigned char coefs[src_cnt];
unsigned short pq_src_cnt;
dma_addr_t dma_dest[2];
int src_off = 0;
- int idx;
- int i;
- /* DMAs use destinations as sources, so use BIDIRECTIONAL mapping */
- if (P(blocks, disks))
- dma_dest[0] = dma_map_page(dma->dev, P(blocks, disks), offset,
- len, DMA_BIDIRECTIONAL);
- else
- dma_flags |= DMA_PREP_PQ_DISABLE_P;
- if (Q(blocks, disks))
- dma_dest[1] = dma_map_page(dma->dev, Q(blocks, disks), offset,
- len, DMA_BIDIRECTIONAL);
- else
- dma_flags |= DMA_PREP_PQ_DISABLE_Q;
-
- /* convert source addresses being careful to collapse 'empty'
- * sources and update the coefficients accordingly
- */
- for (i = 0, idx = 0; i < src_cnt; i++) {
- if (blocks[i] == NULL)
- continue;
- dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len,
- DMA_TO_DEVICE);
- coefs[idx] = scfs[i];
- idx++;
- }
- src_cnt = idx;
+ if (submit->flags & ASYNC_TX_FENCE)
+ dma_flags |= DMA_PREP_FENCE;
while (src_cnt > 0) {
submit->flags = flags_orig;
@@ -100,28 +75,25 @@ do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
if (src_cnt > pq_src_cnt) {
submit->flags &= ~ASYNC_TX_ACK;
submit->flags |= ASYNC_TX_FENCE;
- dma_flags |= DMA_COMPL_SKIP_DEST_UNMAP;
submit->cb_fn = NULL;
submit->cb_param = NULL;
} else {
- dma_flags &= ~DMA_COMPL_SKIP_DEST_UNMAP;
submit->cb_fn = cb_fn_orig;
submit->cb_param = cb_param_orig;
if (cb_fn_orig)
dma_flags |= DMA_PREP_INTERRUPT;
}
- if (submit->flags & ASYNC_TX_FENCE)
- dma_flags |= DMA_PREP_FENCE;
- /* Since we have clobbered the src_list we are committed
- * to doing this asynchronously. Drivers force forward
- * progress in case they can not provide a descriptor
+ /* Drivers force forward progress in case they can not provide
+ * a descriptor
*/
for (;;) {
+ dma_dest[0] = unmap->addr[disks - 2];
+ dma_dest[1] = unmap->addr[disks - 1];
tx = dma->device_prep_dma_pq(chan, dma_dest,
- &dma_src[src_off],
+ &unmap->addr[src_off],
pq_src_cnt,
- &coefs[src_off], len,
+ &scfs[src_off], unmap->len,
dma_flags);
if (likely(tx))
break;
@@ -129,6 +101,7 @@ do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
dma_async_issue_pending(chan);
}
+ dma_set_unmap(tx, unmap);
async_tx_submit(chan, tx, submit);
submit->depend_tx = tx;
@@ -188,10 +161,6 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
* set to NULL those buffers will be replaced with the raid6_zero_page
* in the synchronous path and omitted in the hardware-asynchronous
* path.
- *
- * 'blocks' note: if submit->scribble is NULL then the contents of
- * 'blocks' may be overwritten to perform address conversions
- * (dma_map_page() or page_address()).
*/
struct dma_async_tx_descriptor *
async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
@@ -202,26 +171,69 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
&P(blocks, disks), 2,
blocks, src_cnt, len);
struct dma_device *device = chan ? chan->device : NULL;
- dma_addr_t *dma_src = NULL;
+ struct dmaengine_unmap_data *unmap = NULL;
BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks)));
- if (submit->scribble)
- dma_src = submit->scribble;
- else if (sizeof(dma_addr_t) <= sizeof(struct page *))
- dma_src = (dma_addr_t *) blocks;
+ if (device)
+ unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO);
- if (dma_src && device &&
+ if (unmap &&
(src_cnt <= dma_maxpq(device, 0) ||
dma_maxpq(device, DMA_PREP_CONTINUE) > 0) &&
is_dma_pq_aligned(device, offset, 0, len)) {
+ struct dma_async_tx_descriptor *tx;
+ enum dma_ctrl_flags dma_flags = 0;
+ unsigned char coefs[src_cnt];
+ int i, j;
+
/* run the p+q asynchronously */
pr_debug("%s: (async) disks: %d len: %zu\n",
__func__, disks, len);
- return do_async_gen_syndrome(chan, blocks, raid6_gfexp, offset,
- disks, len, dma_src, submit);
+
+ /* convert source addresses being careful to collapse 'empty'
+ * sources and update the coefficients accordingly
+ */
+ unmap->len = len;
+ for (i = 0, j = 0; i < src_cnt; i++) {
+ if (blocks[i] == NULL)
+ continue;
+ unmap->addr[j] = dma_map_page(device->dev, blocks[i], offset,
+ len, DMA_TO_DEVICE);
+ coefs[j] = raid6_gfexp[i];
+ unmap->to_cnt++;
+ j++;
+ }
+
+ /*
+ * DMAs use destinations as sources,
+ * so use BIDIRECTIONAL mapping
+ */
+ unmap->bidi_cnt++;
+ if (P(blocks, disks))
+ unmap->addr[j++] = dma_map_page(device->dev, P(blocks, disks),
+ offset, len, DMA_BIDIRECTIONAL);
+ else {
+ unmap->addr[j++] = 0;
+ dma_flags |= DMA_PREP_PQ_DISABLE_P;
+ }
+
+ unmap->bidi_cnt++;
+ if (Q(blocks, disks))
+ unmap->addr[j++] = dma_map_page(device->dev, Q(blocks, disks),
+ offset, len, DMA_BIDIRECTIONAL);
+ else {
+ unmap->addr[j++] = 0;
+ dma_flags |= DMA_PREP_PQ_DISABLE_Q;
+ }
+
+ tx = do_async_gen_syndrome(chan, coefs, j, unmap, dma_flags, submit);
+ dmaengine_unmap_put(unmap);
+ return tx;
}
+ dmaengine_unmap_put(unmap);
+
/* run the pq synchronously */
pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len);
@@ -277,50 +289,60 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
struct dma_async_tx_descriptor *tx;
unsigned char coefs[disks-2];
enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
- dma_addr_t *dma_src = NULL;
- int src_cnt = 0;
+ struct dmaengine_unmap_data *unmap = NULL;
BUG_ON(disks < 4);
- if (submit->scribble)
- dma_src = submit->scribble;
- else if (sizeof(dma_addr_t) <= sizeof(struct page *))
- dma_src = (dma_addr_t *) blocks;
+ if (device)
+ unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO);
- if (dma_src && device && disks <= dma_maxpq(device, 0) &&
+ if (unmap && disks <= dma_maxpq(device, 0) &&
is_dma_pq_aligned(device, offset, 0, len)) {
struct device *dev = device->dev;
- dma_addr_t *pq = &dma_src[disks-2];
- int i;
+ dma_addr_t pq[2];
+ int i, j = 0, src_cnt = 0;
pr_debug("%s: (async) disks: %d len: %zu\n",
__func__, disks, len);
- if (!P(blocks, disks))
+
+ unmap->len = len;
+ for (i = 0; i < disks-2; i++)
+ if (likely(blocks[i])) {
+ unmap->addr[j] = dma_map_page(dev, blocks[i],
+ offset, len,
+ DMA_TO_DEVICE);
+ coefs[j] = raid6_gfexp[i];
+ unmap->to_cnt++;
+ src_cnt++;
+ j++;
+ }
+
+ if (!P(blocks, disks)) {
+ pq[0] = 0;
dma_flags |= DMA_PREP_PQ_DISABLE_P;
- else
+ } else {
pq[0] = dma_map_page(dev, P(blocks, disks),
offset, len,
DMA_TO_DEVICE);
- if (!Q(blocks, disks))
+ unmap->addr[j++] = pq[0];
+ unmap->to_cnt++;
+ }
+ if (!Q(blocks, disks)) {
+ pq[1] = 0;
dma_flags |= DMA_PREP_PQ_DISABLE_Q;
- else
+ } else {
pq[1] = dma_map_page(dev, Q(blocks, disks),
offset, len,
DMA_TO_DEVICE);
+ unmap->addr[j++] = pq[1];
+ unmap->to_cnt++;
+ }
if (submit->flags & ASYNC_TX_FENCE)
dma_flags |= DMA_PREP_FENCE;
- for (i = 0; i < disks-2; i++)
- if (likely(blocks[i])) {
- dma_src[src_cnt] = dma_map_page(dev, blocks[i],
- offset, len,
- DMA_TO_DEVICE);
- coefs[src_cnt] = raid6_gfexp[i];
- src_cnt++;
- }
-
for (;;) {
- tx = device->device_prep_dma_pq_val(chan, pq, dma_src,
+ tx = device->device_prep_dma_pq_val(chan, pq,
+ unmap->addr,
src_cnt,
coefs,
len, pqres,
@@ -330,6 +352,8 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
async_tx_quiesce(&submit->depend_tx);
dma_async_issue_pending(chan);
}
+
+ dma_set_unmap(tx, unmap);
async_tx_submit(chan, tx, submit);
return tx;
diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c
index a9f08a6..934a849 100644
--- a/crypto/async_tx/async_raid6_recov.c
+++ b/crypto/async_tx/async_raid6_recov.c
@@ -26,6 +26,7 @@
#include <linux/dma-mapping.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
+#include <linux/dmaengine.h>
static struct dma_async_tx_descriptor *
async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef,
@@ -34,35 +35,45 @@ async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef,
struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
&dest, 1, srcs, 2, len);
struct dma_device *dma = chan ? chan->device : NULL;
+ struct dmaengine_unmap_data *unmap = NULL;
const u8 *amul, *bmul;
u8 ax, bx;
u8 *a, *b, *c;
- if (dma) {
- dma_addr_t dma_dest[2];
- dma_addr_t dma_src[2];
+ if (dma)
+ unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO);
+
+ if (unmap) {
struct device *dev = dma->dev;
+ dma_addr_t pq[2];
struct dma_async_tx_descriptor *tx;
enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;
if (submit->flags & ASYNC_TX_FENCE)
dma_flags |= DMA_PREP_FENCE;
- dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL);
- dma_src[0] = dma_map_page(dev, srcs[0], 0, len, DMA_TO_DEVICE);
- dma_src[1] = dma_map_page(dev, srcs[1], 0, len, DMA_TO_DEVICE);
- tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 2, coef,
+ unmap->addr[0] = dma_map_page(dev, srcs[0], 0, len, DMA_TO_DEVICE);
+ unmap->addr[1] = dma_map_page(dev, srcs[1], 0, len, DMA_TO_DEVICE);
+ unmap->to_cnt = 2;
+
+ unmap->addr[2] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL);
+ unmap->bidi_cnt = 1;
+ /* engine only looks at Q, but expects it to follow P */
+ pq[1] = unmap->addr[2];
+
+ unmap->len = len;
+ tx = dma->device_prep_dma_pq(chan, pq, unmap->addr, 2, coef,
len, dma_flags);
if (tx) {
+ dma_set_unmap(tx, unmap);
async_tx_submit(chan, tx, submit);
+ dmaengine_unmap_put(unmap);
return tx;
}
/* could not get a descriptor, unmap and fall through to
* the synchronous path
*/
- dma_unmap_page(dev, dma_dest[1], len, DMA_BIDIRECTIONAL);
- dma_unmap_page(dev, dma_src[0], len, DMA_TO_DEVICE);
- dma_unmap_page(dev, dma_src[1], len, DMA_TO_DEVICE);
+ dmaengine_unmap_put(unmap);
}
/* run the operation synchronously */
@@ -89,23 +100,38 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
&dest, 1, &src, 1, len);
struct dma_device *dma = chan ? chan->device : NULL;
+ struct dmaengine_unmap_data *unmap = NULL;
const u8 *qmul; /* Q multiplier table */
u8 *d, *s;
- if (dma) {
+ if (dma)
+ unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO);
+
+ if (unmap) {
dma_addr_t dma_dest[2];
- dma_addr_t dma_src[1];
struct device *dev = dma->dev;
struct dma_async_tx_descriptor *tx;
enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;
if (submit->flags & ASYNC_TX_FENCE)
dma_flags |= DMA_PREP_FENCE;
- dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL);
- dma_src[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE);
- tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 1, &coef,
- len, dma_flags);
+ unmap->addr[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE);
+ unmap->to_cnt++;
+ unmap->addr[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL);
+ dma_dest[1] = unmap->addr[1];
+ unmap->bidi_cnt++;
+ unmap->len = len;
+
+ /* this looks funny, but the engine looks for Q at
+ * dma_dest[1] and ignores dma_dest[0] as a dest
+ * due to DMA_PREP_PQ_DISABLE_P
+ */
+ tx = dma->device_prep_dma_pq(chan, dma_dest, unmap->addr,
+ 1, &coef, len, dma_flags);
+
if (tx) {
+ dma_set_unmap(tx, unmap);
+ dmaengine_unmap_put(unmap);
async_tx_submit(chan, tx, submit);
return tx;
}
@@ -113,8 +139,7 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
/* could not get a descriptor, unmap and fall through to
* the synchronous path
*/
- dma_unmap_page(dev, dma_dest[1], len, DMA_BIDIRECTIONAL);
- dma_unmap_page(dev, dma_src[0], len, DMA_TO_DEVICE);
+ dmaengine_unmap_put(unmap);
}
/* no channel available, or failed to allocate a descriptor, so
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 7be3424..39ea479 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -128,7 +128,7 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
}
device->device_issue_pending(chan);
} else {
- if (dma_wait_for_async_tx(depend_tx) != DMA_SUCCESS)
+ if (dma_wait_for_async_tx(depend_tx) != DMA_COMPLETE)
panic("%s: DMA error waiting for depend_tx\n",
__func__);
tx->tx_submit(tx);
@@ -280,7 +280,7 @@ void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
* we are referring to the correct operation
*/
BUG_ON(async_tx_test_ack(*tx));
- if (dma_wait_for_async_tx(*tx) != DMA_SUCCESS)
+ if (dma_wait_for_async_tx(*tx) != DMA_COMPLETE)
panic("%s: DMA error waiting for transaction\n",
__func__);
async_tx_ack(*tx);
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 8ade0a0..3c562f5 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -33,48 +33,31 @@
/* do_async_xor - dma map the pages and perform the xor with an engine */
static __async_inline struct dma_async_tx_descriptor *
-do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
- unsigned int offset, int src_cnt, size_t len, dma_addr_t *dma_src,
+do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap,
struct async_submit_ctl *submit)
{
struct dma_device *dma = chan->device;
struct dma_async_tx_descriptor *tx = NULL;
- int src_off = 0;
- int i;
dma_async_tx_callback cb_fn_orig = submit->cb_fn;
void *cb_param_orig = submit->cb_param;
enum async_tx_flags flags_orig = submit->flags;
- enum dma_ctrl_flags dma_flags;
- int xor_src_cnt = 0;
- dma_addr_t dma_dest;
-
- /* map the dest bidrectional in case it is re-used as a source */
- dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_BIDIRECTIONAL);
- for (i = 0; i < src_cnt; i++) {
- /* only map the dest once */
- if (!src_list[i])
- continue;
- if (unlikely(src_list[i] == dest)) {
- dma_src[xor_src_cnt++] = dma_dest;
- continue;
- }
- dma_src[xor_src_cnt++] = dma_map_page(dma->dev, src_list[i], offset,
- len, DMA_TO_DEVICE);
- }
- src_cnt = xor_src_cnt;
+ enum dma_ctrl_flags dma_flags = 0;
+ int src_cnt = unmap->to_cnt;
+ int xor_src_cnt;
+ dma_addr_t dma_dest = unmap->addr[unmap->to_cnt];
+ dma_addr_t *src_list = unmap->addr;
while (src_cnt) {
+ dma_addr_t tmp;
+
submit->flags = flags_orig;
- dma_flags = 0;
xor_src_cnt = min(src_cnt, (int)dma->max_xor);
- /* if we are submitting additional xors, leave the chain open,
- * clear the callback parameters, and leave the destination
- * buffer mapped
+ /* if we are submitting additional xors, leave the chain open
+ * and clear the callback parameters
*/
if (src_cnt > xor_src_cnt) {
submit->flags &= ~ASYNC_TX_ACK;
submit->flags |= ASYNC_TX_FENCE;
- dma_flags = DMA_COMPL_SKIP_DEST_UNMAP;
submit->cb_fn = NULL;
submit->cb_param = NULL;
} else {
@@ -85,12 +68,18 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
dma_flags |= DMA_PREP_INTERRUPT;
if (submit->flags & ASYNC_TX_FENCE)
dma_flags |= DMA_PREP_FENCE;
- /* Since we have clobbered the src_list we are committed
- * to doing this asynchronously. Drivers force forward progress
- * in case they can not provide a descriptor
+
+ /* Drivers force forward progress in case they can not provide a
+ * descriptor
*/
- tx = dma->device_prep_dma_xor(chan, dma_dest, &dma_src[src_off],
- xor_src_cnt, len, dma_flags);
+ tmp = src_list[0];
+ if (src_list > unmap->addr)
+ src_list[0] = dma_dest;
+ tx = dma->device_prep_dma_xor(chan, dma_dest, src_list,
+ xor_src_cnt, unmap->len,
+ dma_flags);
+ src_list[0] = tmp;
+
if (unlikely(!tx))
async_tx_quiesce(&submit->depend_tx);
@@ -99,22 +88,21 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
while (unlikely(!tx)) {
dma_async_issue_pending(chan);
tx = dma->device_prep_dma_xor(chan, dma_dest,
- &dma_src[src_off],
- xor_src_cnt, len,
+ src_list,
+ xor_src_cnt, unmap->len,
dma_flags);
}
+ dma_set_unmap(tx, unmap);
async_tx_submit(chan, tx, submit);
submit->depend_tx = tx;
if (src_cnt > xor_src_cnt) {
/* drop completed sources */
src_cnt -= xor_src_cnt;
- src_off += xor_src_cnt;
-
/* use the intermediate result a source */
- dma_src[--src_off] = dma_dest;
src_cnt++;
+ src_list += xor_src_cnt - 1;
} else
break;
}
@@ -189,22 +177,40 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR,
&dest, 1, src_list,
src_cnt, len);
- dma_addr_t *dma_src = NULL;
+ struct dma_device *device = chan ? chan->device : NULL;
+ struct dmaengine_unmap_data *unmap = NULL;
BUG_ON(src_cnt <= 1);
- if (submit->scribble)
- dma_src = submit->scribble;
- else if (sizeof(dma_addr_t) <= sizeof(struct page *))
- dma_src = (dma_addr_t *) src_list;
+ if (device)
+ unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOIO);
+
+ if (unmap && is_dma_xor_aligned(device, offset, 0, len)) {
+ struct dma_async_tx_descriptor *tx;
+ int i, j;
- if (dma_src && chan && is_dma_xor_aligned(chan->device, offset, 0, len)) {
/* run the xor asynchronously */
pr_debug("%s (async): len: %zu\n", __func__, len);
- return do_async_xor(chan, dest, src_list, offset, src_cnt, len,
- dma_src, submit);
+ unmap->len = len;
+ for (i = 0, j = 0; i < src_cnt; i++) {
+ if (!src_list[i])
+ continue;
+ unmap->to_cnt++;
+ unmap->addr[j++] = dma_map_page(device->dev, src_list[i],
+ offset, len, DMA_TO_DEVICE);
+ }
+
+ /* map it bidirectional as it may be re-used as a source */
+ unmap->addr[j] = dma_map_page(device->dev, dest, offset, len,
+ DMA_BIDIRECTIONAL);
+ unmap->bidi_cnt = 1;
+
+ tx = do_async_xor(chan, unmap, submit);
+ dmaengine_unmap_put(unmap);
+ return tx;
} else {
+ dmaengine_unmap_put(unmap);
/* run the xor synchronously */
pr_debug("%s (sync): len: %zu\n", __func__, len);
WARN_ONCE(chan, "%s: no space for dma address conversion\n",
@@ -268,16 +274,14 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
struct dma_chan *chan = xor_val_chan(submit, dest, src_list, src_cnt, len);
struct dma_device *device = chan ? chan->device : NULL;
struct dma_async_tx_descriptor *tx = NULL;
- dma_addr_t *dma_src = NULL;
+ struct dmaengine_unmap_data *unmap = NULL;
BUG_ON(src_cnt <= 1);
- if (submit->scribble)
- dma_src = submit->scribble;
- else if (sizeof(dma_addr_t) <= sizeof(struct page *))
- dma_src = (dma_addr_t *) src_list;
+ if (device)
+ unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOIO);
- if (dma_src && device && src_cnt <= device->max_xor &&
+ if (unmap && src_cnt <= device->max_xor &&
is_dma_xor_aligned(device, offset, 0, len)) {
unsigned long dma_prep_flags = 0;
int i;
@@ -288,11 +292,15 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
dma_prep_flags |= DMA_PREP_INTERRUPT;
if (submit->flags & ASYNC_TX_FENCE)
dma_prep_flags |= DMA_PREP_FENCE;
- for (i = 0; i < src_cnt; i++)
- dma_src[i] = dma_map_page(device->dev, src_list[i],
- offset, len, DMA_TO_DEVICE);
- tx = device->device_prep_dma_xor_val(chan, dma_src, src_cnt,
+ for (i = 0; i < src_cnt; i++) {
+ unmap->addr[i] = dma_map_page(device->dev, src_list[i],
+ offset, len, DMA_TO_DEVICE);
+ unmap->to_cnt++;
+ }
+ unmap->len = len;
+
+ tx = device->device_prep_dma_xor_val(chan, unmap->addr, src_cnt,
len, result,
dma_prep_flags);
if (unlikely(!tx)) {
@@ -301,11 +309,11 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
while (!tx) {
dma_async_issue_pending(chan);
tx = device->device_prep_dma_xor_val(chan,
- dma_src, src_cnt, len, result,
+ unmap->addr, src_cnt, len, result,
dma_prep_flags);
}
}
-
+ dma_set_unmap(tx, unmap);
async_tx_submit(chan, tx, submit);
} else {
enum async_tx_flags flags_orig = submit->flags;
@@ -327,6 +335,7 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
async_tx_sync_epilog(submit);
submit->flags = flags_orig;
}
+ dmaengine_unmap_put(unmap);
return tx;
}
diff --git a/crypto/async_tx/raid6test.c b/crypto/async_tx/raid6test.c
index 4a92bac..dad95f4 100644
--- a/crypto/async_tx/raid6test.c
+++ b/crypto/async_tx/raid6test.c
@@ -28,7 +28,7 @@
#undef pr
#define pr(fmt, args...) pr_info("raid6test: " fmt, ##args)
-#define NDISKS 16 /* Including P and Q */
+#define NDISKS 64 /* Including P and Q */
static struct page *dataptrs[NDISKS];
static addr_conv_t addr_conv[NDISKS];
@@ -219,6 +219,14 @@ static int raid6_test(void)
err += test(11, &tests);
err += test(12, &tests);
}
+
+ /* the 24 disk case is special for ioatdma as it is the boundary point
+ * at which it needs to switch from 8-source ops to 16-source
+ * ops for continuation (assumes DMA_HAS_PQ_CONTINUE is not set)
+ */
+ if (NDISKS > 24)
+ err += test(24, &tests);
+
err += test(NDISKS, &tests);
pr("\n");
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 528b00b..6ff0208 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -52,40 +52,52 @@ static void authenc_request_complete(struct aead_request *req, int err)
aead_request_complete(req, err);
}
-static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
- unsigned int keylen)
+int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
+ unsigned int keylen)
{
- unsigned int authkeylen;
- unsigned int enckeylen;
- struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
- struct crypto_ahash *auth = ctx->auth;
- struct crypto_ablkcipher *enc = ctx->enc;
- struct rtattr *rta = (void *)key;
+ struct rtattr *rta = (struct rtattr *)key;
struct crypto_authenc_key_param *param;
- int err = -EINVAL;
if (!RTA_OK(rta, keylen))
- goto badkey;
+ return -EINVAL;
if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
- goto badkey;
+ return -EINVAL;
if (RTA_PAYLOAD(rta) < sizeof(*param))
- goto badkey;
+ return -EINVAL;
param = RTA_DATA(rta);
- enckeylen = be32_to_cpu(param->enckeylen);
+ keys->enckeylen = be32_to_cpu(param->enckeylen);
key += RTA_ALIGN(rta->rta_len);
keylen -= RTA_ALIGN(rta->rta_len);
- if (keylen < enckeylen)
- goto badkey;
+ if (keylen < keys->enckeylen)
+ return -EINVAL;
- authkeylen = keylen - enckeylen;
+ keys->authkeylen = keylen - keys->enckeylen;
+ keys->authkey = key;
+ keys->enckey = key + keys->authkeylen;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(crypto_authenc_extractkeys);
+
+static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+ struct crypto_ahash *auth = ctx->auth;
+ struct crypto_ablkcipher *enc = ctx->enc;
+ struct crypto_authenc_keys keys;
+ int err = -EINVAL;
+
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+ goto badkey;
crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc) &
CRYPTO_TFM_REQ_MASK);
- err = crypto_ahash_setkey(auth, key, authkeylen);
+ err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);
crypto_aead_set_flags(authenc, crypto_ahash_get_flags(auth) &
CRYPTO_TFM_RES_MASK);
@@ -95,7 +107,7 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc) &
CRYPTO_TFM_REQ_MASK);
- err = crypto_ablkcipher_setkey(enc, key + authkeylen, enckeylen);
+ err = crypto_ablkcipher_setkey(enc, keys.enckey, keys.enckeylen);
crypto_aead_set_flags(authenc, crypto_ablkcipher_get_flags(enc) &
CRYPTO_TFM_RES_MASK);
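The crypto_authenc_extractkeys() helper exported above parses the rtattr-wrapped key blob shared by authenc and the new tls10 template (the same layout the test vectors in testmgr.h spell out by hand). As a hedged sketch of the inverse direction, a hypothetical caller could assemble such a blob as follows; the helper name and error handling are illustrative only:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/string.h>
#include <crypto/authenc.h>

/* Layout: rtattr(CRYPTO_AUTHENC_KEYA_PARAM){enckeylen} | authkey | enckey */
static int build_authenc_key_blob(u8 *buf, unsigned int buflen,
				  const u8 *authkey, unsigned int authkeylen,
				  const u8 *enckey, unsigned int enckeylen)
{
	struct rtattr *rta = (struct rtattr *)buf;
	struct crypto_authenc_key_param *param;
	unsigned int hdrlen = RTA_SPACE(sizeof(*param));

	if (buflen < hdrlen + authkeylen + enckeylen)
		return -EINVAL;

	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	param->enckeylen = cpu_to_be32(enckeylen);

	memcpy(buf + hdrlen, authkey, authkeylen);
	memcpy(buf + hdrlen + authkeylen, enckey, enckeylen);

	return hdrlen + authkeylen + enckeylen;
}

A setkey() on either "authenc(...)" or "tls10(...)" then receives this buffer as the key.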
diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c
index fee7265..1dc54bb 100644
--- a/crypto/crypto_null.c
+++ b/crypto/crypto_null.c
@@ -17,6 +17,7 @@
*
*/
+#include <crypto/null.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <linux/init.h>
@@ -24,11 +25,6 @@
#include <linux/mm.h>
#include <linux/string.h>
-#define NULL_KEY_SIZE 0
-#define NULL_BLOCK_SIZE 1
-#define NULL_DIGEST_SIZE 0
-#define NULL_IV_SIZE 0
-
static int null_compress(struct crypto_tfm *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen)
{
diff --git a/crypto/pkc.c b/crypto/pkc.c
new file mode 100644
index 0000000..ada7eaf
--- /dev/null
+++ b/crypto/pkc.c
@@ -0,0 +1,67 @@
+/*
+ * \file: pkc.c
+ * \brief: Public Key Cipher operations.
+ *
+ * This is the Public Key Cipher Implementation
+ *
+ * Author: Yashpal Dutta <yashpal.dutta@freescale.com>
+ *
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <linux/cpumask.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/rtnetlink.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include <crypto/scatterwalk.h>
+#include "internal.h"
+
+static unsigned int crypto_pkc_ctxsize(struct crypto_alg *alg, u32 type,
+ u32 mask)
+{
+ return alg->cra_ctxsize;
+}
+
+static int crypto_init_pkc_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
+{
+ struct pkc_alg *alg = &tfm->__crt_alg->cra_pkc;
+ struct pkc_tfm *crt = &tfm->crt_pkc;
+
+ crt->pkc_op = alg->pkc_op;
+ crt->min_keysize = alg->min_keysize;
+ crt->max_keysize = alg->max_keysize;
+ crt->base = tfm;
+
+ return 0;
+}
+
+static void crypto_pkc_show(struct seq_file *m, struct crypto_alg *alg)
+{
+ struct pkc_alg *pkc_alg = &alg->cra_pkc;
+
+ seq_printf(m, "type : pkc_cipher\n");
+ seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
+ "yes" : "no");
+ seq_printf(m, "min keysize : %u\n", pkc_alg->min_keysize);
+ seq_printf(m, "max keysize : %u\n", pkc_alg->max_keysize);
+}
+
+const struct crypto_type crypto_pkc_type = {
+ .ctxsize = crypto_pkc_ctxsize,
+ .init = crypto_init_pkc_ops,
+#ifdef CONFIG_PROC_FS
+ .show = crypto_pkc_show,
+#endif
+};
+EXPORT_SYMBOL_GPL(crypto_pkc_type);
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 25a5934..93ad770 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -1242,6 +1242,22 @@ static int do_test(int m)
ret += tcrypt_test("cmac(des3_ede)");
break;
+ case 155:
+ ret += tcrypt_test("authenc(hmac(sha1),cbc(aes))");
+ break;
+
+ case 156:
+ ret += tcrypt_test("authenc(hmac(md5),ecb(cipher_null))");
+ break;
+
+ case 157:
+ ret += tcrypt_test("authenc(hmac(sha1),ecb(cipher_null))");
+ break;
+
+ case 180:
+ ret += tcrypt_test("tls10(hmac(sha1),cbc(aes))");
+ break;
+
case 200:
test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
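With these cases wired up, the new templates can be exercised in the usual tcrypt fashion, for example modprobe tcrypt mode=180 for tls10(hmac(sha1),cbc(aes)) or mode=155..157 for the authenc additions; as always, tcrypt deliberately refuses to stay loaded, so the pass/fail results land in the kernel log.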
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index e091ef6..ee27673 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -108,6 +108,13 @@ struct cprng_test_suite {
unsigned int count;
};
+struct tls_test_suite {
+ struct {
+ struct tls_testvec *vecs;
+ unsigned int count;
+ } enc, dec;
+};
+
struct alg_test_desc {
const char *alg;
int (*test)(const struct alg_test_desc *desc, const char *driver,
@@ -121,6 +128,7 @@ struct alg_test_desc {
struct pcomp_test_suite pcomp;
struct hash_test_suite hash;
struct cprng_test_suite cprng;
+ struct tls_test_suite tls;
} suite;
};
@@ -503,16 +511,16 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
goto out;
}
- sg_init_one(&sg[0], input,
- template[i].ilen + (enc ? authsize : 0));
-
if (diff_dst) {
output = xoutbuf[0];
output += align_offset;
+ sg_init_one(&sg[0], input, template[i].ilen);
sg_init_one(&sgout[0], output,
+ template[i].rlen);
+ } else {
+ sg_init_one(&sg[0], input,
template[i].ilen +
(enc ? authsize : 0));
- } else {
output = input;
}
@@ -612,12 +620,6 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
memcpy(q, template[i].input + temp,
template[i].tap[k]);
- n = template[i].tap[k];
- if (k == template[i].np - 1 && enc)
- n += authsize;
- if (offset_in_page(q) + n < PAGE_SIZE)
- q[n] = 0;
-
sg_set_buf(&sg[k], q, template[i].tap[k]);
if (diff_dst) {
@@ -625,13 +627,17 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
offset_in_page(IDX[k]);
memset(q, 0, template[i].tap[k]);
- if (offset_in_page(q) + n < PAGE_SIZE)
- q[n] = 0;
sg_set_buf(&sgout[k], q,
template[i].tap[k]);
}
+ n = template[i].tap[k];
+ if (k == template[i].np - 1 && enc)
+ n += authsize;
+ if (offset_in_page(q) + n < PAGE_SIZE)
+ q[n] = 0;
+
temp += template[i].tap[k];
}
@@ -650,10 +656,10 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
goto out;
}
- sg[k - 1].length += authsize;
-
if (diff_dst)
sgout[k - 1].length += authsize;
+ else
+ sg[k - 1].length += authsize;
}
sg_init_table(asg, template[i].anp);
@@ -805,6 +811,208 @@ static int test_aead(struct crypto_aead *tfm, int enc,
return 0;
}
+static int __test_tls(struct crypto_aead *tfm, int enc,
+ struct tls_testvec *template, unsigned int tcount,
+ const bool diff_dst)
+{
+ const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm));
+ unsigned int i, rlen;
+ int ret = -ENOMEM;
+ char *q;
+ char *key;
+ struct aead_request *req;
+ struct scatterlist *sg;
+ struct scatterlist *asg;
+ struct scatterlist *sgout;
+ const char *e, *d;
+ struct tcrypt_result result;
+ void *input;
+ void *output;
+ void *assoc;
+ char iv[MAX_IVLEN];
+ char *xbuf[XBUFSIZE];
+ char *xoutbuf[XBUFSIZE];
+ char *axbuf[XBUFSIZE];
+
+ if (testmgr_alloc_buf(xbuf))
+ goto out_noxbuf;
+ if (testmgr_alloc_buf(axbuf))
+ goto out_noaxbuf;
+
+ if (diff_dst && testmgr_alloc_buf(xoutbuf))
+ goto out_nooutbuf;
+
+ sg = kmalloc(sizeof(*sg) * 8 * (diff_dst ? 3 : 2), GFP_KERNEL);
+ if (!sg)
+ goto out_nosg;
+
+ asg = sg + 8;
+ sgout = sg + 16;
+
+ d = diff_dst ? "-ddst" : "";
+ e = enc ? "encryption" : "decryption";
+
+ init_completion(&result.completion);
+ req = aead_request_alloc(tfm, GFP_KERNEL);
+ if (!req) {
+ pr_err("alg: tls%s: Failed to allocate request for %s\n",
+ d, algo);
+ goto out;
+ }
+
+ aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ tcrypt_complete, &result);
+
+ for (i = 0; i < tcount; i++) {
+ input = xbuf[0];
+ assoc = axbuf[0];
+
+ ret = -EINVAL;
+ if (WARN_ON(template[i].ilen > PAGE_SIZE ||
+ template[i].alen > PAGE_SIZE))
+ goto out;
+
+ memcpy(input, template[i].input, template[i].ilen);
+ memcpy(assoc, template[i].assoc, template[i].alen);
+ if (template[i].iv)
+ memcpy(iv, template[i].iv, MAX_IVLEN);
+ else
+ memset(iv, 0, MAX_IVLEN);
+
+ crypto_aead_clear_flags(tfm, ~0);
+ key = template[i].key;
+
+ ret = crypto_aead_setkey(tfm, key, template[i].klen);
+ if (!ret == template[i].fail) {
+ pr_err("alg: tls%s: setkey failed on test %d for %s: flags=%x\n",
+ d, i, algo, crypto_aead_get_flags(tfm));
+ goto out;
+ }
+ if (ret)
+ continue;
+
+ /* Allocate enough space in the input and output scatterlists.
+ * They must accommodate the result for in-place encryption and
+ * different-place decryption
+ */
+ rlen = max(template[i].ilen, template[i].rlen);
+ if (diff_dst) {
+ output = xoutbuf[0];
+ sg_init_one(&sg[0], input, template[i].ilen);
+ sg_init_one(&sgout[0], output, rlen);
+ } else {
+ output = input;
+ sg_init_one(&sg[0], input, rlen);
+ }
+
+ sg_init_one(&asg[0], assoc, template[i].alen);
+ aead_request_set_assoc(req, asg, template[i].alen);
+ aead_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
+ template[i].ilen, iv);
+
+ ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
+
+ switch (ret) {
+ case 0:
+ if (template[i].novrfy) {
+ /* verification was supposed to fail */
+ pr_err("alg: tls%s: %s failed on test %d for %s: ret was 0, expected -EBADMSG\n",
+ d, e, i, algo);
+ /* so really, we got a bad message */
+ ret = -EBADMSG;
+ goto out;
+ }
+ break;
+ case -EINPROGRESS:
+ case -EBUSY:
+ /* The result is not ready yet. We wait for it and then
+ * check if the request succeeded or not
+ */
+ ret = wait_for_completion_interruptible(
+ &result.completion);
+ if (!ret && !result.err) {
+ reinit_completion(&result.completion);
+ break;
+ } else {
+ ret = result.err;
+ }
+ /* fall through */
+ case -EBADMSG:
+ /* verification failure was expected */
+ if (template[i].novrfy)
+ continue;
+ /* fall through */
+ default:
+ pr_err("alg: tls%s: %s failed on test %d for %s: ret=%d\n",
+ d, e, i, algo, -ret);
+ goto out;
+ }
+
+ q = output;
+ if (memcmp(q, template[i].result, template[i].rlen)) {
+ pr_err("alg: tls%s: Test %d failed on %s for %s\n",
+ d, i, e, algo);
+ hexdump(q, template[i].rlen);
+ pr_err("should be:\n");
+ hexdump(template[i].result, template[i].rlen);
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+out:
+ aead_request_free(req);
+ kfree(sg);
+out_nosg:
+ if (diff_dst)
+ testmgr_free_buf(xoutbuf);
+out_nooutbuf:
+ testmgr_free_buf(axbuf);
+out_noaxbuf:
+ testmgr_free_buf(xbuf);
+out_noxbuf:
+ return ret;
+}
+
+static int test_tls(struct crypto_aead *tfm, int enc,
+ struct tls_testvec *template, unsigned int tcount)
+{
+ int ret;
+ /* test 'dst == src' case */
+ ret = __test_tls(tfm, enc, template, tcount, false);
+ if (ret)
+ return ret;
+ /* test 'dst != src' case */
+ return __test_tls(tfm, enc, template, tcount, true);
+}
+
+static int alg_test_tls(const struct alg_test_desc *desc, const char *driver,
+ u32 type, u32 mask)
+{
+ struct crypto_aead *tfm;
+ int err = 0;
+
+ tfm = crypto_alloc_aead(driver, type, mask);
+ if (IS_ERR(tfm)) {
+ pr_err("alg: aead: Failed to load transform for %s: %ld\n",
+ driver, PTR_ERR(tfm));
+ return PTR_ERR(tfm);
+ }
+ if (desc->suite.tls.enc.vecs) {
+ err = test_tls(tfm, ENCRYPT, desc->suite.tls.enc.vecs,
+ desc->suite.tls.enc.count);
+ if (err)
+ goto out;
+ }
+ if (!err && desc->suite.tls.dec.vecs)
+ err = test_tls(tfm, DECRYPT, desc->suite.tls.dec.vecs,
+ desc->suite.tls.dec.count);
+
+out:
+ crypto_free_aead(tfm);
+ return err;
+}
+
static int test_cipher(struct crypto_cipher *tfm, int enc,
struct cipher_testvec *template, unsigned int tcount)
{
@@ -1811,6 +2019,22 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}, {
+ .alg = "authenc(hmac(md5),ecb(cipher_null))",
+ .test = alg_test_aead,
+ .fips_allowed = 1,
+ .suite = {
+ .aead = {
+ .enc = {
+ .vecs = hmac_md5_ecb_cipher_null_enc_tv_template,
+ .count = HMAC_MD5_ECB_CIPHER_NULL_ENC_TEST_VECTORS
+ },
+ .dec = {
+ .vecs = hmac_md5_ecb_cipher_null_dec_tv_template,
+ .count = HMAC_MD5_ECB_CIPHER_NULL_DEC_TEST_VECTORS
+ }
+ }
+ }
+ }, {
.alg = "authenc(hmac(sha1),cbc(aes))",
.test = alg_test_aead,
.fips_allowed = 1,
@@ -1823,6 +2047,22 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}, {
+ .alg = "authenc(hmac(sha1),ecb(cipher_null))",
+ .test = alg_test_aead,
+ .fips_allowed = 1,
+ .suite = {
+ .aead = {
+ .enc = {
+ .vecs = hmac_sha1_ecb_cipher_null_enc_tv_template,
+ .count = HMAC_SHA1_ECB_CIPHER_NULL_ENC_TEST_VECTORS
+ },
+ .dec = {
+ .vecs = hmac_sha1_ecb_cipher_null_dec_tv_template,
+ .count = HMAC_SHA1_ECB_CIPHER_NULL_DEC_TEST_VECTORS
+ }
+ }
+ }
+ }, {
.alg = "authenc(hmac(sha256),cbc(aes))",
.test = alg_test_aead,
.fips_allowed = 1,
@@ -3002,6 +3242,21 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}, {
+ .alg = "tls10(hmac(sha1),cbc(aes))",
+ .test = alg_test_tls,
+ .suite = {
+ .tls = {
+ .enc = {
+ .vecs = tls_enc_tv_template,
+ .count = TLS_ENC_TEST_VECTORS
+ },
+ .dec = {
+ .vecs = tls_dec_tv_template,
+ .count = TLS_DEC_TEST_VECTORS
+ }
+ }
+ }
+ }, {
.alg = "vmac(aes)",
.test = alg_test_hash,
.suite = {
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 7d44aa3..7bdadcc 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -92,8 +92,240 @@ struct cprng_testvec {
unsigned short loops;
};
+struct tls_testvec {
+ char *key; /* wrapped keys for encryption and authentication */
+ char *iv; /* initialization vector */
+ char *input; /* input data */
+ char *assoc; /* associated data: seq num, type, version, input len */
+ char *result; /* result data */
+ unsigned char fail; /* the test failure is expected */
+ unsigned char novrfy; /* dec verification failure expected */
+ unsigned char klen; /* key length */
+ unsigned short ilen; /* input data length */
+ unsigned short alen; /* associated data length */
+ unsigned short rlen; /* result length */
+};
+
static char zeroed_string[48];
+
+/*
+ * TLS1.0 synthetic test vectors
+ */
+#define TLS_ENC_TEST_VECTORS 3
+#define TLS_DEC_TEST_VECTORS 3
+
+static struct tls_testvec tls_enc_tv_template[] = {
+ {
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x10" /* enc key length */
+ "authenticationkey20b"
+ "enckeyis16_bytes",
+ .klen = 8 + 20 + 16,
+ .iv = "iv0123456789abcd",
+ .input = "Single block msg",
+ .ilen = 16,
+ .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x00\x03\x01\x00\x10",
+ .alen = 13,
+ .result = "\xd5\xac\xb\xd2\xac\xad\x3f\xb1"
+ "\x59\x79\x1e\x91\x5f\x52\x14\x9c"
+ "\xc0\x75\xd8\x4c\x97\x0f\x07\x73"
+ "\xdc\x89\x47\x49\x49\xcb\x30\x6b"
+ "\x1b\x45\x23\xa1\xd0\x51\xcf\x02"
+ "\x2e\xa8\x5d\xa0\xfe\xca\x82\x61",
+ .rlen = 16 + 20 + 12,
+ }, {
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x10" /* enc key length */
+ "authenticationkey20b"
+ "enckeyis16_bytes",
+ .klen = 8 + 20 + 16,
+ .iv = "iv0123456789abcd",
+ .input = "",
+ .ilen = 0,
+ .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x00\x03\x01\x00\x00",
+ .alen = 13,
+ .result = "\x58\x2a\x11\xc\x86\x8e\x4b\x67"
+ "\x2d\x16\x26\x1a\xac\x4b\xe2\x1a"
+ "\xe9\x6a\xcc\x4d\x6f\x79\x8a\x45"
+ "\x1f\x4e\x27\xf2\xa7\x59\xb4\x5a",
+ .rlen = 20 + 12,
+ }, {
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x10" /* enc key length */
+ "authenticationkey20b"
+ "enckeyis16_bytes",
+ .klen = 8 + 20 + 16,
+ .iv = "iv0123456789abcd",
+ .input = "285 bytes plaintext285 bytes plaintext"
+ "285 bytes plaintext285 bytes plaintext"
+ "285 bytes plaintext285 bytes plaintext"
+ "285 bytes plaintext285 bytes plaintext"
+ "285 bytes plaintext285 bytes plaintext"
+ "285 bytes plaintext285 bytes plaintext"
+ "285 bytes plaintext285 bytes plaintext"
+ "285 bytes plaintext",
+ .ilen = 285,
+ .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x00\x03\x01\x01\x1d",
+ .alen = 13,
+ .result = "\x80\x23\x82\x44\x14\x2a\x1d\x94\xc\xc2\x1d\xd"
+ "\x3a\x32\x89\x4c\x57\x30\xa8\x89\x76\x46\xcc\x90"
+ "\x1d\x88\xb8\xa6\x1a\x58\xe\x2d\xeb\x2c\xc7\x3a"
+ "\x52\x4e\xdb\xb3\x1e\x83\x11\xf5\x3c\xce\x6e\x94"
+ "\xd3\x26\x6a\x9a\xd\xbd\xc7\x98\xb9\xb3\x3a\x51"
+ "\x1e\x4\x84\x8a\x8f\x54\x9a\x51\x69\x9c\xce\x31"
+ "\x8d\x5d\x8b\xee\x5f\x70\xc\xc9\xb8\x50\x54\xf8"
+ "\xb2\x4a\x7a\xcd\xeb\x7a\x82\x81\xc6\x41\xc8\x50"
+ "\x91\x8d\xc8\xed\xcd\x40\x8f\x55\xd1\xec\xc9\xac"
+ "\x15\x18\xf9\x20\xa0\xed\x18\xa1\xe3\x56\xe3\x14"
+ "\xe5\xe8\x66\x63\x20\xed\xe4\x62\x9d\xa3\xa4\x1d"
+ "\x81\x89\x18\xf2\x36\xae\xc8\x8a\x2b\xbc\xc3\xb8"
+ "\x80\xf\x97\x21\x36\x39\x8\x84\x23\x18\x9e\x9c"
+ "\x72\x32\x75\x2d\x2e\xf9\x60\xb\xe8\xcc\xd9\x74"
+ "\x4\x1b\x8e\x99\xc1\x94\xee\xd0\xac\x4e\xfc\x7e"
+ "\xf1\x96\xb3\xe7\x14\xb8\xf2\xc\x25\x97\x82\x6b"
+ "\xbd\x0\x65\xab\x5c\xe3\x16\xfb\x68\xef\xea\x9d"
+ "\xff\x44\x1d\x2a\x44\xf5\xc8\x56\x77\xb7\xbf\x13"
+ "\xc8\x54\xdb\x92\xfe\x16\x4c\xbe\x18\xe9\xb\x8d"
+ "\xb\xd4\x43\x58\x43\xaa\xf4\x3\x80\x97\x62\xd5"
+ "\xdf\x3c\x28\xaa\xee\x48\x4b\x55\x41\x1b\x31\x2"
+ "\xbe\xa0\x1c\xbd\xb7\x22\x2a\xe5\x53\x72\x73\x20"
+ "\x44\x4f\xe6\x1\x2b\x34\x33\x11\x7d\xfb\x10\xc1"
+ "\x66\x7c\xa6\xf4\x48\x36\x5e\x2\xda\x41\x4b\x3e"
+ "\xe7\x80\x17\x17\xce\xf1\x3e\x6a\x8e\x26\xf3\xb7"
+ "\x2b\x85\xd\x31\x8d\xba\x6c\x22\xb4\x28\x55\x7e"
+ "\x2a\x9e\x26\xf1\x3d\x21\xac\x65",
+ .rlen = 285 + 20 + 15,
+ }
+};
+
+static struct tls_testvec tls_dec_tv_template[] = {
+ {
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x10" /* enc key length */
+ "authenticationkey20b"
+ "enckeyis16_bytes",
+ .klen = 8 + 20 + 16,
+ .iv = "iv0123456789abcd",
+ .input = "\xd5\xac\xb\xd2\xac\xad\x3f\xb1"
+ "\x59\x79\x1e\x91\x5f\x52\x14\x9c"
+ "\xc0\x75\xd8\x4c\x97\x0f\x07\x73"
+ "\xdc\x89\x47\x49\x49\xcb\x30\x6b"
+ "\x1b\x45\x23\xa1\xd0\x51\xcf\x02"
+ "\x2e\xa8\x5d\xa0\xfe\xca\x82\x61",
+ .ilen = 16 + 20 + 12,
+ .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x00\x03\x01\x00\x30",
+ .alen = 13,
+ .result = "Single block msg",
+ .rlen = 16,
+ }, {
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x10" /* enc key length */
+ "authenticationkey20b"
+ "enckeyis16_bytes",
+ .klen = 8 + 20 + 16,
+ .iv = "iv0123456789abcd",
+ .input = "\x58\x2a\x11\xc\x86\x8e\x4b\x67"
+ "\x2d\x16\x26\x1a\xac\x4b\xe2\x1a"
+ "\xe9\x6a\xcc\x4d\x6f\x79\x8a\x45"
+ "\x1f\x4e\x27\xf2\xa7\x59\xb4\x5a",
+ .ilen = 20 + 12,
+ .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x00\x03\x01\x00\x20",
+ .alen = 13,
+ .result = "",
+ .rlen = 0,
+ }, {
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x10" /* enc key length */
+ "authenticationkey20b"
+ "enckeyis16_bytes",
+ .klen = 8 + 20 + 16,
+ .iv = "iv0123456789abcd",
+ .input = "\x80\x23\x82\x44\x14\x2a\x1d\x94\xc\xc2\x1d\xd"
+ "\x3a\x32\x89\x4c\x57\x30\xa8\x89\x76\x46\xcc\x90"
+ "\x1d\x88\xb8\xa6\x1a\x58\xe\x2d\xeb\x2c\xc7\x3a"
+ "\x52\x4e\xdb\xb3\x1e\x83\x11\xf5\x3c\xce\x6e\x94"
+ "\xd3\x26\x6a\x9a\xd\xbd\xc7\x98\xb9\xb3\x3a\x51"
+ "\x1e\x4\x84\x8a\x8f\x54\x9a\x51\x69\x9c\xce\x31"
+ "\x8d\x5d\x8b\xee\x5f\x70\xc\xc9\xb8\x50\x54\xf8"
+ "\xb2\x4a\x7a\xcd\xeb\x7a\x82\x81\xc6\x41\xc8\x50"
+ "\x91\x8d\xc8\xed\xcd\x40\x8f\x55\xd1\xec\xc9\xac"
+ "\x15\x18\xf9\x20\xa0\xed\x18\xa1\xe3\x56\xe3\x14"
+ "\xe5\xe8\x66\x63\x20\xed\xe4\x62\x9d\xa3\xa4\x1d"
+ "\x81\x89\x18\xf2\x36\xae\xc8\x8a\x2b\xbc\xc3\xb8"
+ "\x80\xf\x97\x21\x36\x39\x8\x84\x23\x18\x9e\x9c"
+ "\x72\x32\x75\x2d\x2e\xf9\x60\xb\xe8\xcc\xd9\x74"
+ "\x4\x1b\x8e\x99\xc1\x94\xee\xd0\xac\x4e\xfc\x7e"
+ "\xf1\x96\xb3\xe7\x14\xb8\xf2\xc\x25\x97\x82\x6b"
+ "\xbd\x0\x65\xab\x5c\xe3\x16\xfb\x68\xef\xea\x9d"
+ "\xff\x44\x1d\x2a\x44\xf5\xc8\x56\x77\xb7\xbf\x13"
+ "\xc8\x54\xdb\x92\xfe\x16\x4c\xbe\x18\xe9\xb\x8d"
+ "\xb\xd4\x43\x58\x43\xaa\xf4\x3\x80\x97\x62\xd5"
+ "\xdf\x3c\x28\xaa\xee\x48\x4b\x55\x41\x1b\x31\x2"
+ "\xbe\xa0\x1c\xbd\xb7\x22\x2a\xe5\x53\x72\x73\x20"
+ "\x44\x4f\xe6\x1\x2b\x34\x33\x11\x7d\xfb\x10\xc1"
+ "\x66\x7c\xa6\xf4\x48\x36\x5e\x2\xda\x41\x4b\x3e"
+ "\xe7\x80\x17\x17\xce\xf1\x3e\x6a\x8e\x26\xf3\xb7"
+ "\x2b\x85\xd\x31\x8d\xba\x6c\x22\xb4\x28\x55\x7e"
+ "\x2a\x9e\x26\xf1\x3d\x21\xac\x65",
+
+ .ilen = 285 + 20 + 15,
+ .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x00\x03\x01\x01\x40",
+ .alen = 13,
+ .result = "285 bytes plaintext285 bytes plaintext"
+ "285 bytes plaintext285 bytes plaintext"
+ "285 bytes plaintext285 bytes plaintext"
+ "285 bytes plaintext285 bytes plaintext"
+ "285 bytes plaintext285 bytes plaintext"
+ "285 bytes plaintext285 bytes plaintext"
+ "285 bytes plaintext285 bytes plaintext"
+ "285 bytes plaintext",
+ .rlen = 285,
+ }
+};
+
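For reference, the ilen/rlen arithmetic in these synthetic vectors follows directly from the TLS 1.0 record layout: the ciphertext carries payload || HMAC-SHA1 digest (20 bytes) || CBC padding up to the 16-byte AES block size. The 16-byte payload thus grows to 16 + 20 = 36 bytes, padded by 12 to 48; the empty payload to 0 + 20 = 20, padded by 12 to 32; and the 285-byte payload to 285 + 20 = 305, padded by 15 to 320.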
/*
* MD4 test vectors from RFC1320
*/
@@ -12821,6 +13053,10 @@ static struct cipher_testvec cast6_xts_dec_tv_template[] = {
#define AES_DEC_TEST_VECTORS 4
#define AES_CBC_ENC_TEST_VECTORS 5
#define AES_CBC_DEC_TEST_VECTORS 5
+#define HMAC_MD5_ECB_CIPHER_NULL_ENC_TEST_VECTORS 2
+#define HMAC_MD5_ECB_CIPHER_NULL_DEC_TEST_VECTORS 2
+#define HMAC_SHA1_ECB_CIPHER_NULL_ENC_TEST_VECTORS 2
+#define HMAC_SHA1_ECB_CIPHER_NULL_DEC_TEST_VECTORS 2
#define HMAC_SHA1_AES_CBC_ENC_TEST_VECTORS 7
#define HMAC_SHA256_AES_CBC_ENC_TEST_VECTORS 7
#define HMAC_SHA512_AES_CBC_ENC_TEST_VECTORS 7
@@ -13627,6 +13863,90 @@ static struct cipher_testvec aes_cbc_dec_tv_template[] = {
},
};
+static struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = {
+ { /* Input data from RFC 2410 Case 1 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x00" /* enc key length */
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .klen = 8 + 16 + 0,
+ .iv = "",
+ .input = "\x01\x23\x45\x67\x89\xab\xcd\xef",
+ .ilen = 8,
+ .result = "\x01\x23\x45\x67\x89\xab\xcd\xef"
+ "\xaa\x42\xfe\x43\x8d\xea\xa3\x5a"
+ "\xb9\x3d\x9f\xb1\xa3\x8e\x9b\xae",
+ .rlen = 8 + 16,
+ }, { /* Input data from RFC 2410 Case 2 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x00" /* enc key length */
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .klen = 8 + 16 + 0,
+ .iv = "",
+ .input = "Network Security People Have A Strange Sense Of Humor",
+ .ilen = 53,
+ .result = "Network Security People Have A Strange Sense Of Humor"
+ "\x73\xa5\x3e\x1c\x08\x0e\x8a\x8a"
+ "\x8e\xb5\x5f\x90\x8e\xfe\x13\x23",
+ .rlen = 53 + 16,
+ },
+};
+
+static struct aead_testvec hmac_md5_ecb_cipher_null_dec_tv_template[] = {
+ {
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x00" /* enc key length */
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .klen = 8 + 16 + 0,
+ .iv = "",
+ .input = "\x01\x23\x45\x67\x89\xab\xcd\xef"
+ "\xaa\x42\xfe\x43\x8d\xea\xa3\x5a"
+ "\xb9\x3d\x9f\xb1\xa3\x8e\x9b\xae",
+ .ilen = 8 + 16,
+ .result = "\x01\x23\x45\x67\x89\xab\xcd\xef",
+ .rlen = 8,
+ }, {
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x00" /* enc key length */
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .klen = 8 + 16 + 0,
+ .iv = "",
+ .input = "Network Security People Have A Strange Sense Of Humor"
+ "\x73\xa5\x3e\x1c\x08\x0e\x8a\x8a"
+ "\x8e\xb5\x5f\x90\x8e\xfe\x13\x23",
+ .ilen = 53 + 16,
+ .result = "Network Security People Have A Strange Sense Of Humor",
+ .rlen = 53,
+ },
+};
+
static struct aead_testvec hmac_sha1_aes_cbc_enc_tv_template[] = {
{ /* RFC 3602 Case 1 */
#ifdef __LITTLE_ENDIAN
@@ -13876,6 +14196,98 @@ static struct aead_testvec hmac_sha1_aes_cbc_enc_tv_template[] = {
},
};
+static struct aead_testvec hmac_sha1_ecb_cipher_null_enc_tv_template[] = {
+ { /* Input data from RFC 2410 Case 1 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x00" /* enc key length */
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00",
+ .klen = 8 + 20 + 0,
+ .iv = "",
+ .input = "\x01\x23\x45\x67\x89\xab\xcd\xef",
+ .ilen = 8,
+ .result = "\x01\x23\x45\x67\x89\xab\xcd\xef"
+ "\x40\xc3\x0a\xa1\xc9\xa0\x28\xab"
+ "\x99\x5e\x19\x04\xd1\x72\xef\xb8"
+ "\x8c\x5e\xe4\x08",
+ .rlen = 8 + 20,
+ }, { /* Input data from RFC 2410 Case 2 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x00" /* enc key length */
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00",
+ .klen = 8 + 20 + 0,
+ .iv = "",
+ .input = "Network Security People Have A Strange Sense Of Humor",
+ .ilen = 53,
+ .result = "Network Security People Have A Strange Sense Of Humor"
+ "\x75\x6f\x42\x1e\xf8\x50\x21\xd2"
+ "\x65\x47\xee\x8e\x1a\xef\x16\xf6"
+ "\x91\x56\xe4\xd6",
+ .rlen = 53 + 20,
+ },
+};
+
+static struct aead_testvec hmac_sha1_ecb_cipher_null_dec_tv_template[] = {
+ {
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x00" /* enc key length */
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00",
+ .klen = 8 + 20 + 0,
+ .iv = "",
+ .input = "\x01\x23\x45\x67\x89\xab\xcd\xef"
+ "\x40\xc3\x0a\xa1\xc9\xa0\x28\xab"
+ "\x99\x5e\x19\x04\xd1\x72\xef\xb8"
+ "\x8c\x5e\xe4\x08",
+ .ilen = 8 + 20,
+ .result = "\x01\x23\x45\x67\x89\xab\xcd\xef",
+ .rlen = 8,
+ }, {
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x00" /* enc key length */
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00",
+ .klen = 8 + 20 + 0,
+ .iv = "",
+ .input = "Network Security People Have A Strange Sense Of Humor"
+ "\x75\x6f\x42\x1e\xf8\x50\x21\xd2"
+ "\x65\x47\xee\x8e\x1a\xef\x16\xf6"
+ "\x91\x56\xe4\xd6",
+ .ilen = 53 + 20,
+ .result = "Network Security People Have A Strange Sense Of Humor",
+ .rlen = 53,
+ },
+};
+
static struct aead_testvec hmac_sha256_aes_cbc_enc_tv_template[] = {
{ /* RFC 3602 Case 1 */
#ifdef __LITTLE_ENDIAN
diff --git a/crypto/tls.c b/crypto/tls.c
new file mode 100644
index 0000000..38cd1ba
--- /dev/null
+++ b/crypto/tls.c
@@ -0,0 +1,582 @@
+/*
+ * Copyright 2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <crypto/aead.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/authenc.h>
+#include <crypto/scatterwalk.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/rtnetlink.h>
+
+struct tls_instance_ctx {
+ struct crypto_ahash_spawn auth;
+ struct crypto_skcipher_spawn enc;
+};
+
+struct crypto_tls_ctx {
+ unsigned int reqoff;
+ struct crypto_ahash *auth;
+ struct crypto_ablkcipher *enc;
+};
+
+struct tls_request_ctx {
+ /*
+ * cryptlen holds the payload length in the case of encryption or
+ * payload_len + icv_len + padding_len in case of decryption
+ */
+ unsigned int cryptlen;
+ /* working space for partial results */
+ struct scatterlist icv[2];
+ struct scatterlist cipher[2];
+ char tail[];
+};
+
+struct async_op {
+ struct completion completion;
+ int err;
+};
+
+static void tls_async_op_done(struct crypto_async_request *req, int err)
+{
+ struct async_op *areq = req->data;
+
+ if (err == -EINPROGRESS)
+ return;
+
+ areq->err = err;
+ complete(&areq->completion);
+}
+
+static int crypto_tls_setkey(struct crypto_aead *tls, const u8 *key,
+ unsigned int keylen)
+{
+ unsigned int authkeylen;
+ unsigned int enckeylen;
+ struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
+ struct crypto_ahash *auth = ctx->auth;
+ struct crypto_ablkcipher *enc = ctx->enc;
+ struct rtattr *rta = (void *)key;
+ struct crypto_authenc_key_param *param;
+ int err = -EINVAL;
+
+ if (!RTA_OK(rta, keylen))
+ goto badkey;
+ if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ goto badkey;
+ if (RTA_PAYLOAD(rta) < sizeof(*param))
+ goto badkey;
+
+ param = RTA_DATA(rta);
+ enckeylen = be32_to_cpu(param->enckeylen);
+
+ key += RTA_ALIGN(rta->rta_len);
+ keylen -= RTA_ALIGN(rta->rta_len);
+
+ if (keylen < enckeylen)
+ goto badkey;
+
+ authkeylen = keylen - enckeylen;
+
+ crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
+ crypto_ahash_set_flags(auth, crypto_aead_get_flags(tls) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_ahash_setkey(auth, key, authkeylen);
+ crypto_aead_set_flags(tls, crypto_ahash_get_flags(auth) &
+ CRYPTO_TFM_RES_MASK);
+
+ if (err)
+ goto out;
+
+ crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
+ crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(tls) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_ablkcipher_setkey(enc, key + authkeylen, enckeylen);
+ crypto_aead_set_flags(tls, crypto_ablkcipher_get_flags(enc) &
+ CRYPTO_TFM_RES_MASK);
+out:
+ return err;
+
+badkey:
+ crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ goto out;
+}
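The key blob parsed above follows the authenc() convention: an rtattr of type CRYPTO_AUTHENC_KEYA_PARAM carrying the big-endian encryption key length, followed by the authentication key and then the encryption key. As a rough illustration of what a caller would hand to crypto_aead_setkey(), here is a minimal, hypothetical kernel-side helper (not part of this patch; the name tls_pack_key and its signature are invented for the example):

#include <crypto/authenc.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical helper: pack authkey || enckey into the blob parsed above */
static u8 *tls_pack_key(const u8 *authkey, unsigned int authkeylen,
			const u8 *enckey, unsigned int enckeylen,
			unsigned int *blob_len)
{
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	unsigned int len;
	u8 *blob;

	len = RTA_SPACE(sizeof(*param)) + authkeylen + enckeylen;
	blob = kzalloc(len, GFP_KERNEL);
	if (!blob)
		return NULL;

	rta = (struct rtattr *)blob;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	param->enckeylen = cpu_to_be32(enckeylen);

	memcpy(blob + RTA_SPACE(sizeof(*param)), authkey, authkeylen);
	memcpy(blob + RTA_SPACE(sizeof(*param)) + authkeylen,
	       enckey, enckeylen);

	*blob_len = len;
	return blob;
}

The returned blob is what crypto_tls_setkey() receives through crypto_aead_setkey(); the caller frees it afterwards.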
+
+/**
+ * crypto_tls_genicv - Calculate hmac digest for a TLS record
+ * @hash: (output) buffer to save the digest into
+ * @src: (input) scatterlist with the payload data
+ * @srclen: (input) size of the payload data
+ * @req: (input) aead request (with pointers to associated data)
+ **/
+static int crypto_tls_genicv(u8 *hash, struct scatterlist *src,
+ unsigned int srclen, struct aead_request *req)
+{
+ struct crypto_aead *tls = crypto_aead_reqtfm(req);
+ struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
+ struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
+ struct scatterlist *assoc = req->assoc;
+ struct scatterlist *icv = treq_ctx->icv;
+ struct async_op ahash_op;
+ struct ahash_request *ahreq = (void *)(treq_ctx->tail + ctx->reqoff);
+ unsigned int flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+ int err = -EBADMSG;
+
+	/*
+	 * Bail out since the icv array provides only two scatterlist entries
+	 * for chaining. Also check that the request assoclen matches the
+	 * length of the associated data scatterlist
+	 */
+ if (!req->assoclen || !sg_is_last(assoc) ||
+ req->assoclen != assoc->length)
+ return err;
+
+	/*
+	 * Prepend the associated data to the source scatterlist. If the source
+	 * is empty, use the associated data scatterlist directly
+	 */
+ if (srclen) {
+ sg_init_table(icv, 2);
+ sg_set_page(icv, sg_page(assoc), assoc->length, assoc->offset);
+ scatterwalk_sg_chain(icv, 2, src);
+ } else {
+ icv = assoc;
+ }
+ srclen += assoc->length;
+
+ init_completion(&ahash_op.completion);
+
+ /* the hash transform to be executed comes from the original request */
+ ahash_request_set_tfm(ahreq, ctx->auth);
+ /* prepare the hash request with input data and result pointer */
+ ahash_request_set_crypt(ahreq, icv, hash, srclen);
+ /* set the notifier for when the async hash function returns */
+ ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
+ tls_async_op_done, &ahash_op);
+
+ /* Calculate the digest on the given data. The result is put in hash */
+ err = crypto_ahash_digest(ahreq);
+ if (err == -EINPROGRESS) {
+ err = wait_for_completion_interruptible(&ahash_op.completion);
+ if (!err)
+ err = ahash_op.err;
+ }
+
+ return err;
+}
+
+/**
+ * crypto_tls_gen_padicv - Calculate and pad hmac digest for a TLS record
+ * @hash: (output) buffer to save the digest and padding into
+ * @phashlen: (output) the size of digest + padding
+ * @req: (input) aead request
+ **/
+static int crypto_tls_gen_padicv(u8 *hash, unsigned int *phashlen,
+ struct aead_request *req)
+{
+ struct crypto_aead *tls = crypto_aead_reqtfm(req);
+ unsigned int hash_size = crypto_aead_authsize(tls);
+ unsigned int block_size = crypto_aead_blocksize(tls);
+ unsigned int srclen = req->cryptlen + hash_size;
+ unsigned int padlen;
+ int err;
+
+ err = crypto_tls_genicv(hash, req->src, req->cryptlen, req);
+ if (err)
+ goto out;
+
+ /* add padding after digest */
+ padlen = block_size - (srclen % block_size);
+ memset(hash + hash_size, padlen - 1, padlen);
+
+ *phashlen = hash_size + padlen;
+out:
+ return err;
+}
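A quick worked example of the arithmetic above, assuming hmac(sha1) (20-byte digest) and cbc(aes) (16-byte blocks): for a 32-byte payload, srclen = 32 + 20 = 52, so padlen = 16 - (52 % 16) = 12 and each of the 12 padding bytes (including the final padding-length byte) is written as 12 - 1 = 0x0b. *phashlen becomes 20 + 12 = 32, and the 32 + 32 = 64 bytes later handed to the cipher are block aligned.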
+
+static int crypto_tls_encrypt(struct aead_request *req)
+{
+ struct crypto_aead *tls = crypto_aead_reqtfm(req);
+ struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
+ struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
+
+ unsigned int cryptlen, phashlen;
+ struct scatterlist *cipher = treq_ctx->cipher;
+ struct scatterlist *sg, *src_last = NULL;
+ int err;
+ /*
+ * The hash and the cipher are applied at different times and their
+ * requests can use the same memory space without interference
+ */
+ struct ablkcipher_request *abreq = (void *)(treq_ctx->tail +
+ ctx->reqoff);
+ /*
+ * The hash result is saved at the beginning of the tls request and is
+ * aligned as required by the hash transform. Enough space was
+	 * allocated in crypto_tls_init_tfm to accommodate the difference. The
+ * requests themselves start later at treq_ctx->tail + ctx->reqoff so
+ * the result is not overwritten by the second (cipher) request
+ */
+ u8 *hash = treq_ctx->tail;
+ hash = (u8 *)ALIGN((unsigned long)hash +
+ crypto_ahash_alignmask(ctx->auth),
+ crypto_ahash_alignmask(ctx->auth) + 1);
+
+ /*
+ * STEP 1: create ICV together with necessary padding
+ */
+ err = crypto_tls_gen_padicv(hash, &phashlen, req);
+ if (err)
+ return err;
+
+ /*
+	 * STEP 2: Combine the hash and padding with the payload, depending on
+	 * the form in which it arrives. Scatter tables must have at least one
+	 * page of data before chaining with another table and cannot have an
+	 * empty data page. The following code addresses these requirements.
+	 *
+	 * If source and destination are the same, the hash is copied directly
+	 * after the payload, since the buffer must already be large enough for
+	 * the encrypted result. For a different destination there are several
+	 * cases to check. If the payload is empty, only the hash is encrypted;
+	 * otherwise the payload scatterlist is merged with the hash. A special
+	 * merging case is when the payload has only one page of data. In that
+	 * case the payload page is moved to another scatterlist and prepared
+	 * there for encryption.
+ */
+
+ if (req->src == req->dst) {
+ scatterwalk_map_and_copy(hash, req->src, req->cryptlen,
+ phashlen, 1);
+ } else {
+ if (req->cryptlen) {
+ sg_init_table(cipher, 2);
+ sg_set_buf(cipher + 1, hash, phashlen);
+ if (sg_is_last(req->src)) {
+ sg_set_page(cipher, sg_page(req->src),
+ req->src->length, req->src->offset);
+ req->src = cipher;
+ } else {
+ for (sg = req->src; sg; sg = sg_next(sg))
+ src_last = sg;
+ sg_set_page(cipher, sg_page(src_last),
+ src_last->length, src_last->offset);
+ scatterwalk_sg_chain(src_last, 1, cipher);
+ }
+ } else {
+ sg_init_one(req->src, hash, phashlen);
+ }
+ }
+
+ /*
+ * STEP 3: encrypt the frame and return the result
+ */
+ cryptlen = req->cryptlen + phashlen;
+ ablkcipher_request_set_tfm(abreq, ctx->enc);
+ ablkcipher_request_set_crypt(abreq, req->src, req->dst, cryptlen,
+ req->iv);
+ /* set the callback for encryption request termination */
+ ablkcipher_request_set_callback(abreq, aead_request_flags(req),
+ req->base.complete, req->base.data);
+ /*
+ * Apply the cipher transform. The result will be in req->dst when the
+	 * asynchronous call terminates
+ */
+ err = crypto_ablkcipher_encrypt(abreq);
+
+ return err;
+}
+
+static int crypto_tls_decrypt(struct aead_request *req)
+{
+ struct crypto_aead *tls = crypto_aead_reqtfm(req);
+ struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
+ struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
+ struct scatterlist *assoc = req->assoc;
+ unsigned int cryptlen = req->cryptlen;
+ unsigned int hash_size = crypto_aead_authsize(tls);
+ unsigned int block_size = crypto_aead_blocksize(tls);
+ struct ablkcipher_request *abreq = (void *)(treq_ctx->tail +
+ ctx->reqoff);
+ u8 padding[255]; /* padding can be 0-255 bytes */
+ u8 pad_size;
+ u16 *len_field;
+ u8 *ihash, *hash = treq_ctx->tail;
+
+ int paderr = 0;
+ int err = -EINVAL;
+ int i;
+ struct async_op ciph_op;
+
+	/*
+	 * Rule out bad packets. The input packet length must be at least one
+	 * byte more than the hash size and a multiple of the cipher block size
+	 */
+ if (cryptlen <= hash_size || cryptlen % block_size)
+ goto out;
+
+ /*
+ * Step 1 - Decrypt the source
+ */
+ init_completion(&ciph_op.completion);
+
+ ablkcipher_request_set_tfm(abreq, ctx->enc);
+ ablkcipher_request_set_callback(abreq, aead_request_flags(req),
+ tls_async_op_done, &ciph_op);
+ ablkcipher_request_set_crypt(abreq, req->src, req->dst, cryptlen,
+ req->iv);
+ err = crypto_ablkcipher_decrypt(abreq);
+ if (err == -EINPROGRESS) {
+ err = wait_for_completion_interruptible(&ciph_op.completion);
+ if (!err)
+ err = ciph_op.err;
+ }
+ if (err)
+ goto out;
+
+ /*
+ * Step 2 - Verify padding
+ * Retrieve the last byte of the payload; this is the padding size
+ */
+ cryptlen -= 1;
+ scatterwalk_map_and_copy(&pad_size, req->dst, cryptlen, 1, 0);
+
+	/*
+	 * For an impossible padding size, continue as if there were no padding
+	 * and flag a MAC error, as the TLS RFC recommends
+	 */
+ if (cryptlen < pad_size + hash_size) {
+ pad_size = 0;
+ paderr = -EBADMSG;
+ }
+ cryptlen -= pad_size;
+ scatterwalk_map_and_copy(padding, req->dst, cryptlen, pad_size, 0);
+
+	/* Every padding byte must be equal to pad_size. Verify all of them */
+ for (i = 0; i < pad_size; i++)
+ if (padding[i] != pad_size)
+ paderr = -EBADMSG;
+
+ /*
+ * Step 3 - Verify hash
+ * Align the digest result as required by the hash transform. Enough
+ * space was allocated in crypto_tls_init_tfm
+ */
+ hash = (u8 *)ALIGN((unsigned long)hash +
+ crypto_ahash_alignmask(ctx->auth),
+ crypto_ahash_alignmask(ctx->auth) + 1);
+	/*
+	 * The last two bytes of the associated data form the TLS record length
+	 * field. It must be updated with the length of the cleartext message
+	 * before the hash is calculated.
+	 */
+ len_field = sg_virt(assoc) + assoc->length - 2;
+ cryptlen -= hash_size;
+ *len_field = htons(cryptlen);
+
+ /* This is the hash from the decrypted packet. Save it for later */
+ ihash = hash + hash_size;
+ scatterwalk_map_and_copy(ihash, req->dst, cryptlen, hash_size, 0);
+
+ /* Now compute and compare our ICV with the one from the packet */
+ err = crypto_tls_genicv(hash, req->dst, cryptlen, req);
+ if (!err)
+ err = memcmp(hash, ihash, hash_size) ? -EBADMSG : 0;
+
+ /* return the first found error */
+ if (paderr)
+ err = paderr;
+
+out:
+ aead_request_complete(req, err);
+ return err;
+}
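The decrypt path (and the genicv helper it shares with encryption) assumes the caller supplies the TLS 1.0 MAC pseudo-header as the associated data, with the record length in its final two bytes so it can be rewritten to the cleartext length above. A hypothetical packed layout for that 13-byte header, shown only as a reading aid:

#include <linux/types.h>

/* Illustrative only: TLS 1.0 MAC pseudo-header passed via req->assoc */
struct tls10_assoc_hdr {
	__be64 seq_num;		/* implicit 64-bit record sequence number */
	u8     type;		/* ContentType, e.g. 23 for application_data */
	u8     version[2];	/* { 3, 1 } for TLS 1.0 */
	__be16 length;		/* rewritten to the cleartext length before MAC */
} __packed;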
+
+static int crypto_tls_init_tfm(struct crypto_tfm *tfm)
+{
+ struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
+ struct tls_instance_ctx *ictx = crypto_instance_ctx(inst);
+ struct crypto_tls_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_ahash *auth;
+ struct crypto_ablkcipher *enc;
+ int err;
+
+ auth = crypto_spawn_ahash(&ictx->auth);
+ if (IS_ERR(auth))
+ return PTR_ERR(auth);
+
+ enc = crypto_spawn_skcipher(&ictx->enc);
+ err = PTR_ERR(enc);
+ if (IS_ERR(enc))
+ goto err_free_ahash;
+
+ ctx->auth = auth;
+ ctx->enc = enc;
+ /*
+ * Allow enough space for two digests. The two digests will be compared
+ * during the decryption phase. One will come from the decrypted packet
+ * and the other will be calculated. For encryption, one digest is
+ * padded (up to a cipher blocksize) and chained with the payload
+ */
+ ctx->reqoff = ALIGN(crypto_ahash_digestsize(auth) +
+ crypto_ahash_alignmask(auth),
+ crypto_ahash_alignmask(auth) + 1) +
+ max(crypto_ahash_digestsize(auth),
+ crypto_ablkcipher_blocksize(enc));
+
+ tfm->crt_aead.reqsize = sizeof(struct tls_request_ctx) + ctx->reqoff +
+ max_t(unsigned int,
+ crypto_ahash_reqsize(auth) +
+ sizeof(struct ahash_request),
+ crypto_ablkcipher_reqsize(enc) +
+ sizeof(struct ablkcipher_request));
+
+ return 0;
+
+err_free_ahash:
+ crypto_free_ahash(auth);
+ return err;
+}
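For reference, the per-request memory reserved here is laid out roughly as follows (offsets as computed above and used by the encrypt/decrypt paths; this sketch is a reading aid, not part of the patch):

	treq_ctx->tail
	  |-- alignment padding up to the ahash alignmask
	  |-- digest computed by crypto_tls_genicv()       (digestsize bytes)
	  |-- digest taken from the packet (decrypt) or
	  |   block padding (encrypt)              (max(digestsize, blocksize))
	  +-- ctx->reqoff: ahash_request / ablkcipher_request (shared space)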
+
+static void crypto_tls_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct crypto_tls_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_ahash(ctx->auth);
+ crypto_free_ablkcipher(ctx->enc);
+}
+
+static struct crypto_instance *crypto_tls_alloc(struct rtattr **tb)
+{
+ struct crypto_attr_type *algt;
+ struct crypto_instance *inst;
+ struct hash_alg_common *auth;
+ struct crypto_alg *auth_base;
+ struct crypto_alg *enc;
+ struct tls_instance_ctx *ctx;
+ const char *enc_name;
+ int err;
+
+ algt = crypto_get_attr_type(tb);
+ err = PTR_ERR(algt);
+ if (IS_ERR(algt))
+ return ERR_PTR(err);
+
+ if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
+ return ERR_PTR(-EINVAL);
+
+ auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
+ CRYPTO_ALG_TYPE_AHASH_MASK);
+ if (IS_ERR(auth))
+ return ERR_CAST(auth);
+
+ auth_base = &auth->base;
+
+ enc_name = crypto_attr_alg_name(tb[2]);
+ err = PTR_ERR(enc_name);
+ if (IS_ERR(enc_name))
+ goto out_put_auth;
+
+ inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
+ err = -ENOMEM;
+ if (!inst)
+ goto out_put_auth;
+
+ ctx = crypto_instance_ctx(inst);
+
+ err = crypto_init_ahash_spawn(&ctx->auth, auth, inst);
+ if (err)
+ goto err_free_inst;
+
+ crypto_set_skcipher_spawn(&ctx->enc, inst);
+ err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
+ crypto_requires_sync(algt->type,
+ algt->mask));
+ if (err)
+ goto err_drop_auth;
+
+ enc = crypto_skcipher_spawn_alg(&ctx->enc);
+
+ err = -ENAMETOOLONG;
+ if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
+ "tls10(%s,%s)", auth_base->cra_name, enc->cra_name) >=
+ CRYPTO_MAX_ALG_NAME)
+ goto err_drop_enc;
+
+ if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+ "tls10(%s,%s)", auth_base->cra_driver_name,
+ enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
+ goto err_drop_enc;
+
+ inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
+ inst->alg.cra_flags |= enc->cra_flags & CRYPTO_ALG_ASYNC;
+ /* priority calculation is taken from authenc.c */
+ inst->alg.cra_priority = enc->cra_priority * 10 +
+ auth_base->cra_priority;
+ inst->alg.cra_blocksize = enc->cra_blocksize;
+ inst->alg.cra_alignmask = auth_base->cra_alignmask | enc->cra_alignmask;
+ inst->alg.cra_type = &crypto_aead_type;
+
+ inst->alg.cra_aead.ivsize = enc->cra_ablkcipher.ivsize;
+ inst->alg.cra_aead.maxauthsize = auth->digestsize;
+
+ inst->alg.cra_ctxsize = sizeof(struct crypto_tls_ctx);
+
+ inst->alg.cra_init = crypto_tls_init_tfm;
+ inst->alg.cra_exit = crypto_tls_exit_tfm;
+
+ inst->alg.cra_aead.setkey = crypto_tls_setkey;
+ inst->alg.cra_aead.encrypt = crypto_tls_encrypt;
+ inst->alg.cra_aead.decrypt = crypto_tls_decrypt;
+
+out:
+ crypto_mod_put(auth_base);
+ return inst;
+
+err_drop_enc:
+ crypto_drop_skcipher(&ctx->enc);
+err_drop_auth:
+ crypto_drop_ahash(&ctx->auth);
+err_free_inst:
+ kfree(inst);
+out_put_auth:
+ inst = ERR_PTR(err);
+ goto out;
+}
+
+static void crypto_tls_free(struct crypto_instance *inst)
+{
+ struct tls_instance_ctx *ctx = crypto_instance_ctx(inst);
+
+ crypto_drop_skcipher(&ctx->enc);
+ crypto_drop_ahash(&ctx->auth);
+ kfree(inst);
+}
+
+static struct crypto_template crypto_tls_tmpl = {
+ .name = "tls10",
+ .alloc = crypto_tls_alloc,
+ .free = crypto_tls_free,
+ .module = THIS_MODULE,
+};
+
+static int __init crypto_tls_module_init(void)
+{
+ return crypto_register_template(&crypto_tls_tmpl);
+}
+
+static void __exit crypto_tls_module_exit(void)
+{
+ crypto_unregister_template(&crypto_tls_tmpl);
+}
+
+module_init(crypto_tls_module_init);
+module_exit(crypto_tls_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("TLS 1.0 record encryption");
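To close with a usage sketch: a minimal, hypothetical kernel-side caller of the template registered above, written against the legacy AEAD API this file uses (req->assoc / cra_aead). The function tls10_encrypt_one and its parameters are invented for the example, error handling is abbreviated, and a synchronous implementation is requested so that no completion callback is needed:

#include <crypto/aead.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int tls10_encrypt_one(struct scatterlist *assoc, unsigned int assoclen,
			     struct scatterlist *sg, unsigned int cryptlen,
			     u8 *iv, const u8 *key_blob, unsigned int keylen)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	int err;

	/* mask out async implementations so a NULL callback is safe */
	tfm = crypto_alloc_aead("tls10(hmac(sha1),cbc(aes))", 0,
				CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key_blob, keylen);
	if (err)
		goto out_free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	aead_request_set_callback(req, 0, NULL, NULL);
	aead_request_set_assoc(req, assoc, assoclen);
	/* in-place: sg must have room for payload + digest + padding */
	aead_request_set_crypt(req, sg, sg, cryptlen, iv);

	err = crypto_aead_encrypt(req);

	aead_request_free(req);
out_free_tfm:
	crypto_free_aead(tfm);
	return err;
}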