From 1a92b2ba339221a4afee43adf125fcc9a41353f7 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 14 Apr 2015 11:51:29 -0500 Subject: crypto: mv_cesa - ensure backlog is initialised backlog is not initialised, so in the case where cpg->eng_st != ENGINE_IDLE it is never assigned, which could lead to an illegal memory dereference in the statement: backlog->complete(backlog, -EINPROGRESS); Discovered with cppcheck static analysis: [drivers/crypto/mv_cesa.c:616]: (error) Uninitialized variable: backlog Signed-off-by: Colin Ian King Signed-off-by: Herbert Xu diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c index f91f15d..e63efbd 100644 --- a/drivers/crypto/mv_cesa.c +++ b/drivers/crypto/mv_cesa.c @@ -595,7 +595,7 @@ static int queue_manag(void *data) cpg->eng_st = ENGINE_IDLE; do { struct crypto_async_request *async_req = NULL; - struct crypto_async_request *backlog; + struct crypto_async_request *backlog = NULL; __set_current_state(TASK_INTERRUPTIBLE); -- cgit v0.10.2 From 8e0498d99f182dd06c012dfc62768e8ca0450adf Mon Sep 17 00:00:00 2001 From: Stephan Mueller Date: Fri, 17 Apr 2015 14:54:08 +0200 Subject: crypto: drbg - clear all temporary memory The buffer used for temporary data must be cleared entirely. With AES192 the buffer used is drbg_statelen(drbg) + drbg_blocklen(drbg) bytes long, as documented in the comment above drbg_ctr_df. This patch ensures that the temp buffer is completely wiped. Signed-off-by: Stephan Mueller Signed-off-by: Herbert Xu diff --git a/crypto/drbg.c b/crypto/drbg.c index b69409c..8d2944f 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -487,7 +487,7 @@ static int drbg_ctr_df(struct drbg_state *drbg, out: memset(iv, 0, drbg_blocklen(drbg)); - memset(temp, 0, drbg_statelen(drbg)); + memset(temp, 0, drbg_statelen(drbg) + drbg_blocklen(drbg)); memset(pad, 0, drbg_blocklen(drbg)); return ret; } -- cgit v0.10.2 From 2529bc371c4154edcf61d76b9691e5da961e23c5 Mon Sep 17 00:00:00 2001 From: LEROY Christophe Date: Fri, 17 Apr 2015 16:31:49 +0200 Subject: crypto: talitos - Use zero_entry to init descriptor ptrs to zero Use the zero_entry value to init the descriptor ptrs to zero instead of writing 0 in each field. Signed-off-by: Christophe Leroy Signed-off-by: Herbert Xu diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 857414a..7bf1b2b 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -1373,9 +1373,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc, int sg_count, ret; /* first DWORD empty */ - desc->ptr[0].len = 0; - to_talitos_ptr(&desc->ptr[0], 0); - desc->ptr[0].j_extent = 0; + desc->ptr[0] = zero_entry; /* cipher iv */ to_talitos_ptr(&desc->ptr[1], edesc->iv_dma); @@ -1445,9 +1443,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc, DMA_FROM_DEVICE); /* last DWORD empty */ - desc->ptr[6].len = 0; - to_talitos_ptr(&desc->ptr[6], 0); - desc->ptr[6].j_extent = 0; + desc->ptr[6] = zero_entry; ret = talitos_submit(dev, ctx->ch, desc, callback, areq); if (ret != -EINPROGRESS) { -- cgit v0.10.2 From 032d197eaaea13d220d141cdbca69e519b7af145 Mon Sep 17 00:00:00 2001 From: LEROY Christophe Date: Fri, 17 Apr 2015 16:31:51 +0200 Subject: crypto: talitos - Refactor the sg in/out chain allocation This patch refactors the handling of the input and output data, which is quite similar across several functions. Signed-off-by: Christophe Leroy Signed-off-by: Herbert Xu diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 7bf1b2b..5a7e345 100644 --- a/drivers/crypto/talitos.c +++
b/drivers/crypto/talitos.c @@ -1327,16 +1327,23 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, return 0; } +static void unmap_sg_talitos_ptr(struct device *dev, struct scatterlist *src, + struct scatterlist *dst, unsigned int len, + struct talitos_edesc *edesc) +{ + talitos_sg_unmap(dev, edesc, src, dst); +} + static void common_nonsnoop_unmap(struct device *dev, struct talitos_edesc *edesc, struct ablkcipher_request *areq) { unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE); + + unmap_sg_talitos_ptr(dev, areq->src, areq->dst, areq->nbytes, edesc); unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE); unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE); - talitos_sg_unmap(dev, edesc, areq->src, areq->dst); - if (edesc->dma_len) dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len, DMA_BIDIRECTIONAL); @@ -1358,6 +1365,65 @@ static void ablkcipher_done(struct device *dev, areq->base.complete(&areq->base, err); } +int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src, + unsigned int len, struct talitos_edesc *edesc, + enum dma_data_direction dir, struct talitos_ptr *ptr) +{ + int sg_count; + + ptr->len = cpu_to_be16(len); + ptr->j_extent = 0; + + sg_count = talitos_map_sg(dev, src, edesc->src_nents ? : 1, dir, + edesc->src_chained); + + if (sg_count == 1) { + to_talitos_ptr(ptr, sg_dma_address(src)); + } else { + sg_count = sg_to_link_tbl(src, sg_count, len, + &edesc->link_tbl[0]); + if (sg_count > 1) { + to_talitos_ptr(ptr, edesc->dma_link_tbl); + ptr->j_extent |= DESC_PTR_LNKTBL_JUMP; + dma_sync_single_for_device(dev, edesc->dma_link_tbl, + edesc->dma_len, + DMA_BIDIRECTIONAL); + } else { + /* Only one segment now, so no link tbl needed */ + to_talitos_ptr(ptr, sg_dma_address(src)); + } + } + return sg_count; +} + +void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst, + unsigned int len, struct talitos_edesc *edesc, + enum dma_data_direction dir, + struct talitos_ptr *ptr, int sg_count) +{ + ptr->len = cpu_to_be16(len); + ptr->j_extent = 0; + + if (dir != DMA_NONE) + sg_count = talitos_map_sg(dev, dst, edesc->dst_nents ? : 1, + dir, edesc->dst_chained); + + if (sg_count == 1) { + to_talitos_ptr(ptr, sg_dma_address(dst)); + } else { + struct talitos_ptr *link_tbl_ptr = + &edesc->link_tbl[edesc->src_nents + 1]; + + to_talitos_ptr(ptr, edesc->dma_link_tbl + + (edesc->src_nents + 1) * + sizeof(struct talitos_ptr)); + ptr->j_extent |= DESC_PTR_LNKTBL_JUMP; + sg_count = sg_to_link_tbl(dst, sg_count, len, link_tbl_ptr); + dma_sync_single_for_device(dev, edesc->dma_link_tbl, + edesc->dma_len, DMA_BIDIRECTIONAL); + } +} + static int common_nonsnoop(struct talitos_edesc *edesc, struct ablkcipher_request *areq, void (*callback) (struct device *dev, @@ -1387,56 +1453,16 @@ static int common_nonsnoop(struct talitos_edesc *edesc, /* * cipher in */ - desc->ptr[3].len = cpu_to_be16(cryptlen); - desc->ptr[3].j_extent = 0; - - sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1, - (areq->src == areq->dst) ? 
DMA_BIDIRECTIONAL - : DMA_TO_DEVICE, - edesc->src_chained); - - if (sg_count == 1) { - to_talitos_ptr(&desc->ptr[3], sg_dma_address(areq->src)); - } else { - sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen, - &edesc->link_tbl[0]); - if (sg_count > 1) { - to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl); - desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP; - dma_sync_single_for_device(dev, edesc->dma_link_tbl, - edesc->dma_len, - DMA_BIDIRECTIONAL); - } else { - /* Only one segment now, so no link tbl needed */ - to_talitos_ptr(&desc->ptr[3], - sg_dma_address(areq->src)); - } - } + sg_count = map_sg_in_talitos_ptr(dev, areq->src, cryptlen, edesc, + (areq->src == areq->dst) ? + DMA_BIDIRECTIONAL : DMA_TO_DEVICE, + &desc->ptr[3]); /* cipher out */ - desc->ptr[4].len = cpu_to_be16(cryptlen); - desc->ptr[4].j_extent = 0; - - if (areq->src != areq->dst) - sg_count = talitos_map_sg(dev, areq->dst, - edesc->dst_nents ? : 1, - DMA_FROM_DEVICE, edesc->dst_chained); - - if (sg_count == 1) { - to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->dst)); - } else { - struct talitos_ptr *link_tbl_ptr = - &edesc->link_tbl[edesc->src_nents + 1]; - - to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl + - (edesc->src_nents + 1) * - sizeof(struct talitos_ptr)); - desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; - sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen, - link_tbl_ptr); - dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, - edesc->dma_len, DMA_BIDIRECTIONAL); - } + map_sg_out_talitos_ptr(dev, areq->dst, cryptlen, edesc, + (areq->src == areq->dst) ? DMA_NONE + : DMA_FROM_DEVICE, + &desc->ptr[4], sg_count); /* iv out */ map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv, 0, @@ -1506,6 +1532,8 @@ static void common_nonsnoop_hash_unmap(struct device *dev, unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE); + unmap_sg_talitos_ptr(dev, req_ctx->psrc, NULL, 0, edesc); + /* When using hashctx-in, must unmap it. */ if (edesc->desc.ptr[1].len) unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], @@ -1515,8 +1543,6 @@ static void common_nonsnoop_hash_unmap(struct device *dev, unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE); - talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL); - if (edesc->dma_len) dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len, DMA_BIDIRECTIONAL); @@ -1555,7 +1581,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); struct device *dev = ctx->dev; struct talitos_desc *desc = &edesc->desc; - int sg_count, ret; + int ret; /* first DWORD empty */ desc->ptr[0] = zero_entry; @@ -1583,31 +1609,8 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, /* * data in */ - desc->ptr[3].len = cpu_to_be16(length); - desc->ptr[3].j_extent = 0; - - sg_count = talitos_map_sg(dev, req_ctx->psrc, - edesc->src_nents ? 
: 1, - DMA_TO_DEVICE, edesc->src_chained); - - if (sg_count == 1) { - to_talitos_ptr(&desc->ptr[3], sg_dma_address(req_ctx->psrc)); - } else { - sg_count = sg_to_link_tbl(req_ctx->psrc, sg_count, length, - &edesc->link_tbl[0]); - if (sg_count > 1) { - desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP; - to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl); - dma_sync_single_for_device(ctx->dev, - edesc->dma_link_tbl, - edesc->dma_len, - DMA_BIDIRECTIONAL); - } else { - /* Only one segment now, so no link tbl needed */ - to_talitos_ptr(&desc->ptr[3], - sg_dma_address(req_ctx->psrc)); - } - } + map_sg_in_talitos_ptr(dev, req_ctx->psrc, length, edesc, + DMA_TO_DEVICE, &desc->ptr[3]); /* fifth DWORD empty */ desc->ptr[4] = zero_entry; -- cgit v0.10.2 From edc6bd698a4d335db0962aac8ab07f2840786a02 Mon Sep 17 00:00:00 2001 From: LEROY Christophe Date: Fri, 17 Apr 2015 16:31:53 +0200 Subject: crypto: talitos - talitos_ptr renamed ptr for more readability Linux CodingStyle recommends using short names for local variables. ptr is just good enough for these 3-line functions. It helps keep single lines shorter than 80 characters. Signed-off-by: Christophe Leroy Signed-off-by: Herbert Xu diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 5a7e345..fca0aed 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -55,37 +55,37 @@ #include "talitos.h" -static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr) +static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr) { - talitos_ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr)); - talitos_ptr->eptr = upper_32_bits(dma_addr); + ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr)); + ptr->eptr = upper_32_bits(dma_addr); } /* * map virtual single (contiguous) pointer to h/w descriptor pointer */ static void map_single_talitos_ptr(struct device *dev, - struct talitos_ptr *talitos_ptr, + struct talitos_ptr *ptr, unsigned short len, void *data, unsigned char extent, enum dma_data_direction dir) { dma_addr_t dma_addr = dma_map_single(dev, data, len, dir); - talitos_ptr->len = cpu_to_be16(len); - to_talitos_ptr(talitos_ptr, dma_addr); - talitos_ptr->j_extent = extent; + ptr->len = cpu_to_be16(len); + to_talitos_ptr(ptr, dma_addr); + ptr->j_extent = extent; } /* * unmap bus single (contiguous) h/w descriptor pointer */ static void unmap_single_talitos_ptr(struct device *dev, - struct talitos_ptr *talitos_ptr, + struct talitos_ptr *ptr, enum dma_data_direction dir) { - dma_unmap_single(dev, be32_to_cpu(talitos_ptr->ptr), - be16_to_cpu(talitos_ptr->len), dir); + dma_unmap_single(dev, be32_to_cpu(ptr->ptr), + be16_to_cpu(ptr->len), dir); } static int reset_channel(struct device *dev, int ch) -- cgit v0.10.2 From 185eb79f6a6536c2b61d2638e138ca439bd327c3 Mon Sep 17 00:00:00 2001 From: LEROY Christophe Date: Fri, 17 Apr 2015 16:31:55 +0200 Subject: crypto: talitos - Add a helper function to clear j_extent field The j_extent field is specific to SEC2, so we add a helper function to clear it, so that SEC1 can redefine that function as a nop. Signed-off-by: Christophe Leroy Signed-off-by: Herbert Xu diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index fca0aed..c93f79b 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -61,6 +61,11 @@ static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr) ptr->eptr = upper_32_bits(dma_addr); } +static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr) +{ + ptr->j_extent = 0; +} + /* * map virtual single (contiguous)
pointer to h/w descriptor pointer */ @@ -1372,7 +1377,7 @@ int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src, int sg_count; ptr->len = cpu_to_be16(len); - ptr->j_extent = 0; + to_talitos_ptr_extent_clear(ptr); sg_count = talitos_map_sg(dev, src, edesc->src_nents ? : 1, dir, edesc->src_chained); @@ -1402,7 +1407,7 @@ void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst, struct talitos_ptr *ptr, int sg_count) { ptr->len = cpu_to_be16(len); - ptr->j_extent = 0; + to_talitos_ptr_extent_clear(ptr); if (dir != DMA_NONE) sg_count = talitos_map_sg(dev, dst, edesc->dst_nents ? : 1, @@ -1444,7 +1449,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc, /* cipher iv */ to_talitos_ptr(&desc->ptr[1], edesc->iv_dma); desc->ptr[1].len = cpu_to_be16(ivsize); - desc->ptr[1].j_extent = 0; + to_talitos_ptr_extent_clear(&desc->ptr[1]); /* cipher key */ map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen, -- cgit v0.10.2 From a2b35aa86eeaca6c1d416e3c43d44222c34727fb Mon Sep 17 00:00:00 2001 From: LEROY Christophe Date: Fri, 17 Apr 2015 16:31:57 +0200 Subject: crypto: talitos - remove param 'extent' in map_single_talitos_ptr() map_single_talitos_ptr() is always called with extent == 0, so lets remove this unused parameter Signed-off-by: Christophe Leroy Signed-off-by: Herbert Xu diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index c93f79b..81e5636 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -72,14 +72,13 @@ static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr) static void map_single_talitos_ptr(struct device *dev, struct talitos_ptr *ptr, unsigned short len, void *data, - unsigned char extent, enum dma_data_direction dir) { dma_addr_t dma_addr = dma_map_single(dev, data, len, dir); ptr->len = cpu_to_be16(len); to_talitos_ptr(ptr, dma_addr); - ptr->j_extent = extent; + to_talitos_ptr_extent_clear(ptr); } /* @@ -958,7 +957,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, /* hmac key */ map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key, - 0, DMA_TO_DEVICE); + DMA_TO_DEVICE); /* hmac data */ desc->ptr[1].len = cpu_to_be16(areq->assoclen + ivsize); @@ -1002,7 +1001,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, /* cipher key */ map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen, - (char *)&ctx->key + ctx->authkeylen, 0, + (char *)&ctx->key + ctx->authkeylen, DMA_TO_DEVICE); /* @@ -1080,7 +1079,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, } /* iv out */ - map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, 0, + map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, DMA_FROM_DEVICE); ret = talitos_submit(dev, ctx->ch, desc, callback, areq); @@ -1453,7 +1452,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc, /* cipher key */ map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen, - (char *)&ctx->key, 0, DMA_TO_DEVICE); + (char *)&ctx->key, DMA_TO_DEVICE); /* * cipher in @@ -1470,7 +1469,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc, &desc->ptr[4], sg_count); /* iv out */ - map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv, 0, + map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv, DMA_FROM_DEVICE); /* last DWORD empty */ @@ -1595,7 +1594,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, if (!req_ctx->first || req_ctx->swinit) { map_single_talitos_ptr(dev, &desc->ptr[1], req_ctx->hw_context_size, - (char 
*)req_ctx->hw_context, 0, + (char *)req_ctx->hw_context, DMA_TO_DEVICE); req_ctx->swinit = 0; } else { @@ -1607,7 +1606,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, /* HMAC key */ if (ctx->keylen) map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen, - (char *)&ctx->key, 0, DMA_TO_DEVICE); + (char *)&ctx->key, DMA_TO_DEVICE); else desc->ptr[2] = zero_entry; @@ -1624,11 +1623,11 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, if (req_ctx->last) map_single_talitos_ptr(dev, &desc->ptr[5], crypto_ahash_digestsize(tfm), - areq->result, 0, DMA_FROM_DEVICE); + areq->result, DMA_FROM_DEVICE); else map_single_talitos_ptr(dev, &desc->ptr[5], req_ctx->hw_context_size, - req_ctx->hw_context, 0, DMA_FROM_DEVICE); + req_ctx->hw_context, DMA_FROM_DEVICE); /* last DWORD empty */ desc->ptr[6] = zero_entry; -- cgit v0.10.2 From 538caf83374c0b24e2dfe2ca381354f207ca7cde Mon Sep 17 00:00:00 2001 From: LEROY Christophe Date: Fri, 17 Apr 2015 16:31:59 +0200 Subject: crypto: talitos - helper function for ptr len This patch adds a helper function for reads and writes of the len param of the talitos descriptor. This will help implement SEC1 later. Signed-off-by: Christophe Leroy Signed-off-by: Herbert Xu diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 81e5636..bca6ded 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -61,6 +61,16 @@ static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr) ptr->eptr = upper_32_bits(dma_addr); } +static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned short len) +{ + ptr->len = cpu_to_be16(len); +} + +static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr) +{ + return be16_to_cpu(ptr->len); +} + static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr) { ptr->j_extent = 0; @@ -76,7 +86,7 @@ static void map_single_talitos_ptr(struct device *dev, { dma_addr_t dma_addr = dma_map_single(dev, data, len, dir); - ptr->len = cpu_to_be16(len); + to_talitos_ptr_len(ptr, len); to_talitos_ptr(ptr, dma_addr); to_talitos_ptr_extent_clear(ptr); } @@ -89,7 +99,7 @@ static void unmap_single_talitos_ptr(struct device *dev, enum dma_data_direction dir) { dma_unmap_single(dev, be32_to_cpu(ptr->ptr), - be16_to_cpu(ptr->len), dir); + from_talitos_ptr_len(ptr), dir); } static int reset_channel(struct device *dev, int ch) @@ -1375,7 +1385,7 @@ int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src, { int sg_count; - ptr->len = cpu_to_be16(len); + to_talitos_ptr_len(ptr, len); to_talitos_ptr_extent_clear(ptr); sg_count = talitos_map_sg(dev, src, edesc->src_nents ? : 1, dir, @@ -1405,7 +1415,7 @@ void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst, enum dma_data_direction dir, struct talitos_ptr *ptr, int sg_count) { - ptr->len = cpu_to_be16(len); + to_talitos_ptr_len(ptr, len); to_talitos_ptr_extent_clear(ptr); if (dir != DMA_NONE) @@ -1447,7 +1457,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc, /* cipher iv */ to_talitos_ptr(&desc->ptr[1], edesc->iv_dma); - desc->ptr[1].len = cpu_to_be16(ivsize); + to_talitos_ptr_len(&desc->ptr[1], ivsize); to_talitos_ptr_extent_clear(&desc->ptr[1]); /* cipher key */ @@ -1539,11 +1549,11 @@ static void common_nonsnoop_hash_unmap(struct device *dev, unmap_sg_talitos_ptr(dev, req_ctx->psrc, NULL, 0, edesc); /* When using hashctx-in, must unmap it. 
*/ - if (edesc->desc.ptr[1].len) + if (from_talitos_ptr_len(&edesc->desc.ptr[1])) unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE); - if (edesc->desc.ptr[2].len) + if (from_talitos_ptr_len(&edesc->desc.ptr[2])) unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE); -- cgit v0.10.2 From 90490752eb03ddc1015233034ec26816f0ea0de3 Mon Sep 17 00:00:00 2001 From: LEROY Christophe Date: Fri, 17 Apr 2015 16:32:01 +0200 Subject: crypto: talitos - enhanced talitos_desc struct for SEC1 This patch enhances the talitos_desc struct with fields for SEC1. SEC1 has only one header field, and has a 'next_desc' field in addition. This mixed descriptor will continue to fit SEC2, and for SEC1 we will recopy hdr value into hdr1 value in talitos_submit() Signed-off-by: Christophe Leroy Signed-off-by: Herbert Xu diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h index 61a1405..f078da1 100644 --- a/drivers/crypto/talitos.h +++ b/drivers/crypto/talitos.h @@ -37,9 +37,17 @@ /* descriptor pointer entry */ struct talitos_ptr { - __be16 len; /* length */ - u8 j_extent; /* jump to sg link table and/or extent */ - u8 eptr; /* extended address */ + union { + struct { /* SEC2 format */ + __be16 len; /* length */ + u8 j_extent; /* jump to sg link table and/or extent*/ + u8 eptr; /* extended address */ + }; + struct { /* SEC1 format */ + __be16 res; + __be16 len1; /* length */ + }; + }; __be32 ptr; /* address */ }; @@ -53,8 +61,12 @@ static const struct talitos_ptr zero_entry = { /* descriptor */ struct talitos_desc { __be32 hdr; /* header high bits */ - __be32 hdr_lo; /* header low bits */ + union { + __be32 hdr_lo; /* header low bits */ + __be32 hdr1; /* header for SEC1 */ + }; struct talitos_ptr ptr[7]; /* ptr/len pair array */ + __be32 next_desc; /* next descriptor (SEC1) */ }; /** -- cgit v0.10.2 From 5b841a65dcae3ac76dc34e56c948c629de48c1a3 Mon Sep 17 00:00:00 2001 From: LEROY Christophe Date: Fri, 17 Apr 2015 16:32:03 +0200 Subject: crypto: talitos - add sub-choice in talitos CONFIG for SEC1 This patch adds a CONFIG option to select SEC1, SEC2+ or both. Signed-off-by: Christophe Leroy Signed-off-by: Herbert Xu diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 800bf41..8a76a01 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -222,6 +222,24 @@ config CRYPTO_DEV_TALITOS To compile this driver as a module, choose M here: the module will be called talitos. +config CRYPTO_DEV_TALITOS1 + bool "SEC1 (SEC 1.0 and SEC Lite 1.2)" + depends on CRYPTO_DEV_TALITOS + depends on PPC_8xx || PPC_82xx + default y + help + Say 'Y' here to use the Freescale Security Engine (SEC) version 1.0 + found on MPC82xx or the Freescale Security Engine (SEC Lite) + version 1.2 found on MPC8xx + +config CRYPTO_DEV_TALITOS2 + bool "SEC2+ (SEC version 2.0 or upper)" + depends on CRYPTO_DEV_TALITOS + default y if !PPC_8xx + help + Say 'Y' here to use the Freescale Security Engine (SEC) + version 2 and following as found on MPC83xx, MPC85xx, etc ... 
+ config CRYPTO_DEV_IXP4XX tristate "Driver for IXP4xx crypto hardware acceleration" depends on ARCH_IXP4XX && IXP4XX_QMGR && IXP4XX_NPE -- cgit v0.10.2 From 21590888490ce2a46ff4703b1503f562f4a59571 Mon Sep 17 00:00:00 2001 From: LEROY Christophe Date: Fri, 17 Apr 2015 16:32:05 +0200 Subject: crypto: talitos - Add a feature to tag SEC1 We add a new feature bit in the features field to mark devices compatible with "fsl,sec1.0". We also define a helper function called has_ftr_sec1() to help functions quickly determine whether they are running on SEC1 or SEC2+. When only SEC1 or SEC2 is compiled in, has_ftr_sec1() returns the trivial corresponding value. If both are compiled in, the features field is checked. Signed-off-by: Christophe Leroy Signed-off-by: Herbert Xu diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index bca6ded..db95023 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -2709,6 +2709,9 @@ static int talitos_probe(struct platform_device *ofdev) TALITOS_FTR_SHA224_HWINIT | TALITOS_FTR_HMAC_OK; + if (of_device_is_compatible(np, "fsl,sec1.0")) + priv->features |= TALITOS_FTR_SEC1; + priv->chan = kzalloc(sizeof(struct talitos_channel) * priv->num_channels, GFP_KERNEL); if (!priv->chan) { diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h index f078da1..b0bdb4e 100644 --- a/drivers/crypto/talitos.h +++ b/drivers/crypto/talitos.h @@ -156,6 +156,23 @@ extern int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc, #define TALITOS_FTR_HW_AUTH_CHECK 0x00000002 #define TALITOS_FTR_SHA224_HWINIT 0x00000004 #define TALITOS_FTR_HMAC_OK 0x00000008 +#define TALITOS_FTR_SEC1 0x00000010 + +/* + * If both CONFIG_CRYPTO_DEV_TALITOS1 and CONFIG_CRYPTO_DEV_TALITOS2 are + * defined, we check the features which are set according to the device tree. + * Otherwise, we answer true or false directly + */ +static inline bool has_ftr_sec1(struct talitos_private *priv) +{ +#if defined(CONFIG_CRYPTO_DEV_TALITOS1) && defined(CONFIG_CRYPTO_DEV_TALITOS2) + return priv->features & TALITOS_FTR_SEC1 ? true : false; +#elif defined(CONFIG_CRYPTO_DEV_TALITOS1) + return true; +#else + return false; +#endif +} /* * TALITOS_xxx_LO addresses point to the low data bits (32-63) of the register -- cgit v0.10.2 From 922f9dc8d36651abea51c4f2ff9c08ba4db692ff Mon Sep 17 00:00:00 2001 From: LEROY Christophe Date: Fri, 17 Apr 2015 16:32:07 +0200 Subject: crypto: talitos - fill in talitos descriptor iaw SEC1 or SEC2+ The talitos descriptor is slightly different for SEC1 and SEC2+, so let the helper functions that fill the descriptor take the type of SEC into account.
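In short, the change threads an is_sec1 flag through the low-level pointer helpers. The following is only a restatement of the code added by the hunks below, pulled out of the flattened diff for readability; it relies on the struct talitos_ptr union introduced in the earlier talitos.h patch and on the kernel byteorder helpers:

static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
			   bool is_sec1)
{
	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
	if (!is_sec1)
		ptr->eptr = upper_32_bits(dma_addr); /* SEC1 has no extended address byte */
}

static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned short len,
			       bool is_sec1)
{
	if (is_sec1) {
		ptr->res = 0;			/* SEC1 layout: reserved halfword + len1 */
		ptr->len1 = cpu_to_be16(len);
	} else {
		ptr->len = cpu_to_be16(len);	/* SEC2+ layout: len + j_extent + eptr */
	}
}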
Signed-off-by: Christophe Leroy Signed-off-by: Herbert Xu diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index db95023..678b528 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -55,25 +55,38 @@ #include "talitos.h" -static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr) +static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr, + bool is_sec1) { ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr)); - ptr->eptr = upper_32_bits(dma_addr); + if (!is_sec1) + ptr->eptr = upper_32_bits(dma_addr); } -static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned short len) +static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned short len, + bool is_sec1) { - ptr->len = cpu_to_be16(len); + if (is_sec1) { + ptr->res = 0; + ptr->len1 = cpu_to_be16(len); + } else { + ptr->len = cpu_to_be16(len); + } } -static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr) +static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr, + bool is_sec1) { - return be16_to_cpu(ptr->len); + if (is_sec1) + return be16_to_cpu(ptr->len1); + else + return be16_to_cpu(ptr->len); } -static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr) +static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr, bool is_sec1) { - ptr->j_extent = 0; + if (!is_sec1) + ptr->j_extent = 0; } /* @@ -85,10 +98,12 @@ static void map_single_talitos_ptr(struct device *dev, enum dma_data_direction dir) { dma_addr_t dma_addr = dma_map_single(dev, data, len, dir); + struct talitos_private *priv = dev_get_drvdata(dev); + bool is_sec1 = has_ftr_sec1(priv); - to_talitos_ptr_len(ptr, len); - to_talitos_ptr(ptr, dma_addr); - to_talitos_ptr_extent_clear(ptr); + to_talitos_ptr_len(ptr, len, is_sec1); + to_talitos_ptr(ptr, dma_addr, is_sec1); + to_talitos_ptr_extent_clear(ptr, is_sec1); } /* @@ -98,8 +113,11 @@ static void unmap_single_talitos_ptr(struct device *dev, struct talitos_ptr *ptr, enum dma_data_direction dir) { + struct talitos_private *priv = dev_get_drvdata(dev); + bool is_sec1 = has_ftr_sec1(priv); + dma_unmap_single(dev, be32_to_cpu(ptr->ptr), - from_talitos_ptr_len(ptr), dir); + from_talitos_ptr_len(ptr, is_sec1), dir); } static int reset_channel(struct device *dev, int ch) @@ -922,7 +940,7 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count, int n_sg = sg_count; while (n_sg--) { - to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg)); + to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg), 0); link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg)); link_tbl_ptr->j_extent = 0; link_tbl_ptr++; @@ -976,7 +994,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off]; to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off * - sizeof(struct talitos_ptr)); + sizeof(struct talitos_ptr), 0); desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP; /* assoc_nents - 1 entries for assoc, 1 for IV */ @@ -987,7 +1005,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, tbl_ptr += sg_count - 1; tbl_ptr->j_extent = 0; tbl_ptr++; - to_talitos_ptr(tbl_ptr, edesc->iv_dma); + to_talitos_ptr(tbl_ptr, edesc->iv_dma, 0); tbl_ptr->len = cpu_to_be16(ivsize); tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN; @@ -996,14 +1014,14 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, } else { if (areq->assoclen) to_talitos_ptr(&desc->ptr[1], - sg_dma_address(areq->assoc)); + sg_dma_address(areq->assoc), 0); else - to_talitos_ptr(&desc->ptr[1], 
edesc->iv_dma); + to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, 0); desc->ptr[1].j_extent = 0; } /* cipher iv */ - to_talitos_ptr(&desc->ptr[2], edesc->iv_dma); + to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, 0); desc->ptr[2].len = cpu_to_be16(ivsize); desc->ptr[2].j_extent = 0; /* Sync needed for the aead_givencrypt case */ @@ -1029,7 +1047,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, edesc->src_chained); if (sg_count == 1) { - to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src)); + to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0); } else { sg_link_tbl_len = cryptlen; @@ -1040,14 +1058,14 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, &edesc->link_tbl[0]); if (sg_count > 1) { desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; - to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl); + to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl, 0); dma_sync_single_for_device(dev, edesc->dma_link_tbl, edesc->dma_len, DMA_BIDIRECTIONAL); } else { /* Only one segment now, so no link tbl needed */ to_talitos_ptr(&desc->ptr[4], - sg_dma_address(areq->src)); + sg_dma_address(areq->src), 0); } } @@ -1061,13 +1079,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, DMA_FROM_DEVICE, edesc->dst_chained); if (sg_count == 1) { - to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst)); + to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0); } else { int tbl_off = edesc->src_nents + 1; struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off]; to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl + - tbl_off * sizeof(struct talitos_ptr)); + tbl_off * sizeof(struct talitos_ptr), 0); sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen, tbl_ptr); @@ -1082,7 +1100,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + (tbl_off + edesc->dst_nents + 1 + edesc->assoc_nents) * - sizeof(struct talitos_ptr)); + sizeof(struct talitos_ptr), 0); desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP; dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, edesc->dma_len, DMA_BIDIRECTIONAL); @@ -1384,27 +1402,29 @@ int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src, enum dma_data_direction dir, struct talitos_ptr *ptr) { int sg_count; + struct talitos_private *priv = dev_get_drvdata(dev); + bool is_sec1 = has_ftr_sec1(priv); - to_talitos_ptr_len(ptr, len); - to_talitos_ptr_extent_clear(ptr); + to_talitos_ptr_len(ptr, len, is_sec1); + to_talitos_ptr_extent_clear(ptr, is_sec1); sg_count = talitos_map_sg(dev, src, edesc->src_nents ? 
: 1, dir, edesc->src_chained); if (sg_count == 1) { - to_talitos_ptr(ptr, sg_dma_address(src)); + to_talitos_ptr(ptr, sg_dma_address(src), is_sec1); } else { sg_count = sg_to_link_tbl(src, sg_count, len, &edesc->link_tbl[0]); if (sg_count > 1) { - to_talitos_ptr(ptr, edesc->dma_link_tbl); + to_talitos_ptr(ptr, edesc->dma_link_tbl, 0); ptr->j_extent |= DESC_PTR_LNKTBL_JUMP; dma_sync_single_for_device(dev, edesc->dma_link_tbl, edesc->dma_len, DMA_BIDIRECTIONAL); } else { /* Only one segment now, so no link tbl needed */ - to_talitos_ptr(ptr, sg_dma_address(src)); + to_talitos_ptr(ptr, sg_dma_address(src), is_sec1); } } return sg_count; @@ -1415,22 +1435,25 @@ void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst, enum dma_data_direction dir, struct talitos_ptr *ptr, int sg_count) { - to_talitos_ptr_len(ptr, len); - to_talitos_ptr_extent_clear(ptr); + struct talitos_private *priv = dev_get_drvdata(dev); + bool is_sec1 = has_ftr_sec1(priv); + + to_talitos_ptr_len(ptr, len, is_sec1); + to_talitos_ptr_extent_clear(ptr, is_sec1); if (dir != DMA_NONE) sg_count = talitos_map_sg(dev, dst, edesc->dst_nents ? : 1, dir, edesc->dst_chained); if (sg_count == 1) { - to_talitos_ptr(ptr, sg_dma_address(dst)); + to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1); } else { struct talitos_ptr *link_tbl_ptr = &edesc->link_tbl[edesc->src_nents + 1]; to_talitos_ptr(ptr, edesc->dma_link_tbl + (edesc->src_nents + 1) * - sizeof(struct talitos_ptr)); + sizeof(struct talitos_ptr), 0); ptr->j_extent |= DESC_PTR_LNKTBL_JUMP; sg_count = sg_to_link_tbl(dst, sg_count, len, link_tbl_ptr); dma_sync_single_for_device(dev, edesc->dma_link_tbl, @@ -1451,14 +1474,16 @@ static int common_nonsnoop(struct talitos_edesc *edesc, unsigned int cryptlen = areq->nbytes; unsigned int ivsize = crypto_ablkcipher_ivsize(cipher); int sg_count, ret; + struct talitos_private *priv = dev_get_drvdata(dev); + bool is_sec1 = has_ftr_sec1(priv); /* first DWORD empty */ desc->ptr[0] = zero_entry; /* cipher iv */ - to_talitos_ptr(&desc->ptr[1], edesc->iv_dma); - to_talitos_ptr_len(&desc->ptr[1], ivsize); - to_talitos_ptr_extent_clear(&desc->ptr[1]); + to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1); + to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1); + to_talitos_ptr_extent_clear(&desc->ptr[1], is_sec1); /* cipher key */ map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen, @@ -1543,17 +1568,19 @@ static void common_nonsnoop_hash_unmap(struct device *dev, struct ahash_request *areq) { struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); + struct talitos_private *priv = dev_get_drvdata(dev); + bool is_sec1 = has_ftr_sec1(priv); unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE); unmap_sg_talitos_ptr(dev, req_ctx->psrc, NULL, 0, edesc); /* When using hashctx-in, must unmap it. 
*/ - if (from_talitos_ptr_len(&edesc->desc.ptr[1])) + if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1)) unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE); - if (from_talitos_ptr_len(&edesc->desc.ptr[2])) + if (from_talitos_ptr_len(&edesc->desc.ptr[2], is_sec1)) unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE); @@ -1596,6 +1623,8 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, struct device *dev = ctx->dev; struct talitos_desc *desc = &edesc->desc; int ret; + struct talitos_private *priv = dev_get_drvdata(dev); + bool is_sec1 = has_ftr_sec1(priv); /* first DWORD empty */ desc->ptr[0] = zero_entry; -- cgit v0.10.2 From 7d607c6a71f7a4905831fc2b1636e080533ab2db Mon Sep 17 00:00:00 2001 From: LEROY Christophe Date: Fri, 17 Apr 2015 16:32:09 +0200 Subject: crypto: talitos - adaptation of talitos_submit() for SEC1 SEC1 descriptor is a bit different to SEC2+ descriptor. talitos_submit() will have to copy hdr field into hdr1 field and send the descriptor starting at hdr1 up to next_desc. For SEC2, it remains unchanged and next_desc is just ignored. Signed-off-by: Christophe Leroy Signed-off-by: Herbert Xu diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 678b528..e6ea651 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -236,6 +236,7 @@ int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc, struct talitos_request *request; unsigned long flags; int head; + bool is_sec1 = has_ftr_sec1(priv); spin_lock_irqsave(&priv->chan[ch].head_lock, flags); @@ -249,8 +250,17 @@ int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc, request = &priv->chan[ch].fifo[head]; /* map descriptor and save caller data */ - request->dma_desc = dma_map_single(dev, desc, sizeof(*desc), - DMA_BIDIRECTIONAL); + if (is_sec1) { + desc->hdr1 = desc->hdr; + desc->next_desc = 0; + request->dma_desc = dma_map_single(dev, &desc->hdr1, + TALITOS_DESC_SIZE, + DMA_BIDIRECTIONAL); + } else { + request->dma_desc = dma_map_single(dev, desc, + TALITOS_DESC_SIZE, + DMA_BIDIRECTIONAL); + } request->callback = callback; request->context = context; @@ -282,16 +292,21 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch) struct talitos_request *request, saved_req; unsigned long flags; int tail, status; + bool is_sec1 = has_ftr_sec1(priv); spin_lock_irqsave(&priv->chan[ch].tail_lock, flags); tail = priv->chan[ch].tail; while (priv->chan[ch].fifo[tail].desc) { + __be32 hdr; + request = &priv->chan[ch].fifo[tail]; /* descriptors with their done bits set don't get the error */ rmb(); - if ((request->desc->hdr & DESC_HDR_DONE) == DESC_HDR_DONE) + hdr = is_sec1 ? 
request->desc->hdr1 : request->desc->hdr; + + if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE) status = 0; else if (!error) @@ -300,7 +315,7 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch) status = error; dma_unmap_single(dev, request->dma_desc, - sizeof(struct talitos_desc), + TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL); /* copy entries so we can call callback outside lock */ diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h index b0bdb4e..f827c04 100644 --- a/drivers/crypto/talitos.h +++ b/drivers/crypto/talitos.h @@ -69,6 +69,8 @@ struct talitos_desc { __be32 next_desc; /* next descriptor (SEC1) */ }; +#define TALITOS_DESC_SIZE (sizeof(struct talitos_desc) - sizeof(__be32)) + /** * talitos_request - descriptor submission request * @desc: descriptor pointer (kernel virtual) -- cgit v0.10.2 From 5fa7fa147b1572ae724819631ef442e1c9570f9a Mon Sep 17 00:00:00 2001 From: LEROY Christophe Date: Fri, 17 Apr 2015 16:32:11 +0200 Subject: crypto: talitos - base address for Execution Units SEC 1.0, 1.2 and 2.x+ have different EU base addresses, so we need to define pointers for each EU in the driver private data structure. The proper address is set by the probe function depending on the SEC type, in order to provide access to the proper address. Signed-off-by: Christophe Leroy Signed-off-by: Herbert Xu diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index e6ea651..6d77699 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -208,7 +208,7 @@ static int init_device(struct device *dev) /* disable integrity check error interrupts (use writeback instead) */ if (priv->features & TALITOS_FTR_HW_AUTH_CHECK) - setbits32(priv->reg + TALITOS_MDEUICR_LO, + setbits32(priv->reg_mdeu + TALITOS_EUICR_LO, TALITOS_MDEUICR_LO_ICE); return 0; @@ -424,44 +424,44 @@ static void report_eu_error(struct device *dev, int ch, u32 desc_hdr) switch (desc_hdr & DESC_HDR_SEL0_MASK) { case DESC_HDR_SEL0_AFEU: dev_err(dev, "AFEUISR 0x%08x_%08x\n", - in_be32(priv->reg + TALITOS_AFEUISR), - in_be32(priv->reg + TALITOS_AFEUISR_LO)); + in_be32(priv->reg_afeu + TALITOS_EUISR), + in_be32(priv->reg_afeu + TALITOS_EUISR_LO)); break; case DESC_HDR_SEL0_DEU: dev_err(dev, "DEUISR 0x%08x_%08x\n", - in_be32(priv->reg + TALITOS_DEUISR), - in_be32(priv->reg + TALITOS_DEUISR_LO)); + in_be32(priv->reg_deu + TALITOS_EUISR), + in_be32(priv->reg_deu + TALITOS_EUISR_LO)); break; case DESC_HDR_SEL0_MDEUA: case DESC_HDR_SEL0_MDEUB: dev_err(dev, "MDEUISR 0x%08x_%08x\n", - in_be32(priv->reg + TALITOS_MDEUISR), - in_be32(priv->reg + TALITOS_MDEUISR_LO)); + in_be32(priv->reg_mdeu + TALITOS_EUISR), + in_be32(priv->reg_mdeu + TALITOS_EUISR_LO)); break; case DESC_HDR_SEL0_RNG: dev_err(dev, "RNGUISR 0x%08x_%08x\n", - in_be32(priv->reg + TALITOS_RNGUISR), - in_be32(priv->reg + TALITOS_RNGUISR_LO)); + in_be32(priv->reg_rngu + TALITOS_ISR), + in_be32(priv->reg_rngu + TALITOS_ISR_LO)); break; case DESC_HDR_SEL0_PKEU: dev_err(dev, "PKEUISR 0x%08x_%08x\n", - in_be32(priv->reg + TALITOS_PKEUISR), - in_be32(priv->reg + TALITOS_PKEUISR_LO)); + in_be32(priv->reg_pkeu + TALITOS_EUISR), + in_be32(priv->reg_pkeu + TALITOS_EUISR_LO)); break; case DESC_HDR_SEL0_AESU: dev_err(dev, "AESUISR 0x%08x_%08x\n", - in_be32(priv->reg + TALITOS_AESUISR), - in_be32(priv->reg + TALITOS_AESUISR_LO)); + in_be32(priv->reg_aesu + TALITOS_EUISR), + in_be32(priv->reg_aesu + TALITOS_EUISR_LO)); break; case DESC_HDR_SEL0_CRCU: dev_err(dev, "CRCUISR 0x%08x_%08x\n", - in_be32(priv->reg + TALITOS_CRCUISR), - in_be32(priv->reg + 
TALITOS_CRCUISR_LO)); + in_be32(priv->reg_crcu + TALITOS_EUISR), + in_be32(priv->reg_crcu + TALITOS_EUISR_LO)); break; case DESC_HDR_SEL0_KEU: dev_err(dev, "KEUISR 0x%08x_%08x\n", - in_be32(priv->reg + TALITOS_KEUISR), - in_be32(priv->reg + TALITOS_KEUISR_LO)); + in_be32(priv->reg_pkeu + TALITOS_EUISR), + in_be32(priv->reg_pkeu + TALITOS_EUISR_LO)); break; } @@ -469,13 +469,13 @@ static void report_eu_error(struct device *dev, int ch, u32 desc_hdr) case DESC_HDR_SEL1_MDEUA: case DESC_HDR_SEL1_MDEUB: dev_err(dev, "MDEUISR 0x%08x_%08x\n", - in_be32(priv->reg + TALITOS_MDEUISR), - in_be32(priv->reg + TALITOS_MDEUISR_LO)); + in_be32(priv->reg_mdeu + TALITOS_EUISR), + in_be32(priv->reg_mdeu + TALITOS_EUISR_LO)); break; case DESC_HDR_SEL1_CRCU: dev_err(dev, "CRCUISR 0x%08x_%08x\n", - in_be32(priv->reg + TALITOS_CRCUISR), - in_be32(priv->reg + TALITOS_CRCUISR_LO)); + in_be32(priv->reg_crcu + TALITOS_EUISR), + in_be32(priv->reg_crcu + TALITOS_EUISR_LO)); break; } @@ -614,7 +614,7 @@ static int talitos_rng_data_present(struct hwrng *rng, int wait) int i; for (i = 0; i < 20; i++) { - ofl = in_be32(priv->reg + TALITOS_RNGUSR_LO) & + ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) & TALITOS_RNGUSR_LO_OFL; if (ofl || !wait) break; @@ -630,8 +630,8 @@ static int talitos_rng_data_read(struct hwrng *rng, u32 *data) struct talitos_private *priv = dev_get_drvdata(dev); /* rng fifo requires 64-bit accesses */ - *data = in_be32(priv->reg + TALITOS_RNGU_FIFO); - *data = in_be32(priv->reg + TALITOS_RNGU_FIFO_LO); + *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO); + *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO); return sizeof(u32); } @@ -642,8 +642,9 @@ static int talitos_rng_init(struct hwrng *rng) struct talitos_private *priv = dev_get_drvdata(dev); unsigned int timeout = TALITOS_TIMEOUT; - setbits32(priv->reg + TALITOS_RNGURCR_LO, TALITOS_RNGURCR_LO_SR); - while (!(in_be32(priv->reg + TALITOS_RNGUSR_LO) & TALITOS_RNGUSR_LO_RD) + setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR); + while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO) + & TALITOS_RNGUSR_LO_RD) && --timeout) cpu_relax(); if (timeout == 0) { @@ -652,7 +653,7 @@ static int talitos_rng_init(struct hwrng *rng) } /* start generating */ - setbits32(priv->reg + TALITOS_RNGUDSR_LO, 0); + setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0); return 0; } @@ -2687,6 +2688,7 @@ static int talitos_probe(struct platform_device *ofdev) struct talitos_private *priv; const unsigned int *prop; int i, err; + int stride; priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL); if (!priv) @@ -2756,6 +2758,31 @@ static int talitos_probe(struct platform_device *ofdev) if (of_device_is_compatible(np, "fsl,sec1.0")) priv->features |= TALITOS_FTR_SEC1; + if (of_device_is_compatible(np, "fsl,sec1.2")) { + priv->reg_deu = priv->reg + TALITOS12_DEU; + priv->reg_aesu = priv->reg + TALITOS12_AESU; + priv->reg_mdeu = priv->reg + TALITOS12_MDEU; + stride = TALITOS1_CH_STRIDE; + } else if (of_device_is_compatible(np, "fsl,sec1.0")) { + priv->reg_deu = priv->reg + TALITOS10_DEU; + priv->reg_aesu = priv->reg + TALITOS10_AESU; + priv->reg_mdeu = priv->reg + TALITOS10_MDEU; + priv->reg_afeu = priv->reg + TALITOS10_AFEU; + priv->reg_rngu = priv->reg + TALITOS10_RNGU; + priv->reg_pkeu = priv->reg + TALITOS10_PKEU; + stride = TALITOS1_CH_STRIDE; + } else { + priv->reg_deu = priv->reg + TALITOS2_DEU; + priv->reg_aesu = priv->reg + TALITOS2_AESU; + priv->reg_mdeu = priv->reg + TALITOS2_MDEU; + priv->reg_afeu = priv->reg + TALITOS2_AFEU; + priv->reg_rngu = priv->reg 
+ TALITOS2_RNGU; + priv->reg_pkeu = priv->reg + TALITOS2_PKEU; + priv->reg_keu = priv->reg + TALITOS2_KEU; + priv->reg_crcu = priv->reg + TALITOS2_CRCU; + stride = TALITOS2_CH_STRIDE; + } + priv->chan = kzalloc(sizeof(struct talitos_channel) * priv->num_channels, GFP_KERNEL); if (!priv->chan) { @@ -2767,7 +2794,7 @@ static int talitos_probe(struct platform_device *ofdev) priv->fifo_len = roundup_pow_of_two(priv->chfifo_len); for (i = 0; i < priv->num_channels; i++) { - priv->chan[i].reg = priv->reg + TALITOS_CH_STRIDE * (i + 1); + priv->chan[i].reg = priv->reg + stride * (i + 1); if (!priv->irq[1] || !(i & 1)) priv->chan[i].reg += TALITOS_CH_BASE_OFFSET; diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h index f827c04..4faa3b6 100644 --- a/drivers/crypto/talitos.h +++ b/drivers/crypto/talitos.h @@ -111,6 +111,14 @@ struct talitos_private { struct device *dev; struct platform_device *ofdev; void __iomem *reg; + void __iomem *reg_deu; + void __iomem *reg_aesu; + void __iomem *reg_mdeu; + void __iomem *reg_afeu; + void __iomem *reg_rngu; + void __iomem *reg_pkeu; + void __iomem *reg_keu; + void __iomem *reg_crcu; int irq[2]; /* SEC global registers lock */ @@ -206,7 +214,8 @@ static inline bool has_ftr_sec1(struct talitos_private *priv) /* channel register address stride */ #define TALITOS_CH_BASE_OFFSET 0x1000 /* default channel map base */ -#define TALITOS_CH_STRIDE 0x100 +#define TALITOS1_CH_STRIDE 0x1000 +#define TALITOS2_CH_STRIDE 0x100 /* channel configuration register */ #define TALITOS_CCCR 0x8 @@ -255,37 +264,46 @@ static inline bool has_ftr_sec1(struct talitos_private *priv) #define TALITOS_SCATTER 0xe0 #define TALITOS_SCATTER_LO 0xe4 +/* execution unit registers base */ +#define TALITOS2_DEU 0x2000 +#define TALITOS2_AESU 0x4000 +#define TALITOS2_MDEU 0x6000 +#define TALITOS2_AFEU 0x8000 +#define TALITOS2_RNGU 0xa000 +#define TALITOS2_PKEU 0xc000 +#define TALITOS2_KEU 0xe000 +#define TALITOS2_CRCU 0xf000 + +#define TALITOS12_AESU 0x4000 +#define TALITOS12_DEU 0x5000 +#define TALITOS12_MDEU 0x6000 + +#define TALITOS10_AFEU 0x8000 +#define TALITOS10_DEU 0xa000 +#define TALITOS10_MDEU 0xc000 +#define TALITOS10_RNGU 0xe000 +#define TALITOS10_PKEU 0x10000 +#define TALITOS10_AESU 0x12000 + /* execution unit interrupt status registers */ -#define TALITOS_DEUISR 0x2030 /* DES unit */ -#define TALITOS_DEUISR_LO 0x2034 -#define TALITOS_AESUISR 0x4030 /* AES unit */ -#define TALITOS_AESUISR_LO 0x4034 -#define TALITOS_MDEUISR 0x6030 /* message digest unit */ -#define TALITOS_MDEUISR_LO 0x6034 -#define TALITOS_MDEUICR 0x6038 /* interrupt control */ -#define TALITOS_MDEUICR_LO 0x603c +#define TALITOS_EUDSR 0x10 /* data size */ +#define TALITOS_EUDSR_LO 0x14 +#define TALITOS_EURCR 0x18 /* reset control*/ +#define TALITOS_EURCR_LO 0x1c +#define TALITOS_EUSR 0x28 /* rng status */ +#define TALITOS_EUSR_LO 0x2c +#define TALITOS_EUISR 0x30 +#define TALITOS_EUISR_LO 0x34 +#define TALITOS_EUICR 0x38 /* int. 
control */ +#define TALITOS_EUICR_LO 0x3c +#define TALITOS_EU_FIFO 0x800 /* output FIFO */ +#define TALITOS_EU_FIFO_LO 0x804 /* output FIFO */ +/* message digest unit */ #define TALITOS_MDEUICR_LO_ICE 0x4000 /* integrity check IRQ enable */ -#define TALITOS_AFEUISR 0x8030 /* arc4 unit */ -#define TALITOS_AFEUISR_LO 0x8034 -#define TALITOS_RNGUISR 0xa030 /* random number unit */ -#define TALITOS_RNGUISR_LO 0xa034 -#define TALITOS_RNGUSR 0xa028 /* rng status */ -#define TALITOS_RNGUSR_LO 0xa02c +/* random number unit */ #define TALITOS_RNGUSR_LO_RD 0x1 /* reset done */ #define TALITOS_RNGUSR_LO_OFL 0xff0000/* output FIFO length */ -#define TALITOS_RNGUDSR 0xa010 /* data size */ -#define TALITOS_RNGUDSR_LO 0xa014 -#define TALITOS_RNGU_FIFO 0xa800 /* output FIFO */ -#define TALITOS_RNGU_FIFO_LO 0xa804 /* output FIFO */ -#define TALITOS_RNGURCR 0xa018 /* reset control */ -#define TALITOS_RNGURCR_LO 0xa01c #define TALITOS_RNGURCR_LO_SR 0x1 /* software reset */ -#define TALITOS_PKEUISR 0xc030 /* public key unit */ -#define TALITOS_PKEUISR_LO 0xc034 -#define TALITOS_KEUISR 0xe030 /* kasumi unit */ -#define TALITOS_KEUISR_LO 0xe034 -#define TALITOS_CRCUISR 0xf030 /* cyclic redundancy check unit*/ -#define TALITOS_CRCUISR_LO 0xf034 #define TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256 0x28 #define TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512 0x48 -- cgit v0.10.2 From dd3c0987f5426d2df4a0c92de82dac65874bb9a4 Mon Sep 17 00:00:00 2001 From: LEROY Christophe Date: Fri, 17 Apr 2015 16:32:13 +0200 Subject: crypto: talitos - adapt interrupts and reset functions to SEC1 This patch adapts the interrupt handling and reset functions for SEC1. On SEC1, registers are almost the same as on SEC2+, but some bits are located at different places. So we need to define TALITOS1 and TALITOS2 versions of some fields, and manage them according to whether it is SEC1 or SEC2. On SEC1, only one interrupt vector is dedicated to the SEC, so only interrupt_4ch is needed. On SEC1, interrupts are enabled by clearing the related bits in IMR, while on SEC2, interrupts are enabled by setting the bits in IMR. SEC1 also performs parity verification in the DES Unit. We have to disable this feature because the test vectors provided in the kernel have parity errors. In the reset functions, only SEC2 supports continuation after error; for SEC1, we have to reset in all cases. For error handling, the SEC2+ names have been kept, but the displayed text has been amended to reflect the exact meaning on SEC1.
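For the IMR difference described above, the init path ends up doing the following; this is restated from the init_device() hunk below, using the clrbits32()/setbits32() accessors and the TALITOS1_/TALITOS2_ constants defined in this series:

	/* enable channel done and error interrupts */
	if (is_sec1) {
		/* SEC1: interrupts are enabled by clearing the IMR mask bits */
		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
		/* disable DEU parity check (kernel test vectors carry parity errors) */
		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
	} else {
		/* SEC2+: interrupts are enabled by setting the IMR mask bits */
		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
	}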
Signed-off-by: Christophe Leroy Signed-off-by: Herbert Xu diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 6d77699..1265405 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -124,12 +124,23 @@ static int reset_channel(struct device *dev, int ch) { struct talitos_private *priv = dev_get_drvdata(dev); unsigned int timeout = TALITOS_TIMEOUT; + bool is_sec1 = has_ftr_sec1(priv); - setbits32(priv->chan[ch].reg + TALITOS_CCCR, TALITOS_CCCR_RESET); + if (is_sec1) { + setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, + TALITOS1_CCCR_LO_RESET); - while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) & TALITOS_CCCR_RESET) - && --timeout) - cpu_relax(); + while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) & + TALITOS1_CCCR_LO_RESET) && --timeout) + cpu_relax(); + } else { + setbits32(priv->chan[ch].reg + TALITOS_CCCR, + TALITOS2_CCCR_RESET); + + while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) & + TALITOS2_CCCR_RESET) && --timeout) + cpu_relax(); + } if (timeout == 0) { dev_err(dev, "failed to reset channel %d\n", ch); @@ -152,11 +163,12 @@ static int reset_device(struct device *dev) { struct talitos_private *priv = dev_get_drvdata(dev); unsigned int timeout = TALITOS_TIMEOUT; - u32 mcr = TALITOS_MCR_SWR; + bool is_sec1 = has_ftr_sec1(priv); + u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR; setbits32(priv->reg + TALITOS_MCR, mcr); - while ((in_be32(priv->reg + TALITOS_MCR) & TALITOS_MCR_SWR) + while ((in_be32(priv->reg + TALITOS_MCR) & mcr) && --timeout) cpu_relax(); @@ -180,6 +192,7 @@ static int init_device(struct device *dev) { struct talitos_private *priv = dev_get_drvdata(dev); int ch, err; + bool is_sec1 = has_ftr_sec1(priv); /* * Master reset @@ -203,8 +216,15 @@ static int init_device(struct device *dev) } /* enable channel done and error interrupts */ - setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT); - setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT); + if (is_sec1) { + clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT); + clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT); + /* disable parity error check in DEU (erroneous? test vect.) */ + setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE); + } else { + setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT); + setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT); + } /* disable integrity check error interrupts (use writeback instead) */ if (priv->features & TALITOS_FTR_HW_AUTH_CHECK) @@ -349,8 +369,37 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch) /* * process completed requests for channels that have done status */ -#define DEF_TALITOS_DONE(name, ch_done_mask) \ -static void talitos_done_##name(unsigned long data) \ +#define DEF_TALITOS1_DONE(name, ch_done_mask) \ +static void talitos1_done_##name(unsigned long data) \ +{ \ + struct device *dev = (struct device *)data; \ + struct talitos_private *priv = dev_get_drvdata(dev); \ + unsigned long flags; \ + \ + if (ch_done_mask & 0x10000000) \ + flush_channel(dev, 0, 0, 0); \ + if (priv->num_channels == 1) \ + goto out; \ + if (ch_done_mask & 0x40000000) \ + flush_channel(dev, 1, 0, 0); \ + if (ch_done_mask & 0x00010000) \ + flush_channel(dev, 2, 0, 0); \ + if (ch_done_mask & 0x00040000) \ + flush_channel(dev, 3, 0, 0); \ + \ +out: \ + /* At this point, all completed channels have been processed */ \ + /* Unmask done interrupts for channels completed later on. 
*/ \ + spin_lock_irqsave(&priv->reg_lock, flags); \ + clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \ + clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT); \ + spin_unlock_irqrestore(&priv->reg_lock, flags); \ +} + +DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE) + +#define DEF_TALITOS2_DONE(name, ch_done_mask) \ +static void talitos2_done_##name(unsigned long data) \ { \ struct device *dev = (struct device *)data; \ struct talitos_private *priv = dev_get_drvdata(dev); \ @@ -372,12 +421,13 @@ out: \ /* Unmask done interrupts for channels completed later on. */ \ spin_lock_irqsave(&priv->reg_lock, flags); \ setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \ - setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT); \ + setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT); \ spin_unlock_irqrestore(&priv->reg_lock, flags); \ } -DEF_TALITOS_DONE(4ch, TALITOS_ISR_4CHDONE) -DEF_TALITOS_DONE(ch0_2, TALITOS_ISR_CH_0_2_DONE) -DEF_TALITOS_DONE(ch1_3, TALITOS_ISR_CH_1_3_DONE) + +DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE) +DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE) +DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE) /* * locate current (offending) descriptor @@ -492,13 +542,21 @@ static void talitos_error(struct device *dev, u32 isr, u32 isr_lo) { struct talitos_private *priv = dev_get_drvdata(dev); unsigned int timeout = TALITOS_TIMEOUT; - int ch, error, reset_dev = 0, reset_ch = 0; + int ch, error, reset_dev = 0; u32 v, v_lo; + bool is_sec1 = has_ftr_sec1(priv); + int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */ for (ch = 0; ch < priv->num_channels; ch++) { /* skip channels without errors */ - if (!(isr & (1 << (ch * 2 + 1)))) - continue; + if (is_sec1) { + /* bits 29, 31, 17, 19 */ + if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6)))) + continue; + } else { + if (!(isr & (1 << (ch * 2 + 1)))) + continue; + } error = -EINVAL; @@ -518,23 +576,28 @@ static void talitos_error(struct device *dev, u32 isr, u32 isr_lo) if (v_lo & TALITOS_CCPSR_LO_MDTE) dev_err(dev, "master data transfer error\n"); if (v_lo & TALITOS_CCPSR_LO_SGDLZ) - dev_err(dev, "s/g data length zero error\n"); + dev_err(dev, is_sec1 ? "pointeur not complete error\n" + : "s/g data length zero error\n"); if (v_lo & TALITOS_CCPSR_LO_FPZ) - dev_err(dev, "fetch pointer zero error\n"); + dev_err(dev, is_sec1 ? "parity error\n" + : "fetch pointer zero error\n"); if (v_lo & TALITOS_CCPSR_LO_IDH) dev_err(dev, "illegal descriptor header error\n"); if (v_lo & TALITOS_CCPSR_LO_IEU) - dev_err(dev, "invalid execution unit error\n"); + dev_err(dev, is_sec1 ? 
"static assignment error\n" + : "invalid exec unit error\n"); if (v_lo & TALITOS_CCPSR_LO_EU) report_eu_error(dev, ch, current_desc_hdr(dev, ch)); - if (v_lo & TALITOS_CCPSR_LO_GB) - dev_err(dev, "gather boundary error\n"); - if (v_lo & TALITOS_CCPSR_LO_GRL) - dev_err(dev, "gather return/length error\n"); - if (v_lo & TALITOS_CCPSR_LO_SB) - dev_err(dev, "scatter boundary error\n"); - if (v_lo & TALITOS_CCPSR_LO_SRL) - dev_err(dev, "scatter return/length error\n"); + if (!is_sec1) { + if (v_lo & TALITOS_CCPSR_LO_GB) + dev_err(dev, "gather boundary error\n"); + if (v_lo & TALITOS_CCPSR_LO_GRL) + dev_err(dev, "gather return/length error\n"); + if (v_lo & TALITOS_CCPSR_LO_SB) + dev_err(dev, "scatter boundary error\n"); + if (v_lo & TALITOS_CCPSR_LO_SRL) + dev_err(dev, "scatter return/length error\n"); + } flush_channel(dev, ch, error, reset_ch); @@ -542,10 +605,10 @@ static void talitos_error(struct device *dev, u32 isr, u32 isr_lo) reset_channel(dev, ch); } else { setbits32(priv->chan[ch].reg + TALITOS_CCCR, - TALITOS_CCCR_CONT); + TALITOS2_CCCR_CONT); setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0); while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) & - TALITOS_CCCR_CONT) && --timeout) + TALITOS2_CCCR_CONT) && --timeout) cpu_relax(); if (timeout == 0) { dev_err(dev, "failed to restart channel %d\n", @@ -554,9 +617,14 @@ static void talitos_error(struct device *dev, u32 isr, u32 isr_lo) } } } - if (reset_dev || isr & ~TALITOS_ISR_4CHERR || isr_lo) { - dev_err(dev, "done overflow, internal time out, or rngu error: " - "ISR 0x%08x_%08x\n", isr, isr_lo); + if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) || + (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) { + if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR)) + dev_err(dev, "TEA error: ISR 0x%08x_%08x\n", + isr, isr_lo); + else + dev_err(dev, "done overflow, internal time out, or " + "rngu error: ISR 0x%08x_%08x\n", isr, isr_lo); /* purge request queues */ for (ch = 0; ch < priv->num_channels; ch++) @@ -567,8 +635,43 @@ static void talitos_error(struct device *dev, u32 isr, u32 isr_lo) } } -#define DEF_TALITOS_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \ -static irqreturn_t talitos_interrupt_##name(int irq, void *data) \ +#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \ +static irqreturn_t talitos1_interrupt_##name(int irq, void *data) \ +{ \ + struct device *dev = data; \ + struct talitos_private *priv = dev_get_drvdata(dev); \ + u32 isr, isr_lo; \ + unsigned long flags; \ + \ + spin_lock_irqsave(&priv->reg_lock, flags); \ + isr = in_be32(priv->reg + TALITOS_ISR); \ + isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \ + /* Acknowledge interrupt */ \ + out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \ + out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \ + \ + if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) { \ + spin_unlock_irqrestore(&priv->reg_lock, flags); \ + talitos_error(dev, isr & ch_err_mask, isr_lo); \ + } \ + else { \ + if (likely(isr & ch_done_mask)) { \ + /* mask further done interrupts. */ \ + setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \ + /* done_task will unmask done interrupts at exit */ \ + tasklet_schedule(&priv->done_task[tlet]); \ + } \ + spin_unlock_irqrestore(&priv->reg_lock, flags); \ + } \ + \ + return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? 
IRQ_HANDLED : \ + IRQ_NONE; \ +} + +DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0) + +#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \ +static irqreturn_t talitos2_interrupt_##name(int irq, void *data) \ { \ struct device *dev = data; \ struct talitos_private *priv = dev_get_drvdata(dev); \ @@ -599,9 +702,12 @@ static irqreturn_t talitos_interrupt_##name(int irq, void *data) \ return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \ IRQ_NONE; \ } -DEF_TALITOS_INTERRUPT(4ch, TALITOS_ISR_4CHDONE, TALITOS_ISR_4CHERR, 0) -DEF_TALITOS_INTERRUPT(ch0_2, TALITOS_ISR_CH_0_2_DONE, TALITOS_ISR_CH_0_2_ERR, 0) -DEF_TALITOS_INTERRUPT(ch1_3, TALITOS_ISR_CH_1_3_DONE, TALITOS_ISR_CH_1_3_ERR, 1) + +DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0) +DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR, + 0) +DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR, + 1) /* * hwrng @@ -2639,29 +2745,35 @@ static int talitos_probe_irq(struct platform_device *ofdev) struct device_node *np = ofdev->dev.of_node; struct talitos_private *priv = dev_get_drvdata(dev); int err; + bool is_sec1 = has_ftr_sec1(priv); priv->irq[0] = irq_of_parse_and_map(np, 0); if (!priv->irq[0]) { dev_err(dev, "failed to map irq\n"); return -EINVAL; } + if (is_sec1) { + err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0, + dev_driver_string(dev), dev); + goto primary_out; + } priv->irq[1] = irq_of_parse_and_map(np, 1); /* get the primary irq line */ if (!priv->irq[1]) { - err = request_irq(priv->irq[0], talitos_interrupt_4ch, 0, + err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0, dev_driver_string(dev), dev); goto primary_out; } - err = request_irq(priv->irq[0], talitos_interrupt_ch0_2, 0, + err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0, dev_driver_string(dev), dev); if (err) goto primary_out; /* get the secondary irq line */ - err = request_irq(priv->irq[1], talitos_interrupt_ch1_3, 0, + err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0, dev_driver_string(dev), dev); if (err) { dev_err(dev, "failed to request secondary irq\n"); @@ -2702,20 +2814,6 @@ static int talitos_probe(struct platform_device *ofdev) spin_lock_init(&priv->reg_lock); - err = talitos_probe_irq(ofdev); - if (err) - goto err_out; - - if (!priv->irq[1]) { - tasklet_init(&priv->done_task[0], talitos_done_4ch, - (unsigned long)dev); - } else { - tasklet_init(&priv->done_task[0], talitos_done_ch0_2, - (unsigned long)dev); - tasklet_init(&priv->done_task[1], talitos_done_ch1_3, - (unsigned long)dev); - } - priv->reg = of_iomap(np, 0); if (!priv->reg) { dev_err(dev, "failed to of_iomap\n"); @@ -2783,6 +2881,25 @@ static int talitos_probe(struct platform_device *ofdev) stride = TALITOS2_CH_STRIDE; } + err = talitos_probe_irq(ofdev); + if (err) + goto err_out; + + if (of_device_is_compatible(np, "fsl,sec1.0")) { + tasklet_init(&priv->done_task[0], talitos1_done_4ch, + (unsigned long)dev); + } else { + if (!priv->irq[1]) { + tasklet_init(&priv->done_task[0], talitos2_done_4ch, + (unsigned long)dev); + } else { + tasklet_init(&priv->done_task[0], talitos2_done_ch0_2, + (unsigned long)dev); + tasklet_init(&priv->done_task[1], talitos2_done_ch1_3, + (unsigned long)dev); + } + } + priv->chan = kzalloc(sizeof(struct talitos_channel) * priv->num_channels, GFP_KERNEL); if (!priv->chan) { diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h index 4faa3b6..9507c4f 100644 --- 
a/drivers/crypto/talitos.h +++ b/drivers/crypto/talitos.h @@ -188,26 +188,38 @@ static inline bool has_ftr_sec1(struct talitos_private *priv) * TALITOS_xxx_LO addresses point to the low data bits (32-63) of the register */ +#define ISR1_FORMAT(x) (((x) << 28) | ((x) << 16)) +#define ISR2_FORMAT(x) (((x) << 4) | (x)) + /* global register offset addresses */ #define TALITOS_MCR 0x1030 /* master control register */ #define TALITOS_MCR_RCA0 (1 << 15) /* remap channel 0 */ #define TALITOS_MCR_RCA1 (1 << 14) /* remap channel 1 */ #define TALITOS_MCR_RCA2 (1 << 13) /* remap channel 2 */ #define TALITOS_MCR_RCA3 (1 << 12) /* remap channel 3 */ -#define TALITOS_MCR_SWR 0x1 /* s/w reset */ +#define TALITOS1_MCR_SWR 0x1000000 /* s/w reset */ +#define TALITOS2_MCR_SWR 0x1 /* s/w reset */ #define TALITOS_MCR_LO 0x1034 #define TALITOS_IMR 0x1008 /* interrupt mask register */ -#define TALITOS_IMR_INIT 0x100ff /* enable channel IRQs */ -#define TALITOS_IMR_DONE 0x00055 /* done IRQs */ +/* enable channel IRQs */ +#define TALITOS1_IMR_INIT ISR1_FORMAT(0xf) +#define TALITOS1_IMR_DONE ISR1_FORMAT(0x5) /* done IRQs */ +/* enable channel IRQs */ +#define TALITOS2_IMR_INIT (ISR2_FORMAT(0xf) | 0x10000) +#define TALITOS2_IMR_DONE ISR1_FORMAT(0x5) /* done IRQs */ #define TALITOS_IMR_LO 0x100C -#define TALITOS_IMR_LO_INIT 0x20000 /* allow RNGU error IRQs */ +#define TALITOS1_IMR_LO_INIT 0x2000000 /* allow RNGU error IRQs */ +#define TALITOS2_IMR_LO_INIT 0x20000 /* allow RNGU error IRQs */ #define TALITOS_ISR 0x1010 /* interrupt status register */ -#define TALITOS_ISR_4CHERR 0xaa /* 4 channel errors mask */ -#define TALITOS_ISR_4CHDONE 0x55 /* 4 channel done mask */ -#define TALITOS_ISR_CH_0_2_ERR 0x22 /* channels 0, 2 errors mask */ -#define TALITOS_ISR_CH_0_2_DONE 0x11 /* channels 0, 2 done mask */ -#define TALITOS_ISR_CH_1_3_ERR 0x88 /* channels 1, 3 errors mask */ -#define TALITOS_ISR_CH_1_3_DONE 0x44 /* channels 1, 3 done mask */ +#define TALITOS1_ISR_4CHERR ISR1_FORMAT(0xa) /* 4 ch errors mask */ +#define TALITOS1_ISR_4CHDONE ISR1_FORMAT(0x5) /* 4 ch done mask */ +#define TALITOS1_ISR_TEA_ERR 0x00000040 +#define TALITOS2_ISR_4CHERR ISR2_FORMAT(0xa) /* 4 ch errors mask */ +#define TALITOS2_ISR_4CHDONE ISR2_FORMAT(0x5) /* 4 ch done mask */ +#define TALITOS2_ISR_CH_0_2_ERR ISR2_FORMAT(0x2) /* ch 0, 2 err mask */ +#define TALITOS2_ISR_CH_0_2_DONE ISR2_FORMAT(0x1) /* ch 0, 2 done mask */ +#define TALITOS2_ISR_CH_1_3_ERR ISR2_FORMAT(0x8) /* ch 1, 3 err mask */ +#define TALITOS2_ISR_CH_1_3_DONE ISR2_FORMAT(0x4) /* ch 1, 3 done mask */ #define TALITOS_ISR_LO 0x1014 #define TALITOS_ICR 0x1018 /* interrupt clear register */ #define TALITOS_ICR_LO 0x101C @@ -219,14 +231,15 @@ static inline bool has_ftr_sec1(struct talitos_private *priv) /* channel configuration register */ #define TALITOS_CCCR 0x8 -#define TALITOS_CCCR_CONT 0x2 /* channel continue */ -#define TALITOS_CCCR_RESET 0x1 /* channel reset */ +#define TALITOS2_CCCR_CONT 0x2 /* channel continue on SEC2 */ +#define TALITOS2_CCCR_RESET 0x1 /* channel reset on SEC2 */ #define TALITOS_CCCR_LO 0xc #define TALITOS_CCCR_LO_IWSE 0x80 /* chan. ICCR writeback enab. */ #define TALITOS_CCCR_LO_EAE 0x20 /* extended address enable */ #define TALITOS_CCCR_LO_CDWE 0x10 /* chan. done writeback enab. 
*/ #define TALITOS_CCCR_LO_NT 0x4 /* notification type */ #define TALITOS_CCCR_LO_CDIE 0x2 /* channel done IRQ enable */ +#define TALITOS1_CCCR_LO_RESET 0x1 /* channel reset on SEC1 */ /* CCPSR: channel pointer status register */ #define TALITOS_CCPSR 0x10 @@ -298,6 +311,8 @@ static inline bool has_ftr_sec1(struct talitos_private *priv) #define TALITOS_EUICR_LO 0x3c #define TALITOS_EU_FIFO 0x800 /* output FIFO */ #define TALITOS_EU_FIFO_LO 0x804 /* output FIFO */ +/* DES unit */ +#define TALITOS1_DEUICR_KPE 0x00200000 /* Key Parity Error */ /* message digest unit */ #define TALITOS_MDEUICR_LO_ICE 0x4000 /* integrity check IRQ enable */ /* random number unit */ -- cgit v0.10.2 From 6f65f6ac5fb36a90ebf9a8d57cc4076b82d5009e Mon Sep 17 00:00:00 2001 From: LEROY Christophe Date: Fri, 17 Apr 2015 16:32:15 +0200 Subject: crypto: talitos - implement scatter/gather copy for SEC1 SEC1 doesn't support scatter/gather, SEC1 doesn't handle link tables. Therefore, for SEC1 we have to do it by SW. For that, we reserve space at the end of the extended descriptor, in lieu of the space reserved for the link tables on SEC2, and we perform sg_copy() when preparing the descriptors We also adapt the max buffer size which is only 32k on SEC1 while it is 64k on SEC2+ Signed-off-by: Christophe Leroy Signed-off-by: Herbert Xu diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 1265405..dddf4b3 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -866,9 +866,10 @@ badkey: * @dst_chained: whether dst is chained or not * @iv_dma: dma address of iv for checking continuity and link table * @dma_len: length of dma mapped link_tbl space - * @dma_link_tbl: bus physical address of link_tbl + * @dma_link_tbl: bus physical address of link_tbl/buf * @desc: h/w descriptor - * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) + * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2) + * @buf: input and output buffeur (if {src,dst}_nents > 1) (SEC1) * * if decrypting (with authcheck), or either one of src_nents or dst_nents * is greater than 1, an integrity check value is concatenated to the end @@ -885,7 +886,10 @@ struct talitos_edesc { int dma_len; dma_addr_t dma_link_tbl; struct talitos_desc desc; - struct talitos_ptr link_tbl[0]; + union { + struct talitos_ptr link_tbl[0]; + u8 buf[0]; + }; }; static int talitos_map_sg(struct device *dev, struct scatterlist *sg, @@ -1282,8 +1286,11 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, dma_addr_t iv_dma = 0; gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC; + struct talitos_private *priv = dev_get_drvdata(dev); + bool is_sec1 = has_ftr_sec1(priv); + int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN; - if (cryptlen + authsize > TALITOS_MAX_DATA_LEN) { + if (cryptlen + authsize > max_len) { dev_err(dev, "length exceeds h/w max limit\n"); return ERR_PTR(-EINVAL); } @@ -1327,8 +1334,12 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, */ alloc_len = sizeof(struct talitos_edesc); if (assoc_nents || src_nents || dst_nents) { - dma_len = (src_nents + dst_nents + 2 + assoc_nents) * - sizeof(struct talitos_ptr) + authsize; + if (is_sec1) + dma_len = src_nents ? cryptlen : 0 + + dst_nents ? 
cryptlen : 0; + else + dma_len = (src_nents + dst_nents + 2 + assoc_nents) * + sizeof(struct talitos_ptr) + authsize; alloc_len += dma_len; } else { dma_len = 0; @@ -1485,7 +1496,27 @@ static void unmap_sg_talitos_ptr(struct device *dev, struct scatterlist *src, struct scatterlist *dst, unsigned int len, struct talitos_edesc *edesc) { - talitos_sg_unmap(dev, edesc, src, dst); + struct talitos_private *priv = dev_get_drvdata(dev); + bool is_sec1 = has_ftr_sec1(priv); + + if (is_sec1) { + if (!edesc->src_nents) { + dma_unmap_sg(dev, src, 1, + dst != src ? DMA_TO_DEVICE + : DMA_BIDIRECTIONAL); + } + if (dst && edesc->dst_nents) { + dma_sync_single_for_device(dev, + edesc->dma_link_tbl + len, + len, DMA_FROM_DEVICE); + sg_copy_from_buffer(dst, edesc->dst_nents ? : 1, + edesc->buf + len, len); + } else if (dst && dst != src) { + dma_unmap_sg(dev, dst, 1, DMA_FROM_DEVICE); + } + } else { + talitos_sg_unmap(dev, edesc, src, dst); + } } static void common_nonsnoop_unmap(struct device *dev, @@ -1528,25 +1559,42 @@ int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src, bool is_sec1 = has_ftr_sec1(priv); to_talitos_ptr_len(ptr, len, is_sec1); - to_talitos_ptr_extent_clear(ptr, is_sec1); - sg_count = talitos_map_sg(dev, src, edesc->src_nents ? : 1, dir, - edesc->src_chained); + if (is_sec1) { + sg_count = edesc->src_nents ? : 1; - if (sg_count == 1) { - to_talitos_ptr(ptr, sg_dma_address(src), is_sec1); - } else { - sg_count = sg_to_link_tbl(src, sg_count, len, - &edesc->link_tbl[0]); - if (sg_count > 1) { - to_talitos_ptr(ptr, edesc->dma_link_tbl, 0); - ptr->j_extent |= DESC_PTR_LNKTBL_JUMP; - dma_sync_single_for_device(dev, edesc->dma_link_tbl, - edesc->dma_len, - DMA_BIDIRECTIONAL); + if (sg_count == 1) { + dma_map_sg(dev, src, 1, dir); + to_talitos_ptr(ptr, sg_dma_address(src), is_sec1); } else { - /* Only one segment now, so no link tbl needed */ + sg_copy_to_buffer(src, sg_count, edesc->buf, len); + to_talitos_ptr(ptr, edesc->dma_link_tbl, is_sec1); + dma_sync_single_for_device(dev, edesc->dma_link_tbl, + len, DMA_TO_DEVICE); + } + } else { + to_talitos_ptr_extent_clear(ptr, is_sec1); + + sg_count = talitos_map_sg(dev, src, edesc->src_nents ? : 1, dir, + edesc->src_chained); + + if (sg_count == 1) { to_talitos_ptr(ptr, sg_dma_address(src), is_sec1); + } else { + sg_count = sg_to_link_tbl(src, sg_count, len, + &edesc->link_tbl[0]); + if (sg_count > 1) { + to_talitos_ptr(ptr, edesc->dma_link_tbl, 0); + ptr->j_extent |= DESC_PTR_LNKTBL_JUMP; + dma_sync_single_for_device(dev, + edesc->dma_link_tbl, + edesc->dma_len, + DMA_BIDIRECTIONAL); + } else { + /* Only one segment now, so no link tbl needed*/ + to_talitos_ptr(ptr, sg_dma_address(src), + is_sec1); + } } } return sg_count; @@ -1560,26 +1608,42 @@ void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst, struct talitos_private *priv = dev_get_drvdata(dev); bool is_sec1 = has_ftr_sec1(priv); - to_talitos_ptr_len(ptr, len, is_sec1); - to_talitos_ptr_extent_clear(ptr, is_sec1); - if (dir != DMA_NONE) sg_count = talitos_map_sg(dev, dst, edesc->dst_nents ? 
: 1, dir, edesc->dst_chained); - if (sg_count == 1) { - to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1); + to_talitos_ptr_len(ptr, len, is_sec1); + + if (is_sec1) { + if (sg_count == 1) { + if (dir != DMA_NONE) + dma_map_sg(dev, dst, 1, dir); + to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1); + } else { + to_talitos_ptr(ptr, edesc->dma_link_tbl + len, is_sec1); + dma_sync_single_for_device(dev, + edesc->dma_link_tbl + len, + len, DMA_FROM_DEVICE); + } } else { - struct talitos_ptr *link_tbl_ptr = - &edesc->link_tbl[edesc->src_nents + 1]; - - to_talitos_ptr(ptr, edesc->dma_link_tbl + - (edesc->src_nents + 1) * - sizeof(struct talitos_ptr), 0); - ptr->j_extent |= DESC_PTR_LNKTBL_JUMP; - sg_count = sg_to_link_tbl(dst, sg_count, len, link_tbl_ptr); - dma_sync_single_for_device(dev, edesc->dma_link_tbl, - edesc->dma_len, DMA_BIDIRECTIONAL); + to_talitos_ptr_extent_clear(ptr, is_sec1); + + if (sg_count == 1) { + to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1); + } else { + struct talitos_ptr *link_tbl_ptr = + &edesc->link_tbl[edesc->src_nents + 1]; + + to_talitos_ptr(ptr, edesc->dma_link_tbl + + (edesc->src_nents + 1) * + sizeof(struct talitos_ptr), 0); + ptr->j_extent |= DESC_PTR_LNKTBL_JUMP; + sg_count = sg_to_link_tbl(dst, sg_count, len, + link_tbl_ptr); + dma_sync_single_for_device(dev, edesc->dma_link_tbl, + edesc->dma_len, + DMA_BIDIRECTIONAL); + } } } diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h index 9507c4f..314daf5 100644 --- a/drivers/crypto/talitos.h +++ b/drivers/crypto/talitos.h @@ -29,7 +29,8 @@ */ #define TALITOS_TIMEOUT 100000 -#define TALITOS_MAX_DATA_LEN 65535 +#define TALITOS1_MAX_DATA_LEN 32768 +#define TALITOS2_MAX_DATA_LEN 65535 #define DESC_TYPE(desc_hdr) ((be32_to_cpu(desc_hdr) >> 3) & 0x1f) #define PRIMARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 28) & 0xf) -- cgit v0.10.2 From 2d02905ebd22c0271a25e424ab209c8b7067be67 Mon Sep 17 00:00:00 2001 From: LEROY Christophe Date: Fri, 17 Apr 2015 16:32:18 +0200 Subject: crypto: talitos - SEC1 bugs on 0 data hash SEC1 bugs on 0 data hash, so we submit an already padded block representing 0 data Signed-off-by: Christophe Leroy Signed-off-by: Herbert Xu diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index dddf4b3..f1406d7b 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -1797,6 +1797,27 @@ static void ahash_done(struct device *dev, areq->base.complete(&areq->base, err); } +/* + * SEC1 doesn't like hashing of 0 sized message, so we do the padding + * ourself and submit a padded block + */ +void talitos_handle_buggy_hash(struct talitos_ctx *ctx, + struct talitos_edesc *edesc, + struct talitos_ptr *ptr) +{ + static u8 padded_hash[64] = { + 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + }; + + pr_err_once("Bug in SEC1, padding ourself\n"); + edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD; + map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash), + (char *)padded_hash, DMA_TO_DEVICE); +} + static int common_nonsnoop_hash(struct talitos_edesc *edesc, struct ahash_request *areq, unsigned int length, void (*callback) (struct device *dev, @@ -1857,6 +1878,9 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, /* last DWORD empty */ desc->ptr[6] = zero_entry; + if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0) + talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]); + ret = talitos_submit(dev, 
ctx->ch, desc, callback, areq); if (ret != -EINPROGRESS) { common_nonsnoop_hash_unmap(dev, edesc, areq); -- cgit v0.10.2 From 0635b7db143195281c5a29d272bb836be58af1d4 Mon Sep 17 00:00:00 2001 From: LEROY Christophe Date: Fri, 17 Apr 2015 16:32:20 +0200 Subject: crypto: talitos - Add fsl,sec1.0 compatible We add a specific compatible for SEC1, to handle the differences between SEC1 and SEC2+ Signed-off-by: Christophe Leroy Signed-off-by: Herbert Xu diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index f1406d7b..c04074d 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -3086,9 +3086,16 @@ err_out: } static const struct of_device_id talitos_match[] = { +#ifdef CONFIG_CRYPTO_DEV_TALITOS1 + { + .compatible = "fsl,sec1.0", + }, +#endif +#ifdef CONFIG_CRYPTO_DEV_TALITOS2 { .compatible = "fsl,sec2.0", }, +#endif {}, }; MODULE_DEVICE_TABLE(of, talitos_match); -- cgit v0.10.2 From 04a34d4680e75e9be63204b3a7b51a6abe3ce35a Mon Sep 17 00:00:00 2001 From: LEROY Christophe Date: Fri, 17 Apr 2015 16:32:22 +0200 Subject: crypto: talitos - Update DT bindings with SEC1 This patch updates the documentation by including SEC1 into SEC2/3 doc Signed-off-by: Christophe Leroy Signed-off-by: Herbert Xu diff --git a/Documentation/devicetree/bindings/crypto/fsl-sec2.txt b/Documentation/devicetree/bindings/crypto/fsl-sec2.txt index 38988ef..f0d926b 100644 --- a/Documentation/devicetree/bindings/crypto/fsl-sec2.txt +++ b/Documentation/devicetree/bindings/crypto/fsl-sec2.txt @@ -1,9 +1,11 @@ -Freescale SoC SEC Security Engines versions 2.x-3.x +Freescale SoC SEC Security Engines versions 1.x-2.x-3.x Required properties: - compatible : Should contain entries for this and backward compatible - SEC versions, high to low, e.g., "fsl,sec2.1", "fsl,sec2.0" + SEC versions, high to low, e.g., "fsl,sec2.1", "fsl,sec2.0" (SEC2/3) + e.g., "fsl,sec1.2", "fsl,sec1.0" (SEC1) + warning: SEC1 and SEC2 are mutually exclusive - reg : Offset and length of the register set for the device - interrupts : the SEC's interrupt number - fsl,num-channels : An integer representing the number of channels -- cgit v0.10.2 From 082eb10ba97449a370e58f746522a4b90bcaee78 Mon Sep 17 00:00:00 2001 From: Stephan Mueller Date: Sat, 18 Apr 2015 19:35:45 +0200 Subject: crypto: drbg - fix drbg_generate return val check The drbg_generate returns 0 in success case. That means that drbg_generate_long will always only generate drbg_max_request_bytes at most. Longer requests will be truncated to drbg_max_request_bytes. Reported-by: Herbert Xu Signed-off-by: Stephan Mueller Signed-off-by: Herbert Xu diff --git a/crypto/drbg.c b/crypto/drbg.c index 8d2944f..5a218a5 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -1442,19 +1442,19 @@ static int drbg_generate_long(struct drbg_state *drbg, unsigned char *buf, unsigned int buflen, struct drbg_string *addtl) { - int len = 0; + unsigned int len = 0; unsigned int slice = 0; do { - int tmplen = 0; + int err = 0; unsigned int chunk = 0; slice = ((buflen - len) / drbg_max_request_bytes(drbg)); chunk = slice ? 
drbg_max_request_bytes(drbg) : (buflen - len); - tmplen = drbg_generate(drbg, buf + len, chunk, addtl); - if (0 >= tmplen) - return tmplen; - len += tmplen; + err = drbg_generate(drbg, buf + len, chunk, addtl); + if (0 > err) + return err; + len += chunk; } while (slice > 0 && (len < buflen)); - return len; + return 0; } /* -- cgit v0.10.2 From 76899a41f830d17affe6f9c58cc4b23ba26f5e00 Mon Sep 17 00:00:00 2001 From: Stephan Mueller Date: Sat, 18 Apr 2015 19:36:17 +0200 Subject: crypto: drbg - replace spinlock with mutex The creation of a shadow copy is intended to only hold a short term lock. But the drawback is that parallel users have a very similar DRBG state which only differs by a high-resolution time stamp. The DRBG will now hold a long term lock. Therefore, the lock is changed to a mutex which implies that the DRBG can only be used in process context. The lock now guards the instantiation as well as the entire DRBG generation operation. Therefore, multiple callers are fully serialized when generating a random number. As the locking is changed to use a long-term lock to avoid such similar DRBG states, the entire creation and maintenance of a shadow copy can be removed. Signed-off-by: Stephan Mueller Signed-off-by: Herbert Xu diff --git a/crypto/drbg.c b/crypto/drbg.c index 5a218a5..a278f84 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -1181,7 +1181,6 @@ static inline int drbg_alloc_state(struct drbg_state *drbg) if (!drbg->scratchpad) goto err; } - spin_lock_init(&drbg->drbg_lock); return 0; err: @@ -1189,79 +1188,6 @@ err: return ret; } -/* - * Strategy to avoid holding long term locks: generate a shadow copy of DRBG - * and perform all operations on this shadow copy. After finishing, restore - * the updated state of the shadow copy into original drbg state. This way, - * only the read and write operations of the original drbg state must be - * locked - */ -static inline void drbg_copy_drbg(struct drbg_state *src, - struct drbg_state *dst) -{ - if (!src || !dst) - return; - memcpy(dst->V, src->V, drbg_statelen(src)); - memcpy(dst->C, src->C, drbg_statelen(src)); - dst->reseed_ctr = src->reseed_ctr; - dst->seeded = src->seeded; - dst->pr = src->pr; -#ifdef CONFIG_CRYPTO_FIPS - dst->fips_primed = src->fips_primed; - memcpy(dst->prev, src->prev, drbg_blocklen(src)); -#endif - /* - * Not copied: - * scratchpad is initialized drbg_alloc_state; - * priv_data is initialized with call to crypto_init; - * d_ops and core are set outside, as these parameters are const; - * test_data is set outside to prevent it being copied back. 
- */ -} - -static int drbg_make_shadow(struct drbg_state *drbg, struct drbg_state **shadow) -{ - int ret = -ENOMEM; - struct drbg_state *tmp = NULL; - - tmp = kzalloc(sizeof(struct drbg_state), GFP_KERNEL); - if (!tmp) - return -ENOMEM; - - /* read-only data as they are defined as const, no lock needed */ - tmp->core = drbg->core; - tmp->d_ops = drbg->d_ops; - - ret = drbg_alloc_state(tmp); - if (ret) - goto err; - - spin_lock_bh(&drbg->drbg_lock); - drbg_copy_drbg(drbg, tmp); - /* only make a link to the test buffer, as we only read that data */ - tmp->test_data = drbg->test_data; - spin_unlock_bh(&drbg->drbg_lock); - *shadow = tmp; - return 0; - -err: - kzfree(tmp); - return ret; -} - -static void drbg_restore_shadow(struct drbg_state *drbg, - struct drbg_state **shadow) -{ - struct drbg_state *tmp = *shadow; - - spin_lock_bh(&drbg->drbg_lock); - drbg_copy_drbg(tmp, drbg); - spin_unlock_bh(&drbg->drbg_lock); - drbg_dealloc_state(tmp); - kzfree(tmp); - *shadow = NULL; -} - /************************************************************************* * DRBG interface functions *************************************************************************/ @@ -1287,13 +1213,7 @@ static int drbg_generate(struct drbg_state *drbg, struct drbg_string *addtl) { int len = 0; - struct drbg_state *shadow = NULL; LIST_HEAD(addtllist); - struct drbg_string timestamp; - union { - cycles_t cycles; - unsigned char char_cycles[sizeof(cycles_t)]; - } now; if (0 == buflen || !buf) { pr_devel("DRBG: no output buffer provided\n"); @@ -1304,15 +1224,9 @@ static int drbg_generate(struct drbg_state *drbg, return -EINVAL; } - len = drbg_make_shadow(drbg, &shadow); - if (len) { - pr_devel("DRBG: shadow copy cannot be generated\n"); - return len; - } - /* 9.3.1 step 2 */ len = -EINVAL; - if (buflen > (drbg_max_request_bytes(shadow))) { + if (buflen > (drbg_max_request_bytes(drbg))) { pr_devel("DRBG: requested random numbers too large %u\n", buflen); goto err; @@ -1321,7 +1235,7 @@ static int drbg_generate(struct drbg_state *drbg, /* 9.3.1 step 3 is implicit with the chosen DRBG */ /* 9.3.1 step 4 */ - if (addtl && addtl->len > (drbg_max_addtl(shadow))) { + if (addtl && addtl->len > (drbg_max_addtl(drbg))) { pr_devel("DRBG: additional information string too long %zu\n", addtl->len); goto err; @@ -1332,46 +1246,34 @@ static int drbg_generate(struct drbg_state *drbg, * 9.3.1 step 6 and 9 supplemented by 9.3.2 step c is implemented * here. The spec is a bit convoluted here, we make it simpler. */ - if ((drbg_max_requests(shadow)) < shadow->reseed_ctr) - shadow->seeded = false; + if ((drbg_max_requests(drbg)) < drbg->reseed_ctr) + drbg->seeded = false; /* allocate cipher handle */ - len = shadow->d_ops->crypto_init(shadow); + len = drbg->d_ops->crypto_init(drbg); if (len) goto err; - if (shadow->pr || !shadow->seeded) { + if (drbg->pr || !drbg->seeded) { pr_devel("DRBG: reseeding before generation (prediction " "resistance: %s, state %s)\n", drbg->pr ? "true" : "false", drbg->seeded ? "seeded" : "unseeded"); /* 9.3.1 steps 7.1 through 7.3 */ - len = drbg_seed(shadow, addtl, true); + len = drbg_seed(drbg, addtl, true); if (len) goto err; /* 9.3.1 step 7.4 */ addtl = NULL; } - /* - * Mix the time stamp into the DRBG state if the DRBG is not in - * test mode. If there are two callers invoking the DRBG at the same - * time, i.e. before the first caller merges its shadow state back, - * both callers would obtain the same random number stream without - * changing the state here. 
- */ - if (!drbg->test_data) { - now.cycles = random_get_entropy(); - drbg_string_fill(×tamp, now.char_cycles, sizeof(cycles_t)); - list_add_tail(×tamp.list, &addtllist); - } if (addtl && 0 < addtl->len) list_add_tail(&addtl->list, &addtllist); /* 9.3.1 step 8 and 10 */ - len = shadow->d_ops->generate(shadow, buf, buflen, &addtllist); + len = drbg->d_ops->generate(drbg, buf, buflen, &addtllist); /* 10.1.1.4 step 6, 10.1.2.5 step 7, 10.2.1.5.2 step 7 */ - shadow->reseed_ctr++; + drbg->reseed_ctr++; if (0 >= len) goto err; @@ -1391,7 +1293,7 @@ static int drbg_generate(struct drbg_state *drbg, * case somebody has a need to implement the test of 11.3.3. */ #if 0 - if (shadow->reseed_ctr && !(shadow->reseed_ctr % 4096)) { + if (drbg->reseed_ctr && !(drbg->reseed_ctr % 4096)) { int err = 0; pr_devel("DRBG: start to perform self test\n"); if (drbg->core->flags & DRBG_HMAC) @@ -1410,8 +1312,6 @@ static int drbg_generate(struct drbg_state *drbg, * are returned when reusing this DRBG cipher handle */ drbg_uninstantiate(drbg); - drbg_dealloc_state(shadow); - kzfree(shadow); return 0; } else { pr_devel("DRBG: self test successful\n"); @@ -1425,8 +1325,7 @@ static int drbg_generate(struct drbg_state *drbg, */ len = 0; err: - shadow->d_ops->crypto_fini(shadow); - drbg_restore_shadow(drbg, &shadow); + drbg->d_ops->crypto_fini(drbg); return len; } @@ -1449,7 +1348,9 @@ static int drbg_generate_long(struct drbg_state *drbg, unsigned int chunk = 0; slice = ((buflen - len) / drbg_max_request_bytes(drbg)); chunk = slice ? drbg_max_request_bytes(drbg) : (buflen - len); + mutex_lock(&drbg->drbg_mutex); err = drbg_generate(drbg, buf + len, chunk, addtl); + mutex_unlock(&drbg->drbg_mutex); if (0 > err) return err; len += chunk; @@ -1477,10 +1378,11 @@ static int drbg_generate_long(struct drbg_state *drbg, static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers, int coreref, bool pr) { - int ret = -ENOMEM; + int ret = -EOPNOTSUPP; pr_devel("DRBG: Initializing DRBG core %d with prediction resistance " "%s\n", coreref, pr ? 
"enabled" : "disabled"); + mutex_lock(&drbg->drbg_mutex); drbg->core = &drbg_cores[coreref]; drbg->pr = pr; drbg->seeded = false; @@ -1501,7 +1403,7 @@ static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers, break; #endif /* CONFIG_CRYPTO_DRBG_CTR */ default: - return -EOPNOTSUPP; + goto unlock; } /* 9.1 step 1 is implicit with the selected DRBG type */ @@ -1516,7 +1418,7 @@ static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers, ret = drbg_alloc_state(drbg); if (ret) - return ret; + goto unlock; ret = -EFAULT; if (drbg->d_ops->crypto_init(drbg)) @@ -1526,10 +1428,13 @@ static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers, if (ret) goto err; + mutex_unlock(&drbg->drbg_mutex); return 0; err: drbg_dealloc_state(drbg); +unlock: + mutex_unlock(&drbg->drbg_mutex); return ret; } @@ -1544,10 +1449,10 @@ err: */ static int drbg_uninstantiate(struct drbg_state *drbg) { - spin_lock_bh(&drbg->drbg_lock); + mutex_lock(&drbg->drbg_mutex); drbg_dealloc_state(drbg); /* no scrubbing of test_data -- this shall survive an uninstantiate */ - spin_unlock_bh(&drbg->drbg_lock); + mutex_unlock(&drbg->drbg_mutex); return 0; } @@ -1562,9 +1467,9 @@ static inline void drbg_set_testdata(struct drbg_state *drbg, { if (!test_data || !test_data->testentropy) return; - spin_lock_bh(&drbg->drbg_lock); + mutex_lock(&drbg->drbg_mutex);; drbg->test_data = test_data; - spin_unlock_bh(&drbg->drbg_lock); + mutex_unlock(&drbg->drbg_mutex); } /*************************************************************** @@ -1717,6 +1622,7 @@ static int drbg_kcapi_init(struct crypto_tfm *tfm) bool pr = false; int coreref = 0; + mutex_init(&drbg->drbg_mutex); drbg_convert_tfm_core(crypto_tfm_alg_driver_name(tfm), &coreref, &pr); /* * when personalization string is needed, the caller must call reset diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h index 5186f75..a43a7ed 100644 --- a/include/crypto/drbg.h +++ b/include/crypto/drbg.h @@ -49,7 +49,7 @@ #include #include #include -#include +#include #include /* @@ -104,7 +104,7 @@ struct drbg_test_data { }; struct drbg_state { - spinlock_t drbg_lock; /* lock around DRBG */ + struct mutex drbg_mutex; /* lock around DRBG */ unsigned char *V; /* internal state 10.1.1.1 1a) */ /* hash: static value 10.1.1.1 1b) hmac / ctr: key */ unsigned char *C; -- cgit v0.10.2 From fa3ae6253ccda65e6824a2a38c8a87c204c28d84 Mon Sep 17 00:00:00 2001 From: Stephan Mueller Date: Sat, 18 Apr 2015 19:37:00 +0200 Subject: crypto: drbg - leave cipher handles operational As the DRBG does not operate on shadow copies of the DRBG instance any more, the cipher handles only need to be allocated once during initalization time and deallocated during uninstantiate time. 
Signed-off-by: Stephan Mueller Signed-off-by: Herbert Xu diff --git a/crypto/drbg.c b/crypto/drbg.c index a278f84..30ec2a6 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -1249,11 +1249,6 @@ static int drbg_generate(struct drbg_state *drbg, if ((drbg_max_requests(drbg)) < drbg->reseed_ctr) drbg->seeded = false; - /* allocate cipher handle */ - len = drbg->d_ops->crypto_init(drbg); - if (len) - goto err; - if (drbg->pr || !drbg->seeded) { pr_devel("DRBG: reseeding before generation (prediction " "resistance: %s, state %s)\n", @@ -1325,7 +1320,6 @@ static int drbg_generate(struct drbg_state *drbg, */ len = 0; err: - drbg->d_ops->crypto_fini(drbg); return len; } @@ -1424,9 +1418,10 @@ static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers, if (drbg->d_ops->crypto_init(drbg)) goto err; ret = drbg_seed(drbg, pers, false); - drbg->d_ops->crypto_fini(drbg); - if (ret) + if (ret) { + drbg->d_ops->crypto_fini(drbg); goto err; + } mutex_unlock(&drbg->drbg_mutex); return 0; @@ -1450,6 +1445,7 @@ unlock: static int drbg_uninstantiate(struct drbg_state *drbg) { mutex_lock(&drbg->drbg_mutex); + drbg->d_ops->crypto_fini(drbg); drbg_dealloc_state(drbg); /* no scrubbing of test_data -- this shall survive an uninstantiate */ mutex_unlock(&drbg->drbg_mutex); -- cgit v0.10.2 From e11a754813e28dbfab63a23ae8fd22e9198489e1 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 20 Apr 2015 11:26:48 +0800 Subject: crypto: drbg - Initialise mutex in drbg_healthcheck_sanity As we moved the mutex init out of drbg_instantiate and into cra_init we need to explicitly initialise the mutex in drbg_healthcheck_sanity. Signed-off-by: Herbert Xu Acked-by: Stephan Mueller diff --git a/crypto/drbg.c b/crypto/drbg.c index 30ec2a6..57fd479 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -1741,6 +1741,8 @@ static inline int __init drbg_healthcheck_sanity(void) if (!drbg) return -ENOMEM; + mutex_init(&drbg->drbg_mutex); + /* * if the following tests fail, it is likely that there is a buffer * overflow as buf is much smaller than the requested or provided -- cgit v0.10.2 From 2a57e4241ec9a11ce89f43099a0f6b83a28058fa Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 20 Apr 2015 11:29:15 +0800 Subject: crypto: drbg - Do not seed RNG in drbg_kcapi_init Initialising the RNG in drbg_kcapi_init is a waste of precious entropy because all users will immediately seed the RNG after the allocation. In fact, all users should seed the RNG before using it. So there is no point in doing the seeding in drbg_kcapi_init. This patch removes the initial seeding and the user must seed the RNG explicitly (as they all currently do). This patch also changes drbg_kcapi_reset to allow reseeding. That is, if you call it after a successful initial seeding, then it will not reset the internal state of the DRBG before mixing the new input and entropy. If you still wish to reset the internal state, you can always free the DRBG and allocate a new one. Finally this patch removes locking from drbg_uninstantiate because it's now only called from the destruction path which must not be executed in parallel with normal operations. 
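A hedged consumer-side sketch of the resulting calling convention, with invented function and buffer names: once seeding is no longer done in drbg_kcapi_init, every user is expected to seed the instance explicitly before asking for output, exactly as the in-tree users already do.

#include <linux/err.h>
#include <crypto/rng.h>

/* Illustrative consumer sketch; names are invented, "stdrng" selects the DRBG. */
static int example_use_stdrng(const u8 *seed, unsigned int slen,
			      u8 *out, unsigned int outlen)
{
	struct crypto_rng *rng;
	int ret;

	rng = crypto_alloc_rng("stdrng", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	/* mandatory first step: seed (or later reseed) the instance */
	ret = crypto_rng_reset(rng, (u8 *)seed, slen);	/* cast needed until the later const-seed patch */
	if (!ret)
		ret = crypto_rng_get_bytes(rng, out, outlen);

	crypto_free_rng(rng);
	return ret;
}
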
Signed-off-by: Herbert Xu Acked-by: Stephan Mueller diff --git a/crypto/drbg.c b/crypto/drbg.c index 57fd479..5bce159 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -1136,6 +1136,8 @@ static inline void drbg_dealloc_state(struct drbg_state *drbg) kzfree(drbg->scratchpad); drbg->scratchpad = NULL; drbg->reseed_ctr = 0; + drbg->d_ops = NULL; + drbg->core = NULL; #ifdef CONFIG_CRYPTO_FIPS kzfree(drbg->prev); drbg->prev = NULL; @@ -1152,6 +1154,27 @@ static inline int drbg_alloc_state(struct drbg_state *drbg) int ret = -ENOMEM; unsigned int sb_size = 0; + switch (drbg->core->flags & DRBG_TYPE_MASK) { +#ifdef CONFIG_CRYPTO_DRBG_HMAC + case DRBG_HMAC: + drbg->d_ops = &drbg_hmac_ops; + break; +#endif /* CONFIG_CRYPTO_DRBG_HMAC */ +#ifdef CONFIG_CRYPTO_DRBG_HASH + case DRBG_HASH: + drbg->d_ops = &drbg_hash_ops; + break; +#endif /* CONFIG_CRYPTO_DRBG_HASH */ +#ifdef CONFIG_CRYPTO_DRBG_CTR + case DRBG_CTR: + drbg->d_ops = &drbg_ctr_ops; + break; +#endif /* CONFIG_CRYPTO_DRBG_CTR */ + default: + ret = -EOPNOTSUPP; + goto err; + } + drbg->V = kmalloc(drbg_statelen(drbg), GFP_KERNEL); if (!drbg->V) goto err; @@ -1215,6 +1238,10 @@ static int drbg_generate(struct drbg_state *drbg, int len = 0; LIST_HEAD(addtllist); + if (!drbg->core) { + pr_devel("DRBG: not yet seeded\n"); + return -EINVAL; + } if (0 == buflen || !buf) { pr_devel("DRBG: no output buffer provided\n"); return -EINVAL; @@ -1372,33 +1399,12 @@ static int drbg_generate_long(struct drbg_state *drbg, static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers, int coreref, bool pr) { - int ret = -EOPNOTSUPP; + int ret; + bool reseed = true; pr_devel("DRBG: Initializing DRBG core %d with prediction resistance " "%s\n", coreref, pr ? "enabled" : "disabled"); mutex_lock(&drbg->drbg_mutex); - drbg->core = &drbg_cores[coreref]; - drbg->pr = pr; - drbg->seeded = false; - switch (drbg->core->flags & DRBG_TYPE_MASK) { -#ifdef CONFIG_CRYPTO_DRBG_HMAC - case DRBG_HMAC: - drbg->d_ops = &drbg_hmac_ops; - break; -#endif /* CONFIG_CRYPTO_DRBG_HMAC */ -#ifdef CONFIG_CRYPTO_DRBG_HASH - case DRBG_HASH: - drbg->d_ops = &drbg_hash_ops; - break; -#endif /* CONFIG_CRYPTO_DRBG_HASH */ -#ifdef CONFIG_CRYPTO_DRBG_CTR - case DRBG_CTR: - drbg->d_ops = &drbg_ctr_ops; - break; -#endif /* CONFIG_CRYPTO_DRBG_CTR */ - default: - goto unlock; - } /* 9.1 step 1 is implicit with the selected DRBG type */ @@ -1410,21 +1416,31 @@ static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers, /* 9.1 step 4 is implicit in drbg_sec_strength */ - ret = drbg_alloc_state(drbg); - if (ret) - goto unlock; + if (!drbg->core) { + drbg->core = &drbg_cores[coreref]; + drbg->pr = pr; + drbg->seeded = false; - ret = -EFAULT; - if (drbg->d_ops->crypto_init(drbg)) - goto err; - ret = drbg_seed(drbg, pers, false); - if (ret) { + ret = drbg_alloc_state(drbg); + if (ret) + goto unlock; + + ret = -EFAULT; + if (drbg->d_ops->crypto_init(drbg)) + goto err; + + reseed = false; + } + + ret = drbg_seed(drbg, pers, reseed); + + if (ret && !reseed) { drbg->d_ops->crypto_fini(drbg); goto err; } mutex_unlock(&drbg->drbg_mutex); - return 0; + return ret; err: drbg_dealloc_state(drbg); @@ -1444,11 +1460,10 @@ unlock: */ static int drbg_uninstantiate(struct drbg_state *drbg) { - mutex_lock(&drbg->drbg_mutex); - drbg->d_ops->crypto_fini(drbg); + if (drbg->d_ops) + drbg->d_ops->crypto_fini(drbg); drbg_dealloc_state(drbg); /* no scrubbing of test_data -- this shall survive an uninstantiate */ - mutex_unlock(&drbg->drbg_mutex); return 0; } @@ -1615,16 +1630,10 @@ static inline 
void drbg_convert_tfm_core(const char *cra_driver_name, static int drbg_kcapi_init(struct crypto_tfm *tfm) { struct drbg_state *drbg = crypto_tfm_ctx(tfm); - bool pr = false; - int coreref = 0; mutex_init(&drbg->drbg_mutex); - drbg_convert_tfm_core(crypto_tfm_alg_driver_name(tfm), &coreref, &pr); - /* - * when personalization string is needed, the caller must call reset - * and provide the personalization string as seed information - */ - return drbg_instantiate(drbg, NULL, coreref, pr); + + return 0; } static void drbg_kcapi_cleanup(struct crypto_tfm *tfm) @@ -1665,10 +1674,9 @@ static int drbg_kcapi_random(struct crypto_rng *tfm, u8 *rdata, } /* - * Reset the DRBG invoked by the kernel crypto API - * The reset implies a full re-initialization of the DRBG. Similar to the - * generate function of drbg_kcapi_random, this function extends the - * kernel crypto API interface with struct drbg_gen + * Seed the DRBG invoked by the kernel crypto API + * Similar to the generate function of drbg_kcapi_random, this + * function extends the kernel crypto API interface with struct drbg_gen */ static int drbg_kcapi_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen) { @@ -1678,7 +1686,6 @@ static int drbg_kcapi_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen) struct drbg_string seed_string; int coreref = 0; - drbg_uninstantiate(drbg); drbg_convert_tfm_core(crypto_tfm_alg_driver_name(tfm_base), &coreref, &pr); if (0 < slen) { -- cgit v0.10.2 From 38d21433112c25acdb8e93f60be629e7a1c27a26 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 20 Apr 2015 13:39:00 +0800 Subject: crypto: api - Add crypto_alg_extsize helper This patch adds a crypto_alg_extsize helper that can be used by algorithm types such as pcompress and shash. Signed-off-by: Herbert Xu diff --git a/crypto/algapi.c b/crypto/algapi.c index d2627a3..a60f626 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -964,6 +964,12 @@ void crypto_xor(u8 *dst, const u8 *src, unsigned int size) } EXPORT_SYMBOL_GPL(crypto_xor); +unsigned int crypto_alg_extsize(struct crypto_alg *alg) +{ + return alg->cra_ctxsize; +} +EXPORT_SYMBOL_GPL(crypto_alg_extsize); + static int __init crypto_algapi_init(void) { crypto_init_proc(); diff --git a/crypto/internal.h b/crypto/internal.h index bd39bfc..ed7a70c 100644 --- a/crypto/internal.h +++ b/crypto/internal.h @@ -103,6 +103,8 @@ int crypto_register_notifier(struct notifier_block *nb); int crypto_unregister_notifier(struct notifier_block *nb); int crypto_probing_notify(unsigned long val, void *v); +unsigned int crypto_alg_extsize(struct crypto_alg *alg); + static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg) { atomic_inc(&alg->cra_refcnt); -- cgit v0.10.2 From ac611680cb4f6fce26d97a79fd8aac3d42c5da91 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 20 Apr 2015 13:39:01 +0800 Subject: crypto: shash - Use crypto_alg_extsize helper This patch replaces crypto_shash_extsize function with crypto_alg_extsize. 
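As a sketch of the design choice (example_type is an invented name, and the include of the subsystem-internal header is assumed for code living under crypto/): any algorithm type whose external tfm size is simply cra_ctxsize can now point its .extsize callback at the shared helper instead of keeping a private copy of the same one-liner.

#include <crypto/algapi.h>
#include "internal.h"		/* declares crypto_alg_extsize() */

/* Sketch only; the real conversions are shown in the shash and pcomp patches. */
static const struct crypto_type example_type = {
	.extsize = crypto_alg_extsize,	/* returns alg->cra_ctxsize */
	/* .init_tfm, .show, .report and friends elided */
};
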
Signed-off-by: Herbert Xu diff --git a/crypto/shash.c b/crypto/shash.c index 47c7139..ecb1e3d 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -520,11 +520,6 @@ static int crypto_shash_init_tfm(struct crypto_tfm *tfm) return 0; } -static unsigned int crypto_shash_extsize(struct crypto_alg *alg) -{ - return alg->cra_ctxsize; -} - #ifdef CONFIG_NET static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg) { @@ -564,7 +559,7 @@ static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg) static const struct crypto_type crypto_shash_type = { .ctxsize = crypto_shash_ctxsize, - .extsize = crypto_shash_extsize, + .extsize = crypto_alg_extsize, .init = crypto_init_shash_ops, .init_tfm = crypto_shash_init_tfm, #ifdef CONFIG_PROC_FS -- cgit v0.10.2 From f7c9bebe8bc79ddb8a50db441a53b59b61ae3ba8 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 20 Apr 2015 13:39:02 +0800 Subject: crypto: pcomp - Use crypto_alg_extsize helper This patch replaces crypto_pcomp_extsize function with crypto_alg_extsize. Signed-off-by: Herbert Xu diff --git a/crypto/pcompress.c b/crypto/pcompress.c index 7140fe7..7a13b40 100644 --- a/crypto/pcompress.c +++ b/crypto/pcompress.c @@ -38,11 +38,6 @@ static int crypto_pcomp_init(struct crypto_tfm *tfm, u32 type, u32 mask) return 0; } -static unsigned int crypto_pcomp_extsize(struct crypto_alg *alg) -{ - return alg->cra_ctxsize; -} - static int crypto_pcomp_init_tfm(struct crypto_tfm *tfm) { return 0; @@ -77,7 +72,7 @@ static void crypto_pcomp_show(struct seq_file *m, struct crypto_alg *alg) } static const struct crypto_type crypto_pcomp_type = { - .extsize = crypto_pcomp_extsize, + .extsize = crypto_alg_extsize, .init = crypto_pcomp_init, .init_tfm = crypto_pcomp_init_tfm, #ifdef CONFIG_PROC_FS -- cgit v0.10.2 From d0e83059a6c9b04f00264a74b8f6439948de4613 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 20 Apr 2015 13:39:03 +0800 Subject: crypto: rng - Convert crypto_rng to new style crypto_type This patch converts the top-level crypto_rng to the "new" style. It was the last algorithm type added before we switched over to the new way of doing things exemplified by shash. All users will automatically switch over to the new interface. Note that this patch does not touch the low-level interface to rng implementations. 
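The "new style" exemplified by shash follows a common handle pattern, sketched below with invented names: the user-visible handle embeds the raw crypto_tfm, the core recovers the wrapper via container_of, and the crypto_type's tfmsize is the offset of that embedded base.

#include <linux/kernel.h>
#include <linux/crypto.h>

/* Sketch of the new-style handle pattern; example_handle is an invented name. */
struct example_handle {
	/* per-type fast-path callbacks live directly in the handle */
	int (*generate)(struct example_handle *h, u8 *dst, unsigned int dlen);
	struct crypto_tfm base;
};

static inline struct example_handle *to_example_handle(struct crypto_tfm *tfm)
{
	return container_of(tfm, struct example_handle, base);
}
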
Signed-off-by: Herbert Xu diff --git a/crypto/rng.c b/crypto/rng.c index e0a25c2..87fa2f4 100644 --- a/crypto/rng.c +++ b/crypto/rng.c @@ -24,11 +24,18 @@ #include #include +#include "internal.h" + static DEFINE_MUTEX(crypto_default_rng_lock); struct crypto_rng *crypto_default_rng; EXPORT_SYMBOL_GPL(crypto_default_rng); static int crypto_default_rng_refcnt; +static inline struct crypto_rng *__crypto_rng_cast(struct crypto_tfm *tfm) +{ + return container_of(tfm, struct crypto_rng, base); +} + static int rngapi_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen) { u8 *buf = NULL; @@ -49,13 +56,13 @@ static int rngapi_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen) return err; } -static int crypto_init_rng_ops(struct crypto_tfm *tfm, u32 type, u32 mask) +static int crypto_rng_init_tfm(struct crypto_tfm *tfm) { + struct crypto_rng *rng = __crypto_rng_cast(tfm); struct rng_alg *alg = &tfm->__crt_alg->cra_rng; - struct rng_tfm *ops = &tfm->crt_rng; - ops->rng_gen_random = alg->rng_make_random; - ops->rng_reset = rngapi_reset; + rng->generate = alg->rng_make_random; + rng->seed = rngapi_reset; return 0; } @@ -92,22 +99,26 @@ static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg) seq_printf(m, "seedsize : %u\n", alg->cra_rng.seedsize); } -static unsigned int crypto_rng_ctxsize(struct crypto_alg *alg, u32 type, - u32 mask) -{ - return alg->cra_ctxsize; -} - const struct crypto_type crypto_rng_type = { - .ctxsize = crypto_rng_ctxsize, - .init = crypto_init_rng_ops, + .extsize = crypto_alg_extsize, + .init_tfm = crypto_rng_init_tfm, #ifdef CONFIG_PROC_FS .show = crypto_rng_show, #endif .report = crypto_rng_report, + .maskclear = ~CRYPTO_ALG_TYPE_MASK, + .maskset = CRYPTO_ALG_TYPE_MASK, + .type = CRYPTO_ALG_TYPE_RNG, + .tfmsize = offsetof(struct crypto_rng, base), }; EXPORT_SYMBOL_GPL(crypto_rng_type); +struct crypto_rng *crypto_alloc_rng(const char *alg_name, u32 type, u32 mask) +{ + return crypto_alloc_tfm(alg_name, &crypto_rng_type, type, mask); +} +EXPORT_SYMBOL_GPL(crypto_alloc_rng); + int crypto_get_default_rng(void) { struct crypto_rng *rng; diff --git a/include/crypto/rng.h b/include/crypto/rng.h index 6e28ea5..f13f3fa 100644 --- a/include/crypto/rng.h +++ b/include/crypto/rng.h @@ -15,6 +15,12 @@ #include +struct crypto_rng { + int (*generate)(struct crypto_rng *tfm, u8 *rdata, unsigned int dlen); + int (*seed)(struct crypto_rng *tfm, u8 *seed, unsigned int slen); + struct crypto_tfm base; +}; + extern struct crypto_rng *crypto_default_rng; int crypto_get_default_rng(void); @@ -27,11 +33,6 @@ void crypto_put_default_rng(void); * CRYPTO_ALG_TYPE_RNG (listed as type "rng" in /proc/crypto) */ -static inline struct crypto_rng *__crypto_rng_cast(struct crypto_tfm *tfm) -{ - return (struct crypto_rng *)tfm; -} - /** * crypto_alloc_rng() -- allocate RNG handle * @alg_name: is the cra_name / name or cra_driver_name / driver name of the @@ -52,15 +53,7 @@ static inline struct crypto_rng *__crypto_rng_cast(struct crypto_tfm *tfm) * Return: allocated cipher handle in case of success; IS_ERR() is true in case * of an error, PTR_ERR() returns the error code. 
*/ -static inline struct crypto_rng *crypto_alloc_rng(const char *alg_name, - u32 type, u32 mask) -{ - type &= ~CRYPTO_ALG_TYPE_MASK; - type |= CRYPTO_ALG_TYPE_RNG; - mask |= CRYPTO_ALG_TYPE_MASK; - - return __crypto_rng_cast(crypto_alloc_base(alg_name, type, mask)); -} +struct crypto_rng *crypto_alloc_rng(const char *alg_name, u32 type, u32 mask); static inline struct crypto_tfm *crypto_rng_tfm(struct crypto_rng *tfm) { @@ -80,18 +73,13 @@ static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm) return &crypto_rng_tfm(tfm)->__crt_alg->cra_rng; } -static inline struct rng_tfm *crypto_rng_crt(struct crypto_rng *tfm) -{ - return &crypto_rng_tfm(tfm)->crt_rng; -} - /** * crypto_free_rng() - zeroize and free RNG handle * @tfm: cipher handle to be freed */ static inline void crypto_free_rng(struct crypto_rng *tfm) { - crypto_free_tfm(crypto_rng_tfm(tfm)); + crypto_destroy_tfm(tfm, crypto_rng_tfm(tfm)); } /** @@ -108,7 +96,7 @@ static inline void crypto_free_rng(struct crypto_rng *tfm) static inline int crypto_rng_get_bytes(struct crypto_rng *tfm, u8 *rdata, unsigned int dlen) { - return crypto_rng_crt(tfm)->rng_gen_random(tfm, rdata, dlen); + return tfm->generate(tfm, rdata, dlen); } /** @@ -131,7 +119,7 @@ static inline int crypto_rng_get_bytes(struct crypto_rng *tfm, static inline int crypto_rng_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen) { - return crypto_rng_crt(tfm)->rng_reset(tfm, seed, slen); + return tfm->seed(tfm, seed, slen); } /** diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 10df5d2..781f7d5 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -655,19 +655,12 @@ struct compress_tfm { u8 *dst, unsigned int *dlen); }; -struct rng_tfm { - int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata, - unsigned int dlen); - int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen); -}; - #define crt_ablkcipher crt_u.ablkcipher #define crt_aead crt_u.aead #define crt_blkcipher crt_u.blkcipher #define crt_cipher crt_u.cipher #define crt_hash crt_u.hash #define crt_compress crt_u.compress -#define crt_rng crt_u.rng struct crypto_tfm { @@ -680,7 +673,6 @@ struct crypto_tfm { struct cipher_tfm cipher; struct hash_tfm hash; struct compress_tfm compress; - struct rng_tfm rng; } crt_u; void (*exit)(struct crypto_tfm *tfm); @@ -714,10 +706,6 @@ struct crypto_hash { struct crypto_tfm base; }; -struct crypto_rng { - struct crypto_tfm base; -}; - enum { CRYPTOA_UNSPEC, CRYPTOA_ALG, -- cgit v0.10.2 From ff030b099a21a4753af575b4304249e88400e506 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 20 Apr 2015 13:39:04 +0800 Subject: crypto: rng - Introduce crypto_rng_generate This patch adds the new top-level function crypto_rng_generate which generates random numbers with additional input. It also extends the mid-level rng_gen_random function to take additional data as input. 
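A short usage sketch with invented names: additional, caller-supplied data can now be mixed into each generate request through the new top-level call, and the existing byte-only helper becomes the special case with no extra input.

#include <crypto/rng.h>

/* Illustrative sketch only; buffer and function names are invented. */
static int example_generate_with_addtl(struct crypto_rng *rng,
				       const u8 *addtl, unsigned int addtl_len,
				       u8 *out, unsigned int outlen)
{
	int ret;

	/* mix additional input into this request */
	ret = crypto_rng_generate(rng, addtl, addtl_len, out, outlen);
	if (ret)
		return ret;

	/* equivalent to crypto_rng_generate(rng, NULL, 0, out, outlen) */
	return crypto_rng_get_bytes(rng, out, outlen);
}
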
Signed-off-by: Herbert Xu diff --git a/crypto/rng.c b/crypto/rng.c index 87fa2f4..4514d37 100644 --- a/crypto/rng.c +++ b/crypto/rng.c @@ -36,6 +36,12 @@ static inline struct crypto_rng *__crypto_rng_cast(struct crypto_tfm *tfm) return container_of(tfm, struct crypto_rng, base); } +static int generate(struct crypto_rng *tfm, const u8 *src, unsigned int slen, + u8 *dst, unsigned int dlen) +{ + return crypto_rng_alg(tfm)->rng_make_random(tfm, dst, dlen); +} + static int rngapi_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen) { u8 *buf = NULL; @@ -59,9 +65,8 @@ static int rngapi_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen) static int crypto_rng_init_tfm(struct crypto_tfm *tfm) { struct crypto_rng *rng = __crypto_rng_cast(tfm); - struct rng_alg *alg = &tfm->__crt_alg->cra_rng; - rng->generate = alg->rng_make_random; + rng->generate = generate; rng->seed = rngapi_reset; return 0; diff --git a/include/crypto/rng.h b/include/crypto/rng.h index f13f3fa..f20f068 100644 --- a/include/crypto/rng.h +++ b/include/crypto/rng.h @@ -16,7 +16,9 @@ #include struct crypto_rng { - int (*generate)(struct crypto_rng *tfm, u8 *rdata, unsigned int dlen); + int (*generate)(struct crypto_rng *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int dlen); int (*seed)(struct crypto_rng *tfm, u8 *seed, unsigned int slen); struct crypto_tfm base; }; @@ -83,6 +85,27 @@ static inline void crypto_free_rng(struct crypto_rng *tfm) } /** + * crypto_rng_generate() - get random number + * @tfm: cipher handle + * @src: Input buffer holding additional data, may be NULL + * @slen: Length of additional data + * @dst: output buffer holding the random numbers + * @dlen: length of the output buffer + * + * This function fills the caller-allocated buffer with random + * numbers using the random number generator referenced by the + * cipher handle. + * + * Return: 0 function was successful; < 0 if an error occurred + */ +static inline int crypto_rng_generate(struct crypto_rng *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int dlen) +{ + return tfm->generate(tfm, src, slen, dst, dlen); +} + +/** * crypto_rng_get_bytes() - get random number * @tfm: cipher handle * @rdata: output buffer holding the random numbers @@ -96,7 +119,7 @@ static inline void crypto_free_rng(struct crypto_rng *tfm) static inline int crypto_rng_get_bytes(struct crypto_rng *tfm, u8 *rdata, unsigned int dlen) { - return tfm->generate(tfm, rdata, dlen); + return crypto_rng_generate(tfm, NULL, 0, rdata, dlen); } /** -- cgit v0.10.2 From 3c5d8fa9f56ad0928e7a1f06003e5034f5eedb52 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 21 Apr 2015 10:46:37 +0800 Subject: crypto: rng - Mark crypto_rng_reset seed as const There is no reason why crypto_rng_reset should modify the seed so this patch marks it as const. Since our algorithms don't export a const seed function yet we have to go through some contortions for now. 
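A hedged consumer sketch of what the const qualifier buys (the test vector below is a stand-in, not real data): a read-only seed can be handed to crypto_rng_reset() directly, and the API makes its own writable copy before calling into drivers that still expect a mutable buffer.

#include <crypto/rng.h>

/* Illustrative sketch only; the seed is a placeholder, not a real test vector. */
static int example_seed_from_rodata(struct crypto_rng *rng)
{
	static const u8 test_seed[32] = { 0x01, 0x02, 0x03 };	/* stand-in */

	return crypto_rng_reset(rng, test_seed, sizeof(test_seed));
}
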
Signed-off-by: Herbert Xu diff --git a/crypto/rng.c b/crypto/rng.c index 4514d37..f1d6494 100644 --- a/crypto/rng.c +++ b/crypto/rng.c @@ -42,7 +42,29 @@ static int generate(struct crypto_rng *tfm, const u8 *src, unsigned int slen, return crypto_rng_alg(tfm)->rng_make_random(tfm, dst, dlen); } -static int rngapi_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen) +static int rngapi_reset(struct crypto_rng *tfm, const u8 *seed, + unsigned int slen) +{ + u8 *buf = NULL; + u8 *src = (u8 *)seed; + int err; + + if (slen) { + buf = kmalloc(slen, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + memcpy(buf, seed, slen); + src = buf; + } + + err = crypto_rng_alg(tfm)->rng_reset(tfm, src, slen); + + kzfree(buf); + return err; +} + +int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen) { u8 *buf = NULL; int err; @@ -56,11 +78,12 @@ static int rngapi_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen) seed = buf; } - err = crypto_rng_alg(tfm)->rng_reset(tfm, seed, slen); + err = tfm->seed(tfm, seed, slen); kfree(buf); return err; } +EXPORT_SYMBOL_GPL(crypto_rng_reset); static int crypto_rng_init_tfm(struct crypto_tfm *tfm) { diff --git a/include/crypto/rng.h b/include/crypto/rng.h index f20f068..7fca371 100644 --- a/include/crypto/rng.h +++ b/include/crypto/rng.h @@ -19,7 +19,7 @@ struct crypto_rng { int (*generate)(struct crypto_rng *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int dlen); - int (*seed)(struct crypto_rng *tfm, u8 *seed, unsigned int slen); + int (*seed)(struct crypto_rng *tfm, const u8 *seed, unsigned int slen); struct crypto_tfm base; }; @@ -139,11 +139,8 @@ static inline int crypto_rng_get_bytes(struct crypto_rng *tfm, * * Return: 0 if the setting of the key was successful; < 0 if an error occurred */ -static inline int crypto_rng_reset(struct crypto_rng *tfm, - u8 *seed, unsigned int slen) -{ - return tfm->seed(tfm, seed, slen); -} +int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, + unsigned int slen); /** * crypto_rng_seedsize() - obtain seed size of RNG -- cgit v0.10.2 From acec27ff35af9caf34d76d16ee17ff3b292e7d83 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 21 Apr 2015 10:46:38 +0800 Subject: crypto: rng - Convert low-level crypto_rng to new style This patch converts the low-level crypto_rng interface to the "new" style. This allows existing implementations to be converted over one- by-one. Once that is complete we can then remove the old rng interface. 
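To illustrate what a converted provider looks like, here is a minimal sketch built from the interfaces this patch introduces; all example_* names are invented and the output is a placeholder rather than a real random number generator. The driver fills in struct rng_alg and registers it with crypto_register_rng() instead of using the old cra_rng union.

#include <linux/module.h>
#include <linux/string.h>
#include <crypto/internal/rng.h>

static int example_rng_generate(struct crypto_rng *tfm,
				const u8 *src, unsigned int slen,
				u8 *dst, unsigned int dlen)
{
	memset(dst, 0, dlen);		/* placeholder output only */
	return 0;
}

static int example_rng_seed(struct crypto_rng *tfm,
			    const u8 *seed, unsigned int slen)
{
	return 0;			/* stateless sketch, nothing to seed */
}

static struct rng_alg example_rng_alg = {
	.generate	= example_rng_generate,
	.seed		= example_rng_seed,
	.seedsize	= 0,
	.base		= {
		.cra_name		= "example_rng",
		.cra_driver_name	= "example_rng-sketch",
		.cra_priority		= 100,
		.cra_ctxsize		= 0,
		.cra_module		= THIS_MODULE,
	},
};

static int __init example_rng_mod_init(void)
{
	/* crypto_register_rng() sets the RNG type flags itself */
	return crypto_register_rng(&example_rng_alg);
}

static void __exit example_rng_mod_exit(void)
{
	crypto_unregister_rng(&example_rng_alg);
}

module_init(example_rng_mod_init);
module_exit(example_rng_mod_exit);
MODULE_LICENSE("GPL");
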
Signed-off-by: Herbert Xu diff --git a/crypto/rng.c b/crypto/rng.c index f1d6494..5e0425a 100644 --- a/crypto/rng.c +++ b/crypto/rng.c @@ -36,10 +36,15 @@ static inline struct crypto_rng *__crypto_rng_cast(struct crypto_tfm *tfm) return container_of(tfm, struct crypto_rng, base); } +static inline struct old_rng_alg *crypto_old_rng_alg(struct crypto_rng *tfm) +{ + return &crypto_rng_tfm(tfm)->__crt_alg->cra_rng; +} + static int generate(struct crypto_rng *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int dlen) { - return crypto_rng_alg(tfm)->rng_make_random(tfm, dst, dlen); + return crypto_old_rng_alg(tfm)->rng_make_random(tfm, dst, dlen); } static int rngapi_reset(struct crypto_rng *tfm, const u8 *seed, @@ -58,7 +63,7 @@ static int rngapi_reset(struct crypto_rng *tfm, const u8 *seed, src = buf; } - err = crypto_rng_alg(tfm)->rng_reset(tfm, src, slen); + err = crypto_old_rng_alg(tfm)->rng_reset(tfm, src, slen); kzfree(buf); return err; @@ -88,13 +93,31 @@ EXPORT_SYMBOL_GPL(crypto_rng_reset); static int crypto_rng_init_tfm(struct crypto_tfm *tfm) { struct crypto_rng *rng = __crypto_rng_cast(tfm); + struct rng_alg *alg = crypto_rng_alg(rng); + struct old_rng_alg *oalg = crypto_old_rng_alg(rng); + + if (oalg->rng_make_random) { + rng->generate = generate; + rng->seed = rngapi_reset; + rng->seedsize = oalg->seedsize; + return 0; + } - rng->generate = generate; - rng->seed = rngapi_reset; + rng->generate = alg->generate; + rng->seed = alg->seed; + rng->seedsize = alg->seedsize; return 0; } +static unsigned int seedsize(struct crypto_alg *alg) +{ + struct rng_alg *ralg = container_of(alg, struct rng_alg, base); + + return alg->cra_rng.rng_make_random ? + alg->cra_rng.seedsize : ralg->seedsize; +} + #ifdef CONFIG_NET static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg) { @@ -102,7 +125,7 @@ static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg) strncpy(rrng.type, "rng", sizeof(rrng.type)); - rrng.seedsize = alg->cra_rng.seedsize; + rrng.seedsize = seedsize(alg); if (nla_put(skb, CRYPTOCFGA_REPORT_RNG, sizeof(struct crypto_report_rng), &rrng)) @@ -124,7 +147,7 @@ static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg) static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg) { seq_printf(m, "type : rng\n"); - seq_printf(m, "seedsize : %u\n", alg->cra_rng.seedsize); + seq_printf(m, "seedsize : %u\n", seedsize(alg)); } const struct crypto_type crypto_rng_type = { @@ -189,5 +212,26 @@ void crypto_put_default_rng(void) } EXPORT_SYMBOL_GPL(crypto_put_default_rng); +int crypto_register_rng(struct rng_alg *alg) +{ + struct crypto_alg *base = &alg->base; + + if (alg->seedsize > PAGE_SIZE / 8) + return -EINVAL; + + base->cra_type = &crypto_rng_type; + base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; + base->cra_flags |= CRYPTO_ALG_TYPE_RNG; + + return crypto_register_alg(base); +} +EXPORT_SYMBOL_GPL(crypto_register_rng); + +void crypto_unregister_rng(struct rng_alg *alg) +{ + crypto_unregister_alg(&alg->base); +} +EXPORT_SYMBOL_GPL(crypto_unregister_rng); + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Random Number Generator"); diff --git a/include/crypto/internal/rng.h b/include/crypto/internal/rng.h index 8969733..76f3c95 100644 --- a/include/crypto/internal/rng.h +++ b/include/crypto/internal/rng.h @@ -18,6 +18,9 @@ extern const struct crypto_type crypto_rng_type; +int crypto_register_rng(struct rng_alg *alg); +void crypto_unregister_rng(struct rng_alg *alg); + static inline void *crypto_rng_ctx(struct crypto_rng *tfm) { return 
crypto_tfm_ctx(&tfm->base); diff --git a/include/crypto/rng.h b/include/crypto/rng.h index 7fca371..133f044 100644 --- a/include/crypto/rng.h +++ b/include/crypto/rng.h @@ -15,11 +15,48 @@ #include +struct crypto_rng; + +/** + * struct rng_alg - random number generator definition + * + * @generate: The function defined by this variable obtains a + * random number. The random number generator transform + * must generate the random number out of the context + * provided with this call, plus any additional data + * if provided to the call. + * @seed: Seed or reseed the random number generator. With the + * invocation of this function call, the random number + * generator shall become ready fo generation. If the + * random number generator requires a seed for setting + * up a new state, the seed must be provided by the + * consumer while invoking this function. The required + * size of the seed is defined with @seedsize . + * @seedsize: The seed size required for a random number generator + * initialization defined with this variable. Some + * random number generators does not require a seed + * as the seeding is implemented internally without + * the need of support by the consumer. In this case, + * the seed size is set to zero. + * @base: Common crypto API algorithm data structure. + */ +struct rng_alg { + int (*generate)(struct crypto_rng *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int dlen); + int (*seed)(struct crypto_rng *tfm, const u8 *seed, unsigned int slen); + + unsigned int seedsize; + + struct crypto_alg base; +}; + struct crypto_rng { int (*generate)(struct crypto_rng *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int dlen); int (*seed)(struct crypto_rng *tfm, const u8 *seed, unsigned int slen); + unsigned int seedsize; struct crypto_tfm base; }; @@ -72,7 +109,8 @@ static inline struct crypto_tfm *crypto_rng_tfm(struct crypto_rng *tfm) */ static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm) { - return &crypto_rng_tfm(tfm)->__crt_alg->cra_rng; + return container_of(crypto_rng_tfm(tfm)->__crt_alg, + struct rng_alg, base); } /** @@ -156,7 +194,7 @@ int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, */ static inline int crypto_rng_seedsize(struct crypto_rng *tfm) { - return crypto_rng_alg(tfm)->seedsize; + return tfm->seedsize; } #endif diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 781f7d5..2fa9b05 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -427,7 +427,7 @@ struct compress_alg { }; /** - * struct rng_alg - random number generator definition + * struct old_rng_alg - random number generator definition * @rng_make_random: The function defined by this variable obtains a random * number. The random number generator transform must generate * the random number out of the context provided with this @@ -445,7 +445,7 @@ struct compress_alg { * seeding is implemented internally without the need of support by * the consumer. In this case, the seed size is set to zero. 
*/ -struct rng_alg { +struct old_rng_alg { int (*rng_make_random)(struct crypto_rng *tfm, u8 *rdata, unsigned int dlen); int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen); @@ -559,7 +559,7 @@ struct crypto_alg { struct blkcipher_alg blkcipher; struct cipher_alg cipher; struct compress_alg compress; - struct rng_alg rng; + struct old_rng_alg rng; } cra_u; int (*cra_init)(struct crypto_tfm *tfm); -- cgit v0.10.2 From 7ca99d814821e8a8ac6d7c48b2ccfc24bda27b1f Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 21 Apr 2015 10:46:39 +0800 Subject: crypto: rng - Add crypto_rng_set_entropy This patch adds the function crypto_rng_set_entropy. It is only meant to be used by testmgr when testing RNG implementations by providing fixed entropy data in order to verify test vectors. Signed-off-by: Herbert Xu diff --git a/include/crypto/internal/rng.h b/include/crypto/internal/rng.h index 76f3c95..93d41bc 100644 --- a/include/crypto/internal/rng.h +++ b/include/crypto/internal/rng.h @@ -26,4 +26,10 @@ static inline void *crypto_rng_ctx(struct crypto_rng *tfm) return crypto_tfm_ctx(&tfm->base); } +static inline void crypto_rng_set_entropy(struct crypto_rng *tfm, + const u8 *data, unsigned int len) +{ + crypto_rng_alg(tfm)->set_ent(tfm, data, len); +} + #endif diff --git a/include/crypto/rng.h b/include/crypto/rng.h index 133f044..cc22e52 100644 --- a/include/crypto/rng.h +++ b/include/crypto/rng.h @@ -32,6 +32,8 @@ struct crypto_rng; * up a new state, the seed must be provided by the * consumer while invoking this function. The required * size of the seed is defined with @seedsize . + * @set_ent: Set entropy that would otherwise be obtained from + * entropy source. Internal use only. * @seedsize: The seed size required for a random number generator * initialization defined with this variable. Some * random number generators does not require a seed @@ -45,6 +47,8 @@ struct rng_alg { const u8 *src, unsigned int slen, u8 *dst, unsigned int dlen); int (*seed)(struct crypto_rng *tfm, const u8 *seed, unsigned int slen); + void (*set_ent)(struct crypto_rng *tfm, const u8 *data, + unsigned int len); unsigned int seedsize; -- cgit v0.10.2 From 881cd6c570af412c2fab278b0656f7597dc5ee74 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 21 Apr 2015 10:46:40 +0800 Subject: crypto: rng - Add multiple algorithm registration interface This patch adds the helpers that allow the registration and removal of multiple RNG algorithms. 
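The diff that follows adds crypto_register_rngs()/crypto_unregister_rngs(). A usage sketch under hypothetical driver and array names: a driver providing several stdrng variants registers the whole array in one call and relies on the helper to unwind already-registered entries on partial failure.

static struct rng_alg my_rng_algs[] = {
        { /* first variant:  .generate, .seed, .seedsize, .base ... */ },
        { /* second variant: e.g. a FIPS-mode flavour of the same RNG */ },
};

static int __init my_rng_mod_init(void)
{
        /* registers every entry; earlier entries are unregistered on error */
        return crypto_register_rngs(my_rng_algs, ARRAY_SIZE(my_rng_algs));
}

static void __exit my_rng_mod_fini(void)
{
        crypto_unregister_rngs(my_rng_algs, ARRAY_SIZE(my_rng_algs));
}

module_init(my_rng_mod_init);
module_exit(my_rng_mod_fini);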
Signed-off-by: Herbert Xu diff --git a/crypto/rng.c b/crypto/rng.c index 5e0425a..e98ce1c 100644 --- a/crypto/rng.c +++ b/crypto/rng.c @@ -233,5 +233,34 @@ void crypto_unregister_rng(struct rng_alg *alg) } EXPORT_SYMBOL_GPL(crypto_unregister_rng); +int crypto_register_rngs(struct rng_alg *algs, int count) +{ + int i, ret; + + for (i = 0; i < count; i++) { + ret = crypto_register_rng(algs + i); + if (ret) + goto err; + } + + return 0; + +err: + for (--i; i >= 0; --i) + crypto_unregister_rng(algs + i); + + return ret; +} +EXPORT_SYMBOL_GPL(crypto_register_rngs); + +void crypto_unregister_rngs(struct rng_alg *algs, int count) +{ + int i; + + for (i = count - 1; i >= 0; --i) + crypto_unregister_rng(algs + i); +} +EXPORT_SYMBOL_GPL(crypto_unregister_rngs); + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Random Number Generator"); diff --git a/include/crypto/internal/rng.h b/include/crypto/internal/rng.h index 93d41bc..2c9a865 100644 --- a/include/crypto/internal/rng.h +++ b/include/crypto/internal/rng.h @@ -20,6 +20,8 @@ extern const struct crypto_type crypto_rng_type; int crypto_register_rng(struct rng_alg *alg); void crypto_unregister_rng(struct rng_alg *alg); +int crypto_register_rngs(struct rng_alg *algs, int count); +void crypto_unregister_rngs(struct rng_alg *algs, int count); static inline void *crypto_rng_ctx(struct crypto_rng *tfm) { -- cgit v0.10.2 From 8fded5925d0a733c46f8d0b5edd1c9b315882b1d Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 21 Apr 2015 10:46:41 +0800 Subject: crypto: drbg - Convert to new rng interface This patch converts the DRBG implementation to the new low-level rng interface. This allows us to get rid of struct drbg_gen by using the new RNG API instead. Signed-off-by: Herbert Xu Acked-by: Stephan Mueller diff --git a/crypto/drbg.c b/crypto/drbg.c index 5bce159..ec6bffd 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -235,7 +235,7 @@ static bool drbg_fips_continuous_test(struct drbg_state *drbg, #ifdef CONFIG_CRYPTO_FIPS int ret = 0; /* skip test if we test the overall system */ - if (drbg->test_data) + if (list_empty(&drbg->test_data.list)) return true; /* only perform test in FIPS mode */ if (0 == fips_enabled) @@ -1068,9 +1068,9 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers, return -EINVAL; } - if (drbg->test_data && drbg->test_data->testentropy) { - drbg_string_fill(&data1, drbg->test_data->testentropy->buf, - drbg->test_data->testentropy->len); + if (list_empty(&drbg->test_data.list)) { + drbg_string_fill(&data1, drbg->test_data.buf, + drbg->test_data.len); pr_devel("DRBG: using test entropy\n"); } else { /* @@ -1471,15 +1471,16 @@ static int drbg_uninstantiate(struct drbg_state *drbg) * Helper function for setting the test data in the DRBG * * @drbg DRBG state handle - * @test_data test data to sets + * @data test data + * @len test data length */ -static inline void drbg_set_testdata(struct drbg_state *drbg, - struct drbg_test_data *test_data) +static void drbg_kcapi_set_entropy(struct crypto_rng *tfm, + const u8 *data, unsigned int len) { - if (!test_data || !test_data->testentropy) - return; - mutex_lock(&drbg->drbg_mutex);; - drbg->test_data = test_data; + struct drbg_state *drbg = crypto_rng_ctx(tfm); + + mutex_lock(&drbg->drbg_mutex); + drbg_string_fill(&drbg->test_data, data, len); mutex_unlock(&drbg->drbg_mutex); } @@ -1645,63 +1646,49 @@ static void drbg_kcapi_cleanup(struct crypto_tfm *tfm) * Generate random numbers invoked by the kernel crypto API: * The API of the kernel crypto API is extended as follows: * - * If 
dlen is larger than zero, rdata is interpreted as the output buffer - * where random data is to be stored. - * - * If dlen is zero, rdata is interpreted as a pointer to a struct drbg_gen - * which holds the additional information string that is used for the - * DRBG generation process. The output buffer that is to be used to store - * data is also pointed to by struct drbg_gen. + * src is additional input supplied to the RNG. + * slen is the length of src. + * dst is the output buffer where random data is to be stored. + * dlen is the length of dst. */ -static int drbg_kcapi_random(struct crypto_rng *tfm, u8 *rdata, - unsigned int dlen) +static int drbg_kcapi_random(struct crypto_rng *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int dlen) { struct drbg_state *drbg = crypto_rng_ctx(tfm); - if (0 < dlen) { - return drbg_generate_long(drbg, rdata, dlen, NULL); - } else { - struct drbg_gen *data = (struct drbg_gen *)rdata; - struct drbg_string addtl; - /* catch NULL pointer */ - if (!data) - return 0; - drbg_set_testdata(drbg, data->test_data); + struct drbg_string *addtl = NULL; + struct drbg_string string; + + if (slen) { /* linked list variable is now local to allow modification */ - drbg_string_fill(&addtl, data->addtl->buf, data->addtl->len); - return drbg_generate_long(drbg, data->outbuf, data->outlen, - &addtl); + drbg_string_fill(&string, src, slen); + addtl = &string; } + + return drbg_generate_long(drbg, dst, dlen, addtl); } /* * Seed the DRBG invoked by the kernel crypto API - * Similar to the generate function of drbg_kcapi_random, this - * function extends the kernel crypto API interface with struct drbg_gen */ -static int drbg_kcapi_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen) +static int drbg_kcapi_seed(struct crypto_rng *tfm, + const u8 *seed, unsigned int slen) { struct drbg_state *drbg = crypto_rng_ctx(tfm); struct crypto_tfm *tfm_base = crypto_rng_tfm(tfm); bool pr = false; - struct drbg_string seed_string; + struct drbg_string string; + struct drbg_string *seed_string = NULL; int coreref = 0; drbg_convert_tfm_core(crypto_tfm_alg_driver_name(tfm_base), &coreref, &pr); if (0 < slen) { - drbg_string_fill(&seed_string, seed, slen); - return drbg_instantiate(drbg, &seed_string, coreref, pr); - } else { - struct drbg_gen *data = (struct drbg_gen *)seed; - /* allow invocation of API call with NULL, 0 */ - if (!data) - return drbg_instantiate(drbg, NULL, coreref, pr); - drbg_set_testdata(drbg, data->test_data); - /* linked list variable is now local to allow modification */ - drbg_string_fill(&seed_string, data->addtl->buf, - data->addtl->len); - return drbg_instantiate(drbg, &seed_string, coreref, pr); + drbg_string_fill(&string, seed, slen); + seed_string = &string; } + + return drbg_instantiate(drbg, seed_string, coreref, pr); } /*************************************************************** @@ -1793,32 +1780,31 @@ outbuf: #endif /* CONFIG_CRYPTO_FIPS */ } -static struct crypto_alg drbg_algs[22]; +static struct rng_alg drbg_algs[22]; /* * Fill the array drbg_algs used to register the different DRBGs * with the kernel crypto API. To fill the array, the information * from drbg_cores[] is used. 
*/ -static inline void __init drbg_fill_array(struct crypto_alg *alg, +static inline void __init drbg_fill_array(struct rng_alg *alg, const struct drbg_core *core, int pr) { int pos = 0; static int priority = 100; - memset(alg, 0, sizeof(struct crypto_alg)); - memcpy(alg->cra_name, "stdrng", 6); + memcpy(alg->base.cra_name, "stdrng", 6); if (pr) { - memcpy(alg->cra_driver_name, "drbg_pr_", 8); + memcpy(alg->base.cra_driver_name, "drbg_pr_", 8); pos = 8; } else { - memcpy(alg->cra_driver_name, "drbg_nopr_", 10); + memcpy(alg->base.cra_driver_name, "drbg_nopr_", 10); pos = 10; } - memcpy(alg->cra_driver_name + pos, core->cra_name, + memcpy(alg->base.cra_driver_name + pos, core->cra_name, strlen(core->cra_name)); - alg->cra_priority = priority; + alg->base.cra_priority = priority; priority++; /* * If FIPS mode enabled, the selected DRBG shall have the @@ -1826,17 +1812,16 @@ static inline void __init drbg_fill_array(struct crypto_alg *alg, * it is selected. */ if (fips_enabled) - alg->cra_priority += 200; - - alg->cra_flags = CRYPTO_ALG_TYPE_RNG; - alg->cra_ctxsize = sizeof(struct drbg_state); - alg->cra_type = &crypto_rng_type; - alg->cra_module = THIS_MODULE; - alg->cra_init = drbg_kcapi_init; - alg->cra_exit = drbg_kcapi_cleanup; - alg->cra_u.rng.rng_make_random = drbg_kcapi_random; - alg->cra_u.rng.rng_reset = drbg_kcapi_reset; - alg->cra_u.rng.seedsize = 0; + alg->base.cra_priority += 200; + + alg->base.cra_ctxsize = sizeof(struct drbg_state); + alg->base.cra_module = THIS_MODULE; + alg->base.cra_init = drbg_kcapi_init; + alg->base.cra_exit = drbg_kcapi_cleanup; + alg->generate = drbg_kcapi_random; + alg->seed = drbg_kcapi_seed; + alg->set_ent = drbg_kcapi_set_entropy; + alg->seedsize = 0; } static int __init drbg_init(void) @@ -1869,12 +1854,12 @@ static int __init drbg_init(void) drbg_fill_array(&drbg_algs[i], &drbg_cores[j], 1); for (j = 0; ARRAY_SIZE(drbg_cores) > j; j++, i++) drbg_fill_array(&drbg_algs[i], &drbg_cores[j], 0); - return crypto_register_algs(drbg_algs, (ARRAY_SIZE(drbg_cores) * 2)); + return crypto_register_rngs(drbg_algs, (ARRAY_SIZE(drbg_cores) * 2)); } static void __exit drbg_exit(void) { - crypto_unregister_algs(drbg_algs, (ARRAY_SIZE(drbg_cores) * 2)); + crypto_unregister_rngs(drbg_algs, (ARRAY_SIZE(drbg_cores) * 2)); } module_init(drbg_init); diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h index a43a7ed..480d7a0 100644 --- a/include/crypto/drbg.h +++ b/include/crypto/drbg.h @@ -121,7 +121,7 @@ struct drbg_state { #endif const struct drbg_state_ops *d_ops; const struct drbg_core *core; - struct drbg_test_data *test_data; + struct drbg_string test_data; }; static inline __u8 drbg_statelen(struct drbg_state *drbg) @@ -177,19 +177,8 @@ static inline size_t drbg_max_requests(struct drbg_state *drbg) } /* - * kernel crypto API input data structure for DRBG generate in case dlen - * is set to 0 - */ -struct drbg_gen { - unsigned char *outbuf; /* output buffer for random numbers */ - unsigned int outlen; /* size of output buffer */ - struct drbg_string *addtl; /* additional information string */ - struct drbg_test_data *test_data; /* test data */ -}; - -/* * This is a wrapper to the kernel crypto API function of - * crypto_rng_get_bytes() to allow the caller to provide additional data. + * crypto_rng_generate() to allow the caller to provide additional data. 
* * @drng DRBG handle -- see crypto_rng_get_bytes * @outbuf output buffer -- see crypto_rng_get_bytes @@ -204,21 +193,15 @@ static inline int crypto_drbg_get_bytes_addtl(struct crypto_rng *drng, unsigned char *outbuf, unsigned int outlen, struct drbg_string *addtl) { - int ret; - struct drbg_gen genbuf; - genbuf.outbuf = outbuf; - genbuf.outlen = outlen; - genbuf.addtl = addtl; - genbuf.test_data = NULL; - ret = crypto_rng_get_bytes(drng, (u8 *)&genbuf, 0); - return ret; + return crypto_rng_generate(drng, addtl->buf, addtl->len, + outbuf, outlen); } /* * TEST code * * This is a wrapper to the kernel crypto API function of - * crypto_rng_get_bytes() to allow the caller to provide additional data and + * crypto_rng_generate() to allow the caller to provide additional data and * allow furnishing of test_data * * @drng DRBG handle -- see crypto_rng_get_bytes @@ -236,14 +219,10 @@ static inline int crypto_drbg_get_bytes_addtl_test(struct crypto_rng *drng, struct drbg_string *addtl, struct drbg_test_data *test_data) { - int ret; - struct drbg_gen genbuf; - genbuf.outbuf = outbuf; - genbuf.outlen = outlen; - genbuf.addtl = addtl; - genbuf.test_data = test_data; - ret = crypto_rng_get_bytes(drng, (u8 *)&genbuf, 0); - return ret; + crypto_rng_set_entropy(drng, test_data->testentropy->buf, + test_data->testentropy->len); + return crypto_rng_generate(drng, addtl->buf, addtl->len, + outbuf, outlen); } /* @@ -264,14 +243,9 @@ static inline int crypto_drbg_reset_test(struct crypto_rng *drng, struct drbg_string *pers, struct drbg_test_data *test_data) { - int ret; - struct drbg_gen genbuf; - genbuf.outbuf = NULL; - genbuf.outlen = 0; - genbuf.addtl = pers; - genbuf.test_data = test_data; - ret = crypto_rng_reset(drng, (u8 *)&genbuf, 0); - return ret; + crypto_rng_set_entropy(drng, test_data->testentropy->buf, + test_data->testentropy->len); + return crypto_rng_reset(drng, pers->buf, pers->len); } /* DRBG type flags */ -- cgit v0.10.2 From 6f7e3caa9ecd194cb3f20d0712de1c76d8149e73 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 21 Apr 2015 10:46:43 +0800 Subject: crypto: ansi_cprng - Remove bogus inclusion of internal.h The file internal.h is only meant to be used by internel API implementation and not algorithm implementations. In fact it isn't even needed here so this patch removes it. Signed-off-by: Herbert Xu Acked-by: Neil Horman diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c index 765fe76..e4945ec 100644 --- a/crypto/ansi_cprng.c +++ b/crypto/ansi_cprng.c @@ -20,8 +20,6 @@ #include #include -#include "internal.h" - #define DEFAULT_PRNG_KEY "0123456789abcdef" #define DEFAULT_PRNG_KSZ 16 #define DEFAULT_BLK_SZ 16 -- cgit v0.10.2 From e7c2422a839bfc6876a2f7a9b283bb2963f0287b Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 21 Apr 2015 10:46:44 +0800 Subject: crypto: ansi_cprng - Convert to new rng interface This patch ocnverts the ANSI CPRNG implementation to the new low-level rng interface. 
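The seed layout the converted cprng_reset() expects is unchanged from the old interface: V first (one block), then the key, with DT optionally appended and detected from the overall seed length. A caller-side sketch under that assumption (the V, key and dt buffers are illustrative, not from the patch):

        u8 seed[DEFAULT_BLK_SZ + DEFAULT_PRNG_KSZ + DEFAULT_BLK_SZ];
        int err;

        memcpy(seed, V, DEFAULT_BLK_SZ);                        /* V   */
        memcpy(seed + DEFAULT_BLK_SZ, key, DEFAULT_PRNG_KSZ);   /* KEY */
        memcpy(seed + DEFAULT_BLK_SZ + DEFAULT_PRNG_KSZ, dt,
               DEFAULT_BLK_SZ);                                 /* DT, optional */

        /* crypto_rng_reset() now takes a const seed and copies it itself */
        err = crypto_rng_reset(rng, seed, sizeof(seed));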
Signed-off-by: Herbert Xu Acked-by: Neil Horman diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c index e4945ec..eff337c 100644 --- a/crypto/ansi_cprng.c +++ b/crypto/ansi_cprng.c @@ -279,11 +279,11 @@ static void free_prng_context(struct prng_context *ctx) } static int reset_prng_context(struct prng_context *ctx, - unsigned char *key, size_t klen, - unsigned char *V, unsigned char *DT) + const unsigned char *key, size_t klen, + const unsigned char *V, const unsigned char *DT) { int ret; - unsigned char *prng_key; + const unsigned char *prng_key; spin_lock_bh(&ctx->prng_lock); ctx->flags |= PRNG_NEED_RESET; @@ -351,8 +351,9 @@ static void cprng_exit(struct crypto_tfm *tfm) free_prng_context(crypto_tfm_ctx(tfm)); } -static int cprng_get_random(struct crypto_rng *tfm, u8 *rdata, - unsigned int dlen) +static int cprng_get_random(struct crypto_rng *tfm, + const u8 *src, unsigned int slen, + u8 *rdata, unsigned int dlen) { struct prng_context *prng = crypto_rng_ctx(tfm); @@ -365,11 +366,12 @@ static int cprng_get_random(struct crypto_rng *tfm, u8 *rdata, * V and KEY are required during reset, and DT is optional, detected * as being present by testing the length of the seed */ -static int cprng_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen) +static int cprng_reset(struct crypto_rng *tfm, + const u8 *seed, unsigned int slen) { struct prng_context *prng = crypto_rng_ctx(tfm); - u8 *key = seed + DEFAULT_BLK_SZ; - u8 *dt = NULL; + const u8 *key = seed + DEFAULT_BLK_SZ; + const u8 *dt = NULL; if (slen < DEFAULT_PRNG_KSZ + DEFAULT_BLK_SZ) return -EINVAL; @@ -385,18 +387,20 @@ static int cprng_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen) } #ifdef CONFIG_CRYPTO_FIPS -static int fips_cprng_get_random(struct crypto_rng *tfm, u8 *rdata, - unsigned int dlen) +static int fips_cprng_get_random(struct crypto_rng *tfm, + const u8 *src, unsigned int slen, + u8 *rdata, unsigned int dlen) { struct prng_context *prng = crypto_rng_ctx(tfm); return get_prng_bytes(rdata, dlen, prng, 1); } -static int fips_cprng_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen) +static int fips_cprng_reset(struct crypto_rng *tfm, + const u8 *seed, unsigned int slen) { u8 rdata[DEFAULT_BLK_SZ]; - u8 *key = seed + DEFAULT_BLK_SZ; + const u8 *key = seed + DEFAULT_BLK_SZ; int rc; struct prng_context *prng = crypto_rng_ctx(tfm); @@ -422,40 +426,32 @@ out: } #endif -static struct crypto_alg rng_algs[] = { { - .cra_name = "stdrng", - .cra_driver_name = "ansi_cprng", - .cra_priority = 100, - .cra_flags = CRYPTO_ALG_TYPE_RNG, - .cra_ctxsize = sizeof(struct prng_context), - .cra_type = &crypto_rng_type, - .cra_module = THIS_MODULE, - .cra_init = cprng_init, - .cra_exit = cprng_exit, - .cra_u = { - .rng = { - .rng_make_random = cprng_get_random, - .rng_reset = cprng_reset, - .seedsize = DEFAULT_PRNG_KSZ + 2*DEFAULT_BLK_SZ, - } +static struct rng_alg rng_algs[] = { { + .generate = cprng_get_random, + .seed = cprng_reset, + .seedsize = DEFAULT_PRNG_KSZ + 2 * DEFAULT_BLK_SZ, + .base = { + .cra_name = "stdrng", + .cra_driver_name = "ansi_cprng", + .cra_priority = 100, + .cra_ctxsize = sizeof(struct prng_context), + .cra_module = THIS_MODULE, + .cra_init = cprng_init, + .cra_exit = cprng_exit, } #ifdef CONFIG_CRYPTO_FIPS }, { - .cra_name = "fips(ansi_cprng)", - .cra_driver_name = "fips_ansi_cprng", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_TYPE_RNG, - .cra_ctxsize = sizeof(struct prng_context), - .cra_type = &crypto_rng_type, - .cra_module = THIS_MODULE, - .cra_init = cprng_init, - .cra_exit = cprng_exit, 
- .cra_u = { - .rng = { - .rng_make_random = fips_cprng_get_random, - .rng_reset = fips_cprng_reset, - .seedsize = DEFAULT_PRNG_KSZ + 2*DEFAULT_BLK_SZ, - } + .generate = fips_cprng_get_random, + .seed = fips_cprng_reset, + .seedsize = DEFAULT_PRNG_KSZ + 2 * DEFAULT_BLK_SZ, + .base = { + .cra_name = "fips(ansi_cprng)", + .cra_driver_name = "fips_ansi_cprng", + .cra_priority = 300, + .cra_ctxsize = sizeof(struct prng_context), + .cra_module = THIS_MODULE, + .cra_init = cprng_init, + .cra_exit = cprng_exit, } #endif } }; @@ -463,12 +459,12 @@ static struct crypto_alg rng_algs[] = { { /* Module initalization */ static int __init prng_mod_init(void) { - return crypto_register_algs(rng_algs, ARRAY_SIZE(rng_algs)); + return crypto_register_rngs(rng_algs, ARRAY_SIZE(rng_algs)); } static void __exit prng_mod_fini(void) { - crypto_unregister_algs(rng_algs, ARRAY_SIZE(rng_algs)); + crypto_unregister_rngs(rng_algs, ARRAY_SIZE(rng_algs)); } MODULE_LICENSE("GPL"); -- cgit v0.10.2 From e33cf2c5aab7d0012e7890089e89ae2466c2449c Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 21 Apr 2015 10:46:45 +0800 Subject: crypto: krng - Convert to new rng interface This patch ocnverts the KRNG implementation to the new low-level rng interface. Signed-off-by: Herbert Xu diff --git a/crypto/krng.c b/crypto/krng.c index 0224841..40ed78e 100644 --- a/crypto/krng.c +++ b/crypto/krng.c @@ -16,31 +16,27 @@ #include #include -static int krng_get_random(struct crypto_rng *tfm, u8 *rdata, unsigned int dlen) +static int krng_generate(struct crypto_rng *tfm, + const u8 *src, unsigned int slen, + u8 *rdata, unsigned int dlen) { get_random_bytes(rdata, dlen); return 0; } -static int krng_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen) +static int krng_seed(struct crypto_rng *tfm, const u8 *seed, unsigned int slen) { return 0; } -static struct crypto_alg krng_alg = { - .cra_name = "stdrng", - .cra_driver_name = "krng", - .cra_priority = 200, - .cra_flags = CRYPTO_ALG_TYPE_RNG, - .cra_ctxsize = 0, - .cra_type = &crypto_rng_type, - .cra_module = THIS_MODULE, - .cra_u = { - .rng = { - .rng_make_random = krng_get_random, - .rng_reset = krng_reset, - .seedsize = 0, - } +static struct rng_alg krng_alg = { + .generate = krng_generate, + .seed = krng_seed, + .base = { + .cra_name = "stdrng", + .cra_driver_name = "krng", + .cra_priority = 200, + .cra_module = THIS_MODULE, } }; @@ -48,13 +44,12 @@ static struct crypto_alg krng_alg = { /* Module initalization */ static int __init krng_mod_init(void) { - return crypto_register_alg(&krng_alg); + return crypto_register_rng(&krng_alg); } static void __exit krng_mod_fini(void) { - crypto_unregister_alg(&krng_alg); - return; + crypto_unregister_rng(&krng_alg); } module_init(krng_mod_init); -- cgit v0.10.2 From 94f1bb15bed84ad6c893916b7e7b9db6f1d7eec6 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 21 Apr 2015 10:46:46 +0800 Subject: crypto: rng - Remove old low-level rng interface Now that all rng implementations have switched over to the new interface, we can remove the old low-level interface. Signed-off-by: Herbert Xu diff --git a/crypto/rng.c b/crypto/rng.c index e98ce1c..055e276 100644 --- a/crypto/rng.c +++ b/crypto/rng.c @@ -4,6 +4,7 @@ * RNG operations. 
* * Copyright (c) 2008 Neil Horman + * Copyright (c) 2015 Herbert Xu * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free @@ -36,39 +37,6 @@ static inline struct crypto_rng *__crypto_rng_cast(struct crypto_tfm *tfm) return container_of(tfm, struct crypto_rng, base); } -static inline struct old_rng_alg *crypto_old_rng_alg(struct crypto_rng *tfm) -{ - return &crypto_rng_tfm(tfm)->__crt_alg->cra_rng; -} - -static int generate(struct crypto_rng *tfm, const u8 *src, unsigned int slen, - u8 *dst, unsigned int dlen) -{ - return crypto_old_rng_alg(tfm)->rng_make_random(tfm, dst, dlen); -} - -static int rngapi_reset(struct crypto_rng *tfm, const u8 *seed, - unsigned int slen) -{ - u8 *buf = NULL; - u8 *src = (u8 *)seed; - int err; - - if (slen) { - buf = kmalloc(slen, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - memcpy(buf, seed, slen); - src = buf; - } - - err = crypto_old_rng_alg(tfm)->rng_reset(tfm, src, slen); - - kzfree(buf); - return err; -} - int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen) { u8 *buf = NULL; @@ -83,7 +51,7 @@ int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen) seed = buf; } - err = tfm->seed(tfm, seed, slen); + err = crypto_rng_alg(tfm)->seed(tfm, seed, slen); kfree(buf); return err; @@ -92,21 +60,6 @@ EXPORT_SYMBOL_GPL(crypto_rng_reset); static int crypto_rng_init_tfm(struct crypto_tfm *tfm) { - struct crypto_rng *rng = __crypto_rng_cast(tfm); - struct rng_alg *alg = crypto_rng_alg(rng); - struct old_rng_alg *oalg = crypto_old_rng_alg(rng); - - if (oalg->rng_make_random) { - rng->generate = generate; - rng->seed = rngapi_reset; - rng->seedsize = oalg->seedsize; - return 0; - } - - rng->generate = alg->generate; - rng->seed = alg->seed; - rng->seedsize = alg->seedsize; - return 0; } @@ -114,8 +67,7 @@ static unsigned int seedsize(struct crypto_alg *alg) { struct rng_alg *ralg = container_of(alg, struct rng_alg, base); - return alg->cra_rng.rng_make_random ? 
- alg->cra_rng.seedsize : ralg->seedsize; + return ralg->seedsize; } #ifdef CONFIG_NET @@ -150,7 +102,7 @@ static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg) seq_printf(m, "seedsize : %u\n", seedsize(alg)); } -const struct crypto_type crypto_rng_type = { +static const struct crypto_type crypto_rng_type = { .extsize = crypto_alg_extsize, .init_tfm = crypto_rng_init_tfm, #ifdef CONFIG_PROC_FS @@ -162,7 +114,6 @@ const struct crypto_type crypto_rng_type = { .type = CRYPTO_ALG_TYPE_RNG, .tfmsize = offsetof(struct crypto_rng, base), }; -EXPORT_SYMBOL_GPL(crypto_rng_type); struct crypto_rng *crypto_alloc_rng(const char *alg_name, u32 type, u32 mask) { diff --git a/include/crypto/internal/rng.h b/include/crypto/internal/rng.h index 2c9a865..263f1a5 100644 --- a/include/crypto/internal/rng.h +++ b/include/crypto/internal/rng.h @@ -2,6 +2,7 @@ * RNG: Random Number Generator algorithms under the crypto API * * Copyright (c) 2008 Neil Horman + * Copyright (c) 2015 Herbert Xu * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free @@ -16,8 +17,6 @@ #include #include -extern const struct crypto_type crypto_rng_type; - int crypto_register_rng(struct rng_alg *alg); void crypto_unregister_rng(struct rng_alg *alg); int crypto_register_rngs(struct rng_alg *algs, int count); diff --git a/include/crypto/rng.h b/include/crypto/rng.h index cc22e52..c5d4684 100644 --- a/include/crypto/rng.h +++ b/include/crypto/rng.h @@ -2,6 +2,7 @@ * RNG: Random Number Generator algorithms under the crypto API * * Copyright (c) 2008 Neil Horman + * Copyright (c) 2015 Herbert Xu * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free @@ -56,11 +57,6 @@ struct rng_alg { }; struct crypto_rng { - int (*generate)(struct crypto_rng *tfm, - const u8 *src, unsigned int slen, - u8 *dst, unsigned int dlen); - int (*seed)(struct crypto_rng *tfm, const u8 *seed, unsigned int slen); - unsigned int seedsize; struct crypto_tfm base; }; @@ -144,7 +140,7 @@ static inline int crypto_rng_generate(struct crypto_rng *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int dlen) { - return tfm->generate(tfm, src, slen, dst, dlen); + return crypto_rng_alg(tfm)->generate(tfm, src, slen, dst, dlen); } /** @@ -198,7 +194,7 @@ int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, */ static inline int crypto_rng_seedsize(struct crypto_rng *tfm) { - return tfm->seedsize; + return crypto_rng_alg(tfm)->seedsize; } #endif diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 2fa9b05..ee14140 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -138,7 +138,6 @@ struct crypto_async_request; struct crypto_aead; struct crypto_blkcipher; struct crypto_hash; -struct crypto_rng; struct crypto_tfm; struct crypto_type; struct aead_givcrypt_request; @@ -426,40 +425,12 @@ struct compress_alg { unsigned int slen, u8 *dst, unsigned int *dlen); }; -/** - * struct old_rng_alg - random number generator definition - * @rng_make_random: The function defined by this variable obtains a random - * number. The random number generator transform must generate - * the random number out of the context provided with this - * call. - * @rng_reset: Reset of the random number generator by clearing the entire state. - * With the invocation of this function call, the random number - * generator shall completely reinitialize its state. 
If the random - * number generator requires a seed for setting up a new state, - * the seed must be provided by the consumer while invoking this - * function. The required size of the seed is defined with - * @seedsize . - * @seedsize: The seed size required for a random number generator - * initialization defined with this variable. Some random number - * generators like the SP800-90A DRBG does not require a seed as the - * seeding is implemented internally without the need of support by - * the consumer. In this case, the seed size is set to zero. - */ -struct old_rng_alg { - int (*rng_make_random)(struct crypto_rng *tfm, u8 *rdata, - unsigned int dlen); - int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen); - - unsigned int seedsize; -}; - #define cra_ablkcipher cra_u.ablkcipher #define cra_aead cra_u.aead #define cra_blkcipher cra_u.blkcipher #define cra_cipher cra_u.cipher #define cra_compress cra_u.compress -#define cra_rng cra_u.rng /** * struct crypto_alg - definition of a cryptograpic cipher algorithm @@ -559,7 +530,6 @@ struct crypto_alg { struct blkcipher_alg blkcipher; struct cipher_alg cipher; struct compress_alg compress; - struct old_rng_alg rng; } cra_u; int (*cra_init)(struct crypto_tfm *tfm); -- cgit v0.10.2 From 654ae152b3d1f3d5d473d845a403e4b5c1a39389 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 21 Apr 2015 10:46:47 +0800 Subject: crypto: algif_rng - Remove obsolete const-removal cast Now that crypto_rng_reset takes a const argument, we no longer need to cast away the const qualifier. Signed-off-by: Herbert Xu diff --git a/crypto/algif_rng.c b/crypto/algif_rng.c index 8109aaa..150c2b6 100644 --- a/crypto/algif_rng.c +++ b/crypto/algif_rng.c @@ -164,7 +164,7 @@ static int rng_setkey(void *private, const u8 *seed, unsigned int seedlen) * Check whether seedlen is of sufficient size is done in RNG * implementations. */ - return crypto_rng_reset(private, (u8 *)seed, seedlen); + return crypto_rng_reset(private, seed, seedlen); } static const struct af_alg_type algif_type_rng = { -- cgit v0.10.2 From b617b702da4e922277806f81c411d3051107d462 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 21 Apr 2015 10:46:49 +0800 Subject: crypto: rng - Zero seed in crypto_rng_reset If we allocate a seed on behalf ot the user in crypto_rng_reset, we must ensure that it is zeroed afterwards or the RNG may be compromised. Reported-by: Stephan Mueller Signed-off-by: Herbert Xu diff --git a/crypto/rng.c b/crypto/rng.c index 055e276..1315505 100644 --- a/crypto/rng.c +++ b/crypto/rng.c @@ -53,7 +53,7 @@ int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen) err = crypto_rng_alg(tfm)->seed(tfm, seed, slen); - kfree(buf); + kzfree(buf); return err; } EXPORT_SYMBOL_GPL(crypto_rng_reset); -- cgit v0.10.2 From 43a9607d86e8fb110b596d300dbaae895c198fed Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 22 Apr 2015 11:02:27 +0800 Subject: crypto: tcrypt - Handle async return from crypto_ahash_init The function crypto_ahash_init can also be asynchronous just like update and final. So all callers must be able to handle an async return. 
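The hunks below route crypto_ahash_init() through do_one_ahash_op(), the helper that update and final already use to turn -EINPROGRESS/-EBUSY into a synchronous wait on the request's completion. That helper behaves roughly like the sketch below; the tcrypt_result structure and field names are assumptions about tcrypt's private request context, not taken from this patch.

static inline int do_one_ahash_op(struct ahash_request *req, int ret)
{
        if (ret == -EINPROGRESS || ret == -EBUSY) {
                struct tcrypt_result *tr = req->base.data;

                /* the request callback records tr->err and completes this */
                wait_for_completion(&tr->completion);
                reinit_completion(&tr->completion);
                ret = tr->err;
        }
        return ret;
}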
Signed-off-by: Herbert Xu diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 1a28001..bf41c34 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -808,7 +808,7 @@ static int test_ahash_jiffies(struct ahash_request *req, int blen, for (start = jiffies, end = start + secs * HZ, bcount = 0; time_before(jiffies, end); bcount++) { - ret = crypto_ahash_init(req); + ret = do_one_ahash_op(req, crypto_ahash_init(req)); if (ret) return ret; for (pcount = 0; pcount < blen; pcount += plen) { @@ -877,7 +877,7 @@ static int test_ahash_cycles(struct ahash_request *req, int blen, /* Warm-up run. */ for (i = 0; i < 4; i++) { - ret = crypto_ahash_init(req); + ret = do_one_ahash_op(req, crypto_ahash_init(req)); if (ret) goto out; for (pcount = 0; pcount < blen; pcount += plen) { @@ -896,7 +896,7 @@ static int test_ahash_cycles(struct ahash_request *req, int blen, start = get_cycles(); - ret = crypto_ahash_init(req); + ret = do_one_ahash_op(req, crypto_ahash_init(req)); if (ret) goto out; for (pcount = 0; pcount < blen; pcount += plen) { -- cgit v0.10.2 From 59afdc7b32143528524455039e7557a46b60e4c8 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 22 Apr 2015 11:28:46 +0800 Subject: crypto: api - Move module sig ifdef into accessor function Currently we're hiding mod->sig_ok under an ifdef in open code. This patch adds a module_sig_ok accessor function and removes that ifdef. Signed-off-by: Herbert Xu Acked-by: Rusty Russell diff --git a/crypto/algapi.c b/crypto/algapi.c index a60f626..f835f43 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -43,12 +43,9 @@ static inline int crypto_set_driver_name(struct crypto_alg *alg) static inline void crypto_check_module_sig(struct module *mod) { -#ifdef CONFIG_CRYPTO_FIPS - if (fips_enabled && mod && !mod->sig_ok) + if (fips_enabled && mod && !module_sig_ok(mod)) panic("Module %s signature verification failed in FIPS mode\n", mod->name); -#endif - return; } static int crypto_check_alg(struct crypto_alg *alg) diff --git a/include/linux/module.h b/include/linux/module.h index c883b86..1e54360 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -655,4 +655,16 @@ static inline void module_bug_finalize(const Elf_Ehdr *hdr, static inline void module_bug_cleanup(struct module *mod) {} #endif /* CONFIG_GENERIC_BUG */ +#ifdef CONFIG_MODULE_SIG +static inline bool module_sig_ok(struct module *module) +{ + return module->sig_ok; +} +#else /* !CONFIG_MODULE_SIG */ +static inline bool module_sig_ok(struct module *module) +{ + return true; +} +#endif /* CONFIG_MODULE_SIG */ + #endif /* _LINUX_MODULE_H */ -- cgit v0.10.2 From b94e7dc581c68e383165b09f2aab7a98a597f10a Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 22 Apr 2015 11:42:35 +0800 Subject: crypto: drbg - Remove FIPS ifdef from drbg_healthcheck_sanity This patch removes the unnecessary CRYPTO_FIPS ifdef from drbg_healthcheck_sanity so that the code always gets checked by the compiler. 
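The pattern at work: fips_enabled collapses to a compile-time 0 when CONFIG_CRYPTO_FIPS is off, so guards can be written as ordinary C and the compiler still parses and type-checks the guarded code before eliminating it as dead. A generic sketch of that idiom, not taken from drbg.c (the helper name is hypothetical):

        /* always seen by the compiler; folded away in non-FIPS builds */
        if (!fips_enabled)
                return 0;

        return run_kat_selftest();      /* hypothetical self-test helper */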
Signed-off-by: Herbert Xu Acked-by: Stephan Mueller diff --git a/crypto/drbg.c b/crypto/drbg.c index ec6bffd..23d444e 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -1707,7 +1707,6 @@ static int drbg_kcapi_seed(struct crypto_rng *tfm, */ static inline int __init drbg_healthcheck_sanity(void) { -#ifdef CONFIG_CRYPTO_FIPS int len = 0; #define OUTBUFLEN 16 unsigned char buf[OUTBUFLEN]; @@ -1775,9 +1774,6 @@ static inline int __init drbg_healthcheck_sanity(void) outbuf: kzfree(drbg); return rc; -#else /* CONFIG_CRYPTO_FIPS */ - return 0; -#endif /* CONFIG_CRYPTO_FIPS */ } static struct rng_alg drbg_algs[22]; -- cgit v0.10.2 From d9b3682ffd6d7fa98195eb64307b70e9a25cc655 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 22 Apr 2015 11:56:34 +0800 Subject: crypto: caam - Remove bogus references to crypto API internals The caam driver includes algorithm types that it doesn't even use, such as struct rng_alg which has recently been moved to an internal header file and consequently broke the build of caam. This patch removes these bogus references. Reported-by: Fengguang Wu Signed-off-by: Herbert Xu diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 29071a1..3c025d4 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -3380,10 +3380,6 @@ struct caam_alg_template { union { struct ablkcipher_alg ablkcipher; struct aead_alg aead; - struct blkcipher_alg blkcipher; - struct cipher_alg cipher; - struct compress_alg compress; - struct rng_alg rng; } template_u; u32 class1_alg_type; u32 class2_alg_type; -- cgit v0.10.2 From 3133d76fc60bce6f3e00efb6c3540f2f449ff569 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 22 Apr 2015 13:25:53 +0800 Subject: crypto: api - Include linux/fips.h All users of fips_enabled should include linux/fips.h directly instead of getting it through internal.h. Signed-off-by: Herbert Xu diff --git a/crypto/algapi.c b/crypto/algapi.c index f835f43..c63836f 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -12,6 +12,7 @@ #include #include +#include #include #include #include -- cgit v0.10.2 From 76450f93f17bb03a27476371c4c907e26a3c78a4 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 22 Apr 2015 13:25:54 +0800 Subject: crypto: fips - Remove bogus inclusion of internal.h The header file internal.h is only meant for internal crypto API implementors such as rng.c. So fips has no business in including it. This patch removes that inclusions and instead adds inclusions of the actual features used by fips. Signed-off-by: Herbert Xu diff --git a/crypto/fips.c b/crypto/fips.c index 5539700..0f65df9 100644 --- a/crypto/fips.c +++ b/crypto/fips.c @@ -10,7 +10,10 @@ * */ -#include "internal.h" +#include +#include +#include +#include int fips_enabled; EXPORT_SYMBOL_GPL(fips_enabled); -- cgit v0.10.2 From 94072cb20eed369c64364c95bcfa3c012f54f466 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 22 Apr 2015 13:25:56 +0800 Subject: crypto: fips - Move fips_enabled sysctl into fips.c There is currently a large ifdef FIPS code section in proc.c. Ostensibly it's there because the fips_enabled sysctl sits under /proc/sys/crypto. However, no other crypto sysctls exist. In fact, the whole ethos of the crypto API is against such user interfaces so this patch moves all the FIPS sysctl code over to fips.c. 
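The user-visible interface should be unchanged by the move: with CONFIG_CRYPTO_FIPS enabled the flag remains exported read-only as /proc/sys/crypto/fips_enabled (mode 0444 in the table below); it now simply lives next to the fips= boot parameter handling in fips.c instead of behind an ifdef in proc.c.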
Signed-off-by: Herbert Xu diff --git a/crypto/fips.c b/crypto/fips.c index 0f65df9..9d627c1 100644 --- a/crypto/fips.c +++ b/crypto/fips.c @@ -13,7 +13,9 @@ #include #include #include +#include #include +#include int fips_enabled; EXPORT_SYMBOL_GPL(fips_enabled); @@ -28,3 +30,49 @@ static int fips_enable(char *str) } __setup("fips=", fips_enable); + +static struct ctl_table crypto_sysctl_table[] = { + { + .procname = "fips_enabled", + .data = &fips_enabled, + .maxlen = sizeof(int), + .mode = 0444, + .proc_handler = proc_dointvec + }, + {} +}; + +static struct ctl_table crypto_dir_table[] = { + { + .procname = "crypto", + .mode = 0555, + .child = crypto_sysctl_table + }, + {} +}; + +static struct ctl_table_header *crypto_sysctls; + +static void crypto_proc_fips_init(void) +{ + crypto_sysctls = register_sysctl_table(crypto_dir_table); +} + +static void crypto_proc_fips_exit(void) +{ + unregister_sysctl_table(crypto_sysctls); +} + +static int __init fips_init(void) +{ + crypto_proc_fips_init(); + return 0; +} + +static void __exit fips_exit(void) +{ + crypto_proc_fips_exit(); +} + +module_init(fips_init); +module_exit(fips_exit); diff --git a/crypto/proc.c b/crypto/proc.c index 4ffe73b..2cc10c9 100644 --- a/crypto/proc.c +++ b/crypto/proc.c @@ -20,47 +20,8 @@ #include #include #include -#include #include "internal.h" -#ifdef CONFIG_CRYPTO_FIPS -static struct ctl_table crypto_sysctl_table[] = { - { - .procname = "fips_enabled", - .data = &fips_enabled, - .maxlen = sizeof(int), - .mode = 0444, - .proc_handler = proc_dointvec - }, - {} -}; - -static struct ctl_table crypto_dir_table[] = { - { - .procname = "crypto", - .mode = 0555, - .child = crypto_sysctl_table - }, - {} -}; - -static struct ctl_table_header *crypto_sysctls; - -static void crypto_proc_fips_init(void) -{ - crypto_sysctls = register_sysctl_table(crypto_dir_table); -} - -static void crypto_proc_fips_exit(void) -{ - if (crypto_sysctls) - unregister_sysctl_table(crypto_sysctls); -} -#else -#define crypto_proc_fips_init() -#define crypto_proc_fips_exit() -#endif - static void *c_start(struct seq_file *m, loff_t *pos) { down_read(&crypto_alg_sem); @@ -148,11 +109,9 @@ static const struct file_operations proc_crypto_ops = { void __init crypto_init_proc(void) { proc_create("crypto", 0, NULL, &proc_crypto_ops); - crypto_proc_fips_init(); } void __exit crypto_exit_proc(void) { - crypto_proc_fips_exit(); remove_proc_entry("crypto", NULL); } -- cgit v0.10.2 From daf0944cfb1fd2f6ca17fd4ed1ecbbef63d8f8f8 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 22 Apr 2015 13:25:57 +0800 Subject: crypto: tcrypt - Include linux/fips.h for fips_enabled All users of fips_enabled should include linux/fips.h directly instead of getting it through internal.h which is reserved for internal crypto API implementors. Signed-off-by: Herbert Xu diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index bf41c34..22cdd61 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -24,6 +24,7 @@ #include #include +#include #include #include #include @@ -34,7 +35,6 @@ #include #include #include "tcrypt.h" -#include "internal.h" /* * Need slab memory for testing (size in number of pages). -- cgit v0.10.2 From 1c41b88249f623a3c92e53aa7cc221b2245bd044 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 22 Apr 2015 13:25:58 +0800 Subject: crypto: testmgr - Include linux/fips.h for fips_enabled All users of fips_enabled should include linux/fips.h directly instead of getting it through internal.h. 
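The intended pattern after this part of the series: any consumer of the flag includes the small public header rather than crypto/internal.h. A sketch, with both helper names hypothetical:

#include <linux/fips.h>

static bool my_template_allowed(const char *driver)
{
        /* with CONFIG_CRYPTO_FIPS=n, fips_enabled is a constant 0 and the
         * whole check folds away */
        if (fips_enabled)
                return is_fips_approved(driver);
        return true;
}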
Signed-off-by: Herbert Xu diff --git a/crypto/testmgr.c b/crypto/testmgr.c index f9bce3d..18b7d49 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -22,6 +22,7 @@ #include #include +#include #include #include #include -- cgit v0.10.2 From 7be58b1250f471a4548c0fe7a6b4001b62ea1e30 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 22 Apr 2015 13:25:59 +0800 Subject: crypto: api - Remove linux/fips.h from internal.h Now that all fips_enabled users are including linux/fips.h directly instead of getting it through internal.h, we can remove the fips.h inclusions from internal.h. Signed-off-by: Herbert Xu diff --git a/crypto/internal.h b/crypto/internal.h index ed7a70c..00e42a3 100644 --- a/crypto/internal.h +++ b/crypto/internal.h @@ -25,7 +25,6 @@ #include #include #include -#include /* Crypto notification events. */ enum { -- cgit v0.10.2 From 34ed9a35788a562d80648247022ae9497cc88ebc Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 22 Apr 2015 15:06:27 +0800 Subject: crypto: arm64/aes-ce-ccm - Include crypto/internal/aead.h All implementers of AEAD should include crypto/internal/aead.h instead of include/linux/crypto.h. Signed-off-by: Herbert Xu Acked-by: David S. Miller diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c index 6c348df..3303e8a 100644 --- a/arch/arm64/crypto/aes-ce-ccm-glue.c +++ b/arch/arm64/crypto/aes-ce-ccm-glue.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include #include "aes-ce-setkey.h" -- cgit v0.10.2 From 89081da51de3d0ffaef5b3a05fcdb7cc13bb5a61 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 22 Apr 2015 15:06:28 +0800 Subject: crypto: algif_aead - Include crypto/aead.h All users of AEAD should include crypto/aead.h instead of include/linux/crypto.h. Signed-off-by: Herbert Xu Acked-by: David S. Miller diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index 00a6fe1..53702e9 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -13,6 +13,7 @@ * any later version. */ +#include #include #include #include -- cgit v0.10.2 From 1ce5a04d9f3e8f1f25aa1d6da8612bfeeca7b306 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 22 Apr 2015 15:06:30 +0800 Subject: crypto: tcrypt - Include crypto/aead.h All users of AEAD should include crypto/aead.h instead of include/linux/crypto.h. Signed-off-by: Herbert Xu Acked-by: David S. Miller diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 22cdd61..2bff613 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -22,6 +22,7 @@ * */ +#include #include #include #include -- cgit v0.10.2 From 1ce3311580515268504db929c3219e57927b9e6a Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 22 Apr 2015 15:06:31 +0800 Subject: crypto: testmgr - Include crypto/aead.h All users of AEAD should include crypto/aead.h instead of include/linux/crypto.h. Signed-off-by: Herbert Xu Acked-by: David S. Miller diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 18b7d49..d463978 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -20,6 +20,7 @@ * */ +#include #include #include #include -- cgit v0.10.2 From d8fe0ddd0256711e9cf2c539e77c341daa0eb2cf Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 22 Apr 2015 15:06:32 +0800 Subject: mac80211: Include crypto/aead.h All users of AEAD should include crypto/aead.h instead of include/linux/crypto.h. Signed-off-by: Herbert Xu Acked-by: David S. 
Miller diff --git a/net/mac80211/aes_ccm.c b/net/mac80211/aes_ccm.c index 208df7c..70d53da 100644 --- a/net/mac80211/aes_ccm.c +++ b/net/mac80211/aes_ccm.c @@ -11,9 +11,8 @@ #include #include -#include #include -#include +#include #include #include "key.h" diff --git a/net/mac80211/aes_gcm.c b/net/mac80211/aes_gcm.c index fd278bb..b91c9d7 100644 --- a/net/mac80211/aes_gcm.c +++ b/net/mac80211/aes_gcm.c @@ -8,9 +8,8 @@ #include #include -#include #include -#include +#include #include #include "key.h" diff --git a/net/mac80211/aes_gmac.c b/net/mac80211/aes_gmac.c index f1321b7..c34b06c 100644 --- a/net/mac80211/aes_gmac.c +++ b/net/mac80211/aes_gmac.c @@ -9,8 +9,8 @@ #include #include -#include #include +#include #include #include -- cgit v0.10.2 From 8a1a2b717e0d4d5f3e3bb59b7dee5079a15ab24b Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 22 Apr 2015 15:06:33 +0800 Subject: mac802154: Include crypto/aead.h All users of AEAD should include crypto/aead.h instead of include/linux/crypto.h. This patch also removes a bogus inclusion of algapi.h which should only be used by algorithm/driver implementors and not crypto users. Instead linux/crypto.h is added which is necessary because mac802154 also uses blkcipher in addition to aead. Signed-off-by: Herbert Xu Acked-by: David S. Miller diff --git a/net/mac802154/llsec.c b/net/mac802154/llsec.c index dcf7395..3ccf1e9 100644 --- a/net/mac802154/llsec.c +++ b/net/mac802154/llsec.c @@ -17,8 +17,9 @@ #include #include #include +#include #include -#include +#include #include "ieee802154_i.h" #include "llsec.h" -- cgit v0.10.2 From bd4a7c69aaed79ae1a299db8063fe4daf5e4a2f1 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 23 Apr 2015 14:48:05 +0800 Subject: crypto: api - Fix build error when modules are disabled The commit 59afdc7b32143528524455039e7557a46b60e4c8 ("crypto: api - Move module sig ifdef into accessor function") broke the build when modules are completely disabled because we directly dereference module->name. This patch fixes this by using the accessor function module_name. Reported-by: Fengguang Wu Signed-off-by: Herbert Xu diff --git a/crypto/algapi.c b/crypto/algapi.c index c63836f..3103e6a 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -46,7 +46,7 @@ static inline void crypto_check_module_sig(struct module *mod) { if (fips_enabled && mod && !module_sig_ok(mod)) panic("Module %s signature verification failed in FIPS mode\n", - mod->name); + module_name(mod)); } static int crypto_check_alg(struct crypto_alg *alg) -- cgit v0.10.2 From 26739535206e819946b0740347c09c94c4e48ba9 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 23 Apr 2015 16:34:47 +0800 Subject: crypto: skcipher - Fix corner case in crypto_lookup_skcipher When the user explicitly states that they don't care whether the algorithm has been tested (type = CRYPTO_ALG_TESTED and mask = 0), there is a corner case where we may erroneously return ENOENT. This patch fixes it by correcting the logic in the test. 
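Restricting attention to the CRYPTO_ALG_TESTED bit makes the corner case visible; the same reasoning applies to the identical fix in crypto_lookup_aead() below.

/* Caller passes type = CRYPTO_ALG_TESTED, mask = 0 ("don't care"):
 *
 *   old test:  (cra_flags ^ type ^ ~mask) & CRYPTO_ALG_TESTED
 *            =  cra_flags ^ 1 ^ 1  =  cra_flags
 *     -> a successfully *tested* algorithm trips the check and the
 *        lookup wrongly returns ENOENT.
 *
 *   new test:  ~cra_flags & (type ^ ~mask) & CRYPTO_ALG_TESTED
 *            =  ~cra_flags & (1 ^ 1)  =  0
 *     -> never an error when the caller does not care, while a caller
 *        setting the bit in both type and mask (type ^ ~mask = 1) still
 *        gets ENOENT for an untested algorithm (~cra_flags = 1).
 */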
Signed-off-by: Herbert Xu diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c index db201bca..b3dded4 100644 --- a/crypto/ablkcipher.c +++ b/crypto/ablkcipher.c @@ -636,7 +636,7 @@ struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type, u32 mask) if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_GIVCIPHER) { - if ((alg->cra_flags ^ type ^ ~mask) & CRYPTO_ALG_TESTED) { + if (~alg->cra_flags & (type ^ ~mask) & CRYPTO_ALG_TESTED) { crypto_mod_put(alg); alg = ERR_PTR(-ENOENT); } -- cgit v0.10.2 From 80f7b3552c1c925478a955fd4c700345beaf2982 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 23 Apr 2015 16:37:46 +0800 Subject: crypto: aead - Fix corner case in crypto_lookup_aead When the user explicitly states that they don't care whether the algorithm has been tested (type = CRYPTO_ALG_TESTED and mask = 0), there is a corner case where we may erroneously return ENOENT. This patch fixes it by correcting the logic in the test. Signed-off-by: Herbert Xu diff --git a/crypto/aead.c b/crypto/aead.c index 2222710..d6ad0c6 100644 --- a/crypto/aead.c +++ b/crypto/aead.c @@ -489,7 +489,7 @@ struct crypto_alg *crypto_lookup_aead(const char *name, u32 type, u32 mask) return alg; if (alg->cra_type == &crypto_aead_type) { - if ((alg->cra_flags ^ type ^ ~mask) & CRYPTO_ALG_TESTED) { + if (~alg->cra_flags & (type ^ ~mask) & CRYPTO_ALG_TESTED) { crypto_mod_put(alg); alg = ERR_PTR(-ENOENT); } -- cgit v0.10.2 From 21a6dd5b391e419efb227251745b08d5838f09db Mon Sep 17 00:00:00 2001 From: firo yang Date: Thu, 23 Apr 2015 18:12:10 +0800 Subject: crypto: sha1-mb - Remove pointless cast Since kzalloc() returns a void pointer, we don't need to cast the return value in arch/x86/crypto/sha-mb/sha1_mb.c::sha1_mb_mod_init(). Signed-off-by: Firo Yang Signed-off-by: Herbert Xu diff --git a/arch/x86/crypto/sha-mb/sha1_mb.c b/arch/x86/crypto/sha-mb/sha1_mb.c index e510b1c..0cb5149 100644 --- a/arch/x86/crypto/sha-mb/sha1_mb.c +++ b/arch/x86/crypto/sha-mb/sha1_mb.c @@ -885,7 +885,8 @@ static int __init sha1_mb_mod_init(void) INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher); cpu_state->cpu = cpu; cpu_state->alg_state = &sha1_mb_alg_state; - cpu_state->mgr = (struct sha1_ctx_mgr *) kzalloc(sizeof(struct sha1_ctx_mgr), GFP_KERNEL); + cpu_state->mgr = kzalloc(sizeof(struct sha1_ctx_mgr), + GFP_KERNEL); if (!cpu_state->mgr) goto err2; sha1_ctx_mgr_init(cpu_state->mgr); -- cgit v0.10.2 From c3365ce130e50176533debe1cabebcdb8e61156c Mon Sep 17 00:00:00 2001 From: Leonidas Da Silva Barbosa Date: Thu, 23 Apr 2015 17:40:30 -0300 Subject: crypto: nx - Fixing NX data alignment with nx_sg list In NX we need to pass always a 16 multiple size nx_sg_list to co processor. Trim function handle with this assuring all nx_sg_lists are 16 multiple size, although data was not being considerated when crop was done. It was causing an unalignment between size of the list and data, corrupting csbcpb fields returning a -23 H_ST_PARM error, or invalid operation. This patch fix this recalculating how much data should be put back in to_process variable what assures the size of sg_list will be correct with size of the data. Signed-off-by: Leonidas S. Barbosa Signed-off-by: Herbert Xu diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c index 1da6dc5..4856f72 100644 --- a/drivers/crypto/nx/nx.c +++ b/drivers/crypto/nx/nx.c @@ -215,8 +215,15 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst, * @delta: is the amount we need to crop in order to bound the list. 
* */ -static long int trim_sg_list(struct nx_sg *sg, struct nx_sg *end, unsigned int delta) +static long int trim_sg_list(struct nx_sg *sg, + struct nx_sg *end, + unsigned int delta, + unsigned int *nbytes) { + long int oplen; + long int data_back; + unsigned int is_delta = delta; + while (delta && end > sg) { struct nx_sg *last = end - 1; @@ -228,7 +235,20 @@ static long int trim_sg_list(struct nx_sg *sg, struct nx_sg *end, unsigned int d delta -= last->len; } } - return (sg - end) * sizeof(struct nx_sg); + + /* There are cases where we need to crop list in order to make it + * a block size multiple, but we also need to align data. In order to + * that we need to calculate how much we need to put back to be + * processed + */ + oplen = (sg - end) * sizeof(struct nx_sg); + if (is_delta) { + data_back = (abs(oplen) / AES_BLOCK_SIZE) * sg->len; + data_back = *nbytes - (data_back & ~(AES_BLOCK_SIZE - 1)); + *nbytes -= data_back; + } + + return oplen; } /** @@ -330,8 +350,8 @@ int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx, /* these lengths should be negative, which will indicate to phyp that * the input and output parameters are scatterlists, not linear * buffers */ - nx_ctx->op.inlen = trim_sg_list(nx_ctx->in_sg, nx_insg, delta); - nx_ctx->op.outlen = trim_sg_list(nx_ctx->out_sg, nx_outsg, delta); + nx_ctx->op.inlen = trim_sg_list(nx_ctx->in_sg, nx_insg, delta, nbytes); + nx_ctx->op.outlen = trim_sg_list(nx_ctx->out_sg, nx_outsg, delta, nbytes); return 0; } -- cgit v0.10.2 From 10d87b730e1d9f1442cae6487bb3aef8632bed23 Mon Sep 17 00:00:00 2001 From: Leonidas Da Silva Barbosa Date: Thu, 23 Apr 2015 17:41:43 -0300 Subject: crypto: nx - Fixing SHA update bug Bug happens when a data size less than SHA block size is passed. Since first attempt will be saved in buffer, second round attempt get into two step to calculate op.inlen and op.outlen. The issue resides in this step. A wrong value of op.inlen and outlen was being calculated. This patch fix this eliminate the nx_sha_build_sg_list, that is useless in SHA's algorithm context. Instead we call nx_build_sg_list directly and pass a previous calculated max_sg_len to it. Signed-off-by: Leonidas S. 
Barbosa Signed-off-by: Herbert Xu diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c index 23621da..4e91bdb 100644 --- a/drivers/crypto/nx/nx-sha256.c +++ b/drivers/crypto/nx/nx-sha256.c @@ -33,8 +33,9 @@ static int nx_sha256_init(struct shash_desc *desc) { struct sha256_state *sctx = shash_desc_ctx(desc); struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); + struct nx_sg *out_sg; int len; - int rc; + u32 max_sg_len; nx_ctx_init(nx_ctx, HCOP_FC_SHA); @@ -44,15 +45,18 @@ static int nx_sha256_init(struct shash_desc *desc) NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256); + max_sg_len = min_t(u64, nx_ctx->ap->sglen, + nx_driver.of.max_sg_len/sizeof(struct nx_sg)); + max_sg_len = min_t(u64, max_sg_len, + nx_ctx->ap->databytelen/NX_PAGE_SIZE); + len = SHA256_DIGEST_SIZE; - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg, - &nx_ctx->op.outlen, - &len, - (u8 *) sctx->state, - NX_DS_SHA256); + out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state, + &len, max_sg_len); + nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); - if (rc) - goto out; + if (len != SHA256_DIGEST_SIZE) + return -EINVAL; sctx->state[0] = __cpu_to_be32(SHA256_H0); sctx->state[1] = __cpu_to_be32(SHA256_H1); @@ -64,7 +68,6 @@ static int nx_sha256_init(struct shash_desc *desc) sctx->state[7] = __cpu_to_be32(SHA256_H7); sctx->count = 0; -out: return 0; } @@ -74,10 +77,12 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data, struct sha256_state *sctx = shash_desc_ctx(desc); struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; + struct nx_sg *in_sg; u64 to_process = 0, leftover, total; unsigned long irq_flags; int rc = 0; int data_len; + u32 max_sg_len; u64 buf_len = (sctx->count % SHA256_BLOCK_SIZE); spin_lock_irqsave(&nx_ctx->lock, irq_flags); @@ -97,6 +102,12 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data, NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; + in_sg = nx_ctx->in_sg; + max_sg_len = min_t(u64, nx_ctx->ap->sglen, + nx_driver.of.max_sg_len/sizeof(struct nx_sg)); + max_sg_len = min_t(u64, max_sg_len, + nx_ctx->ap->databytelen/NX_PAGE_SIZE); + do { /* * to_process: the SHA256_BLOCK_SIZE data chunk to process in @@ -108,25 +119,22 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data, if (buf_len) { data_len = buf_len; - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg, - &nx_ctx->op.inlen, - &data_len, - (u8 *) sctx->buf, - NX_DS_SHA256); + in_sg = nx_build_sg_list(nx_ctx->in_sg, + (u8 *) sctx->buf, + &data_len, + max_sg_len); - if (rc || data_len != buf_len) + if (data_len != buf_len) { + rc = -EINVAL; goto out; + } } data_len = to_process - buf_len; - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg, - &nx_ctx->op.inlen, - &data_len, - (u8 *) data, - NX_DS_SHA256); + in_sg = nx_build_sg_list(in_sg, (u8 *) data, + &data_len, max_sg_len); - if (rc) - goto out; + nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); to_process = (data_len + buf_len); leftover = total - to_process; @@ -173,12 +181,19 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out) struct sha256_state *sctx = shash_desc_ctx(desc); struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; + struct nx_sg *in_sg, *out_sg; unsigned long irq_flags; - int rc; + u32 max_sg_len; + int rc = 0; int len; spin_lock_irqsave(&nx_ctx->lock, 
irq_flags); + max_sg_len = min_t(u64, nx_ctx->ap->sglen, + nx_driver.of.max_sg_len/sizeof(struct nx_sg)); + max_sg_len = min_t(u64, max_sg_len, + nx_ctx->ap->databytelen/NX_PAGE_SIZE); + /* final is represented by continuing the operation and indicating that * this is not an intermediate operation */ if (sctx->count >= SHA256_BLOCK_SIZE) { @@ -195,25 +210,24 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out) csbcpb->cpb.sha256.message_bit_length = (u64) (sctx->count * 8); len = sctx->count & (SHA256_BLOCK_SIZE - 1); - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg, - &nx_ctx->op.inlen, - &len, - (u8 *) sctx->buf, - NX_DS_SHA256); + in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) sctx->buf, + &len, max_sg_len); - if (rc || len != (sctx->count & (SHA256_BLOCK_SIZE - 1))) + if (len != (sctx->count & (SHA256_BLOCK_SIZE - 1))) { + rc = -EINVAL; goto out; + } len = SHA256_DIGEST_SIZE; - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg, - &nx_ctx->op.outlen, - &len, - out, - NX_DS_SHA256); + out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len, max_sg_len); - if (rc || len != SHA256_DIGEST_SIZE) + if (len != SHA256_DIGEST_SIZE) { + rc = -EINVAL; goto out; + } + nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); + nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); if (!nx_ctx->op.outlen) { rc = -EINVAL; goto out; diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c index b3adf10..e6a58d2 100644 --- a/drivers/crypto/nx/nx-sha512.c +++ b/drivers/crypto/nx/nx-sha512.c @@ -32,8 +32,9 @@ static int nx_sha512_init(struct shash_desc *desc) { struct sha512_state *sctx = shash_desc_ctx(desc); struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); + struct nx_sg *out_sg; int len; - int rc; + u32 max_sg_len; nx_ctx_init(nx_ctx, HCOP_FC_SHA); @@ -43,15 +44,18 @@ static int nx_sha512_init(struct shash_desc *desc) NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512); + max_sg_len = min_t(u64, nx_ctx->ap->sglen, + nx_driver.of.max_sg_len/sizeof(struct nx_sg)); + max_sg_len = min_t(u64, max_sg_len, + nx_ctx->ap->databytelen/NX_PAGE_SIZE); + len = SHA512_DIGEST_SIZE; - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg, - &nx_ctx->op.outlen, - &len, - (u8 *)sctx->state, - NX_DS_SHA512); + out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state, + &len, max_sg_len); + nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); - if (rc || len != SHA512_DIGEST_SIZE) - goto out; + if (len != SHA512_DIGEST_SIZE) + return -EINVAL; sctx->state[0] = __cpu_to_be64(SHA512_H0); sctx->state[1] = __cpu_to_be64(SHA512_H1); @@ -63,7 +67,6 @@ static int nx_sha512_init(struct shash_desc *desc) sctx->state[7] = __cpu_to_be64(SHA512_H7); sctx->count[0] = 0; -out: return 0; } @@ -73,10 +76,12 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data, struct sha512_state *sctx = shash_desc_ctx(desc); struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; + struct nx_sg *in_sg; u64 to_process, leftover = 0, total; unsigned long irq_flags; int rc = 0; int data_len; + u32 max_sg_len; u64 buf_len = (sctx->count[0] % SHA512_BLOCK_SIZE); spin_lock_irqsave(&nx_ctx->lock, irq_flags); @@ -96,6 +101,12 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data, NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; + in_sg = nx_ctx->in_sg; + max_sg_len = min_t(u64, nx_ctx->ap->sglen, + nx_driver.of.max_sg_len/sizeof(struct 
nx_sg)); + max_sg_len = min_t(u64, max_sg_len, + nx_ctx->ap->databytelen/NX_PAGE_SIZE); + do { /* * to_process: the SHA512_BLOCK_SIZE data chunk to process in @@ -108,25 +119,26 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data, if (buf_len) { data_len = buf_len; - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg, - &nx_ctx->op.inlen, - &data_len, - (u8 *) sctx->buf, - NX_DS_SHA512); + in_sg = nx_build_sg_list(nx_ctx->in_sg, + (u8 *) sctx->buf, + &data_len, max_sg_len); - if (rc || data_len != buf_len) + if (data_len != buf_len) { + rc = -EINVAL; goto out; + } } data_len = to_process - buf_len; - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg, - &nx_ctx->op.inlen, - &data_len, - (u8 *) data, - NX_DS_SHA512); + in_sg = nx_build_sg_list(in_sg, (u8 *) data, + &data_len, max_sg_len); - if (rc || data_len != (to_process - buf_len)) + nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); + + if (data_len != (to_process - buf_len)) { + rc = -EINVAL; goto out; + } to_process = (data_len + buf_len); leftover = total - to_process; @@ -172,13 +184,20 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out) struct sha512_state *sctx = shash_desc_ctx(desc); struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; + struct nx_sg *in_sg, *out_sg; + u32 max_sg_len; u64 count0; unsigned long irq_flags; - int rc; + int rc = 0; int len; spin_lock_irqsave(&nx_ctx->lock, irq_flags); + max_sg_len = min_t(u64, nx_ctx->ap->sglen, + nx_driver.of.max_sg_len/sizeof(struct nx_sg)); + max_sg_len = min_t(u64, max_sg_len, + nx_ctx->ap->databytelen/NX_PAGE_SIZE); + /* final is represented by continuing the operation and indicating that * this is not an intermediate operation */ if (sctx->count[0] >= SHA512_BLOCK_SIZE) { @@ -200,24 +219,20 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out) csbcpb->cpb.sha512.message_bit_length_lo = count0; len = sctx->count[0] & (SHA512_BLOCK_SIZE - 1); - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg, - &nx_ctx->op.inlen, - &len, - (u8 *)sctx->buf, - NX_DS_SHA512); + in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, &len, + max_sg_len); - if (rc || len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1))) + if (len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1))) { + rc = -EINVAL; goto out; + } len = SHA512_DIGEST_SIZE; - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg, - &nx_ctx->op.outlen, - &len, - out, - NX_DS_SHA512); + out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len, + max_sg_len); - if (rc) - goto out; + nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); + nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); if (!nx_ctx->op.outlen) { rc = -EINVAL; diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c index 4856f72..2e2529c 100644 --- a/drivers/crypto/nx/nx.c +++ b/drivers/crypto/nx/nx.c @@ -252,53 +252,6 @@ static long int trim_sg_list(struct nx_sg *sg, } /** - * nx_sha_build_sg_list - walk and build sg list to sha modes - * using right bounds and limits. 
- * @nx_ctx: NX crypto context for the lists we're building - * @nx_sg: current sg list in or out list - * @op_len: current op_len to be used in order to build a sg list - * @nbytes: number or bytes to be processed - * @offset: buf offset - * @mode: SHA256 or SHA512 - */ -int nx_sha_build_sg_list(struct nx_crypto_ctx *nx_ctx, - struct nx_sg *nx_in_outsg, - s64 *op_len, - unsigned int *nbytes, - u8 *offset, - u32 mode) -{ - unsigned int delta = 0; - unsigned int total = *nbytes; - struct nx_sg *nx_insg = nx_in_outsg; - unsigned int max_sg_len; - - max_sg_len = min_t(u64, nx_ctx->ap->sglen, - nx_driver.of.max_sg_len/sizeof(struct nx_sg)); - max_sg_len = min_t(u64, max_sg_len, - nx_ctx->ap->databytelen/NX_PAGE_SIZE); - - *nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen); - nx_insg = nx_build_sg_list(nx_insg, offset, nbytes, max_sg_len); - - switch (mode) { - case NX_DS_SHA256: - if (*nbytes < total) - delta = *nbytes - (*nbytes & ~(SHA256_BLOCK_SIZE - 1)); - break; - case NX_DS_SHA512: - if (*nbytes < total) - delta = *nbytes - (*nbytes & ~(SHA512_BLOCK_SIZE - 1)); - break; - default: - return -EINVAL; - } - *op_len = trim_sg_list(nx_in_outsg, nx_insg, delta); - - return 0; -} - -/** * nx_build_sg_lists - walk the input scatterlists and build arrays of NX * scatterlists based on them. * diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h index 6c9ecaa..41b87ee 100644 --- a/drivers/crypto/nx/nx.h +++ b/drivers/crypto/nx/nx.h @@ -153,8 +153,6 @@ void nx_crypto_ctx_exit(struct crypto_tfm *tfm); void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function); int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op, u32 may_sleep); -int nx_sha_build_sg_list(struct nx_crypto_ctx *, struct nx_sg *, - s64 *, unsigned int *, u8 *, u32); struct nx_sg *nx_build_sg_list(struct nx_sg *, u8 *, unsigned int *, u32); int nx_build_sg_lists(struct nx_crypto_ctx *, struct blkcipher_desc *, struct scatterlist *, struct scatterlist *, unsigned int *, -- cgit v0.10.2 From dcd93e8303cbd0ef3dba165daa0633ebd05b3e17 Mon Sep 17 00:00:00 2001 From: "Allan, Bruce W" Date: Mon, 27 Apr 2015 13:58:27 -0700 Subject: crypto: qat - do not duplicate string containing firmware name Use ADF_DH895XCC_FW instead of duplicating the string "qat_895xcc.bin" when referring to the DH895xCC firmware. Signed-off-by: Bruce Allan Signed-off-by: Herbert Xu diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c index 9decea2..c1b3f0b 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c +++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c @@ -417,5 +417,5 @@ module_exit(adfdrv_release); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Intel"); -MODULE_FIRMWARE("qat_895xcc.bin"); +MODULE_FIRMWARE(ADF_DH895XCC_FW); MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); -- cgit v0.10.2 From f94a359763c19e1d03dac61352b1041692c6a698 Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 28 Apr 2015 15:36:30 +0100 Subject: crypto: pcomp - Constify (de)compression parameters In testmgr, struct pcomp_testvec takes a non-const 'params' field, which is pointed to a const deflate_comp_params or deflate_decomp_params object. 
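As a minimal stand-alone sketch of the pattern (hypothetical names, not the actual testmgr code), the warning comes from storing the address of a const object in a non-const pointer field:

	struct example_testvec {
		void *params;			/* non-const field, as before this patch */
	};

	static const int example_params = 42;	/* const parameter object */

	static struct example_testvec tv = {
		.params = &example_params,	/* gcc: initialization discards 'const'
						 * qualifier from pointer target type */
	};

Changing the field to const void *params lets the same initializer compile cleanly without a cast, which is the approach taken here.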
With gcc-5 this incurs the following warnings: In file included from ../crypto/testmgr.c:44:0: ../crypto/testmgr.h:28736:13: warning: initialization discards 'const' qualifier from pointer target type [-Wdiscarded-array-qualifiers] .params = &deflate_comp_params, ^ ../crypto/testmgr.h:28748:13: warning: initialization discards 'const' qualifier from pointer target type [-Wdiscarded-array-qualifiers] .params = &deflate_comp_params, ^ ../crypto/testmgr.h:28776:13: warning: initialization discards 'const' qualifier from pointer target type [-Wdiscarded-array-qualifiers] .params = &deflate_decomp_params, ^ ../crypto/testmgr.h:28800:13: warning: initialization discards 'const' qualifier from pointer target type [-Wdiscarded-array-qualifiers] .params = &deflate_decomp_params, ^ Fix this by making the parameters pointer const and constifying the things that use it. Signed-off-by: David Howells Signed-off-by: Herbert Xu diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 62e2485..1f5a31f2 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -28591,7 +28591,7 @@ struct comp_testvec { }; struct pcomp_testvec { - void *params; + const void *params; unsigned int paramsize; int inlen, outlen; char input[COMP_BUF_SIZE]; diff --git a/crypto/zlib.c b/crypto/zlib.c index 0eefa9d..d51a30a 100644 --- a/crypto/zlib.c +++ b/crypto/zlib.c @@ -78,7 +78,7 @@ static void zlib_exit(struct crypto_tfm *tfm) } -static int zlib_compress_setup(struct crypto_pcomp *tfm, void *params, +static int zlib_compress_setup(struct crypto_pcomp *tfm, const void *params, unsigned int len) { struct zlib_ctx *ctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm)); @@ -209,7 +209,7 @@ static int zlib_compress_final(struct crypto_pcomp *tfm, } -static int zlib_decompress_setup(struct crypto_pcomp *tfm, void *params, +static int zlib_decompress_setup(struct crypto_pcomp *tfm, const void *params, unsigned int len) { struct zlib_ctx *ctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm)); diff --git a/include/crypto/compress.h b/include/crypto/compress.h index 86163ef..5b67af8 100644 --- a/include/crypto/compress.h +++ b/include/crypto/compress.h @@ -55,14 +55,14 @@ struct crypto_pcomp { }; struct pcomp_alg { - int (*compress_setup)(struct crypto_pcomp *tfm, void *params, + int (*compress_setup)(struct crypto_pcomp *tfm, const void *params, unsigned int len); int (*compress_init)(struct crypto_pcomp *tfm); int (*compress_update)(struct crypto_pcomp *tfm, struct comp_request *req); int (*compress_final)(struct crypto_pcomp *tfm, struct comp_request *req); - int (*decompress_setup)(struct crypto_pcomp *tfm, void *params, + int (*decompress_setup)(struct crypto_pcomp *tfm, const void *params, unsigned int len); int (*decompress_init)(struct crypto_pcomp *tfm); int (*decompress_update)(struct crypto_pcomp *tfm, @@ -97,7 +97,7 @@ static inline struct pcomp_alg *crypto_pcomp_alg(struct crypto_pcomp *tfm) } static inline int crypto_compress_setup(struct crypto_pcomp *tfm, - void *params, unsigned int len) + const void *params, unsigned int len) { return crypto_pcomp_alg(tfm)->compress_setup(tfm, params, len); } @@ -120,7 +120,7 @@ static inline int crypto_compress_final(struct crypto_pcomp *tfm, } static inline int crypto_decompress_setup(struct crypto_pcomp *tfm, - void *params, unsigned int len) + const void *params, unsigned int len) { return crypto_pcomp_alg(tfm)->decompress_setup(tfm, params, len); } -- cgit v0.10.2 From 09e217844a2e2d02a950bc9c129f6fe423e426e0 Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 28 Apr 2015 15:36:36 +0100 Subject: 
crypto: testmgr - Wrap the LHS in expressions of the form !x == y In the test manager, there are a number of if-statements with expressions of the form !x == y that incur warnings with gcc-5 of the following form: ../crypto/testmgr.c: In function '__test_aead': ../crypto/testmgr.c:523:12: warning: logical not is only applied to the left hand side of comparison [-Wlogical-not-parentheses] if (!ret == template[i].fail) { ^ By converting the 'fail' member of struct aead_testvec and struct cipher_testvec to a bool, we can get rid of the warnings. Signed-off-by: David Howells Signed-off-by: Herbert Xu diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 1f5a31f2..1cd62a7 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -54,7 +54,7 @@ struct cipher_testvec { unsigned short tap[MAX_TAP]; int np; unsigned char also_non_np; - unsigned char fail; + bool fail; unsigned char wk; /* weak key flag */ unsigned char klen; unsigned short ilen; @@ -71,7 +71,7 @@ struct aead_testvec { unsigned char atap[MAX_TAP]; int np; int anp; - unsigned char fail; + bool fail; unsigned char novrfy; /* ccm dec verification failure expected */ unsigned char wk; /* weak key flag */ unsigned char klen; @@ -3018,7 +3018,7 @@ static struct cipher_testvec des_enc_tv_template[] = { "\xb4\x99\x26\xf7\x1f\xe1\xd4\x90", .rlen = 24, }, { /* Weak key */ - .fail = 1, + .fail = true, .wk = 1, .key = "\x01\x01\x01\x01\x01\x01\x01\x01", .klen = 8, -- cgit v0.10.2 From ebb3472f5cc9d4cffe3968dfd816978ab2dd06d6 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 4 May 2015 11:00:17 +0200 Subject: crypto: testmgr - add test cases for CRC32 This adds a couple of test cases for CRC32 (not CRC32c) to ensure that the generic and arch specific implementations are in sync. Signed-off-by: Ard Biesheuvel Acked-by: Steve Capper Signed-off-by: Herbert Xu diff --git a/crypto/testmgr.c b/crypto/testmgr.c index d463978..1817252 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -2320,6 +2320,15 @@ static const struct alg_test_desc alg_test_descs[] = { .alg = "compress_null", .test = alg_test_null, }, { + .alg = "crc32", + .test = alg_test_hash, + .suite = { + .hash = { + .vecs = crc32_tv_template, + .count = CRC32_TEST_VECTORS + } + } + }, { .alg = "crc32c", .test = alg_test_crc32c, .fips_allowed = 1, diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 1cd62a7..90f43b9 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -28946,6 +28946,440 @@ static struct hash_testvec michael_mic_tv_template[] = { }; /* + * CRC32 test vectors + */ +#define CRC32_TEST_VECTORS 14 + +static struct hash_testvec crc32_tv_template[] = { + { + .key = "\x87\xa9\xcb\xed", + .ksize = 4, + .psize = 0, + .digest = "\x87\xa9\xcb\xed", + }, + { + .key = "\xff\xff\xff\xff", + .ksize = 4, + .plaintext = "\x01\x02\x03\x04\x05\x06\x07\x08" + "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10" + "\x11\x12\x13\x14\x15\x16\x17\x18" + "\x19\x1a\x1b\x1c\x1d\x1e\x1f\x20" + "\x21\x22\x23\x24\x25\x26\x27\x28", + .psize = 40, + .digest = "\x3a\xdf\x4b\xb0", + }, + { + .key = "\xff\xff\xff\xff", + .ksize = 4, + .plaintext = "\x29\x2a\x2b\x2c\x2d\x2e\x2f\x30" + "\x31\x32\x33\x34\x35\x36\x37\x38" + "\x39\x3a\x3b\x3c\x3d\x3e\x3f\x40" + "\x41\x42\x43\x44\x45\x46\x47\x48" + "\x49\x4a\x4b\x4c\x4d\x4e\x4f\x50", + .psize = 40, + .digest = "\xa9\x7a\x7f\x7b", + }, + { + .key = "\xff\xff\xff\xff", + .ksize = 4, + .plaintext = "\x51\x52\x53\x54\x55\x56\x57\x58" + "\x59\x5a\x5b\x5c\x5d\x5e\x5f\x60" + "\x61\x62\x63\x64\x65\x66\x67\x68" + "\x69\x6a\x6b\x6c\x6d\x6e\x6f\x70" + 
"\x71\x72\x73\x74\x75\x76\x77\x78", + .psize = 40, + .digest = "\xba\xd3\xf8\x1c", + }, + { + .key = "\xff\xff\xff\xff", + .ksize = 4, + .plaintext = "\x79\x7a\x7b\x7c\x7d\x7e\x7f\x80" + "\x81\x82\x83\x84\x85\x86\x87\x88" + "\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90" + "\x91\x92\x93\x94\x95\x96\x97\x98" + "\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0", + .psize = 40, + .digest = "\xa8\xa9\xc2\x02", + }, + { + .key = "\xff\xff\xff\xff", + .ksize = 4, + .plaintext = "\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8" + "\xa9\xaa\xab\xac\xad\xae\xaf\xb0" + "\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8" + "\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0" + "\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8", + .psize = 40, + .digest = "\x27\xf0\x57\xe2", + }, + { + .key = "\xff\xff\xff\xff", + .ksize = 4, + .plaintext = "\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0" + "\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8" + "\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0" + "\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8" + "\xe9\xea\xeb\xec\xed\xee\xef\xf0", + .psize = 40, + .digest = "\x49\x78\x10\x08", + }, + { + .key = "\x80\xea\xd3\xf1", + .ksize = 4, + .plaintext = "\x29\x2a\x2b\x2c\x2d\x2e\x2f\x30" + "\x31\x32\x33\x34\x35\x36\x37\x38" + "\x39\x3a\x3b\x3c\x3d\x3e\x3f\x40" + "\x41\x42\x43\x44\x45\x46\x47\x48" + "\x49\x4a\x4b\x4c\x4d\x4e\x4f\x50", + .psize = 40, + .digest = "\x9a\xb1\xdc\xf0", + }, + { + .key = "\xf3\x4a\x1d\x5d", + .ksize = 4, + .plaintext = "\x51\x52\x53\x54\x55\x56\x57\x58" + "\x59\x5a\x5b\x5c\x5d\x5e\x5f\x60" + "\x61\x62\x63\x64\x65\x66\x67\x68" + "\x69\x6a\x6b\x6c\x6d\x6e\x6f\x70" + "\x71\x72\x73\x74\x75\x76\x77\x78", + .psize = 40, + .digest = "\xb4\x97\xcc\xd4", + }, + { + .key = "\x2e\x80\x04\x59", + .ksize = 4, + .plaintext = "\x79\x7a\x7b\x7c\x7d\x7e\x7f\x80" + "\x81\x82\x83\x84\x85\x86\x87\x88" + "\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90" + "\x91\x92\x93\x94\x95\x96\x97\x98" + "\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0", + .psize = 40, + .digest = "\x67\x9b\xfa\x79", + }, + { + .key = "\xa6\xcc\x19\x85", + .ksize = 4, + .plaintext = "\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8" + "\xa9\xaa\xab\xac\xad\xae\xaf\xb0" + "\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8" + "\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0" + "\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8", + .psize = 40, + .digest = "\x24\xb5\x16\xef", + }, + { + .key = "\x41\xfc\xfe\x2d", + .ksize = 4, + .plaintext = "\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0" + "\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8" + "\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0" + "\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8" + "\xe9\xea\xeb\xec\xed\xee\xef\xf0", + .psize = 40, + .digest = "\x15\x94\x80\x39", + }, + { + .key = "\xff\xff\xff\xff", + .ksize = 4, + .plaintext = "\x01\x02\x03\x04\x05\x06\x07\x08" + "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10" + "\x11\x12\x13\x14\x15\x16\x17\x18" + "\x19\x1a\x1b\x1c\x1d\x1e\x1f\x20" + "\x21\x22\x23\x24\x25\x26\x27\x28" + "\x29\x2a\x2b\x2c\x2d\x2e\x2f\x30" + "\x31\x32\x33\x34\x35\x36\x37\x38" + "\x39\x3a\x3b\x3c\x3d\x3e\x3f\x40" + "\x41\x42\x43\x44\x45\x46\x47\x48" + "\x49\x4a\x4b\x4c\x4d\x4e\x4f\x50" + "\x51\x52\x53\x54\x55\x56\x57\x58" + "\x59\x5a\x5b\x5c\x5d\x5e\x5f\x60" + "\x61\x62\x63\x64\x65\x66\x67\x68" + "\x69\x6a\x6b\x6c\x6d\x6e\x6f\x70" + "\x71\x72\x73\x74\x75\x76\x77\x78" + "\x79\x7a\x7b\x7c\x7d\x7e\x7f\x80" + "\x81\x82\x83\x84\x85\x86\x87\x88" + "\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90" + "\x91\x92\x93\x94\x95\x96\x97\x98" + "\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0" + "\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8" + "\xa9\xaa\xab\xac\xad\xae\xaf\xb0" + "\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8" + "\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0" + "\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8" + "\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0" + "\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8" + 
"\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0" + "\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8" + "\xe9\xea\xeb\xec\xed\xee\xef\xf0", + .psize = 240, + .digest = "\x6c\xc6\x56\xde", + .np = 2, + .tap = { 31, 209 } + }, { + .key = "\xff\xff\xff\xff", + .ksize = 4, + .plaintext = "\x6e\x05\x79\x10\xa7\x1b\xb2\x49" + "\xe0\x54\xeb\x82\x19\x8d\x24\xbb" + "\x2f\xc6\x5d\xf4\x68\xff\x96\x0a" + "\xa1\x38\xcf\x43\xda\x71\x08\x7c" + "\x13\xaa\x1e\xb5\x4c\xe3\x57\xee" + "\x85\x1c\x90\x27\xbe\x32\xc9\x60" + "\xf7\x6b\x02\x99\x0d\xa4\x3b\xd2" + "\x46\xdd\x74\x0b\x7f\x16\xad\x21" + "\xb8\x4f\xe6\x5a\xf1\x88\x1f\x93" + "\x2a\xc1\x35\xcc\x63\xfa\x6e\x05" + "\x9c\x10\xa7\x3e\xd5\x49\xe0\x77" + "\x0e\x82\x19\xb0\x24\xbb\x52\xe9" + "\x5d\xf4\x8b\x22\x96\x2d\xc4\x38" + "\xcf\x66\xfd\x71\x08\x9f\x13\xaa" + "\x41\xd8\x4c\xe3\x7a\x11\x85\x1c" + "\xb3\x27\xbe\x55\xec\x60\xf7\x8e" + "\x02\x99\x30\xc7\x3b\xd2\x69\x00" + "\x74\x0b\xa2\x16\xad\x44\xdb\x4f" + "\xe6\x7d\x14\x88\x1f\xb6\x2a\xc1" + "\x58\xef\x63\xfa\x91\x05\x9c\x33" + "\xca\x3e\xd5\x6c\x03\x77\x0e\xa5" + "\x19\xb0\x47\xde\x52\xe9\x80\x17" + "\x8b\x22\xb9\x2d\xc4\x5b\xf2\x66" + "\xfd\x94\x08\x9f\x36\xcd\x41\xd8" + "\x6f\x06\x7a\x11\xa8\x1c\xb3\x4a" + "\xe1\x55\xec\x83\x1a\x8e\x25\xbc" + "\x30\xc7\x5e\xf5\x69\x00\x97\x0b" + "\xa2\x39\xd0\x44\xdb\x72\x09\x7d" + "\x14\xab\x1f\xb6\x4d\xe4\x58\xef" + "\x86\x1d\x91\x28\xbf\x33\xca\x61" + "\xf8\x6c\x03\x9a\x0e\xa5\x3c\xd3" + "\x47\xde\x75\x0c\x80\x17\xae\x22" + "\xb9\x50\xe7\x5b\xf2\x89\x20\x94" + "\x2b\xc2\x36\xcd\x64\xfb\x6f\x06" + "\x9d\x11\xa8\x3f\xd6\x4a\xe1\x78" + "\x0f\x83\x1a\xb1\x25\xbc\x53\xea" + "\x5e\xf5\x8c\x00\x97\x2e\xc5\x39" + "\xd0\x67\xfe\x72\x09\xa0\x14\xab" + "\x42\xd9\x4d\xe4\x7b\x12\x86\x1d" + "\xb4\x28\xbf\x56\xed\x61\xf8\x8f" + "\x03\x9a\x31\xc8\x3c\xd3\x6a\x01" + "\x75\x0c\xa3\x17\xae\x45\xdc\x50" + "\xe7\x7e\x15\x89\x20\xb7\x2b\xc2" + "\x59\xf0\x64\xfb\x92\x06\x9d\x34" + "\xcb\x3f\xd6\x6d\x04\x78\x0f\xa6" + "\x1a\xb1\x48\xdf\x53\xea\x81\x18" + "\x8c\x23\xba\x2e\xc5\x5c\xf3\x67" + "\xfe\x95\x09\xa0\x37\xce\x42\xd9" + "\x70\x07\x7b\x12\xa9\x1d\xb4\x4b" + "\xe2\x56\xed\x84\x1b\x8f\x26\xbd" + "\x31\xc8\x5f\xf6\x6a\x01\x98\x0c" + "\xa3\x3a\xd1\x45\xdc\x73\x0a\x7e" + "\x15\xac\x20\xb7\x4e\xe5\x59\xf0" + "\x87\x1e\x92\x29\xc0\x34\xcb\x62" + "\xf9\x6d\x04\x9b\x0f\xa6\x3d\xd4" + "\x48\xdf\x76\x0d\x81\x18\xaf\x23" + "\xba\x51\xe8\x5c\xf3\x8a\x21\x95" + "\x2c\xc3\x37\xce\x65\xfc\x70\x07" + "\x9e\x12\xa9\x40\xd7\x4b\xe2\x79" + "\x10\x84\x1b\xb2\x26\xbd\x54\xeb" + "\x5f\xf6\x8d\x01\x98\x2f\xc6\x3a" + "\xd1\x68\xff\x73\x0a\xa1\x15\xac" + "\x43\xda\x4e\xe5\x7c\x13\x87\x1e" + "\xb5\x29\xc0\x57\xee\x62\xf9\x90" + "\x04\x9b\x32\xc9\x3d\xd4\x6b\x02" + "\x76\x0d\xa4\x18\xaf\x46\xdd\x51" + "\xe8\x7f\x16\x8a\x21\xb8\x2c\xc3" + "\x5a\xf1\x65\xfc\x93\x07\x9e\x35" + "\xcc\x40\xd7\x6e\x05\x79\x10\xa7" + "\x1b\xb2\x49\xe0\x54\xeb\x82\x19" + "\x8d\x24\xbb\x2f\xc6\x5d\xf4\x68" + "\xff\x96\x0a\xa1\x38\xcf\x43\xda" + "\x71\x08\x7c\x13\xaa\x1e\xb5\x4c" + "\xe3\x57\xee\x85\x1c\x90\x27\xbe" + "\x32\xc9\x60\xf7\x6b\x02\x99\x0d" + "\xa4\x3b\xd2\x46\xdd\x74\x0b\x7f" + "\x16\xad\x21\xb8\x4f\xe6\x5a\xf1" + "\x88\x1f\x93\x2a\xc1\x35\xcc\x63" + "\xfa\x6e\x05\x9c\x10\xa7\x3e\xd5" + "\x49\xe0\x77\x0e\x82\x19\xb0\x24" + "\xbb\x52\xe9\x5d\xf4\x8b\x22\x96" + "\x2d\xc4\x38\xcf\x66\xfd\x71\x08" + "\x9f\x13\xaa\x41\xd8\x4c\xe3\x7a" + "\x11\x85\x1c\xb3\x27\xbe\x55\xec" + "\x60\xf7\x8e\x02\x99\x30\xc7\x3b" + "\xd2\x69\x00\x74\x0b\xa2\x16\xad" + "\x44\xdb\x4f\xe6\x7d\x14\x88\x1f" + "\xb6\x2a\xc1\x58\xef\x63\xfa\x91" + "\x05\x9c\x33\xca\x3e\xd5\x6c\x03" + 
"\x77\x0e\xa5\x19\xb0\x47\xde\x52" + "\xe9\x80\x17\x8b\x22\xb9\x2d\xc4" + "\x5b\xf2\x66\xfd\x94\x08\x9f\x36" + "\xcd\x41\xd8\x6f\x06\x7a\x11\xa8" + "\x1c\xb3\x4a\xe1\x55\xec\x83\x1a" + "\x8e\x25\xbc\x30\xc7\x5e\xf5\x69" + "\x00\x97\x0b\xa2\x39\xd0\x44\xdb" + "\x72\x09\x7d\x14\xab\x1f\xb6\x4d" + "\xe4\x58\xef\x86\x1d\x91\x28\xbf" + "\x33\xca\x61\xf8\x6c\x03\x9a\x0e" + "\xa5\x3c\xd3\x47\xde\x75\x0c\x80" + "\x17\xae\x22\xb9\x50\xe7\x5b\xf2" + "\x89\x20\x94\x2b\xc2\x36\xcd\x64" + "\xfb\x6f\x06\x9d\x11\xa8\x3f\xd6" + "\x4a\xe1\x78\x0f\x83\x1a\xb1\x25" + "\xbc\x53\xea\x5e\xf5\x8c\x00\x97" + "\x2e\xc5\x39\xd0\x67\xfe\x72\x09" + "\xa0\x14\xab\x42\xd9\x4d\xe4\x7b" + "\x12\x86\x1d\xb4\x28\xbf\x56\xed" + "\x61\xf8\x8f\x03\x9a\x31\xc8\x3c" + "\xd3\x6a\x01\x75\x0c\xa3\x17\xae" + "\x45\xdc\x50\xe7\x7e\x15\x89\x20" + "\xb7\x2b\xc2\x59\xf0\x64\xfb\x92" + "\x06\x9d\x34\xcb\x3f\xd6\x6d\x04" + "\x78\x0f\xa6\x1a\xb1\x48\xdf\x53" + "\xea\x81\x18\x8c\x23\xba\x2e\xc5" + "\x5c\xf3\x67\xfe\x95\x09\xa0\x37" + "\xce\x42\xd9\x70\x07\x7b\x12\xa9" + "\x1d\xb4\x4b\xe2\x56\xed\x84\x1b" + "\x8f\x26\xbd\x31\xc8\x5f\xf6\x6a" + "\x01\x98\x0c\xa3\x3a\xd1\x45\xdc" + "\x73\x0a\x7e\x15\xac\x20\xb7\x4e" + "\xe5\x59\xf0\x87\x1e\x92\x29\xc0" + "\x34\xcb\x62\xf9\x6d\x04\x9b\x0f" + "\xa6\x3d\xd4\x48\xdf\x76\x0d\x81" + "\x18\xaf\x23\xba\x51\xe8\x5c\xf3" + "\x8a\x21\x95\x2c\xc3\x37\xce\x65" + "\xfc\x70\x07\x9e\x12\xa9\x40\xd7" + "\x4b\xe2\x79\x10\x84\x1b\xb2\x26" + "\xbd\x54\xeb\x5f\xf6\x8d\x01\x98" + "\x2f\xc6\x3a\xd1\x68\xff\x73\x0a" + "\xa1\x15\xac\x43\xda\x4e\xe5\x7c" + "\x13\x87\x1e\xb5\x29\xc0\x57\xee" + "\x62\xf9\x90\x04\x9b\x32\xc9\x3d" + "\xd4\x6b\x02\x76\x0d\xa4\x18\xaf" + "\x46\xdd\x51\xe8\x7f\x16\x8a\x21" + "\xb8\x2c\xc3\x5a\xf1\x65\xfc\x93" + "\x07\x9e\x35\xcc\x40\xd7\x6e\x05" + "\x79\x10\xa7\x1b\xb2\x49\xe0\x54" + "\xeb\x82\x19\x8d\x24\xbb\x2f\xc6" + "\x5d\xf4\x68\xff\x96\x0a\xa1\x38" + "\xcf\x43\xda\x71\x08\x7c\x13\xaa" + "\x1e\xb5\x4c\xe3\x57\xee\x85\x1c" + "\x90\x27\xbe\x32\xc9\x60\xf7\x6b" + "\x02\x99\x0d\xa4\x3b\xd2\x46\xdd" + "\x74\x0b\x7f\x16\xad\x21\xb8\x4f" + "\xe6\x5a\xf1\x88\x1f\x93\x2a\xc1" + "\x35\xcc\x63\xfa\x6e\x05\x9c\x10" + "\xa7\x3e\xd5\x49\xe0\x77\x0e\x82" + "\x19\xb0\x24\xbb\x52\xe9\x5d\xf4" + "\x8b\x22\x96\x2d\xc4\x38\xcf\x66" + "\xfd\x71\x08\x9f\x13\xaa\x41\xd8" + "\x4c\xe3\x7a\x11\x85\x1c\xb3\x27" + "\xbe\x55\xec\x60\xf7\x8e\x02\x99" + "\x30\xc7\x3b\xd2\x69\x00\x74\x0b" + "\xa2\x16\xad\x44\xdb\x4f\xe6\x7d" + "\x14\x88\x1f\xb6\x2a\xc1\x58\xef" + "\x63\xfa\x91\x05\x9c\x33\xca\x3e" + "\xd5\x6c\x03\x77\x0e\xa5\x19\xb0" + "\x47\xde\x52\xe9\x80\x17\x8b\x22" + "\xb9\x2d\xc4\x5b\xf2\x66\xfd\x94" + "\x08\x9f\x36\xcd\x41\xd8\x6f\x06" + "\x7a\x11\xa8\x1c\xb3\x4a\xe1\x55" + "\xec\x83\x1a\x8e\x25\xbc\x30\xc7" + "\x5e\xf5\x69\x00\x97\x0b\xa2\x39" + "\xd0\x44\xdb\x72\x09\x7d\x14\xab" + "\x1f\xb6\x4d\xe4\x58\xef\x86\x1d" + "\x91\x28\xbf\x33\xca\x61\xf8\x6c" + "\x03\x9a\x0e\xa5\x3c\xd3\x47\xde" + "\x75\x0c\x80\x17\xae\x22\xb9\x50" + "\xe7\x5b\xf2\x89\x20\x94\x2b\xc2" + "\x36\xcd\x64\xfb\x6f\x06\x9d\x11" + "\xa8\x3f\xd6\x4a\xe1\x78\x0f\x83" + "\x1a\xb1\x25\xbc\x53\xea\x5e\xf5" + "\x8c\x00\x97\x2e\xc5\x39\xd0\x67" + "\xfe\x72\x09\xa0\x14\xab\x42\xd9" + "\x4d\xe4\x7b\x12\x86\x1d\xb4\x28" + "\xbf\x56\xed\x61\xf8\x8f\x03\x9a" + "\x31\xc8\x3c\xd3\x6a\x01\x75\x0c" + "\xa3\x17\xae\x45\xdc\x50\xe7\x7e" + "\x15\x89\x20\xb7\x2b\xc2\x59\xf0" + "\x64\xfb\x92\x06\x9d\x34\xcb\x3f" + "\xd6\x6d\x04\x78\x0f\xa6\x1a\xb1" + "\x48\xdf\x53\xea\x81\x18\x8c\x23" + "\xba\x2e\xc5\x5c\xf3\x67\xfe\x95" + "\x09\xa0\x37\xce\x42\xd9\x70\x07" + 
"\x7b\x12\xa9\x1d\xb4\x4b\xe2\x56" + "\xed\x84\x1b\x8f\x26\xbd\x31\xc8" + "\x5f\xf6\x6a\x01\x98\x0c\xa3\x3a" + "\xd1\x45\xdc\x73\x0a\x7e\x15\xac" + "\x20\xb7\x4e\xe5\x59\xf0\x87\x1e" + "\x92\x29\xc0\x34\xcb\x62\xf9\x6d" + "\x04\x9b\x0f\xa6\x3d\xd4\x48\xdf" + "\x76\x0d\x81\x18\xaf\x23\xba\x51" + "\xe8\x5c\xf3\x8a\x21\x95\x2c\xc3" + "\x37\xce\x65\xfc\x70\x07\x9e\x12" + "\xa9\x40\xd7\x4b\xe2\x79\x10\x84" + "\x1b\xb2\x26\xbd\x54\xeb\x5f\xf6" + "\x8d\x01\x98\x2f\xc6\x3a\xd1\x68" + "\xff\x73\x0a\xa1\x15\xac\x43\xda" + "\x4e\xe5\x7c\x13\x87\x1e\xb5\x29" + "\xc0\x57\xee\x62\xf9\x90\x04\x9b" + "\x32\xc9\x3d\xd4\x6b\x02\x76\x0d" + "\xa4\x18\xaf\x46\xdd\x51\xe8\x7f" + "\x16\x8a\x21\xb8\x2c\xc3\x5a\xf1" + "\x65\xfc\x93\x07\x9e\x35\xcc\x40" + "\xd7\x6e\x05\x79\x10\xa7\x1b\xb2" + "\x49\xe0\x54\xeb\x82\x19\x8d\x24" + "\xbb\x2f\xc6\x5d\xf4\x68\xff\x96" + "\x0a\xa1\x38\xcf\x43\xda\x71\x08" + "\x7c\x13\xaa\x1e\xb5\x4c\xe3\x57" + "\xee\x85\x1c\x90\x27\xbe\x32\xc9" + "\x60\xf7\x6b\x02\x99\x0d\xa4\x3b" + "\xd2\x46\xdd\x74\x0b\x7f\x16\xad" + "\x21\xb8\x4f\xe6\x5a\xf1\x88\x1f" + "\x93\x2a\xc1\x35\xcc\x63\xfa\x6e" + "\x05\x9c\x10\xa7\x3e\xd5\x49\xe0" + "\x77\x0e\x82\x19\xb0\x24\xbb\x52" + "\xe9\x5d\xf4\x8b\x22\x96\x2d\xc4" + "\x38\xcf\x66\xfd\x71\x08\x9f\x13" + "\xaa\x41\xd8\x4c\xe3\x7a\x11\x85" + "\x1c\xb3\x27\xbe\x55\xec\x60\xf7" + "\x8e\x02\x99\x30\xc7\x3b\xd2\x69" + "\x00\x74\x0b\xa2\x16\xad\x44\xdb" + "\x4f\xe6\x7d\x14\x88\x1f\xb6\x2a" + "\xc1\x58\xef\x63\xfa\x91\x05\x9c" + "\x33\xca\x3e\xd5\x6c\x03\x77\x0e" + "\xa5\x19\xb0\x47\xde\x52\xe9\x80" + "\x17\x8b\x22\xb9\x2d\xc4\x5b\xf2" + "\x66\xfd\x94\x08\x9f\x36\xcd\x41" + "\xd8\x6f\x06\x7a\x11\xa8\x1c\xb3" + "\x4a\xe1\x55\xec\x83\x1a\x8e\x25" + "\xbc\x30\xc7\x5e\xf5\x69\x00\x97" + "\x0b\xa2\x39\xd0\x44\xdb\x72\x09" + "\x7d\x14\xab\x1f\xb6\x4d\xe4\x58" + "\xef\x86\x1d\x91\x28\xbf\x33\xca" + "\x61\xf8\x6c\x03\x9a\x0e\xa5\x3c" + "\xd3\x47\xde\x75\x0c\x80\x17\xae" + "\x22\xb9\x50\xe7\x5b\xf2\x89\x20" + "\x94\x2b\xc2\x36\xcd\x64\xfb\x6f" + "\x06\x9d\x11\xa8\x3f\xd6\x4a\xe1" + "\x78\x0f\x83\x1a\xb1\x25\xbc\x53" + "\xea\x5e\xf5\x8c\x00\x97\x2e\xc5" + "\x39\xd0\x67\xfe\x72\x09\xa0\x14" + "\xab\x42\xd9\x4d\xe4\x7b\x12\x86" + "\x1d\xb4\x28\xbf\x56\xed\x61\xf8" + "\x8f\x03\x9a\x31\xc8\x3c\xd3\x6a" + "\x01\x75\x0c\xa3\x17\xae\x45\xdc" + "\x50\xe7\x7e\x15\x89\x20\xb7\x2b" + "\xc2\x59\xf0\x64\xfb\x92\x06\x9d" + "\x34\xcb\x3f\xd6\x6d\x04\x78\x0f" + "\xa6\x1a\xb1\x48\xdf\x53\xea\x81" + "\x18\x8c\x23\xba\x2e\xc5\x5c\xf3" + "\x67\xfe\x95\x09\xa0\x37\xce\x42" + "\xd9\x70\x07\x7b\x12\xa9\x1d\xb4" + "\x4b\xe2\x56\xed\x84\x1b\x8f\x26" + "\xbd\x31\xc8\x5f\xf6\x6a\x01\x98", + .psize = 2048, + .digest = "\xfb\x3a\x7a\xda", + } +}; + +/* * CRC32C test vectors */ #define CRC32C_TEST_VECTORS 15 -- cgit v0.10.2 From b130e7c04f1130f241eddf72098a6e08d41d08fe Mon Sep 17 00:00:00 2001 From: Dan Streetman Date: Thu, 7 May 2015 13:49:12 -0400 Subject: powerpc: export of_get_ibm_chip_id function Export the of_get_ibm_chip_id() function. This will be used by the PowerNV NX-842 driver. 
Signed-off-by: Dan Streetman Signed-off-by: Herbert Xu diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 308c5e1..ea2cea7 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -800,6 +800,7 @@ int of_get_ibm_chip_id(struct device_node *np) } return -1; } +EXPORT_SYMBOL(of_get_ibm_chip_id); /** * cpu_to_chip_id - Return the cpus chip-id -- cgit v0.10.2 From edc424f8cd84bbae530d8a9f86caf1923606fb24 Mon Sep 17 00:00:00 2001 From: Dan Streetman Date: Thu, 7 May 2015 13:49:13 -0400 Subject: powerpc: Add ICSWX instruction Add the asm ICSWX and ICSWEPX opcodes. Add definitions for the Coprocessor Request structures needed to use the icswx calls to coprocessors. Add icswx() function to perform the ICSWX asm using the provided Coprocessor Command Word value and Coprocessor Request Block structure. This is required for communication with the NX-842 coprocessor on a PowerNV system. Signed-off-by: Dan Streetman Signed-off-by: Herbert Xu diff --git a/arch/powerpc/include/asm/icswx.h b/arch/powerpc/include/asm/icswx.h new file mode 100644 index 0000000..9f8402b --- /dev/null +++ b/arch/powerpc/include/asm/icswx.h @@ -0,0 +1,184 @@ +/* + * ICSWX api + * + * Copyright (C) 2015 IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * This provides the Initiate Coprocessor Store Word Indexed (ICSWX) + * instruction. This instruction is used to communicate with PowerPC + * coprocessors. This also provides definitions of the structures used + * to communicate with the coprocessor. + * + * The RFC02130: Coprocessor Architecture document is the reference for + * everything in this file unless otherwise noted. 
+ */ +#ifndef _ARCH_POWERPC_INCLUDE_ASM_ICSWX_H_ +#define _ARCH_POWERPC_INCLUDE_ASM_ICSWX_H_ + +#include /* for PPC_ICSWX */ + +/* Chapter 6.5.8 Coprocessor-Completion Block (CCB) */ + +#define CCB_VALUE (0x3fffffffffffffff) +#define CCB_ADDRESS (0xfffffffffffffff8) +#define CCB_CM (0x0000000000000007) +#define CCB_CM0 (0x0000000000000004) +#define CCB_CM12 (0x0000000000000003) + +#define CCB_CM0_ALL_COMPLETIONS (0x0) +#define CCB_CM0_LAST_IN_CHAIN (0x4) +#define CCB_CM12_STORE (0x0) +#define CCB_CM12_INTERRUPT (0x1) + +#define CCB_SIZE (0x10) +#define CCB_ALIGN CCB_SIZE + +struct coprocessor_completion_block { + __be64 value; + __be64 address; +} __packed __aligned(CCB_ALIGN); + + +/* Chapter 6.5.7 Coprocessor-Status Block (CSB) */ + +#define CSB_V (0x80) +#define CSB_F (0x04) +#define CSB_CH (0x03) +#define CSB_CE_INCOMPLETE (0x80) +#define CSB_CE_TERMINATION (0x40) +#define CSB_CE_TPBC (0x20) + +#define CSB_CC_SUCCESS (0) +#define CSB_CC_INVALID_ALIGN (1) +#define CSB_CC_OPERAND_OVERLAP (2) +#define CSB_CC_DATA_LENGTH (3) +#define CSB_CC_TRANSLATION (5) +#define CSB_CC_PROTECTION (6) +#define CSB_CC_RD_EXTERNAL (7) +#define CSB_CC_INVALID_OPERAND (8) +#define CSB_CC_PRIVILEGE (9) +#define CSB_CC_INTERNAL (10) +#define CSB_CC_WR_EXTERNAL (12) +#define CSB_CC_NOSPC (13) +#define CSB_CC_EXCESSIVE_DDE (14) +#define CSB_CC_WR_TRANSLATION (15) +#define CSB_CC_WR_PROTECTION (16) +#define CSB_CC_UNKNOWN_CODE (17) +#define CSB_CC_ABORT (18) +#define CSB_CC_TRANSPORT (20) +#define CSB_CC_SEGMENTED_DDL (31) +#define CSB_CC_PROGRESS_POINT (32) +#define CSB_CC_DDE_OVERFLOW (33) +#define CSB_CC_SESSION (34) +#define CSB_CC_PROVISION (36) +#define CSB_CC_CHAIN (37) +#define CSB_CC_SEQUENCE (38) +#define CSB_CC_HW (39) + +#define CSB_SIZE (0x10) +#define CSB_ALIGN CSB_SIZE + +struct coprocessor_status_block { + u8 flags; + u8 cs; + u8 cc; + u8 ce; + __be32 count; + __be64 address; +} __packed __aligned(CSB_ALIGN); + + +/* Chapter 6.5.10 Data-Descriptor List (DDL) + * each list contains one or more Data-Descriptor Entries (DDE) + */ + +#define DDE_P (0x8000) + +#define DDE_SIZE (0x10) +#define DDE_ALIGN DDE_SIZE + +struct data_descriptor_entry { + __be16 flags; + u8 count; + u8 index; + __be32 length; + __be64 address; +} __packed __aligned(DDE_ALIGN); + + +/* Chapter 6.5.2 Coprocessor-Request Block (CRB) */ + +#define CRB_SIZE (0x80) +#define CRB_ALIGN (0x100) /* Errata: requires 256 alignment */ + +/* Coprocessor Status Block field + * ADDRESS address of CSB + * C CCB is valid + * AT 0 = addrs are virtual, 1 = addrs are phys + * M enable perf monitor + */ +#define CRB_CSB_ADDRESS (0xfffffffffffffff0) +#define CRB_CSB_C (0x0000000000000008) +#define CRB_CSB_AT (0x0000000000000002) +#define CRB_CSB_M (0x0000000000000001) + +struct coprocessor_request_block { + __be32 ccw; + __be32 flags; + __be64 csb_addr; + + struct data_descriptor_entry source; + struct data_descriptor_entry target; + + struct coprocessor_completion_block ccb; + + u8 reserved[48]; + + struct coprocessor_status_block csb; +} __packed __aligned(CRB_ALIGN); + + +/* RFC02167 Initiate Coprocessor Instructions document + * Chapter 8.2.1.1.1 RS + * Chapter 8.2.3 Coprocessor Directive + * Chapter 8.2.4 Execution + * + * The CCW must be converted to BE before passing to icswx() + */ + +#define CCW_PS (0xff000000) +#define CCW_CT (0x00ff0000) +#define CCW_CD (0x0000ffff) +#define CCW_CL (0x0000c000) + + +/* RFC02167 Initiate Coprocessor Instructions document + * Chapter 8.2.1 Initiate Coprocessor Store Word Indexed (ICSWX) + * Chapter 8.2.4.1 
Condition Register 0 + */ + +#define ICSWX_INITIATED (0x8) +#define ICSWX_BUSY (0x4) +#define ICSWX_REJECTED (0x2) + +static inline int icswx(__be32 ccw, struct coprocessor_request_block *crb) +{ + __be64 ccw_reg = ccw; + u32 cr; + + __asm__ __volatile__( + PPC_ICSWX(%1,0,%2) "\n" + "mfcr %0\n" + : "=r" (cr) + : "r" (ccw_reg), "r" (crb) + : "cr0", "memory"); + + return (int)((cr >> 28) & 0xf); +} + + +#endif /* _ARCH_POWERPC_INCLUDE_ASM_ICSWX_H_ */ diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index 5c93f69..8452335 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -136,6 +136,8 @@ #define PPC_INST_DCBAL 0x7c2005ec #define PPC_INST_DCBZL 0x7c2007ec #define PPC_INST_ICBT 0x7c00002c +#define PPC_INST_ICSWX 0x7c00032d +#define PPC_INST_ICSWEPX 0x7c00076d #define PPC_INST_ISEL 0x7c00001e #define PPC_INST_ISEL_MASK 0xfc00003e #define PPC_INST_LDARX 0x7c0000a8 @@ -403,4 +405,15 @@ #define MFTMR(tmr, r) stringify_in_c(.long PPC_INST_MFTMR | \ TMRN(tmr) | ___PPC_RT(r)) +/* Coprocessor instructions */ +#define PPC_ICSWX(s, a, b) stringify_in_c(.long PPC_INST_ICSWX | \ + ___PPC_RS(s) | \ + ___PPC_RA(a) | \ + ___PPC_RB(b)) +#define PPC_ICSWEPX(s, a, b) stringify_in_c(.long PPC_INST_ICSWEPX | \ + ___PPC_RS(s) | \ + ___PPC_RA(a) | \ + ___PPC_RB(b)) + + #endif /* _ASM_POWERPC_PPC_OPCODE_H */ -- cgit v0.10.2 From 2da572c959dd5815aef153cf62010b16a498a0d3 Mon Sep 17 00:00:00 2001 From: Dan Streetman Date: Thu, 7 May 2015 13:49:14 -0400 Subject: lib: add software 842 compression/decompression Add 842-format software compression and decompression functions. Update the MAINTAINERS 842 section to include the new files. The 842 compression function can compress any input data into the 842 compression format. The 842 decompression function can decompress any standard-format 842 compressed data - specifically, either a compressed data buffer created by the 842 software compression function, or a compressed data buffer created by the 842 hardware compressor (located in PowerPC coprocessors). The 842 compressed data format is explained in the header comments. This is used in a later patch to provide a full software 842 compression and decompression crypto interface. Signed-off-by: Dan Streetman Signed-off-by: Herbert Xu diff --git a/MAINTAINERS b/MAINTAINERS index 56a432d..96684fd 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4863,6 +4863,8 @@ M: Dan Streetman S: Supported F: drivers/crypto/nx/nx-842.c F: include/linux/nx842.h +F: include/linux/sw842.h +F: lib/842/ IBM Power Linux RAID adapter M: Brian King diff --git a/include/linux/sw842.h b/include/linux/sw842.h new file mode 100644 index 0000000..109ba04 --- /dev/null +++ b/include/linux/sw842.h @@ -0,0 +1,12 @@ +#ifndef __SW842_H__ +#define __SW842_H__ + +#define SW842_MEM_COMPRESS (0xf000) + +int sw842_compress(const u8 *src, unsigned int srclen, + u8 *dst, unsigned int *destlen, void *wmem); + +int sw842_decompress(const u8 *src, unsigned int srclen, + u8 *dst, unsigned int *destlen); + +#endif diff --git a/lib/842/842.h b/lib/842/842.h new file mode 100644 index 0000000..7c20003 --- /dev/null +++ b/lib/842/842.h @@ -0,0 +1,127 @@ + +#ifndef __842_H__ +#define __842_H__ + +/* The 842 compressed format is made up of multiple blocks, each of + * which have the format: + * + *