Diffstat (limited to 'drivers/crypto/caam')
-rw-r--r--  drivers/crypto/caam/caamalg.c     | 161
-rw-r--r--  drivers/crypto/caam/caamhash.c    | 581
-rw-r--r--  drivers/crypto/caam/ctrl.c        |   3
-rw-r--r--  drivers/crypto/caam/desc.h        |   6
-rw-r--r--  drivers/crypto/caam/desc_constr.h |  17
-rw-r--r--  drivers/crypto/caam/intern.h      |   1
-rw-r--r--  drivers/crypto/caam/jr.c          |  26
-rw-r--r--  drivers/crypto/caam/regs.h        |   8
-rw-r--r--  drivers/crypto/caam/sg_sw_sec4.h  |   2
9 files changed, 453 insertions, 352 deletions
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index b304421..156aad1 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -111,6 +111,42 @@
#else
#define debug(format, arg...)
#endif
+
+#ifdef DEBUG
+#include <linux/highmem.h>
+
+static void dbg_dump_sg(const char *level, const char *prefix_str,
+ int prefix_type, int rowsize, int groupsize,
+ struct scatterlist *sg, size_t tlen, bool ascii,
+ bool may_sleep)
+{
+ struct scatterlist *it;
+ void *it_page;
+ size_t len;
+ void *buf;
+
+ for (it = sg; it != NULL && tlen > 0; it = sg_next(it)) {
+ /*
+ * make sure the scatterlist's page
+ * has a valid virtual memory mapping
+ */
+ it_page = kmap_atomic(sg_page(it));
+ if (unlikely(!it_page)) {
+ printk(KERN_ERR "dbg_dump_sg: kmap failed\n");
+ return;
+ }
+
+ buf = it_page + it->offset;
+ len = min(tlen, it->length);
+ print_hex_dump(level, prefix_str, prefix_type, rowsize,
+ groupsize, buf, len, ascii);
+ tlen -= len;
+
+ kunmap_atomic(it_page);
+ }
+}
+#endif
+
static struct list_head alg_list;
struct caam_alg_entry {
@@ -227,8 +263,9 @@ static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
if (is_rfc3686) {
nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
enckeylen);
- append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+ LDST_CLASS_IND_CCB |
+ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
append_move(desc,
MOVE_SRC_OUTFIFO |
MOVE_DEST_CLASS1CTX |
@@ -500,11 +537,10 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
/* Load Counter into CONTEXT1 reg */
if (is_rfc3686)
- append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
- LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
/* Class 1 operation */
append_operation(desc, ctx->class1_alg_type |
@@ -578,11 +614,10 @@ skip_enc:
/* Load Counter into CONTEXT1 reg */
if (is_rfc3686)
- append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
- LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
/* Choose operation */
if (ctr_mode)
@@ -683,11 +718,10 @@ copy_iv:
/* Load Counter into CONTEXT1 reg */
if (is_rfc3686)
- append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
- LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
/* Class 1 operation */
append_operation(desc, ctx->class1_alg_type |
@@ -1478,7 +1512,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
int ret = 0;
u32 *key_jump_cmd;
u32 *desc;
- u32 *nonce;
+ u8 *nonce;
u32 geniv;
u32 ctx1_iv_off = 0;
const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
@@ -1531,9 +1565,10 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
/* Load nonce into CONTEXT1 reg */
if (is_rfc3686) {
- nonce = (u32 *)(key + keylen);
- append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+ nonce = (u8 *)key + keylen;
+ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+ LDST_CLASS_IND_CCB |
+ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
append_move(desc, MOVE_WAITCOMP |
MOVE_SRC_OUTFIFO |
MOVE_DEST_CLASS1CTX |
@@ -1549,11 +1584,10 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
/* Load counter into CONTEXT1 reg */
if (is_rfc3686)
- append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
- LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
/* Load operation */
append_operation(desc, ctx->class1_alg_type |
@@ -1590,9 +1624,10 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
/* Load nonce into CONTEXT1 reg */
if (is_rfc3686) {
- nonce = (u32 *)(key + keylen);
- append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+ nonce = (u8 *)key + keylen;
+ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+ LDST_CLASS_IND_CCB |
+ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
append_move(desc, MOVE_WAITCOMP |
MOVE_SRC_OUTFIFO |
MOVE_DEST_CLASS1CTX |
@@ -1608,11 +1643,10 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
/* Load counter into CONTEXT1 reg */
if (is_rfc3686)
- append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
- LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
/* Choose operation */
if (ctr_mode)
@@ -1653,9 +1687,10 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
/* Load Nonce into CONTEXT1 reg */
if (is_rfc3686) {
- nonce = (u32 *)(key + keylen);
- append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+ nonce = (u8 *)key + keylen;
+ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+ LDST_CLASS_IND_CCB |
+ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
append_move(desc, MOVE_WAITCOMP |
MOVE_SRC_OUTFIFO |
MOVE_DEST_CLASS1CTX |
@@ -1685,11 +1720,10 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
/* Load Counter into CONTEXT1 reg */
if (is_rfc3686)
- append_load_imm_u32(desc, (u32)1, LDST_IMM |
- LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
if (ctx1_iv_off)
append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
@@ -1995,9 +2029,9 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->info,
edesc->src_nents > 1 ? 100 : ivsize, 1);
- print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
- edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+ dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+ edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
#endif
ablkcipher_unmap(jrdev, edesc, req);
@@ -2027,9 +2061,9 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->info,
ivsize, 1);
- print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
- edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+ dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+ edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
#endif
ablkcipher_unmap(jrdev, edesc, req);
@@ -2184,12 +2218,15 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
int len, sec4_sg_index = 0;
#ifdef DEBUG
+ bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->info,
ivsize, 1);
- print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
- edesc->src_nents ? 100 : req->nbytes, 1);
+ printk(KERN_ERR "asked=%d, nbytes%d\n", (int)edesc->src_nents ? 100 : req->nbytes, req->nbytes);
+ dbg_dump_sg(KERN_ERR, "src @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->src,
+ edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
#endif
len = desc_len(sh_desc);
@@ -2241,12 +2278,14 @@ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
int len, sec4_sg_index = 0;
#ifdef DEBUG
+ bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->info,
ivsize, 1);
- print_hex_dump(KERN_ERR, "src @" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
- edesc->src_nents ? 100 : req->nbytes, 1);
+ dbg_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->src,
+ edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
#endif
len = desc_len(sh_desc);
@@ -2516,18 +2555,20 @@ static int aead_decrypt(struct aead_request *req)
u32 *desc;
int ret = 0;
+#ifdef DEBUG
+ bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
+ dbg_dump_sg(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->src,
+ req->assoclen + req->cryptlen, 1, may_sleep);
+#endif
+
/* allocate extended descriptor */
edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
&all_contig, false);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
- req->assoclen + req->cryptlen, 1);
-#endif
-
/* Create and submit job descriptor*/
init_authenc_job(req, edesc, all_contig, false);
#ifdef DEBUG
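The new dbg_dump_sg() helper above replaces the old sg_virt(req->src) dumps, which were only valid when the request buffer was a single contiguous, kernel-mapped segment. A minimal userspace sketch of the same walk (a hypothetical struct seg standing in for struct scatterlist, plain pointers standing in for kmap_atomic() of each page) shows how tlen bounds the dump across segments:

#include <stdio.h>
#include <stddef.h>

struct seg {
	const unsigned char *buf;	/* stands in for kmap_atomic(sg_page(it)) + it->offset */
	size_t length;
	struct seg *next;		/* stands in for sg_next(it) */
};

static void dump_segments(const char *prefix, const struct seg *sg, size_t tlen)
{
	for (const struct seg *it = sg; it && tlen > 0; it = it->next) {
		size_t len = tlen < it->length ? tlen : it->length;
		size_t i;

		printf("%s", prefix);
		for (i = 0; i < len; i++)
			printf("%02x ", it->buf[i]);
		printf("\n");
		tlen -= len;	/* stop once the requested total has been printed */
	}
}

int main(void)
{
	unsigned char a[4] = { 0xde, 0xad, 0xbe, 0xef };
	unsigned char b[4] = { 0x01, 0x02, 0x03, 0x04 };
	struct seg s1 = { b, sizeof(b), NULL };
	struct seg s0 = { a, sizeof(a), &s1 };

	dump_segments("dst: ", &s0, 6);	/* 4 bytes from the first segment, 2 from the second */
	return 0;
}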
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 36365b3..660dc20 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -99,17 +99,17 @@ static struct list_head hash_list;
/* ahash per-session context */
struct caam_hash_ctx {
- struct device *jrdev;
- u32 sh_desc_update[DESC_HASH_MAX_USED_LEN];
- u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN];
- u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN];
- u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN];
- u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN];
- dma_addr_t sh_desc_update_dma;
+ u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
+ u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
+ u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
+ u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
+ u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
+ dma_addr_t sh_desc_update_dma ____cacheline_aligned;
dma_addr_t sh_desc_update_first_dma;
dma_addr_t sh_desc_fin_dma;
dma_addr_t sh_desc_digest_dma;
dma_addr_t sh_desc_finup_dma;
+ struct device *jrdev;
u32 alg_type;
u32 alg_op;
u8 key[CAAM_MAX_HASH_KEY_SIZE];
@@ -187,15 +187,6 @@ static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
return buf_dma;
}
-/* Map req->src and put it in link table */
-static inline void src_map_to_sec4_sg(struct device *jrdev,
- struct scatterlist *src, int src_nents,
- struct sec4_sg_entry *sec4_sg)
-{
- dma_map_sg(jrdev, src, src_nents, DMA_TO_DEVICE);
- sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
-}
-
/*
* Only put buffer in link table if it contains data, which is possible,
* since a buffer has previously been used, and needs to be unmapped,
@@ -449,7 +440,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
u32 *desc;
struct split_key_result result;
dma_addr_t src_dma, dst_dma;
- int ret = 0;
+ int ret;
desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
if (!desc) {
@@ -526,7 +517,7 @@ static int ahash_setkey(struct crypto_ahash *ahash,
struct device *jrdev = ctx->jrdev;
int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
int digestsize = crypto_ahash_digestsize(ahash);
- int ret = 0;
+ int ret;
u8 *hashed_key = NULL;
#ifdef DEBUG
@@ -534,14 +525,15 @@ static int ahash_setkey(struct crypto_ahash *ahash,
#endif
if (keylen > blocksize) {
- hashed_key = kmalloc(sizeof(u8) * digestsize, GFP_KERNEL |
- GFP_DMA);
+ hashed_key = kmalloc_array(digestsize,
+ sizeof(*hashed_key),
+ GFP_KERNEL | GFP_DMA);
if (!hashed_key)
return -ENOMEM;
ret = hash_digest_key(ctx, key, &keylen, hashed_key,
digestsize);
if (ret)
- goto badkey;
+ goto bad_free_key;
key = hashed_key;
}
@@ -559,14 +551,14 @@ static int ahash_setkey(struct crypto_ahash *ahash,
ret = gen_split_hash_key(ctx, key, keylen);
if (ret)
- goto badkey;
+ goto bad_free_key;
ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->key_dma)) {
dev_err(jrdev, "unable to map key i/o memory\n");
ret = -ENOMEM;
- goto map_err;
+ goto error_free_key;
}
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
@@ -579,11 +571,10 @@ static int ahash_setkey(struct crypto_ahash *ahash,
dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
DMA_TO_DEVICE);
}
-
-map_err:
+ error_free_key:
kfree(hashed_key);
return ret;
-badkey:
+ bad_free_key:
kfree(hashed_key);
crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
@@ -595,16 +586,16 @@ badkey:
* @sec4_sg_dma: physical mapped address of h/w link table
* @src_nents: number of segments in input scatterlist
* @sec4_sg_bytes: length of dma mapped sec4_sg space
- * @sec4_sg: pointer to h/w link table
* @hw_desc: the h/w job descriptor followed by any referenced link tables
+ * @sec4_sg: h/w link table
*/
struct ahash_edesc {
dma_addr_t dst_dma;
dma_addr_t sec4_sg_dma;
int src_nents;
int sec4_sg_bytes;
- struct sec4_sg_entry *sec4_sg;
- u32 hw_desc[0];
+ u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned;
+ struct sec4_sg_entry sec4_sg[0];
};
static inline void ahash_unmap(struct device *dev,
@@ -774,6 +765,65 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
req->base.complete(&req->base, err);
}
+/*
+ * Allocate an enhanced descriptor, which contains the hardware descriptor
+ * and space for hardware scatter table containing sg_num entries.
+ */
+static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
+ int sg_num, u32 *sh_desc,
+ dma_addr_t sh_desc_dma,
+ gfp_t flags)
+{
+ struct ahash_edesc *edesc;
+ unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);
+
+ edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
+ return NULL;
+ }
+
+ init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
+ HDR_SHARE_DEFER | HDR_REVERSE);
+
+ return edesc;
+}
+
+static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
+ struct ahash_edesc *edesc,
+ struct ahash_request *req, int nents,
+ unsigned int first_sg,
+ unsigned int first_bytes, size_t to_hash)
+{
+ dma_addr_t src_dma;
+ u32 options;
+
+ if (nents > 1 || first_sg) {
+ struct sec4_sg_entry *sg = edesc->sec4_sg;
+ unsigned int sgsize = sizeof(*sg) * (first_sg + nents);
+
+ sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0);
+
+ src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->jrdev, src_dma)) {
+ dev_err(ctx->jrdev, "unable to map S/G table\n");
+ return -ENOMEM;
+ }
+
+ edesc->sec4_sg_bytes = sgsize;
+ edesc->sec4_sg_dma = src_dma;
+ options = LDST_SGF;
+ } else {
+ src_dma = sg_dma_address(req->src);
+ options = 0;
+ }
+
+ append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
+ options);
+
+ return 0;
+}
+
/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
@@ -789,12 +839,10 @@ static int ahash_update_ctx(struct ahash_request *req)
int *next_buflen = state->current_buf ? &state->buflen_0 :
&state->buflen_1, last_buflen;
int in_len = *buflen + req->nbytes, to_hash;
- u32 *sh_desc = ctx->sh_desc_update, *desc;
- dma_addr_t ptr = ctx->sh_desc_update_dma;
- int src_nents, sec4_sg_bytes, sec4_sg_src_index;
+ u32 *desc;
+ int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
struct ahash_edesc *edesc;
int ret = 0;
- int sh_len;
last_buflen = *next_buflen;
*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
@@ -807,40 +855,51 @@ static int ahash_update_ctx(struct ahash_request *req)
dev_err(jrdev, "Invalid number of src SG.\n");
return src_nents;
}
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+ dev_err(jrdev, "unable to DMA map source\n");
+ return -ENOMEM;
+ }
+ } else {
+ mapped_nents = 0;
+ }
+
sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
- sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
+ sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
sizeof(struct sec4_sg_entry);
/*
* allocate space for base edesc and hw desc commands,
* link tables
*/
- edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
- sec4_sg_bytes, GFP_DMA | flags);
+ edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
+ ctx->sh_desc_update,
+ ctx->sh_desc_update_dma, flags);
if (!edesc) {
- dev_err(jrdev,
- "could not allocate extended descriptor\n");
+ dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
return -ENOMEM;
}
edesc->src_nents = src_nents;
edesc->sec4_sg_bytes = sec4_sg_bytes;
- edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
- DESC_JOB_IO_LEN;
ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
edesc->sec4_sg, DMA_BIDIRECTIONAL);
if (ret)
- return ret;
+ goto unmap_ctx;
state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
edesc->sec4_sg + 1,
buf, state->buf_dma,
*buflen, last_buflen);
- if (src_nents) {
- src_map_to_sec4_sg(jrdev, req->src, src_nents,
- edesc->sec4_sg + sec4_sg_src_index);
+ if (mapped_nents) {
+ sg_to_sec4_sg_last(req->src, mapped_nents,
+ edesc->sec4_sg + sec4_sg_src_index,
+ 0);
if (*next_buflen)
scatterwalk_map_and_copy(next_buf, req->src,
to_hash - *buflen,
@@ -852,17 +911,15 @@ static int ahash_update_ctx(struct ahash_request *req)
state->current_buf = !state->current_buf;
- sh_len = desc_len(sh_desc);
desc = edesc->hw_desc;
- init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
- HDR_REVERSE);
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes,
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
dev_err(jrdev, "unable to map S/G table\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto unmap_ctx;
}
append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
@@ -877,13 +934,10 @@ static int ahash_update_ctx(struct ahash_request *req)
#endif
ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
- if (!ret) {
- ret = -EINPROGRESS;
- } else {
- ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
- DMA_BIDIRECTIONAL);
- kfree(edesc);
- }
+ if (ret)
+ goto unmap_ctx;
+
+ ret = -EINPROGRESS;
} else if (*next_buflen) {
scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
req->nbytes, 0);
@@ -899,6 +953,10 @@ static int ahash_update_ctx(struct ahash_request *req)
#endif
return ret;
+ unmap_ctx:
+ ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
+ kfree(edesc);
+ return ret;
}
static int ahash_final_ctx(struct ahash_request *req)
@@ -913,38 +971,31 @@ static int ahash_final_ctx(struct ahash_request *req)
int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
int last_buflen = state->current_buf ? state->buflen_0 :
state->buflen_1;
- u32 *sh_desc = ctx->sh_desc_fin, *desc;
- dma_addr_t ptr = ctx->sh_desc_fin_dma;
+ u32 *desc;
int sec4_sg_bytes, sec4_sg_src_index;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
- int ret = 0;
- int sh_len;
+ int ret;
sec4_sg_src_index = 1 + (buflen ? 1 : 0);
sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
- GFP_DMA | flags);
- if (!edesc) {
- dev_err(jrdev, "could not allocate extended descriptor\n");
+ edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index,
+ ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
+ flags);
+ if (!edesc)
return -ENOMEM;
- }
- sh_len = desc_len(sh_desc);
desc = edesc->hw_desc;
- init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
edesc->sec4_sg_bytes = sec4_sg_bytes;
- edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
- DESC_JOB_IO_LEN;
edesc->src_nents = 0;
ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
edesc->sec4_sg, DMA_TO_DEVICE);
if (ret)
- return ret;
+ goto unmap_ctx;
state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
buf, state->buf_dma, buflen,
@@ -956,7 +1007,8 @@ static int ahash_final_ctx(struct ahash_request *req)
sec4_sg_bytes, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
dev_err(jrdev, "unable to map S/G table\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto unmap_ctx;
}
append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
@@ -966,7 +1018,8 @@ static int ahash_final_ctx(struct ahash_request *req)
digestsize);
if (dma_mapping_error(jrdev, edesc->dst_dma)) {
dev_err(jrdev, "unable to map dst\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto unmap_ctx;
}
#ifdef DEBUG
@@ -975,13 +1028,13 @@ static int ahash_final_ctx(struct ahash_request *req)
#endif
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
- if (!ret) {
- ret = -EINPROGRESS;
- } else {
- ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
- kfree(edesc);
- }
+ if (ret)
+ goto unmap_ctx;
+ return -EINPROGRESS;
+ unmap_ctx:
+ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
+ kfree(edesc);
return ret;
}
@@ -997,68 +1050,66 @@ static int ahash_finup_ctx(struct ahash_request *req)
int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
int last_buflen = state->current_buf ? state->buflen_0 :
state->buflen_1;
- u32 *sh_desc = ctx->sh_desc_finup, *desc;
- dma_addr_t ptr = ctx->sh_desc_finup_dma;
- int sec4_sg_bytes, sec4_sg_src_index;
- int src_nents;
+ u32 *desc;
+ int sec4_sg_src_index;
+ int src_nents, mapped_nents;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
- int ret = 0;
- int sh_len;
+ int ret;
src_nents = sg_nents_for_len(req->src, req->nbytes);
if (src_nents < 0) {
dev_err(jrdev, "Invalid number of src SG.\n");
return src_nents;
}
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+ dev_err(jrdev, "unable to DMA map source\n");
+ return -ENOMEM;
+ }
+ } else {
+ mapped_nents = 0;
+ }
+
sec4_sg_src_index = 1 + (buflen ? 1 : 0);
- sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
- sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
- GFP_DMA | flags);
+ edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
+ ctx->sh_desc_finup, ctx->sh_desc_finup_dma,
+ flags);
if (!edesc) {
- dev_err(jrdev, "could not allocate extended descriptor\n");
+ dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
return -ENOMEM;
}
- sh_len = desc_len(sh_desc);
desc = edesc->hw_desc;
- init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
edesc->src_nents = src_nents;
- edesc->sec4_sg_bytes = sec4_sg_bytes;
- edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
- DESC_JOB_IO_LEN;
ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
edesc->sec4_sg, DMA_TO_DEVICE);
if (ret)
- return ret;
+ goto unmap_ctx;
state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
buf, state->buf_dma, buflen,
last_buflen);
- src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
- sec4_sg_src_index);
-
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
- sec4_sg_bytes, DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
- dev_err(jrdev, "unable to map S/G table\n");
- return -ENOMEM;
- }
-
- append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
- buflen + req->nbytes, LDST_SGF);
+ ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
+ sec4_sg_src_index, ctx->ctx_len + buflen,
+ req->nbytes);
+ if (ret)
+ goto unmap_ctx;
edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
digestsize);
if (dma_mapping_error(jrdev, edesc->dst_dma)) {
dev_err(jrdev, "unable to map dst\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto unmap_ctx;
}
#ifdef DEBUG
@@ -1067,13 +1118,13 @@ static int ahash_finup_ctx(struct ahash_request *req)
#endif
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
- if (!ret) {
- ret = -EINPROGRESS;
- } else {
- ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
- kfree(edesc);
- }
+ if (ret)
+ goto unmap_ctx;
+ return -EINPROGRESS;
+ unmap_ctx:
+ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
+ kfree(edesc);
return ret;
}
@@ -1084,60 +1135,56 @@ static int ahash_digest(struct ahash_request *req)
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
- u32 *sh_desc = ctx->sh_desc_digest, *desc;
- dma_addr_t ptr = ctx->sh_desc_digest_dma;
+ u32 *desc;
int digestsize = crypto_ahash_digestsize(ahash);
- int src_nents, sec4_sg_bytes;
- dma_addr_t src_dma;
+ int src_nents, mapped_nents;
struct ahash_edesc *edesc;
- int ret = 0;
- u32 options;
- int sh_len;
+ int ret;
- src_nents = sg_count(req->src, req->nbytes);
+ src_nents = sg_nents_for_len(req->src, req->nbytes);
if (src_nents < 0) {
dev_err(jrdev, "Invalid number of src SG.\n");
return src_nents;
}
- dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE);
- sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+ dev_err(jrdev, "unable to map source for DMA\n");
+ return -ENOMEM;
+ }
+ } else {
+ mapped_nents = 0;
+ }
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes + DESC_JOB_IO_LEN,
- GFP_DMA | flags);
+ edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
+ ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
+ flags);
if (!edesc) {
- dev_err(jrdev, "could not allocate extended descriptor\n");
+ dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
return -ENOMEM;
}
- edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
- DESC_JOB_IO_LEN;
- edesc->sec4_sg_bytes = sec4_sg_bytes;
- edesc->src_nents = src_nents;
- sh_len = desc_len(sh_desc);
- desc = edesc->hw_desc;
- init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
+ edesc->src_nents = src_nents;
- if (src_nents) {
- sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
- sec4_sg_bytes, DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
- dev_err(jrdev, "unable to map S/G table\n");
- return -ENOMEM;
- }
- src_dma = edesc->sec4_sg_dma;
- options = LDST_SGF;
- } else {
- src_dma = sg_dma_address(req->src);
- options = 0;
+ ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
+ req->nbytes);
+ if (ret) {
+ ahash_unmap(jrdev, edesc, req, digestsize);
+ kfree(edesc);
+ return ret;
}
- append_seq_in_ptr(desc, src_dma, req->nbytes, options);
+
+ desc = edesc->hw_desc;
edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
digestsize);
if (dma_mapping_error(jrdev, edesc->dst_dma)) {
dev_err(jrdev, "unable to map dst\n");
+ ahash_unmap(jrdev, edesc, req, digestsize);
+ kfree(edesc);
return -ENOMEM;
}
@@ -1168,29 +1215,23 @@ static int ahash_final_no_ctx(struct ahash_request *req)
CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
- u32 *sh_desc = ctx->sh_desc_digest, *desc;
- dma_addr_t ptr = ctx->sh_desc_digest_dma;
+ u32 *desc;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
- int ret = 0;
- int sh_len;
+ int ret;
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN, GFP_DMA | flags);
- if (!edesc) {
- dev_err(jrdev, "could not allocate extended descriptor\n");
+ edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest,
+ ctx->sh_desc_digest_dma, flags);
+ if (!edesc)
return -ENOMEM;
- }
- edesc->sec4_sg_bytes = 0;
- sh_len = desc_len(sh_desc);
desc = edesc->hw_desc;
- init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, state->buf_dma)) {
dev_err(jrdev, "unable to map src\n");
- return -ENOMEM;
+ goto unmap;
}
append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
@@ -1199,7 +1240,7 @@ static int ahash_final_no_ctx(struct ahash_request *req)
digestsize);
if (dma_mapping_error(jrdev, edesc->dst_dma)) {
dev_err(jrdev, "unable to map dst\n");
- return -ENOMEM;
+ goto unmap;
}
edesc->src_nents = 0;
@@ -1217,6 +1258,11 @@ static int ahash_final_no_ctx(struct ahash_request *req)
}
return ret;
+ unmap:
+ ahash_unmap(jrdev, edesc, req, digestsize);
+ kfree(edesc);
+ return -ENOMEM;
+
}
/* submit ahash update if it the first job descriptor after update */
@@ -1234,48 +1280,58 @@ static int ahash_update_no_ctx(struct ahash_request *req)
int *next_buflen = state->current_buf ? &state->buflen_0 :
&state->buflen_1;
int in_len = *buflen + req->nbytes, to_hash;
- int sec4_sg_bytes, src_nents;
+ int sec4_sg_bytes, src_nents, mapped_nents;
struct ahash_edesc *edesc;
- u32 *desc, *sh_desc = ctx->sh_desc_update_first;
- dma_addr_t ptr = ctx->sh_desc_update_first_dma;
+ u32 *desc;
int ret = 0;
- int sh_len;
*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
to_hash = in_len - *next_buflen;
if (to_hash) {
src_nents = sg_nents_for_len(req->src,
- req->nbytes - (*next_buflen));
+ req->nbytes - *next_buflen);
if (src_nents < 0) {
dev_err(jrdev, "Invalid number of src SG.\n");
return src_nents;
}
- sec4_sg_bytes = (1 + src_nents) *
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+ dev_err(jrdev, "unable to DMA map source\n");
+ return -ENOMEM;
+ }
+ } else {
+ mapped_nents = 0;
+ }
+
+ sec4_sg_bytes = (1 + mapped_nents) *
sizeof(struct sec4_sg_entry);
/*
* allocate space for base edesc and hw desc commands,
* link tables
*/
- edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
- sec4_sg_bytes, GFP_DMA | flags);
+ edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents,
+ ctx->sh_desc_update_first,
+ ctx->sh_desc_update_first_dma,
+ flags);
if (!edesc) {
- dev_err(jrdev,
- "could not allocate extended descriptor\n");
+ dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
return -ENOMEM;
}
edesc->src_nents = src_nents;
edesc->sec4_sg_bytes = sec4_sg_bytes;
- edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
- DESC_JOB_IO_LEN;
edesc->dst_dma = 0;
state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
buf, *buflen);
- src_map_to_sec4_sg(jrdev, req->src, src_nents,
- edesc->sec4_sg + 1);
+ sg_to_sec4_sg_last(req->src, mapped_nents,
+ edesc->sec4_sg + 1, 0);
+
if (*next_buflen) {
scatterwalk_map_and_copy(next_buf, req->src,
to_hash - *buflen,
@@ -1284,24 +1340,22 @@ static int ahash_update_no_ctx(struct ahash_request *req)
state->current_buf = !state->current_buf;
- sh_len = desc_len(sh_desc);
desc = edesc->hw_desc;
- init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
- HDR_REVERSE);
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes,
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
dev_err(jrdev, "unable to map S/G table\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto unmap_ctx;
}
append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);
ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
if (ret)
- return ret;
+ goto unmap_ctx;
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1310,16 +1364,13 @@ static int ahash_update_no_ctx(struct ahash_request *req)
#endif
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
- if (!ret) {
- ret = -EINPROGRESS;
- state->update = ahash_update_ctx;
- state->finup = ahash_finup_ctx;
- state->final = ahash_final_ctx;
- } else {
- ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
- DMA_TO_DEVICE);
- kfree(edesc);
- }
+ if (ret)
+ goto unmap_ctx;
+
+ ret = -EINPROGRESS;
+ state->update = ahash_update_ctx;
+ state->finup = ahash_finup_ctx;
+ state->final = ahash_final_ctx;
} else if (*next_buflen) {
scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
req->nbytes, 0);
@@ -1335,6 +1386,10 @@ static int ahash_update_no_ctx(struct ahash_request *req)
#endif
return ret;
+ unmap_ctx:
+ ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
+ kfree(edesc);
+ return ret;
}
/* submit ahash finup if it the first job descriptor after update */
@@ -1350,61 +1405,63 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
int last_buflen = state->current_buf ? state->buflen_0 :
state->buflen_1;
- u32 *sh_desc = ctx->sh_desc_digest, *desc;
- dma_addr_t ptr = ctx->sh_desc_digest_dma;
- int sec4_sg_bytes, sec4_sg_src_index, src_nents;
+ u32 *desc;
+ int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
- int sh_len;
- int ret = 0;
+ int ret;
src_nents = sg_nents_for_len(req->src, req->nbytes);
if (src_nents < 0) {
dev_err(jrdev, "Invalid number of src SG.\n");
return src_nents;
}
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+ dev_err(jrdev, "unable to DMA map source\n");
+ return -ENOMEM;
+ }
+ } else {
+ mapped_nents = 0;
+ }
+
sec4_sg_src_index = 2;
- sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
+ sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
- GFP_DMA | flags);
+ edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
+ ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
+ flags);
if (!edesc) {
- dev_err(jrdev, "could not allocate extended descriptor\n");
+ dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
return -ENOMEM;
}
- sh_len = desc_len(sh_desc);
desc = edesc->hw_desc;
- init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
edesc->src_nents = src_nents;
edesc->sec4_sg_bytes = sec4_sg_bytes;
- edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
- DESC_JOB_IO_LEN;
state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
state->buf_dma, buflen,
last_buflen);
- src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1);
-
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
- sec4_sg_bytes, DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+ ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
+ req->nbytes);
+ if (ret) {
dev_err(jrdev, "unable to map S/G table\n");
- return -ENOMEM;
+ goto unmap;
}
- append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
- req->nbytes, LDST_SGF);
-
edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
digestsize);
if (dma_mapping_error(jrdev, edesc->dst_dma)) {
dev_err(jrdev, "unable to map dst\n");
- return -ENOMEM;
+ goto unmap;
}
#ifdef DEBUG
@@ -1421,6 +1478,11 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
}
return ret;
+ unmap:
+ ahash_unmap(jrdev, edesc, req, digestsize);
+ kfree(edesc);
+ return -ENOMEM;
+
}
/* submit first update job descriptor after init */
@@ -1436,78 +1498,65 @@ static int ahash_update_first(struct ahash_request *req)
int *next_buflen = state->current_buf ?
&state->buflen_1 : &state->buflen_0;
int to_hash;
- u32 *sh_desc = ctx->sh_desc_update_first, *desc;
- dma_addr_t ptr = ctx->sh_desc_update_first_dma;
- int sec4_sg_bytes, src_nents;
- dma_addr_t src_dma;
- u32 options;
+ u32 *desc;
+ int src_nents, mapped_nents;
struct ahash_edesc *edesc;
int ret = 0;
- int sh_len;
*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
1);
to_hash = req->nbytes - *next_buflen;
if (to_hash) {
- src_nents = sg_count(req->src, req->nbytes - (*next_buflen));
+ src_nents = sg_nents_for_len(req->src,
+ req->nbytes - *next_buflen);
if (src_nents < 0) {
dev_err(jrdev, "Invalid number of src SG.\n");
return src_nents;
}
- dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE);
- sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+ dev_err(jrdev, "unable to map source for DMA\n");
+ return -ENOMEM;
+ }
+ } else {
+ mapped_nents = 0;
+ }
/*
* allocate space for base edesc and hw desc commands,
* link tables
*/
- edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
- sec4_sg_bytes, GFP_DMA | flags);
+ edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ?
+ mapped_nents : 0,
+ ctx->sh_desc_update_first,
+ ctx->sh_desc_update_first_dma,
+ flags);
if (!edesc) {
- dev_err(jrdev,
- "could not allocate extended descriptor\n");
+ dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
return -ENOMEM;
}
edesc->src_nents = src_nents;
- edesc->sec4_sg_bytes = sec4_sg_bytes;
- edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
- DESC_JOB_IO_LEN;
edesc->dst_dma = 0;
- if (src_nents) {
- sg_to_sec4_sg_last(req->src, src_nents,
- edesc->sec4_sg, 0);
- edesc->sec4_sg_dma = dma_map_single(jrdev,
- edesc->sec4_sg,
- sec4_sg_bytes,
- DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
- dev_err(jrdev, "unable to map S/G table\n");
- return -ENOMEM;
- }
- src_dma = edesc->sec4_sg_dma;
- options = LDST_SGF;
- } else {
- src_dma = sg_dma_address(req->src);
- options = 0;
- }
+ ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
+ to_hash);
+ if (ret)
+ goto unmap_ctx;
if (*next_buflen)
scatterwalk_map_and_copy(next_buf, req->src, to_hash,
*next_buflen, 0);
- sh_len = desc_len(sh_desc);
desc = edesc->hw_desc;
- init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
- HDR_REVERSE);
-
- append_seq_in_ptr(desc, src_dma, to_hash, options);
ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
if (ret)
- return ret;
+ goto unmap_ctx;
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1515,18 +1564,14 @@ static int ahash_update_first(struct ahash_request *req)
desc_bytes(desc), 1);
#endif
- ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst,
- req);
- if (!ret) {
- ret = -EINPROGRESS;
- state->update = ahash_update_ctx;
- state->finup = ahash_finup_ctx;
- state->final = ahash_final_ctx;
- } else {
- ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
- DMA_TO_DEVICE);
- kfree(edesc);
- }
+ ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
+ if (ret)
+ goto unmap_ctx;
+
+ ret = -EINPROGRESS;
+ state->update = ahash_update_ctx;
+ state->finup = ahash_finup_ctx;
+ state->final = ahash_final_ctx;
} else if (*next_buflen) {
state->update = ahash_update_no_ctx;
state->finup = ahash_finup_no_ctx;
@@ -1541,6 +1586,10 @@ static int ahash_update_first(struct ahash_request *req)
#endif
return ret;
+ unmap_ctx:
+ ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
+ kfree(edesc);
+ return ret;
}
static int ahash_finup_first(struct ahash_request *req)
@@ -1799,7 +1848,6 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
HASH_MSG_LEN + SHA256_DIGEST_SIZE,
HASH_MSG_LEN + 64,
HASH_MSG_LEN + SHA512_DIGEST_SIZE };
- int ret = 0;
/*
* Get a Job ring from Job Ring driver to ensure in-order
@@ -1819,10 +1867,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct caam_hash_state));
-
- ret = ahash_set_sh_desc(ahash);
-
- return ret;
+ return ahash_set_sh_desc(ahash);
}
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
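With struct ahash_edesc reworked above, the job descriptor becomes a fixed-size array and the sec4_sg link table trails it as a flexible array member, so ahash_edesc_alloc() can size both in a single allocation. A small userspace model of that sizing (simplified placeholder types and a placeholder DESC_JOB_IO_LEN, not the driver's real definitions):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define DESC_JOB_IO_LEN 48			/* placeholder, not the real value */

struct sec4_sg_entry { uint64_t ptr; uint32_t len; uint32_t bpid_offset; };

struct ahash_edesc {
	uint64_t dst_dma;			/* dma_addr_t modeled as u64 */
	uint64_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	uint32_t hw_desc[DESC_JOB_IO_LEN / sizeof(uint32_t)];
	struct sec4_sg_entry sec4_sg[];		/* link table trails the descriptor */
};

static struct ahash_edesc *edesc_alloc(int sg_num)
{
	struct ahash_edesc *edesc;

	/* one allocation covers the base struct plus sg_num link-table entries */
	edesc = calloc(1, sizeof(*edesc) + sg_num * sizeof(struct sec4_sg_entry));
	if (!edesc)
		return NULL;
	edesc->sec4_sg_bytes = sg_num * sizeof(struct sec4_sg_entry);
	return edesc;
}

int main(void)
{
	struct ahash_edesc *e = edesc_alloc(3);

	if (!e)
		return 1;
	printf("%zu base bytes + %d link-table bytes\n", sizeof(*e), e->sec4_sg_bytes);
	free(e);
	return 0;
}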
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 0ec112e..72ff196 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -14,6 +14,7 @@
#include "jr.h"
#include "desc_constr.h"
#include "error.h"
+#include "ctrl.h"
bool caam_little_end;
EXPORT_SYMBOL(caam_little_end);
@@ -826,6 +827,8 @@ static int caam_probe(struct platform_device *pdev)
caam_remove:
caam_remove(pdev);
+ return ret;
+
iounmap_ctrl:
iounmap(ctrl);
disable_caam_emi_slow:
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index 26427c1..513b664 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -23,13 +23,7 @@
#define SEC4_SG_OFFSET_MASK 0x00001fff
struct sec4_sg_entry {
-#if !defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) && \
- defined(CONFIG_CRYPTO_DEV_FSL_CAAM_IMX)
- u32 rsvd1;
- dma_addr_t ptr;
-#else
u64 ptr;
-#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_IMX */
u32 len;
u32 bpid_offset;
};
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index d3869b9..a8cd8a7 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -325,6 +325,23 @@ static inline void append_##cmd##_imm_##type(u32 *desc, type immediate, \
APPEND_CMD_RAW_IMM(load, LOAD, u32);
/*
+ * ee - endianness
+ * size - size of immediate type in bytes
+ */
+#define APPEND_CMD_RAW_IMM2(cmd, op, ee, size) \
+static inline void append_##cmd##_imm_##ee##size(u32 *desc, \
+ u##size immediate, \
+ u32 options) \
+{ \
+ __##ee##size data = cpu_to_##ee##size(immediate); \
+ PRINT_POS; \
+ append_cmd(desc, CMD_##op | IMMEDIATE | options | sizeof(data)); \
+ append_data(desc, &data, sizeof(data)); \
+}
+
+APPEND_CMD_RAW_IMM2(load, LOAD, be, 32);
+
+/*
* Append math command. Only the last part of destination and source need to
* be specified
*/
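For reference, APPEND_CMD_RAW_IMM2(load, LOAD, be, 32) generates roughly the following inline (hand-expanded from the macro body above, not a verified preprocessor run). The point is that the immediate lands in the descriptor as big-endian bytes regardless of host endianness, which is what the append_load_imm_be32(desc, 1, ...) call sites in caamalg.c rely on:

static inline void append_load_imm_be32(u32 *desc, u32 immediate, u32 options)
{
	__be32 data = cpu_to_be32(immediate);	/* byte-swap on little-endian hosts */

	PRINT_POS;
	append_cmd(desc, CMD_LOAD | IMMEDIATE | options | sizeof(data));
	append_data(desc, &data, sizeof(data));	/* immediate follows the command word */
}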
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index e2bcacc..5d4c050 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -41,7 +41,6 @@ struct caam_drv_private_jr {
struct device *dev;
int ridx;
struct caam_job_ring __iomem *rregs; /* JobR's register space */
- struct tasklet_struct irqtask;
int irq; /* One per queue */
/* Number of scatterlist crypt transforms active on the JobR */
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index a81f551..757c27f 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -73,8 +73,6 @@ static int caam_jr_shutdown(struct device *dev)
ret = caam_reset_hw_jr(dev);
- tasklet_kill(&jrp->irqtask);
-
/* Release interrupt */
free_irq(jrp->irq, dev);
@@ -130,7 +128,7 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
/*
* Check the output ring for ready responses, kick
- * tasklet if jobs done.
+ * the threaded irq if jobs done.
*/
irqstate = rd_reg32(&jrp->rregs->jrintstatus);
if (!irqstate)
@@ -152,18 +150,13 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
/* Have valid interrupt at this point, just ACK and trigger */
wr_reg32(&jrp->rregs->jrintstatus, irqstate);
- preempt_disable();
- tasklet_schedule(&jrp->irqtask);
- preempt_enable();
-
- return IRQ_HANDLED;
+ return IRQ_WAKE_THREAD;
}
-/* Deferred service handler, run as interrupt-fired tasklet */
-static void caam_jr_dequeue(unsigned long devarg)
+static irqreturn_t caam_jr_threadirq(int irq, void *st_dev)
{
int hw_idx, sw_idx, i, head, tail;
- struct device *dev = (struct device *)devarg;
+ struct device *dev = st_dev;
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
u32 *userdesc, userstatus;
@@ -237,6 +230,8 @@ static void caam_jr_dequeue(unsigned long devarg)
/* reenable / unmask IRQs */
clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
+
+ return IRQ_HANDLED;
}
/**
@@ -394,11 +389,10 @@ static int caam_jr_init(struct device *dev)
jrp = dev_get_drvdata(dev);
- tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);
-
/* Connect job ring interrupt handler. */
- error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
- dev_name(dev), dev);
+ error = request_threaded_irq(jrp->irq, caam_jr_interrupt,
+ caam_jr_threadirq, IRQF_SHARED,
+ dev_name(dev), dev);
if (error) {
dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
jrp->ridx, jrp->irq);
@@ -460,7 +454,6 @@ out_free_inpring:
out_free_irq:
free_irq(jrp->irq, dev);
out_kill_deq:
- tasklet_kill(&jrp->irqtask);
return error;
}
@@ -513,6 +506,7 @@ static int caam_jr_probe(struct platform_device *pdev)
error = caam_jr_init(jrdev); /* now turn on hardware */
if (error) {
irq_dispose_mapping(jrpriv->irq);
+ iounmap(ctrl);
return error;
}
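The jr.c change swaps the tasklet for a threaded interrupt: the hard handler only masks and ACKs the job ring interrupt and returns IRQ_WAKE_THREAD, and caam_jr_threadirq() then drains the output ring in process context. A stripped-down sketch of that pattern (hypothetical demo_* names, not driver code):

#include <linux/interrupt.h>

static irqreturn_t demo_hardirq(int irq, void *dev_id)
{
	/* quick work only: mask the source, ACK it, then defer */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t demo_thread_fn(int irq, void *dev_id)
{
	/* runs in a kernel thread: may sleep while processing completions */
	return IRQ_HANDLED;
}

/* at init time, both handlers are registered together:
 *
 *	err = request_threaded_irq(irq, demo_hardirq, demo_thread_fn,
 *				   IRQF_SHARED, "demo", dev);
 */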
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index b3c5016..84d2f83 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -196,6 +196,14 @@ static inline u64 rd_reg64(void __iomem *reg)
#define caam_dma_to_cpu(value) caam32_to_cpu(value)
#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
+#define cpu_to_caam_dma64(value) \
+ (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \
+ (u64)cpu_to_caam32(upper_32_bits(value)))
+#else
+#define cpu_to_caam_dma64(value) cpu_to_caam64(value)
+#endif
+
/*
* jr_outentry
* Represents each entry in a JobR output ring
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
index 19dc64f..41cd5a3 100644
--- a/drivers/crypto/caam/sg_sw_sec4.h
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -15,7 +15,7 @@ struct sec4_sg_entry;
static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
dma_addr_t dma, u32 len, u16 offset)
{
- sec4_sg_ptr->ptr = cpu_to_caam_dma(dma);
+ sec4_sg_ptr->ptr = cpu_to_caam_dma64(dma);
sec4_sg_ptr->len = cpu_to_caam32(len);
sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset & SEC4_SG_OFFSET_MASK);
#ifdef DEBUG
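The sg_sw_sec4.h hunk pairs with the regs.h addition: sec4_sg_entry->ptr is now always a u64, and cpu_to_caam_dma64() covers the i.MX case where each 32-bit half is byte-swapped and the low half is placed in the upper word. A userspace model of what the i.MX branch of the macro computes numerically (htonl() standing in for cpu_to_caam32() on a little-endian host; purely illustrative, not driver code):

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

static uint64_t caam_dma64_imx(uint64_t v)
{
	uint32_t lo = (uint32_t)v;		/* lower_32_bits(v) */
	uint32_t hi = (uint32_t)(v >> 32);	/* upper_32_bits(v) */

	/* i.MX branch: swap each half, low half goes into the upper word */
	return ((uint64_t)htonl(lo) << 32) | htonl(hi);
}

int main(void)
{
	uint64_t dma = 0x12345678ULL;

	printf("sec4_sg ptr word: 0x%016llx\n",
	       (unsigned long long)caam_dma64_imx(dma));
	return 0;
}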