author	Alex Porosanu <alexandru.porosanu@freescale.com>	2015-02-18 18:56:50 (GMT)
committer	Honghua Yin <Hong-Hua.Yin@freescale.com>	2015-04-01 09:22:40 (GMT)
commit	f462376952a15901b408f09d2a1e0675aa628e86 (patch)
tree	355bd1959f19ccf9e8273a740c0a00d8a4093ce4 /drivers/crypto
parent	c99156df536402328fd6f31a7e372e2f2c46b538 (diff)
crypto: caam - cleanup caamalg_qi
This patch synchronizes the QI shared descriptor creation with that of the
corresponding JR descriptors, and cleans up the unused parameters of the
*edesc_alloc() functions. While here, it also fixes a formatting error in
cra_init().

Change-Id: Ic5bae734e4a6e6dcb329f7e9530422b9f8cc0377
Signed-off-by: Alex Porosanu <alexandru.porosanu@freescale.com>
Reviewed-on: http://git.am.freescale.net:8181/31370
Tested-by: Review Code-CDREVIEW <CDREVIEW@freescale.com>
Reviewed-by: Mircea Pop <mircea.pop@freescale.com>
Reviewed-by: Honghua Yin <Hong-Hua.Yin@freescale.com>
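For context on the cleanup below: the all_contig/contig out-parameters were
only ever written by the allocators and never read by their callers, so the
patch keeps that decision local, and it caches the req->dst == req->src
comparison in a src_is_dst flag wrapped in the kernel's branch-prediction
hints. A minimal user-space sketch of the caching pattern (likely()/unlikely()
are defined locally the way the kernel defines them, since this does not build
against kernel headers; the buffers and messages are illustrative only):

	#include <stdbool.h>
	#include <stdio.h>

	/* User-space stand-ins for the kernel's branch hints. */
	#define likely(x)   __builtin_expect(!!(x), 1)
	#define unlikely(x) __builtin_expect(!!(x), 0)

	struct buf { void *ptr; };

	static void process(struct buf *src, struct buf *dst)
	{
		/* Evaluate the aliasing test once and cache it, as the
		 * patch does with src_is_dst, instead of comparing the
		 * pointers again at every branch.
		 */
		bool src_is_dst = (src->ptr == dst->ptr);

		if (likely(src_is_dst))
			puts("in-place: map one buffer bidirectionally");
		else
			puts("out-of-place: map src for reads, dst for writes");
	}

	int main(void)
	{
		struct buf a = { .ptr = &a };
		struct buf b = { .ptr = &b };

		process(&a, &a);	/* common in-place case */
		process(&a, &b);	/* rarer out-of-place case */
		return 0;
	}

The same shape appears in both aead_edesc_alloc() and aead_giv_edesc_alloc()
in the hunks that follow.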
Diffstat (limited to 'drivers/crypto')
-rw-r--r--	drivers/crypto/caam/caamalg_qi.c	| 120
1 file changed, 52 insertions(+), 68 deletions(-)
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index 8ae129f..26546db 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -58,27 +58,14 @@ static inline void append_dec_op1(u32 *desc, u32 type)
}
/*
- * Wait for completion of class 1 key loading before allowing
- * error propagation
- */
-static inline void append_dec_shr_done(u32 *desc)
-{
- u32 *jump_cmd;
-
- jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL);
- set_jump_tgt_here(desc, jump_cmd);
- append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
-}
-
-/*
* For aead functions, read payload and write payload,
* both of which are specified in req->src and req->dst
*/
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}
/*
@@ -158,9 +145,6 @@ static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
append_key_aead(desc, ctx, keys_fit_inline);
set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Propagate errors from shared to job descriptor */
- append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}
static int aead_set_sh_desc(struct crypto_aead *aead)
@@ -168,7 +152,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
struct aead_tfm *tfm = &aead->base.crt_aead;
struct caam_ctx *ctx = crypto_aead_ctx(aead);
bool keys_fit_inline;
- u32 *key_jump_cmd, *jump_cmd;
+ u32 *key_jump_cmd;
u32 geniv, moveiv;
u32 *desc;
@@ -251,11 +235,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
append_key_aead(desc, ctx, keys_fit_inline);
- /* Only propagate error immediately if shared */
- jump_cmd = append_jump(desc, JUMP_TEST_ALL);
set_jump_tgt_here(desc, key_jump_cmd);
- append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
- set_jump_tgt_here(desc, jump_cmd);
/* Class 2 operation */
append_operation(desc, ctx->class2_alg_type |
@@ -284,7 +264,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
/* Load ICV */
append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
- append_dec_shr_done(desc);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "aead dec shdesc@"
@@ -971,7 +950,7 @@ static void aead_unmap(struct device *dev,
edesc->qm_sg_dma, edesc->qm_sg_bytes);
}
-static inline void aead_done(struct caam_drv_req *drv_req, u32 status)
+static void aead_done(struct caam_drv_req *drv_req, u32 status)
{
struct device *qidev;
struct aead_edesc *edesc;
@@ -982,7 +961,7 @@ static inline void aead_done(struct caam_drv_req *drv_req, u32 status)
qidev = caam_ctx->qidev;
- if (status) {
+ if (unlikely(status)) {
caam_jr_strstatus(qidev, status);
ecode = -EIO;
}
@@ -1074,8 +1053,7 @@ out:
* allocate and map the aead extended descriptor
*/
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
- bool *all_contig_ptr, bool encrypt,
- bool strip_icv)
+ bool encrypt, bool strip_icv)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
@@ -1094,12 +1072,20 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
struct qm_sg_entry *sg_table, *fd_sgt;
struct caam_drv_req *drv_req;
+ bool src_is_dst = true;
assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
- if (unlikely(req->dst != req->src)) {
+ if (likely(req->dst == req->src)) {
+ src_nents = sg_count(req->src,
+ req->cryptlen +
+ (encrypt ? authsize : 0),
+ &src_chained);
+ sgc = dma_map_sg_chained(qidev, req->src, src_nents ? : 1,
+ DMA_BIDIRECTIONAL, src_chained);
+ } else {
int extralen;
-
+ src_is_dst = false;
src_nents = sg_count(req->src, req->cryptlen, &src_chained);
if (encrypt)
@@ -1108,25 +1094,17 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
extralen = strip_icv ? (-authsize) : 0;
dst_nents = sg_count(req->dst, req->cryptlen + extralen,
&dst_chained);
- } else {
- src_nents = sg_count(req->src,
- req->cryptlen +
- (encrypt ? authsize : 0),
- &src_chained);
- }
- sgc = dma_map_sg_chained(qidev, req->assoc, assoc_nents ? : 1,
- DMA_TO_DEVICE, assoc_chained);
- if (likely(req->src == req->dst)) {
- sgc = dma_map_sg_chained(qidev, req->src, src_nents ? : 1,
- DMA_BIDIRECTIONAL, src_chained);
- } else {
sgc = dma_map_sg_chained(qidev, req->src, src_nents ? : 1,
DMA_TO_DEVICE, src_chained);
sgc = dma_map_sg_chained(qidev, req->dst, dst_nents ? : 1,
DMA_FROM_DEVICE, dst_chained);
+
}
+ sgc = dma_map_sg_chained(qidev, req->assoc, assoc_nents ? : 1,
+ DMA_TO_DEVICE, assoc_chained);
+
/* Check if data are contiguous */
iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
if (assoc_nents ||
@@ -1167,8 +1145,6 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
edesc->qm_sg_dma = qm_sg_dma;
edesc->qm_sg_bytes = qm_sg_bytes;
- *all_contig_ptr = all_contig;
-
fd_sgt[0].final = 0;
fd_sgt[0].__reserved2 = 0;
fd_sgt[0].bpid = 0;
@@ -1204,7 +1180,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
sg_to_qm_sg_last(req->dst, dst_nents,
sg_table + qm_sg_index, 0);
- if (req->dst == req->src) {
+ if (likely(src_is_dst)) {
if (src_nents <= 1) {
fd_sgt[0].addr = sg_dma_address(req->src);
fd_sgt[0].extension = 0;
@@ -1273,7 +1249,6 @@ static int aead_encrypt(struct aead_request *req)
struct device *qidev = ctx->qidev;
struct caam_drv_ctx *drv_ctx;
struct caam_drv_req *drv_req;
- bool all_contig;
int ret;
drv_ctx = get_drv_ctx(ctx, ENCRYPT);
@@ -1284,7 +1259,7 @@ static int aead_encrypt(struct aead_request *req)
return -EAGAIN;
/* allocate extended descriptor */
- edesc = aead_edesc_alloc(req, &all_contig, true, true);
+ edesc = aead_edesc_alloc(req, true, true);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
@@ -1316,7 +1291,6 @@ static int aead_decrypt(struct aead_request *req)
struct device *qidev = ctx->qidev;
struct caam_drv_ctx *drv_ctx;
struct caam_drv_req *drv_req;
- bool all_contig;
int ret = 0;
drv_ctx = get_drv_ctx(ctx, DECRYPT);
@@ -1327,7 +1301,7 @@ static int aead_decrypt(struct aead_request *req)
return -EAGAIN;
/* allocate extended descriptor */
- edesc = aead_edesc_alloc(req, &all_contig, false, true);
+ edesc = aead_edesc_alloc(req, false, true);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
@@ -1359,7 +1333,6 @@ static int tls_encrypt(struct aead_request *req)
struct device *qidev = ctx->qidev;
struct caam_drv_ctx *drv_ctx;
struct caam_drv_req *drv_req;
- bool all_contig;
int ret;
unsigned int blocksize = crypto_aead_blocksize(aead);
unsigned int padsize;
@@ -1380,7 +1353,7 @@ static int tls_encrypt(struct aead_request *req)
* ctx->authsize is temporary set to include also padlen
*/
ctx->authsize += padsize;
- edesc = aead_edesc_alloc(req, &all_contig, true, true);
+ edesc = aead_edesc_alloc(req, true, true);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
ctx->authsize -= padsize;
@@ -1413,7 +1386,6 @@ static int tls_decrypt(struct aead_request *req)
struct device *qidev = ctx->qidev;
struct caam_drv_ctx *drv_ctx;
struct caam_drv_req *drv_req;
- bool all_contig;
int ret = 0;
drv_ctx = get_drv_ctx(ctx, DECRYPT);
@@ -1431,7 +1403,7 @@ static int tls_decrypt(struct aead_request *req)
* checks padding), req->dst has to be big enough to hold payloadlen +
* padlen + icvlen.
*/
- edesc = aead_edesc_alloc(req, &all_contig, false, false);
+ edesc = aead_edesc_alloc(req, false, false);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
@@ -1462,7 +1434,7 @@ static int tls_decrypt(struct aead_request *req)
* allocate and map the aead extended descriptor for aead givencrypt
*/
static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
- *greq, u32 *contig_ptr)
+ *greq)
{
struct aead_request *req = &greq->areq;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
@@ -1482,19 +1454,22 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
struct qm_sg_entry *sg_table, *fd_sgt;
struct caam_drv_req *drv_req;
+ bool src_is_dst = true;
+
assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
src_nents = sg_count(req->src, req->cryptlen, &src_chained);
-
- if (unlikely(req->dst != req->src))
- dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
- &dst_chained);
-
sgc = dma_map_sg_chained(qidev, req->assoc, assoc_nents ? : 1,
DMA_TO_DEVICE, assoc_chained);
+
if (likely(req->src == req->dst)) {
sgc = dma_map_sg_chained(qidev, req->src, src_nents ? : 1,
DMA_BIDIRECTIONAL, src_chained);
} else {
+ src_is_dst = false;
+
+ dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
+ &dst_chained);
+
sgc = dma_map_sg_chained(qidev, req->src, src_nents ? : 1,
DMA_TO_DEVICE, src_chained);
sgc = dma_map_sg_chained(qidev, req->dst, dst_nents ? : 1,
@@ -1512,7 +1487,7 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
contig &= ~GIV_DST_CONTIG;
- if (unlikely(req->src != req->dst)) {
+ if (unlikely(!src_is_dst)) {
dst_nents = dst_nents ? : 1;
qm_sg_ents += 1;
}
@@ -1521,7 +1496,7 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
assoc_nents = assoc_nents ? : 1;
src_nents = src_nents ? : 1;
qm_sg_ents += assoc_nents + 1 + src_nents;
- if (likely(req->src == req->dst))
+ if (likely(src_is_dst))
contig &= ~GIV_DST_CONTIG;
}
@@ -1554,10 +1529,19 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
edesc->qm_sg_bytes = qm_sg_bytes;
edesc->qm_sg_dma = qm_sg_dma;
- *contig_ptr = contig;
+ fd_sgt[0].final = 0;
+ fd_sgt[0].extension = 0;
+ fd_sgt[0].__reserved2 = 0;
+ fd_sgt[0].bpid = 0;
+ fd_sgt[0].__reserved3 = 0;
+ fd_sgt[0].offset = 0;
- memset(&fd_sgt[0], 0, 2 * sizeof(fd_sgt[0]));
fd_sgt[1].final = 1;
+ fd_sgt[1].extension = 0;
+ fd_sgt[1].__reserved2 = 0;
+ fd_sgt[1].bpid = 0;
+ fd_sgt[1].__reserved3 = 0;
+ fd_sgt[1].offset = 0;
qm_sg_index = 0;
if (unlikely(!(contig & GIV_SRC_CONTIG))) {
@@ -1582,7 +1566,7 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
fd_sgt[1].addr = sg_dma_address(req->assoc);
}
- if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
+ if (unlikely(!src_is_dst && !(contig & GIV_DST_CONTIG))) {
fd_sgt[0].addr = qm_sg_dma +
(sizeof(struct qm_sg_entry) * qm_sg_index);
fd_sgt[0].extension = 1;
@@ -1592,7 +1576,7 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
sg_to_qm_sg_last(req->dst, dst_nents,
sg_table + qm_sg_index, 0);
} else {
- if (req->src == req->dst && !(contig & GIV_DST_CONTIG)) {
+ if (src_is_dst && !(contig & GIV_DST_CONTIG)) {
fd_sgt[0].extension = 1;
fd_sgt[0].addr = edesc->qm_sg_dma +
sizeof(struct qm_sg_entry) *
@@ -1615,7 +1599,6 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq)
struct caam_drv_req *drv_req;
int ivsize = crypto_aead_ivsize(aead);
struct aead_edesc *edesc;
- u32 contig;
int ret;
drv_ctx = get_drv_ctx(ctx, GIVENCRYPT);
@@ -1626,7 +1609,7 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq)
return -EAGAIN;
/* allocate extended descriptor */
- edesc = aead_giv_edesc_alloc(areq, &contig);
+ edesc = aead_giv_edesc_alloc(areq);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
@@ -2119,8 +2102,9 @@ static int caam_cra_init(struct crypto_tfm *tfm)
if (op_id < ARRAY_SIZE(digest_size)) {
ctx->authsize = digest_size[op_id];
} else {
- dev_err(ctx->jrdev, "incorrect op_id %d; must be less than %d\n",
- op_id, ARRAY_SIZE(digest_size));
+ dev_err(ctx->jrdev,
+ "incorrect op_id %d; must be less than %zu\n",
+ op_id, ARRAY_SIZE(digest_size));
caam_jr_free(ctx->jrdev);
return -EINVAL;
}
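
A side note on the cra_init() hunk above: ARRAY_SIZE() evaluates to a size_t,
so printing it through %d was undefined behaviour and drew a -Wformat warning;
the patch switches the specifier to %zu while rewrapping the call. A
stand-alone illustration (plain printf() instead of dev_err(), with a local
ARRAY_SIZE macro and made-up digest sizes, since this is not the driver's
actual table):

	#include <stdio.h>

	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

	int main(void)
	{
		/* Hypothetical values; the real digest_size table
		 * lives in caamalg_qi.c.
		 */
		static const int digest_size[] = { 20, 28, 32, 48, 64 };
		int op_id = 7;

		/* sizeof yields size_t, so %zu is the matching
		 * conversion specifier for ARRAY_SIZE(); %d would
		 * mismatch the argument type.
		 */
		printf("incorrect op_id %d; must be less than %zu\n",
		       op_id, ARRAY_SIZE(digest_size));
		return 0;
	}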