author    Ruchika Gupta <ruchika.gupta@freescale.com>    2014-06-23 14:20:26 (GMT)
committer Matthew Weigel <Matthew.Weigel@freescale.com>  2014-12-11 18:35:53 (GMT)
commit    401354d54ace84f76675f93f7539eb3c3f8708ff (patch)
tree      9882b67b6ac7b1ff3f8c0ca2939fc8bec0d89fc9
parent    34d9bfa09275d2d165112633ab2cadedae1eddce (diff)
download  linux-fsl-qoriq-401354d54ace84f76675f93f7539eb3c3f8708ff.tar.xz
crypto: caam - Correct the dma mapping for sg table
At a few places in caamhash and caamalg, a DMA-able buffer for the sg
table was being modified after it had already been mapped. As per the
DMA API, once a buffer has been mapped with DMA_TO_DEVICE it must be
treated as read-only by the driver. This patch moves the mapping of the
sg table buffer to after the driver has populated it, so the buffer is
no longer written to once it is owned by the device, as the DMA API
requires.

Signed-off-by: Ruchika Gupta <ruchika.gupta@freescale.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
(cherry picked from commit 1da2be33ad4c30a2b1d5fe3053b5b7f63e6e2baa)
Change-Id: I485040b955e27772c20623f037e8a5167404c18d
Reviewed-on: http://git.am.freescale.net:8181/17736
Tested-by: Review Code-CDREVIEW <CDREVIEW@freescale.com>
Reviewed-by: Zhengxiong Jin <Jason.Jin@freescale.com>
Tested-by: Zhengxiong Jin <Jason.Jin@freescale.com>
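For reference, the ordering the patch enforces can be sketched as
follows. This is a minimal illustration only, assuming a hypothetical
helper name (map_sec4_sg_table) and omitting the driver's real
descriptor setup; it is not the driver's actual code:

#include <linux/device.h>
#include <linux/dma-mapping.h>

/*
 * Minimal sketch of the corrected ordering: fully populate the sg
 * table in CPU memory first, then map it with DMA_TO_DEVICE. After
 * dma_map_single() the buffer is owned by the device and the CPU
 * must treat it as read-only until it is unmapped.
 */
static dma_addr_t map_sec4_sg_table(struct device *jrdev, void *sec4_sg,
				    size_t sec4_sg_bytes)
{
	dma_addr_t sec4_sg_dma;

	/*
	 * ... sec4_sg is assumed to have been filled already, e.g. via
	 * sg_to_sec4_sg_last(), before any mapping takes place ...
	 */

	sec4_sg_dma = dma_map_single(jrdev, sec4_sg, sec4_sg_bytes,
				     DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, sec4_sg_dma))
		return 0;	/* caller treats 0 as mapping failure in this sketch */

	/*
	 * From here on only the returned DMA address is used (e.g. in
	 * append_seq_in_ptr()); the table itself is not written again.
	 */
	return sec4_sg_dma;
}
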
-rw-r--r--  drivers/crypto/caam/caamalg.c   |  8
-rw-r--r--  drivers/crypto/caam/caamhash.c  | 40
2 files changed, 27 insertions, 21 deletions
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 2a4eb70..7e071b7 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -2243,8 +2243,6 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
desc_bytes;
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
- sec4_sg_bytes, DMA_TO_DEVICE);
*all_contig_ptr = all_contig;
sec4_sg_index = 0;
@@ -2279,6 +2277,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
sg_to_sec4_sg_last(req->dst, dst_nents,
edesc->sec4_sg + sec4_sg_index, 0);
}
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
return edesc;
}
@@ -2444,8 +2444,6 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
desc_bytes;
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
- sec4_sg_bytes, DMA_TO_DEVICE);
*contig_ptr = contig;
sec4_sg_index = 0;
@@ -2469,6 +2467,8 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
sg_to_sec4_sg_last(req->dst, dst_nents,
edesc->sec4_sg + sec4_sg_index, 0);
}
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
return edesc;
}
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 5592fec..52787e1 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -818,9 +818,6 @@ static int ahash_update_ctx(struct ahash_request *req)
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
DESC_JOB_IO_LEN;
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
- sec4_sg_bytes,
- DMA_TO_DEVICE);
ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
edesc->sec4_sg, DMA_BIDIRECTIONAL);
@@ -849,6 +846,10 @@ static int ahash_update_ctx(struct ahash_request *req)
init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
HDR_REVERSE);
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes,
+ DMA_TO_DEVICE);
+
append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
to_hash, LDST_SGF);
@@ -921,8 +922,6 @@ static int ahash_final_ctx(struct ahash_request *req)
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
DESC_JOB_IO_LEN;
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
- sec4_sg_bytes, DMA_TO_DEVICE);
edesc->src_nents = 0;
ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
@@ -933,6 +932,9 @@ static int ahash_final_ctx(struct ahash_request *req)
last_buflen);
(edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN;
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+
append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
LDST_SGF);
@@ -999,8 +1001,6 @@ static int ahash_finup_ctx(struct ahash_request *req)
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
DESC_JOB_IO_LEN;
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
- sec4_sg_bytes, DMA_TO_DEVICE);
ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
DMA_TO_DEVICE);
@@ -1012,6 +1012,9 @@ static int ahash_finup_ctx(struct ahash_request *req)
src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
sec4_sg_src_index, chained);
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+
append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
buflen + req->nbytes, LDST_SGF);
@@ -1066,8 +1069,6 @@ static int ahash_digest(struct ahash_request *req)
}
edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
DESC_JOB_IO_LEN;
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
- sec4_sg_bytes, DMA_TO_DEVICE);
edesc->src_nents = src_nents;
edesc->chained = chained;
@@ -1077,6 +1078,8 @@ static int ahash_digest(struct ahash_request *req)
if (src_nents) {
sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
src_dma = edesc->sec4_sg_dma;
options = LDST_SGF;
} else {
@@ -1207,9 +1210,6 @@ static int ahash_update_no_ctx(struct ahash_request *req)
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
DESC_JOB_IO_LEN;
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
- sec4_sg_bytes,
- DMA_TO_DEVICE);
state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
buf, *buflen);
@@ -1226,6 +1226,10 @@ static int ahash_update_no_ctx(struct ahash_request *req)
init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
HDR_REVERSE);
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes,
+ DMA_TO_DEVICE);
+
append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);
map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
@@ -1307,8 +1311,6 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
DESC_JOB_IO_LEN;
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
- sec4_sg_bytes, DMA_TO_DEVICE);
state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
state->buf_dma, buflen,
@@ -1317,6 +1319,9 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1,
chained);
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+
append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
req->nbytes, LDST_SGF);
@@ -1390,13 +1395,14 @@ static int ahash_update_first(struct ahash_request *req)
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
DESC_JOB_IO_LEN;
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
- sec4_sg_bytes,
- DMA_TO_DEVICE);
if (src_nents) {
sg_to_sec4_sg_last(req->src, src_nents,
edesc->sec4_sg, 0);
+ edesc->sec4_sg_dma = dma_map_single(jrdev,
+ edesc->sec4_sg,
+ sec4_sg_bytes,
+ DMA_TO_DEVICE);
src_dma = edesc->sec4_sg_dma;
options = LDST_SGF;
} else {