Diffstat (limited to 'drivers/crypto/caam')
-rw-r--r--  drivers/crypto/caam/caamalg.c  | 12
-rw-r--r--  drivers/crypto/caam/caamhash.c | 43
-rw-r--r--  drivers/crypto/caam/ctrl.c     | 25
-rw-r--r--  drivers/crypto/caam/regs.h     |  3
4 files changed, 44 insertions(+), 39 deletions(-)
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 0f117d0..023a5d8 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -3070,8 +3070,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
- sec4_sg_bytes, GFP_DMA | flags);
+ edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
+ GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
return ERR_PTR(-ENOMEM);
@@ -3310,8 +3310,8 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
- sec4_sg_bytes, GFP_DMA | flags);
+ edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
+ GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
return ERR_PTR(-ENOMEM);
@@ -3581,8 +3581,8 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
- sec4_sg_bytes, GFP_DMA | flags);
+ edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
+ GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
return ERR_PTR(-ENOMEM);
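
The three caamalg.c hunks above replace kmalloc() with kzalloc(), so the extended descriptor, the hardware descriptor commands and the trailing scatter/gather link table all start out zeroed, and they switch to the sizeof(*edesc) idiom. A minimal user-space sketch of that combined-allocation pattern, with a simplified structure and calloc() standing in for kzalloc() (none of these names are the driver's):

#include <stdlib.h>
#include <stdio.h>

/* Simplified stand-in for the driver's extended descriptor: a fixed
 * header followed, in the same allocation, by variable-length
 * descriptor commands and a scatter/gather link table. */
struct edesc {
	size_t desc_bytes;
	size_t sg_bytes;
	unsigned char data[];	/* hw desc commands + link table */
};

/* calloc() plays the role of kzalloc(): the whole region, including
 * the trailing link table, is guaranteed to be zero on return. */
static struct edesc *edesc_alloc(size_t desc_bytes, size_t sg_bytes)
{
	struct edesc *e = calloc(1, sizeof(*e) + desc_bytes + sg_bytes);

	if (!e)
		return NULL;
	e->desc_bytes = desc_bytes;
	e->sg_bytes = sg_bytes;
	return e;
}

int main(void)
{
	struct edesc *e = edesc_alloc(64, 4 * 16);

	if (!e)
		return 1;
	printf("allocated %zu + %zu trailing bytes, zero-filled\n",
	       e->desc_bytes, e->sg_bytes);
	free(e);
	return 0;
}

Zeroing the whole region up front means any link-table entries that are never filled in contain zeros rather than stale heap data.
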
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index e99a45b..fb913c3 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -807,7 +807,7 @@ static int ahash_update_ctx(struct ahash_request *req)
* allocate space for base edesc and hw desc commands,
* link tables
*/
- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
sec4_sg_bytes, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev,
@@ -835,17 +835,17 @@ static int ahash_update_ctx(struct ahash_request *req)
src_map_to_sec4_sg(jrdev, req->src, src_nents,
edesc->sec4_sg + sec4_sg_src_index,
chained);
- if (*next_buflen) {
+ if (*next_buflen)
scatterwalk_map_and_copy(next_buf, req->src,
to_hash - *buflen,
*next_buflen, 0);
- state->current_buf = !state->current_buf;
- }
} else {
(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
SEC4_SG_LEN_FIN;
}
+ state->current_buf = !state->current_buf;
+
sh_len = desc_len(sh_desc);
desc = edesc->hw_desc;
init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
@@ -909,17 +909,18 @@ static int ahash_final_ctx(struct ahash_request *req)
state->buflen_1;
u32 *sh_desc = ctx->sh_desc_fin, *desc;
dma_addr_t ptr = ctx->sh_desc_fin_dma;
- int sec4_sg_bytes;
+ int sec4_sg_bytes, sec4_sg_src_index;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
int ret = 0;
int sh_len;
- sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);
+ sec4_sg_src_index = 1 + (buflen ? 1 : 0);
+ sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
- sec4_sg_bytes, GFP_DMA | flags);
+ edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
+ GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
return -ENOMEM;
@@ -942,7 +943,7 @@ static int ahash_final_ctx(struct ahash_request *req)
state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
buf, state->buf_dma, buflen,
last_buflen);
- (edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN;
+ (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes, DMA_TO_DEVICE);
@@ -1005,8 +1006,8 @@ static int ahash_finup_ctx(struct ahash_request *req)
sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
- sec4_sg_bytes, GFP_DMA | flags);
+ edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
+ GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
return -ENOMEM;
@@ -1091,8 +1092,8 @@ static int ahash_digest(struct ahash_request *req)
sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes +
- DESC_JOB_IO_LEN, GFP_DMA | flags);
+ edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes + DESC_JOB_IO_LEN,
+ GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
return -ENOMEM;
@@ -1165,8 +1166,7 @@ static int ahash_final_no_ctx(struct ahash_request *req)
int sh_len;
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN,
- GFP_DMA | flags);
+ edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
return -ENOMEM;
@@ -1245,7 +1245,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
* allocate space for base edesc and hw desc commands,
* link tables
*/
- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
sec4_sg_bytes, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev,
@@ -1268,9 +1268,10 @@ static int ahash_update_no_ctx(struct ahash_request *req)
scatterwalk_map_and_copy(next_buf, req->src,
to_hash - *buflen,
*next_buflen, 0);
- state->current_buf = !state->current_buf;
}
+ state->current_buf = !state->current_buf;
+
sh_len = desc_len(sh_desc);
desc = edesc->hw_desc;
init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
@@ -1352,8 +1353,8 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
- sec4_sg_bytes, GFP_DMA | flags);
+ edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
+ GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
return -ENOMEM;
@@ -1447,7 +1448,7 @@ static int ahash_update_first(struct ahash_request *req)
* allocate space for base edesc and hw desc commands,
* link tables
*/
- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
sec4_sg_bytes, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev,
@@ -1544,6 +1545,8 @@ static int ahash_init(struct ahash_request *req)
state->current_buf = 0;
state->buf_dma = 0;
+ state->buflen_0 = 0;
+ state->buflen_1 = 0;
return 0;
}
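
Beyond the kzalloc() conversions, two of the caamhash.c hunks are plain bug fixes: the current_buf ping-pong toggle now happens on every update path rather than only when next_buflen is non-zero, and ahash_final_ctx() now marks the final scatter/gather entry by entry index (sec4_sg_src_index) instead of by the table's size in bytes, which previously indexed far past the end of the table. A small, self-contained illustration of that second fix, using an invented entry type and flag value rather than the real sec4_sg_entry layout:

#include <stdio.h>
#include <stdint.h>

/* Stand-in for struct sec4_sg_entry; only the length/flags word matters. */
struct sg_entry {
	uint32_t len;
};

#define SG_LEN_FIN 0x40000000u	/* illustrative "final entry" flag */

static void mark_final_entry(struct sg_entry *tbl, int nentries)
{
	/* Pointer arithmetic on struct sg_entry * advances one *entry*
	 * at a time, so the last entry is tbl + nentries - 1.  Using the
	 * table size in bytes here, as the old code effectively did,
	 * steps sizeof(struct sg_entry) times too far. */
	(tbl + nentries - 1)->len |= SG_LEN_FIN;
}

int main(void)
{
	struct sg_entry tbl[2] = { { 32 }, { 16 } };

	mark_final_entry(tbl, 2);
	printf("last entry len/flags: 0x%08x\n", (unsigned)tbl[1].len);
	return 0;
}

The same hunk set also zero-initializes buflen_0 and buflen_1 in ahash_init(), so the first update starts from a known-empty buffer state.
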
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 0359c71..1615916 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -16,6 +16,12 @@
#include "qi.h"
#endif
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+static const bool is_arm = true;
+#else
+static const bool is_arm;
+#endif
+
/*
* Descriptor to instantiate RNG State Handle 0 in normal mode and
* load the JDKEK, TDKEK and TDSK registers
@@ -488,7 +494,7 @@ static int caam_probe(struct platform_device *pdev)
#ifdef CONFIG_DEBUG_FS
struct caam_perfmon *perfmon;
#endif
- u32 mcr, scfgr, comp_params;
+ u32 scfgr, comp_params;
int pg_size;
int BLOCK_OFFSET = 0;
@@ -537,11 +543,9 @@ static int caam_probe(struct platform_device *pdev)
* Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
* long pointers in master configuration register
*/
- mcr = rd_reg32(&ctrl->mcr);
- mcr = (mcr & ~MCFGR_AWCACHE_MASK) | (0x2 << MCFGR_AWCACHE_SHIFT) |
- MCFGR_WDENABLE | (sizeof(dma_addr_t) == sizeof(u64) ?
- MCFGR_LONG_PTR : 0);
- wr_reg32(&ctrl->mcr, mcr);
+ setbits32(&ctrl->mcr, MCFGR_WDENABLE | MCFGR_LARGE_BURST |
+ (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0) |
+ (is_arm ? 0x2 << MCFGR_AWCACHE_SHIFT : 0));
/*
* Read the Compile Time paramters and SCFGR to determine
@@ -820,7 +824,6 @@ static int caam_resume(struct device *dev)
struct caam_drv_private *caam_priv;
struct caam_ctrl __iomem *ctrl;
struct caam_queue_if __iomem *qi;
- u32 mcr;
int ret;
caam_priv = dev_get_drvdata(dev);
@@ -830,11 +833,9 @@ static int caam_resume(struct device *dev)
* Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
* long pointers in master configuration register
*/
- mcr = rd_reg32(&ctrl->mcr);
- mcr = (mcr & ~MCFGR_AWCACHE_MASK) | (0x2 << MCFGR_AWCACHE_SHIFT) |
- MCFGR_WDENABLE | (sizeof(dma_addr_t) == sizeof(u64) ?
- MCFGR_LONG_PTR : 0);
- wr_reg32(&ctrl->mcr, mcr);
+ setbits32(&ctrl->mcr, MCFGR_WDENABLE | MCFGR_LARGE_BURST |
+ (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0) |
+ (is_arm ? 0x2 << MCFGR_AWCACHE_SHIFT : 0));
/* Enable QI interface of SEC */
if (caam_priv->qi_present)
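
In ctrl.c the open-coded read-modify-write of the master configuration register is collapsed into a single setbits32() call that also sets the new MCFGR_LARGE_BURST bit, and the AWCACHE field is now programmed only on ARM/ARM64 via the compile-time is_arm constant, so non-ARM builds keep the field's reset value. A hedged user-space sketch of that pattern, with invented bit names and a plain variable standing in for the memory-mapped register:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative bit definitions, not the real MCFGR layout. */
#define CFG_WDENABLE		(1u << 30)
#define CFG_LARGE_BURST		(1u << 2)
#define CFG_AWCACHE_SHIFT	8

#if defined(__arm__) || defined(__aarch64__)
static const bool is_arm = true;
#else
static const bool is_arm = false;
#endif

/* setbits32()-style helper: OR bits in, never clear existing ones. */
static void setbits32(volatile uint32_t *reg, uint32_t mask)
{
	*reg |= mask;
}

int main(void)
{
	uint32_t mcr = 0x1;	/* pretend reset value of the register */

	/* The is_arm constant folds away at compile time, so the AWCACHE
	 * term disappears entirely on non-ARM builds without an #ifdef
	 * around the register write itself. */
	setbits32(&mcr, CFG_WDENABLE | CFG_LARGE_BURST |
			(is_arm ? 0x2u << CFG_AWCACHE_SHIFT : 0));

	printf("mcr = 0x%08x\n", (unsigned)mcr);
	return 0;
}

Because setbits32() only sets bits, probe and resume no longer rewrite the AWCACHE field on every pass the way the old mask-and-or sequence did.
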
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 72193f8..b2715d4 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -413,7 +413,8 @@ struct caam_ctrl {
#define MCFGR_AXIPIPE_MASK (0xf << MCFGR_AXIPIPE_SHIFT)
#define MCFGR_AXIPRI 0x00000008 /* Assert AXI priority sideband */
-#define MCFGR_BURST_64 0x00000001 /* Max burst size */
+#define MCFGR_LARGE_BURST 0x00000004 /* 128/256-byte burst size */
+#define MCFGR_BURST_64 0x00000001 /* 64-byte burst size */
/* JRSTART register offsets */
#define JRSTART_JR0_START 0x00000001 /* Start Job ring 0 */