author     Linus Torvalds <torvalds@linux-foundation.org>  2015-08-17 14:57:46 (GMT)
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-08-17 14:57:46 (GMT)
commit     a36304b9e154ccaff67b1d1d0ece6ad6380e3648 (patch)
tree       4d487254bb61712cb64146763d787e2b1040d4e9 /drivers
parent     2c6625cd545bdd66acff14f3394865d43920a5c7 (diff)
parent     b310c178e6d897f82abb9da3af1cd7c02b09f592 (diff)
download   linux-a36304b9e154ccaff67b1d1d0ece6ad6380e3648.tar.xz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto fixes from Herbert Xu:
 "This fixes the following issues:

   - a regression caused by the conversion of IPsec ESP to the new AEAD
     interface: ESN with authencesn no longer works because it relied on
     the AD input SG list having a specific layout, which is no longer
     the case.  In linux-next authencesn is fixed properly and no longer
     assumes anything about the SG list format, while for this release a
     minimal fix is applied to authencesn so that it works with the new
     linear layout.

   - fix memory corruption caused by a bogus index in the caam hash code.

   - fix powerpc nx SHA hashing, which could cause module load failures
     if module signature verification is enabled"

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  crypto: caam - fix memory corruption in ahash_final_ctx
  crypto: nx - respect sg limit bounds when building sg lists for SHA
  crypto: authencesn - Fix breakage with new ESP code
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/crypto/caam/caamhash.c  |  7
-rw-r--r--  drivers/crypto/nx/nx-sha256.c   | 27
-rw-r--r--  drivers/crypto/nx/nx-sha512.c   | 28
3 files changed, 36 insertions(+), 26 deletions(-)
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index dae1e80..f9c7875 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -909,13 +909,14 @@ static int ahash_final_ctx(struct ahash_request *req)
state->buflen_1;
u32 *sh_desc = ctx->sh_desc_fin, *desc;
dma_addr_t ptr = ctx->sh_desc_fin_dma;
- int sec4_sg_bytes;
+ int sec4_sg_bytes, sec4_sg_src_index;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
int ret = 0;
int sh_len;
- sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);
+ sec4_sg_src_index = 1 + (buflen ? 1 : 0);
+ sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
@@ -942,7 +943,7 @@ static int ahash_final_ctx(struct ahash_request *req)
state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
buf, state->buf_dma, buflen,
last_buflen);
- (edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN;
+ (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes, DMA_TO_DEVICE);
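
The caam hunk above fixes an indexing bug: sec4_sg_bytes is the size of the link table in bytes, but it was also used as an element offset into the sec4_sg_entry array, so the SEC4_SG_LEN_FIN write landed well past the end of the table. A minimal standalone sketch of the same pattern (the struct layout, names and LEN_FIN value here are placeholders, not the driver's definitions):

#include <stddef.h>
#include <stdint.h>

/* stand-ins for the driver's struct sec4_sg_entry and SEC4_SG_LEN_FIN */
struct sg_entry {
	uint64_t addr;
	uint32_t len;
	uint32_t bpid_offset;
};

#define LEN_FIN 0x40000000u

static void mark_final(struct sg_entry *table, int nents)
{
	size_t nbytes = nents * sizeof(struct sg_entry);
	(void)nbytes;

	/* Pre-fix bug: indexing by the byte count walks far past the end
	 * of the table and corrupts whatever follows it:
	 *     (table + nbytes - 1)->len |= LEN_FIN;
	 * Post-fix: index by the entry count, which is what the new
	 * sec4_sg_src_index variable carries. */
	(table + nents - 1)->len |= LEN_FIN;
}

int main(void)
{
	struct sg_entry table[2] = { { 0 } };

	mark_final(table, 2);
	return table[1].len == LEN_FIN ? 0 : 1;
}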
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
index 08f8d5c..becb738 100644
--- a/drivers/crypto/nx/nx-sha256.c
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -71,7 +71,6 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
struct sha256_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
- struct nx_sg *in_sg;
struct nx_sg *out_sg;
u64 to_process = 0, leftover, total;
unsigned long irq_flags;
@@ -97,7 +96,6 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
- in_sg = nx_ctx->in_sg;
max_sg_len = min_t(u64, nx_ctx->ap->sglen,
nx_driver.of.max_sg_len/sizeof(struct nx_sg));
max_sg_len = min_t(u64, max_sg_len,
@@ -114,17 +112,12 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
}
do {
- /*
- * to_process: the SHA256_BLOCK_SIZE data chunk to process in
- * this update. This value is also restricted by the sg list
- * limits.
- */
- to_process = total - to_process;
- to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
+ int used_sgs = 0;
+ struct nx_sg *in_sg = nx_ctx->in_sg;
if (buf_len) {
data_len = buf_len;
- in_sg = nx_build_sg_list(nx_ctx->in_sg,
+ in_sg = nx_build_sg_list(in_sg,
(u8 *) sctx->buf,
&data_len,
max_sg_len);
@@ -133,15 +126,27 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
rc = -EINVAL;
goto out;
}
+ used_sgs = in_sg - nx_ctx->in_sg;
}
+ /* to_process: SHA256_BLOCK_SIZE aligned chunk to be
+ * processed in this iteration. This value is restricted
+ * by sg list limits and number of sgs we already used
+ * for leftover data. (see above)
+ * In ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
+ * but because data may not be aligned, we need to account
+ * for that too. */
+ to_process = min_t(u64, total,
+ (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
+ to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
+
data_len = to_process - buf_len;
in_sg = nx_build_sg_list(in_sg, (u8 *) data,
&data_len, max_sg_len);
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
- to_process = (data_len + buf_len);
+ to_process = data_len + buf_len;
leftover = total - to_process;
/*
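
The comment added in the hunk above documents why to_process is now bounded by the scatter/gather capacity rather than only by the data remaining. A small standalone sketch of that arithmetic, assuming illustrative values for NX_PAGE_SIZE and the sg limits (the real driver derives max_sg_len from nx_ctx->ap->sglen and the device-tree limit):

#include <stdint.h>
#include <stdio.h>

#define NX_PAGE_SIZE      4096ULL    /* assumed page size, illustration only */
#define SHA256_BLOCK_SIZE 64ULL

/* Mirror of the bound introduced in nx_sha256_update(): cap the chunk by
 * the sg entries still available after the buffered leftover, keep one
 * spare entry for unaligned data, then round down to whole SHA-256
 * blocks. */
static uint64_t chunk_size(uint64_t total, uint64_t max_sg_len,
			   uint64_t used_sgs)
{
	uint64_t to_process = total;
	uint64_t sg_cap = (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE;

	if (to_process > sg_cap)
		to_process = sg_cap;

	return to_process & ~(SHA256_BLOCK_SIZE - 1);
}

int main(void)
{
	/* e.g. 1 MiB pending, a 32-entry sg list, 1 entry already holding
	 * buffered leftover data -> at most 30 pages (122880 bytes) now. */
	printf("%llu\n",
	       (unsigned long long)chunk_size(1048576, 32, 1));
	return 0;
}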
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
index aff0fe5..b6e183d 100644
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -71,7 +71,6 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
struct sha512_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
- struct nx_sg *in_sg;
struct nx_sg *out_sg;
u64 to_process, leftover = 0, total;
unsigned long irq_flags;
@@ -97,7 +96,6 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
- in_sg = nx_ctx->in_sg;
max_sg_len = min_t(u64, nx_ctx->ap->sglen,
nx_driver.of.max_sg_len/sizeof(struct nx_sg));
max_sg_len = min_t(u64, max_sg_len,
@@ -114,18 +112,12 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
}
do {
- /*
- * to_process: the SHA512_BLOCK_SIZE data chunk to process in
- * this update. This value is also restricted by the sg list
- * limits.
- */
- to_process = total - leftover;
- to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
- leftover = total - to_process;
+ int used_sgs = 0;
+ struct nx_sg *in_sg = nx_ctx->in_sg;
if (buf_len) {
data_len = buf_len;
- in_sg = nx_build_sg_list(nx_ctx->in_sg,
+ in_sg = nx_build_sg_list(in_sg,
(u8 *) sctx->buf,
&data_len, max_sg_len);
@@ -133,8 +125,20 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
rc = -EINVAL;
goto out;
}
+ used_sgs = in_sg - nx_ctx->in_sg;
}
+ /* to_process: SHA512_BLOCK_SIZE aligned chunk to be
+ * processed in this iteration. This value is restricted
+ * by sg list limits and number of sgs we already used
+ * for leftover data. (see above)
+ * In ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
+ * but because data may not be aligned, we need to account
+ * for that too. */
+ to_process = min_t(u64, total,
+ (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
+ to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
+
data_len = to_process - buf_len;
in_sg = nx_build_sg_list(in_sg, (u8 *) data,
&data_len, max_sg_len);
@@ -146,7 +150,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
goto out;
}
- to_process = (data_len + buf_len);
+ to_process = data_len + buf_len;
leftover = total - to_process;
/*
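
The nx-sha512 hunks apply the same bound, only with SHA512_BLOCK_SIZE (128 bytes) as the alignment unit. The rounding expression to_process & ~(SHA512_BLOCK_SIZE - 1) used in both files relies on the block size being a power of two; a tiny standalone check with an arbitrary unaligned length:

#include <assert.h>
#include <stdint.h>

#define SHA512_BLOCK_SIZE 128ULL

int main(void)
{
	uint64_t to_process = 1000;	/* arbitrary unaligned length */
	uint64_t aligned = to_process & ~(SHA512_BLOCK_SIZE - 1);

	/* 1000 rounded down to whole 128-byte blocks is 896. */
	assert(aligned == 896);
	assert(aligned % SHA512_BLOCK_SIZE == 0);
	return 0;
}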