author		Horia Geanta <horia.geanta@freescale.com>	2013-10-30 17:50:44 (GMT)
committer	Jose Rivera <German.Rivera@freescale.com>	2014-03-28 13:38:47 (GMT)
commit		6e7e54c8a5649c66ab14c5b0e4baac89b9e715b6 (patch)
tree		e6eb89333bf03c248e9a54944629d431d2ed02c5 /drivers
parent		3ff4b0241cf5bc265e8f61f53b39b701fa0d9317 (diff)
download	linux-fsl-qoriq-6e7e54c8a5649c66ab14c5b0e4baac89b9e715b6.tar.xz
crypto: caam - add support for TLS 1.0 record
TLS 1.0 descriptors run on SEC Era 4 or higher. For now, only the
tls10(hmac(sha1),cbc(aes)) algorithm is registered by the driver.

Change-Id: I98a71d8eb61a0e5f2dd65835e99b1c906468bf52
Signed-off-by: Tudor Ambarus <tudor.ambarus@freescale.com>
Signed-off-by: Horia Geanta <horia.geanta@freescale.com>
Reviewed-on: http://git.am.freescale.net:8181/10376
Reviewed-by: Cristian Stoica <cristian.stoica@freescale.com>
Reviewed-by: Mircea Pop <mircea.pop@freescale.com>
Reviewed-by: Alexandru Porosanu <alexandru.porosanu@freescale.com>
Tested-by: Review Code-CDREVIEW <CDREVIEW@freescale.com>
Reviewed-by: Jose Rivera <German.Rivera@freescale.com>
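For context, not part of the patch itself: the assoclen = 13 used throughout the new descriptors is the TLS 1.0 MAC pseudo-header that the caller passes as associated data. One way to picture that layout is sketched below; the struct and field names are illustrative only, since the driver consumes it as 13 opaque bytes.

#include <linux/types.h>

struct tls10_assoc_data {		/* hypothetical, for illustration */
	__be64 seq_num;			/* implicit 64-bit record sequence number */
	u8     type;			/* ContentType, e.g. 23 = application_data */
	__be16 version;			/* ProtocolVersion, 0x0301 for TLS 1.0 */
	__be16 length;			/* length of the plaintext fragment */
} __packed;				/* 8 + 1 + 2 + 2 = 13 bytes */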
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/crypto/caam/caamalg.c	697
-rw-r--r--	drivers/crypto/caam/desc.h	25
2 files changed, 715 insertions(+), 7 deletions(-)
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 2f14c7e..95e1d1a 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -70,6 +70,9 @@
#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 21 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
+#define DESC_TLS_BASE (4 * CAAM_CMD_SZ)
+#define DESC_TLS10_ENC_LEN (DESC_TLS_BASE + 23 * CAAM_CMD_SZ)
+
#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
20 * CAAM_CMD_SZ)
@@ -452,6 +455,294 @@ static int aead_setauthsize(struct crypto_aead *authenc,
return 0;
}
+static int tls_set_sh_desc(struct crypto_aead *aead)
+{
+ struct aead_tfm *tfm = &aead->base.crt_aead;
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ bool keys_fit_inline = false;
+ u32 *key_jump_cmd, *zero_payload_jump_cmd;
+ u32 genpad, clrw, jumpback, stidx;
+ u32 *desc;
+ unsigned int blocksize = crypto_aead_blocksize(aead);
+ /* Associated data length is always 13 bytes for TLS 1.0 */
+ unsigned int assoclen = 13;
+
+ if (!ctx->enckeylen || !ctx->authsize)
+ return 0;
+
+ /*
+ * TLS 1.0 encrypt shared descriptor
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+ if (DESC_TLS10_ENC_LEN + DESC_JOB_IO_LEN +
+ ctx->split_key_pad_len + ctx->enckeylen <=
+ CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = true;
+
+ desc = ctx->sh_desc_enc;
+
+ stidx = 1 << HDR_START_IDX_SHIFT;
+ init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
+
+ /* skip key loading if keys are already loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_CLASS_BOTH | JUMP_JSL |
+ JUMP_TEST_ALL | JUMP_COND_SHRD |
+ JUMP_COND_SELF);
+ if (keys_fit_inline) {
+ append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
+ ctx->split_key_len, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ append_key_as_imm(desc, (void *)ctx->key +
+ ctx->split_key_pad_len, ctx->enckeylen,
+ ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ } else {
+ append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
+ ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ }
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* class 2 operation */
+ append_operation(desc, ctx->class2_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+ /* class 1 operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* payloadlen = input data length - (assoclen + ivlen) */
+ append_math_sub_imm_u32(desc, VARSEQINLEN, SEQINLEN, IMM, assoclen +
+ tfm->ivsize);
+
+ /* math1 = payloadlen + icvlen */
+ append_math_add_imm_u32(desc, REG1, VARSEQINLEN, IMM, ctx->authsize);
+
+ /* padlen = block_size - math1 % block_size */
+ append_math_and_imm_u32(desc, REG3, REG1, IMM, blocksize - 1);
+ append_math_sub_imm_u32(desc, REG2, IMM, REG3, blocksize);
+
+ /* cryptlen = payloadlen + icvlen + padlen */
+ append_math_add(desc, VARSEQOUTLEN, REG1, REG2, 4);
+
+ /* if payload length is zero, jump to zero-payload commands */
+ append_math_add(desc, NONE, ZERO, VARSEQINLEN, 4);
+ zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+
+ /* read assoc for authentication */
+ append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
+ FIFOLD_TYPE_MSG);
+ /* load iv in context1 */
+ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
+ LDST_CLASS_1_CCB | tfm->ivsize);
+ /* in-snoop payload: input goes to both class 1 and class 2 */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLD_TYPE_MSG |
+ FIFOLD_TYPE_LAST2 | FIFOLDST_VLF);
+ /* jump over the zero-payload commands */
+ append_jump(desc, JUMP_TEST_ALL | 3);
+
+ /* zero-payload commands */
+ set_jump_tgt_here(desc, zero_payload_jump_cmd);
+ /* assoc data is the only data for authentication */
+ append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
+ /* load iv in context1 */
+ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
+ LDST_CLASS_1_CCB | tfm->ivsize);
+
+ /* send icv to encryption */
+ append_move(desc, MOVE_SRC_CLASS2CTX | MOVE_DEST_CLASS1INFIFO |
+ ctx->authsize);
+
+ /* generate padding and send it to encryption */
+ genpad = NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_LC1 | NFIFOENTRY_FC1 |
+ NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_PTYPE_N;
+ append_load_imm_u32(desc, genpad, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO_SZM | LDST_IMM |
+ (2 & LDST_LEN_MASK));
+
+ /* store encrypted payload, icv and padding */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
+
+ ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "tls enc shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ /*
+ * TLS 1.0 decrypt shared descriptor
+ * Keys do not fit inline, regardless of algorithms used
+ */
+ desc = ctx->sh_desc_dec;
+
+ stidx = 1 << HDR_START_IDX_SHIFT;
+ init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
+
+ /* skip key loading if keys are already loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_CLASS_BOTH | JUMP_JSL |
+ JUMP_TEST_ALL | JUMP_COND_SHRD |
+ JUMP_COND_SELF);
+ append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
+ ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* class 2 operation */
+ append_operation(desc, ctx->class2_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+ /* class 1 operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
+
+ /* VSIL = input data length - 2 * block_size */
+ append_math_sub_imm_u32(desc, VARSEQINLEN, SEQINLEN, IMM, 2 *
+ blocksize);
+
+ /*
+ * payloadlen + icvlen + padlen = input data length - (assoclen +
+ * ivsize)
+ */
+ append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, assoclen +
+ tfm->ivsize);
+
+ /* skip data to the last but one cipher block */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | LDST_VLF);
+
+ /* load iv for the last cipher block */
+ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
+ LDST_CLASS_1_CCB | tfm->ivsize);
+
+ /* read last cipher block */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
+ FIFOLD_TYPE_LAST1 | blocksize);
+
+ /* move decrypted block into math0 and math1 */
+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO | MOVE_DEST_MATH0 |
+ blocksize);
+
+ /* compute padding length */
+ append_math_and_imm_u64(desc, REG1, REG1, IMM, 255);
+ append_math_add(desc, REG1, REG1, ONE, 4);
+
+ /* clear cha1 specific registers */
+ clrw = CLRW_CLR_C1MODE | CLRW_CLR_C1DATAS | CLRW_CLR_C1CTX |
+ CLRW_RESET_CLS1_CHA;
+ append_load_imm_u32(desc, clrw, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_CLRW | LDST_IMM);
+
+ /* rewind input sequence */
+ append_seq_in_ptr_intlen(desc, 0, 65535, SQIN_RTO);
+
+ /* key1 is in decryption form */
+ append_operation(desc, ctx->class1_alg_type | OP_ALG_AAI_DK |
+ OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
+
+ /* read sequence number */
+ append_seq_fifo_load(desc, 8, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG);
+ /* load Type, Version and Len fields in math0 */
+ append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_DECO |
+ LDST_SRCDST_WORD_DECO_MATH0 | 5);
+
+ /* load iv in context1 */
+ append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_1_CCB |
+ LDST_SRCDST_WORD_CLASS_CTX | tfm->ivsize);
+
+ append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
+
+ /* math2 = icvlen + padlen */
+ append_math_add_imm_u32(desc, REG2, REG1, IMM, ctx->authsize);
+
+ /* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen) */
+ append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
+
+ /* VSOL = payloadlen + icvlen + padlen */
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, 4);
+
+ /* update Len field */
+ append_math_rshift_imm_u64(desc, REG0, REG0, IMM, 24);
+ append_math_sub(desc, REG0, REG0, REG2, 8);
+
+ /* send Type, Version and Len(pre ICV) fields to authentication */
+ append_move(desc, MOVE_WAITCOMP |
+ MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
+ (3 << MOVE_OFFSET_SHIFT) | 5);
+
+ /* store decrypted payload, icv and padding */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
+
+ /* out-snoop payload: class 1 output also feeds class 2 */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
+ FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LAST2 |
+ FIFOLDST_VLF);
+
+ append_math_add(desc, VARSEQINLEN, ZERO, REG2, 4);
+
+ /* load icvlen and padlen */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
+ FIFOLD_TYPE_LAST1 | FIFOLDST_VLF);
+
+ /* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen) */
+ append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
+
+ /* move seqoutptr fields into math registers */
+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF | MOVE_DEST_MATH0 |
+ (53 * 4 << MOVE_OFFSET_SHIFT) | 20);
+ /* seqinptr will point to seqoutptr */
+ append_math_and_imm_u32(desc, REG0, REG0, IMM,
+ ~(CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR));
+ /* Load jump command */
+ jumpback = CMD_JUMP | (char)-9;
+ append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
+ LDST_SRCDST_WORD_DECO_MATH2 |
+ (4 << LDST_OFFSET_SHIFT));
+ /* move updated seqinptr fields to JD */
+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 | MOVE_DEST_DESCBUF |
+ (53 * 4 << MOVE_OFFSET_SHIFT) | 24);
+ /* read updated seqinptr */
+ append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 6);
+
+ /* skip payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | FIFOLDST_VLF);
+ /* check icv */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_ICV |
+ FIFOLD_TYPE_LAST2 | ctx->authsize);
+
+ ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "tls dec shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ return 0;
+}
+
+static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(tls);
+
+ ctx->authsize = authsize;
+
+ return 0;
+}
+
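A quick, illustrative check of the inline-key budget test in tls_set_sh_desc() above. CAAM_CMD_SZ (4 bytes per descriptor word) and the 256-byte limit follow from the "64-word Descriptor h/w Buffer" comment in the code; DESC_JOB_IO_LEN is an assumption here, since it depends on the CAAM pointer size.

#include <stdio.h>

#define CAAM_CMD_SZ		4			/* one descriptor word */
#define CAAM_DESC_BYTES_MAX	(64 * CAAM_CMD_SZ)	/* 64-word descriptor buffer */
#define DESC_TLS10_ENC_LEN	((4 + 23) * CAAM_CMD_SZ)
#define DESC_JOB_IO_LEN		44			/* assumed; pointer-size dependent */

int main(void)
{
	unsigned int split_key_pad_len = 48;	/* SHA-1: ALIGN(2 * 20, 16) */
	unsigned int enckeylen = 16;		/* AES-128 */
	unsigned int need = DESC_TLS10_ENC_LEN + DESC_JOB_IO_LEN +
			    split_key_pad_len + enckeylen;

	/* 108 + 44 + 48 + 16 = 216 <= 256, so keys are expected to fit inline */
	printf("need %u of %u bytes -> keys %s inline\n", need, CAAM_DESC_BYTES_MAX,
	       need <= CAAM_DESC_BYTES_MAX ? "fit" : "do not fit");
	return 0;
}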
static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
u32 authkeylen)
{
@@ -535,6 +826,80 @@ badkey:
return -EINVAL;
}
+static int tls_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
+ static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ struct rtattr *rta = (void *)key;
+ struct crypto_authenc_key_param *param;
+ unsigned int authkeylen;
+ unsigned int enckeylen;
+ int ret = 0;
+
+ param = RTA_DATA(rta);
+ enckeylen = be32_to_cpu(param->enckeylen);
+
+ key += RTA_ALIGN(rta->rta_len);
+ keylen -= RTA_ALIGN(rta->rta_len);
+
+ if (keylen < enckeylen)
+ goto badkey;
+
+ authkeylen = keylen - enckeylen;
+
+ if (keylen > CAAM_MAX_KEY_SIZE)
+ goto badkey;
+
+ /* Pick class 2 key length from algorithm submask */
+ ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
+ OP_ALG_ALGSEL_SHIFT] * 2;
+ ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
+
+#ifdef DEBUG
+ dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n", keylen,
+ enckeylen, authkeylen);
+ dev_err(jrdev, "split_key_len %d split_key_pad_len %d\n",
+ ctx->split_key_len, ctx->split_key_pad_len);
+ print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+ ret = gen_split_aead_key(ctx, key, authkeylen);
+ if (ret)
+ goto badkey;
+
+ /* append the encryption key after the auth split key */
+ memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen);
+
+ ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
+ enckeylen, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->key_dma)) {
+ dev_err(jrdev, "unable to map key i/o memory\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+ ctx->split_key_pad_len + enckeylen, 1);
+#endif
+
+ ctx->enckeylen = enckeylen;
+
+ ret = tls_set_sh_desc(aead);
+ if (ret) {
+ dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
+ enckeylen, DMA_TO_DEVICE);
+ }
+
+ return ret;
+badkey:
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+}
+
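tls_setkey() above parses the same authenc-style key blob used by the generic authenc template: an rtattr-wrapped crypto_authenc_key_param carrying the big-endian encryption key length, followed by the authentication key and then the encryption key. Below is a minimal sketch of a caller packing such a blob; the helper name and the absence of bounds checking are illustrative choices, not part of the driver.

#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/string.h>
#include <linux/types.h>
#include <crypto/authenc.h>

static unsigned int pack_tls_key(u8 *blob, const u8 *authkey,
				 unsigned int authkeylen,
				 const u8 *enckey, unsigned int enckeylen)
{
	struct rtattr *rta = (struct rtattr *)blob;
	struct crypto_authenc_key_param *param;

	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	param->enckeylen = cpu_to_be32(enckeylen);

	/* auth key first, then enc key, as tls_setkey() expects */
	memcpy(blob + RTA_SPACE(sizeof(*param)), authkey, authkeylen);
	memcpy(blob + RTA_SPACE(sizeof(*param)) + authkeylen, enckey, enckeylen);

	return RTA_SPACE(sizeof(*param)) + authkeylen + enckeylen;
}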
static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
const u8 *key, unsigned int keylen)
{
@@ -659,10 +1024,10 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
* @dst_nents: number of segments in output scatterlist
* @dst_chained: if destination is chained
* @iv_dma: dma address of iv for checking continuity and link table
- * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
* @sec4_sg_bytes: length of dma mapped sec4_sg space
* @sec4_sg_dma: bus physical mapped address of h/w link table
* @hw_desc: the h/w job descriptor followed by any referenced link tables
+ * (variable length; must not exceed MAX_CAAM_DESCSIZE)
*/
struct aead_edesc {
int assoc_nents;
@@ -855,6 +1220,102 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
aead_request_complete(req, err);
}
+static void tls_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
+{
+ struct aead_request *req = context;
+ struct aead_edesc *edesc;
+
+#ifdef DEBUG
+ dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+ edesc = (struct aead_edesc *)((char *)desc -
+ offsetof(struct aead_edesc, hw_desc));
+
+ if (err) {
+ char tmp[CAAM_ERROR_STR_MAX];
+
+ dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
+ }
+
+ aead_unmap(jrdev, edesc, req);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
+ req->assoclen, 1);
+ print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
+ edesc->dst_nents ? 100 : req->cryptlen, 1);
+#endif
+
+ kfree(edesc);
+
+ aead_request_complete(req, err);
+}
+
+static void tls_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
+{
+ struct aead_request *req = context;
+ struct aead_edesc *edesc;
+ int cryptlen = req->cryptlen;
+ u8 padsize;
+ u8 padding[255]; /* padding can be 0-255 bytes */
+ int i;
+
+#ifdef DEBUG
+ dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+ edesc = (struct aead_edesc *)((char *)desc -
+ offsetof(struct aead_edesc, hw_desc));
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
+ req->cryptlen, 1);
+#endif
+
+ if (err) {
+ char tmp[CAAM_ERROR_STR_MAX];
+
+ dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
+ }
+
+ aead_unmap(jrdev, edesc, req);
+
+ /*
+ * verify that the hw ICV check passed, else return -EBADMSG
+ */
+ if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK) {
+ err = -EBADMSG;
+ goto out;
+ }
+
+ /* Padding checking */
+ cryptlen -= 1;
+ scatterwalk_map_and_copy(&padsize, req->dst, cryptlen, 1, 0);
+ if (padsize > cryptlen) {
+ err = -EBADMSG;
+ goto out;
+ }
+ cryptlen -= padsize;
+ scatterwalk_map_and_copy(padding, req->dst, cryptlen, padsize, 0);
+ /* each padding byte must be equal to padsize */
+ for (i = 0; i < padsize; i++)
+ if (padding[i] != padsize) {
+ err = -EBADMSG;
+ break;
+ }
+
+out:
+ kfree(edesc);
+
+ aead_request_complete(req, err);
+}
+
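The padding check in tls_decrypt_done() above implements the TLS 1.0 rule that the last byte gives the pad length and that many preceding bytes must all carry the same value. A stand-alone restatement of that rule (illustrative only, not driver code):

#include <stdbool.h>
#include <stddef.h>

static bool tls10_padding_ok(const unsigned char *rec, size_t len)
{
	unsigned char padsize;
	size_t i;

	if (len < 1)
		return false;
	padsize = rec[len - 1];		/* padding length byte */
	if (padsize > len - 1)
		return false;
	for (i = 0; i < padsize; i++)	/* each pad byte must equal padsize */
		if (rec[len - 2 - i] != padsize)
			return false;
	return true;
}

For example, a buffer ending in ... 03 03 03 | 03 passes (padsize = 3), while ... 02 03 03 | 03 fails and maps to -EBADMSG above.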
static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
void *context)
{
@@ -1069,6 +1530,86 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
}
/*
+ * Fill in tls job descriptor either for encrypt or decrypt
+ */
+static void init_tls_job(u32 *sh_desc, dma_addr_t ptr,
+ struct aead_edesc *edesc,
+ struct aead_request *req,
+ bool all_contig, bool encrypt, unsigned int padsize)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ int ivsize = crypto_aead_ivsize(aead);
+ int authsize = ctx->authsize;
+ u32 *desc = edesc->hw_desc;
+ u32 out_options = 0, in_options;
+ dma_addr_t dst_dma, src_dma;
+ int len, sec4_sg_index = 0;
+
+#ifdef DEBUG
+ debug("assoclen %d cryptlen %d authsize %d\n",
+ req->assoclen, req->cryptlen, authsize);
+ print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
+ req->assoclen , 1);
+ print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
+ edesc->src_nents ? 100 : ivsize, 1);
+ print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ edesc->src_nents ? 100 : req->cryptlen, 1);
+ print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
+ desc_bytes(sh_desc), 1);
+#endif
+
+ len = desc_len(sh_desc);
+ init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+
+ if (all_contig) {
+ src_dma = sg_dma_address(req->assoc);
+ in_options = 0;
+ } else {
+ src_dma = edesc->sec4_sg_dma;
+ sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 +
+ (edesc->src_nents ? : 1);
+ in_options = LDST_SGF;
+ }
+
+ if (likely(req->src == req->dst)) {
+ if (all_contig) {
+ dst_dma = sg_dma_address(req->src);
+ } else {
+ dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
+ ((edesc->assoc_nents ? : 1) + 1);
+ out_options = LDST_SGF;
+ }
+ } else {
+ if (!edesc->dst_nents) {
+ dst_dma = sg_dma_address(req->dst);
+ } else {
+ dst_dma = edesc->sec4_sg_dma +
+ sec4_sg_index *
+ sizeof(struct sec4_sg_entry);
+ out_options = LDST_SGF;
+ }
+ }
+
+ /*
+ * For decrypt, do not strip the ICV, padding and padding length byte,
+ * since the upper layer(s) perform the padding check.
+ */
+ if (encrypt)
+ append_seq_out_ptr(desc, dst_dma, req->cryptlen + padsize +
+ authsize, out_options);
+ else
+ append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
+
+ append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
+ req->cryptlen, in_options);
+}
+
+/*
* Fill in ablkcipher job descriptor
*/
static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
@@ -1130,7 +1671,7 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
*/
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
int desc_bytes, bool *all_contig_ptr,
- bool encrypt)
+ bool encrypt, bool strip_icv)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
@@ -1150,10 +1691,15 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
if (unlikely(req->dst != req->src)) {
+ int extralen;
+
src_nents = sg_count(req->src, req->cryptlen, &src_chained);
- dst_nents = sg_count(req->dst,
- req->cryptlen +
- (encrypt ? authsize : (-authsize)),
+
+ if (encrypt)
+ extralen = authsize;
+ else
+ extralen = strip_icv ? (-authsize) : 0;
+ dst_nents = sg_count(req->dst, req->cryptlen + extralen,
&dst_chained);
} else {
src_nents = sg_count(req->src,
@@ -1246,7 +1792,7 @@ static int aead_encrypt(struct aead_request *req)
/* allocate extended descriptor */
edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
- CAAM_CMD_SZ, &all_contig, true);
+ CAAM_CMD_SZ, &all_contig, true, true);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
@@ -1283,7 +1829,7 @@ static int aead_decrypt(struct aead_request *req)
/* allocate extended descriptor */
edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
- CAAM_CMD_SZ, &all_contig, false);
+ CAAM_CMD_SZ, &all_contig, false, true);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
@@ -1469,6 +2015,104 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq)
return ret;
}
+static int tls_encrypt(struct aead_request *req)
+{
+ struct aead_edesc *edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ bool all_contig;
+ u32 *desc;
+ int ret = 0;
+ unsigned int blocksize = crypto_aead_blocksize(aead);
+ unsigned int padsize;
+
+ padsize = blocksize - ((req->cryptlen + ctx->authsize) % blocksize);
+
+ /*
+ * allocate extended tls descriptor
+ * TLS 1.0 has no explicit IV in the packet, but it is needed as input
+ * since it is used by CBC.
+ * ctx->authsize is temporarily set to also include padlen
+ */
+ ctx->authsize += padsize;
+ edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ,
+ &all_contig, true, true);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+ ctx->authsize -= padsize;
+
+ /* Create and submit job descriptor */
+ init_tls_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
+ all_contig, true, padsize);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "tls enc jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
+#endif
+
+ desc = edesc->hw_desc;
+ ret = caam_jr_enqueue(jrdev, desc, tls_encrypt_done, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ aead_unmap(jrdev, edesc, req);
+ kfree(edesc);
+ }
+
+ return ret;
+}
+
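A worked example of the padsize computation in tls_encrypt() above (numbers are illustrative):

	blocksize = 16 (AES), req->cryptlen = 100, ctx->authsize = 20
	padsize   = 16 - ((100 + 20) % 16) = 16 - 8 = 8
	output    = cryptlen + padsize + authsize = 100 + 8 + 20 = 128 bytes

The output length is block-aligned, and when (cryptlen + authsize) is already a multiple of the block size the formula yields a full 16-byte pad, matching the TLS 1.0 rule that at least one byte of padding is always added.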
+static int tls_decrypt(struct aead_request *req)
+{
+ struct aead_edesc *edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ bool all_contig;
+ u32 *desc;
+ int ret = 0;
+
+ /*
+ * allocate extended tls descriptor
+ * TLS 1.0 has no explicit IV in the packet, but it is needed as input
+ * since it is used by CBC.
+ * Assumption: since padding and ICV are not stripped (upper layer
+ * checks padding), req->dst has to be big enough to hold payloadlen +
+ * padlen + icvlen.
+ */
+ edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ,
+ &all_contig, false, false);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ req->cryptlen, 1);
+#endif
+
+ /* Create and submit job descriptor */
+ init_tls_job(ctx->sh_desc_dec, ctx->sh_desc_dec_dma, edesc, req,
+ all_contig, false, 0);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "tls dec jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
+#endif
+
+ desc = edesc->hw_desc;
+ ret = caam_jr_enqueue(jrdev, desc, tls_decrypt_done, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ aead_unmap(jrdev, edesc, req);
+ kfree(edesc);
+ }
+
+ return ret;
+}
+
/*
* allocate and map the ablkcipher extended descriptor for ablkcipher
*/
@@ -2012,6 +2656,26 @@ static struct caam_alg_template driver_algs[] = {
OP_ALG_AAI_HMAC_PRECOMP,
.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
},
+ /* TLS record descriptors */
+ {
+ .name = "tls10(hmac(sha1),cbc(aes))",
+ .driver_name = "tls10-hmac-sha1-cbc-aes-caam",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = tls_setkey,
+ .setauthsize = tls_setauthsize,
+ .encrypt = tls_encrypt,
+ .decrypt = tls_decrypt,
+ .givencrypt = NULL,
+ .geniv = "<built-in>",
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ },
/* ablkcipher descriptor */
{
.name = "cbc(aes)",
@@ -2077,6 +2741,15 @@ static int caam_cra_init(struct crypto_tfm *tfm)
struct caam_crypto_alg *caam_alg =
container_of(alg, struct caam_crypto_alg, crypto_alg);
struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
+ /* Digest sizes for MD5, SHA1, SHA-224, SHA-256, SHA-384, SHA-512 */
+ static const u8 digest_size[] = {
+ MD5_DIGEST_SIZE,
+ SHA1_DIGEST_SIZE,
+ SHA224_DIGEST_SIZE,
+ SHA256_DIGEST_SIZE,
+ SHA384_DIGEST_SIZE,
+ SHA512_DIGEST_SIZE
+ };
ctx->jrdev = caam_jr_alloc();
if (IS_ERR(ctx->jrdev)) {
@@ -2089,6 +2762,16 @@ static int caam_cra_init(struct crypto_tfm *tfm)
ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;
+ /*
+ * Set a default authsize, in case the setauthsize callback is not
+ * called by the upper layer (e.g. TLS).
+ */
+ if (caam_alg->alg_op)
+ ctx->authsize = digest_size[(ctx->alg_op &
+ OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT];
+ else
+ ctx->authsize = 0;
+
return 0;
}
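A minimal sketch of how a kernel client on this 3.x-era AEAD API might exercise the new transform. The function name, headers and flow are assumptions (no such user is added by this patch); a real caller would also set a completion callback with aead_request_set_callback() and wait when -EINPROGRESS is returned.

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <crypto/sha.h>

static int tls10_demo_encrypt(const u8 *key, unsigned int keylen,
			      struct scatterlist *assoc, struct scatterlist *src,
			      struct scatterlist *dst, u8 *iv,
			      unsigned int cryptlen)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	int err;

	tfm = crypto_alloc_aead("tls10(hmac(sha1),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* key is the authenc-format blob parsed by tls_setkey() */
	err = crypto_aead_setkey(tfm, key, keylen);
	if (!err)
		err = crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE);
	if (err)
		goto out;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out;
	}
	aead_request_set_assoc(req, assoc, 13);		/* TLS pseudo-header */
	aead_request_set_crypt(req, src, dst, cryptlen, iv);
	err = crypto_aead_encrypt(req);			/* async: -EINPROGRESS */
	aead_request_free(req);
out:
	crypto_free_aead(tfm);
	return err;
}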
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index d3278d3..4743d8e 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -237,6 +237,7 @@ struct sec4_sg_entry {
#define LDST_SRCDST_WORD_DESCBUF_SHARED (0x42 << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_DESCBUF_JOB_WE (0x45 << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_DESCBUF_SHARED_WE (0x46 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_INFO_FIFO_SZM (0x71 << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_INFO_FIFO (0x7a << LDST_SRCDST_SHIFT)
/* Offset in source/destination */
@@ -279,6 +280,28 @@ struct sec4_sg_entry {
#define LDLEN_SET_OFIFO_OFFSET_SHIFT 0
#define LDLEN_SET_OFIFO_OFFSET_MASK (3 << LDLEN_SET_OFIFO_OFFSET_SHIFT)
+/* CCB Clear Written Register bits */
+#define CLRW_CLR_C1MODE 0x1
+#define CLRW_CLR_C1DATAS 0x4
+#define CLRW_CLR_C1ICV 0x8
+#define CLRW_CLR_C1CTX 0x20
+#define CLRW_CLR_C1KEY 0x40
+#define CLRW_CLR_PK_A 0x1000
+#define CLRW_CLR_PK_B 0x2000
+#define CLRW_CLR_PK_N 0x4000
+#define CLRW_CLR_PK_E 0x8000
+#define CLRW_CLR_C2MODE 0x10000
+#define CLRW_CLR_C2KEYS 0x20000
+#define CLRW_CLR_C2DATAS 0x40000
+#define CLRW_CLR_C2CTX 0x200000
+#define CLRW_CLR_C2KEY 0x400000
+#define CLRW_RESET_CLS2_DONE 0x04000000u /* era 4 */
+#define CLRW_RESET_CLS1_DONE 0x08000000u /* era 4 */
+#define CLRW_RESET_CLS2_CHA 0x10000000u /* era 4 */
+#define CLRW_RESET_CLS1_CHA 0x20000000u /* era 4 */
+#define CLRW_RESET_OFIFO 0x40000000u /* era 3 */
+#define CLRW_RESET_IFIFO_DFIFO 0x80000000u /* era 3 */
+
/*
* FIFO_LOAD/FIFO_STORE/SEQ_FIFO_LOAD/SEQ_FIFO_STORE
* Command Constructs
@@ -1439,6 +1462,8 @@ struct sec4_sg_entry {
#define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT)
#define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT)
#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC0_SHIFT)
+#define MATH_SRC1_VARSEQINLEN (0x08 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_VARSEQOUTLEN (0x09 << MATH_SRC1_SHIFT)
#define MATH_SRC1_INFIFO (0x0a << MATH_SRC1_SHIFT)
#define MATH_SRC1_OUTFIFO (0x0b << MATH_SRC1_SHIFT)
#define MATH_SRC1_ONE (0x0c << MATH_SRC1_SHIFT)