author     Horia Geanta <horia.geanta@freescale.com>   2013-12-13 08:29:40 (GMT)
committer  Jose Rivera <German.Rivera@freescale.com>   2014-03-28 13:42:04 (GMT)
commit     614fae17bf2a0a26f5acad9425373187a2695508 (patch)
tree       90d514152860c7c8ab54d49438974c171e86fe18 /drivers
parent     18a7d0e4661360a2b3781e904920e4dc8d4b2437 (diff)
crypto: caam/qi - add support for TLS 1.0 record
TLS 1.0 descriptors run on SEC Era 4 or higher. For now, only the
tls10(hmac(sha1),cbc(aes)) algorithm is registered by the driver.

Change-Id: Ie8f761652f17a7a9e976a7371392c9b49cd5fe9b
Signed-off-by: Horia Geanta <horia.geanta@freescale.com>
Reviewed-on: http://git.am.freescale.net:8181/10385
Reviewed-by: Cristian Stoica <cristian.stoica@freescale.com>
Reviewed-by: Mircea Pop <mircea.pop@freescale.com>
Reviewed-by: Alexandru Porosanu <alexandru.porosanu@freescale.com>
Tested-by: Review Code-CDREVIEW <CDREVIEW@freescale.com>
Reviewed-by: Jose Rivera <German.Rivera@freescale.com>
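For context, the padding rule these descriptors implement is the standard TLS 1.0 CBC scheme: after the HMAC is appended, the record is padded up to a multiple of the cipher block size, and every appended byte (including the trailing padding-length byte) carries the value pad_len - 1. A minimal sketch of the host-side computation that tls_encrypt() in the diff performs before building the job descriptor (the helper name is illustrative only, not part of this patch):

#include <linux/types.h>

/*
 * Illustrative only: total number of bytes the CAAM pad generator
 * appends to a TLS 1.0 record.  payload + ICV is rounded up to the
 * next cipher block, and at least one byte (the padding-length byte)
 * is always added; each appended byte has the value (padsize - 1).
 */
static unsigned int tls10_pad_size(unsigned int cryptlen,
				   unsigned int authsize,
				   unsigned int blocksize)
{
	return blocksize - ((cryptlen + authsize) % blocksize);
}

On decryption, tls_decrypt_done() below reads the last byte of the result as the padding length and checks that each of the preceding pad bytes equals it, completing the request with -EBADMSG otherwise.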
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/crypto/caam/caamalg_qi.c  667
1 file changed, 658 insertions(+), 9 deletions(-)
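The tls10 algorithm registered at the end of this patch reuses the authenc()-style key blob: tls_setkey() below unpacks an rtattr header carrying crypto_authenc_key_param (the encryption-key length), followed by the HMAC key and then the AES key. A hedged sketch of how a caller might pack such a blob; tls10_pack_key is a hypothetical helper, not part of this patch:

#include <crypto/authenc.h>
#include <linux/rtnetlink.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

/*
 * Illustrative helper (not part of this patch): lay out
 * [ rtattr | crypto_authenc_key_param | auth key | enc key ]
 * in the format that tls_setkey() parses with RTA_DATA() below.
 * Returns the total blob length, or -EINVAL if it does not fit.
 */
static int tls10_pack_key(u8 *buf, unsigned int buflen,
			  const u8 *authkey, unsigned int authkeylen,
			  const u8 *enckey, unsigned int enckeylen)
{
	struct rtattr *rta = (struct rtattr *)buf;
	struct crypto_authenc_key_param *param;
	unsigned int len = RTA_SPACE(sizeof(*param)) + authkeylen + enckeylen;

	if (buflen < len)
		return -EINVAL;

	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	param->enckeylen = cpu_to_be32(enckeylen);

	memcpy(buf + RTA_SPACE(sizeof(*param)), authkey, authkeylen);
	memcpy(buf + RTA_SPACE(sizeof(*param)) + authkeylen, enckey, enckeylen);

	return len;
}

This mirrors the key format already parsed by the authenc() setkey path in this file, so key material prepared for authenc(hmac(sha1),cbc(aes)) can be passed to tls10(hmac(sha1),cbc(aes)) unchanged.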
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index 1dba0c2..b0980d2 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -17,6 +17,7 @@
#include "key_gen.h"
#include "qi.h"
#include "jr.h"
+#include "ctrl.h"
/*
* crypto alg
@@ -34,6 +35,9 @@
#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 21 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
+#define DESC_TLS_BASE (4 * CAAM_CMD_SZ)
+#define DESC_TLS10_ENC_LEN (DESC_TLS_BASE + 23 * CAAM_CMD_SZ)
+
#define DESC_MAX_USED_BYTES (DESC_AEAD_GIVENC_LEN + \
CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
@@ -376,6 +380,289 @@ static int aead_setauthsize(struct crypto_aead *authenc,
return 0;
}
+static int tls_set_sh_desc(struct crypto_aead *aead)
+{
+ struct aead_tfm *tfm = &aead->base.crt_aead;
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ bool keys_fit_inline = false;
+ u32 *key_jump_cmd, *zero_payload_jump_cmd, *skip_zero_jump_cmd;
+ u32 genpad, clrw, jumpback, stidx;
+ u32 *desc;
+ unsigned int blocksize = crypto_aead_blocksize(aead);
+ /* Associated data length is always = 13 for TLS */
+ unsigned int assoclen = 13;
+
+ if (!ctx->enckeylen || !ctx->authsize)
+ return 0;
+
+ /*
+ * TLS 1.0 encrypt shared descriptor
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+ if (DESC_TLS10_ENC_LEN + DESC_JOB_IO_LEN +
+ ctx->split_key_pad_len + ctx->enckeylen <=
+ CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = true;
+
+ desc = ctx->sh_desc_enc;
+
+ stidx = 1 << HDR_START_IDX_SHIFT;
+ init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
+
+ /* skip key loading if they are loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_CLASS_BOTH | JUMP_JSL |
+ JUMP_TEST_ALL | JUMP_COND_SHRD |
+ JUMP_COND_SELF);
+ if (keys_fit_inline) {
+ append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
+ ctx->split_key_len, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ append_key_as_imm(desc, (void *)ctx->key +
+ ctx->split_key_pad_len, ctx->enckeylen,
+ ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ } else {
+ append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
+ ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ }
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* class 2 operation */
+ append_operation(desc, ctx->class2_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+ /* class 1 operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* payloadlen = input data length - (assoclen + ivlen) */
+ append_math_sub_imm_u32(desc, VARSEQINLEN, SEQINLEN, IMM, assoclen +
+ tfm->ivsize);
+
+ /* math1 = payloadlen + icvlen */
+ append_math_add_imm_u32(desc, REG1, VARSEQINLEN, IMM, ctx->authsize);
+
+ /* padlen = block_size - math1 % block_size */
+ append_math_and_imm_u32(desc, REG3, REG1, IMM, blocksize - 1);
+ append_math_sub_imm_u32(desc, REG2, IMM, REG3, blocksize);
+
+ /* cryptlen = payloadlen + icvlen + padlen */
+ append_math_add(desc, VARSEQOUTLEN, REG1, REG2, 4);
+
+ /* store encrypted payload, icv and padding */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
+
+ /* if payload length is zero, jump to zero-payload commands */
+ append_math_add(desc, NONE, ZERO, VARSEQINLEN, 4);
+ zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+
+ /* read assoc for authentication */
+ append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
+ FIFOLD_TYPE_MSG);
+ /* load iv in context1 */
+ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
+ LDST_CLASS_1_CCB | tfm->ivsize);
+ /* insnoop payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLD_TYPE_MSG |
+ FIFOLD_TYPE_LAST2 | FIFOLDST_VLF);
+ /* jump the zero-payload commands */
+ append_jump(desc, JUMP_TEST_ALL | 3);
+
+ /* zero-payload commands */
+ set_jump_tgt_here(desc, zero_payload_jump_cmd);
+ /* assoc data is the only data for authentication */
+ append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
+ /* load iv in context1 */
+ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
+ LDST_CLASS_1_CCB | tfm->ivsize);
+
+ /* send icv to encryption */
+ append_move(desc, MOVE_SRC_CLASS2CTX | MOVE_DEST_CLASS1INFIFO |
+ ctx->authsize);
+
+ /* generate padding and send it to encryption */
+ genpad = NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_LC1 | NFIFOENTRY_FC1 |
+ NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_PTYPE_N;
+ append_load_imm_u32(desc, genpad, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO_SZM | LDST_IMM |
+ (2 & LDST_LEN_MASK));
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "tls enc shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ /*
+ * TLS 1.0 decrypt shared descriptor
+ * Keys do not fit inline, regardless of algorithms used
+ */
+ desc = ctx->sh_desc_dec;
+
+ stidx = 1 << HDR_START_IDX_SHIFT;
+ init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
+
+ /* skip key loading if they are loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_CLASS_BOTH | JUMP_JSL |
+ JUMP_TEST_ALL | JUMP_COND_SHRD |
+ JUMP_COND_SELF);
+ append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
+ ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* class 2 operation */
+ append_operation(desc, ctx->class2_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+ /* class 1 operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
+
+ /* VSIL = input data length - 2 * block_size */
+ append_math_sub_imm_u32(desc, VARSEQINLEN, SEQINLEN, IMM, 2 *
+ blocksize);
+
+ /*
+ * payloadlen + icvlen + padlen = input data length - (assoclen +
+ * ivsize)
+ */
+ append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, assoclen +
+ tfm->ivsize);
+
+ /* skip data to the last but one cipher block */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | LDST_VLF);
+
+ /* load iv for the last cipher block */
+ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
+ LDST_CLASS_1_CCB | tfm->ivsize);
+
+ /* read last cipher block */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
+ FIFOLD_TYPE_LAST1 | blocksize);
+
+ /* move decrypted block into math0 and math1 */
+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO | MOVE_DEST_MATH0 |
+ blocksize);
+
+ /* clear cha1 specific registers */
+ clrw = CLRW_CLR_C1MODE | CLRW_CLR_C1DATAS | CLRW_CLR_C1CTX |
+ CLRW_RESET_CLS1_CHA;
+ append_load_imm_u32(desc, clrw, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_CLRW | LDST_IMM);
+
+ /* rewind input sequence */
+ append_seq_in_ptr_intlen(desc, 0, 65535, SQIN_RTO);
+
+ /* key1 is in decryption form */
+ append_operation(desc, ctx->class1_alg_type | OP_ALG_AAI_DK |
+ OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
+
+ /* read sequence number */
+ append_seq_fifo_load(desc, 8, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG);
+ /* load Type, Version and Len fields in math0 */
+ append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_DECO |
+ LDST_SRCDST_WORD_DECO_MATH0 | 5);
+
+ /* load iv in context1 */
+ append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_1_CCB |
+ LDST_SRCDST_WORD_CLASS_CTX | tfm->ivsize);
+
+ /* compute (padlen - 1) */
+ append_math_and_imm_u64(desc, REG1, REG1, IMM, 255);
+
+ /* math2 = icvlen + (padlen - 1) + 1 */
+ append_math_add_imm_u32(desc, REG2, REG1, IMM, ctx->authsize + 1);
+
+ append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
+
+ /* VSOL = payloadlen + icvlen + padlen */
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, 4);
+
+ /* update Len field */
+ append_math_rshift_imm_u64(desc, REG0, REG0, IMM, 24);
+ append_math_sub(desc, REG0, REG0, REG2, 8);
+
+ /* store decrypted payload, icv and padding */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
+
+ /* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen)*/
+ append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
+
+ zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+
+ /* send Type, Version and Len(pre ICV) fields to authentication */
+ append_move(desc, MOVE_WAITCOMP |
+ MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
+ (3 << MOVE_OFFSET_SHIFT) | 5);
+
+ /* outsnooping payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
+ FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LAST2 |
+ FIFOLDST_VLF);
+ skip_zero_jump_cmd = append_jump(desc, JUMP_TEST_ALL | 2);
+
+ set_jump_tgt_here(desc, zero_payload_jump_cmd);
+ /* send Type, Version and Len(pre ICV) fields to authentication */
+ append_move(desc, MOVE_WAITCOMP | MOVE_AUX_LS |
+ MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
+ (3 << MOVE_OFFSET_SHIFT) | 5);
+
+ set_jump_tgt_here(desc, skip_zero_jump_cmd);
+ append_math_add(desc, VARSEQINLEN, ZERO, REG2, 4);
+
+ /* load icvlen and padlen */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
+ FIFOLD_TYPE_LAST1 | FIFOLDST_VLF);
+
+ /* VSIL = (payloadlen + icvlen + padlen) - icvlen + padlen */
+ append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
+
+ /* move seqoutptr fields into math registers */
+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF | MOVE_DEST_MATH0 |
+ (55 * 4 << MOVE_OFFSET_SHIFT) | 20);
+ /* seqinptr will point to seqoutptr */
+ append_math_and_imm_u32(desc, REG0, REG0, IMM,
+ ~(CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR));
+ /* Load jump command */
+ jumpback = CMD_JUMP | (char)-9;
+ append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
+ LDST_SRCDST_WORD_DECO_MATH2 |
+ (4 << LDST_OFFSET_SHIFT));
+ /* move updated seqinptr fields to JD */
+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 | MOVE_DEST_DESCBUF |
+ (55 * 4 << MOVE_OFFSET_SHIFT) | 24);
+ /* read updated seqinptr */
+ append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 6);
+
+ /* skip payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | FIFOLDST_VLF);
+ /* check icv */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_ICV |
+ FIFOLD_TYPE_LAST2 | ctx->authsize);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "tls dec shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ return 0;
+}
+
+static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(tls);
+
+ ctx->authsize = authsize;
+
+ return 0;
+}
+
static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
u32 authkeylen)
{
@@ -489,6 +776,108 @@ badkey:
}
+static int tls_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
+ static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ struct rtattr *rta = (void *)key;
+ struct crypto_authenc_key_param *param;
+ unsigned int authkeylen;
+ unsigned int enckeylen;
+ int ret = 0;
+
+ param = RTA_DATA(rta);
+ enckeylen = be32_to_cpu(param->enckeylen);
+
+ key += RTA_ALIGN(rta->rta_len);
+ keylen -= RTA_ALIGN(rta->rta_len);
+
+ if (keylen < enckeylen)
+ goto badkey;
+
+ authkeylen = keylen - enckeylen;
+
+ if (keylen > CAAM_MAX_KEY_SIZE)
+ goto badkey;
+
+ /* Pick class 2 key length from algorithm submask */
+ ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
+ OP_ALG_ALGSEL_SHIFT] * 2;
+ ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
+
+#ifdef DEBUG
+ dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n", keylen,
+ enckeylen, authkeylen);
+ dev_err(jrdev, "split_key_len %d split_key_pad_len %d\n",
+ ctx->split_key_len, ctx->split_key_pad_len);
+ print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+ ret = gen_split_aead_key(ctx, key, authkeylen);
+ if (ret)
+ goto badkey;
+
+ /* postpend encryption key to auth split key */
+ memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen);
+
+ ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
+ enckeylen, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->key_dma)) {
+ dev_err(jrdev, "unable to map key i/o memory\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+ ctx->split_key_pad_len + enckeylen, 1);
+#endif
+
+ ctx->enckeylen = enckeylen;
+
+ ret = tls_set_sh_desc(aead);
+ if (ret) {
+ dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
+ enckeylen, DMA_TO_DEVICE);
+ }
+
+ /* Now update the driver contexts with the new shared descriptor */
+ if (ctx->drv_ctx[ENCRYPT]) {
+ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
+ ctx->sh_desc_enc);
+ if (ret) {
+ dev_err(jrdev, "driver enc context update failed\n");
+ goto badkey;
+ }
+ }
+
+ if (ctx->drv_ctx[DECRYPT]) {
+ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
+ ctx->sh_desc_dec);
+ if (ret) {
+ dev_err(jrdev, "driver dec context update failed\n");
+ goto badkey;
+ }
+ }
+
+ if (ctx->drv_ctx[GIVENCRYPT]) {
+ ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
+ ctx->sh_desc_givenc);
+ if (ret) {
+ dev_err(jrdev, "driver givenc context update failed\n");
+ goto badkey;
+ }
+ }
+
+ return ret;
+badkey:
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+}
+
/*
* aead_edesc - s/w-extended aead descriptor
* @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
@@ -581,11 +970,92 @@ static inline void aead_done(struct caam_drv_req *drv_req, u32 status)
kfree(edesc);
}
+/* For now, identical to aead_done */
+static inline void tls_encrypt_done(struct caam_drv_req *drv_req, u32 status)
+{
+ struct device *qidev;
+ struct aead_edesc *edesc;
+ struct aead_request *aead_req = drv_req->app_ctx;
+ struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
+ struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
+ int ecode = 0;
+
+ qidev = caam_ctx->qidev;
+
+ if (status) {
+ char tmp[CAAM_ERROR_STR_MAX];
+ dev_err(qidev, "Rsp status: %#x: %s\n",
+ status, caam_jr_strstatus(tmp, status));
+ ecode = -EIO;
+ }
+
+ edesc = container_of(drv_req, struct aead_edesc, drv_req);
+ aead_unmap(qidev, edesc, aead_req);
+
+ aead_request_complete(aead_req, ecode);
+ kfree(edesc);
+}
+
+static inline void tls_decrypt_done(struct caam_drv_req *drv_req, u32 status)
+{
+ struct device *qidev;
+ struct aead_edesc *edesc;
+ struct aead_request *aead_req = drv_req->app_ctx;
+ struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
+ struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
+ int ecode = 0;
+ int cryptlen = aead_req->cryptlen;
+ u8 padsize;
+ u8 padding[255]; /* padding can be 0-255 bytes */
+ int i;
+
+ qidev = caam_ctx->qidev;
+
+ if (status) {
+ char tmp[CAAM_ERROR_STR_MAX];
+ dev_err(qidev, "Rsp status: %#x: %s\n",
+ status, caam_jr_strstatus(tmp, status));
+ ecode = -EIO;
+ }
+
+ edesc = container_of(drv_req, struct aead_edesc, drv_req);
+ aead_unmap(qidev, edesc, aead_req);
+
+ /*
+ * verify hw auth check passed else return -EBADMSG
+ */
+ if ((status & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK) {
+ ecode = -EBADMSG;
+ goto out;
+ }
+
+ /* Padding checking */
+ cryptlen -= 1;
+ scatterwalk_map_and_copy(&padsize, aead_req->dst, cryptlen, 1, 0);
+ if (padsize > cryptlen) {
+ ecode = -EBADMSG;
+ goto out;
+ }
+ cryptlen -= padsize;
+ scatterwalk_map_and_copy(padding, aead_req->dst, cryptlen, padsize, 0);
+ /* the padding content must be equal with padsize */
+ for (i = 0; i < padsize; i++)
+ if (padding[i] != padsize) {
+ ecode = -EBADMSG;
+ break;
+ }
+
+out:
+ aead_request_complete(aead_req, ecode);
+ kfree(edesc);
+}
+
/*
* allocate and map the aead extended descriptor
*/
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
- bool *all_contig_ptr, bool encrypt)
+ bool *all_contig_ptr, bool encrypt,
+ bool strip_icv)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
@@ -608,10 +1078,15 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
if (unlikely(req->dst != req->src)) {
+ int extralen;
+
src_nents = sg_count(req->src, req->cryptlen, &src_chained);
- dst_nents = sg_count(req->dst,
- req->cryptlen +
- (encrypt ? authsize : (-authsize)),
+
+ if (encrypt)
+ extralen = authsize;
+ else
+ extralen = strip_icv ? (-authsize) : 0;
+ dst_nents = sg_count(req->dst, req->cryptlen + extralen,
&dst_chained);
} else {
src_nents = sg_count(req->src,
@@ -621,7 +1096,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
}
sgc = dma_map_sg_chained(qidev, req->assoc, assoc_nents ? : 1,
- DMA_BIDIRECTIONAL, assoc_chained);
+ DMA_TO_DEVICE, assoc_chained);
if (likely(req->src == req->dst)) {
sgc = dma_map_sg_chained(qidev, req->src, src_nents ? : 1,
DMA_BIDIRECTIONAL, src_chained);
@@ -788,7 +1263,7 @@ static int aead_encrypt(struct aead_request *req)
return -EAGAIN;
/* allocate extended descriptor */
- edesc = aead_edesc_alloc(req, &all_contig, true);
+ edesc = aead_edesc_alloc(req, &all_contig, true, true);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
@@ -831,7 +1306,7 @@ static int aead_decrypt(struct aead_request *req)
return -EAGAIN;
/* allocate extended descriptor */
- edesc = aead_edesc_alloc(req, &all_contig, false);
+ edesc = aead_edesc_alloc(req, &all_contig, false, true);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
@@ -854,6 +1329,114 @@ static int aead_decrypt(struct aead_request *req)
return ret;
}
+static int tls_encrypt(struct aead_request *req)
+{
+ struct aead_edesc *edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ int ivsize = crypto_aead_ivsize(aead);
+ struct device *qidev = ctx->qidev;
+ struct caam_drv_ctx *drv_ctx;
+ struct caam_drv_req *drv_req;
+ bool all_contig;
+ int ret;
+ unsigned int blocksize = crypto_aead_blocksize(aead);
+ unsigned int padsize;
+
+ drv_ctx = get_drv_ctx(ctx, ENCRYPT);
+ if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
+ return PTR_ERR(drv_ctx);
+
+ if (unlikely(caam_drv_ctx_busy(drv_ctx)))
+ return -EAGAIN;
+
+ padsize = blocksize - ((req->cryptlen + ctx->authsize) % blocksize);
+
+ /*
+ * allocate extended tls descriptor
+ * TLS 1.0 has no explicit IV in the packet, but it is needed as input
+ * since it is used by CBC.
+ * ctx->authsize is temporary set to include also padlen
+ */
+ ctx->authsize += padsize;
+ edesc = aead_edesc_alloc(req, &all_contig, true, true);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+ ctx->authsize -= padsize;
+
+ /* Create and submit job descriptor */
+ drv_req = &edesc->drv_req;
+ drv_req->app_ctx = req;
+ drv_req->cbk = tls_encrypt_done;
+ drv_req->fd_sgt[0].length = req->cryptlen + padsize + ctx->authsize;
+ drv_req->fd_sgt[1].length = req->assoclen + ivsize + req->cryptlen;
+
+ drv_req->drv_ctx = drv_ctx;
+ ret = caam_qi_enqueue(qidev, drv_req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ aead_unmap(qidev, edesc, req);
+ kfree(edesc);
+ }
+
+ return ret;
+}
+
+static int tls_decrypt(struct aead_request *req)
+{
+ struct aead_edesc *edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ int ivsize = crypto_aead_ivsize(aead);
+ struct device *qidev = ctx->qidev;
+ struct caam_drv_ctx *drv_ctx;
+ struct caam_drv_req *drv_req;
+ bool all_contig;
+ int ret = 0;
+
+ drv_ctx = get_drv_ctx(ctx, DECRYPT);
+ if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
+ return PTR_ERR(drv_ctx);
+
+ if (unlikely(caam_drv_ctx_busy(drv_ctx)))
+ return -EAGAIN;
+
+ /*
+ * allocate extended descriptor
+ * TLS 1.0 has no explicit IV in the packet, but it is needed as input
+ * since it is used by CBC.
+ * Assumption: since padding and ICV are not stripped (upper layer
+ * checks padding), req->dst has to be big enough to hold payloadlen +
+ * padlen + icvlen.
+ */
+ edesc = aead_edesc_alloc(req, &all_contig, false, false);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ /* Create and submit job descriptor */
+ drv_req = &edesc->drv_req;
+ drv_req->app_ctx = req;
+ drv_req->cbk = tls_decrypt_done;
+ /*
+ * For decrypt, do not strip ICV, Padding, Padding length since
+ * upper layer(s) perform padding checking.
+ */
+ drv_req->fd_sgt[0].length = req->cryptlen;
+ drv_req->fd_sgt[1].length = req->assoclen + ivsize + req->cryptlen;
+
+ drv_req->drv_ctx = drv_ctx;
+ ret = caam_qi_enqueue(qidev, drv_req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ aead_unmap(qidev, edesc, req);
+ kfree(edesc);
+ }
+
+ return ret;
+}
+
/*
* allocate and map the aead extended descriptor for aead givencrypt
*/
@@ -886,7 +1469,7 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
&dst_chained);
sgc = dma_map_sg_chained(qidev, req->assoc, assoc_nents ? : 1,
- DMA_BIDIRECTIONAL, assoc_chained);
+ DMA_TO_DEVICE, assoc_chained);
if (likely(req->src == req->dst)) {
sgc = dma_map_sg_chained(qidev, req->src, src_nents ? : 1,
DMA_BIDIRECTIONAL, src_chained);
@@ -1062,6 +1645,7 @@ struct caam_alg_template {
u32 class1_alg_type;
u32 class2_alg_type;
u32 alg_op;
+ int min_era;
};
static struct caam_alg_template driver_algs[] = {
@@ -1084,6 +1668,7 @@ static struct caam_alg_template driver_algs[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ .min_era = 2,
},
{
.name = "authenc(hmac(sha1),cbc(aes))",
@@ -1103,6 +1688,7 @@ static struct caam_alg_template driver_algs[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ .min_era = 2,
},
{
.name = "authenc(hmac(sha224),cbc(aes))",
@@ -1123,6 +1709,7 @@ static struct caam_alg_template driver_algs[] = {
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ .min_era = 2,
},
{
.name = "authenc(hmac(sha256),cbc(aes))",
@@ -1143,6 +1730,7 @@ static struct caam_alg_template driver_algs[] = {
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ .min_era = 2,
},
{
.name = "authenc(hmac(sha384),cbc(aes))",
@@ -1163,6 +1751,7 @@ static struct caam_alg_template driver_algs[] = {
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ .min_era = 2,
},
{
@@ -1184,6 +1773,7 @@ static struct caam_alg_template driver_algs[] = {
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ .min_era = 2,
},
{
.name = "authenc(hmac(md5),cbc(des3_ede))",
@@ -1203,6 +1793,7 @@ static struct caam_alg_template driver_algs[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ .min_era = 2,
},
{
.name = "authenc(hmac(sha1),cbc(des3_ede))",
@@ -1222,6 +1813,7 @@ static struct caam_alg_template driver_algs[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ .min_era = 2,
},
{
.name = "authenc(hmac(sha224),cbc(des3_ede))",
@@ -1242,6 +1834,7 @@ static struct caam_alg_template driver_algs[] = {
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ .min_era = 2,
},
{
.name = "authenc(hmac(sha256),cbc(des3_ede))",
@@ -1262,6 +1855,7 @@ static struct caam_alg_template driver_algs[] = {
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ .min_era = 2,
},
{
.name = "authenc(hmac(sha384),cbc(des3_ede))",
@@ -1282,6 +1876,7 @@ static struct caam_alg_template driver_algs[] = {
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ .min_era = 2,
},
{
.name = "authenc(hmac(sha512),cbc(des3_ede))",
@@ -1302,6 +1897,7 @@ static struct caam_alg_template driver_algs[] = {
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ .min_era = 2,
},
{
.name = "authenc(hmac(md5),cbc(des))",
@@ -1321,6 +1917,7 @@ static struct caam_alg_template driver_algs[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ .min_era = 2,
},
{
.name = "authenc(hmac(sha1),cbc(des))",
@@ -1340,6 +1937,7 @@ static struct caam_alg_template driver_algs[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ .min_era = 2,
},
{
.name = "authenc(hmac(sha224),cbc(des))",
@@ -1360,6 +1958,7 @@ static struct caam_alg_template driver_algs[] = {
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ .min_era = 2,
},
{
.name = "authenc(hmac(sha256),cbc(des))",
@@ -1380,6 +1979,7 @@ static struct caam_alg_template driver_algs[] = {
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ .min_era = 2,
},
{
.name = "authenc(hmac(sha384),cbc(des))",
@@ -1400,6 +2000,7 @@ static struct caam_alg_template driver_algs[] = {
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ .min_era = 2,
},
{
.name = "authenc(hmac(sha512),cbc(des))",
@@ -1420,6 +2021,28 @@ static struct caam_alg_template driver_algs[] = {
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ .min_era = 2,
+ },
+ /* TLS record descriptors */
+ {
+ .name = "tls10(hmac(sha1),cbc(aes))",
+ .driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = tls_setkey,
+ .setauthsize = tls_setauthsize,
+ .encrypt = tls_encrypt,
+ .decrypt = tls_decrypt,
+ .givencrypt = NULL,
+ .geniv = "<built-in>",
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ .min_era = 4,
}
};
@@ -1439,6 +2062,15 @@ static int caam_cra_init(struct crypto_tfm *tfm)
container_of(alg, struct caam_crypto_alg, crypto_alg);
struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
struct caam_drv_private *priv = dev_get_drvdata(caam_alg->ctrldev);
+ /* Digest sizes for MD5, SHA1, SHA-224, SHA-256, SHA-384, SHA-512 */
+ static const u8 digest_size[] = {
+ MD5_DIGEST_SIZE,
+ SHA1_DIGEST_SIZE,
+ SHA224_DIGEST_SIZE,
+ SHA256_DIGEST_SIZE,
+ SHA384_DIGEST_SIZE,
+ SHA512_DIGEST_SIZE
+ };
/*
* distribute tfms across job rings to ensure in-order
@@ -1455,6 +2087,16 @@ static int caam_cra_init(struct crypto_tfm *tfm)
ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;
+ /*
+ * Need authsize, in case setauthsize callback not called
+ * by upper layer (e.g. TLS).
+ */
+ if (caam_alg->alg_op)
+ ctx->authsize = digest_size[(ctx->alg_op &
+ OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT];
+ else
+ ctx->authsize = 0;
+
ctx->qidev = priv->qidev;
spin_lock_init(&ctx->lock);
@@ -1575,9 +2217,16 @@ static int __init caam_qi_algapi_init(void)
/* register crypto algorithms the device supports */
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
- /* TODO: check if h/w supports alg */
struct caam_crypto_alg *t_alg;
+ /* check if h/w supports alg */
+ if (priv->era > 0 && priv->era < driver_algs[i].min_era) {
+ dev_warn(priv->qidev, "%s needs Era %d or higher but SEC is Era %d, skipping it\n",
+ driver_algs[i].driver_name,
+ driver_algs[i].min_era, priv->era);
+ continue;
+ }
+
t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]);
if (IS_ERR(t_alg)) {
err = PTR_ERR(t_alg);