Diffstat (limited to 'drivers/crypto/caam/caamalg.c')
-rw-r--r-- | drivers/crypto/caam/caamalg.c | 1566 |
1 file changed, 1439 insertions(+), 127 deletions(-)
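One note before the diff body. The new DESC_*_LEN constants added below budget descriptor text against CAAM's 64-word descriptor buffer: each *_set_sh_desc() routine inlines key material into the shared descriptor only when shared-descriptor text, the job descriptor's I/O commands and the (padded) keys all fit. A minimal sketch of that rule, reusing macro names from the patch (the helper function itself is hypothetical, for illustration only):

/* Hypothetical helper mirroring the keys_fit_inline checks in the patch. */
static bool caam_keys_fit_inline(unsigned int sh_desc_len,
				 unsigned int key_bytes)
{
	/*
	 * Shared-descriptor text, the job descriptor's I/O commands and
	 * the (padded) key material must all fit in the 64-word
	 * descriptor buffer; otherwise keys are referenced by DMA
	 * address instead of being copied inline.
	 */
	return sh_desc_len + DESC_JOB_IO_LEN + key_bytes <=
	       CAAM_DESC_BYTES_MAX;
}

aead_null_set_sh_desc(), tls_set_sh_desc() and gcm_set_sh_desc() below each open-code this comparison, with DESC_AEAD_NULL_*_LEN, DESC_TLS10_ENC_LEN and DESC_GCM_*_LEN as the descriptor-text term and the split key pad and/or encryption key length as the key term.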
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 7c63b72..17ea75f 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -66,10 +66,21 @@ /* length of descriptors text */ #define DESC_AEAD_BASE (4 * CAAM_CMD_SZ) -#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 16 * CAAM_CMD_SZ) -#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 21 * CAAM_CMD_SZ) +#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ) +#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 18 * CAAM_CMD_SZ) #define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ) +#define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ) +#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 14 * CAAM_CMD_SZ) +#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ) + +#define DESC_TLS_BASE (4 * CAAM_CMD_SZ) +#define DESC_TLS10_ENC_LEN (DESC_TLS_BASE + 23 * CAAM_CMD_SZ) + +#define DESC_GCM_BASE (3 * CAAM_CMD_SZ) +#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 23 * CAAM_CMD_SZ) +#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 19 * CAAM_CMD_SZ) + #define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ) #define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \ 20 * CAAM_CMD_SZ) @@ -86,6 +97,7 @@ #else #define debug(format, arg...) #endif +static struct list_head alg_list; /* Set DK bit in class 1 operation if shared */ static inline void append_dec_op1(u32 *desc, u32 type) @@ -103,27 +115,14 @@ static inline void append_dec_op1(u32 *desc, u32 type) } /* - * Wait for completion of class 1 key loading before allowing - * error propagation - */ -static inline void append_dec_shr_done(u32 *desc) -{ - u32 *jump_cmd; - - jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL); - set_jump_tgt_here(desc, jump_cmd); - append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD); -} - -/* * For aead functions, read payload and write payload, * both of which are specified in req->src and req->dst */ static inline void aead_append_src_dst(u32 *desc, u32 msg_type) { + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH); - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); } /* @@ -210,9 +209,196 @@ static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx, append_key_aead(desc, ctx, keys_fit_inline); set_jump_tgt_here(desc, key_jump_cmd); +} + +static int aead_null_set_sh_desc(struct crypto_aead *aead) +{ + struct aead_tfm *tfm = &aead->base.crt_aead; + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + bool keys_fit_inline = false; + u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd; + u32 *desc; + + /* + * Job Descriptor and Shared Descriptors + * must all fit into the 64-word Descriptor h/w Buffer + */ + if (DESC_AEAD_NULL_ENC_LEN + DESC_JOB_IO_LEN + + ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX) + keys_fit_inline = true; + + /* aead_encrypt shared descriptor */ + desc = ctx->sh_desc_enc; + + init_sh_desc(desc, HDR_SHARE_SERIAL); + + /* Skip if already shared */ + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_SHRD); + if (keys_fit_inline) + append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len, + ctx->split_key_len, CLASS_2 | + KEY_DEST_MDHA_SPLIT | KEY_ENC); + else + append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 | + KEY_DEST_MDHA_SPLIT | KEY_ENC); + set_jump_tgt_here(desc, key_jump_cmd); + + /* cryptlen = seqoutlen - authsize */ + append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); 
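/*
 * Editor's note, not part of the patch: the MATH commands above and
 * below implement the following length algebra for this null-cipher
 * encrypt descriptor (ivsize is 0, so no IV term appears):
 *
 *   seqinlen    = assoclen + cryptlen     input:  assoc || payload
 *   seqoutlen   = cryptlen + authsize     output: payload || ICV
 *   REG3        = seqoutlen - authsize  = cryptlen
 *   varseqinlen = seqinlen  - REG3      = assoclen
 */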
+ + /* + * NULL encryption; IV is zero + * assoclen = (assoclen + cryptlen) - cryptlen + */ + append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ); + + /* read assoc before reading payload */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | + KEY_VLF); + + /* Prepare to read and write cryptlen bytes */ + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); + + /* + * MOVE_LEN opcode is not available in all SEC HW revisions, + * thus need to do some magic, i.e. self-patch the descriptor + * buffer. + */ + read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | + MOVE_DEST_MATH3 | + (0x6 << MOVE_LEN_SHIFT)); + write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | + MOVE_DEST_DESCBUF | + MOVE_WAITCOMP | + (0x8 << MOVE_LEN_SHIFT)); + + /* Class 2 operation */ + append_operation(desc, ctx->class2_alg_type | + OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); + + /* Read and write cryptlen bytes */ + aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1); + + set_move_tgt_here(desc, read_move_cmd); + set_move_tgt_here(desc, write_move_cmd); + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); + append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO | + MOVE_AUX_LS); + + /* Write ICV */ + append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB | + LDST_SRCDST_BYTE_CONTEXT); + + ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, + desc_bytes(desc), + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { + dev_err(jrdev, "unable to map shared descriptor\n"); + return -ENOMEM; + } +#ifdef DEBUG + print_hex_dump(KERN_ERR, + "aead null enc shdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); +#endif + + /* + * Job Descriptor and Shared Descriptors + * must all fit into the 64-word Descriptor h/w Buffer + */ + if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN + + ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX) + keys_fit_inline = true; + + desc = ctx->sh_desc_dec; + + /* aead_decrypt shared descriptor */ + init_sh_desc(desc, HDR_SHARE_SERIAL); + + /* Skip if already shared */ + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_SHRD); + if (keys_fit_inline) + append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len, + ctx->split_key_len, CLASS_2 | + KEY_DEST_MDHA_SPLIT | KEY_ENC); + else + append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 | + KEY_DEST_MDHA_SPLIT | KEY_ENC); + set_jump_tgt_here(desc, key_jump_cmd); - /* Propagate errors from shared to job descriptor */ - append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD); + /* Class 2 operation */ + append_operation(desc, ctx->class2_alg_type | + OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); + + /* assoclen + cryptlen = seqinlen - ivsize - authsize */ + append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, + ctx->authsize + tfm->ivsize); + /* assoclen = (assoclen + cryptlen) - cryptlen */ + append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); + append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ); + + /* read assoc before reading payload */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | + KEY_VLF); + + /* Prepare to read and write cryptlen bytes */ + append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ); + append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ); + + /* + * MOVE_LEN opcode is not available in all SEC HW revisions, + * thus need to do some magic, i.e. self-patch the descriptor + * buffer. 
+ */ + read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | + MOVE_DEST_MATH2 | + (0x6 << MOVE_LEN_SHIFT)); + write_move_cmd = append_move(desc, MOVE_SRC_MATH2 | + MOVE_DEST_DESCBUF | + MOVE_WAITCOMP | + (0x8 << MOVE_LEN_SHIFT)); + + /* Read and write cryptlen bytes */ + aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1); + + /* + * Insert a NOP here, since we need at least 4 instructions between + * code patching the descriptor buffer and the location being patched. + */ + jump_cmd = append_jump(desc, JUMP_TEST_ALL); + set_jump_tgt_here(desc, jump_cmd); + + set_move_tgt_here(desc, read_move_cmd); + set_move_tgt_here(desc, write_move_cmd); + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); + append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO | + MOVE_AUX_LS); + append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); + + /* Load ICV */ + append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 | + FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV); + + ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, + desc_bytes(desc), + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) { + dev_err(jrdev, "unable to map shared descriptor\n"); + return -ENOMEM; + } +#ifdef DEBUG + print_hex_dump(KERN_ERR, + "aead null dec shdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); +#endif + + return 0; } static int aead_set_sh_desc(struct crypto_aead *aead) @@ -221,13 +407,16 @@ static int aead_set_sh_desc(struct crypto_aead *aead) struct caam_ctx *ctx = crypto_aead_ctx(aead); struct device *jrdev = ctx->jrdev; bool keys_fit_inline = false; - u32 *key_jump_cmd, *jump_cmd; u32 geniv, moveiv; u32 *desc; - if (!ctx->enckeylen || !ctx->authsize) + if (!ctx->authsize) return 0; + /* NULL encryption / decryption */ + if (!ctx->enckeylen) + return aead_null_set_sh_desc(aead); + /* * Job Descriptor and Shared Descriptors * must all fit into the 64-word Descriptor h/w Buffer @@ -252,7 +441,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) /* assoclen + cryptlen = seqinlen - ivsize */ append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize); - /* assoclen + cryptlen = (assoclen + cryptlen) - cryptlen */ + /* assoclen = (assoclen + cryptlen) - cryptlen */ append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ); /* read assoc before reading payload */ @@ -295,30 +484,18 @@ static int aead_set_sh_desc(struct crypto_aead *aead) CAAM_DESC_BYTES_MAX) keys_fit_inline = true; - desc = ctx->sh_desc_dec; - /* aead_decrypt shared descriptor */ - init_sh_desc(desc, HDR_SHARE_SERIAL); - - /* Skip if already shared */ - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | - JUMP_COND_SHRD); - - append_key_aead(desc, ctx, keys_fit_inline); + desc = ctx->sh_desc_dec; - /* Only propagate error immediately if shared */ - jump_cmd = append_jump(desc, JUMP_TEST_ALL); - set_jump_tgt_here(desc, key_jump_cmd); - append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD); - set_jump_tgt_here(desc, jump_cmd); + init_sh_desc_key_aead(desc, ctx, keys_fit_inline); /* Class 2 operation */ append_operation(desc, ctx->class2_alg_type | OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); - /* assoclen + cryptlen = seqinlen - ivsize */ + /* assoclen + cryptlen = seqinlen - ivsize - authsize */ append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, - ctx->authsize + tfm->ivsize) + ctx->authsize + tfm->ivsize); /* assoclen = (assoclen + cryptlen) - cryptlen */ append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); append_math_sub(desc, VARSEQINLEN, REG3, REG2, 
CAAM_CMD_SZ); @@ -339,7 +516,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead) /* Load ICV */ append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV); - append_dec_shr_done(desc); ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc), @@ -451,6 +627,534 @@ static int aead_setauthsize(struct crypto_aead *authenc, return 0; } +static int tls_set_sh_desc(struct crypto_aead *aead) +{ + struct aead_tfm *tfm = &aead->base.crt_aead; + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + bool keys_fit_inline = false; + u32 *key_jump_cmd, *zero_payload_jump_cmd, *skip_zero_jump_cmd; + u32 genpad, clrw, jumpback, stidx; + u32 *desc; + unsigned int blocksize = crypto_aead_blocksize(aead); + /* Associated data length is always = 13 for TLS */ + unsigned int assoclen = 13; + + if (!ctx->enckeylen || !ctx->authsize) + return 0; + + /* + * TLS 1.0 encrypt shared descriptor + * Job Descriptor and Shared Descriptor + * must fit into the 64-word Descriptor h/w Buffer + */ + if (DESC_TLS10_ENC_LEN + DESC_JOB_IO_LEN + + ctx->split_key_pad_len + ctx->enckeylen <= + CAAM_DESC_BYTES_MAX) + keys_fit_inline = true; + + desc = ctx->sh_desc_enc; + + stidx = 1 << HDR_START_IDX_SHIFT; + init_sh_desc(desc, HDR_SHARE_SERIAL | stidx); + + /* skip key loading if they are loaded due to sharing */ + key_jump_cmd = append_jump(desc, JUMP_CLASS_BOTH | JUMP_JSL | + JUMP_TEST_ALL | JUMP_COND_SHRD | + JUMP_COND_SELF); + if (keys_fit_inline) { + append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len, + ctx->split_key_len, CLASS_2 | + KEY_DEST_MDHA_SPLIT | KEY_ENC); + append_key_as_imm(desc, (void *)ctx->key + + ctx->split_key_pad_len, ctx->enckeylen, + ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); + } else { + append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 | + KEY_DEST_MDHA_SPLIT | KEY_ENC); + append_key(desc, ctx->key_dma + ctx->split_key_pad_len, + ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); + } + set_jump_tgt_here(desc, key_jump_cmd); + + /* class 2 operation */ + append_operation(desc, ctx->class2_alg_type | + OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); + /* class 1 operation */ + append_operation(desc, ctx->class1_alg_type | + OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); + + /* payloadlen = input data length - (assoclen + ivlen) */ + append_math_sub_imm_u32(desc, VARSEQINLEN, SEQINLEN, IMM, assoclen + + tfm->ivsize); + + /* math1 = payloadlen + icvlen */ + append_math_add_imm_u32(desc, REG1, VARSEQINLEN, IMM, ctx->authsize); + + /* padlen = block_size - math1 % block_size */ + append_math_and_imm_u32(desc, REG3, REG1, IMM, blocksize - 1); + append_math_sub_imm_u32(desc, REG2, IMM, REG3, blocksize); + + /* cryptlen = payloadlen + icvlen + padlen */ + append_math_add(desc, VARSEQOUTLEN, REG1, REG2, 4); + + /* store encrypted payload, icv and padding */ + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF); + + /* if payload length is zero, jump to zero-payload commands */ + append_math_add(desc, NONE, ZERO, VARSEQINLEN, 4); + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL | + JUMP_COND_MATH_Z); + + /* read assoc for authentication */ + append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 | + FIFOLD_TYPE_MSG); + /* load iv in context1 */ + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX | + LDST_CLASS_1_CCB | tfm->ivsize); + /* insnoop payload */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLD_TYPE_MSG | + FIFOLD_TYPE_LAST2 | FIFOLDST_VLF); + /* jump the zero-payload 
commands */ + append_jump(desc, JUMP_TEST_ALL | 3); + + /* zero-payload commands */ + set_jump_tgt_here(desc, zero_payload_jump_cmd); + /* assoc data is the only data for authentication */ + append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 | + FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2); + /* load iv in context1 */ + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX | + LDST_CLASS_1_CCB | tfm->ivsize); + + /* send icv to encryption */ + append_move(desc, MOVE_SRC_CLASS2CTX | MOVE_DEST_CLASS1INFIFO | + ctx->authsize); + + /* generate padding and send it to encryption */ + genpad = NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_LC1 | NFIFOENTRY_FC1 | + NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_PTYPE_N; + append_load_imm_u32(desc, genpad, LDST_CLASS_IND_CCB | + LDST_SRCDST_WORD_INFO_FIFO_SZM | LDST_IMM | + (2 & LDST_LEN_MASK)); + + ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, + desc_bytes(desc), + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { + dev_err(jrdev, "unable to map shared descriptor\n"); + return -ENOMEM; + } +#ifdef DEBUG + print_hex_dump(KERN_ERR, "tls enc shdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); +#endif + + /* + * TLS 1.0 decrypt shared descriptor + * Keys do not fit inline, regardless of algorithms used + */ + desc = ctx->sh_desc_dec; + + stidx = 1 << HDR_START_IDX_SHIFT; + init_sh_desc(desc, HDR_SHARE_SERIAL | stidx); + + /* skip key loading if they are loaded due to sharing */ + key_jump_cmd = append_jump(desc, JUMP_CLASS_BOTH | JUMP_JSL | + JUMP_TEST_ALL | JUMP_COND_SHRD | + JUMP_COND_SELF); + append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 | + KEY_DEST_MDHA_SPLIT | KEY_ENC); + append_key(desc, ctx->key_dma + ctx->split_key_pad_len, + ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); + set_jump_tgt_here(desc, key_jump_cmd); + + /* class 2 operation */ + append_operation(desc, ctx->class2_alg_type | + OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); + /* class 1 operation */ + append_operation(desc, ctx->class1_alg_type | + OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT); + + /* VSIL = input data length - 2 * block_size */ + append_math_sub_imm_u32(desc, VARSEQINLEN, SEQINLEN, IMM, 2 * + blocksize); + + /* + * payloadlen + icvlen + padlen = input data length - (assoclen + + * ivsize) + */ + append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, assoclen + + tfm->ivsize); + + /* skip data to the last but one cipher block */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | LDST_VLF); + + /* load iv for the last cipher block */ + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX | + LDST_CLASS_1_CCB | tfm->ivsize); + + /* read last cipher block */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG | + FIFOLD_TYPE_LAST1 | blocksize); + + /* move decrypted block into math0 and math1 */ + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO | MOVE_DEST_MATH0 | + blocksize); + + /* clear cha1 specific registers */ + clrw = CLRW_CLR_C1MODE | CLRW_CLR_C1DATAS | CLRW_CLR_C1CTX | + CLRW_RESET_CLS1_CHA; + append_load_imm_u32(desc, clrw, LDST_CLASS_IND_CCB | + LDST_SRCDST_WORD_CLRW | LDST_IMM); + + /* rewind input sequence */ + append_seq_in_ptr_intlen(desc, 0, 65535, SQIN_RTO); + + /* key1 is in decryption form */ + append_operation(desc, ctx->class1_alg_type | OP_ALG_AAI_DK | + OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT); + + /* read sequence number */ + append_seq_fifo_load(desc, 8, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG); + /* load Type, Version and Len fields in 
math0 */ + append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_DECO | + LDST_SRCDST_WORD_DECO_MATH0 | 5); + + /* load iv in context1 */ + append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_1_CCB | + LDST_SRCDST_WORD_CLASS_CTX | tfm->ivsize); + + /* compute (padlen - 1) */ + append_math_and_imm_u64(desc, REG1, REG1, IMM, 255); + + /* math2 = icvlen + (padlen - 1) + 1 */ + append_math_add_imm_u32(desc, REG2, REG1, IMM, ctx->authsize + 1); + + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1); + + /* VSOL = payloadlen + icvlen + padlen */ + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, 4); + + /* update Len field */ + append_math_rshift_imm_u64(desc, REG0, REG0, IMM, 24); + append_math_sub(desc, REG0, REG0, REG2, 8); + + /* store decrypted payload, icv and padding */ + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF); + + /* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen)*/ + append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4); + + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL | + JUMP_COND_MATH_Z); + + /* send Type, Version and Len(pre ICV) fields to authentication */ + append_move(desc, MOVE_WAITCOMP | + MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO | + (3 << MOVE_OFFSET_SHIFT) | 5); + + /* outsnooping payload */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | + FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LAST2 | + FIFOLDST_VLF); + skip_zero_jump_cmd = append_jump(desc, JUMP_TEST_ALL | 2); + + set_jump_tgt_here(desc, zero_payload_jump_cmd); + /* send Type, Version and Len(pre ICV) fields to authentication */ + append_move(desc, MOVE_WAITCOMP | MOVE_AUX_LS | + MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO | + (3 << MOVE_OFFSET_SHIFT) | 5); + + set_jump_tgt_here(desc, skip_zero_jump_cmd); + append_math_add(desc, VARSEQINLEN, ZERO, REG2, 4); + + /* load icvlen and padlen */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG | + FIFOLD_TYPE_LAST1 | FIFOLDST_VLF); + + /* VSIL = (payloadlen + icvlen + padlen) - icvlen + padlen */ + append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4); + + /* move seqoutptr fields into math registers */ + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF | MOVE_DEST_MATH0 | + (55 * 4 << MOVE_OFFSET_SHIFT) | 20); + /* seqinptr will point to seqoutptr */ + append_math_and_imm_u32(desc, REG0, REG0, IMM, + ~(CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR)); + /* Load jump command */ + jumpback = CMD_JUMP | (char)-9; + append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM | + LDST_SRCDST_WORD_DECO_MATH2 | + (4 << LDST_OFFSET_SHIFT)); + /* move updated seqinptr fields to JD */ + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 | MOVE_DEST_DESCBUF | + (55 * 4 << MOVE_OFFSET_SHIFT) | 24); + /* read updated seqinptr */ + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 6); + + /* skip payload */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | FIFOLDST_VLF); + /* check icv */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_ICV | + FIFOLD_TYPE_LAST2 | ctx->authsize); + + ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, + desc_bytes(desc), + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) { + dev_err(jrdev, "unable to map shared descriptor\n"); + return -ENOMEM; + } +#ifdef DEBUG + print_hex_dump(KERN_ERR, "tls dec shdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); +#endif + + return 0; +} + +static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize) +{ + struct caam_ctx *ctx = crypto_aead_ctx(tls); + + ctx->authsize = authsize; + + return 0; +} + 
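/*
 * Editor's sketch, not part of the patch: the TLS 1.0 CBC padding check
 * that tls_decrypt_done() performs further below, restated over a plain
 * linear buffer. buf/len cover the unstripped output, i.e.
 * payload || ICV || pad bytes || padding_length. Like the driver code,
 * this walk is not constant-time.
 */
static int tls10_check_pad(const u8 *buf, unsigned int len)
{
	unsigned int i;
	u8 padsize;

	if (!len)
		return -EBADMSG;
	padsize = buf[len - 1];	/* trailing padding_length byte */
	if (padsize >= len)
		return -EBADMSG;
	/* every pad byte must equal the padding_length value itself */
	for (i = len - 1 - padsize; i < len - 1; i++)
		if (buf[i] != padsize)
			return -EBADMSG;
	return 0;
}
/*
 * The driver does the same walk with scatterwalk_map_and_copy(), since
 * req->dst is a scatterlist rather than a flat buffer.
 */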
+static int gcm_set_sh_desc(struct crypto_aead *aead) +{ + struct aead_tfm *tfm = &aead->base.crt_aead; + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + bool keys_fit_inline = false; + u32 *key_jump_cmd, *zero_payload_jump_cmd, + *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2; + u32 *desc; + + if (!ctx->enckeylen || !ctx->authsize) + return 0; + + /* + * AES GCM encrypt shared descriptor + * Job Descriptor and Shared Descriptor + * must fit into the 64-word Descriptor h/w Buffer + */ + if (DESC_GCM_ENC_LEN + DESC_JOB_IO_LEN + + ctx->enckeylen <= CAAM_DESC_BYTES_MAX) + keys_fit_inline = true; + + desc = ctx->sh_desc_enc; + + init_sh_desc(desc, HDR_SHARE_SERIAL); + + /* skip key loading if they are loaded due to sharing */ + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_SHRD | JUMP_COND_SELF); + if (keys_fit_inline) + append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, + ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); + else + append_key(desc, ctx->key_dma, ctx->enckeylen, + CLASS_1 | KEY_DEST_CLASS_REG); + set_jump_tgt_here(desc, key_jump_cmd); + + /* class 1 operation */ + append_operation(desc, ctx->class1_alg_type | + OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); + + /* cryptlen = seqoutlen - authsize */ + append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); + + /* assoclen + cryptlen = seqinlen - ivsize */ + append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize); + + /* assoclen = (assoclen + cryptlen) - cryptlen */ + append_math_sub(desc, REG1, REG2, REG3, CAAM_CMD_SZ); + + /* if cryptlen is ZERO jump to zero-payload commands */ + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL | + JUMP_COND_MATH_Z); + /* read IV */ + append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 | + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1); + + /* if assoclen is ZERO, skip reading the assoc data */ + append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ); + zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL | + JUMP_COND_MATH_Z); + + /* read assoc data */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | + FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); + set_jump_tgt_here(desc, zero_assoc_jump_cmd1); + + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); + + /* write encrypted data */ + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); + + /* read payload data */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | + FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1); + + /* jump the zero-payload commands */ + append_jump(desc, JUMP_TEST_ALL | 7); + + /* zero-payload commands */ + set_jump_tgt_here(desc, zero_payload_jump_cmd); + + /* if assoclen is ZERO, jump to IV reading - is the only input data */ + append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ); + zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL | + JUMP_COND_MATH_Z); + /* read IV */ + append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 | + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1); + + /* read assoc data */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | + FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1); + + /* jump to ICV writing */ + append_jump(desc, JUMP_TEST_ALL | 2); + + /* read IV - is the only input data */ + set_jump_tgt_here(desc, zero_assoc_jump_cmd2); + append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 | + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | + FIFOLD_TYPE_LAST1); + + /* write ICV */ + 
append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
+			 LDST_SRCDST_BYTE_CONTEXT);
+
+	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+					      desc_bytes(desc),
+					      DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+		dev_err(jrdev, "unable to map shared descriptor\n");
+		return -ENOMEM;
+	}
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
+		       desc_bytes(desc), 1);
+#endif
+
+	/*
+	 * Job Descriptor and Shared Descriptors
+	 * must all fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (DESC_GCM_DEC_LEN + DESC_JOB_IO_LEN +
+	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
+		keys_fit_inline = true;
+
+	desc = ctx->sh_desc_dec;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+	/* skip key loading if they are loaded due to sharing */
+	key_jump_cmd = append_jump(desc, JUMP_JSL |
+				   JUMP_TEST_ALL | JUMP_COND_SHRD |
+				   JUMP_COND_SELF);
+	if (keys_fit_inline)
+		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+	else
+		append_key(desc, ctx->key_dma, ctx->enckeylen,
+			   CLASS_1 | KEY_DEST_CLASS_REG);
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* class 1 operation */
+	append_operation(desc, ctx->class1_alg_type |
+			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+	/* assoclen + cryptlen = seqinlen - ivsize - icvsize */
+	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
+				ctx->authsize + tfm->ivsize);
+
+	/* assoclen = (assoclen + cryptlen) - cryptlen */
+	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+	append_math_sub(desc, REG1, REG3, REG2, CAAM_CMD_SZ);
+
+	/* read IV */
+	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
+			     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+
+	/* jump to zero-payload command if cryptlen is zero */
+	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
+	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
+					    JUMP_COND_MATH_Z);
+
+	append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
+	/* if assoclen is ZERO, skip reading assoc data */
+	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
+					   JUMP_COND_MATH_Z);
+	/* read assoc data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
+
+	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
+
+	/* store decrypted data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+	/* read payload data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+	/* jump the zero-payload commands */
+	append_jump(desc, JUMP_TEST_ALL | 4);
+
+	/* zero-payload command */
+	set_jump_tgt_here(desc, zero_payload_jump_cmd);
+
+	/* if assoclen is ZERO, jump to ICV reading */
+	append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
+	zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
+					   JUMP_COND_MATH_Z);
+	/* read assoc data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+	set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
+
+	/* read ICV */
+	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
+			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
+
+	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+					      desc_bytes(desc),
+					      DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+		dev_err(jrdev, "unable to map shared descriptor\n");
+		return -ENOMEM;
+	}
+#ifdef DEBUG
+
print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); +#endif + + return 0; +} + +static int gcm_setauthsize(struct crypto_aead *authenc, + unsigned int authsize) +{ + struct caam_ctx *ctx = crypto_aead_ctx(authenc); + + ctx->authsize = authsize; + gcm_set_sh_desc(authenc); + + return 0; +} + static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in, u32 authkeylen) { @@ -534,6 +1238,110 @@ badkey: return -EINVAL; } +static int tls_setkey(struct crypto_aead *aead, const u8 *key, + unsigned int keylen) +{ + /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */ + static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 }; + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + struct rtattr *rta = (void *)key; + struct crypto_authenc_key_param *param; + unsigned int authkeylen; + unsigned int enckeylen; + int ret = 0; + + param = RTA_DATA(rta); + enckeylen = be32_to_cpu(param->enckeylen); + + key += RTA_ALIGN(rta->rta_len); + keylen -= RTA_ALIGN(rta->rta_len); + + if (keylen < enckeylen) + goto badkey; + + authkeylen = keylen - enckeylen; + + if (keylen > CAAM_MAX_KEY_SIZE) + goto badkey; + + /* Pick class 2 key length from algorithm submask */ + ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >> + OP_ALG_ALGSEL_SHIFT] * 2; + ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16); + +#ifdef DEBUG + dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n", keylen, + enckeylen, authkeylen); + dev_err(jrdev, "split_key_len %d split_key_pad_len %d\n", + ctx->split_key_len, ctx->split_key_pad_len); + print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); +#endif + + ret = gen_split_aead_key(ctx, key, authkeylen); + if (ret) + goto badkey; + + /* postpend encryption key to auth split key */ + memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen); + + ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len + + enckeylen, DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, ctx->key_dma)) { + dev_err(jrdev, "unable to map key i/o memory\n"); + return -ENOMEM; + } +#ifdef DEBUG + print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, + ctx->split_key_pad_len + enckeylen, 1); +#endif + + ctx->enckeylen = enckeylen; + + ret = tls_set_sh_desc(aead); + if (ret) { + dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len + + enckeylen, DMA_TO_DEVICE); + } + + return ret; +badkey: + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; +} + +static int gcm_setkey(struct crypto_aead *aead, + const u8 *key, unsigned int keylen) +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + int ret = 0; + +#ifdef DEBUG + print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); +#endif + + memcpy(ctx->key, key, keylen); + ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, ctx->key_dma)) { + dev_err(jrdev, "unable to map key i/o memory\n"); + return -ENOMEM; + } + ctx->enckeylen = keylen; + + ret = gcm_set_sh_desc(aead); + if (ret) { + dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen, + DMA_TO_DEVICE); + } + + return ret; +} + static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, const u8 *key, unsigned int keylen) { @@ -541,7 +1349,7 @@ static int 
ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher; struct device *jrdev = ctx->jrdev; int ret = 0; - u32 *key_jump_cmd, *jump_cmd; + u32 *key_jump_cmd; u32 *desc; #ifdef DEBUG @@ -572,9 +1380,6 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, set_jump_tgt_here(desc, key_jump_cmd); - /* Propagate errors from shared to job descriptor */ - append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD); - /* Load iv */ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB | tfm->ivsize); @@ -612,11 +1417,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); - /* For aead, only propagate error immediately if shared */ - jump_cmd = append_jump(desc, JUMP_TEST_ALL); set_jump_tgt_here(desc, key_jump_cmd); - append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD); - set_jump_tgt_here(desc, jump_cmd); /* load IV */ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT | @@ -628,9 +1429,6 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, /* Perform operation */ ablkcipher_append_src_dst(desc); - /* Wait for key to load before allowing propagating error */ - append_dec_shr_done(desc); - ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc), DMA_TO_DEVICE); @@ -658,10 +1456,10 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, * @dst_nents: number of segments in output scatterlist * @dst_chained: if destination is chained * @iv_dma: dma address of iv for checking continuity and link table - * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE) * @sec4_sg_bytes: length of dma mapped sec4_sg space * @sec4_sg_dma: bus physical mapped address of h/w link table * @hw_desc: the h/w job descriptor followed by any referenced link tables + * (variable length; must not exceed MAX_CAAM_DESCSIZE) */ struct aead_edesc { int assoc_nents; @@ -817,7 +1615,7 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, ivsize, 1); print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst), - req->cryptlen, 1); + req->cryptlen - ctx->authsize, 1); #endif if (err) { @@ -854,6 +1652,102 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, aead_request_complete(req, err); } +static void tls_encrypt_done(struct device *jrdev, u32 *desc, u32 err, + void *context) +{ + struct aead_request *req = context; + struct aead_edesc *edesc; + +#ifdef DEBUG + dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); +#endif + + edesc = (struct aead_edesc *)((char *)desc - + offsetof(struct aead_edesc, hw_desc)); + + if (err) { + char tmp[CAAM_ERROR_STR_MAX]; + + dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); + } + + aead_unmap(jrdev, edesc, req); + +#ifdef DEBUG + print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc), + req->assoclen, 1); + print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst), + edesc->dst_nents ? 
100 : req->cryptlen, 1);
+#endif
+
+	kfree(edesc);
+
+	aead_request_complete(req, err);
+}
+
+static void tls_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
+			     void *context)
+{
+	struct aead_request *req = context;
+	struct aead_edesc *edesc;
+	int cryptlen = req->cryptlen;
+	u8 padsize;
+	u8 padding[255]; /* padding can be 0-255 bytes */
+	int i;
+
+#ifdef DEBUG
+	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+	edesc = (struct aead_edesc *)((char *)desc -
+		offsetof(struct aead_edesc, hw_desc));
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
+		       req->cryptlen, 1);
+#endif
+
+	if (err) {
+		char tmp[CAAM_ERROR_STR_MAX];
+
+		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
+	}
+
+	aead_unmap(jrdev, edesc, req);
+
+	/*
+	 * verify the h/w auth check passed, else return -EBADMSG
+	 */
+	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK) {
+		err = -EBADMSG;
+		goto out;
+	}
+
+	/* Padding checking */
+	cryptlen -= 1;
+	scatterwalk_map_and_copy(&padsize, req->dst, cryptlen, 1, 0);
+	if (padsize > cryptlen) {
+		err = -EBADMSG;
+		goto out;
+	}
+	cryptlen -= padsize;
+	scatterwalk_map_and_copy(padding, req->dst, cryptlen, padsize, 0);
+	/* each padding byte must be equal to padsize */
+	for (i = 0; i < padsize; i++)
+		if (padding[i] != padsize) {
+			err = -EBADMSG;
+			break;
+		}
+
+out:
+	kfree(edesc);
+
+	aead_request_complete(req, err);
+}
+
 static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
 				    void *context)
 {
@@ -971,12 +1865,9 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
 					  (edesc->src_nents ? : 1);
 		in_options = LDST_SGF;
 	}
-	if (encrypt)
-		append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
-				  req->cryptlen - authsize, in_options);
-	else
-		append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
-				  req->cryptlen, in_options);
+
+	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
+			  in_options);
 
 	if (likely(req->src == req->dst)) {
 		if (all_contig) {
@@ -997,7 +1888,8 @@
 		}
 	}
 	if (encrypt)
-		append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
+		append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize,
+				   out_options);
 	else
 		append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
 				   out_options);
@@ -1047,8 +1939,8 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
 		sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
 		in_options = LDST_SGF;
 	}
-	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
-			  req->cryptlen - authsize, in_options);
+	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
+			  in_options);
 
 	if (contig & GIV_DST_CONTIG) {
 		dst_dma = edesc->iv_dma;
@@ -1065,7 +1957,88 @@
 		}
 	}
 
-	append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen, out_options);
+	append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize,
+			   out_options);
+}
+
+/*
+ * Fill in tls job descriptor, either for encrypt or decrypt
+ */
+static void init_tls_job(u32 *sh_desc, dma_addr_t ptr,
+			 struct aead_edesc *edesc,
+			 struct aead_request *req,
+			 bool all_contig, bool encrypt, unsigned int padsize)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	int ivsize = crypto_aead_ivsize(aead);
+	int authsize = ctx->authsize;
+	u32 *desc = edesc->hw_desc;
+	u32 out_options = 0, in_options;
+	dma_addr_t
dst_dma, src_dma; + int len, sec4_sg_index = 0; + +#ifdef DEBUG + debug("assoclen %d cryptlen %d authsize %d\n", + req->assoclen, req->cryptlen, authsize); + print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc), + req->assoclen , 1); + print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, req->iv, + edesc->src_nents ? 100 : ivsize, 1); + print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), + edesc->src_nents ? 100 : req->cryptlen, 1); + print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, sh_desc, + desc_bytes(sh_desc), 1); +#endif + + len = desc_len(sh_desc); + init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); + + if (all_contig) { + src_dma = sg_dma_address(req->assoc); + in_options = 0; + } else { + src_dma = edesc->sec4_sg_dma; + sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 + + (edesc->src_nents ? : 1); + in_options = LDST_SGF; + } + + if (likely(req->src == req->dst)) { + if (all_contig) { + dst_dma = sg_dma_address(req->src); + } else { + dst_dma = src_dma + sizeof(struct sec4_sg_entry) * + ((edesc->assoc_nents ? : 1) + 1); + out_options = LDST_SGF; + } + } else { + if (!edesc->dst_nents) { + dst_dma = sg_dma_address(req->dst); + } else { + dst_dma = edesc->sec4_sg_dma + + sec4_sg_index * + sizeof(struct sec4_sg_entry); + out_options = LDST_SGF; + } + } + + /* + * For decrypt, do not strip ICV, Padding, Padding length since + * upper layer(s) perform padding checking. + */ + if (encrypt) + append_seq_out_ptr(desc, dst_dma, req->cryptlen + padsize + + authsize, out_options); + else + append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options); + + append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + + req->cryptlen, in_options); } /* @@ -1129,7 +2102,8 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr, * allocate and map the aead extended descriptor */ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, - int desc_bytes, bool *all_contig_ptr) + int desc_bytes, bool *all_contig_ptr, + bool encrypt, bool strip_icv) { struct crypto_aead *aead = crypto_aead_reqtfm(req); struct caam_ctx *ctx = crypto_aead_ctx(aead); @@ -1144,12 +2118,28 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, bool assoc_chained = false, src_chained = false, dst_chained = false; int ivsize = crypto_aead_ivsize(aead); int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes; + unsigned int authsize = ctx->authsize; + bool is_gcm = false; assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained); - src_nents = sg_count(req->src, req->cryptlen, &src_chained); - if (unlikely(req->dst != req->src)) - dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained); + if (unlikely(req->dst != req->src)) { + int extralen; + + src_nents = sg_count(req->src, req->cryptlen, &src_chained); + + if (encrypt) + extralen = authsize; + else + extralen = strip_icv ? (-authsize) : 0; + dst_nents = sg_count(req->dst, req->cryptlen + extralen, + &dst_chained); + } else { + src_nents = sg_count(req->src, + req->cryptlen + + (encrypt ? authsize : 0), + &src_chained); + } sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? 
: 1, DMA_TO_DEVICE, assoc_chained); @@ -1163,16 +2153,32 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, DMA_FROM_DEVICE, dst_chained); } - /* Check if data are contiguous */ + if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) == + OP_ALG_ALGSEL_AES) && + ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM)) + is_gcm = true; + + /* + * Check if data are contiguous. + * GCM expected input sequence: IV, AAD, text + * All other - expected input sequence: AAD, IV, text + */ iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE); - if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen != - iv_dma || src_nents || iv_dma + ivsize != - sg_dma_address(req->src)) { - all_contig = false; + if (is_gcm) + all_contig = (!assoc_nents && + iv_dma + ivsize == sg_dma_address(req->assoc) && + !src_nents && sg_dma_address(req->assoc) + + req->assoclen == sg_dma_address(req->src)); + else + all_contig = (!assoc_nents && sg_dma_address(req->assoc) + + req->assoclen == iv_dma && !src_nents && + iv_dma + ivsize == sg_dma_address(req->src)); + if (!all_contig) { assoc_nents = assoc_nents ? : 1; src_nents = src_nents ? : 1; sec4_sg_len = assoc_nents + 1 + src_nents; } + sec4_sg_len += dst_nents; sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); @@ -1201,14 +2207,26 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, sec4_sg_index = 0; if (!all_contig) { - sg_to_sec4_sg(req->assoc, - (assoc_nents ? : 1), - edesc->sec4_sg + - sec4_sg_index, 0); - sec4_sg_index += assoc_nents ? : 1; + if (!is_gcm) { + sg_to_sec4_sg(req->assoc, + (assoc_nents ? : 1), + edesc->sec4_sg + + sec4_sg_index, 0); + sec4_sg_index += assoc_nents ? : 1; + } + dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index, iv_dma, ivsize, 0); sec4_sg_index += 1; + + if (is_gcm) { + sg_to_sec4_sg(req->assoc, + (assoc_nents ? : 1), + edesc->sec4_sg + + sec4_sg_index, 0); + sec4_sg_index += assoc_nents ? : 1; + } + sg_to_sec4_sg_last(req->src, (src_nents ? : 1), edesc->sec4_sg + @@ -1233,11 +2251,9 @@ static int aead_encrypt(struct aead_request *req) u32 *desc; int ret = 0; - req->cryptlen += ctx->authsize; - /* allocate extended descriptor */ edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN * - CAAM_CMD_SZ, &all_contig); + CAAM_CMD_SZ, &all_contig, true, true); if (IS_ERR(edesc)) return PTR_ERR(edesc); @@ -1274,7 +2290,7 @@ static int aead_decrypt(struct aead_request *req) /* allocate extended descriptor */ edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN * - CAAM_CMD_SZ, &all_contig); + CAAM_CMD_SZ, &all_contig, false, true); if (IS_ERR(edesc)) return PTR_ERR(edesc); @@ -1331,7 +2347,8 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request src_nents = sg_count(req->src, req->cryptlen, &src_chained); if (unlikely(req->dst != req->src)) - dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained); + dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize, + &dst_chained); sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? 
: 1, DMA_TO_DEVICE, assoc_chained); @@ -1425,8 +2442,6 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq) u32 *desc; int ret = 0; - req->cryptlen += ctx->authsize; - /* allocate extended descriptor */ edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN * CAAM_CMD_SZ, &contig); @@ -1461,6 +2476,109 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq) return ret; } +static int aead_null_givencrypt(struct aead_givcrypt_request *areq) +{ + return aead_encrypt(&areq->areq); +} + +static int tls_encrypt(struct aead_request *req) +{ + struct aead_edesc *edesc; + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + bool all_contig; + u32 *desc; + int ret = 0; + unsigned int blocksize = crypto_aead_blocksize(aead); + unsigned int padsize; + + padsize = blocksize - ((req->cryptlen + ctx->authsize) % blocksize); + + /* + * allocate extended tls descriptor + * TLS 1.0 has no explicit IV in the packet, but it is needed as input + * since it is used by CBC. + * ctx->authsize is temporary set to include also padlen + */ + ctx->authsize += padsize; + edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ, + &all_contig, true, true); + if (IS_ERR(edesc)) + return PTR_ERR(edesc); + ctx->authsize -= padsize; + + /* Create and submit job descriptor */ + init_tls_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req, + all_contig, true, padsize); +#ifdef DEBUG + print_hex_dump(KERN_ERR, "tls enc jobdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, + desc_bytes(edesc->hw_desc), 1); +#endif + + desc = edesc->hw_desc; + ret = caam_jr_enqueue(jrdev, desc, tls_encrypt_done, req); + if (!ret) { + ret = -EINPROGRESS; + } else { + aead_unmap(jrdev, edesc, req); + kfree(edesc); + } + + return ret; +} + +static int tls_decrypt(struct aead_request *req) +{ + struct aead_edesc *edesc; + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + bool all_contig; + u32 *desc; + int ret = 0; + + /* + * allocate extended tls descriptor + * TLS 1.0 has no explicit IV in the packet, but it is needed as input + * since it is used by CBC. + * Assumption: since padding and ICV are not stripped (upper layer + * checks padding), req->dst has to be big enough to hold payloadlen + + * padlen + icvlen. 
+ */ + edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ, + &all_contig, false, false); + if (IS_ERR(edesc)) + return PTR_ERR(edesc); + +#ifdef DEBUG + print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src), + req->cryptlen, 1); +#endif + + /* Create and submit job descriptor*/ + init_tls_job(ctx->sh_desc_dec, ctx->sh_desc_dec_dma, edesc, req, + all_contig, false, 0); +#ifdef DEBUG + print_hex_dump(KERN_ERR, "tls dec jobdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, + desc_bytes(edesc->hw_desc), 1); +#endif + + desc = edesc->hw_desc; + ret = caam_jr_enqueue(jrdev, desc, tls_decrypt_done, req); + if (!ret) { + ret = -EINPROGRESS; + } else { + aead_unmap(jrdev, edesc, req); + kfree(edesc); + } + + return ret; +} + /* * allocate and map the ablkcipher extended descriptor for ablkcipher */ @@ -1645,11 +2763,130 @@ struct caam_alg_template { u32 class1_alg_type; u32 class2_alg_type; u32 alg_op; + int min_era; }; static struct caam_alg_template driver_algs[] = { /* single-pass ipsec_esp descriptor */ { + .name = "authenc(hmac(md5),ecb(cipher_null))", + .driver_name = "authenc-hmac-md5-ecb-cipher_null-caam", + .blocksize = NULL_BLOCK_SIZE, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_null_givencrypt, + .geniv = "<built-in>", + .ivsize = NULL_IV_SIZE, + .maxauthsize = MD5_DIGEST_SIZE, + }, + .class1_alg_type = 0, + .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, + }, + { + .name = "authenc(hmac(sha1),ecb(cipher_null))", + .driver_name = "authenc-hmac-sha1-ecb-cipher_null-caam", + .blocksize = NULL_BLOCK_SIZE, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_null_givencrypt, + .geniv = "<built-in>", + .ivsize = NULL_IV_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + }, + .class1_alg_type = 0, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, + }, + { + .name = "authenc(hmac(sha224),ecb(cipher_null))", + .driver_name = "authenc-hmac-sha224-ecb-cipher_null-caam", + .blocksize = NULL_BLOCK_SIZE, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_null_givencrypt, + .geniv = "<built-in>", + .ivsize = NULL_IV_SIZE, + .maxauthsize = SHA224_DIGEST_SIZE, + }, + .class1_alg_type = 0, + .class2_alg_type = OP_ALG_ALGSEL_SHA224 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, + }, + { + .name = "authenc(hmac(sha256),ecb(cipher_null))", + .driver_name = "authenc-hmac-sha256-ecb-cipher_null-caam", + .blocksize = NULL_BLOCK_SIZE, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_null_givencrypt, + .geniv = "<built-in>", + .ivsize = NULL_IV_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + }, + .class1_alg_type = 0, + .class2_alg_type = OP_ALG_ALGSEL_SHA256 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, + }, + { + .name = "authenc(hmac(sha384),ecb(cipher_null))", + .driver_name = 
"authenc-hmac-sha384-ecb-cipher_null-caam", + .blocksize = NULL_BLOCK_SIZE, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_null_givencrypt, + .geniv = "<built-in>", + .ivsize = NULL_IV_SIZE, + .maxauthsize = SHA384_DIGEST_SIZE, + }, + .class1_alg_type = 0, + .class2_alg_type = OP_ALG_ALGSEL_SHA384 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, + }, + { + .name = "authenc(hmac(sha512),ecb(cipher_null))", + .driver_name = "authenc-hmac-sha512-ecb-cipher_null-caam", + .blocksize = NULL_BLOCK_SIZE, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = aead_setkey, + .setauthsize = aead_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = aead_null_givencrypt, + .geniv = "<built-in>", + .ivsize = NULL_IV_SIZE, + .maxauthsize = SHA512_DIGEST_SIZE, + }, + .class1_alg_type = 0, + .class2_alg_type = OP_ALG_ALGSEL_SHA512 | + OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, + }, + { .name = "authenc(hmac(md5),cbc(aes))", .driver_name = "authenc-hmac-md5-cbc-aes-caam", .blocksize = AES_BLOCK_SIZE, @@ -1667,6 +2904,7 @@ static struct caam_alg_template driver_algs[] = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, + .min_era = 2, }, { .name = "authenc(hmac(sha1),cbc(aes))", @@ -1686,6 +2924,7 @@ static struct caam_alg_template driver_algs[] = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, + .min_era = 2, }, { .name = "authenc(hmac(sha224),cbc(aes))", @@ -1706,6 +2945,7 @@ static struct caam_alg_template driver_algs[] = { .class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP, .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, + .min_era = 2, }, { .name = "authenc(hmac(sha256),cbc(aes))", @@ -1726,6 +2966,7 @@ static struct caam_alg_template driver_algs[] = { .class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP, .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, + .min_era = 2, }, { .name = "authenc(hmac(sha384),cbc(aes))", @@ -1746,6 +2987,7 @@ static struct caam_alg_template driver_algs[] = { .class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP, .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, + .min_era = 2, }, { @@ -1767,6 +3009,7 @@ static struct caam_alg_template driver_algs[] = { .class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP, .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, + .min_era = 2, }, { .name = "authenc(hmac(md5),cbc(des3_ede))", @@ -1786,6 +3029,7 @@ static struct caam_alg_template driver_algs[] = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, + .min_era = 2, }, { .name = "authenc(hmac(sha1),cbc(des3_ede))", @@ -1805,6 +3049,7 @@ static struct caam_alg_template driver_algs[] = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, + .min_era = 2, }, { .name = "authenc(hmac(sha224),cbc(des3_ede))", @@ -1825,6 +3070,7 @@ static struct caam_alg_template driver_algs[] = { .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 
OP_ALG_AAI_HMAC_PRECOMP, .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, + .min_era = 2, }, { .name = "authenc(hmac(sha256),cbc(des3_ede))", @@ -1845,6 +3091,7 @@ static struct caam_alg_template driver_algs[] = { .class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP, .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, + .min_era = 2, }, { .name = "authenc(hmac(sha384),cbc(des3_ede))", @@ -1865,6 +3112,7 @@ static struct caam_alg_template driver_algs[] = { .class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP, .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, + .min_era = 2, }, { .name = "authenc(hmac(sha512),cbc(des3_ede))", @@ -1885,6 +3133,7 @@ static struct caam_alg_template driver_algs[] = { .class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP, .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, + .min_era = 2, }, { .name = "authenc(hmac(md5),cbc(des))", @@ -1904,6 +3153,7 @@ static struct caam_alg_template driver_algs[] = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, + .min_era = 2, }, { .name = "authenc(hmac(sha1),cbc(des))", @@ -1923,6 +3173,7 @@ static struct caam_alg_template driver_algs[] = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, + .min_era = 2, }, { .name = "authenc(hmac(sha224),cbc(des))", @@ -1943,6 +3194,7 @@ static struct caam_alg_template driver_algs[] = { .class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP, .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, + .min_era = 2, }, { .name = "authenc(hmac(sha256),cbc(des))", @@ -1963,6 +3215,7 @@ static struct caam_alg_template driver_algs[] = { .class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP, .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, + .min_era = 2, }, { .name = "authenc(hmac(sha384),cbc(des))", @@ -1983,6 +3236,7 @@ static struct caam_alg_template driver_algs[] = { .class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP, .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, + .min_era = 2, }, { .name = "authenc(hmac(sha512),cbc(des))", @@ -2003,6 +3257,47 @@ static struct caam_alg_template driver_algs[] = { .class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP, .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, + .min_era = 2, + }, + /* TLS record descriptors */ + { + .name = "tls10(hmac(sha1),cbc(aes))", + .driver_name = "tls10-hmac-sha1-cbc-aes-caam", + .blocksize = AES_BLOCK_SIZE, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = tls_setkey, + .setauthsize = tls_setauthsize, + .encrypt = tls_encrypt, + .decrypt = tls_decrypt, + .givencrypt = NULL, + .geniv = "<built-in>", + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + }, + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, + .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, + .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, + .min_era = 4, + }, + /* Galois Counter Mode */ + { + .name = "gcm(aes)", + .driver_name = "gcm-aes-caam", + .blocksize = AES_BLOCK_SIZE, + .type = CRYPTO_ALG_TYPE_AEAD, + .template_aead = { + .setkey = gcm_setkey, + .setauthsize = gcm_setauthsize, + .encrypt = aead_encrypt, + .decrypt = aead_decrypt, + .givencrypt = NULL, + .geniv = "<built-in>", + .ivsize = 12, + .maxauthsize = 16, + }, + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, + .min_era = 2, }, /* 
{
@@ -2020,6 +3315,7 @@ static struct caam_alg_template driver_algs[] = {
.ivsize = AES_BLOCK_SIZE,
},
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .min_era = 2,
},
{
.name = "cbc(des3_ede)",
@@ -2036,6 +3332,7 @@ static struct caam_alg_template driver_algs[] = {
.ivsize = DES3_EDE_BLOCK_SIZE,
},
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .min_era = 2,
},
{
.name = "cbc(des)",
@@ -2052,12 +3349,12 @@ static struct caam_alg_template driver_algs[] = {
.ivsize = DES_BLOCK_SIZE,
},
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .min_era = 2,
}
};

struct caam_crypto_alg {
struct list_head entry;
- struct device *ctrldev;
int class1_alg_type;
int class2_alg_type;
int alg_op;
@@ -2070,20 +3367,37 @@ static int caam_cra_init(struct crypto_tfm *tfm)
struct caam_crypto_alg *caam_alg =
container_of(alg, struct caam_crypto_alg, crypto_alg);
struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
- struct caam_drv_private *priv = dev_get_drvdata(caam_alg->ctrldev);
- int tgt_jr = atomic_inc_return(&priv->tfm_count);
-
- /*
- * distribute tfms across job rings to ensure in-order
- * crypto request processing per tfm
- */
- ctx->jrdev = priv->jrdev[(tgt_jr / 2) % priv->total_jobrs];
+ /* Digest sizes for MD5, SHA1, SHA-224, SHA-256, SHA-384, SHA-512 */
+ static const u8 digest_size[] = {
+ MD5_DIGEST_SIZE,
+ SHA1_DIGEST_SIZE,
+ SHA224_DIGEST_SIZE,
+ SHA256_DIGEST_SIZE,
+ SHA384_DIGEST_SIZE,
+ SHA512_DIGEST_SIZE
+ };
+
+ ctx->jrdev = caam_jr_alloc();
+ if (IS_ERR(ctx->jrdev)) {
+ pr_err("Job Ring Device allocation for transform failed\n");
+ return PTR_ERR(ctx->jrdev);
+ }

/* copy descriptor header template value */
ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;

+ /*
+ * Need authsize, in case setauthsize callback not called
+ * by upper layer (e.g. TLS).
+ */
+ if (caam_alg->alg_op)
+ ctx->authsize = digest_size[(ctx->alg_op &
+ OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT];
+ else
+ ctx->authsize = 0;
+
return 0;
}
@@ -2104,44 +3418,31 @@ static void caam_cra_exit(struct crypto_tfm *tfm)
dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
desc_bytes(ctx->sh_desc_givenc),
DMA_TO_DEVICE);
+ if (ctx->key_dma &&
+ !dma_mapping_error(ctx->jrdev, ctx->key_dma))
+ dma_unmap_single(ctx->jrdev, ctx->key_dma,
+ ctx->enckeylen + ctx->split_key_pad_len,
+ DMA_TO_DEVICE);
+
+ caam_jr_free(ctx->jrdev);
}

static void __exit caam_algapi_exit(void)
{
- struct device_node *dev_node;
- struct platform_device *pdev;
- struct device *ctrldev;
- struct caam_drv_private *priv;
struct caam_crypto_alg *t_alg, *n;

- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
- if (!dev_node) {
- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
- if (!dev_node)
- return;
- }
-
- pdev = of_find_device_by_node(dev_node);
- if (!pdev)
+ if (!alg_list.next)
return;

- ctrldev = &pdev->dev;
- of_node_put(dev_node);
- priv = dev_get_drvdata(ctrldev);
-
- if (!priv->alg_list.next)
- return;
-
- list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
+ list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
crypto_unregister_alg(&t_alg->crypto_alg);
list_del(&t_alg->entry);
kfree(t_alg);
}
}

-static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
- struct caam_alg_template
+static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
*template)
{
struct caam_crypto_alg *t_alg;
@@ -2149,7 +3450,7 @@ static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,

t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
if (!t_alg) {
- dev_err(ctrldev, "failed to allocate t_alg\n");
+ pr_err("failed to allocate t_alg\n");
return ERR_PTR(-ENOMEM);
}

@@ -2181,7 +3482,6 @@ static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
t_alg->class1_alg_type = template->class1_alg_type;
t_alg->class2_alg_type = template->class2_alg_type;
t_alg->alg_op = template->alg_op;
- t_alg->ctrldev = ctrldev;

return t_alg;
}
@@ -2209,34 +3509,46 @@ static int __init caam_algapi_init(void)
priv = dev_get_drvdata(ctrldev);
of_node_put(dev_node);

- INIT_LIST_HEAD(&priv->alg_list);
+ /*
+ * If priv is NULL, it's probably because the caam driver wasn't
+ * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+ */
+ if (!priv)
+ return -ENODEV;
+
- atomic_set(&priv->tfm_count, -1);
+ INIT_LIST_HEAD(&alg_list);

/* register crypto algorithms the device supports */
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
- /* TODO: check if h/w supports alg */
struct caam_crypto_alg *t_alg;

- t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]);
+ /* check if h/w supports alg */
+ if (priv->era > 0 && priv->era < driver_algs[i].min_era) {
+ pr_warn("%s needs Era %d or higher but SEC is Era %d, skipping it\n",
+ driver_algs[i].driver_name,
+ driver_algs[i].min_era, priv->era);
+ continue;
+ }
+
+ t_alg = caam_alg_alloc(&driver_algs[i]);
if (IS_ERR(t_alg)) {
err = PTR_ERR(t_alg);
- dev_warn(ctrldev, "%s alg allocation failed\n",
- driver_algs[i].driver_name);
+ pr_warn("%s alg allocation failed\n",
+ driver_algs[i].driver_name);
continue;
}

err = crypto_register_alg(&t_alg->crypto_alg);
if (err) {
- dev_warn(ctrldev, "%s alg registration failed\n",
+ pr_warn("%s alg registration failed\n",
t_alg->crypto_alg.cra_driver_name);
kfree(t_alg);
} else
- list_add_tail(&t_alg->entry, &priv->alg_list);
+ list_add_tail(&t_alg->entry, &alg_list);
}

- if (!list_empty(&priv->alg_list))
- dev_info(ctrldev, "%s algorithms registered in /proc/crypto\n",
- (char *)of_get_property(dev_node, "compatible", NULL));
+ if (!list_empty(&alg_list))
+ pr_info("caam algorithms registered in /proc/crypto\n");

return err;
}
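For context beyond the patch itself: below is a minimal sketch of how an in-kernel caller might exercise one of the newly registered AEAD algorithms, here "gcm(aes)", through the legacy AEAD interface this tree still uses (the crt_aead/givencrypt-era API). Everything prefixed example_ is a hypothetical name, the all-zero key and the buffer contents are illustrative only, and a real caller would size the destination from the negotiated authsize and propagate errors more carefully.

#include <linux/completion.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

struct example_result {
	struct completion done;
	int err;
};

/* Completion callback: CAAM finishes requests asynchronously */
static void example_aead_done(struct crypto_async_request *areq, int err)
{
	struct example_result *res = areq->data;

	if (err == -EINPROGRESS)
		return;	/* request only moved from backlog to hardware */
	res->err = err;
	complete(&res->done);
}

static int example_gcm_encrypt(void)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist src, dst;
	struct example_result res;
	static u8 key[16];		/* illustrative all-zero AES-128 key */
	static u8 iv[12];		/* 12-byte IV, matching .ivsize above */
	static u8 buf[16] = "illustrative txt";
	static u8 out[16 + 16];		/* ciphertext plus 16-byte tag */
	int err;

	/* The core picks gcm-aes-caam if it registered and wins on priority */
	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, sizeof(key));
	if (!err)
		err = crypto_aead_setauthsize(tfm, 16);
	if (err)
		goto out_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_tfm;
	}

	init_completion(&res.done);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  example_aead_done, &res);
	sg_init_one(&src, buf, sizeof(buf));
	sg_init_one(&dst, out, sizeof(out));
	aead_request_set_assoc(req, NULL, 0);	/* no AAD in this sketch */
	aead_request_set_crypt(req, &src, &dst, sizeof(buf), iv);

	err = crypto_aead_encrypt(req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		wait_for_completion(&res.done);
		err = res.err;
	}

	aead_request_free(req);
out_tfm:
	crypto_free_aead(tfm);
	return err;
}

Because the CAAM job ring completes descriptors from interrupt context, the completion-based wait is the usual calling pattern; a synchronous 0 return from crypto_aead_encrypt() is also possible and falls through the same cleanup path.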