Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/Makefile             |    2
-rw-r--r--  drivers/crypto/caam/Kconfig         |   72
-rw-r--r--  drivers/crypto/caam/Makefile        |   15
-rw-r--r--  drivers/crypto/caam/caamalg.c       | 2123
-rw-r--r--  drivers/crypto/caam/caamalg_desc.c  | 1913
-rw-r--r--  drivers/crypto/caam/caamalg_desc.h  |  127
-rw-r--r--  drivers/crypto/caam/caamalg_qi.c    | 2877
-rw-r--r--  drivers/crypto/caam/caamalg_qi2.c   | 4428
-rw-r--r--  drivers/crypto/caam/caamalg_qi2.h   |  265
-rw-r--r--  drivers/crypto/caam/caamhash.c      |  521
-rw-r--r--  drivers/crypto/caam/caampkc.c       |  471
-rw-r--r--  drivers/crypto/caam/caampkc.h       |   58
-rw-r--r--  drivers/crypto/caam/caamrng.c       |   16
-rw-r--r--  drivers/crypto/caam/compat.h        |    1
-rw-r--r--  drivers/crypto/caam/ctrl.c          |  356
-rw-r--r--  drivers/crypto/caam/ctrl.h          |    2
-rw-r--r--  drivers/crypto/caam/desc.h          |   55
-rw-r--r--  drivers/crypto/caam/desc_constr.h   |  139
-rw-r--r--  drivers/crypto/caam/dpseci.c        |  859
-rw-r--r--  drivers/crypto/caam/dpseci.h        |  395
-rw-r--r--  drivers/crypto/caam/dpseci_cmd.h    |  261
-rw-r--r--  drivers/crypto/caam/error.c         |  127
-rw-r--r--  drivers/crypto/caam/error.h         |   10
-rw-r--r--  drivers/crypto/caam/intern.h        |   31
-rw-r--r--  drivers/crypto/caam/jr.c            |   97
-rw-r--r--  drivers/crypto/caam/jr.h            |    2
-rw-r--r--  drivers/crypto/caam/key_gen.c       |   32
-rw-r--r--  drivers/crypto/caam/key_gen.h       |   36
-rw-r--r--  drivers/crypto/caam/pdb.h           |   62
-rw-r--r--  drivers/crypto/caam/pkc_desc.c      |   36
-rw-r--r--  drivers/crypto/caam/qi.c            |  797
-rw-r--r--  drivers/crypto/caam/qi.h            |  204
-rw-r--r--  drivers/crypto/caam/regs.h          |   63
-rw-r--r--  drivers/crypto/caam/sg_sw_qm.h      |  126
-rw-r--r--  drivers/crypto/caam/sg_sw_qm2.h     |   81
-rw-r--r--  drivers/crypto/caam/sg_sw_sec4.h    |   60
36 files changed, 14354 insertions, 2366 deletions
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index ad7250f..6d788fd 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -3,7 +3,7 @@ obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
obj-$(CONFIG_CRYPTO_DEV_BFIN_CRC) += bfin_crc.o
obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/
-obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += caam/
obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index 64bf302..3831a6f 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -1,6 +1,11 @@
+config CRYPTO_DEV_FSL_CAAM_COMMON
+ tristate
+
config CRYPTO_DEV_FSL_CAAM
- tristate "Freescale CAAM-Multicore driver backend"
+ tristate "Freescale CAAM-Multicore platform driver backend"
depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
+ select CRYPTO_DEV_FSL_CAAM_COMMON
+ select SOC_BUS
help
Enables the driver module for Freescale's Cryptographic Accelerator
and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
@@ -11,9 +16,16 @@ config CRYPTO_DEV_FSL_CAAM
To compile this driver as a module, choose M here: the module
will be called caam.
+if CRYPTO_DEV_FSL_CAAM
+
+config CRYPTO_DEV_FSL_CAAM_DEBUG
+ bool "Enable debug output in CAAM driver"
+ help
+ Selecting this will enable printing of various debug
+ information in the CAAM driver.
+
config CRYPTO_DEV_FSL_CAAM_JR
tristate "Freescale CAAM Job Ring driver backend"
- depends on CRYPTO_DEV_FSL_CAAM
default y
help
Enables the driver module for Job Rings which are part of
@@ -24,9 +36,10 @@ config CRYPTO_DEV_FSL_CAAM_JR
To compile this driver as a module, choose M here: the module
will be called caam_jr.
+if CRYPTO_DEV_FSL_CAAM_JR
+
config CRYPTO_DEV_FSL_CAAM_RINGSIZE
int "Job Ring size"
- depends on CRYPTO_DEV_FSL_CAAM_JR
range 2 9
default "9"
help
@@ -44,7 +57,6 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE
config CRYPTO_DEV_FSL_CAAM_INTC
bool "Job Ring interrupt coalescing"
- depends on CRYPTO_DEV_FSL_CAAM_JR
help
Enable the Job Ring's interrupt coalescing feature.
@@ -74,7 +86,6 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
tristate "Register algorithm implementations with the Crypto API"
- depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_AEAD
select CRYPTO_AUTHENC
@@ -87,9 +98,25 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
To compile this as a module, choose M here: the module
will be called caamalg.
+config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
+ tristate "Queue Interface as Crypto API backend"
+ depends on FSL_SDK_DPA && NET
+ default y
+ select CRYPTO_AUTHENC
+ select CRYPTO_BLKCIPHER
+ help
+ Selecting this will use CAAM Queue Interface (QI) for sending
+ & receiving crypto jobs to/from CAAM. This gives better performance
+ than job ring interface when the number of cores are more than the
+ number of job rings assigned to the kernel. The number of portals
+ assigned to the kernel should also be more than the number of
+ job rings.
+
+ To compile this as a module, choose M here: the module
+ will be called caamalg_qi.
+
config CRYPTO_DEV_FSL_CAAM_AHASH_API
tristate "Register hash algorithm implementations with Crypto API"
- depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_HASH
help
@@ -101,7 +128,6 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API
config CRYPTO_DEV_FSL_CAAM_PKC_API
tristate "Register public key cryptography implementations with Crypto API"
- depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_RSA
help
@@ -113,7 +139,6 @@ config CRYPTO_DEV_FSL_CAAM_PKC_API
config CRYPTO_DEV_FSL_CAAM_RNG_API
tristate "Register caam device for hwrng API"
- depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_RNG
select HW_RANDOM
@@ -124,13 +149,26 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
To compile this as a module, choose M here: the module
will be called caamrng.
-config CRYPTO_DEV_FSL_CAAM_IMX
- def_bool SOC_IMX6 || SOC_IMX7D
- depends on CRYPTO_DEV_FSL_CAAM
+endif # CRYPTO_DEV_FSL_CAAM_JR
-config CRYPTO_DEV_FSL_CAAM_DEBUG
- bool "Enable debug output in CAAM driver"
- depends on CRYPTO_DEV_FSL_CAAM
- help
- Selecting this will enable printing of various debug
- information in the CAAM driver.
+endif # CRYPTO_DEV_FSL_CAAM
+
+config CRYPTO_DEV_FSL_DPAA2_CAAM
+ tristate "QorIQ DPAA2 CAAM (DPSECI) driver"
+ depends on FSL_MC_DPIO
+ select CRYPTO_DEV_FSL_CAAM_COMMON
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_AUTHENC
+ select CRYPTO_AEAD
+ ---help---
+ CAAM driver for QorIQ Data Path Acceleration Architecture 2.
+ It handles DPSECI DPAA2 objects that sit on the Management Complex
+ (MC) fsl-mc bus.
+
+ To compile this as a module, choose M here: the module
+ will be called dpaa2_caam.
+
+config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
+ def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \
+ CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI || \
+ CRYPTO_DEV_FSL_DPAA2_CAAM)
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
index 08bf551..01f73a2 100644
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -5,13 +5,26 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG), y)
ccflags-y := -DDEBUG
endif
+ccflags-y += -DVERSION=\"\"
+
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += error.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
caam-objs := ctrl.o
-caam_jr-objs := jr.o key_gen.o error.o
+caam_jr-objs := jr.o key_gen.o
caam_pkc-y := caampkc.o pkc_desc.o
+ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
+ ccflags-y += -DCONFIG_CAAM_QI
+ caam-objs += qi.o
+endif
+
+obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o
+
+dpaa2_caam-y := caamalg_qi2.o dpseci.o
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 0d743c6..6480a01 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -2,6 +2,7 @@
* caam - Freescale FSL CAAM support for crypto API
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright 2016 NXP
*
* Based on talitos crypto API driver.
*
@@ -53,6 +54,7 @@
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
+#include "caamalg_desc.h"
/*
* crypto alg
@@ -62,8 +64,6 @@
#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \
CTR_RFC3686_NONCE_SIZE + \
SHA512_DIGEST_SIZE * 2)
-/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
-#define CAAM_MAX_IV_LENGTH 16
#define AEAD_DESC_JOB_IO_LEN (DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
#define GCM_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \
@@ -71,37 +71,6 @@
#define AUTHENC_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \
CAAM_CMD_SZ * 5)
-/* length of descriptors text */
-#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
-#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
-#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
-#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 9 * CAAM_CMD_SZ)
-
-/* Note: Nonce is counted in enckeylen */
-#define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ)
-
-#define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ)
-#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
-#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)
-
-#define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
-#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
-#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
-
-#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
-#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
-#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
-
-#define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
-#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
-#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
-
-#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
-#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
- 20 * CAAM_CMD_SZ)
-#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
- 15 * CAAM_CMD_SZ)
-
#define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
@@ -112,47 +81,11 @@
#define debug(format, arg...)
#endif
-#ifdef DEBUG
-#include <linux/highmem.h>
-
-static void dbg_dump_sg(const char *level, const char *prefix_str,
- int prefix_type, int rowsize, int groupsize,
- struct scatterlist *sg, size_t tlen, bool ascii,
- bool may_sleep)
-{
- struct scatterlist *it;
- void *it_page;
- size_t len;
- void *buf;
-
- for (it = sg; it != NULL && tlen > 0 ; it = sg_next(sg)) {
- /*
- * make sure the scatterlist's page
- * has a valid virtual memory mapping
- */
- it_page = kmap_atomic(sg_page(it));
- if (unlikely(!it_page)) {
- printk(KERN_ERR "dbg_dump_sg: kmap failed\n");
- return;
- }
-
- buf = it_page + it->offset;
- len = min_t(size_t, tlen, it->length);
- print_hex_dump(level, prefix_str, prefix_type, rowsize,
- groupsize, buf, len, ascii);
- tlen -= len;
-
- kunmap_atomic(it_page);
- }
-}
-#endif
-
static struct list_head alg_list;
struct caam_alg_entry {
int class1_alg_type;
int class2_alg_type;
- int alg_op;
bool rfc3686;
bool geniv;
};
@@ -163,302 +96,67 @@ struct caam_aead_alg {
bool registered;
};
-/* Set DK bit in class 1 operation if shared */
-static inline void append_dec_op1(u32 *desc, u32 type)
-{
- u32 *jump_cmd, *uncond_jump_cmd;
-
- /* DK bit is valid only for AES */
- if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
- append_operation(desc, type | OP_ALG_AS_INITFINAL |
- OP_ALG_DECRYPT);
- return;
- }
-
- jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
- append_operation(desc, type | OP_ALG_AS_INITFINAL |
- OP_ALG_DECRYPT);
- uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
- set_jump_tgt_here(desc, jump_cmd);
- append_operation(desc, type | OP_ALG_AS_INITFINAL |
- OP_ALG_DECRYPT | OP_ALG_AAI_DK);
- set_jump_tgt_here(desc, uncond_jump_cmd);
-}
-
-/*
- * For aead functions, read payload and write payload,
- * both of which are specified in req->src and req->dst
- */
-static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
-{
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
- KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
-}
-
-/*
- * For ablkcipher encrypt and decrypt, read from req->src and
- * write to req->dst
- */
-static inline void ablkcipher_append_src_dst(u32 *desc)
-{
- append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
- KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
-}
-
/*
* per-session context
*/
struct caam_ctx {
- struct device *jrdev;
u32 sh_desc_enc[DESC_MAX_USED_LEN];
u32 sh_desc_dec[DESC_MAX_USED_LEN];
u32 sh_desc_givenc[DESC_MAX_USED_LEN];
+ u8 key[CAAM_MAX_KEY_SIZE];
dma_addr_t sh_desc_enc_dma;
dma_addr_t sh_desc_dec_dma;
dma_addr_t sh_desc_givenc_dma;
- u32 class1_alg_type;
- u32 class2_alg_type;
- u32 alg_op;
- u8 key[CAAM_MAX_KEY_SIZE];
dma_addr_t key_dma;
- unsigned int enckeylen;
- unsigned int split_key_len;
- unsigned int split_key_pad_len;
+ struct device *jrdev;
+ struct alginfo adata;
+ struct alginfo cdata;
unsigned int authsize;
};
-static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
- int keys_fit_inline, bool is_rfc3686)
-{
- u32 *nonce;
- unsigned int enckeylen = ctx->enckeylen;
-
- /*
- * RFC3686 specific:
- * | ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
- * | enckeylen = encryption key size + nonce size
- */
- if (is_rfc3686)
- enckeylen -= CTR_RFC3686_NONCE_SIZE;
-
- if (keys_fit_inline) {
- append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
- ctx->split_key_len, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- append_key_as_imm(desc, (void *)ctx->key +
- ctx->split_key_pad_len, enckeylen,
- enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- } else {
- append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
- enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- }
-
- /* Load Counter into CONTEXT1 reg */
- if (is_rfc3686) {
- nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
- enckeylen);
- append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
- LDST_CLASS_IND_CCB |
- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
- append_move(desc,
- MOVE_SRC_OUTFIFO |
- MOVE_DEST_CLASS1CTX |
- (16 << MOVE_OFFSET_SHIFT) |
- (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
- }
-}
-
-static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
- int keys_fit_inline, bool is_rfc3686)
-{
- u32 *key_jump_cmd;
-
- /* Note: Context registers are saved. */
- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
-
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
-
- set_jump_tgt_here(desc, key_jump_cmd);
-}
-
static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- bool keys_fit_inline = false;
- u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
u32 *desc;
+ int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
+ ctx->adata.keylen_pad;
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- if (DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN +
- ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
+ if (rem_bytes >= DESC_AEAD_NULL_ENC_LEN) {
+ ctx->adata.key_inline = true;
+ ctx->adata.key_virt = ctx->key;
+ } else {
+ ctx->adata.key_inline = false;
+ ctx->adata.key_dma = ctx->key_dma;
+ }
/* aead_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
- if (keys_fit_inline)
- append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
- ctx->split_key_len, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- else
- append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* assoclen + cryptlen = seqinlen */
- append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
-
- /* Prepare to read and write cryptlen + assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /*
- * MOVE_LEN opcode is not available in all SEC HW revisions,
- * thus need to do some magic, i.e. self-patch the descriptor
- * buffer.
- */
- read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
- MOVE_DEST_MATH3 |
- (0x6 << MOVE_LEN_SHIFT));
- write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
- MOVE_DEST_DESCBUF |
- MOVE_WAITCOMP |
- (0x8 << MOVE_LEN_SHIFT));
-
- /* Class 2 operation */
- append_operation(desc, ctx->class2_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* Read and write cryptlen bytes */
- aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
-
- set_move_tgt_here(desc, read_move_cmd);
- set_move_tgt_here(desc, write_move_cmd);
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
- append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
- MOVE_AUX_LS);
-
- /* Write ICV */
- append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
-
- ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
- desc_bytes(desc),
- DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
- dev_err(jrdev, "unable to map shared descriptor\n");
- return -ENOMEM;
- }
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "aead null enc shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
+ cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- keys_fit_inline = false;
- if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
- ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
-
- desc = ctx->sh_desc_dec;
+ if (rem_bytes >= DESC_AEAD_NULL_DEC_LEN) {
+ ctx->adata.key_inline = true;
+ ctx->adata.key_virt = ctx->key;
+ } else {
+ ctx->adata.key_inline = false;
+ ctx->adata.key_dma = ctx->key_dma;
+ }
/* aead_decrypt shared descriptor */
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
- if (keys_fit_inline)
- append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
- ctx->split_key_len, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- else
- append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Class 2 operation */
- append_operation(desc, ctx->class2_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
-
- /* assoclen + cryptlen = seqoutlen */
- append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
- /* Prepare to read and write cryptlen + assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
-
- /*
- * MOVE_LEN opcode is not available in all SEC HW revisions,
- * thus need to do some magic, i.e. self-patch the descriptor
- * buffer.
- */
- read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
- MOVE_DEST_MATH2 |
- (0x6 << MOVE_LEN_SHIFT));
- write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
- MOVE_DEST_DESCBUF |
- MOVE_WAITCOMP |
- (0x8 << MOVE_LEN_SHIFT));
-
- /* Read and write cryptlen bytes */
- aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
-
- /*
- * Insert a NOP here, since we need at least 4 instructions between
- * code patching the descriptor buffer and the location being patched.
- */
- jump_cmd = append_jump(desc, JUMP_TEST_ALL);
- set_jump_tgt_here(desc, jump_cmd);
-
- set_move_tgt_here(desc, read_move_cmd);
- set_move_tgt_here(desc, write_move_cmd);
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
- append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
- MOVE_AUX_LS);
- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
-
- /* Load ICV */
- append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
- FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
-
- ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
- desc_bytes(desc),
- DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
- dev_err(jrdev, "unable to map shared descriptor\n");
- return -ENOMEM;
- }
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "aead null dec shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
+ desc = ctx->sh_desc_dec;
+ cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
return 0;
}
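
The hunk above shows the new split of responsibilities in aead_null_set_sh_desc(): the function only decides whether the padded split key still fits inline next to the job descriptor, then delegates descriptor construction to cnstr_shdsc_aead_null_encap()/_decap() and syncs the pre-mapped buffer. Below is a minimal standalone C model of that inline-vs-referenced decision; the constant values and the struct are simplified stand-ins for illustration, not the kernel definitions.

/* Standalone model of the key-placement decision used above.
 * CAAM_CMD_SZ, AEAD_DESC_JOB_IO_LEN and DESC_AEAD_NULL_ENC_LEN below are
 * assumed placeholder values, not taken verbatim from the driver headers.
 */
#include <stdbool.h>
#include <stdio.h>

#define CAAM_CMD_SZ		4			/* bytes per descriptor command word */
#define CAAM_DESC_BYTES_MAX	(CAAM_CMD_SZ * 64)	/* 64-word descriptor h/w buffer */
#define AEAD_DESC_JOB_IO_LEN	(9 * CAAM_CMD_SZ)	/* assumed job-descriptor overhead */
#define DESC_AEAD_NULL_ENC_LEN	(14 * CAAM_CMD_SZ)	/* assumed shared-descriptor length */

struct alginfo_model {
	unsigned int keylen_pad;	/* padded split-key length (adata.keylen_pad) */
	bool key_inline;		/* place key material inside the descriptor? */
};

/* Mirror of the rem_bytes check: inline the key only if the shared
 * descriptor plus key still leaves room for the job descriptor. */
static void choose_key_placement(struct alginfo_model *adata)
{
	int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
			(int)adata->keylen_pad;

	adata->key_inline = rem_bytes >= DESC_AEAD_NULL_ENC_LEN;
}

int main(void)
{
	struct alginfo_model adata = { .keylen_pad = 64 };

	choose_key_placement(&adata);
	printf("key_inline = %s\n", adata.key_inline ? "true" : "false");
	return 0;
}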
@@ -470,11 +168,11 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- bool keys_fit_inline;
- u32 geniv, moveiv;
u32 ctx1_iv_off = 0;
- u32 *desc;
- const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
+ u32 *desc, *nonce = NULL;
+ u32 inl_mask;
+ unsigned int data_len[2];
+ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = alg->caam.rfc3686;
@@ -482,7 +180,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
return 0;
/* NULL encryption / decryption */
- if (!ctx->enckeylen)
+ if (!ctx->cdata.keylen)
return aead_null_set_sh_desc(aead);
/*
@@ -497,8 +195,14 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
* RFC3686 specific:
* CONTEXT1[255:128] = {NONCE, IV, COUNTER}
*/
- if (is_rfc3686)
+ if (is_rfc3686) {
ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+ nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
+ ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
+ }
+
+ data_len[0] = ctx->adata.keylen_pad;
+ data_len[1] = ctx->cdata.keylen;
if (alg->caam.geniv)
goto skip_enc;
@@ -507,146 +211,64 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- keys_fit_inline = false;
- if (DESC_AEAD_ENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
- ctx->split_key_pad_len + ctx->enckeylen +
- (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
- CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
-
- /* aead_encrypt shared descriptor */
- desc = ctx->sh_desc_enc;
-
- /* Note: Context registers are saved. */
- init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
-
- /* Class 2 operation */
- append_operation(desc, ctx->class2_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+ if (desc_inline_query(DESC_AEAD_ENC_LEN +
+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+ AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
+ ARRAY_SIZE(data_len)) < 0)
+ return -EINVAL;
- /* Read and write assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ if (inl_mask & 1)
+ ctx->adata.key_virt = ctx->key;
+ else
+ ctx->adata.key_dma = ctx->key_dma;
- /* Skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+ if (inl_mask & 2)
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+ else
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
- /* read assoc before reading payload */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
- FIFOLDST_VLF);
+ ctx->adata.key_inline = !!(inl_mask & 1);
+ ctx->cdata.key_inline = !!(inl_mask & 2);
- /* Load Counter into CONTEXT1 reg */
- if (is_rfc3686)
- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
-
- /* Class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* Read and write cryptlen bytes */
- append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
-
- /* Write ICV */
- append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
-
- ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
- desc_bytes(desc),
- DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
- dev_err(jrdev, "unable to map shared descriptor\n");
- return -ENOMEM;
- }
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
+ /* aead_encrypt shared descriptor */
+ desc = ctx->sh_desc_enc;
+ cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
+ ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
+ false);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
skip_enc:
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- keys_fit_inline = false;
- if (DESC_AEAD_DEC_LEN + AUTHENC_DESC_JOB_IO_LEN +
- ctx->split_key_pad_len + ctx->enckeylen +
- (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
- CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
-
- /* aead_decrypt shared descriptor */
- desc = ctx->sh_desc_dec;
-
- /* Note: Context registers are saved. */
- init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
-
- /* Class 2 operation */
- append_operation(desc, ctx->class2_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+ if (desc_inline_query(DESC_AEAD_DEC_LEN +
+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+ AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
+ ARRAY_SIZE(data_len)) < 0)
+ return -EINVAL;
- /* Read and write assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- if (alg->caam.geniv)
- append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
+ if (inl_mask & 1)
+ ctx->adata.key_virt = ctx->key;
else
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /* Skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
- /* read assoc before reading payload */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
- KEY_VLF);
-
- if (alg->caam.geniv) {
- append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- (ctx1_iv_off << LDST_OFFSET_SHIFT));
- append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
- (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
- }
+ ctx->adata.key_dma = ctx->key_dma;
- /* Load Counter into CONTEXT1 reg */
- if (is_rfc3686)
- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
-
- /* Choose operation */
- if (ctr_mode)
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
+ if (inl_mask & 2)
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
else
- append_dec_op1(desc, ctx->class1_alg_type);
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
- /* Read and write cryptlen bytes */
- append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
- aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
+ ctx->adata.key_inline = !!(inl_mask & 1);
+ ctx->cdata.key_inline = !!(inl_mask & 2);
- /* Load ICV */
- append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
- FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
-
- ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
- desc_bytes(desc),
- DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
- dev_err(jrdev, "unable to map shared descriptor\n");
- return -ENOMEM;
- }
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
+ /* aead_decrypt shared descriptor */
+ desc = ctx->sh_desc_dec;
+ cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
+ ctx->authsize, alg->caam.geniv, is_rfc3686,
+ nonce, ctx1_iv_off, false);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
if (!alg->caam.geniv)
goto skip_givenc;
@@ -655,107 +277,32 @@ skip_enc:
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- keys_fit_inline = false;
- if (DESC_AEAD_GIVENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
- ctx->split_key_pad_len + ctx->enckeylen +
- (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
- CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
+ if (desc_inline_query(DESC_AEAD_GIVENC_LEN +
+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+ AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
+ ARRAY_SIZE(data_len)) < 0)
+ return -EINVAL;
- /* aead_givencrypt shared descriptor */
- desc = ctx->sh_desc_enc;
+ if (inl_mask & 1)
+ ctx->adata.key_virt = ctx->key;
+ else
+ ctx->adata.key_dma = ctx->key_dma;
- /* Note: Context registers are saved. */
- init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
+ if (inl_mask & 2)
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+ else
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
- if (is_rfc3686)
- goto copy_iv;
-
- /* Generate IV */
- geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
- NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
- NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
- append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
- LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
- append_move(desc, MOVE_WAITCOMP |
- MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
- (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
- (ivsize << MOVE_LEN_SHIFT));
- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
-
-copy_iv:
- /* Copy IV to class 1 context */
- append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
- (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
- (ivsize << MOVE_LEN_SHIFT));
-
- /* Return to encryption */
- append_operation(desc, ctx->class2_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* Read and write assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /* ivsize + cryptlen = seqoutlen - authsize */
- append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
-
- /* Skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
- /* read assoc before reading payload */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
- KEY_VLF);
-
- /* Copy iv from outfifo to class 2 fifo */
- moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
- NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
- append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
- LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
- append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
- LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
-
- /* Load Counter into CONTEXT1 reg */
- if (is_rfc3686)
- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
-
- /* Class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* Will write ivsize + cryptlen */
- append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-
- /* Not need to reload iv */
- append_seq_fifo_load(desc, ivsize,
- FIFOLD_CLASS_SKIP);
-
- /* Will read cryptlen */
- append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
- FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
-
- /* Write ICV */
- append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
-
- ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
- desc_bytes(desc),
- DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
- dev_err(jrdev, "unable to map shared descriptor\n");
- return -ENOMEM;
- }
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
+ ctx->adata.key_inline = !!(inl_mask & 1);
+ ctx->cdata.key_inline = !!(inl_mask & 2);
+
+ /* aead_givencrypt shared descriptor */
+ desc = ctx->sh_desc_enc;
+ cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
+ ctx->authsize, is_rfc3686, nonce,
+ ctx1_iv_off, false);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
skip_givenc:
return 0;
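
For authenc-style algorithms two keys compete for descriptor space, so the rewrite queries both lengths at once: desc_inline_query() fills an inl_mask whose bit 0 covers the authentication key and bit 1 the cipher key, and each bit selects key_virt (inline) or key_dma (referenced). The sketch below is a standalone model of that mask-to-placement mapping only; desc_inline_query_model() and its constants are assumptions for illustration, not the helper from desc_constr.h.

/* Standalone model of the inl_mask handling introduced above. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define DESC_BYTES_MAX	256	/* 64 words * 4 bytes */

/* Set bit i of *inl_mask if data item i can still be inlined, consuming
 * the remaining descriptor space greedily in order. */
static int desc_inline_query_model(unsigned int sd_base_len,
				   unsigned int jd_len,
				   const unsigned int *data_len,
				   uint32_t *inl_mask, size_t count)
{
	int rem_bytes = DESC_BYTES_MAX - (int)sd_base_len - (int)jd_len;
	size_t i;

	if (rem_bytes < 0)
		return -1;	/* not even the base descriptor fits */

	*inl_mask = 0;
	for (i = 0; i < count; i++) {
		if (rem_bytes - (int)data_len[i] >= 0) {
			rem_bytes -= (int)data_len[i];
			*inl_mask |= 1u << i;
		}
	}
	return 0;
}

int main(void)
{
	unsigned int data_len[2] = { 64, 32 };	/* {keylen_pad, cipher keylen} */
	uint32_t inl_mask;

	if (desc_inline_query_model(80, 36, data_len, &inl_mask, 2))
		return 1;

	/* Same mapping as aead_set_sh_desc(): inline -> key_virt, else key_dma */
	printf("auth key %s, cipher key %s\n",
	       (inl_mask & 1) ? "inline" : "via DMA address",
	       (inl_mask & 2) ? "inline" : "via DMA address");
	return 0;
}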
@@ -776,12 +323,12 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- bool keys_fit_inline = false;
- u32 *key_jump_cmd, *zero_payload_jump_cmd,
- *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
+ unsigned int ivsize = crypto_aead_ivsize(aead);
u32 *desc;
+ int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
- if (!ctx->enckeylen || !ctx->authsize)
+ if (!ctx->cdata.keylen || !ctx->authsize)
return 0;
/*
@@ -789,175 +336,35 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
* Job Descriptor and Shared Descriptor
* must fit into the 64-word Descriptor h/w Buffer
*/
- if (DESC_GCM_ENC_LEN + GCM_DESC_JOB_IO_LEN +
- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
+ if (rem_bytes >= DESC_GCM_ENC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
desc = ctx->sh_desc_enc;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* skip key loading if they are loaded due to sharing */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD | JUMP_COND_SELF);
- if (keys_fit_inline)
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- else
- append_key(desc, ctx->key_dma, ctx->enckeylen,
- CLASS_1 | KEY_DEST_CLASS_REG);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* if assoclen + cryptlen is ZERO, skip to ICV write */
- append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
- JUMP_COND_MATH_Z);
-
- /* if assoclen is ZERO, skip reading the assoc data */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
- JUMP_COND_MATH_Z);
-
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /* skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
- /* cryptlen = seqinlen - assoclen */
- append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
-
- /* if cryptlen is ZERO jump to zero-payload commands */
- zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
- JUMP_COND_MATH_Z);
-
- /* read assoc data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
- set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
-
- append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-
- /* write encrypted data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
-
- /* read payload data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
-
- /* jump the zero-payload commands */
- append_jump(desc, JUMP_TEST_ALL | 2);
-
- /* zero-payload commands */
- set_jump_tgt_here(desc, zero_payload_jump_cmd);
-
- /* read assoc data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
-
- /* There is no input data */
- set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
-
- /* write ICV */
- append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
-
- ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
- desc_bytes(desc),
- DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
- dev_err(jrdev, "unable to map shared descriptor\n");
- return -ENOMEM;
- }
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
+ cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- keys_fit_inline = false;
- if (DESC_GCM_DEC_LEN + GCM_DESC_JOB_IO_LEN +
- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
+ if (rem_bytes >= DESC_GCM_DEC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
desc = ctx->sh_desc_dec;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* skip key loading if they are loaded due to sharing */
- key_jump_cmd = append_jump(desc, JUMP_JSL |
- JUMP_TEST_ALL | JUMP_COND_SHRD |
- JUMP_COND_SELF);
- if (keys_fit_inline)
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- else
- append_key(desc, ctx->key_dma, ctx->enckeylen,
- CLASS_1 | KEY_DEST_CLASS_REG);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
-
- /* if assoclen is ZERO, skip reading the assoc data */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
- JUMP_COND_MATH_Z);
-
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /* skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
- /* read assoc data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
-
- set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
-
- /* cryptlen = seqoutlen - assoclen */
- append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
- /* jump to zero-payload command if cryptlen is zero */
- zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
- JUMP_COND_MATH_Z);
-
- append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
- /* store encrypted data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
-
- /* read payload data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
-
- /* zero-payload command */
- set_jump_tgt_here(desc, zero_payload_jump_cmd);
-
- /* read ICV */
- append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
- FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
-
- ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
- desc_bytes(desc),
- DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
- dev_err(jrdev, "unable to map shared descriptor\n");
- return -ENOMEM;
- }
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
+ cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
return 0;
}
@@ -976,11 +383,12 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- bool keys_fit_inline = false;
- u32 *key_jump_cmd;
+ unsigned int ivsize = crypto_aead_ivsize(aead);
u32 *desc;
+ int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
- if (!ctx->enckeylen || !ctx->authsize)
+ if (!ctx->cdata.keylen || !ctx->authsize)
return 0;
/*
@@ -988,148 +396,37 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
* Job Descriptor and Shared Descriptor
* must fit into the 64-word Descriptor h/w Buffer
*/
- if (DESC_RFC4106_ENC_LEN + GCM_DESC_JOB_IO_LEN +
- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
+ if (rem_bytes >= DESC_RFC4106_ENC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
desc = ctx->sh_desc_enc;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Skip key loading if it is loaded due to sharing */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
- if (keys_fit_inline)
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- else
- append_key(desc, ctx->key_dma, ctx->enckeylen,
- CLASS_1 | KEY_DEST_CLASS_REG);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /* Read assoc data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
-
- /* Skip IV */
- append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
-
- /* Will read cryptlen bytes */
- append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-
- /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
-
- /* Skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
- /* cryptlen = seqoutlen - assoclen */
- append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
-
- /* Write encrypted data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
-
- /* Read payload data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
-
- /* Write ICV */
- append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
-
- ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
- desc_bytes(desc),
- DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
- dev_err(jrdev, "unable to map shared descriptor\n");
- return -ENOMEM;
- }
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
+ cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ false);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- keys_fit_inline = false;
- if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
+ if (rem_bytes >= DESC_RFC4106_DEC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
desc = ctx->sh_desc_dec;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Skip key loading if it is loaded due to sharing */
- key_jump_cmd = append_jump(desc, JUMP_JSL |
- JUMP_TEST_ALL | JUMP_COND_SHRD);
- if (keys_fit_inline)
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- else
- append_key(desc, ctx->key_dma, ctx->enckeylen,
- CLASS_1 | KEY_DEST_CLASS_REG);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
-
- append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /* Read assoc data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
-
- /* Skip IV */
- append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
-
- /* Will read cryptlen bytes */
- append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
-
- /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
-
- /* Skip assoc data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
- /* Will write cryptlen bytes */
- append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
- /* Store payload data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
-
- /* Read encrypted data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
-
- /* Read ICV */
- append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
- FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
-
- ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
- desc_bytes(desc),
- DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
- dev_err(jrdev, "unable to map shared descriptor\n");
- return -ENOMEM;
- }
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
+ cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ false);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
return 0;
}
@@ -1149,12 +446,12 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- bool keys_fit_inline = false;
- u32 *key_jump_cmd;
- u32 *read_move_cmd, *write_move_cmd;
+ unsigned int ivsize = crypto_aead_ivsize(aead);
u32 *desc;
+ int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
- if (!ctx->enckeylen || !ctx->authsize)
+ if (!ctx->cdata.keylen || !ctx->authsize)
return 0;
/*
@@ -1162,151 +459,37 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
* Job Descriptor and Shared Descriptor
* must fit into the 64-word Descriptor h/w Buffer
*/
- if (DESC_RFC4543_ENC_LEN + GCM_DESC_JOB_IO_LEN +
- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
+ if (rem_bytes >= DESC_RFC4543_ENC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
desc = ctx->sh_desc_enc;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Skip key loading if it is loaded due to sharing */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
- if (keys_fit_inline)
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- else
- append_key(desc, ctx->key_dma, ctx->enckeylen,
- CLASS_1 | KEY_DEST_CLASS_REG);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* assoclen + cryptlen = seqinlen */
- append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
-
- /*
- * MOVE_LEN opcode is not available in all SEC HW revisions,
- * thus need to do some magic, i.e. self-patch the descriptor
- * buffer.
- */
- read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
- (0x6 << MOVE_LEN_SHIFT));
- write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
- (0x8 << MOVE_LEN_SHIFT));
-
- /* Will read assoclen + cryptlen bytes */
- append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-
- /* Will write assoclen + cryptlen bytes */
- append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-
- /* Read and write assoclen + cryptlen bytes */
- aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
-
- set_move_tgt_here(desc, read_move_cmd);
- set_move_tgt_here(desc, write_move_cmd);
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
- /* Move payload data to OFIFO */
- append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
-
- /* Write ICV */
- append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
-
- ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
- desc_bytes(desc),
- DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
- dev_err(jrdev, "unable to map shared descriptor\n");
- return -ENOMEM;
- }
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
+ cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ false);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- keys_fit_inline = false;
- if (DESC_RFC4543_DEC_LEN + GCM_DESC_JOB_IO_LEN +
- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
+ if (rem_bytes >= DESC_RFC4543_DEC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
desc = ctx->sh_desc_dec;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Skip key loading if it is loaded due to sharing */
- key_jump_cmd = append_jump(desc, JUMP_JSL |
- JUMP_TEST_ALL | JUMP_COND_SHRD);
- if (keys_fit_inline)
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- else
- append_key(desc, ctx->key_dma, ctx->enckeylen,
- CLASS_1 | KEY_DEST_CLASS_REG);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
-
- /* assoclen + cryptlen = seqoutlen */
- append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
- /*
- * MOVE_LEN opcode is not available in all SEC HW revisions,
- * thus need to do some magic, i.e. self-patch the descriptor
- * buffer.
- */
- read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
- (0x6 << MOVE_LEN_SHIFT));
- write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
- (0x8 << MOVE_LEN_SHIFT));
-
- /* Will read assoclen + cryptlen bytes */
- append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
- /* Will write assoclen + cryptlen bytes */
- append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
- /* Store payload data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
-
- /* In-snoop assoclen + cryptlen data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
- FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
-
- set_move_tgt_here(desc, read_move_cmd);
- set_move_tgt_here(desc, write_move_cmd);
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
- /* Move payload data to OFIFO */
- append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
-
- /* Read ICV */
- append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
- FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
-
- ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
- desc_bytes(desc),
- DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
- dev_err(jrdev, "unable to map shared descriptor\n");
- return -ENOMEM;
- }
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
+ cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ false);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
return 0;
}
@@ -1322,19 +505,9 @@ static int rfc4543_setauthsize(struct crypto_aead *authenc,
return 0;
}
-static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
- u32 authkeylen)
-{
- return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
- ctx->split_key_pad_len, key_in, authkeylen,
- ctx->alg_op);
-}
-
static int aead_setkey(struct crypto_aead *aead,
const u8 *key, unsigned int keylen)
{
- /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
- static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
struct crypto_authenc_keys keys;
@@ -1343,53 +516,32 @@ static int aead_setkey(struct crypto_aead *aead,
if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto badkey;
- /* Pick class 2 key length from algorithm submask */
- ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
- OP_ALG_ALGSEL_SHIFT] * 2;
- ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
-
- if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
- goto badkey;
-
#ifdef DEBUG
printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
keys.authkeylen + keys.enckeylen, keys.enckeylen,
keys.authkeylen);
- printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
- ctx->split_key_len, ctx->split_key_pad_len);
print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
- ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
+ ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
+ keys.authkeylen, CAAM_MAX_KEY_SIZE -
+ keys.enckeylen);
if (ret) {
goto badkey;
}
/* postpend encryption key to auth split key */
- memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);
-
- ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
- keys.enckeylen, DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->key_dma)) {
- dev_err(jrdev, "unable to map key i/o memory\n");
- return -ENOMEM;
- }
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
+ dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
+ keys.enckeylen, DMA_TO_DEVICE);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
- ctx->split_key_pad_len + keys.enckeylen, 1);
+ ctx->adata.keylen_pad + keys.enckeylen, 1);
#endif
-
- ctx->enckeylen = keys.enckeylen;
-
- ret = aead_set_sh_desc(aead);
- if (ret) {
- dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
- keys.enckeylen, DMA_TO_DEVICE);
- }
-
- return ret;
+ ctx->cdata.keylen = keys.enckeylen;
+ return aead_set_sh_desc(aead);
badkey:
crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
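
After this hunk, aead_setkey() no longer derives split-key sizes from an MDHA pad table or maps the key on every call: gen_split_key() fills ctx->adata, the key buffer is DMA-mapped once up front, and setkey only appends the encryption key behind the padded split key and syncs the existing mapping. The standalone C model below illustrates that buffer layout under assumed sizes and hypothetical names; it is not the driver code.

/* Model of the key-buffer layout written by aead_setkey():
 * | split auth key (padded to keylen_pad) | encryption key |
 */
#include <stdio.h>
#include <string.h>

#define MAX_KEY_SIZE 160	/* assumed stand-in for CAAM_MAX_KEY_SIZE */

struct session_model {
	unsigned char key[MAX_KEY_SIZE];
	unsigned int auth_keylen_pad;	/* corresponds to ctx->adata.keylen_pad */
	unsigned int enc_keylen;	/* corresponds to ctx->cdata.keylen */
};

static int model_aead_setkey(struct session_model *ctx,
			     const unsigned char *split_key,
			     unsigned int split_keylen_pad,
			     const unsigned char *enc_key,
			     unsigned int enc_keylen)
{
	if (split_keylen_pad + enc_keylen > MAX_KEY_SIZE)
		return -1;	/* corresponds to the badkey path */

	/* In the driver, gen_split_key() derives and writes the split key. */
	memcpy(ctx->key, split_key, split_keylen_pad);
	/* "postpend encryption key to auth split key" */
	memcpy(ctx->key + split_keylen_pad, enc_key, enc_keylen);

	ctx->auth_keylen_pad = split_keylen_pad;
	ctx->enc_keylen = enc_keylen;
	/* dma_sync_single_for_device() on the pre-mapped buffer follows here */
	return 0;
}

int main(void)
{
	static const unsigned char auth[64], enc[16];
	struct session_model ctx = { 0 };

	if (model_aead_setkey(&ctx, auth, sizeof(auth), enc, sizeof(enc)))
		return 1;
	printf("enc key at offset %u, total %u bytes\n",
	       ctx.auth_keylen_pad, ctx.auth_keylen_pad + ctx.enc_keylen);
	return 0;
}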
@@ -1400,7 +552,6 @@ static int gcm_setkey(struct crypto_aead *aead,
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- int ret = 0;
#ifdef DEBUG
print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
@@ -1408,21 +559,10 @@ static int gcm_setkey(struct crypto_aead *aead,
#endif
memcpy(ctx->key, key, keylen);
- ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
- DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->key_dma)) {
- dev_err(jrdev, "unable to map key i/o memory\n");
- return -ENOMEM;
- }
- ctx->enckeylen = keylen;
-
- ret = gcm_set_sh_desc(aead);
- if (ret) {
- dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
- DMA_TO_DEVICE);
- }
+ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
+ ctx->cdata.keylen = keylen;
- return ret;
+ return gcm_set_sh_desc(aead);
}
static int rfc4106_setkey(struct crypto_aead *aead,
@@ -1430,7 +570,6 @@ static int rfc4106_setkey(struct crypto_aead *aead,
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- int ret = 0;
if (keylen < 4)
return -EINVAL;
@@ -1446,22 +585,10 @@ static int rfc4106_setkey(struct crypto_aead *aead,
* The last four bytes of the key material are used as the salt value
* in the nonce. Update the AES key length.
*/
- ctx->enckeylen = keylen - 4;
-
- ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
- DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->key_dma)) {
- dev_err(jrdev, "unable to map key i/o memory\n");
- return -ENOMEM;
- }
-
- ret = rfc4106_set_sh_desc(aead);
- if (ret) {
- dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
- DMA_TO_DEVICE);
- }
-
- return ret;
+ ctx->cdata.keylen = keylen - 4;
+ dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
+ DMA_TO_DEVICE);
+ return rfc4106_set_sh_desc(aead);
}
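A brief sketch of how the trimmed key length and the trailing salt bytes are consumed later; the append_data() call mirrors the init_gcm_job() hunk further down in this diff and uses only names visible there (illustration only):

	/*
	 * rfc4106/rfc4543 key material layout:
	 *
	 *   [ AES key, keylen - 4 bytes ][ salt, 4 bytes ]
	 *
	 * cdata.keylen covers only the AES part; the salt stays in ctx->key
	 * right behind it and is appended to each job descriptor separately.
	 */
	if (!generic_gcm)
		append_data(desc, ctx->key + ctx->cdata.keylen, 4);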
static int rfc4543_setkey(struct crypto_aead *aead,
@@ -1469,7 +596,6 @@ static int rfc4543_setkey(struct crypto_aead *aead,
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- int ret = 0;
if (keylen < 4)
return -EINVAL;
@@ -1485,43 +611,28 @@ static int rfc4543_setkey(struct crypto_aead *aead,
* The last four bytes of the key material are used as the salt value
* in the nonce. Update the AES key length.
*/
- ctx->enckeylen = keylen - 4;
-
- ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
- DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->key_dma)) {
- dev_err(jrdev, "unable to map key i/o memory\n");
- return -ENOMEM;
- }
-
- ret = rfc4543_set_sh_desc(aead);
- if (ret) {
- dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
- DMA_TO_DEVICE);
- }
-
- return ret;
+ ctx->cdata.keylen = keylen - 4;
+ dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
+ DMA_TO_DEVICE);
+ return rfc4543_set_sh_desc(aead);
}
static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
const u8 *key, unsigned int keylen)
{
struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
const char *alg_name = crypto_tfm_alg_name(tfm);
struct device *jrdev = ctx->jrdev;
- int ret = 0;
- u32 *key_jump_cmd;
+ unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
u32 *desc;
- u8 *nonce;
- u32 geniv;
u32 ctx1_iv_off = 0;
- const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
+ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = (ctr_mode &&
(strstr(alg_name, "rfc3686") != NULL));
+ memcpy(ctx->key, key, keylen);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -1544,215 +655,33 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
keylen -= CTR_RFC3686_NONCE_SIZE;
}
- memcpy(ctx->key, key, keylen);
- ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
- DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->key_dma)) {
- dev_err(jrdev, "unable to map key i/o memory\n");
- return -ENOMEM;
- }
- ctx->enckeylen = keylen;
+ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
+ ctx->cdata.keylen = keylen;
+ ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.key_inline = true;
/* ablkcipher_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
+ cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
+ ctx1_iv_off);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
- /* Load class1 key only */
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 |
- KEY_DEST_CLASS_REG);
-
- /* Load nonce into CONTEXT1 reg */
- if (is_rfc3686) {
- nonce = (u8 *)key + keylen;
- append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
- LDST_CLASS_IND_CCB |
- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
- append_move(desc, MOVE_WAITCOMP |
- MOVE_SRC_OUTFIFO |
- MOVE_DEST_CLASS1CTX |
- (16 << MOVE_OFFSET_SHIFT) |
- (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
- }
-
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Load iv */
- append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
-
- /* Load counter into CONTEXT1 reg */
- if (is_rfc3686)
- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
-
- /* Load operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* Perform operation */
- ablkcipher_append_src_dst(desc);
-
- ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
- desc_bytes(desc),
- DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
- dev_err(jrdev, "unable to map shared descriptor\n");
- return -ENOMEM;
- }
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
/* ablkcipher_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
+ cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
+ ctx1_iv_off);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- /* Load class1 key only */
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 |
- KEY_DEST_CLASS_REG);
-
- /* Load nonce into CONTEXT1 reg */
- if (is_rfc3686) {
- nonce = (u8 *)key + keylen;
- append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
- LDST_CLASS_IND_CCB |
- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
- append_move(desc, MOVE_WAITCOMP |
- MOVE_SRC_OUTFIFO |
- MOVE_DEST_CLASS1CTX |
- (16 << MOVE_OFFSET_SHIFT) |
- (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
- }
-
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* load IV */
- append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
-
- /* Load counter into CONTEXT1 reg */
- if (is_rfc3686)
- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
-
- /* Choose operation */
- if (ctr_mode)
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
- else
- append_dec_op1(desc, ctx->class1_alg_type);
-
- /* Perform operation */
- ablkcipher_append_src_dst(desc);
-
- ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
- desc_bytes(desc),
- DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
- dev_err(jrdev, "unable to map shared descriptor\n");
- return -ENOMEM;
- }
-
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
/* ablkcipher_givencrypt shared descriptor */
desc = ctx->sh_desc_givenc;
+ cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
+ ctx1_iv_off);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- /* Load class1 key only */
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 |
- KEY_DEST_CLASS_REG);
-
- /* Load Nonce into CONTEXT1 reg */
- if (is_rfc3686) {
- nonce = (u8 *)key + keylen;
- append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
- LDST_CLASS_IND_CCB |
- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
- append_move(desc, MOVE_WAITCOMP |
- MOVE_SRC_OUTFIFO |
- MOVE_DEST_CLASS1CTX |
- (16 << MOVE_OFFSET_SHIFT) |
- (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
- }
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Generate IV */
- geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
- NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
- NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
- append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
- LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
- append_move(desc, MOVE_WAITCOMP |
- MOVE_SRC_INFIFO |
- MOVE_DEST_CLASS1CTX |
- (crt->ivsize << MOVE_LEN_SHIFT) |
- (ctx1_iv_off << MOVE_OFFSET_SHIFT));
- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
-
- /* Copy generated IV to memory */
- append_seq_store(desc, crt->ivsize,
- LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
- (ctx1_iv_off << LDST_OFFSET_SHIFT));
-
- /* Load Counter into CONTEXT1 reg */
- if (is_rfc3686)
- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
-
- if (ctx1_iv_off)
- append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
- (1 << JUMP_OFFSET_SHIFT));
-
- /* Load operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* Perform operation */
- ablkcipher_append_src_dst(desc);
-
- ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
- desc_bytes(desc),
- DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
- dev_err(jrdev, "unable to map shared descriptor\n");
- return -ENOMEM;
- }
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
-
- return ret;
+ return 0;
}
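The same construct-then-sync pattern repeats in every setkey touched by this patch; a condensed sketch (not a literal excerpt, variable names as in the function above) of what setkey now does:

	/*
	 * Shared descriptors and the key are written into buffers that were
	 * DMA-mapped once in caam_init_common(), so setkey only rebuilds
	 * them on the CPU and then publishes the update to the device;
	 * there is no per-setkey dma_map_single()/dma_unmap_single() and
	 * no extra unmap error path.
	 */
	cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				     is_rfc3686, ctx1_iv_off);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(ctx->sh_desc_enc),
				   DMA_TO_DEVICE);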
static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
@@ -1760,8 +689,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
{
struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
struct device *jrdev = ctx->jrdev;
- u32 *key_jump_cmd, *desc;
- __be64 sector_size = cpu_to_be64(512);
+ u32 *desc;
if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
crypto_ablkcipher_set_flags(ablkcipher,
@@ -1771,126 +699,38 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
}
memcpy(ctx->key, key, keylen);
- ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->key_dma)) {
- dev_err(jrdev, "unable to map key i/o memory\n");
- return -ENOMEM;
- }
- ctx->enckeylen = keylen;
+ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
+ ctx->cdata.keylen = keylen;
+ ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.key_inline = true;
/* xts_ablkcipher_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- /* Load class1 keys only */
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
-
- /* Load sector size with index 40 bytes (0x28) */
- append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
- append_data(desc, (void *)&sector_size, 8);
-
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /*
- * create sequence for loading the sector index
- * Upper 8B of IV - will be used as sector index
- * Lower 8B of IV - will be discarded
- */
- append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
- append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
-
- /* Load operation */
- append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL |
- OP_ALG_ENCRYPT);
-
- /* Perform operation */
- ablkcipher_append_src_dst(desc);
-
- ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
- DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
- dev_err(jrdev, "unable to map shared descriptor\n");
- return -ENOMEM;
- }
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
/* xts_ablkcipher_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- /* Load class1 key only */
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
-
- /* Load sector size with index 40 bytes (0x28) */
- append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
- append_data(desc, (void *)&sector_size, 8);
-
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /*
- * create sequence for loading the sector index
- * Upper 8B of IV - will be used as sector index
- * Lower 8B of IV - will be discarded
- */
- append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
- append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
-
- /* Load operation */
- append_dec_op1(desc, ctx->class1_alg_type);
-
- /* Perform operation */
- ablkcipher_append_src_dst(desc);
-
- ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
- DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
- dma_unmap_single(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
- dev_err(jrdev, "unable to map shared descriptor\n");
- return -ENOMEM;
- }
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
+ cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
return 0;
}
/*
* aead_edesc - s/w-extended aead descriptor
- * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
- * @src_nents: number of segments in input scatterlist
- * @dst_nents: number of segments in output scatterlist
- * @iv_dma: dma address of iv for checking continuity and link table
- * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
+ * @src_nents: number of segments in input s/w scatterlist
+ * @dst_nents: number of segments in output s/w scatterlist
* @sec4_sg_bytes: length of dma mapped sec4_sg space
* @sec4_sg_dma: bus physical mapped address of h/w link table
+ * @sec4_sg: pointer to h/w link table
* @hw_desc: the h/w job descriptor followed by any referenced link tables
*/
struct aead_edesc {
- int assoc_nents;
int src_nents;
int dst_nents;
- dma_addr_t iv_dma;
int sec4_sg_bytes;
dma_addr_t sec4_sg_dma;
struct sec4_sg_entry *sec4_sg;
@@ -1899,12 +739,12 @@ struct aead_edesc {
/*
* ablkcipher_edesc - s/w-extended ablkcipher descriptor
- * @src_nents: number of segments in input scatterlist
- * @dst_nents: number of segments in output scatterlist
+ * @src_nents: number of segments in input s/w scatterlist
+ * @dst_nents: number of segments in output s/w scatterlist
* @iv_dma: dma address of iv for checking continuity and link table
- * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
* @sec4_sg_bytes: length of dma mapped sec4_sg space
* @sec4_sg_dma: bus physical mapped address of h/w link table
+ * @sec4_sg: pointer to h/w link table
* @hw_desc: the h/w job descriptor followed by any referenced link tables
*/
struct ablkcipher_edesc {
@@ -1924,10 +764,11 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
int sec4_sg_bytes)
{
if (dst != src) {
- dma_unmap_sg(dev, src, src_nents ? : 1, DMA_TO_DEVICE);
- dma_unmap_sg(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE);
+ if (src_nents)
+ dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
+ dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
} else {
- dma_unmap_sg(dev, src, src_nents ? : 1, DMA_BIDIRECTIONAL);
+ dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
}
if (iv_dma)
@@ -2021,8 +862,7 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = (struct ablkcipher_edesc *)((char *)desc -
- offsetof(struct ablkcipher_edesc, hw_desc));
+ edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
@@ -2031,10 +871,10 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->info,
edesc->src_nents > 1 ? 100 : ivsize, 1);
- dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
- edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
#endif
+ caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+ edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
ablkcipher_unmap(jrdev, edesc, req);
@@ -2062,8 +902,7 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = (struct ablkcipher_edesc *)((char *)desc -
- offsetof(struct ablkcipher_edesc, hw_desc));
+ edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
@@ -2071,10 +910,10 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->info,
ivsize, 1);
- dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
- edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
#endif
+ caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+ edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
ablkcipher_unmap(jrdev, edesc, req);
@@ -2114,7 +953,7 @@ static void init_aead_job(struct aead_request *req,
init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
if (all_contig) {
- src_dma = sg_dma_address(req->src);
+ src_dma = edesc->src_nents ? sg_dma_address(req->src) : 0;
in_options = 0;
} else {
src_dma = edesc->sec4_sg_dma;
@@ -2129,7 +968,7 @@ static void init_aead_job(struct aead_request *req,
out_options = in_options;
if (unlikely(req->src != req->dst)) {
- if (!edesc->dst_nents) {
+ if (edesc->dst_nents == 1) {
dst_dma = sg_dma_address(req->dst);
} else {
dst_dma = edesc->sec4_sg_dma +
@@ -2175,7 +1014,7 @@ static void init_gcm_job(struct aead_request *req,
FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
/* Append Salt */
if (!generic_gcm)
- append_data(desc, ctx->key + ctx->enckeylen, 4);
+ append_data(desc, ctx->key + ctx->cdata.keylen, 4);
/* Append IV */
append_data(desc, req->iv, ivsize);
/* End of blank commands */
@@ -2190,7 +1029,7 @@ static void init_authenc_job(struct aead_request *req,
struct caam_aead_alg, aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
- const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
+ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = alg->caam.rfc3686;
u32 *desc = edesc->hw_desc;
@@ -2236,16 +1075,15 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
int len, sec4_sg_index = 0;
#ifdef DEBUG
- bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
- CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->info,
ivsize, 1);
- printk(KERN_ERR "asked=%d, nbytes%d\n", (int)edesc->src_nents ? 100 : req->nbytes, req->nbytes);
- dbg_dump_sg(KERN_ERR, "src @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->src,
- edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
+ pr_err("asked=%d, nbytes%d\n",
+ (int)edesc->src_nents > 1 ? 100 : req->nbytes, req->nbytes);
#endif
+ caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->src,
+ edesc->src_nents > 1 ? 100 : req->nbytes, 1);
len = desc_len(sh_desc);
init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
@@ -2261,7 +1099,7 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
if (likely(req->src == req->dst)) {
- if (!edesc->src_nents && iv_contig) {
+ if (edesc->src_nents == 1 && iv_contig) {
dst_dma = sg_dma_address(req->src);
} else {
dst_dma = edesc->sec4_sg_dma +
@@ -2269,7 +1107,7 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
out_options = LDST_SGF;
}
} else {
- if (!edesc->dst_nents) {
+ if (edesc->dst_nents == 1) {
dst_dma = sg_dma_address(req->dst);
} else {
dst_dma = edesc->sec4_sg_dma +
@@ -2296,20 +1134,18 @@ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
int len, sec4_sg_index = 0;
#ifdef DEBUG
- bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
- CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->info,
ivsize, 1);
- dbg_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->src,
- edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
#endif
+ caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->src,
+ edesc->src_nents > 1 ? 100 : req->nbytes, 1);
len = desc_len(sh_desc);
init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
- if (!edesc->src_nents) {
+ if (edesc->src_nents == 1) {
src_dma = sg_dma_address(req->src);
in_options = 0;
} else {
@@ -2340,87 +1176,100 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
- CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
- int src_nents, dst_nents = 0;
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
struct aead_edesc *edesc;
- int sgc;
- bool all_contig = true;
- int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
+ int sec4_sg_index, sec4_sg_len, sec4_sg_bytes;
unsigned int authsize = ctx->authsize;
if (unlikely(req->dst != req->src)) {
- src_nents = sg_count(req->src, req->assoclen + req->cryptlen);
- dst_nents = sg_count(req->dst,
- req->assoclen + req->cryptlen +
- (encrypt ? authsize : (-authsize)));
- } else {
- src_nents = sg_count(req->src,
- req->assoclen + req->cryptlen +
- (encrypt ? authsize : 0));
- }
-
- /* Check if data are contiguous. */
- all_contig = !src_nents;
- if (!all_contig) {
- src_nents = src_nents ? : 1;
- sec4_sg_len = src_nents;
- }
-
- sec4_sg_len += dst_nents;
-
- sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
+ src_nents = sg_nents_for_len(req->src, req->assoclen +
+ req->cryptlen);
+ if (unlikely(src_nents < 0)) {
+ dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
+ req->assoclen + req->cryptlen);
+ return ERR_PTR(src_nents);
+ }
- /* allocate space for base edesc and hw desc commands, link tables */
- edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
- GFP_DMA | flags);
- if (!edesc) {
- dev_err(jrdev, "could not allocate extended descriptor\n");
- return ERR_PTR(-ENOMEM);
+ dst_nents = sg_nents_for_len(req->dst, req->assoclen +
+ req->cryptlen +
+ (encrypt ? authsize :
+ (-authsize)));
+ if (unlikely(dst_nents < 0)) {
+ dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
+ req->assoclen + req->cryptlen +
+ (encrypt ? authsize : (-authsize)));
+ return ERR_PTR(dst_nents);
+ }
+ } else {
+ src_nents = sg_nents_for_len(req->src, req->assoclen +
+ req->cryptlen +
+ (encrypt ? authsize : 0));
+ if (unlikely(src_nents < 0)) {
+ dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
+ req->assoclen + req->cryptlen +
+ (encrypt ? authsize : 0));
+ return ERR_PTR(src_nents);
+ }
}
if (likely(req->src == req->dst)) {
- sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
- DMA_BIDIRECTIONAL);
- if (unlikely(!sgc)) {
+ mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(!mapped_src_nents)) {
dev_err(jrdev, "unable to map source\n");
- kfree(edesc);
return ERR_PTR(-ENOMEM);
}
} else {
- sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
- DMA_TO_DEVICE);
- if (unlikely(!sgc)) {
- dev_err(jrdev, "unable to map source\n");
- kfree(edesc);
- return ERR_PTR(-ENOMEM);
+ /* Cover also the case of null (zero length) input data */
+ if (src_nents) {
+ mapped_src_nents = dma_map_sg(jrdev, req->src,
+ src_nents, DMA_TO_DEVICE);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(jrdev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+ mapped_src_nents = 0;
}
- sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
- DMA_FROM_DEVICE);
- if (unlikely(!sgc)) {
+ mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
+ DMA_FROM_DEVICE);
+ if (unlikely(!mapped_dst_nents)) {
dev_err(jrdev, "unable to map destination\n");
- dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
- DMA_TO_DEVICE);
- kfree(edesc);
+ dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
return ERR_PTR(-ENOMEM);
}
}
+ sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0;
+ sec4_sg_len += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
+ sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
+
+ /* allocate space for base edesc and hw desc commands, link tables */
+ edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
+ GFP_DMA | flags);
+ if (!edesc) {
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0);
+ return ERR_PTR(-ENOMEM);
+ }
+
edesc->src_nents = src_nents;
edesc->dst_nents = dst_nents;
edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
desc_bytes;
- *all_contig_ptr = all_contig;
+ *all_contig_ptr = !(mapped_src_nents > 1);
sec4_sg_index = 0;
- if (!all_contig) {
- sg_to_sec4_sg_last(req->src, src_nents,
- edesc->sec4_sg + sec4_sg_index, 0);
- sec4_sg_index += src_nents;
+ if (mapped_src_nents > 1) {
+ sg_to_sec4_sg_last(req->src, mapped_src_nents,
+ edesc->sec4_sg + sec4_sg_index, 0);
+ sec4_sg_index += mapped_src_nents;
}
- if (dst_nents) {
- sg_to_sec4_sg_last(req->dst, dst_nents,
+ if (mapped_dst_nents > 1) {
+ sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
edesc->sec4_sg + sec4_sg_index, 0);
}
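The scatterlist accounting above follows one pattern throughout the file: count s/w entries with sg_nents_for_len(), map them, and size the h/w link table from the mapped count. A compressed, hypothetical helper (the name and the total_len parameter are invented here) that captures it:

/*
 * Hypothetical helper, not part of the patch: map req->src for a given
 * length and report both entry counts. The sec4_sg link table is sized
 * from *mapped_nents (an IOMMU may coalesce segments), while
 * dma_unmap_sg() must later be called with the s/w count returned here.
 */
static int caam_map_src_sketch(struct device *jrdev, struct scatterlist *src,
			       int total_len, int *mapped_nents)
{
	int src_nents;

	src_nents = sg_nents_for_len(src, total_len);
	if (unlikely(src_nents < 0))
		return src_nents;	/* not enough bytes in the S/G list */

	*mapped_nents = dma_map_sg(jrdev, src, src_nents, DMA_TO_DEVICE);
	if (unlikely(!*mapped_nents))
		return -ENOMEM;

	return src_nents;		/* caller unmaps with this count */
}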
@@ -2573,13 +1422,9 @@ static int aead_decrypt(struct aead_request *req)
u32 *desc;
int ret = 0;
-#ifdef DEBUG
- bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
- CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
- dbg_dump_sg(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->src,
- req->assoclen + req->cryptlen, 1, may_sleep);
-#endif
+ caam_dump_sg(KERN_ERR, "dec src@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->src,
+ req->assoclen + req->cryptlen, 1);
/* allocate extended descriptor */
edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
@@ -2619,51 +1464,80 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- int src_nents, dst_nents = 0, sec4_sg_bytes;
+ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
struct ablkcipher_edesc *edesc;
dma_addr_t iv_dma = 0;
- bool iv_contig = false;
- int sgc;
+ bool in_contig;
int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
- int sec4_sg_index;
+ int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
- src_nents = sg_count(req->src, req->nbytes);
+ src_nents = sg_nents_for_len(req->src, req->nbytes);
+ if (unlikely(src_nents < 0)) {
+ dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
+ req->nbytes);
+ return ERR_PTR(src_nents);
+ }
- if (req->dst != req->src)
- dst_nents = sg_count(req->dst, req->nbytes);
+ if (req->dst != req->src) {
+ dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+ if (unlikely(dst_nents < 0)) {
+ dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
+ req->nbytes);
+ return ERR_PTR(dst_nents);
+ }
+ }
if (likely(req->src == req->dst)) {
- sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
- DMA_BIDIRECTIONAL);
+ mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(jrdev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
} else {
- sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
- DMA_TO_DEVICE);
- sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
- DMA_FROM_DEVICE);
+ mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(jrdev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
+ DMA_FROM_DEVICE);
+ if (unlikely(!mapped_dst_nents)) {
+ dev_err(jrdev, "unable to map destination\n");
+ dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
+ return ERR_PTR(-ENOMEM);
+ }
}
iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, iv_dma)) {
dev_err(jrdev, "unable to map IV\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0);
return ERR_PTR(-ENOMEM);
}
- /*
- * Check if iv can be contiguous with source and destination.
- * If so, include it. If not, create scatterlist.
- */
- if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
- iv_contig = true;
- else
- src_nents = src_nents ? : 1;
- sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
- sizeof(struct sec4_sg_entry);
+ if (mapped_src_nents == 1 &&
+ iv_dma + ivsize == sg_dma_address(req->src)) {
+ in_contig = true;
+ sec4_sg_ents = 0;
+ } else {
+ in_contig = false;
+ sec4_sg_ents = 1 + mapped_src_nents;
+ }
+ dst_sg_idx = sec4_sg_ents;
+ sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
+ sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -2673,23 +1547,24 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
desc_bytes;
- sec4_sg_index = 0;
- if (!iv_contig) {
+ if (!in_contig) {
dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
- sg_to_sec4_sg_last(req->src, src_nents,
+ sg_to_sec4_sg_last(req->src, mapped_src_nents,
edesc->sec4_sg + 1, 0);
- sec4_sg_index += 1 + src_nents;
}
- if (dst_nents) {
- sg_to_sec4_sg_last(req->dst, dst_nents,
- edesc->sec4_sg + sec4_sg_index, 0);
+ if (mapped_dst_nents > 1) {
+ sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
+ edesc->sec4_sg + dst_sg_idx, 0);
}
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
dev_err(jrdev, "unable to map S/G table\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, 0, 0);
+ kfree(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -2701,7 +1576,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
sec4_sg_bytes, 1);
#endif
- *iv_contig_out = iv_contig;
+ *iv_contig_out = in_contig;
return edesc;
}
@@ -2792,30 +1667,54 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
struct device *jrdev = ctx->jrdev;
- gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
- CRYPTO_TFM_REQ_MAY_SLEEP)) ?
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- int src_nents, dst_nents = 0, sec4_sg_bytes;
+ int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
struct ablkcipher_edesc *edesc;
dma_addr_t iv_dma = 0;
- bool iv_contig = false;
- int sgc;
+ bool out_contig;
int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
- int sec4_sg_index;
-
- src_nents = sg_count(req->src, req->nbytes);
+ int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
- if (unlikely(req->dst != req->src))
- dst_nents = sg_count(req->dst, req->nbytes);
+ src_nents = sg_nents_for_len(req->src, req->nbytes);
+ if (unlikely(src_nents < 0)) {
+ dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
+ req->nbytes);
+ return ERR_PTR(src_nents);
+ }
if (likely(req->src == req->dst)) {
- sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
- DMA_BIDIRECTIONAL);
+ mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(jrdev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dst_nents = src_nents;
+ mapped_dst_nents = src_nents;
} else {
- sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
- DMA_TO_DEVICE);
- sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
- DMA_FROM_DEVICE);
+ mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(jrdev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+ if (unlikely(dst_nents < 0)) {
+ dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
+ req->nbytes);
+ return ERR_PTR(dst_nents);
+ }
+
+ mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
+ DMA_FROM_DEVICE);
+ if (unlikely(!mapped_dst_nents)) {
+ dev_err(jrdev, "unable to map destination\n");
+ dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
+ return ERR_PTR(-ENOMEM);
+ }
}
/*
@@ -2825,21 +1724,29 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, iv_dma)) {
dev_err(jrdev, "unable to map IV\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0);
return ERR_PTR(-ENOMEM);
}
- if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst))
- iv_contig = true;
- else
- dst_nents = dst_nents ? : 1;
- sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
- sizeof(struct sec4_sg_entry);
+ sec4_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
+ dst_sg_idx = sec4_sg_ents;
+ if (mapped_dst_nents == 1 &&
+ iv_dma + ivsize == sg_dma_address(req->dst)) {
+ out_contig = true;
+ } else {
+ out_contig = false;
+ sec4_sg_ents += 1 + mapped_dst_nents;
+ }
/* allocate space for base edesc and hw desc commands, link tables */
+ sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -2849,24 +1756,24 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
desc_bytes;
- sec4_sg_index = 0;
- if (src_nents) {
- sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
- sec4_sg_index += src_nents;
- }
+ if (mapped_src_nents > 1)
+ sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg,
+ 0);
- if (!iv_contig) {
- dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
+ if (!out_contig) {
+ dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx,
iv_dma, ivsize, 0);
- sec4_sg_index += 1;
- sg_to_sec4_sg_last(req->dst, dst_nents,
- edesc->sec4_sg + sec4_sg_index, 0);
+ sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
+ edesc->sec4_sg + dst_sg_idx + 1, 0);
}
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
dev_err(jrdev, "unable to map S/G table\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, 0, 0);
+ kfree(edesc);
return ERR_PTR(-ENOMEM);
}
edesc->iv_dma = iv_dma;
@@ -2878,7 +1785,7 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
sec4_sg_bytes, 1);
#endif
- *iv_contig_out = iv_contig;
+ *iv_contig_out = out_contig;
return edesc;
}
@@ -2889,7 +1796,7 @@ static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
struct device *jrdev = ctx->jrdev;
- bool iv_contig;
+ bool iv_contig = false;
u32 *desc;
int ret = 0;
@@ -2933,7 +1840,6 @@ struct caam_alg_template {
} template_u;
u32 class1_alg_type;
u32 class2_alg_type;
- u32 alg_op;
};
static struct caam_alg_template driver_algs[] = {
@@ -3118,7 +2024,6 @@ static struct caam_aead_alg driver_aeads[] = {
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3140,7 +2045,6 @@ static struct caam_aead_alg driver_aeads[] = {
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3162,7 +2066,6 @@ static struct caam_aead_alg driver_aeads[] = {
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3184,7 +2087,6 @@ static struct caam_aead_alg driver_aeads[] = {
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3206,7 +2108,6 @@ static struct caam_aead_alg driver_aeads[] = {
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3228,7 +2129,6 @@ static struct caam_aead_alg driver_aeads[] = {
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3250,7 +2150,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3273,7 +2172,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3296,7 +2194,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3319,7 +2216,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3342,7 +2238,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3365,7 +2260,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3388,7 +2282,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3411,7 +2304,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3434,7 +2326,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3457,7 +2348,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3480,7 +2370,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3503,7 +2392,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3526,7 +2414,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
}
},
{
@@ -3549,7 +2436,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
.geniv = true,
}
},
@@ -3573,7 +2459,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3597,7 +2482,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3621,7 +2505,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3645,7 +2528,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3669,7 +2551,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3693,7 +2574,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3717,7 +2597,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3741,7 +2620,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3765,7 +2643,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3789,7 +2666,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3812,7 +2688,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3835,7 +2710,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3858,7 +2732,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3881,7 +2754,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3904,7 +2776,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3927,7 +2798,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3950,7 +2820,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
},
},
{
@@ -3973,7 +2842,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -3996,7 +2864,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
},
},
{
@@ -4019,7 +2886,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -4042,7 +2908,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
},
},
{
@@ -4065,7 +2930,6 @@ static struct caam_aead_alg driver_aeads[] = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
.geniv = true,
},
},
@@ -4090,7 +2954,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
},
},
@@ -4115,7 +2978,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
.geniv = true,
},
@@ -4141,7 +3003,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
},
},
@@ -4166,7 +3027,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
.geniv = true,
},
@@ -4192,7 +3052,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
},
},
@@ -4217,7 +3076,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
.geniv = true,
},
@@ -4243,7 +3101,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
},
},
@@ -4268,7 +3125,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
.geniv = true,
},
@@ -4294,7 +3150,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
},
},
@@ -4319,7 +3174,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
.geniv = true,
},
@@ -4345,7 +3199,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
},
},
@@ -4370,7 +3223,6 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
.rfc3686 = true,
.geniv = true,
},
@@ -4385,16 +3237,34 @@ struct caam_crypto_alg {
static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
{
+ dma_addr_t dma_addr;
+
ctx->jrdev = caam_jr_alloc();
if (IS_ERR(ctx->jrdev)) {
pr_err("Job Ring Device allocation for transform failed\n");
return PTR_ERR(ctx->jrdev);
}
+ dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
+ offsetof(struct caam_ctx,
+ sh_desc_enc_dma),
+ DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(ctx->jrdev, dma_addr)) {
+ dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
+ caam_jr_free(ctx->jrdev);
+ return -ENOMEM;
+ }
+
+ ctx->sh_desc_enc_dma = dma_addr;
+ ctx->sh_desc_dec_dma = dma_addr + offsetof(struct caam_ctx,
+ sh_desc_dec);
+ ctx->sh_desc_givenc_dma = dma_addr + offsetof(struct caam_ctx,
+ sh_desc_givenc);
+ ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key);
+
/* copy descriptor header template value */
- ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
- ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
- ctx->alg_op = OP_TYPE_CLASS2_ALG | caam->alg_op;
+ ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
+ ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
return 0;
}
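The single mapping above relies on the layout of struct caam_ctx, which is defined elsewhere in the driver and not visible in this hunk. The sketch below shows the ordering implied by the offsetof() arithmetic; the array bound MAX_SH_DESC_WORDS is a placeholder, and only CAAM_MAX_KEY_SIZE is taken from this diff:

/*
 * Assumed layout (sketch): everything the CPU writes sits contiguously in
 * front of sh_desc_enc_dma, so one dma_map_single_attrs() starting at
 * sh_desc_enc and spanning offsetof(struct caam_ctx, sh_desc_enc_dma)
 * bytes covers all three shared descriptors plus the key, and each bus
 * address is simply dma_addr + offsetof(...).
 */
struct caam_ctx_sketch {
	u32 sh_desc_enc[MAX_SH_DESC_WORDS];	/* mapping starts here */
	u32 sh_desc_dec[MAX_SH_DESC_WORDS];
	u32 sh_desc_givenc[MAX_SH_DESC_WORDS];
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t sh_desc_enc_dma;		/* mapping ends just before this */
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	dma_addr_t key_dma;
	/* ... job ring device pointer, alginfo templates, etc. ... */
};

The DMA_ATTR_SKIP_CPU_SYNC attribute fits this scheme: ownership is handed to the device explicitly, via dma_sync_single_for_device(), each time a descriptor or key is rewritten.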
@@ -4421,25 +3291,9 @@ static int caam_aead_init(struct crypto_aead *tfm)
static void caam_exit_common(struct caam_ctx *ctx)
{
- if (ctx->sh_desc_enc_dma &&
- !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
- dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
- if (ctx->sh_desc_dec_dma &&
- !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
- dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
- desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
- if (ctx->sh_desc_givenc_dma &&
- !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
- dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
- desc_bytes(ctx->sh_desc_givenc),
- DMA_TO_DEVICE);
- if (ctx->key_dma &&
- !dma_mapping_error(ctx->jrdev, ctx->key_dma))
- dma_unmap_single(ctx->jrdev, ctx->key_dma,
- ctx->enckeylen + ctx->split_key_pad_len,
- DMA_TO_DEVICE);
-
+ dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
+ offsetof(struct caam_ctx, sh_desc_enc_dma),
+ DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
caam_jr_free(ctx->jrdev);
}
@@ -4515,7 +3369,6 @@ static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
t_alg->caam.class1_alg_type = template->class1_alg_type;
t_alg->caam.class2_alg_type = template->class2_alg_type;
- t_alg->caam.alg_op = template->alg_op;
return t_alg;
}
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
new file mode 100644
index 0000000..d162120
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -0,0 +1,1913 @@
+/*
+ * Shared descriptors for aead, ablkcipher algorithms
+ *
+ * Copyright 2016 NXP
+ */
+
+#include "compat.h"
+#include "desc_constr.h"
+#include "caamalg_desc.h"
+
+/*
+ * For aead functions, read payload and write payload,
+ * both of which are specified in req->src and req->dst
+ */
+static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
+{
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
+ KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
+}
+
+/* Set DK bit in class 1 operation if shared */
+static inline void append_dec_op1(u32 *desc, u32 type)
+{
+ u32 *jump_cmd, *uncond_jump_cmd;
+
+ /* DK bit is valid only for AES */
+ if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
+ append_operation(desc, type | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT);
+ return;
+ }
+
+ jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
+ append_operation(desc, type | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT);
+ uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+ set_jump_tgt_here(desc, jump_cmd);
+ append_operation(desc, type | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT | OP_ALG_AAI_DK);
+ set_jump_tgt_here(desc, uncond_jump_cmd);
+}
+
+/**
+ * cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor
+ * (non-protocol) with no (null) encryption.
+ * @desc: pointer to buffer used for descriptor construction
+ * @adata: pointer to authentication transform definitions. Note that since a
+ * split key is to be used, the size of the split key itself is
+ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
+ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ *
+ * Note: Requires an MDHA split key.
+ */
+void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
+ unsigned int icvsize)
+{
+ u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (adata->key_inline)
+ append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
+ adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT |
+ KEY_ENC);
+ else
+ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* assoclen + cryptlen = seqinlen */
+ append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* Prepare to read and write cryptlen + assoclen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /*
+ * MOVE_LEN opcode is not available in all SEC HW revisions,
+ * thus need to do some magic, i.e. self-patch the descriptor
+ * buffer.
+ */
+ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
+ MOVE_DEST_MATH3 |
+ (0x6 << MOVE_LEN_SHIFT));
+ write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
+ MOVE_DEST_DESCBUF |
+ MOVE_WAITCOMP |
+ (0x8 << MOVE_LEN_SHIFT));
+
+ /* Class 2 operation */
+ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* Read and write cryptlen bytes */
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+ set_move_tgt_here(desc, read_move_cmd);
+ set_move_tgt_here(desc, write_move_cmd);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
+ MOVE_AUX_LS);
+
+ /* Write ICV */
+ append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "aead null enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_aead_null_encap);
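A hedged usage sketch for the constructor documented above, based only on the struct alginfo fields and caam_ctx members that appear elsewhere in this patch; the helper name is invented:

/*
 * Invented example caller: build the null-encryption encap descriptor into
 * the pre-mapped ctx buffer and hand it to the engine.
 */
static void example_set_null_enc_shdesc(struct caam_ctx *ctx)
{
	struct alginfo *adata = &ctx->adata;

	adata->key_virt = ctx->key;	/* MDHA split key already generated */
	adata->key_inline = true;	/* assume it fits in the descriptor */

	cnstr_shdsc_aead_null_encap(ctx->sh_desc_enc, adata, ctx->authsize);
	dma_sync_single_for_device(ctx->jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(ctx->sh_desc_enc),
				   DMA_TO_DEVICE);
}

When the split key is too large to inline, key_inline would be false and the constructor falls back to referencing adata->key_dma, as the else branch above shows.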
+
+/**
+ * cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor
+ * (non-protocol) with no (null) decryption.
+ * @desc: pointer to buffer used for descriptor construction
+ * @adata: pointer to authentication transform definitions. Note that since a
+ * split key is to be used, the size of the split key itself is
+ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
+ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ *
+ * Note: Requires an MDHA split key.
+ */
+void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
+ unsigned int icvsize)
+{
+ u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (adata->key_inline)
+ append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
+ adata->keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ else
+ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 2 operation */
+ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+ /* assoclen + cryptlen = seqoutlen */
+ append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+ /* Prepare to read and write cryptlen + assoclen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
+
+ /*
+ * MOVE_LEN opcode is not available in all SEC HW revisions,
+ * thus we need to self-patch (modify at run time) the descriptor
+ * buffer.
+ */
+ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
+ MOVE_DEST_MATH2 |
+ (0x6 << MOVE_LEN_SHIFT));
+ write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
+ MOVE_DEST_DESCBUF |
+ MOVE_WAITCOMP |
+ (0x8 << MOVE_LEN_SHIFT));
+
+ /* Read and write cryptlen bytes */
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+ /*
+ * Insert a NOP here, since we need at least 4 instructions between
+ * code patching the descriptor buffer and the location being patched.
+ */
+ jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+ set_jump_tgt_here(desc, jump_cmd);
+
+ set_move_tgt_here(desc, read_move_cmd);
+ set_move_tgt_here(desc, write_move_cmd);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
+ MOVE_AUX_LS);
+ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+ /* Load ICV */
+ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
+ FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "aead null dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_aead_null_decap);
+
+static void init_sh_desc_key_aead(u32 * const desc,
+ struct alginfo * const cdata,
+ struct alginfo * const adata,
+ const bool is_rfc3686, u32 *nonce)
+{
+ u32 *key_jump_cmd;
+ unsigned int enckeylen = cdata->keylen;
+
+ /* Note: Context registers are saved. */
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /*
+ * RFC3686 specific:
+ * | key = {AUTH_KEY, ENC_KEY, NONCE}
+ * | enckeylen = encryption key size + nonce size
+ */
+ if (is_rfc3686)
+ enckeylen -= CTR_RFC3686_NONCE_SIZE;
+
+ if (adata->key_inline)
+ append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
+ adata->keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ else
+ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+
+ if (cdata->key_inline)
+ append_key_as_imm(desc, cdata->key_virt, enckeylen,
+ enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, cdata->key_dma, enckeylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+
+ /* Load Counter into CONTEXT1 reg */
+ if (is_rfc3686) {
+ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+ LDST_CLASS_IND_CCB |
+ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+ append_move(desc,
+ MOVE_SRC_OUTFIFO |
+ MOVE_DEST_CLASS1CTX |
+ (16 << MOVE_OFFSET_SHIFT) |
+ (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+ }
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+}
+
+/**
+ * cnstr_shdsc_aead_encap - IPSec ESP encapsulation shared descriptor
+ * (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * @adata: pointer to authentication transform definitions. Note that since a
+ * split key is to be used, the size of the split key itself is
+ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
+ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @nonce: pointer to rfc3686 nonce
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ * @is_qi: true when called from caam/qi
+ *
+ * Note: Requires an MDHA split key.
+ */
+void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int ivsize,
+ unsigned int icvsize, const bool is_rfc3686,
+ u32 *nonce, const u32 ctx1_iv_off, const bool is_qi)
+{
+ /* Note: Context registers are saved. */
+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
+
+ /* Class 2 operation */
+ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ if (is_qi) {
+ u32 *wait_load_cmd;
+
+ /* REG3 = assoclen */
+ append_seq_load(desc, 4, LDST_CLASS_DECO |
+ LDST_SRCDST_WORD_DECO_MATH3 |
+ (4 << LDST_OFFSET_SHIFT));
+
+ wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_CALM | JUMP_COND_NCP |
+ JUMP_COND_NOP | JUMP_COND_NIP |
+ JUMP_COND_NIFP);
+ set_jump_tgt_here(desc, wait_load_cmd);
+
+ append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ (ctx1_iv_off << LDST_OFFSET_SHIFT));
+ }
+
+ /* Read and write assoclen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* read assoc before reading payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+ FIFOLDST_VLF);
+
+ /* Load Counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
+ /* Class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* Read and write cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
+
+ /* Write ICV */
+ append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "aead enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_aead_encap);
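+
+/*
+ * Example (illustrative only): building the encap descriptor for
+ * authenc(hmac(sha256),cbc(aes)) on the job-ring backend (is_qi = false).
+ * The crude "inline keys only if everything fits" check and the helper name
+ * are assumptions of this sketch; callers are free to use a finer-grained
+ * inlining policy.
+ */
+static void __maybe_unused example_aead_cbc_enc(u32 *desc, /* DMA-able buf */
+						struct alginfo *cdata,
+						struct alginfo *adata,
+						unsigned int authsize)
+{
+	unsigned int data_len = adata->keylen_pad + cdata->keylen;
+
+	/*
+	 * Inline both keys only if commands + keys fit in the descriptor
+	 * buffer; otherwise the caller must have set key_dma for each.
+	 */
+	adata->key_inline = data_len + DESC_AEAD_ENC_LEN <=
+			    MAX_CAAM_DESCSIZE * CAAM_CMD_SZ;
+	cdata->key_inline = adata->key_inline;
+
+	cnstr_shdsc_aead_encap(desc, cdata, adata, 16 /* AES IV size */,
+			       authsize, false /* !rfc3686 */, NULL,
+			       0 /* ctx1_iv_off */, false /* !qi */);
+}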
+
+/**
+ * cnstr_shdsc_aead_decap - IPSec ESP decapsulation shared descriptor
+ * (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * @adata: pointer to authentication transform definitions. Note that since a
+ * split key is to be used, the size of the split key itself is
+ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
+ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @nonce: pointer to rfc3686 nonce
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ * @is_qi: true when called from caam/qi
+ *
+ * Note: Requires an MDHA split key.
+ */
+void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int ivsize,
+ unsigned int icvsize, const bool geniv,
+ const bool is_rfc3686, u32 *nonce,
+ const u32 ctx1_iv_off, const bool is_qi)
+{
+ /* Note: Context registers are saved. */
+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
+
+ /* Class 2 operation */
+ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+ if (is_qi) {
+ u32 *wait_load_cmd;
+
+ /* REG3 = assoclen */
+ append_seq_load(desc, 4, LDST_CLASS_DECO |
+ LDST_SRCDST_WORD_DECO_MATH3 |
+ (4 << LDST_OFFSET_SHIFT));
+
+ wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_CALM | JUMP_COND_NCP |
+ JUMP_COND_NOP | JUMP_COND_NIP |
+ JUMP_COND_NIFP);
+ set_jump_tgt_here(desc, wait_load_cmd);
+
+ if (!geniv)
+ append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ (ctx1_iv_off << LDST_OFFSET_SHIFT));
+ }
+
+ /* Read and write assoclen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ if (geniv)
+ append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
+ else
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* read assoc before reading payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+ KEY_VLF);
+
+ if (geniv) {
+ append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ (ctx1_iv_off << LDST_OFFSET_SHIFT));
+ append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
+ (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
+ }
+
+ /* Load Counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
+ /* Choose operation */
+ if (ctx1_iv_off)
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT);
+ else
+ append_dec_op1(desc, cdata->algtype);
+
+ /* Read and write cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
+
+ /* Load ICV */
+ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
+ FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "aead dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_aead_decap);
+
+/**
+ * cnstr_shdsc_aead_givencap - IPSec ESP encapsulation shared descriptor
+ * (non-protocol) with HW-generated initialization
+ * vector.
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * @adata: pointer to authentication transform definitions. Note that since a
+ * split key is to be used, the size of the split key itself is
+ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
+ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @nonce: pointer to rfc3686 nonce
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ * @is_qi: true when called from caam/qi
+ *
+ * Note: Requires an MDHA split key.
+ */
+void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int ivsize,
+ unsigned int icvsize, const bool is_rfc3686,
+ u32 *nonce, const u32 ctx1_iv_off,
+ const bool is_qi)
+{
+ u32 geniv, moveiv;
+
+ /* Note: Context registers are saved. */
+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
+
+ if (is_qi) {
+ u32 *wait_load_cmd;
+
+ /* REG3 = assoclen */
+ append_seq_load(desc, 4, LDST_CLASS_DECO |
+ LDST_SRCDST_WORD_DECO_MATH3 |
+ (4 << LDST_OFFSET_SHIFT));
+
+ wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_CALM | JUMP_COND_NCP |
+ JUMP_COND_NOP | JUMP_COND_NIP |
+ JUMP_COND_NIFP);
+ set_jump_tgt_here(desc, wait_load_cmd);
+ }
+
+ if (is_rfc3686) {
+ if (is_qi)
+ append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ (ctx1_iv_off << LDST_OFFSET_SHIFT));
+
+ goto copy_iv;
+ }
+
+ /* Generate IV */
+ geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
+ NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
+ NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
+ append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ append_move(desc, MOVE_WAITCOMP |
+ MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
+ (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
+ (ivsize << MOVE_LEN_SHIFT));
+ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+copy_iv:
+ /* Copy IV from class 1 context to OFIFO */
+ append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
+ (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
+ (ivsize << MOVE_LEN_SHIFT));
+
+ /* Return to encryption */
+ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* Read and write assoclen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* read assoc before reading payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+ KEY_VLF);
+
+ /* Copy iv from outfifo to class 2 fifo */
+ moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
+ NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
+ append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+ append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
+
+ /* Load Counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
+ /* Class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* Will write ivsize + cryptlen */
+ append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* No need to reload IV */
+ append_seq_fifo_load(desc, ivsize,
+ FIFOLD_CLASS_SKIP);
+
+ /* Will read cryptlen */
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
+ FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+
+ /* Write ICV */
+ append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "aead givenc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_aead_givencap);
+
+/**
+ * cnstr_shdsc_tls_encap - tls encapsulation shared descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_AES ANDed
+ * with OP_ALG_AAI_CBC
+ * @adata: pointer to authentication transform definitions. Note that since a
+ * split key is to be used, the size of the split key itself is
+ * specified. Valid algorithm values - OP_ALG_ALGSEL_SHA1 ANDed with
+ * OP_ALG_AAI_HMAC_PRECOMP.
+ * @assoclen: associated data length
+ * @ivsize: initialization vector size
+ * @authsize: authentication data size
+ * @blocksize: block cipher size
+ */
+void cnstr_shdsc_tls_encap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int assoclen,
+ unsigned int ivsize, unsigned int authsize,
+ unsigned int blocksize)
+{
+ u32 *key_jump_cmd, *zero_payload_jump_cmd;
+ u32 genpad, idx_ld_datasz, idx_ld_pad, stidx;
+
+ /*
+ * Compute the index (in bytes) for the LOAD with destination of
+ * Class 1 Data Size Register and for the LOAD that generates padding
+ */
+ if (adata->key_inline) {
+ idx_ld_datasz = DESC_TLS10_ENC_LEN + adata->keylen_pad +
+ cdata->keylen - 4 * CAAM_CMD_SZ;
+ idx_ld_pad = DESC_TLS10_ENC_LEN + adata->keylen_pad +
+ cdata->keylen - 2 * CAAM_CMD_SZ;
+ } else {
+ idx_ld_datasz = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
+ 4 * CAAM_CMD_SZ;
+ idx_ld_pad = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
+ 2 * CAAM_CMD_SZ;
+ }
+
+ stidx = 1 << HDR_START_IDX_SHIFT;
+ init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
+
+ /* skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ if (adata->key_inline)
+ append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
+ adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT |
+ KEY_ENC);
+ else
+ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+
+ if (cdata->key_inline)
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* class 2 operation */
+ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+ /* class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* payloadlen = input data length - (assoclen + ivlen) */
+ append_math_sub_imm_u32(desc, REG0, SEQINLEN, IMM, assoclen + ivsize);
+
+ /* math1 = payloadlen + icvlen */
+ append_math_add_imm_u32(desc, REG1, REG0, IMM, authsize);
+
+ /* padlen = block_size - math1 % block_size */
+ append_math_and_imm_u32(desc, REG3, REG1, IMM, blocksize - 1);
+ append_math_sub_imm_u32(desc, REG2, IMM, REG3, blocksize);
+
+ /* cryptlen = payloadlen + icvlen + padlen */
+ append_math_add(desc, VARSEQOUTLEN, REG1, REG2, 4);
+
+ /*
+ * update immediate data with the padding length value
+ * for the LOAD in the class 1 data size register.
+ */
+ append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
+ (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 7);
+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
+ (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 8);
+
+ /* overwrite PL field for the padding Info FIFO entry */
+ append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
+ (idx_ld_pad << MOVE_OFFSET_SHIFT) | 7);
+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
+ (idx_ld_pad << MOVE_OFFSET_SHIFT) | 8);
+
+ /* store encrypted payload, icv and padding */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
+
+ /* if payload length is zero, jump to zero-payload commands */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG0, 4);
+ zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+
+ /* load iv in context1 */
+ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
+ LDST_CLASS_1_CCB | ivsize);
+
+ /* read assoc for authentication */
+ append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
+ FIFOLD_TYPE_MSG);
+ /* in-snoop payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLD_TYPE_MSG |
+ FIFOLD_TYPE_LAST2 | FIFOLDST_VLF);
+
+ /* jump over the zero-payload commands */
+ append_jump(desc, JUMP_TEST_ALL | 3);
+
+ /* zero-payload commands */
+ set_jump_tgt_here(desc, zero_payload_jump_cmd);
+
+ /* load iv in context1 */
+ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
+ LDST_CLASS_1_CCB | ivsize);
+
+ /* assoc data is the only data for authentication */
+ append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
+
+ /* send icv to encryption */
+ append_move(desc, MOVE_SRC_CLASS2CTX | MOVE_DEST_CLASS1INFIFO |
+ authsize);
+
+ /* update class 1 data size register with padding length */
+ append_load_imm_u32(desc, 0, LDST_CLASS_1_CCB |
+ LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
+
+ /* generate padding and send it to encryption */
+ genpad = NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_LC1 | NFIFOENTRY_FC1 |
+ NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_PTYPE_N;
+ append_load_imm_u32(desc, genpad, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "tls enc shdesc@" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_tls_encap);
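+
+/*
+ * For reference, the MATH commands above implement standard TLS 1.0 CBC
+ * padding: pad up to a block multiple, always adding at least one byte
+ * (blocksize is a power of two, e.g. 16 for AES, so AND replaces modulo).
+ * A host-side sketch of the same computation (helper name invented for
+ * illustration):
+ */
+static unsigned int __maybe_unused tls10_enc_out_len(unsigned int payloadlen,
+						     unsigned int authsize,
+						     unsigned int blocksize)
+{
+	/* padlen = blocksize - ((payloadlen + authsize) % blocksize) */
+	unsigned int padlen = blocksize -
+			      ((payloadlen + authsize) & (blocksize - 1));
+
+	/* cryptlen = payloadlen + icvlen + padlen, as computed into VSOL */
+	return payloadlen + authsize + padlen;
+}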
+
+/**
+ * cnstr_shdsc_tls_decap - tls decapsulation shared descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_AES ANDed
+ * with OP_ALG_AAI_CBC
+ * @adata: pointer to authentication transform definitions. Note that since a
+ * split key is to be used, the size of the split key itself is
+ * specified. Valid algorithm values - OP_ALG_ALGSEL_SHA1 ANDed with
+ * OP_ALG_AAI_HMAC_PRECOMP.
+ * @assoclen: associated data length
+ * @ivsize: initialization vector size
+ * @authsize: authentication data size
+ * @blocksize: block cipher size
+ */
+void cnstr_shdsc_tls_decap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int assoclen,
+ unsigned int ivsize, unsigned int authsize,
+ unsigned int blocksize)
+{
+ u32 stidx, jumpback;
+ u32 *key_jump_cmd, *zero_payload_jump_cmd, *skip_zero_jump_cmd;
+ /*
+ * Pointer Size bool determines the size of address pointers.
+ * false - Pointers fit in one 32-bit word.
+ * true - Pointers fit in two 32-bit words.
+ */
+ static const bool ps = (CAAM_PTR_SZ != CAAM_CMD_SZ);
+
+ stidx = 1 << HDR_START_IDX_SHIFT;
+ init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
+
+ /* skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+
+ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* class 2 operation */
+ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+ /* class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT);
+
+ /* VSIL = input data length - 2 * block_size */
+ append_math_sub_imm_u32(desc, VARSEQINLEN, SEQINLEN, IMM, 2 *
+ blocksize);
+
+ /*
+ * payloadlen + icvlen + padlen = input data length - (assoclen +
+ * ivsize)
+ */
+ append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, assoclen + ivsize);
+
+ /* skip data up to the last-but-one cipher block */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | LDST_VLF);
+
+ /* load iv for the last cipher block */
+ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
+ LDST_CLASS_1_CCB | ivsize);
+
+ /* read last cipher block */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
+ FIFOLD_TYPE_LAST1 | blocksize);
+
+ /* move decrypted block into math0 and math1 */
+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO | MOVE_DEST_MATH0 |
+ blocksize);
+
+ /* reset AES CHA */
+ append_load_imm_u32(desc, CCTRL_RESET_CHA_AESA, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_CHACTRL | LDST_IMM);
+
+ /* rewind input sequence */
+ append_seq_in_ptr_intlen(desc, 0, 65535, SQIN_RTO);
+
+ /* key1 is in decryption form */
+ append_operation(desc, cdata->algtype | OP_ALG_AAI_DK |
+ OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
+
+ /* load iv in context1 */
+ append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_1_CCB |
+ LDST_SRCDST_WORD_CLASS_CTX | ivsize);
+
+ /* read sequence number */
+ append_seq_fifo_load(desc, 8, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG);
+ /* load Type, Version and Len fields in math0 */
+ append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_DECO |
+ LDST_SRCDST_WORD_DECO_MATH0 | (3 << LDST_OFFSET_SHIFT) | 5);
+
+ /* compute (padlen - 1) */
+ append_math_and_imm_u64(desc, REG1, REG1, IMM, 255);
+
+ /* math2 = icvlen + (padlen - 1) + 1 */
+ append_math_add_imm_u32(desc, REG2, REG1, IMM, authsize + 1);
+
+ append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
+
+ /* VSOL = payloadlen + icvlen + padlen */
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, 4);
+
+#ifdef __LITTLE_ENDIAN
+ append_moveb(desc, MOVE_WAITCOMP |
+ MOVE_SRC_MATH0 | MOVE_DEST_MATH0 | 8);
+#endif
+ /* update Len field */
+ append_math_sub(desc, REG0, REG0, REG2, 8);
+
+ /* store decrypted payload, icv and padding */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
+
+ /* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen) */
+ append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
+
+ zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+
+ /* send Type, Version and Len (pre-ICV) fields to authentication */
+ append_move(desc, MOVE_WAITCOMP |
+ MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
+ (3 << MOVE_OFFSET_SHIFT) | 5);
+
+ /* outsnooping payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
+ FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LAST2 |
+ FIFOLDST_VLF);
+ skip_zero_jump_cmd = append_jump(desc, JUMP_TEST_ALL | 2);
+
+ set_jump_tgt_here(desc, zero_payload_jump_cmd);
+ /* send Type, Version and Len (pre-ICV) fields to authentication */
+ append_move(desc, MOVE_WAITCOMP | MOVE_AUX_LS |
+ MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
+ (3 << MOVE_OFFSET_SHIFT) | 5);
+
+ set_jump_tgt_here(desc, skip_zero_jump_cmd);
+ append_math_add(desc, VARSEQINLEN, ZERO, REG2, 4);
+
+ /* load icvlen and padlen */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
+ FIFOLD_TYPE_LAST1 | FIFOLDST_VLF);
+
+ /* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen) */
+ append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
+
+ /*
+ * Start a new input sequence using the SEQ OUT PTR command options,
+ * pointer and length used when the current output sequence was defined.
+ */
+ if (ps) {
+ /*
+ * Move the lower 32 bits of Shared Descriptor address, the
+ * SEQ OUT PTR command, Output Pointer (2 words) and
+ * Output Length into math registers.
+ */
+#ifdef __LITTLE_ENDIAN
+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
+ MOVE_DEST_MATH0 | (55 * 4 << MOVE_OFFSET_SHIFT) |
+ 20);
+#else
+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
+ MOVE_DEST_MATH0 | (54 * 4 << MOVE_OFFSET_SHIFT) |
+ 20);
+#endif
+ /* Transform SEQ OUT PTR command in SEQ IN PTR command */
+ append_math_and_imm_u32(desc, REG0, REG0, IMM,
+ ~(CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR));
+ /* Append a JUMP command after the copied fields */
+ jumpback = CMD_JUMP | (char)-9;
+ append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
+ LDST_SRCDST_WORD_DECO_MATH2 |
+ (4 << LDST_OFFSET_SHIFT));
+ append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
+ /* Move the updated fields back to the Job Descriptor */
+#ifdef __LITTLE_ENDIAN
+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
+ MOVE_DEST_DESCBUF | (55 * 4 << MOVE_OFFSET_SHIFT) |
+ 24);
+#else
+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
+ MOVE_DEST_DESCBUF | (54 * 4 << MOVE_OFFSET_SHIFT) |
+ 24);
+#endif
+ /*
+ * Read the new SEQ IN PTR command, Input Pointer, Input Length
+ * and then jump back to the next command from the
+ * Shared Descriptor.
+ */
+ append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 6);
+ } else {
+ /*
+ * Move the SEQ OUT PTR command, Output Pointer (1 word) and
+ * Output Length into math registers.
+ */
+#ifdef __LITTLE_ENDIAN
+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
+ MOVE_DEST_MATH0 | (54 * 4 << MOVE_OFFSET_SHIFT) |
+ 12);
+#else
+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
+ MOVE_DEST_MATH0 | (53 * 4 << MOVE_OFFSET_SHIFT) |
+ 12);
+#endif
+ /* Transform SEQ OUT PTR command in SEQ IN PTR command */
+ append_math_and_imm_u64(desc, REG0, REG0, IMM,
+ ~(((u64)(CMD_SEQ_IN_PTR ^
+ CMD_SEQ_OUT_PTR)) << 32));
+ /* Append a JUMP command after the copied fields */
+ jumpback = CMD_JUMP | (char)-7;
+ append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
+ LDST_SRCDST_WORD_DECO_MATH1 |
+ (4 << LDST_OFFSET_SHIFT));
+ append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
+ /* Move the updated fields back to the Job Descriptor */
+#ifdef __LITTLE_ENDIAN
+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
+ MOVE_DEST_DESCBUF | (54 * 4 << MOVE_OFFSET_SHIFT) |
+ 16);
+#else
+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
+ MOVE_DEST_DESCBUF | (53 * 4 << MOVE_OFFSET_SHIFT) |
+ 16);
+#endif
+ /*
+ * Read the new SEQ IN PTR command, Input Pointer, Input Length
+ * and then jump back to the next command from the
+ * Shared Descriptor.
+ */
+ append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 5);
+ }
+
+ /* skip payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | FIFOLDST_VLF);
+ /* check icv */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_ICV |
+ FIFOLD_TYPE_LAST2 | authsize);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "tls dec shdesc@" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_tls_decap);
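+
+/*
+ * For reference, the math above recovers the lengths from the speculatively
+ * decrypted last cipher block: its final byte holds padlen - 1, from which
+ * the descriptor derives icvlen + padlen and then the payload length. An
+ * equivalent host-side sketch (names invented for illustration; no padding
+ * validity check is shown):
+ */
+static unsigned int __maybe_unused tls10_dec_payload_len(u8 last_byte,
+							 unsigned int in_len,
+							 unsigned int assoclen,
+							 unsigned int ivsize,
+							 unsigned int authsize)
+{
+	unsigned int padlen = last_byte + 1;		/* (REG1 & 255) + 1 */
+	unsigned int icv_pad = authsize + padlen;	/* REG2 */
+
+	/* REG3 = in_len - (assoclen + ivsize); payload = REG3 - REG2 */
+	return in_len - assoclen - ivsize - icv_pad;
+}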
+
+/**
+ * cnstr_shdsc_gcm_encap - gcm encapsulation shared descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_qi: true when called from caam/qi
+ */
+void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, unsigned int icvsize,
+ const bool is_qi)
+{
+ u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1,
+ *zero_assoc_jump_cmd2;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (cdata->key_inline)
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ if (is_qi) {
+ u32 *wait_load_cmd;
+
+ /* REG3 = assoclen */
+ append_seq_load(desc, 4, LDST_CLASS_DECO |
+ LDST_SRCDST_WORD_DECO_MATH3 |
+ (4 << LDST_OFFSET_SHIFT));
+
+ wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_CALM | JUMP_COND_NCP |
+ JUMP_COND_NOP | JUMP_COND_NIP |
+ JUMP_COND_NIFP);
+ set_jump_tgt_here(desc, wait_load_cmd);
+
+ append_math_sub_imm_u32(desc, VARSEQOUTLEN, SEQINLEN, IMM,
+ ivsize);
+ } else {
+ append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0,
+ CAAM_CMD_SZ);
+ }
+
+ /* if assoclen + cryptlen is ZERO, skip to ICV write */
+ zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+
+ if (is_qi)
+ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+
+ /* if assoclen is ZERO, skip reading the assoc data */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* cryptlen = seqinlen - assoclen */
+ append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
+
+ /* if cryptlen is ZERO jump to zero-payload commands */
+ zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+
+ /* read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+ set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
+
+ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* write encrypted data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* read payload data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+
+ /* jump to ICV writing */
+ if (is_qi)
+ append_jump(desc, JUMP_TEST_ALL | 4);
+ else
+ append_jump(desc, JUMP_TEST_ALL | 2);
+
+ /* zero-payload commands */
+ set_jump_tgt_here(desc, zero_payload_jump_cmd);
+
+ /* read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
+ if (is_qi)
+ /* jump to ICV writing */
+ append_jump(desc, JUMP_TEST_ALL | 2);
+
+ /* There is no input data */
+ set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
+
+ if (is_qi)
+ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 |
+ FIFOLD_TYPE_LAST1);
+
+ /* write ICV */
+ append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "gcm enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_gcm_encap);
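+
+/*
+ * Example (illustrative only): building the gcm(aes) encap descriptor for the
+ * job-ring backend. The 12-byte IV is the standard gcm(aes) nonce size; the
+ * helper name, key handling and unconditional inlining are assumptions of
+ * this sketch.
+ */
+static void __maybe_unused example_gcm_enc(u32 *desc, /* DMA-able buffer */
+					   void *key, unsigned int keylen,
+					   unsigned int authsize)
+{
+	struct alginfo cdata = {
+		.algtype = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+		.keylen = keylen,
+		.key_virt = key,
+		.key_inline = true,
+	};
+
+	cnstr_shdsc_gcm_encap(desc, &cdata, 12 /* IV size */, authsize,
+			      false /* !qi */);
+}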
+
+/**
+ * cnstr_shdsc_gcm_decap - gcm decapsulation shared descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_qi: true when called from caam/qi
+ */
+void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, unsigned int icvsize,
+ const bool is_qi)
+{
+ u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL |
+ JUMP_TEST_ALL | JUMP_COND_SHRD);
+ if (cdata->key_inline)
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+ if (is_qi) {
+ u32 *wait_load_cmd;
+
+ /* REG3 = assoclen */
+ append_seq_load(desc, 4, LDST_CLASS_DECO |
+ LDST_SRCDST_WORD_DECO_MATH3 |
+ (4 << LDST_OFFSET_SHIFT));
+
+ wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_CALM | JUMP_COND_NCP |
+ JUMP_COND_NOP | JUMP_COND_NIP |
+ JUMP_COND_NIFP);
+ set_jump_tgt_here(desc, wait_load_cmd);
+
+ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+ }
+
+ /* if assoclen is ZERO, skip reading the assoc data */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+
+ set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
+
+ /* cryptlen = seqoutlen - assoclen */
+ append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+ /* jump to zero-payload command if cryptlen is zero */
+ zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+
+ append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+ /* store encrypted data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* read payload data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+ /* zero-payload command */
+ set_jump_tgt_here(desc, zero_payload_jump_cmd);
+
+ /* read ICV */
+ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "gcm dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_gcm_decap);
+
+/**
+ * cnstr_shdsc_rfc4106_encap - IPSec ESP gcm encapsulation shared descriptor
+ * (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_qi: true when called from caam/qi
+ */
+void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, unsigned int icvsize,
+ const bool is_qi)
+{
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (cdata->key_inline)
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ if (is_qi) {
+ u32 *wait_load_cmd;
+
+ /* REG3 = assoclen */
+ append_seq_load(desc, 4, LDST_CLASS_DECO |
+ LDST_SRCDST_WORD_DECO_MATH3 |
+ (4 << LDST_OFFSET_SHIFT));
+
+ wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_CALM | JUMP_COND_NCP |
+ JUMP_COND_NOP | JUMP_COND_NIP |
+ JUMP_COND_NIFP);
+ set_jump_tgt_here(desc, wait_load_cmd);
+
+ /* Read salt and IV */
+ append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
+ cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV);
+ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+ }
+
+ append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* Read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+
+ /* Skip IV */
+ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
+
+ /* Will read cryptlen bytes */
+ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
+
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* cryptlen = seqoutlen - assoclen */
+ append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* Write encrypted data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* Read payload data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+
+ /* Write ICV */
+ append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "rfc4106 enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap);
+
+/**
+ * cnstr_shdsc_rfc4106_decap - IPSec ESP gcm decapsulation shared descriptor
+ * (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_qi: true when called from caam/qi
+ */
+void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, unsigned int icvsize,
+ const bool is_qi)
+{
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (cdata->key_inline)
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+ if (is_qi) {
+ u32 *wait_load_cmd;
+
+ /* REG3 = assoclen */
+ append_seq_load(desc, 4, LDST_CLASS_DECO |
+ LDST_SRCDST_WORD_DECO_MATH3 |
+ (4 << LDST_OFFSET_SHIFT));
+
+ wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_CALM | JUMP_COND_NCP |
+ JUMP_COND_NOP | JUMP_COND_NIP |
+ JUMP_COND_NIFP);
+ set_jump_tgt_here(desc, wait_load_cmd);
+
+ /* Read salt and IV */
+ append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
+ cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV);
+ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+ }
+
+ append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* Read assoc data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+
+ /* Skip IV */
+ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
+
+ /* Will read cryptlen bytes */
+ append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
+
+ /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
+
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* Will write cryptlen bytes */
+ append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+ /* Store payload data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* Read encrypted data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+ /* Read ICV */
+ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "rfc4106 dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap);
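+
+/*
+ * Note on the salt handling above: for rfc4106 the last 4 bytes of the key
+ * material passed by the crypto API are the salt, which is why the QI path
+ * reads 4 bytes at key_virt + keylen as the first IV word. A setkey-side
+ * sketch of that split (illustrative only; the helper name is invented):
+ */
+static void __maybe_unused example_rfc4106_split_key(struct alginfo *cdata,
+						     void *key,
+						     unsigned int keylen)
+{
+	/* trailing 4 bytes are the salt, not part of the AES key */
+	cdata->key_virt = key;
+	cdata->keylen = keylen - 4;
+}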
+
+/**
+ * cnstr_shdsc_rfc4543_encap - IPSec ESP gmac encapsulation shared descriptor
+ * (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_qi: true when called from caam/qi
+ */
+void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, unsigned int icvsize,
+ const bool is_qi)
+{
+ u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (cdata->key_inline)
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ if (is_qi) {
+ /* assoclen is not needed, skip it */
+ append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
+
+ /* Read salt and IV */
+ append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
+ cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV);
+ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+ }
+
+ /* assoclen + cryptlen = seqinlen */
+ append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /*
+ * MOVE_LEN opcode is not available in all SEC HW revisions,
+ * thus we need to self-patch (modify at run time) the descriptor
+ * buffer.
+ */
+ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
+ (0x6 << MOVE_LEN_SHIFT));
+ write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
+ (0x8 << MOVE_LEN_SHIFT));
+
+ /* Will read assoclen + cryptlen bytes */
+ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* Will write assoclen + cryptlen bytes */
+ append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* Read and write assoclen + cryptlen bytes */
+ aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
+
+ set_move_tgt_here(desc, read_move_cmd);
+ set_move_tgt_here(desc, write_move_cmd);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ /* Move payload data to OFIFO */
+ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
+
+ /* Write ICV */
+ append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "rfc4543 enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap);
+
+/**
+ * cnstr_shdsc_rfc4543_decap - IPSec ESP gmac decapsulation shared descriptor
+ * (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_qi: true when called from caam/qi
+ */
+void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, unsigned int icvsize,
+ const bool is_qi)
+{
+ u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip key loading if it is loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (cdata->key_inline)
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ else
+ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 1 operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+ if (is_qi) {
+ /* assoclen is not needed, skip it */
+ append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
+
+ /* Read salt and IV */
+ append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
+ cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV);
+ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+ }
+
+ /* assoclen + cryptlen = seqoutlen */
+ append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+ /*
+ * MOVE_LEN opcode is not available in all SEC HW revisions,
+ * thus we need to self-patch (modify at run time) the descriptor
+ * buffer.
+ */
+ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
+ (0x6 << MOVE_LEN_SHIFT));
+ write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
+ (0x8 << MOVE_LEN_SHIFT));
+
+ /* Will read assoclen + cryptlen bytes */
+ append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+ /* Will write assoclen + cryptlen bytes */
+ append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+ /* Store payload data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+ /* In-snoop assoclen + cryptlen data */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
+ FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
+
+ set_move_tgt_here(desc, read_move_cmd);
+ set_move_tgt_here(desc, write_move_cmd);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ /* Move payload data to OFIFO */
+ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
+ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+ /* Read ICV */
+ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
+ FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "rfc4543 dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap);
+
+/*
+ * For ablkcipher encrypt and decrypt, read from req->src and
+ * write to req->dst
+ */
+static inline void ablkcipher_append_src_dst(u32 *desc)
+{
+ append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
+ KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+}
+
+/**
+ * cnstr_shdsc_ablkcipher_encap - ablkcipher encapsulation shared descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * @ivsize: initialization vector size
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ */
+void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off)
+{
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /* Load class1 key only */
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+
+ /* Load nonce into CONTEXT1 reg */
+ if (is_rfc3686) {
+ u8 *nonce = cdata->key_virt + cdata->keylen;
+
+ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+ LDST_CLASS_IND_CCB |
+ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
+ MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
+ (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+ }
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Load iv */
+ append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+
+ /* Load counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
+ /* Load operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* Perform operation */
+ ablkcipher_append_src_dst(desc);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "ablkcipher enc shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_encap);
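+
+/*
+ * Example (illustrative only): building the cbc(aes) encrypt descriptor. For
+ * plain CBC the IV sits at offset 0 of CONTEXT1, so ctx1_iv_off is 0 and no
+ * RFC3686 nonce/counter handling is needed. The helper name and key handling
+ * are assumptions of this sketch.
+ */
+static void __maybe_unused example_cbc_aes_enc(u32 *desc, /* DMA-able buffer */
+					       void *key, unsigned int keylen)
+{
+	struct alginfo cdata = {
+		.algtype = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+		.keylen = keylen,
+		.key_virt = key,
+	};
+
+	cnstr_shdsc_ablkcipher_encap(desc, &cdata, 16 /* AES IV size */,
+				     false /* !rfc3686 */, 0 /* ctx1_iv_off */);
+}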
+
+/**
+ * cnstr_shdsc_ablkcipher_decap - ablkcipher decapsulation shared descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * @ivsize: initialization vector size
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ */
+void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off)
+{
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /* Load class1 key only */
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+
+ /* Load nonce into CONTEXT1 reg */
+ if (is_rfc3686) {
+ u8 *nonce = cdata->key_virt + cdata->keylen;
+
+ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+ LDST_CLASS_IND_CCB |
+ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
+ MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
+ (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+ }
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* load IV */
+ append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+
+ /* Load counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
+ /* Choose operation */
+ if (ctx1_iv_off)
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT);
+ else
+ append_dec_op1(desc, cdata->algtype);
+
+ /* Perform operation */
+ ablkcipher_append_src_dst(desc);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "ablkcipher dec shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_decap);
+
+/**
+ * cnstr_shdsc_ablkcipher_givencap - ablkcipher encapsulation shared descriptor
+ * with HW-generated initialization vector.
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+ * with OP_ALG_AAI_CBC.
+ * @ivsize: initialization vector size
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ */
+void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off)
+{
+ u32 *key_jump_cmd, geniv;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /* Load class1 key only */
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+
+ /* Load Nonce into CONTEXT1 reg */
+ if (is_rfc3686) {
+ u8 *nonce = cdata->key_virt + cdata->keylen;
+
+ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+ LDST_CLASS_IND_CCB |
+ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
+ MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
+ (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+ }
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Generate IV */
+ geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
+ NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | NFIFOENTRY_PTYPE_RND |
+ (ivsize << NFIFOENTRY_DLEN_SHIFT);
+ append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_INFIFO |
+ MOVE_DEST_CLASS1CTX | (ivsize << MOVE_LEN_SHIFT) |
+ (ctx1_iv_off << MOVE_OFFSET_SHIFT));
+ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+ /* Copy generated IV to memory */
+ append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+
+ /* Load Counter into CONTEXT1 reg */
+ if (is_rfc3686)
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
+
+ if (ctx1_iv_off)
+ append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
+ (1 << JUMP_OFFSET_SHIFT));
+
+ /* Load operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* Perform operation */
+ ablkcipher_append_src_dst(desc);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_givencap);
+
+/**
+ * cnstr_shdsc_xts_ablkcipher_encap - xts ablkcipher encapsulation shared
+ * descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS.
+ */
+void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata)
+{
+ __be64 sector_size = cpu_to_be64(512);
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /* Load class1 keys only */
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+
+ /* Load sector size with index 40 bytes (0x28) */
+ append_load_as_imm(desc, (void *)&sector_size, 8, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ (0x28 << LDST_OFFSET_SHIFT));
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /*
+ * create sequence for loading the sector index
+ * Upper 8B of IV - will be used as sector index
+ * Lower 8B of IV - will be discarded
+ */
+ append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+ (0x20 << LDST_OFFSET_SHIFT));
+ append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
+
+ /* Load operation */
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+
+ /* Perform operation */
+ ablkcipher_append_src_dst(desc);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_encap);
+
+/**
+ * cnstr_shdsc_xts_ablkcipher_decap - xts ablkcipher decapsulation shared
+ * descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ *         Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_XTS.
+ */
+void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata)
+{
+ __be64 sector_size = cpu_to_be64(512);
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /* Load class1 key only */
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+
+ /* Load sector size with index 40 bytes (0x28) */
+ append_load_as_imm(desc, (void *)&sector_size, 8, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ (0x28 << LDST_OFFSET_SHIFT));
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /*
+ * create sequence for loading the sector index
+ * Upper 8B of IV - will be used as sector index
+ * Lower 8B of IV - will be discarded
+ */
+ append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+ (0x20 << LDST_OFFSET_SHIFT));
+ append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
+
+ /* Load operation */
+ append_dec_op1(desc, cdata->algtype);
+
+ /* Perform operation */
+ ablkcipher_append_src_dst(desc);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_decap);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("FSL CAAM descriptor support");
+MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
new file mode 100644
index 0000000..6b436f6
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_desc.h
@@ -0,0 +1,127 @@
+/*
+ * Shared descriptors for aead, ablkcipher algorithms
+ *
+ * Copyright 2016 NXP
+ */
+
+#ifndef _CAAMALG_DESC_H_
+#define _CAAMALG_DESC_H_
+
+/* length of descriptors text */
+#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
+#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
+#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
+#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
+#define DESC_QI_AEAD_ENC_LEN (DESC_AEAD_ENC_LEN + 3 * CAAM_CMD_SZ)
+#define DESC_QI_AEAD_DEC_LEN (DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ)
+#define DESC_QI_AEAD_GIVENC_LEN (DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ)
+
+#define DESC_TLS_BASE (4 * CAAM_CMD_SZ)
+#define DESC_TLS10_ENC_LEN (DESC_TLS_BASE + 29 * CAAM_CMD_SZ)
+
+/* Note: Nonce is counted in cdata.keylen */
+#define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ)
+
+#define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ)
+#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
+#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)
+
+#define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
+#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
+#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
+#define DESC_QI_GCM_ENC_LEN (DESC_GCM_ENC_LEN + 6 * CAAM_CMD_SZ)
+#define DESC_QI_GCM_DEC_LEN (DESC_GCM_DEC_LEN + 3 * CAAM_CMD_SZ)
+
+#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
+#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
+#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
+#define DESC_QI_RFC4106_ENC_LEN (DESC_RFC4106_ENC_LEN + 5 * CAAM_CMD_SZ)
+#define DESC_QI_RFC4106_DEC_LEN (DESC_RFC4106_DEC_LEN + 5 * CAAM_CMD_SZ)
+
+#define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
+#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
+#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
+#define DESC_QI_RFC4543_ENC_LEN (DESC_RFC4543_ENC_LEN + 4 * CAAM_CMD_SZ)
+#define DESC_QI_RFC4543_DEC_LEN (DESC_RFC4543_DEC_LEN + 4 * CAAM_CMD_SZ)
+
+#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
+#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
+ 20 * CAAM_CMD_SZ)
+#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
+ 15 * CAAM_CMD_SZ)
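+
+/*
+ * The lengths above are worst-case sizes of the descriptor text; they are
+ * used together with desc_inline_query() to decide whether keys can be
+ * inlined in the shared descriptor or must be referenced by DMA address.
+ */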
+
+void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
+ unsigned int icvsize);
+
+void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
+ unsigned int icvsize);
+
+void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int ivsize,
+ unsigned int icvsize, const bool is_rfc3686,
+ u32 *nonce, const u32 ctx1_iv_off,
+ const bool is_qi);
+
+void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int ivsize,
+ unsigned int icvsize, const bool geniv,
+ const bool is_rfc3686, u32 *nonce,
+ const u32 ctx1_iv_off, const bool is_qi);
+
+void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int ivsize,
+ unsigned int icvsize, const bool is_rfc3686,
+ u32 *nonce, const u32 ctx1_iv_off,
+ const bool is_qi);
+
+void cnstr_shdsc_tls_encap(u32 *const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int assoclen,
+ unsigned int ivsize, unsigned int authsize,
+ unsigned int blocksize);
+
+void cnstr_shdsc_tls_decap(u32 *const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int assoclen,
+ unsigned int ivsize, unsigned int authsize,
+ unsigned int blocksize);
+
+void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, unsigned int icvsize,
+ const bool is_qi);
+
+void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, unsigned int icvsize,
+ const bool is_qi);
+
+void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, unsigned int icvsize,
+ const bool is_qi);
+
+void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, unsigned int icvsize,
+ const bool is_qi);
+
+void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, unsigned int icvsize,
+ const bool is_qi);
+
+void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, unsigned int icvsize,
+ const bool is_qi);
+
+void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off);
+
+void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off);
+
+void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off);
+
+void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata);
+
+void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata);
+
+#endif /* _CAAMALG_DESC_H_ */
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
new file mode 100644
index 0000000..d6a9b0c
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -0,0 +1,2877 @@
+/*
+ * Freescale FSL CAAM support for crypto API over QI backend.
+ * Based on caamalg.c
+ *
+ * Copyright 2013-2016 Freescale Semiconductor, Inc.
+ * Copyright 2016-2017 NXP
+ */
+
+#include "compat.h"
+#include "ctrl.h"
+#include "regs.h"
+#include "intern.h"
+#include "desc_constr.h"
+#include "error.h"
+#include "sg_sw_qm.h"
+#include "key_gen.h"
+#include "qi.h"
+#include "jr.h"
+#include "caamalg_desc.h"
+
+/*
+ * crypto alg
+ */
+#define CAAM_CRA_PRIORITY 2000
+/* max key is sum of AES_MAX_KEY_SIZE and max split key size */
+#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \
+ SHA512_DIGEST_SIZE * 2)
+
+#define DESC_MAX_USED_BYTES (DESC_QI_AEAD_GIVENC_LEN + \
+ CAAM_MAX_KEY_SIZE)
+#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
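+
+/*
+ * The per-session shared descriptor buffers (sh_desc_enc/dec/givenc below)
+ * are sized for the largest QI descriptor (AEAD givencrypt) with both keys
+ * inlined.
+ */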
+
+struct caam_alg_entry {
+ int class1_alg_type;
+ int class2_alg_type;
+ bool rfc3686;
+ bool geniv;
+};
+
+struct caam_aead_alg {
+ struct aead_alg aead;
+ struct caam_alg_entry caam;
+ bool registered;
+};
+
+/*
+ * per-session context
+ */
+struct caam_ctx {
+ struct device *jrdev;
+ u32 sh_desc_enc[DESC_MAX_USED_LEN];
+ u32 sh_desc_dec[DESC_MAX_USED_LEN];
+ u32 sh_desc_givenc[DESC_MAX_USED_LEN];
+ u8 key[CAAM_MAX_KEY_SIZE];
+ dma_addr_t key_dma;
+ struct alginfo adata;
+ struct alginfo cdata;
+ unsigned int authsize;
+ struct device *qidev;
+ spinlock_t lock; /* Protects multiple init of driver context */
+ struct caam_drv_ctx *drv_ctx[NUM_OP];
+};
+
+static int aead_set_sh_desc(struct crypto_aead *aead)
+{
+ struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
+ typeof(*alg), aead);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ u32 ctx1_iv_off = 0;
+ u32 *nonce = NULL;
+ unsigned int data_len[2];
+ u32 inl_mask;
+ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
+ OP_ALG_AAI_CTR_MOD128);
+ const bool is_rfc3686 = alg->caam.rfc3686;
+
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ /*
+ * AES-CTR needs to load IV in CONTEXT1 reg
+ * at an offset of 128bits (16bytes)
+ * CONTEXT1[255:128] = IV
+ */
+ if (ctr_mode)
+ ctx1_iv_off = 16;
+
+ /*
+ * RFC3686 specific:
+ * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+ */
+ if (is_rfc3686) {
+ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+ nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
+ ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
+ }
+
+ data_len[0] = ctx->adata.keylen_pad;
+ data_len[1] = ctx->cdata.keylen;
+
+ if (alg->caam.geniv)
+ goto skip_enc;
+
+ /* aead_encrypt shared descriptor */
+ if (desc_inline_query(DESC_QI_AEAD_ENC_LEN +
+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+ DESC_JOB_IO_LEN, data_len, &inl_mask,
+ ARRAY_SIZE(data_len)) < 0)
+ return -EINVAL;
+
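+	/*
+	 * Bit 0 of inl_mask is set when the (split) authentication key fits
+	 * inline in the shared descriptor, bit 1 when the cipher key does;
+	 * keys that do not fit are referenced by DMA address instead.
+	 */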
+ if (inl_mask & 1)
+ ctx->adata.key_virt = ctx->key;
+ else
+ ctx->adata.key_dma = ctx->key_dma;
+
+ if (inl_mask & 2)
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+ else
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+ ctx->adata.key_inline = !!(inl_mask & 1);
+ ctx->cdata.key_inline = !!(inl_mask & 2);
+
+ cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
+ ivsize, ctx->authsize, is_rfc3686, nonce,
+ ctx1_iv_off, true);
+
+skip_enc:
+ /* aead_decrypt shared descriptor */
+ if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+ DESC_JOB_IO_LEN, data_len, &inl_mask,
+ ARRAY_SIZE(data_len)) < 0)
+ return -EINVAL;
+
+ if (inl_mask & 1)
+ ctx->adata.key_virt = ctx->key;
+ else
+ ctx->adata.key_dma = ctx->key_dma;
+
+ if (inl_mask & 2)
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+ else
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+ ctx->adata.key_inline = !!(inl_mask & 1);
+ ctx->cdata.key_inline = !!(inl_mask & 2);
+
+ cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
+ ivsize, ctx->authsize, alg->caam.geniv,
+ is_rfc3686, nonce, ctx1_iv_off, true);
+
+ if (!alg->caam.geniv)
+ goto skip_givenc;
+
+ /* aead_givencrypt shared descriptor */
+ if (desc_inline_query(DESC_QI_AEAD_GIVENC_LEN +
+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+ DESC_JOB_IO_LEN, data_len, &inl_mask,
+ ARRAY_SIZE(data_len)) < 0)
+ return -EINVAL;
+
+ if (inl_mask & 1)
+ ctx->adata.key_virt = ctx->key;
+ else
+ ctx->adata.key_dma = ctx->key_dma;
+
+ if (inl_mask & 2)
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+ else
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+ ctx->adata.key_inline = !!(inl_mask & 1);
+ ctx->cdata.key_inline = !!(inl_mask & 2);
+
+ cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
+ ivsize, ctx->authsize, is_rfc3686, nonce,
+ ctx1_iv_off, true);
+
+skip_givenc:
+ return 0;
+}
+
+static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+ ctx->authsize = authsize;
+ aead_set_sh_desc(authenc);
+
+ return 0;
+}
+
+static int aead_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ struct crypto_authenc_keys keys;
+ int ret = 0;
+
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+ goto badkey;
+
+#ifdef DEBUG
+ dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
+ keys.authkeylen + keys.enckeylen, keys.enckeylen,
+ keys.authkeylen);
+ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+ ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
+ keys.authkeylen, CAAM_MAX_KEY_SIZE -
+ keys.enckeylen);
+ if (ret)
+ goto badkey;
+
+	/* append the encryption key after the auth split key */
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
+ dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
+ keys.enckeylen, DMA_TO_DEVICE);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+ ctx->adata.keylen_pad + keys.enckeylen, 1);
+#endif
+
+ ctx->cdata.keylen = keys.enckeylen;
+
+ ret = aead_set_sh_desc(aead);
+ if (ret)
+ goto badkey;
+
+ /* Now update the driver contexts with the new shared descriptor */
+ if (ctx->drv_ctx[ENCRYPT]) {
+ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
+ ctx->sh_desc_enc);
+ if (ret) {
+ dev_err(jrdev, "driver enc context update failed\n");
+ goto badkey;
+ }
+ }
+
+ if (ctx->drv_ctx[DECRYPT]) {
+ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
+ ctx->sh_desc_dec);
+ if (ret) {
+ dev_err(jrdev, "driver dec context update failed\n");
+ goto badkey;
+ }
+ }
+
+ return ret;
+badkey:
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+}
+
+static int tls_set_sh_desc(struct crypto_aead *tls)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(tls);
+ unsigned int ivsize = crypto_aead_ivsize(tls);
+ unsigned int blocksize = crypto_aead_blocksize(tls);
+ unsigned int assoclen = 13; /* always 13 bytes for TLS */
+ unsigned int data_len[2];
+ u32 inl_mask;
+
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ /*
+ * TLS 1.0 encrypt shared descriptor
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+ data_len[0] = ctx->adata.keylen_pad;
+ data_len[1] = ctx->cdata.keylen;
+
+ if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
+ &inl_mask, ARRAY_SIZE(data_len)) < 0)
+ return -EINVAL;
+
+ if (inl_mask & 1)
+ ctx->adata.key_virt = ctx->key;
+ else
+ ctx->adata.key_dma = ctx->key_dma;
+
+ if (inl_mask & 2)
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+ else
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+ ctx->adata.key_inline = !!(inl_mask & 1);
+ ctx->cdata.key_inline = !!(inl_mask & 2);
+
+ cnstr_shdsc_tls_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
+ assoclen, ivsize, ctx->authsize, blocksize);
+
+ /*
+ * TLS 1.0 decrypt shared descriptor
+ * Keys do not fit inline, regardless of algorithms used
+ */
+ ctx->adata.key_dma = ctx->key_dma;
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+ cnstr_shdsc_tls_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
+ assoclen, ivsize, ctx->authsize, blocksize);
+
+ return 0;
+}
+
+static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(tls);
+
+ ctx->authsize = authsize;
+ tls_set_sh_desc(tls);
+
+ return 0;
+}
+
+static int tls_setkey(struct crypto_aead *tls, const u8 *key,
+ unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(tls);
+ struct device *jrdev = ctx->jrdev;
+ struct crypto_authenc_keys keys;
+ int ret = 0;
+
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+ goto badkey;
+
+#ifdef DEBUG
+ dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
+ keys.authkeylen + keys.enckeylen, keys.enckeylen,
+ keys.authkeylen);
+ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+ ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
+ keys.authkeylen, CAAM_MAX_KEY_SIZE -
+ keys.enckeylen);
+ if (ret)
+ goto badkey;
+
+	/* append the encryption key after the auth split key */
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
+ dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
+ keys.enckeylen, DMA_TO_DEVICE);
+
+#ifdef DEBUG
+ dev_err(jrdev, "split keylen %d split keylen padded %d\n",
+ ctx->adata.keylen, ctx->adata.keylen_pad);
+ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+ ctx->adata.keylen_pad + keys.enckeylen, 1);
+#endif
+
+ ctx->cdata.keylen = keys.enckeylen;
+
+ ret = tls_set_sh_desc(tls);
+ if (ret)
+ goto badkey;
+
+ /* Now update the driver contexts with the new shared descriptor */
+ if (ctx->drv_ctx[ENCRYPT]) {
+ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
+ ctx->sh_desc_enc);
+ if (ret) {
+ dev_err(jrdev, "driver enc context update failed\n");
+ goto badkey;
+ }
+ }
+
+ if (ctx->drv_ctx[DECRYPT]) {
+ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
+ ctx->sh_desc_dec);
+ if (ret) {
+ dev_err(jrdev, "driver dec context update failed\n");
+ goto badkey;
+ }
+ }
+
+ return ret;
+badkey:
+ crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+}
+
+static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
+ const char *alg_name = crypto_tfm_alg_name(tfm);
+ struct device *jrdev = ctx->jrdev;
+ unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ u32 ctx1_iv_off = 0;
+ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
+ OP_ALG_AAI_CTR_MOD128);
+ const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
+ int ret = 0;
+
+ memcpy(ctx->key, key, keylen);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+ /*
+ * AES-CTR needs to load IV in CONTEXT1 reg
+ * at an offset of 128bits (16bytes)
+ * CONTEXT1[255:128] = IV
+ */
+ if (ctr_mode)
+ ctx1_iv_off = 16;
+
+ /*
+ * RFC3686 specific:
+ * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+ * | *key = {KEY, NONCE}
+ */
+ if (is_rfc3686) {
+ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+ keylen -= CTR_RFC3686_NONCE_SIZE;
+ }
+
+ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
+ ctx->cdata.keylen = keylen;
+ ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.key_inline = true;
+
+ /* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
+ cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
+ is_rfc3686, ctx1_iv_off);
+ cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
+ is_rfc3686, ctx1_iv_off);
+ cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
+ ivsize, is_rfc3686, ctx1_iv_off);
+
+ /* Now update the driver contexts with the new shared descriptor */
+ if (ctx->drv_ctx[ENCRYPT]) {
+ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
+ ctx->sh_desc_enc);
+ if (ret) {
+ dev_err(jrdev, "driver enc context update failed\n");
+ goto badkey;
+ }
+ }
+
+ if (ctx->drv_ctx[DECRYPT]) {
+ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
+ ctx->sh_desc_dec);
+ if (ret) {
+ dev_err(jrdev, "driver dec context update failed\n");
+ goto badkey;
+ }
+ }
+
+ if (ctx->drv_ctx[GIVENCRYPT]) {
+ ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
+ ctx->sh_desc_givenc);
+ if (ret) {
+ dev_err(jrdev, "driver givenc context update failed\n");
+ goto badkey;
+ }
+ }
+
+ return ret;
+badkey:
+ crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+}
+
+static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct device *jrdev = ctx->jrdev;
+ int ret = 0;
+
+ if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
+ crypto_ablkcipher_set_flags(ablkcipher,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ dev_err(jrdev, "key size mismatch\n");
+ return -EINVAL;
+ }
+
+ memcpy(ctx->key, key, keylen);
+ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
+ ctx->cdata.keylen = keylen;
+ ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.key_inline = true;
+
+ /* xts ablkcipher encrypt, decrypt shared descriptors */
+ cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
+ cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
+
+ /* Now update the driver contexts with the new shared descriptor */
+ if (ctx->drv_ctx[ENCRYPT]) {
+ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
+ ctx->sh_desc_enc);
+ if (ret) {
+ dev_err(jrdev, "driver enc context update failed\n");
+ goto badkey;
+ }
+ }
+
+ if (ctx->drv_ctx[DECRYPT]) {
+ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
+ ctx->sh_desc_dec);
+ if (ret) {
+ dev_err(jrdev, "driver dec context update failed\n");
+ goto badkey;
+ }
+ }
+
+ return ret;
+badkey:
+ crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	return -EINVAL;
+}
+
+/*
+ * aead_edesc - s/w-extended aead descriptor
+ * @src_nents: number of segments in input scatterlist
+ * @dst_nents: number of segments in output scatterlist
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @qm_sg_bytes: length of dma mapped h/w link table
+ * @qm_sg_dma: bus physical mapped address of h/w link table
+ * @assoclen: associated data length, in CAAM endianness
+ * @assoclen_dma: bus physical mapped address of req->assoclen
+ * @drv_req: driver-specific request structure
+ * @sgt: the h/w link table
+ */
+struct aead_edesc {
+ int src_nents;
+ int dst_nents;
+ dma_addr_t iv_dma;
+ int qm_sg_bytes;
+ dma_addr_t qm_sg_dma;
+ unsigned int assoclen;
+ dma_addr_t assoclen_dma;
+ struct caam_drv_req drv_req;
+#define CAAM_QI_MAX_AEAD_SG \
+ ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) / \
+ sizeof(struct qm_sg_entry))
+ struct qm_sg_entry sgt[0];
+};
+
+/*
+ * tls_edesc - s/w-extended tls descriptor
+ * @src_nents: number of segments in input scatterlist
+ * @dst_nents: number of segments in output scatterlist
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @qm_sg_bytes: length of dma mapped h/w link table
+ * @qm_sg_dma: bus physical mapped address of h/w link table
+ * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
+ * @dst: destination scatterlist; req->dst fast-forwarded past the
+ *       associated data when src != dst
+ * @drv_req: driver-specific request structure
+ * @sgt: the h/w link table
+ */
+struct tls_edesc {
+ int src_nents;
+ int dst_nents;
+ dma_addr_t iv_dma;
+ int qm_sg_bytes;
+ dma_addr_t qm_sg_dma;
+ struct scatterlist tmp[2];
+ struct scatterlist *dst;
+ struct caam_drv_req drv_req;
+ struct qm_sg_entry sgt[0];
+};
+
+/*
+ * ablkcipher_edesc - s/w-extended ablkcipher descriptor
+ * @src_nents: number of segments in input scatterlist
+ * @dst_nents: number of segments in output scatterlist
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @qm_sg_bytes: length of dma mapped h/w link table
+ * @qm_sg_dma: bus physical mapped address of h/w link table
+ * @drv_req: driver-specific request structure
+ * @sgt: the h/w link table
+ */
+struct ablkcipher_edesc {
+ int src_nents;
+ int dst_nents;
+ dma_addr_t iv_dma;
+ int qm_sg_bytes;
+ dma_addr_t qm_sg_dma;
+ struct caam_drv_req drv_req;
+#define CAAM_QI_MAX_ABLKCIPHER_SG \
+ ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \
+ sizeof(struct qm_sg_entry))
+ struct qm_sg_entry sgt[0];
+};
+
+static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
+ enum optype type)
+{
+ /*
+ * This function is called on the fast path with values of 'type'
+ * known at compile time. Invalid arguments are not expected and
+ * thus no checks are made.
+ */
+ struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
+ u32 *desc;
+
+ if (unlikely(!drv_ctx)) {
+ spin_lock(&ctx->lock);
+
+		/* Read again to check if some other core initialized drv_ctx */
+ drv_ctx = ctx->drv_ctx[type];
+ if (!drv_ctx) {
+ int cpu;
+
+ if (type == ENCRYPT)
+ desc = ctx->sh_desc_enc;
+ else if (type == DECRYPT)
+ desc = ctx->sh_desc_dec;
+ else /* (type == GIVENCRYPT) */
+ desc = ctx->sh_desc_givenc;
+
+ cpu = smp_processor_id();
+ drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
+ if (likely(!IS_ERR_OR_NULL(drv_ctx)))
+ drv_ctx->op_type = type;
+
+ ctx->drv_ctx[type] = drv_ctx;
+ }
+
+ spin_unlock(&ctx->lock);
+ }
+
+ return drv_ctx;
+}
+
+static void caam_unmap(struct device *dev, struct scatterlist *src,
+ struct scatterlist *dst, int src_nents,
+ int dst_nents, dma_addr_t iv_dma, int ivsize,
+ enum optype op_type, dma_addr_t qm_sg_dma,
+ int qm_sg_bytes)
+{
+ if (dst != src) {
+ if (src_nents)
+ dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
+ dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
+ } else {
+ dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
+ }
+
+ if (iv_dma)
+ dma_unmap_single(dev, iv_dma, ivsize,
+ op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
+ DMA_TO_DEVICE);
+ if (qm_sg_bytes)
+ dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
+}
+
+static void aead_unmap(struct device *dev,
+ struct aead_edesc *edesc,
+ struct aead_request *req)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ int ivsize = crypto_aead_ivsize(aead);
+
+ caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
+ edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
+ edesc->qm_sg_dma, edesc->qm_sg_bytes);
+ dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
+}
+
+static void tls_unmap(struct device *dev,
+ struct tls_edesc *edesc,
+ struct aead_request *req)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ int ivsize = crypto_aead_ivsize(aead);
+
+ caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
+ edesc->dst_nents, edesc->iv_dma, ivsize,
+ edesc->drv_req.drv_ctx->op_type, edesc->qm_sg_dma,
+ edesc->qm_sg_bytes);
+}
+
+static void ablkcipher_unmap(struct device *dev,
+ struct ablkcipher_edesc *edesc,
+ struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+ caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
+ edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
+ edesc->qm_sg_dma, edesc->qm_sg_bytes);
+}
+
+static void aead_done(struct caam_drv_req *drv_req, u32 status)
+{
+ struct device *qidev;
+ struct aead_edesc *edesc;
+ struct aead_request *aead_req = drv_req->app_ctx;
+ struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
+ struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
+ int ecode = 0;
+
+ qidev = caam_ctx->qidev;
+
+ if (unlikely(status)) {
+ caam_jr_strstatus(qidev, status);
+ ecode = -EIO;
+ }
+
+ edesc = container_of(drv_req, typeof(*edesc), drv_req);
+ aead_unmap(qidev, edesc, aead_req);
+
+ aead_request_complete(aead_req, ecode);
+ qi_cache_free(edesc);
+}
+
+/*
+ * allocate and map the aead extended descriptor
+ */
+static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
+ bool encrypt)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
+ typeof(*alg), aead);
+ struct device *qidev = ctx->qidev;
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+ struct aead_edesc *edesc;
+ dma_addr_t qm_sg_dma, iv_dma = 0;
+ int ivsize = 0;
+ unsigned int authsize = ctx->authsize;
+ int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
+ int in_len, out_len;
+ struct qm_sg_entry *sg_table, *fd_sgt;
+ struct caam_drv_ctx *drv_ctx;
+ enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
+
+ drv_ctx = get_drv_ctx(ctx, op_type);
+ if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
+ return (struct aead_edesc *)drv_ctx;
+
+ /* allocate space for base edesc and hw desc commands, link tables */
+ edesc = qi_cache_alloc(GFP_DMA | flags);
+ if (unlikely(!edesc)) {
+ dev_err(qidev, "could not allocate extended descriptor\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (likely(req->src == req->dst)) {
+ src_nents = sg_nents_for_len(req->src, req->assoclen +
+ req->cryptlen +
+ (encrypt ? authsize : 0));
+ if (unlikely(src_nents < 0)) {
+ dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
+ req->assoclen + req->cryptlen +
+ (encrypt ? authsize : 0));
+ qi_cache_free(edesc);
+ return ERR_PTR(src_nents);
+ }
+
+ mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(qidev, "unable to map source\n");
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+ src_nents = sg_nents_for_len(req->src, req->assoclen +
+ req->cryptlen);
+ if (unlikely(src_nents < 0)) {
+ dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
+ req->assoclen + req->cryptlen);
+ qi_cache_free(edesc);
+ return ERR_PTR(src_nents);
+ }
+
+ dst_nents = sg_nents_for_len(req->dst, req->assoclen +
+ req->cryptlen +
+ (encrypt ? authsize :
+ (-authsize)));
+ if (unlikely(dst_nents < 0)) {
+ dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
+ req->assoclen + req->cryptlen +
+ (encrypt ? authsize : (-authsize)));
+ qi_cache_free(edesc);
+ return ERR_PTR(dst_nents);
+ }
+
+ if (src_nents) {
+ mapped_src_nents = dma_map_sg(qidev, req->src,
+ src_nents, DMA_TO_DEVICE);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(qidev, "unable to map source\n");
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+ mapped_src_nents = 0;
+ }
+
+ mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
+ DMA_FROM_DEVICE);
+ if (unlikely(!mapped_dst_nents)) {
+ dev_err(qidev, "unable to map destination\n");
+ dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) {
+ ivsize = crypto_aead_ivsize(aead);
+ iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(qidev, iv_dma)) {
+ dev_err(qidev, "unable to map IV\n");
+ caam_unmap(qidev, req->src, req->dst, src_nents,
+ dst_nents, 0, 0, op_type, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ /*
+ * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
+ * Input is not contiguous.
+ */
+ qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
+ (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
+ if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) {
+		dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
+ qm_sg_ents, CAAM_QI_MAX_AEAD_SG);
+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, op_type, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ sg_table = &edesc->sgt[0];
+ qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
+
+ edesc->src_nents = src_nents;
+ edesc->dst_nents = dst_nents;
+ edesc->iv_dma = iv_dma;
+ edesc->drv_req.app_ctx = req;
+ edesc->drv_req.cbk = aead_done;
+ edesc->drv_req.drv_ctx = drv_ctx;
+
+ edesc->assoclen = cpu_to_caam32(req->assoclen);
+ edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
+ dev_err(qidev, "unable to map assoclen\n");
+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, op_type, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
+ qm_sg_index++;
+ if (ivsize) {
+ dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
+ qm_sg_index++;
+ }
+ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
+ qm_sg_index += mapped_src_nents;
+
+ if (mapped_dst_nents > 1)
+ sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
+ qm_sg_index, 0);
+
+ qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(qidev, qm_sg_dma)) {
+ dev_err(qidev, "unable to map S/G table\n");
+ dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, op_type, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ edesc->qm_sg_dma = qm_sg_dma;
+ edesc->qm_sg_bytes = qm_sg_bytes;
+
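+	/*
+	 * Input as seen by CAAM: 4-byte assoclen word, optional IV, associated
+	 * data and payload. Output: associated data and payload, grown by the
+	 * ICV on encryption and shrunk by it on decryption.
+	 */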
+ out_len = req->assoclen + req->cryptlen +
+ (encrypt ? ctx->authsize : (-ctx->authsize));
+ in_len = 4 + ivsize + req->assoclen + req->cryptlen;
+
+ fd_sgt = &edesc->drv_req.fd_sgt[0];
+ dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
+
+ if (req->dst == req->src) {
+ if (mapped_src_nents == 1)
+ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
+ out_len, 0);
+ else
+ dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
+ (1 + !!ivsize) * sizeof(*sg_table),
+ out_len, 0);
+ } else if (mapped_dst_nents == 1) {
+ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
+ 0);
+ } else {
+ dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
+ qm_sg_index, out_len, 0);
+ }
+
+ return edesc;
+}
+
+static inline int aead_crypt(struct aead_request *req, bool encrypt)
+{
+ struct aead_edesc *edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ int ret;
+
+ if (unlikely(caam_congested))
+ return -EAGAIN;
+
+ /* allocate extended descriptor */
+ edesc = aead_edesc_alloc(req, encrypt);
+ if (IS_ERR_OR_NULL(edesc))
+ return PTR_ERR(edesc);
+
+ /* Create and submit job descriptor */
+ ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ aead_unmap(ctx->qidev, edesc, req);
+ qi_cache_free(edesc);
+ }
+
+ return ret;
+}
+
+static int aead_encrypt(struct aead_request *req)
+{
+ return aead_crypt(req, true);
+}
+
+static int aead_decrypt(struct aead_request *req)
+{
+ return aead_crypt(req, false);
+}
+
+static void tls_done(struct caam_drv_req *drv_req, u32 status)
+{
+ struct device *qidev;
+ struct tls_edesc *edesc;
+ struct aead_request *aead_req = drv_req->app_ctx;
+ struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
+ struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
+ int ecode = 0;
+
+ qidev = caam_ctx->qidev;
+
+ if (unlikely(status)) {
+ caam_jr_strstatus(qidev, status);
+ ecode = -EIO;
+ }
+
+ edesc = container_of(drv_req, typeof(*edesc), drv_req);
+ tls_unmap(qidev, edesc, aead_req);
+
+ aead_request_complete(aead_req, ecode);
+ qi_cache_free(edesc);
+}
+
+/*
+ * allocate and map the tls extended descriptor
+ */
+static struct tls_edesc *tls_edesc_alloc(struct aead_request *req, bool encrypt)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ unsigned int blocksize = crypto_aead_blocksize(aead);
+ unsigned int padsize, authsize;
+ struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
+ typeof(*alg), aead);
+ struct device *qidev = ctx->qidev;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+ struct tls_edesc *edesc;
+ dma_addr_t qm_sg_dma, iv_dma = 0;
+ int ivsize = 0;
+ int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
+ int in_len, out_len;
+ struct qm_sg_entry *sg_table, *fd_sgt;
+ struct caam_drv_ctx *drv_ctx;
+ enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
+ struct scatterlist *dst;
+
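+	/*
+	 * On encryption the output carries the ICV plus TLS 1.0 CBC padding,
+	 * so grow the expected destination length accordingly.
+	 */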
+ if (encrypt) {
+ padsize = blocksize - ((req->cryptlen + ctx->authsize) %
+ blocksize);
+ authsize = ctx->authsize + padsize;
+ } else {
+ authsize = ctx->authsize;
+ }
+
+ drv_ctx = get_drv_ctx(ctx, op_type);
+ if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
+ return (struct tls_edesc *)drv_ctx;
+
+ /* allocate space for base edesc and hw desc commands, link tables */
+ edesc = qi_cache_alloc(GFP_DMA | flags);
+ if (unlikely(!edesc)) {
+ dev_err(qidev, "could not allocate extended descriptor\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (likely(req->src == req->dst)) {
+ src_nents = sg_nents_for_len(req->src, req->assoclen +
+ req->cryptlen +
+ (encrypt ? authsize : 0));
+ if (unlikely(src_nents < 0)) {
+ dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
+ req->assoclen + req->cryptlen +
+ (encrypt ? authsize : 0));
+ qi_cache_free(edesc);
+ return ERR_PTR(src_nents);
+ }
+
+ mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(qidev, "unable to map source\n");
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ dst = req->dst;
+ } else {
+ src_nents = sg_nents_for_len(req->src, req->assoclen +
+ req->cryptlen);
+ if (unlikely(src_nents < 0)) {
+ dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
+ req->assoclen + req->cryptlen);
+ qi_cache_free(edesc);
+ return ERR_PTR(src_nents);
+ }
+
+ dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
+ dst_nents = sg_nents_for_len(dst, req->cryptlen +
+ (encrypt ? authsize : 0));
+ if (unlikely(dst_nents < 0)) {
+ dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
+ req->cryptlen +
+ (encrypt ? authsize : 0));
+ qi_cache_free(edesc);
+ return ERR_PTR(dst_nents);
+ }
+
+ if (src_nents) {
+ mapped_src_nents = dma_map_sg(qidev, req->src,
+ src_nents, DMA_TO_DEVICE);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(qidev, "unable to map source\n");
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+ mapped_src_nents = 0;
+ }
+
+ mapped_dst_nents = dma_map_sg(qidev, dst, dst_nents,
+ DMA_FROM_DEVICE);
+ if (unlikely(!mapped_dst_nents)) {
+ dev_err(qidev, "unable to map destination\n");
+ dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ ivsize = crypto_aead_ivsize(aead);
+ iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(qidev, iv_dma)) {
+ dev_err(qidev, "unable to map IV\n");
+ caam_unmap(qidev, req->src, dst, src_nents, dst_nents, 0, 0,
+ op_type, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /*
+ * Create S/G table: IV, src, dst.
+ * Input is not contiguous.
+ */
+ qm_sg_ents = 1 + mapped_src_nents +
+ (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
+ sg_table = &edesc->sgt[0];
+ qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
+
+ edesc->src_nents = src_nents;
+ edesc->dst_nents = dst_nents;
+ edesc->dst = dst;
+ edesc->iv_dma = iv_dma;
+ edesc->drv_req.app_ctx = req;
+ edesc->drv_req.cbk = tls_done;
+ edesc->drv_req.drv_ctx = drv_ctx;
+
+ dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
+ qm_sg_index = 1;
+
+ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
+ qm_sg_index += mapped_src_nents;
+
+ if (mapped_dst_nents > 1)
+ sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
+ qm_sg_index, 0);
+
+ qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(qidev, qm_sg_dma)) {
+ dev_err(qidev, "unable to map S/G table\n");
+ caam_unmap(qidev, req->src, dst, src_nents, dst_nents, iv_dma,
+ ivsize, op_type, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ edesc->qm_sg_dma = qm_sg_dma;
+ edesc->qm_sg_bytes = qm_sg_bytes;
+
+ out_len = req->cryptlen + (encrypt ? authsize : 0);
+ in_len = ivsize + req->assoclen + req->cryptlen;
+
+ fd_sgt = &edesc->drv_req.fd_sgt[0];
+
+ dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
+
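+	/*
+	 * In-place operation: point the output at the S/G entry where the
+	 * payload starts, i.e. past the IV entry and the entries covering
+	 * the associated data.
+	 */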
+ if (req->dst == req->src)
+ dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
+ (sg_nents_for_len(req->src, req->assoclen) +
+ 1) * sizeof(*sg_table), out_len, 0);
+ else if (mapped_dst_nents == 1)
+ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(dst), out_len, 0);
+ else
+ dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
+ qm_sg_index, out_len, 0);
+
+ return edesc;
+}
+
+static int tls_crypt(struct aead_request *req, bool encrypt)
+{
+ struct tls_edesc *edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ int ret;
+
+ if (unlikely(caam_congested))
+ return -EAGAIN;
+
+ edesc = tls_edesc_alloc(req, encrypt);
+ if (IS_ERR_OR_NULL(edesc))
+ return PTR_ERR(edesc);
+
+ ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ tls_unmap(ctx->qidev, edesc, req);
+ qi_cache_free(edesc);
+ }
+
+ return ret;
+}
+
+static int tls_encrypt(struct aead_request *req)
+{
+ return tls_crypt(req, true);
+}
+
+static int tls_decrypt(struct aead_request *req)
+{
+ return tls_crypt(req, false);
+}
+
+static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
+{
+ struct ablkcipher_edesc *edesc;
+ struct ablkcipher_request *req = drv_req->app_ctx;
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct device *qidev = caam_ctx->qidev;
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+#ifdef DEBUG
+ dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
+#endif
+
+ edesc = container_of(drv_req, typeof(*edesc), drv_req);
+
+ if (status)
+ caam_jr_strstatus(qidev, status);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+ edesc->src_nents > 1 ? 100 : ivsize, 1);
+ caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+ edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+#endif
+
+ ablkcipher_unmap(qidev, edesc, req);
+ qi_cache_free(edesc);
+
+ /*
+ * The crypto API expects us to set the IV (req->info) to the last
+ * ciphertext block. This is used e.g. by the CTS mode.
+ */
+ scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
+ ivsize, 0);
+
+ ablkcipher_request_complete(req, status);
+}
+
+static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
+ *req, bool encrypt)
+{
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct device *qidev = ctx->qidev;
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+ struct ablkcipher_edesc *edesc;
+ dma_addr_t iv_dma;
+ bool in_contig;
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ int dst_sg_idx, qm_sg_ents;
+ struct qm_sg_entry *sg_table, *fd_sgt;
+ struct caam_drv_ctx *drv_ctx;
+ enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
+
+ drv_ctx = get_drv_ctx(ctx, op_type);
+ if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
+ return (struct ablkcipher_edesc *)drv_ctx;
+
+ src_nents = sg_nents_for_len(req->src, req->nbytes);
+ if (unlikely(src_nents < 0)) {
+ dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
+ req->nbytes);
+ return ERR_PTR(src_nents);
+ }
+
+ if (unlikely(req->src != req->dst)) {
+ dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+ if (unlikely(dst_nents < 0)) {
+ dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
+ req->nbytes);
+ return ERR_PTR(dst_nents);
+ }
+
+ mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(qidev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
+ DMA_FROM_DEVICE);
+ if (unlikely(!mapped_dst_nents)) {
+ dev_err(qidev, "unable to map destination\n");
+ dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+ mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(qidev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ iv_dma = dma_map_single(qidev, req->info, ivsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(qidev, iv_dma)) {
+ dev_err(qidev, "unable to map IV\n");
+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0, 0);
+ return ERR_PTR(-ENOMEM);
+ }
+
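+	/*
+	 * When the IV buffer immediately precedes a single source segment,
+	 * the input (IV || data) is contiguous and can be passed directly,
+	 * without building an input S/G table.
+	 */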
+ if (mapped_src_nents == 1 &&
+ iv_dma + ivsize == sg_dma_address(req->src)) {
+ in_contig = true;
+ qm_sg_ents = 0;
+ } else {
+ in_contig = false;
+ qm_sg_ents = 1 + mapped_src_nents;
+ }
+ dst_sg_idx = qm_sg_ents;
+
+ qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
+ if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
+		dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
+ qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, op_type, 0, 0);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_alloc(GFP_DMA | flags);
+ if (unlikely(!edesc)) {
+ dev_err(qidev, "could not allocate extended descriptor\n");
+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, op_type, 0, 0);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ edesc->src_nents = src_nents;
+ edesc->dst_nents = dst_nents;
+ edesc->iv_dma = iv_dma;
+ sg_table = &edesc->sgt[0];
+ edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
+ edesc->drv_req.app_ctx = req;
+ edesc->drv_req.cbk = ablkcipher_done;
+ edesc->drv_req.drv_ctx = drv_ctx;
+
+ if (!in_contig) {
+ dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
+ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
+ }
+
+ if (mapped_dst_nents > 1)
+ sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
+ dst_sg_idx, 0);
+
+ edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
+ dev_err(qidev, "unable to map S/G table\n");
+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, op_type, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ fd_sgt = &edesc->drv_req.fd_sgt[0];
+
+ if (!in_contig)
+ dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
+ ivsize + req->nbytes, 0);
+ else
+ dma_to_qm_sg_one_last(&fd_sgt[1], iv_dma, ivsize + req->nbytes,
+ 0);
+
+ if (req->src == req->dst) {
+ if (!in_contig)
+ dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
+ sizeof(*sg_table), req->nbytes, 0);
+ else
+ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
+ req->nbytes, 0);
+ } else if (mapped_dst_nents > 1) {
+ dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
+ sizeof(*sg_table), req->nbytes, 0);
+ } else {
+ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
+ req->nbytes, 0);
+ }
+
+ return edesc;
+}
+
+static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
+ struct skcipher_givcrypt_request *creq)
+{
+ struct ablkcipher_request *req = &creq->creq;
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct device *qidev = ctx->qidev;
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
+ struct ablkcipher_edesc *edesc;
+ dma_addr_t iv_dma;
+ bool out_contig;
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ struct qm_sg_entry *sg_table, *fd_sgt;
+ int dst_sg_idx, qm_sg_ents;
+ struct caam_drv_ctx *drv_ctx;
+
+ drv_ctx = get_drv_ctx(ctx, GIVENCRYPT);
+ if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
+ return (struct ablkcipher_edesc *)drv_ctx;
+
+ src_nents = sg_nents_for_len(req->src, req->nbytes);
+ if (unlikely(src_nents < 0)) {
+ dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
+ req->nbytes);
+ return ERR_PTR(src_nents);
+ }
+
+ if (unlikely(req->src != req->dst)) {
+ dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+ if (unlikely(dst_nents < 0)) {
+ dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
+ req->nbytes);
+ return ERR_PTR(dst_nents);
+ }
+
+ mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(qidev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
+ DMA_FROM_DEVICE);
+ if (unlikely(!mapped_dst_nents)) {
+ dev_err(qidev, "unable to map destination\n");
+ dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+ mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(qidev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dst_nents = src_nents;
+ mapped_dst_nents = src_nents;
+ }
+
+ iv_dma = dma_map_single(qidev, creq->giv, ivsize, DMA_FROM_DEVICE);
+ if (dma_mapping_error(qidev, iv_dma)) {
+ dev_err(qidev, "unable to map IV\n");
+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0, 0);
+ return ERR_PTR(-ENOMEM);
+ }
+
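+	/*
+	 * The IV generated by CAAM is returned through creq->giv; when that
+	 * buffer immediately precedes a single destination segment, the
+	 * output is contiguous and needs no IV/destination S/G entries.
+	 */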
+ qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
+ dst_sg_idx = qm_sg_ents;
+ if (mapped_dst_nents == 1 &&
+ iv_dma + ivsize == sg_dma_address(req->dst)) {
+ out_contig = true;
+ } else {
+ out_contig = false;
+ qm_sg_ents += 1 + mapped_dst_nents;
+ }
+
+ if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
+		dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
+ qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, GIVENCRYPT, 0, 0);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_alloc(GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(qidev, "could not allocate extended descriptor\n");
+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, GIVENCRYPT, 0, 0);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ edesc->src_nents = src_nents;
+ edesc->dst_nents = dst_nents;
+ edesc->iv_dma = iv_dma;
+ sg_table = &edesc->sgt[0];
+ edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
+ edesc->drv_req.app_ctx = req;
+ edesc->drv_req.cbk = ablkcipher_done;
+ edesc->drv_req.drv_ctx = drv_ctx;
+
+ if (mapped_src_nents > 1)
+ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
+
+ if (!out_contig) {
+ dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
+ sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
+ dst_sg_idx + 1, 0);
+ }
+
+ edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
+ dev_err(qidev, "unable to map S/G table\n");
+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, GIVENCRYPT, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ fd_sgt = &edesc->drv_req.fd_sgt[0];
+
+ if (mapped_src_nents > 1)
+ dma_to_qm_sg_one_ext(&fd_sgt[1], edesc->qm_sg_dma, req->nbytes,
+ 0);
+ else
+ dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src),
+ req->nbytes, 0);
+
+ if (!out_contig)
+ dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
+ sizeof(*sg_table), ivsize + req->nbytes,
+ 0);
+ else
+ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
+ ivsize + req->nbytes, 0);
+
+ return edesc;
+}
+
+static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
+{
+ struct ablkcipher_edesc *edesc;
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ int ret;
+
+ if (unlikely(caam_congested))
+ return -EAGAIN;
+
+ /* allocate extended descriptor */
+ edesc = ablkcipher_edesc_alloc(req, encrypt);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ ablkcipher_unmap(ctx->qidev, edesc, req);
+ qi_cache_free(edesc);
+ }
+
+ return ret;
+}
+
+static int ablkcipher_encrypt(struct ablkcipher_request *req)
+{
+ return ablkcipher_crypt(req, true);
+}
+
+static int ablkcipher_decrypt(struct ablkcipher_request *req)
+{
+ return ablkcipher_crypt(req, false);
+}
+
+static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
+{
+ struct ablkcipher_request *req = &creq->creq;
+ struct ablkcipher_edesc *edesc;
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ int ret;
+
+ if (unlikely(caam_congested))
+ return -EAGAIN;
+
+ /* allocate extended descriptor */
+ edesc = ablkcipher_giv_edesc_alloc(creq);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ ablkcipher_unmap(ctx->qidev, edesc, req);
+ qi_cache_free(edesc);
+ }
+
+ return ret;
+}
+
+#define template_ablkcipher template_u.ablkcipher
+struct caam_alg_template {
+ char name[CRYPTO_MAX_ALG_NAME];
+ char driver_name[CRYPTO_MAX_ALG_NAME];
+ unsigned int blocksize;
+ u32 type;
+ union {
+ struct ablkcipher_alg ablkcipher;
+ } template_u;
+ u32 class1_alg_type;
+ u32 class2_alg_type;
+};
+
+static struct caam_alg_template driver_algs[] = {
+ /* ablkcipher descriptor */
+ {
+ .name = "cbc(aes)",
+ .driver_name = "cbc-aes-caam-qi",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_GIVCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .givencrypt = ablkcipher_givencrypt,
+ .geniv = "<built-in>",
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ },
+ {
+ .name = "cbc(des3_ede)",
+ .driver_name = "cbc-3des-caam-qi",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_GIVCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .givencrypt = ablkcipher_givencrypt,
+ .geniv = "<built-in>",
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ },
+ {
+ .name = "cbc(des)",
+ .driver_name = "cbc-des-caam-qi",
+ .blocksize = DES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_GIVCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .givencrypt = ablkcipher_givencrypt,
+ .geniv = "<built-in>",
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ },
+ {
+ .name = "ctr(aes)",
+ .driver_name = "ctr-aes-caam-qi",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .geniv = "chainiv",
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ },
+ {
+ .name = "rfc3686(ctr(aes))",
+ .driver_name = "rfc3686-ctr-aes-caam-qi",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_GIVCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .givencrypt = ablkcipher_givencrypt,
+ .geniv = "<built-in>",
+ .min_keysize = AES_MIN_KEY_SIZE +
+ CTR_RFC3686_NONCE_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE +
+ CTR_RFC3686_NONCE_SIZE,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ },
+ {
+ .name = "xts(aes)",
+ .driver_name = "xts-aes-caam-qi",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_ablkcipher = {
+ .setkey = xts_ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .geniv = "eseqiv",
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
+ },
+};
+
+static struct caam_aead_alg driver_aeads[] = {
+ /* single-pass ipsec_esp descriptor */
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "cbc-aes-caam-qi",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(md5),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-hmac-md5-"
+ "cbc-aes-caam-qi",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "cbc-aes-caam-qi",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha1),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha1-cbc-aes-caam-qi",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "cbc-aes-caam-qi",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha224),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha224-cbc-aes-caam-qi",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "cbc-aes-caam-qi",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha256),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha256-cbc-aes-"
+ "caam-qi",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha384-"
+ "cbc-aes-caam-qi",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha384),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha384-cbc-aes-"
+ "caam-qi",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha512-"
+ "cbc-aes-caam-qi",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha512),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha512-cbc-aes-"
+ "caam-qi",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "cbc-des3_ede-caam-qi",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(md5),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-hmac-md5-"
+ "cbc-des3_ede-caam-qi",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "cbc-des3_ede-caam-qi",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha1),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha1-"
+ "cbc-des3_ede-caam-qi",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "cbc-des3_ede-caam-qi",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha224),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha224-"
+ "cbc-des3_ede-caam-qi",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "cbc-des3_ede-caam-qi",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha256),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha256-"
+ "cbc-des3_ede-caam-qi",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha384-"
+ "cbc-des3_ede-caam-qi",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha384),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha384-"
+ "cbc-des3_ede-caam-qi",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha512-"
+ "cbc-des3_ede-caam-qi",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha512),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha512-"
+ "cbc-des3_ede-caam-qi",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(des))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "cbc-des-caam-qi",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(md5),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-hmac-md5-"
+ "cbc-des-caam-qi",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "cbc-des-caam-qi",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha1),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha1-cbc-des-caam-qi",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "cbc-des-caam-qi",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha224),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha224-cbc-des-"
+ "caam-qi",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "cbc-des-caam-qi",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha256),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha256-cbc-des-"
+ "caam-qi",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha384-"
+ "cbc-des-caam-qi",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha384),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha384-cbc-des-"
+ "caam-qi",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha512-"
+ "cbc-des-caam-qi",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha512),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha512-cbc-des-"
+ "caam-qi",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "tls10(hmac(sha1),cbc(aes))",
+ .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = tls_setkey,
+ .setauthsize = tls_setauthsize,
+ .encrypt = tls_encrypt,
+ .decrypt = tls_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ }
+};
+
+struct caam_crypto_alg {
+ struct list_head entry;
+ struct crypto_alg crypto_alg;
+ struct caam_alg_entry caam;
+};
+
+static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
+{
+ struct caam_drv_private *priv;
+ /* Digest sizes for MD5, SHA1, SHA-224, SHA-256, SHA-384, SHA-512 */
+ static const u8 digest_size[] = {
+ MD5_DIGEST_SIZE,
+ SHA1_DIGEST_SIZE,
+ SHA224_DIGEST_SIZE,
+ SHA256_DIGEST_SIZE,
+ SHA384_DIGEST_SIZE,
+ SHA512_DIGEST_SIZE
+ };
+ u8 op_id;
+
+ /*
+ * distribute tfms across job rings to ensure in-order
+ * crypto request processing per tfm
+ */
+ ctx->jrdev = caam_jr_alloc();
+ if (IS_ERR(ctx->jrdev)) {
+ pr_err("Job Ring Device allocation for transform failed\n");
+ return PTR_ERR(ctx->jrdev);
+ }
+
+ ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
+ dev_err(ctx->jrdev, "unable to map key\n");
+ caam_jr_free(ctx->jrdev);
+ return -ENOMEM;
+ }
+
+ /* copy descriptor header template value */
+ ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
+ ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
+
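+ /*
+ * Derive the default authentication tag size from the class 2
+ * (authentication) algorithm selector: the ALGSEL sub-field indexes
+ * digest_size[] (MD5..SHA512 -> 0..5).
+ */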
+ if (ctx->adata.algtype) {
+ op_id = (ctx->adata.algtype & OP_ALG_ALGSEL_SUBMASK)
+ >> OP_ALG_ALGSEL_SHIFT;
+ if (op_id < ARRAY_SIZE(digest_size)) {
+ ctx->authsize = digest_size[op_id];
+ } else {
+ dev_err(ctx->jrdev,
+ "incorrect op_id %d; must be less than %zu\n",
+ op_id, ARRAY_SIZE(digest_size));
+ caam_jr_free(ctx->jrdev);
+ return -EINVAL;
+ }
+ } else {
+ ctx->authsize = 0;
+ }
+
+ priv = dev_get_drvdata(ctx->jrdev->parent);
+ ctx->qidev = priv->qidev;
+
+ spin_lock_init(&ctx->lock);
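+ /*
+ * Driver contexts (holding the shared descriptors) are not created
+ * here; they are set up on first use, per operation type.
+ */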
+ ctx->drv_ctx[ENCRYPT] = NULL;
+ ctx->drv_ctx[DECRYPT] = NULL;
+ ctx->drv_ctx[GIVENCRYPT] = NULL;
+
+ return 0;
+}
+
+static int caam_cra_init(struct crypto_tfm *tfm)
+{
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
+ crypto_alg);
+ struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ return caam_init_common(ctx, &caam_alg->caam);
+}
+
+static int caam_aead_init(struct crypto_aead *tfm)
+{
+ struct aead_alg *alg = crypto_aead_alg(tfm);
+ struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
+ aead);
+ struct caam_ctx *ctx = crypto_aead_ctx(tfm);
+
+ return caam_init_common(ctx, &caam_alg->caam);
+}
+
+static void caam_exit_common(struct caam_ctx *ctx)
+{
+ caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
+ caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
+ caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
+
+ dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key),
+ DMA_TO_DEVICE);
+
+ caam_jr_free(ctx->jrdev);
+}
+
+static void caam_cra_exit(struct crypto_tfm *tfm)
+{
+ caam_exit_common(crypto_tfm_ctx(tfm));
+}
+
+static void caam_aead_exit(struct crypto_aead *tfm)
+{
+ caam_exit_common(crypto_aead_ctx(tfm));
+}
+
+static struct list_head alg_list;
+static void __exit caam_qi_algapi_exit(void)
+{
+ struct caam_crypto_alg *t_alg, *n;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
+ struct caam_aead_alg *t_alg = driver_aeads + i;
+
+ if (t_alg->registered)
+ crypto_unregister_aead(&t_alg->aead);
+ }
+
+ if (!alg_list.next)
+ return;
+
+ list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
+ crypto_unregister_alg(&t_alg->crypto_alg);
+ list_del(&t_alg->entry);
+ kfree(t_alg);
+ }
+}
+
+static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
+ *template)
+{
+ struct caam_crypto_alg *t_alg;
+ struct crypto_alg *alg;
+
+ t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
+ if (!t_alg)
+ return ERR_PTR(-ENOMEM);
+
+ alg = &t_alg->crypto_alg;
+
+ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
+ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->driver_name);
+ alg->cra_module = THIS_MODULE;
+ alg->cra_init = caam_cra_init;
+ alg->cra_exit = caam_cra_exit;
+ alg->cra_priority = CAAM_CRA_PRIORITY;
+ alg->cra_blocksize = template->blocksize;
+ alg->cra_alignmask = 0;
+ alg->cra_ctxsize = sizeof(struct caam_ctx);
+ alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
+ template->type;
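+ /*
+ * GIVCIPHER algorithms generate the IV via givencrypt(), while plain
+ * ABLKCIPHER ones rely on a generic IV generator (geniv).
+ */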
+ switch (template->type) {
+ case CRYPTO_ALG_TYPE_GIVCIPHER:
+ alg->cra_type = &crypto_givcipher_type;
+ alg->cra_ablkcipher = template->template_ablkcipher;
+ break;
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ alg->cra_type = &crypto_ablkcipher_type;
+ alg->cra_ablkcipher = template->template_ablkcipher;
+ break;
+ }
+
+ t_alg->caam.class1_alg_type = template->class1_alg_type;
+ t_alg->caam.class2_alg_type = template->class2_alg_type;
+
+ return t_alg;
+}
+
+static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
+{
+ struct aead_alg *alg = &t_alg->aead;
+
+ alg->base.cra_module = THIS_MODULE;
+ alg->base.cra_priority = CAAM_CRA_PRIORITY;
+ alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+
+ alg->init = caam_aead_init;
+ alg->exit = caam_aead_exit;
+}
+
+static int __init caam_qi_algapi_init(void)
+{
+ struct device_node *dev_node;
+ struct platform_device *pdev;
+ struct device *ctrldev;
+ struct caam_drv_private *priv;
+ int i = 0, err = 0;
+ u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
+ unsigned int md_limit = SHA512_DIGEST_SIZE;
+ bool registered = false;
+
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+ if (!dev_node) {
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+ if (!dev_node)
+ return -ENODEV;
+ }
+
+ pdev = of_find_device_by_node(dev_node);
+ of_node_put(dev_node);
+ if (!pdev)
+ return -ENODEV;
+
+ ctrldev = &pdev->dev;
+ priv = dev_get_drvdata(ctrldev);
+
+ /*
+ * If priv is NULL, it's probably because the caam driver wasn't
+ * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+ */
+ if (!priv || !priv->qi_present)
+ return -ENODEV;
+
+ if (caam_dpaa2) {
+ dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n");
+ return -ENODEV;
+ }
+
+ INIT_LIST_HEAD(&alg_list);
+
+ /*
+ * Register crypto algorithms the device supports.
+ * First, detect presence and attributes of DES, AES, and MD blocks.
+ */
+ cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
+ cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+ des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
+ aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
+ md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+
+ /* If MD is present, limit digest size based on LP256 */
+ if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
+ md_limit = SHA256_DIGEST_SIZE;
+
+ for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+ struct caam_crypto_alg *t_alg;
+ struct caam_alg_template *alg = driver_algs + i;
+ u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
+
+ /* Skip DES algorithms if not supported by device */
+ if (!des_inst &&
+ ((alg_sel == OP_ALG_ALGSEL_3DES) ||
+ (alg_sel == OP_ALG_ALGSEL_DES)))
+ continue;
+
+ /* Skip AES algorithms if not supported by device */
+ if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
+ continue;
+
+ t_alg = caam_alg_alloc(alg);
+ if (IS_ERR(t_alg)) {
+ err = PTR_ERR(t_alg);
+ dev_warn(priv->qidev, "%s alg allocation failed\n",
+ alg->driver_name);
+ continue;
+ }
+
+ err = crypto_register_alg(&t_alg->crypto_alg);
+ if (err) {
+ dev_warn(priv->qidev, "%s alg registration failed\n",
+ t_alg->crypto_alg.cra_driver_name);
+ kfree(t_alg);
+ continue;
+ }
+
+ list_add_tail(&t_alg->entry, &alg_list);
+ registered = true;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
+ struct caam_aead_alg *t_alg = driver_aeads + i;
+ u32 c1_alg_sel = t_alg->caam.class1_alg_type &
+ OP_ALG_ALGSEL_MASK;
+ u32 c2_alg_sel = t_alg->caam.class2_alg_type &
+ OP_ALG_ALGSEL_MASK;
+ u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
+
+ /* Skip DES algorithms if not supported by device */
+ if (!des_inst &&
+ ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
+ (c1_alg_sel == OP_ALG_ALGSEL_DES)))
+ continue;
+
+ /* Skip AES algorithms if not supported by device */
+ if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
+ continue;
+
+ /*
+ * Check support for AES algorithms not available
+ * on LP devices.
+ */
+ if (((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) &&
+ (alg_aai == OP_ALG_AAI_GCM))
+ continue;
+
+ /*
+ * Skip algorithms requiring message digests
+ * if MD or MD size is not supported by device.
+ */
+ if (c2_alg_sel &&
+ (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
+ continue;
+
+ caam_aead_alg_init(t_alg);
+
+ err = crypto_register_aead(&t_alg->aead);
+ if (err) {
+ pr_warn("%s alg registration failed\n",
+ t_alg->aead.base.cra_driver_name);
+ continue;
+ }
+
+ t_alg->registered = true;
+ registered = true;
+ }
+
+ if (registered)
+ dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");
+
+ return err;
+}
+
+module_init(caam_qi_algapi_init);
+module_exit(caam_qi_algapi_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend");
+MODULE_AUTHOR("Freescale Semiconductor");
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
new file mode 100644
index 0000000..102b084
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -0,0 +1,4428 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the names of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "compat.h"
+#include "regs.h"
+#include "caamalg_qi2.h"
+#include "dpseci_cmd.h"
+#include "desc_constr.h"
+#include "error.h"
+#include "sg_sw_sec4.h"
+#include "sg_sw_qm2.h"
+#include "key_gen.h"
+#include "caamalg_desc.h"
+#include "../../../drivers/staging/fsl-mc/include/mc.h"
+#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
+#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
+
+#define CAAM_CRA_PRIORITY 2000
+
+/* max key is sum of AES_MAX_KEY_SIZE, RFC3686 nonce and max split key size */
+#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
+ SHA512_DIGEST_SIZE * 2)
+
+#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM
+bool caam_little_end;
+EXPORT_SYMBOL(caam_little_end);
+bool caam_imx;
+EXPORT_SYMBOL(caam_imx);
+#endif
+
+/*
+ * This is a cache of buffers, from which the users of the CAAM QI driver
+ * can allocate short buffers. It's faster than doing kmalloc on the hotpath.
+ * NOTE: A more elegant solution would be to have some headroom in the frames
+ * being processed. This could be added by the dpaa2-eth driver. However, that
+ * would pose a problem for userspace applications, which cannot know of this
+ * limitation. So for now, this will work.
+ * NOTE: The memcache is SMP-safe. No need to handle spinlocks here.
+ */
+static struct kmem_cache *qi_cache;
+
+struct caam_alg_entry {
+ struct device *dev;
+ int class1_alg_type;
+ int class2_alg_type;
+ bool rfc3686;
+ bool geniv;
+};
+
+struct caam_aead_alg {
+ struct aead_alg aead;
+ struct caam_alg_entry caam;
+ bool registered;
+};
+
+/**
+ * struct caam_ctx - per-session context
+ * @flc: Flow Contexts array
+ * @key: virtual address of the key(s): [authentication key], encryption key
+ * @key_dma: I/O virtual address of the key
+ * @dev: dpseci device
+ * @adata: authentication algorithm details
+ * @cdata: encryption algorithm details
+ * @authsize: authentication tag (a.k.a. ICV / MAC) size
+ */
+struct caam_ctx {
+ struct caam_flc flc[NUM_OP];
+ u8 key[CAAM_MAX_KEY_SIZE];
+ dma_addr_t key_dma;
+ struct device *dev;
+ struct alginfo adata;
+ struct alginfo cdata;
+ unsigned int authsize;
+};
+
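+/*
+ * When the dpseci object sits behind an IOMMU, addresses received from the
+ * hardware are I/O virtual addresses and must be translated through the
+ * IOMMU domain before phys_to_virt() can be used.
+ */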
+void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
+ dma_addr_t iova_addr)
+{
+ phys_addr_t phys_addr;
+
+ phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
+ iova_addr;
+
+ return phys_to_virt(phys_addr);
+}
+
+/*
+ * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
+ *
+ * Allocate data on the hotpath. Instead of using kzalloc, one can use the
+ * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
+ * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
+ * hosting 16 SG entries.
+ *
+ * @flags - flags that would be used for the equivalent kmalloc(..) call
+ *
+ * Returns a pointer to a retrieved buffer on success or NULL on failure.
+ */
+static inline void *qi_cache_zalloc(gfp_t flags)
+{
+ return kmem_cache_zalloc(qi_cache, flags);
+}
+
+/*
+ * qi_cache_free - Frees buffers allocated from CAAM-QI cache
+ *
+ * @obj - buffer previously allocated by qi_cache_zalloc
+ *
+ * No checking is done; this is a pass-through call to
+ * kmem_cache_free(...)
+ */
+static inline void qi_cache_free(void *obj)
+{
+ kmem_cache_free(qi_cache, obj);
+}
+
+static struct caam_request *to_caam_req(struct crypto_async_request *areq)
+{
+ switch (crypto_tfm_alg_type(areq->tfm)) {
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ case CRYPTO_ALG_TYPE_GIVCIPHER:
+ return ablkcipher_request_ctx(ablkcipher_request_cast(areq));
+ case CRYPTO_ALG_TYPE_AEAD:
+ return aead_request_ctx(container_of(areq, struct aead_request,
+ base));
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+}
+
+static void caam_unmap(struct device *dev, struct scatterlist *src,
+ struct scatterlist *dst, int src_nents,
+ int dst_nents, dma_addr_t iv_dma, int ivsize,
+ enum optype op_type, dma_addr_t qm_sg_dma,
+ int qm_sg_bytes)
+{
+ if (dst != src) {
+ if (src_nents)
+ dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
+ dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
+ } else {
+ dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
+ }
+
+ if (iv_dma)
+ dma_unmap_single(dev, iv_dma, ivsize,
+ op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
+ DMA_TO_DEVICE);
+
+ if (qm_sg_bytes)
+ dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
+}
+
+static int aead_set_sh_desc(struct crypto_aead *aead)
+{
+ struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
+ typeof(*alg), aead);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ struct device *dev = ctx->dev;
+ struct caam_flc *flc;
+ u32 *desc;
+ u32 ctx1_iv_off = 0;
+ u32 *nonce = NULL;
+ unsigned int data_len[2];
+ u32 inl_mask;
+ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
+ OP_ALG_AAI_CTR_MOD128);
+ const bool is_rfc3686 = alg->caam.rfc3686;
+
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ /*
+ * AES-CTR needs to load IV in CONTEXT1 reg
+ * at an offset of 128 bits (16 bytes)
+ * CONTEXT1[255:128] = IV
+ */
+ if (ctr_mode)
+ ctx1_iv_off = 16;
+
+ /*
+ * RFC3686 specific:
+ * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+ */
+ if (is_rfc3686) {
+ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+ nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
+ ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
+ }
+
+ data_len[0] = ctx->adata.keylen_pad;
+ data_len[1] = ctx->cdata.keylen;
+
+ /* aead_encrypt shared descriptor */
+ if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
+ DESC_QI_AEAD_ENC_LEN) +
+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+ DESC_JOB_IO_LEN, data_len, &inl_mask,
+ ARRAY_SIZE(data_len)) < 0)
+ return -EINVAL;
+
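+ /*
+ * desc_inline_query() sets bit 0 of inl_mask if the split authentication
+ * key fits inline in the shared descriptor and bit 1 if the encryption
+ * key does; keys that do not fit are referenced by DMA address instead.
+ */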
+ if (inl_mask & 1)
+ ctx->adata.key_virt = ctx->key;
+ else
+ ctx->adata.key_dma = ctx->key_dma;
+
+ if (inl_mask & 2)
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+ else
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+ ctx->adata.key_inline = !!(inl_mask & 1);
+ ctx->cdata.key_inline = !!(inl_mask & 2);
+
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
+
+ if (alg->caam.geniv)
+ cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
+ ivsize, ctx->authsize, is_rfc3686,
+ nonce, ctx1_iv_off, true);
+ else
+ cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
+ ivsize, ctx->authsize, is_rfc3686, nonce,
+ ctx1_iv_off, true);
+
+ flc->flc[1] = desc_len(desc); /* SDL */
+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
+ desc_bytes(desc), DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, flc->flc_dma)) {
+ dev_err(dev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+
+ /* aead_decrypt shared descriptor */
+ if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+ DESC_JOB_IO_LEN, data_len, &inl_mask,
+ ARRAY_SIZE(data_len)) < 0)
+ return -EINVAL;
+
+ if (inl_mask & 1)
+ ctx->adata.key_virt = ctx->key;
+ else
+ ctx->adata.key_dma = ctx->key_dma;
+
+ if (inl_mask & 2)
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+ else
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+ ctx->adata.key_inline = !!(inl_mask & 1);
+ ctx->cdata.key_inline = !!(inl_mask & 2);
+
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
+
+ cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
+ ivsize, ctx->authsize, alg->caam.geniv,
+ is_rfc3686, nonce, ctx1_iv_off, true);
+
+ flc->flc[1] = desc_len(desc); /* SDL */
+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
+ desc_bytes(desc), DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, flc->flc_dma)) {
+ dev_err(dev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+ ctx->authsize = authsize;
+ aead_set_sh_desc(authenc);
+
+ return 0;
+}
+
+struct split_key_sh_result {
+ struct completion completion;
+ int err;
+ struct device *dev;
+};
+
+static void split_key_sh_done(void *cbk_ctx, u32 err)
+{
+ struct split_key_sh_result *res = cbk_ctx;
+
+#ifdef DEBUG
+ dev_err(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+ if (err)
+ caam_qi2_strstatus(res->dev, err);
+
+ res->err = err;
+ complete(&res->completion);
+}
+
+static int gen_split_key_sh(struct device *dev, u8 *key_out,
+ struct alginfo * const adata, const u8 *key_in,
+ u32 keylen)
+{
+ struct caam_request *req_ctx;
+ u32 *desc;
+ struct split_key_sh_result result;
+ dma_addr_t dma_addr_in, dma_addr_out;
+ struct caam_flc *flc;
+ struct dpaa2_fl_entry *in_fle, *out_fle;
+ int ret = -ENOMEM;
+
+ req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
+ if (!req_ctx)
+ return -ENOMEM;
+
+ in_fle = &req_ctx->fd_flt[1];
+ out_fle = &req_ctx->fd_flt[0];
+
+ flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
+ if (!flc)
+ goto err_flc;
+
+ dma_addr_in = dma_map_single(dev, (void *)key_in, keylen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma_addr_in)) {
+ dev_err(dev, "unable to map key input memory\n");
+ goto err_dma_addr_in;
+ }
+
+ dma_addr_out = dma_map_single(dev, key_out, adata->keylen_pad,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, dma_addr_out)) {
+ dev_err(dev, "unable to map key output memory\n");
+ goto err_dma_addr_out;
+ }
+
+ desc = flc->sh_desc;
+
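+ /*
+ * Build a one-shot descriptor: load the raw authentication key, run
+ * MDHA in HMAC-INIT mode and store the resulting ipad/opad split key.
+ */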
+ init_sh_desc(desc, 0);
+ append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
+
+ /* Sets MDHA up into an HMAC-INIT */
+ append_operation(desc, (adata->algtype & OP_ALG_ALGSEL_MASK) |
+ OP_ALG_AAI_HMAC | OP_TYPE_CLASS2_ALG | OP_ALG_DECRYPT |
+ OP_ALG_AS_INIT);
+
+ /*
+ * do a FIFO_LOAD of zero; this triggers the internal key expansion
+ * into both pads inside MDHA
+ */
+ append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
+
+ /*
+ * FIFO_STORE with the explicit split-key content store
+ * (0x26 output type)
+ */
+ append_fifo_store(desc, dma_addr_out, adata->keylen,
+ LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
+
+ flc->flc[1] = desc_len(desc); /* SDL */
+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
+ desc_bytes(desc), DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, flc->flc_dma)) {
+ dev_err(dev, "unable to map shared descriptor\n");
+ goto err_flc_dma;
+ }
+
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(in_fle, dma_addr_in);
+ dpaa2_fl_set_len(in_fle, keylen);
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, dma_addr_out);
+ dpaa2_fl_set_len(out_fle, adata->keylen_pad);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
+ print_hex_dump(KERN_ERR, "desc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+ result.err = 0;
+ init_completion(&result.completion);
+ result.dev = dev;
+
+ req_ctx->flc = flc;
+ req_ctx->cbk = split_key_sh_done;
+ req_ctx->ctx = &result;
+
+ ret = dpaa2_caam_enqueue(dev, req_ctx);
+ if (ret == -EINPROGRESS) {
+ /* in progress */
+ wait_for_completion(&result.completion);
+ ret = result.err;
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key_out,
+ adata->keylen_pad, 1);
+#endif
+ }
+
+ dma_unmap_single(dev, flc->flc_dma, sizeof(flc->flc) + desc_bytes(desc),
+ DMA_TO_DEVICE);
+err_flc_dma:
+ dma_unmap_single(dev, dma_addr_out, adata->keylen_pad, DMA_FROM_DEVICE);
+err_dma_addr_out:
+ dma_unmap_single(dev, dma_addr_in, keylen, DMA_TO_DEVICE);
+err_dma_addr_in:
+ kfree(flc);
+err_flc:
+ kfree(req_ctx);
+ return ret;
+}
+
+static int gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
+ u32 authkeylen)
+{
+ return gen_split_key_sh(ctx->dev, ctx->key, &ctx->adata, key_in,
+ authkeylen);
+}
+
+static int aead_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
+ struct crypto_authenc_keys keys;
+ int ret;
+
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+ goto badkey;
+
+#ifdef DEBUG
+ dev_err(dev, "keylen %d enckeylen %d authkeylen %d\n",
+ keys.authkeylen + keys.enckeylen, keys.enckeylen,
+ keys.authkeylen);
+ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
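+ /*
+ * The MDHA split key (ipad/opad) length is a function of the hash
+ * algorithm only; keylen_pad is its padded length as stored in ctx->key.
+ */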
+ ctx->adata.keylen = split_key_len(ctx->adata.algtype &
+ OP_ALG_ALGSEL_MASK);
+ ctx->adata.keylen_pad = split_key_pad_len(ctx->adata.algtype &
+ OP_ALG_ALGSEL_MASK);
+
+#ifdef DEBUG
+ dev_err(dev, "split keylen %d split keylen padded %d\n",
+ ctx->adata.keylen, ctx->adata.keylen_pad);
+ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, keys.authkey, keylen, 1);
+#endif
+
+ if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
+ goto badkey;
+
+ ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
+ if (ret)
+ goto badkey;
+
+ /* append the encryption key after the authentication split key */
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
+
+ ctx->key_dma = dma_map_single(dev, ctx->key, ctx->adata.keylen_pad +
+ keys.enckeylen, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, ctx->key_dma)) {
+ dev_err(dev, "unable to map key i/o memory\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+ ctx->adata.keylen_pad + keys.enckeylen, 1);
+#endif
+
+ ctx->cdata.keylen = keys.enckeylen;
+
+ ret = aead_set_sh_desc(aead);
+ if (ret)
+ dma_unmap_single(dev, ctx->key_dma, ctx->adata.keylen_pad +
+ keys.enckeylen, DMA_TO_DEVICE);
+
+ return ret;
+badkey:
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+}
+
+static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
+ bool encrypt)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_request *req_ctx = aead_request_ctx(req);
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
+ typeof(*alg), aead);
+ struct device *dev = ctx->dev;
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+ struct aead_edesc *edesc;
+ dma_addr_t qm_sg_dma, iv_dma = 0;
+ int ivsize = 0;
+ unsigned int authsize = ctx->authsize;
+ int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
+ int in_len, out_len;
+ struct dpaa2_sg_entry *sg_table;
+ enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (unlikely(!edesc)) {
+ dev_err(dev, "could not allocate extended descriptor\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (unlikely(req->dst != req->src)) {
+ src_nents = sg_nents_for_len(req->src, req->assoclen +
+ req->cryptlen);
+ if (unlikely(src_nents < 0)) {
+ dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
+ req->assoclen + req->cryptlen);
+ qi_cache_free(edesc);
+ return ERR_PTR(src_nents);
+ }
+
+ dst_nents = sg_nents_for_len(req->dst, req->assoclen +
+ req->cryptlen +
+ (encrypt ? authsize :
+ (-authsize)));
+ if (unlikely(dst_nents < 0)) {
+ dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
+ req->assoclen + req->cryptlen +
+ (encrypt ? authsize : (-authsize)));
+ qi_cache_free(edesc);
+ return ERR_PTR(dst_nents);
+ }
+
+ if (src_nents) {
+ mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(dev, "unable to map source\n");
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+ mapped_src_nents = 0;
+ }
+
+ mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
+ DMA_FROM_DEVICE);
+ if (unlikely(!mapped_dst_nents)) {
+ dev_err(dev, "unable to map destination\n");
+ dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+ src_nents = sg_nents_for_len(req->src, req->assoclen +
+ req->cryptlen +
+ (encrypt ? authsize : 0));
+ if (unlikely(src_nents < 0)) {
+ dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
+ req->assoclen + req->cryptlen +
+ (encrypt ? authsize : 0));
+ qi_cache_free(edesc);
+ return ERR_PTR(src_nents);
+ }
+
+ mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(dev, "unable to map source\n");
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) {
+ ivsize = crypto_aead_ivsize(aead);
+ iv_dma = dma_map_single(dev, req->iv, ivsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, iv_dma)) {
+ dev_err(dev, "unable to map IV\n");
+ caam_unmap(dev, req->src, req->dst, src_nents,
+ dst_nents, 0, 0, op_type, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ /*
+ * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
+ * Input is not contiguous.
+ */
+ qm_sg_nents = 1 + !!ivsize + mapped_src_nents +
+ (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
+ if (unlikely(qm_sg_nents > CAAM_QI_MAX_AEAD_SG)) {
+ dev_err(dev, "Insufficient S/G entries: %d > %lu\n",
+ qm_sg_nents, CAAM_QI_MAX_AEAD_SG);
+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, op_type, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ sg_table = &edesc->sgt[0];
+ qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
+
+ edesc->src_nents = src_nents;
+ edesc->dst_nents = dst_nents;
+ edesc->iv_dma = iv_dma;
+
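+ /*
+ * The 32-bit assoclen value is passed to the shared descriptor through
+ * the first S/G entry, so it must be DMA-mapped as a small buffer.
+ */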
+ edesc->assoclen_dma = dma_map_single(dev, &req->assoclen, 4,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, edesc->assoclen_dma)) {
+ dev_err(dev, "unable to map assoclen\n");
+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, op_type, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
+ qm_sg_index++;
+ if (ivsize) {
+ dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
+ qm_sg_index++;
+ }
+ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
+ qm_sg_index += mapped_src_nents;
+
+ if (mapped_dst_nents > 1)
+ sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
+ qm_sg_index, 0);
+
+ qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, qm_sg_dma)) {
+ dev_err(dev, "unable to map S/G table\n");
+ dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, op_type, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ edesc->qm_sg_dma = qm_sg_dma;
+ edesc->qm_sg_bytes = qm_sg_bytes;
+
+ out_len = req->assoclen + req->cryptlen +
+ (encrypt ? ctx->authsize : (-ctx->authsize));
+ in_len = 4 + ivsize + req->assoclen + req->cryptlen;
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, qm_sg_dma);
+ dpaa2_fl_set_len(in_fle, in_len);
+
+ if (req->dst == req->src) {
+ if (mapped_src_nents == 1) {
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
+ } else {
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(out_fle, qm_sg_dma +
+ (1 + !!ivsize) * sizeof(*sg_table));
+ }
+ } else if (mapped_dst_nents == 1) {
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
+ } else {
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
+ sizeof(*sg_table));
+ }
+
+ dpaa2_fl_set_len(out_fle, out_len);
+
+ return edesc;
+}
+
+static struct tls_edesc *tls_edesc_alloc(struct aead_request *req,
+ bool encrypt)
+{
+ struct crypto_aead *tls = crypto_aead_reqtfm(req);
+ unsigned int blocksize = crypto_aead_blocksize(tls);
+ unsigned int padsize, authsize;
+ struct caam_request *req_ctx = aead_request_ctx(req);
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ struct caam_ctx *ctx = crypto_aead_ctx(tls);
+ struct caam_aead_alg *alg = container_of(crypto_aead_alg(tls),
+ typeof(*alg), aead);
+ struct device *dev = ctx->dev;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+ struct tls_edesc *edesc;
+ dma_addr_t qm_sg_dma, iv_dma = 0;
+ int ivsize = 0;
+ int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
+ int in_len, out_len;
+ struct dpaa2_sg_entry *sg_table;
+ enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
+ struct scatterlist *dst;
+
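+ /*
+ * TLS 1.0 CBC padding: pad payload + MAC up to a full cipher block;
+ * when already block-aligned, a full block of padding is added.
+ */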
+ if (encrypt) {
+ padsize = blocksize - ((req->cryptlen + ctx->authsize) %
+ blocksize);
+ authsize = ctx->authsize + padsize;
+ } else {
+ authsize = ctx->authsize;
+ }
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (unlikely(!edesc)) {
+ dev_err(dev, "could not allocate extended descriptor\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (likely(req->src == req->dst)) {
+ src_nents = sg_nents_for_len(req->src, req->assoclen +
+ req->cryptlen +
+ (encrypt ? authsize : 0));
+ if (unlikely(src_nents < 0)) {
+ dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
+ req->assoclen + req->cryptlen +
+ (encrypt ? authsize : 0));
+ qi_cache_free(edesc);
+ return ERR_PTR(src_nents);
+ }
+
+ mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(dev, "unable to map source\n");
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ dst = req->dst;
+ } else {
+ src_nents = sg_nents_for_len(req->src, req->assoclen +
+ req->cryptlen);
+ if (unlikely(src_nents < 0)) {
+ dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
+ req->assoclen + req->cryptlen);
+ qi_cache_free(edesc);
+ return ERR_PTR(src_nents);
+ }
+
+ dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
+ dst_nents = sg_nents_for_len(dst, req->cryptlen +
+ (encrypt ? authsize : 0));
+ if (unlikely(dst_nents < 0)) {
+ dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
+ req->cryptlen +
+ (encrypt ? authsize : 0));
+ qi_cache_free(edesc);
+ return ERR_PTR(dst_nents);
+ }
+
+ if (src_nents) {
+ mapped_src_nents = dma_map_sg(dev, req->src,
+ src_nents, DMA_TO_DEVICE);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(dev, "unable to map source\n");
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+ mapped_src_nents = 0;
+ }
+
+ mapped_dst_nents = dma_map_sg(dev, dst, dst_nents,
+ DMA_FROM_DEVICE);
+ if (unlikely(!mapped_dst_nents)) {
+ dev_err(dev, "unable to map destination\n");
+ dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ ivsize = crypto_aead_ivsize(tls);
+ iv_dma = dma_map_single(dev, req->iv, ivsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, iv_dma)) {
+ dev_err(dev, "unable to map IV\n");
+ caam_unmap(dev, req->src, dst, src_nents, dst_nents, 0, 0,
+ op_type, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /*
+ * Create S/G table: IV, src, dst.
+ * Input is not contiguous.
+ */
+ qm_sg_ents = 1 + mapped_src_nents +
+ (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
+ sg_table = &edesc->sgt[0];
+ qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
+
+ edesc->src_nents = src_nents;
+ edesc->dst_nents = dst_nents;
+ edesc->dst = dst;
+ edesc->iv_dma = iv_dma;
+
+ dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
+ qm_sg_index = 1;
+
+ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
+ qm_sg_index += mapped_src_nents;
+
+ if (mapped_dst_nents > 1)
+ sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
+ qm_sg_index, 0);
+
+ qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, qm_sg_dma)) {
+ dev_err(dev, "unable to map S/G table\n");
+ caam_unmap(dev, req->src, dst, src_nents, dst_nents, iv_dma,
+ ivsize, op_type, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ edesc->qm_sg_dma = qm_sg_dma;
+ edesc->qm_sg_bytes = qm_sg_bytes;
+
+ out_len = req->cryptlen + (encrypt ? authsize : 0);
+ in_len = ivsize + req->assoclen + req->cryptlen;
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, qm_sg_dma);
+ dpaa2_fl_set_len(in_fle, in_len);
+
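+ /*
+ * For in-place encryption, point the output at the payload part of the
+ * S/G table: skip the IV entry and the entries covering the associated
+ * data (TLS header).
+ */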
+ if (req->dst == req->src) {
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(out_fle, qm_sg_dma +
+ (sg_nents_for_len(req->src, req->assoclen) +
+ 1) * sizeof(*sg_table));
+ } else if (mapped_dst_nents == 1) {
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, sg_dma_address(dst));
+ } else {
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
+ sizeof(*sg_table));
+ }
+
+ dpaa2_fl_set_len(out_fle, out_len);
+
+ return edesc;
+}
+
+static int tls_set_sh_desc(struct crypto_aead *tls)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(tls);
+ unsigned int ivsize = crypto_aead_ivsize(tls);
+ unsigned int blocksize = crypto_aead_blocksize(tls);
+ struct device *dev = ctx->dev;
+ struct caam_flc *flc;
+ u32 *desc;
+ unsigned int assoclen = 13; /* always 13 bytes for TLS */
+ unsigned int data_len[2];
+ u32 inl_mask;
+
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ /*
+ * TLS 1.0 encrypt shared descriptor
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+ data_len[0] = ctx->adata.keylen_pad;
+ data_len[1] = ctx->cdata.keylen;
+
+ if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
+ &inl_mask, ARRAY_SIZE(data_len)) < 0)
+ return -EINVAL;
+
+ if (inl_mask & 1)
+ ctx->adata.key_virt = ctx->key;
+ else
+ ctx->adata.key_dma = ctx->key_dma;
+
+ if (inl_mask & 2)
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+ else
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+ ctx->adata.key_inline = !!(inl_mask & 1);
+ ctx->cdata.key_inline = !!(inl_mask & 2);
+
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
+
+ cnstr_shdsc_tls_encap(desc, &ctx->cdata, &ctx->adata,
+ assoclen, ivsize, ctx->authsize, blocksize);
+
+	flc->flc[1] = desc_len(desc); /* SDL */
+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
+ desc_bytes(desc), DMA_TO_DEVICE);
+
+ if (dma_mapping_error(dev, flc->flc_dma)) {
+ dev_err(dev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * TLS 1.0 decrypt shared descriptor
+ * Keys do not fit inline, regardless of algorithms used
+ */
+ ctx->adata.key_dma = ctx->key_dma;
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
+
+ cnstr_shdsc_tls_decap(desc, &ctx->cdata, &ctx->adata, assoclen, ivsize,
+ ctx->authsize, blocksize);
+
+ flc->flc[1] = desc_len(desc); /* SDL */
+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
+ desc_bytes(desc), DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, flc->flc_dma)) {
+ dev_err(dev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
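+/*
+ * tls_setkey splits the authenc-formatted key into its authentication and
+ * encryption parts, generates a CAAM split key from the authentication key,
+ * appends the raw encryption key after it and DMA maps the combined buffer
+ * before (re)building the shared descriptors.
+ */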
+static int tls_setkey(struct crypto_aead *tls, const u8 *key,
+ unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(tls);
+ struct device *dev = ctx->dev;
+ struct crypto_authenc_keys keys;
+ int ret;
+
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+ goto badkey;
+
+#ifdef DEBUG
+ dev_err(dev, "keylen %d enckeylen %d authkeylen %d\n",
+ keys.authkeylen + keys.enckeylen, keys.enckeylen,
+ keys.authkeylen);
+ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+ ctx->adata.keylen = split_key_len(ctx->adata.algtype &
+ OP_ALG_ALGSEL_MASK);
+ ctx->adata.keylen_pad = split_key_pad_len(ctx->adata.algtype &
+ OP_ALG_ALGSEL_MASK);
+
+#ifdef DEBUG
+ dev_err(dev, "split keylen %d split keylen padded %d\n",
+ ctx->adata.keylen, ctx->adata.keylen_pad);
+ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, keys.authkey,
+ keys.authkeylen + keys.enckeylen, 1);
+#endif
+
+ if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
+ goto badkey;
+
+ ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
+ if (ret)
+ goto badkey;
+
+	/* append the encryption key after the auth split key */
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
+
+ ctx->key_dma = dma_map_single(dev, ctx->key, ctx->adata.keylen_pad +
+ keys.enckeylen, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, ctx->key_dma)) {
+ dev_err(dev, "unable to map key i/o memory\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+ ctx->adata.keylen_pad + keys.enckeylen, 1);
+#endif
+
+ ctx->cdata.keylen = keys.enckeylen;
+
+ ret = tls_set_sh_desc(tls);
+ if (ret)
+ dma_unmap_single(dev, ctx->key_dma, ctx->adata.keylen_pad +
+ keys.enckeylen, DMA_TO_DEVICE);
+
+ return ret;
+badkey:
+ crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+}
+
+static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(tls);
+
+ ctx->authsize = authsize;
+ tls_set_sh_desc(tls);
+
+ return 0;
+}
+
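+/*
+ * Build the GCM encrypt/decrypt shared descriptors. The AES key is inlined
+ * only when the resulting descriptor still fits in the 64-word buffer,
+ * otherwise it is referenced by its DMA address.
+ */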
+static int gcm_set_sh_desc(struct crypto_aead *aead)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ struct caam_flc *flc;
+ u32 *desc;
+ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
+
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ /*
+ * AES GCM encrypt shared descriptor
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+ if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
+
+ flc->flc[1] = desc_len(desc); /* SDL */
+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
+ desc_bytes(desc), DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, flc->flc_dma)) {
+ dev_err(dev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
+
+ flc->flc[1] = desc_len(desc); /* SDL */
+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
+ desc_bytes(desc), DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, flc->flc_dma)) {
+ dev_err(dev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+ ctx->authsize = authsize;
+ gcm_set_sh_desc(authenc);
+
+ return 0;
+}
+
+static int gcm_setkey(struct crypto_aead *aead,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
+ int ret;
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+ memcpy(ctx->key, key, keylen);
+ ctx->key_dma = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, ctx->key_dma)) {
+ dev_err(dev, "unable to map key i/o memory\n");
+ return -ENOMEM;
+ }
+ ctx->cdata.keylen = keylen;
+
+ ret = gcm_set_sh_desc(aead);
+ if (ret)
+ dma_unmap_single(dev, ctx->key_dma, ctx->cdata.keylen,
+ DMA_TO_DEVICE);
+
+ return ret;
+}
+
+static int rfc4106_set_sh_desc(struct crypto_aead *aead)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ struct caam_flc *flc;
+ u32 *desc;
+ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
+
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ ctx->cdata.key_virt = ctx->key;
+
+ /*
+ * RFC4106 encrypt shared descriptor
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+ if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
+ ctx->cdata.key_inline = true;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ true);
+
+ flc->flc[1] = desc_len(desc); /* SDL */
+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
+ desc_bytes(desc), DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, flc->flc_dma)) {
+ dev_err(dev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
+ ctx->cdata.key_inline = true;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ true);
+
+ flc->flc[1] = desc_len(desc); /* SDL */
+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
+ desc_bytes(desc), DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, flc->flc_dma)) {
+ dev_err(dev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int rfc4106_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+ ctx->authsize = authsize;
+ rfc4106_set_sh_desc(authenc);
+
+ return 0;
+}
+
+static int rfc4106_setkey(struct crypto_aead *aead,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
+ int ret;
+
+ if (keylen < 4)
+ return -EINVAL;
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+ memcpy(ctx->key, key, keylen);
+ /*
+ * The last four bytes of the key material are used as the salt value
+ * in the nonce. Update the AES key length.
+ */
+ ctx->cdata.keylen = keylen - 4;
+ ctx->key_dma = dma_map_single(dev, ctx->key, ctx->cdata.keylen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, ctx->key_dma)) {
+ dev_err(dev, "unable to map key i/o memory\n");
+ return -ENOMEM;
+ }
+
+ ret = rfc4106_set_sh_desc(aead);
+ if (ret)
+ dma_unmap_single(dev, ctx->key_dma, ctx->cdata.keylen,
+ DMA_TO_DEVICE);
+
+ return ret;
+}
+
+static int rfc4543_set_sh_desc(struct crypto_aead *aead)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ struct caam_flc *flc;
+ u32 *desc;
+ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
+
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ ctx->cdata.key_virt = ctx->key;
+
+ /*
+ * RFC4543 encrypt shared descriptor
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+ if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
+ ctx->cdata.key_inline = true;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ true);
+
+ flc->flc[1] = desc_len(desc); /* SDL */
+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
+ desc_bytes(desc), DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, flc->flc_dma)) {
+ dev_err(dev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
+ ctx->cdata.key_inline = true;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ true);
+
+ flc->flc[1] = desc_len(desc); /* SDL */
+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
+ desc_bytes(desc), DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, flc->flc_dma)) {
+ dev_err(dev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int rfc4543_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+ ctx->authsize = authsize;
+ rfc4543_set_sh_desc(authenc);
+
+ return 0;
+}
+
+static int rfc4543_setkey(struct crypto_aead *aead,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
+ int ret;
+
+ if (keylen < 4)
+ return -EINVAL;
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+ memcpy(ctx->key, key, keylen);
+ /*
+ * The last four bytes of the key material are used as the salt value
+ * in the nonce. Update the AES key length.
+ */
+ ctx->cdata.keylen = keylen - 4;
+ ctx->key_dma = dma_map_single(dev, ctx->key, ctx->cdata.keylen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, ctx->key_dma)) {
+ dev_err(dev, "unable to map key i/o memory\n");
+ return -ENOMEM;
+ }
+
+ ret = rfc4543_set_sh_desc(aead);
+ if (ret)
+ dma_unmap_single(dev, ctx->key_dma, ctx->cdata.keylen,
+ DMA_TO_DEVICE);
+
+ return ret;
+}
+
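+/*
+ * ablkcipher_setkey builds the encrypt, decrypt and givencrypt shared
+ * descriptors. CTR-based modes load the IV at a 16-byte offset in the
+ * CONTEXT1 register (see the comments below).
+ */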
+static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
+ const char *alg_name = crypto_tfm_alg_name(tfm);
+ struct device *dev = ctx->dev;
+ struct caam_flc *flc;
+ unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ u32 *desc;
+ u32 ctx1_iv_off = 0;
+ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
+ OP_ALG_AAI_CTR_MOD128);
+ const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
+
+ memcpy(ctx->key, key, keylen);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+	/*
+	 * AES-CTR needs to load the IV in the CONTEXT1 register
+	 * at an offset of 128 bits (16 bytes):
+	 * CONTEXT1[255:128] = IV
+	 */
+ if (ctr_mode)
+ ctx1_iv_off = 16;
+
+ /*
+ * RFC3686 specific:
+ * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+ * | *key = {KEY, NONCE}
+ */
+ if (is_rfc3686) {
+ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+ keylen -= CTR_RFC3686_NONCE_SIZE;
+ }
+
+ ctx->key_dma = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, ctx->key_dma)) {
+ dev_err(dev, "unable to map key i/o memory\n");
+ return -ENOMEM;
+ }
+ ctx->cdata.keylen = keylen;
+ ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.key_inline = true;
+
+ /* ablkcipher_encrypt shared descriptor */
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
+
+ cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize,
+ is_rfc3686, ctx1_iv_off);
+
+ flc->flc[1] = desc_len(desc); /* SDL */
+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
+ desc_bytes(desc), DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, flc->flc_dma)) {
+ dev_err(dev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+
+ /* ablkcipher_decrypt shared descriptor */
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
+
+ cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize,
+ is_rfc3686, ctx1_iv_off);
+
+ flc->flc[1] = desc_len(desc); /* SDL */
+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
+ desc_bytes(desc), DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, flc->flc_dma)) {
+ dev_err(dev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+
+ /* ablkcipher_givencrypt shared descriptor */
+ flc = &ctx->flc[GIVENCRYPT];
+ desc = flc->sh_desc;
+
+ cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata,
+ ivsize, is_rfc3686, ctx1_iv_off);
+
+ flc->flc[1] = desc_len(desc); /* SDL */
+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
+ desc_bytes(desc), DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, flc->flc_dma)) {
+ dev_err(dev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct device *dev = ctx->dev;
+ struct caam_flc *flc;
+ u32 *desc;
+
+ if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
+ dev_err(dev, "key size mismatch\n");
+ crypto_ablkcipher_set_flags(ablkcipher,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ memcpy(ctx->key, key, keylen);
+ ctx->key_dma = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, ctx->key_dma)) {
+ dev_err(dev, "unable to map key i/o memory\n");
+ return -ENOMEM;
+ }
+ ctx->cdata.keylen = keylen;
+ ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.key_inline = true;
+
+ /* xts_ablkcipher_encrypt shared descriptor */
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
+
+ flc->flc[1] = desc_len(desc); /* SDL */
+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
+ desc_bytes(desc), DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, flc->flc_dma)) {
+ dev_err(dev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+
+ /* xts_ablkcipher_decrypt shared descriptor */
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
+
+ cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
+
+ flc->flc[1] = desc_len(desc); /* SDL */
+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
+ desc_bytes(desc), DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, flc->flc_dma)) {
+ dev_err(dev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
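+/*
+ * Allocate and fill an extended descriptor: count and DMA map the source and
+ * destination scatterlists and the IV, build the QI S/G table when needed
+ * and set up the input/output frame list entries.
+ */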
+static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
+ *req, bool encrypt)
+{
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct caam_request *req_ctx = ablkcipher_request_ctx(req);
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct device *dev = ctx->dev;
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+ struct ablkcipher_edesc *edesc;
+ dma_addr_t iv_dma;
+ bool in_contig;
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ int dst_sg_idx, qm_sg_ents;
+ struct dpaa2_sg_entry *sg_table;
+ enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
+
+ src_nents = sg_nents_for_len(req->src, req->nbytes);
+ if (unlikely(src_nents < 0)) {
+ dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
+ req->nbytes);
+ return ERR_PTR(src_nents);
+ }
+
+ if (unlikely(req->dst != req->src)) {
+ dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+ if (unlikely(dst_nents < 0)) {
+ dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
+ req->nbytes);
+ return ERR_PTR(dst_nents);
+ }
+
+ mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(dev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
+ DMA_FROM_DEVICE);
+ if (unlikely(!mapped_dst_nents)) {
+ dev_err(dev, "unable to map destination\n");
+ dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+ mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(dev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ iv_dma = dma_map_single(dev, req->info, ivsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, iv_dma)) {
+ dev_err(dev, "unable to map IV\n");
+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0, 0);
+ return ERR_PTR(-ENOMEM);
+ }
+
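+	/*
+	 * If the IV is physically contiguous with a single-entry source
+	 * buffer, the input can be passed as one contiguous buffer and no
+	 * input S/G entries are needed.
+	 */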
+ if (mapped_src_nents == 1 &&
+ iv_dma + ivsize == sg_dma_address(req->src)) {
+ in_contig = true;
+ qm_sg_ents = 0;
+ } else {
+ in_contig = false;
+ qm_sg_ents = 1 + mapped_src_nents;
+ }
+ dst_sg_idx = qm_sg_ents;
+
+ qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
+ if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
+ dev_err(dev, "Insufficient S/G entries: %d > %lu\n",
+ qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, op_type, 0, 0);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (unlikely(!edesc)) {
+ dev_err(dev, "could not allocate extended descriptor\n");
+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, op_type, 0, 0);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ edesc->src_nents = src_nents;
+ edesc->dst_nents = dst_nents;
+ edesc->iv_dma = iv_dma;
+ sg_table = &edesc->sgt[0];
+ edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
+
+ if (!in_contig) {
+ dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
+ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
+ }
+
+ if (mapped_dst_nents > 1)
+ sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
+ dst_sg_idx, 0);
+
+ edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
+ dev_err(dev, "unable to map S/G table\n");
+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, op_type, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_len(in_fle, req->nbytes + ivsize);
+ dpaa2_fl_set_len(out_fle, req->nbytes);
+
+ if (!in_contig) {
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+ } else {
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(in_fle, iv_dma);
+ }
+
+ if (req->src == req->dst) {
+ if (!in_contig) {
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
+ sizeof(*sg_table));
+ } else {
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
+ }
+ } else if (mapped_dst_nents > 1) {
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
+ sizeof(*sg_table));
+ } else {
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
+ }
+
+ return edesc;
+}
+
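+/*
+ * Givencrypt variant of ablkcipher_edesc_alloc: the generated IV (greq->giv)
+ * is written back by the hardware, so it is mapped DMA_FROM_DEVICE and placed
+ * in the output S/G table right before the destination data.
+ */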
+static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
+ struct skcipher_givcrypt_request *greq)
+{
+ struct ablkcipher_request *req = &greq->creq;
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct caam_request *req_ctx = ablkcipher_request_ctx(req);
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct device *dev = ctx->dev;
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
+ struct ablkcipher_edesc *edesc;
+ dma_addr_t iv_dma;
+ bool out_contig;
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ struct dpaa2_sg_entry *sg_table;
+ int dst_sg_idx, qm_sg_ents;
+
+ src_nents = sg_nents_for_len(req->src, req->nbytes);
+ if (unlikely(src_nents < 0)) {
+ dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
+ req->nbytes);
+ return ERR_PTR(src_nents);
+ }
+
+ if (unlikely(req->dst != req->src)) {
+ dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+ if (unlikely(dst_nents < 0)) {
+ dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
+ req->nbytes);
+ return ERR_PTR(dst_nents);
+ }
+
+ mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(dev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
+ DMA_FROM_DEVICE);
+ if (unlikely(!mapped_dst_nents)) {
+ dev_err(dev, "unable to map destination\n");
+ dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+ mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(dev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dst_nents = src_nents;
+ mapped_dst_nents = src_nents;
+ }
+
+ iv_dma = dma_map_single(dev, greq->giv, ivsize, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, iv_dma)) {
+ dev_err(dev, "unable to map IV\n");
+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0, 0);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
+ dst_sg_idx = qm_sg_ents;
+ if (mapped_dst_nents == 1 &&
+ iv_dma + ivsize == sg_dma_address(req->dst)) {
+ out_contig = true;
+ } else {
+ out_contig = false;
+ qm_sg_ents += 1 + mapped_dst_nents;
+ }
+
+ if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
+ dev_err(dev, "Insufficient S/G entries: %d > %lu\n",
+ qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, GIVENCRYPT, 0, 0);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(dev, "could not allocate extended descriptor\n");
+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, GIVENCRYPT, 0, 0);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ edesc->src_nents = src_nents;
+ edesc->dst_nents = dst_nents;
+ edesc->iv_dma = iv_dma;
+ sg_table = &edesc->sgt[0];
+ edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
+
+ if (mapped_src_nents > 1)
+ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
+
+ if (!out_contig) {
+ dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
+ sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
+ dst_sg_idx + 1, 0);
+ }
+
+ edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
+ dev_err(dev, "unable to map S/G table\n");
+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, GIVENCRYPT, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_len(in_fle, req->nbytes);
+ dpaa2_fl_set_len(out_fle, ivsize + req->nbytes);
+
+ if (mapped_src_nents > 1) {
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+ } else {
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
+ }
+
+ if (!out_contig) {
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
+ sizeof(*sg_table));
+ } else {
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
+ }
+
+ return edesc;
+}
+
+static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
+ struct aead_request *req)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ int ivsize = crypto_aead_ivsize(aead);
+ struct caam_request *caam_req = aead_request_ctx(req);
+
+ caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
+ edesc->iv_dma, ivsize, caam_req->op_type,
+ edesc->qm_sg_dma, edesc->qm_sg_bytes);
+ dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
+}
+
+static void tls_unmap(struct device *dev, struct tls_edesc *edesc,
+ struct aead_request *req)
+{
+ struct crypto_aead *tls = crypto_aead_reqtfm(req);
+ int ivsize = crypto_aead_ivsize(tls);
+ struct caam_request *caam_req = aead_request_ctx(req);
+
+ caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
+ edesc->dst_nents, edesc->iv_dma, ivsize, caam_req->op_type,
+ edesc->qm_sg_dma, edesc->qm_sg_bytes);
+}
+
+static void ablkcipher_unmap(struct device *dev,
+ struct ablkcipher_edesc *edesc,
+ struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ struct caam_request *caam_req = ablkcipher_request_ctx(req);
+
+ caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
+ edesc->iv_dma, ivsize, caam_req->op_type,
+ edesc->qm_sg_dma, edesc->qm_sg_bytes);
+}
+
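+/*
+ * Completion callbacks: translate the hardware status into an error code,
+ * release the DMA mappings and the extended descriptor, then complete the
+ * crypto request.
+ */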
+static void aead_encrypt_done(void *cbk_ctx, u32 status)
+{
+ struct crypto_async_request *areq = cbk_ctx;
+ struct aead_request *req = container_of(areq, struct aead_request,
+ base);
+ struct caam_request *req_ctx = to_caam_req(areq);
+ struct aead_edesc *edesc = req_ctx->edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ int ecode = 0;
+
+#ifdef DEBUG
+ dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+#endif
+
+ if (unlikely(status)) {
+ caam_qi2_strstatus(ctx->dev, status);
+ ecode = -EIO;
+ }
+
+ aead_unmap(ctx->dev, edesc, req);
+ qi_cache_free(edesc);
+ aead_request_complete(req, ecode);
+}
+
+static void aead_decrypt_done(void *cbk_ctx, u32 status)
+{
+ struct crypto_async_request *areq = cbk_ctx;
+ struct aead_request *req = container_of(areq, struct aead_request,
+ base);
+ struct caam_request *req_ctx = to_caam_req(areq);
+ struct aead_edesc *edesc = req_ctx->edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ int ecode = 0;
+
+#ifdef DEBUG
+ dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+#endif
+
+ if (unlikely(status)) {
+ caam_qi2_strstatus(ctx->dev, status);
+		/*
+		 * Return -EBADMSG if the hardware reported an ICV check
+		 * (authentication) failure, -EIO for any other error.
+		 */
+ if ((status & JRSTA_CCBERR_ERRID_MASK) ==
+ JRSTA_CCBERR_ERRID_ICVCHK)
+ ecode = -EBADMSG;
+ else
+ ecode = -EIO;
+ }
+
+ aead_unmap(ctx->dev, edesc, req);
+ qi_cache_free(edesc);
+ aead_request_complete(req, ecode);
+}
+
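+/*
+ * The request entry points share the same pattern: allocate an extended
+ * descriptor, fill in the caam_request and enqueue it; if the enqueue is
+ * neither in progress nor backlogged, undo the mappings and free the
+ * descriptor before returning the error.
+ */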
+static int aead_encrypt(struct aead_request *req)
+{
+ struct aead_edesc *edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_request *caam_req = aead_request_ctx(req);
+ int ret;
+
+ /* allocate extended descriptor */
+ edesc = aead_edesc_alloc(req, true);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ caam_req->flc = &ctx->flc[ENCRYPT];
+ caam_req->op_type = ENCRYPT;
+ caam_req->cbk = aead_encrypt_done;
+ caam_req->ctx = &req->base;
+ caam_req->edesc = edesc;
+ ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
+ if (ret != -EINPROGRESS &&
+ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ aead_unmap(ctx->dev, edesc, req);
+ qi_cache_free(edesc);
+ }
+
+ return ret;
+}
+
+static int aead_decrypt(struct aead_request *req)
+{
+ struct aead_edesc *edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_request *caam_req = aead_request_ctx(req);
+ int ret;
+
+ /* allocate extended descriptor */
+ edesc = aead_edesc_alloc(req, false);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ caam_req->flc = &ctx->flc[DECRYPT];
+ caam_req->op_type = DECRYPT;
+ caam_req->cbk = aead_decrypt_done;
+ caam_req->ctx = &req->base;
+ caam_req->edesc = edesc;
+ ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
+ if (ret != -EINPROGRESS &&
+ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ aead_unmap(ctx->dev, edesc, req);
+ qi_cache_free(edesc);
+ }
+
+ return ret;
+}
+
+static void tls_encrypt_done(void *cbk_ctx, u32 status)
+{
+ struct crypto_async_request *areq = cbk_ctx;
+ struct aead_request *req = container_of(areq, struct aead_request,
+ base);
+ struct caam_request *req_ctx = to_caam_req(areq);
+ struct tls_edesc *edesc = req_ctx->edesc;
+ struct crypto_aead *tls = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(tls);
+ int ecode = 0;
+
+#ifdef DEBUG
+ dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+#endif
+
+ if (unlikely(status)) {
+ caam_qi2_strstatus(ctx->dev, status);
+ ecode = -EIO;
+ }
+
+ tls_unmap(ctx->dev, edesc, req);
+ qi_cache_free(edesc);
+ aead_request_complete(req, ecode);
+}
+
+static void tls_decrypt_done(void *cbk_ctx, u32 status)
+{
+ struct crypto_async_request *areq = cbk_ctx;
+ struct aead_request *req = container_of(areq, struct aead_request,
+ base);
+ struct caam_request *req_ctx = to_caam_req(areq);
+ struct tls_edesc *edesc = req_ctx->edesc;
+ struct crypto_aead *tls = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(tls);
+ int ecode = 0;
+
+#ifdef DEBUG
+ dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+#endif
+
+ if (unlikely(status)) {
+ caam_qi2_strstatus(ctx->dev, status);
+		/*
+		 * Return -EBADMSG if the hardware reported an ICV check
+		 * (authentication) failure, -EIO for any other error.
+		 */
+ if ((status & JRSTA_CCBERR_ERRID_MASK) ==
+ JRSTA_CCBERR_ERRID_ICVCHK)
+ ecode = -EBADMSG;
+ else
+ ecode = -EIO;
+ }
+
+ tls_unmap(ctx->dev, edesc, req);
+ qi_cache_free(edesc);
+ aead_request_complete(req, ecode);
+}
+
+static int tls_encrypt(struct aead_request *req)
+{
+ struct tls_edesc *edesc;
+ struct crypto_aead *tls = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(tls);
+ struct caam_request *caam_req = aead_request_ctx(req);
+ int ret;
+
+ /* allocate extended descriptor */
+ edesc = tls_edesc_alloc(req, true);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ caam_req->flc = &ctx->flc[ENCRYPT];
+ caam_req->op_type = ENCRYPT;
+ caam_req->cbk = tls_encrypt_done;
+ caam_req->ctx = &req->base;
+ caam_req->edesc = edesc;
+ ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
+ if (ret != -EINPROGRESS &&
+ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ tls_unmap(ctx->dev, edesc, req);
+ qi_cache_free(edesc);
+ }
+
+ return ret;
+}
+
+static int tls_decrypt(struct aead_request *req)
+{
+ struct tls_edesc *edesc;
+ struct crypto_aead *tls = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(tls);
+ struct caam_request *caam_req = aead_request_ctx(req);
+ int ret;
+
+ /* allocate extended descriptor */
+ edesc = tls_edesc_alloc(req, false);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ caam_req->flc = &ctx->flc[DECRYPT];
+ caam_req->op_type = DECRYPT;
+ caam_req->cbk = tls_decrypt_done;
+ caam_req->ctx = &req->base;
+ caam_req->edesc = edesc;
+ ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
+ if (ret != -EINPROGRESS &&
+ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ tls_unmap(ctx->dev, edesc, req);
+ qi_cache_free(edesc);
+ }
+
+ return ret;
+}
+
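+/*
+ * rfc4106/rfc4543 require at least 8 bytes of associated data; reject
+ * shorter requests before building an extended descriptor.
+ */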
+static int ipsec_gcm_encrypt(struct aead_request *req)
+{
+ if (req->assoclen < 8)
+ return -EINVAL;
+
+ return aead_encrypt(req);
+}
+
+static int ipsec_gcm_decrypt(struct aead_request *req)
+{
+ if (req->assoclen < 8)
+ return -EINVAL;
+
+ return aead_decrypt(req);
+}
+
+static void ablkcipher_done(void *cbk_ctx, u32 status)
+{
+ struct crypto_async_request *areq = cbk_ctx;
+ struct ablkcipher_request *req = ablkcipher_request_cast(areq);
+ struct caam_request *req_ctx = to_caam_req(areq);
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct ablkcipher_edesc *edesc = req_ctx->edesc;
+ int ecode = 0;
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+#ifdef DEBUG
+ dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+#endif
+
+ if (unlikely(status)) {
+ caam_qi2_strstatus(ctx->dev, status);
+ ecode = -EIO;
+ }
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+ edesc->src_nents > 1 ? 100 : ivsize, 1);
+ caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+ edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+#endif
+
+ ablkcipher_unmap(ctx->dev, edesc, req);
+ qi_cache_free(edesc);
+
+ /*
+ * The crypto API expects us to set the IV (req->info) to the last
+ * ciphertext block. This is used e.g. by the CTS mode.
+ */
+ scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
+ ivsize, 0);
+
+ ablkcipher_request_complete(req, ecode);
+}
+
+static int ablkcipher_encrypt(struct ablkcipher_request *req)
+{
+ struct ablkcipher_edesc *edesc;
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct caam_request *caam_req = ablkcipher_request_ctx(req);
+ int ret;
+
+ /* allocate extended descriptor */
+ edesc = ablkcipher_edesc_alloc(req, true);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ caam_req->flc = &ctx->flc[ENCRYPT];
+ caam_req->op_type = ENCRYPT;
+ caam_req->cbk = ablkcipher_done;
+ caam_req->ctx = &req->base;
+ caam_req->edesc = edesc;
+ ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
+ if (ret != -EINPROGRESS &&
+ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ ablkcipher_unmap(ctx->dev, edesc, req);
+ qi_cache_free(edesc);
+ }
+
+ return ret;
+}
+
+static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *greq)
+{
+ struct ablkcipher_request *req = &greq->creq;
+ struct ablkcipher_edesc *edesc;
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct caam_request *caam_req = ablkcipher_request_ctx(req);
+ int ret;
+
+ /* allocate extended descriptor */
+ edesc = ablkcipher_giv_edesc_alloc(greq);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ caam_req->flc = &ctx->flc[GIVENCRYPT];
+ caam_req->op_type = GIVENCRYPT;
+ caam_req->cbk = ablkcipher_done;
+ caam_req->ctx = &req->base;
+ caam_req->edesc = edesc;
+ ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
+ if (ret != -EINPROGRESS &&
+ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ ablkcipher_unmap(ctx->dev, edesc, req);
+ qi_cache_free(edesc);
+ }
+
+ return ret;
+}
+
+static int ablkcipher_decrypt(struct ablkcipher_request *req)
+{
+ struct ablkcipher_edesc *edesc;
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct caam_request *caam_req = ablkcipher_request_ctx(req);
+ int ret;
+
+ /* allocate extended descriptor */
+ edesc = ablkcipher_edesc_alloc(req, false);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ caam_req->flc = &ctx->flc[DECRYPT];
+ caam_req->op_type = DECRYPT;
+ caam_req->cbk = ablkcipher_done;
+ caam_req->ctx = &req->base;
+ caam_req->edesc = edesc;
+ ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
+ if (ret != -EINPROGRESS &&
+ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ ablkcipher_unmap(ctx->dev, edesc, req);
+ qi_cache_free(edesc);
+ }
+
+ return ret;
+}
+
+struct caam_crypto_alg {
+ struct list_head entry;
+ struct crypto_alg crypto_alg;
+ struct caam_alg_entry caam;
+};
+
+static int caam_cra_init(struct crypto_tfm *tfm)
+{
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
+ crypto_alg);
+ struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ /* copy descriptor header template value */
+ ctx->cdata.algtype = OP_TYPE_CLASS1_ALG |
+ caam_alg->caam.class1_alg_type;
+ ctx->adata.algtype = OP_TYPE_CLASS2_ALG |
+ caam_alg->caam.class2_alg_type;
+
+ ctx->dev = caam_alg->caam.dev;
+
+ return 0;
+}
+
+static int caam_cra_init_ablkcipher(struct crypto_tfm *tfm)
+{
+ struct ablkcipher_tfm *ablkcipher_tfm =
+ crypto_ablkcipher_crt(__crypto_ablkcipher_cast(tfm));
+
+ ablkcipher_tfm->reqsize = sizeof(struct caam_request);
+ return caam_cra_init(tfm);
+}
+
+static int caam_cra_init_aead(struct crypto_aead *tfm)
+{
+ crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
+ return caam_cra_init(crypto_aead_tfm(tfm));
+}
+
+static void caam_exit_common(struct caam_ctx *ctx)
+{
+ int i;
+
+ for (i = 0; i < NUM_OP; i++) {
+ if (!ctx->flc[i].flc_dma)
+ continue;
+ dma_unmap_single(ctx->dev, ctx->flc[i].flc_dma,
+ sizeof(ctx->flc[i].flc) +
+ desc_bytes(ctx->flc[i].sh_desc),
+ DMA_TO_DEVICE);
+ }
+
+ if (ctx->key_dma)
+ dma_unmap_single(ctx->dev, ctx->key_dma,
+ ctx->cdata.keylen + ctx->adata.keylen_pad,
+ DMA_TO_DEVICE);
+}
+
+static void caam_cra_exit(struct crypto_tfm *tfm)
+{
+ caam_exit_common(crypto_tfm_ctx(tfm));
+}
+
+static void caam_cra_exit_aead(struct crypto_aead *tfm)
+{
+ caam_exit_common(crypto_aead_ctx(tfm));
+}
+
+#define template_ablkcipher template_u.ablkcipher
+struct caam_alg_template {
+ char name[CRYPTO_MAX_ALG_NAME];
+ char driver_name[CRYPTO_MAX_ALG_NAME];
+ unsigned int blocksize;
+ u32 type;
+ union {
+ struct ablkcipher_alg ablkcipher;
+ } template_u;
+ u32 class1_alg_type;
+ u32 class2_alg_type;
+};
+
+static struct caam_alg_template driver_algs[] = {
+ /* ablkcipher descriptor */
+ {
+ .name = "cbc(aes)",
+ .driver_name = "cbc-aes-caam-qi2",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_GIVCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .givencrypt = ablkcipher_givencrypt,
+ .geniv = "<built-in>",
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ },
+ {
+ .name = "cbc(des3_ede)",
+ .driver_name = "cbc-3des-caam-qi2",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_GIVCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .givencrypt = ablkcipher_givencrypt,
+ .geniv = "<built-in>",
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ },
+ {
+ .name = "cbc(des)",
+ .driver_name = "cbc-des-caam-qi2",
+ .blocksize = DES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_GIVCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .givencrypt = ablkcipher_givencrypt,
+ .geniv = "<built-in>",
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ },
+ {
+ .name = "ctr(aes)",
+ .driver_name = "ctr-aes-caam-qi2",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .geniv = "chainiv",
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ },
+ {
+ .name = "rfc3686(ctr(aes))",
+ .driver_name = "rfc3686-ctr-aes-caam-qi2",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_GIVCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .givencrypt = ablkcipher_givencrypt,
+ .geniv = "<built-in>",
+ .min_keysize = AES_MIN_KEY_SIZE +
+ CTR_RFC3686_NONCE_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE +
+ CTR_RFC3686_NONCE_SIZE,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ },
+ {
+ .name = "xts(aes)",
+ .driver_name = "xts-aes-caam-qi2",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_ablkcipher = {
+ .setkey = xts_ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .geniv = "eseqiv",
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
+ }
+};
+
+static struct caam_aead_alg driver_aeads[] = {
+ {
+ .aead = {
+ .base = {
+ .cra_name = "rfc4106(gcm(aes))",
+ .cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = rfc4106_setkey,
+ .setauthsize = rfc4106_setauthsize,
+ .encrypt = ipsec_gcm_encrypt,
+ .decrypt = ipsec_gcm_decrypt,
+ .ivsize = 8,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "rfc4543(gcm(aes))",
+ .cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = rfc4543_setkey,
+ .setauthsize = rfc4543_setauthsize,
+ .encrypt = ipsec_gcm_encrypt,
+ .decrypt = ipsec_gcm_decrypt,
+ .ivsize = 8,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ },
+ },
+ /* Galois Counter Mode */
+ {
+ .aead = {
+ .base = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "gcm-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = gcm_setkey,
+ .setauthsize = gcm_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = 12,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ }
+ },
+ /* single-pass ipsec_esp descriptor */
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(md5),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-hmac-md5-"
+ "cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha1),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha1-cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha224),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha224-cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha256),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha256-cbc-aes-"
+ "caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha384-"
+ "cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha384),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha384-cbc-aes-"
+ "caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha512-"
+ "cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha512),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha512-cbc-aes-"
+ "caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(md5),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-hmac-md5-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha1),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha1-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha224),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha224-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha256),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha256-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha384-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha384),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha384-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha512-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha512),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha512-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(des))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(md5),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-hmac-md5-"
+ "cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha1),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha1-cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha224),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha224-cbc-des-"
+ "caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha256),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha256-cbc-desi-"
+ "caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha384-"
+ "cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha384),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha384-cbc-des-"
+ "caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha512-"
+ "cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha512),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha512-cbc-des-"
+ "caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "seqiv(authenc("
+ "hmac(md5),rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-md5-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ .geniv = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "seqiv(authenc("
+ "hmac(sha1),rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-sha1-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ .geniv = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "seqiv(authenc("
+ "hmac(sha224),rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-sha224-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ .geniv = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "seqiv(authenc(hmac(sha256),"
+ "rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-sha256-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ .geniv = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-sha384-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "seqiv(authenc(hmac(sha384),"
+ "rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-sha384-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ .geniv = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-sha512-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "seqiv(authenc(hmac(sha512),"
+ "rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-sha512-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ .geniv = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "tls10(hmac(sha1),cbc(aes))",
+ .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = tls_setkey,
+ .setauthsize = tls_setauthsize,
+ .encrypt = tls_encrypt,
+ .decrypt = tls_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+};
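
The table above only declares templates; they become reachable through the regular kernel crypto API once the probe routine below registers them. A minimal, hypothetical consumer (try_caam_qi2_aead() is not part of this patch) looks an algorithm up by its cra_name and lets priority-based resolution pick the "-caam-qi2" driver when the DPSECI hardware is present:

/*
 * Hypothetical kernel-side consumer of one of the AEADs registered above.
 * crypto_alloc_aead() resolves by cra_name; CAAM_CRA_PRIORITY decides
 * whether this backend wins over software implementations.
 */
#include <crypto/aead.h>
#include <linux/err.h>

static int try_caam_qi2_aead(void)
{
	struct crypto_aead *tfm;

	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* ... crypto_aead_setkey() / crypto_aead_encrypt() as usual ... */

	crypto_free_aead(tfm);
	return 0;
}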
+
+static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
+ *template)
+{
+ struct caam_crypto_alg *t_alg;
+ struct crypto_alg *alg;
+
+ t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
+ if (!t_alg)
+ return ERR_PTR(-ENOMEM);
+
+ alg = &t_alg->crypto_alg;
+
+ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
+ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->driver_name);
+ alg->cra_module = THIS_MODULE;
+ alg->cra_exit = caam_cra_exit;
+ alg->cra_priority = CAAM_CRA_PRIORITY;
+ alg->cra_blocksize = template->blocksize;
+ alg->cra_alignmask = 0;
+ alg->cra_ctxsize = sizeof(struct caam_ctx);
+ alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
+ template->type;
+ switch (template->type) {
+ case CRYPTO_ALG_TYPE_GIVCIPHER:
+ alg->cra_init = caam_cra_init_ablkcipher;
+ alg->cra_type = &crypto_givcipher_type;
+ alg->cra_ablkcipher = template->template_ablkcipher;
+ break;
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ alg->cra_init = caam_cra_init_ablkcipher;
+ alg->cra_type = &crypto_ablkcipher_type;
+ alg->cra_ablkcipher = template->template_ablkcipher;
+ break;
+ }
+
+ t_alg->caam.class1_alg_type = template->class1_alg_type;
+ t_alg->caam.class2_alg_type = template->class2_alg_type;
+
+ return t_alg;
+}
+
+static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
+{
+ struct aead_alg *alg = &t_alg->aead;
+
+ alg->base.cra_module = THIS_MODULE;
+ alg->base.cra_priority = CAAM_CRA_PRIORITY;
+ alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+
+ alg->init = caam_cra_init_aead;
+ alg->exit = caam_cra_exit_aead;
+}
+
+static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
+{
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+
+ ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
+ napi_schedule_irqoff(&ppriv->napi);
+}
+
+static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
+{
+ struct device *dev = priv->dev;
+ struct dpaa2_io_notification_ctx *nctx;
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ int err, i = 0, cpu;
+
+ for_each_online_cpu(cpu) {
+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
+ ppriv->priv = priv;
+ nctx = &ppriv->nctx;
+ nctx->is_cdan = 0;
+ nctx->id = ppriv->rsp_fqid;
+ nctx->desired_cpu = cpu;
+ nctx->cb = dpaa2_caam_fqdan_cb;
+
+ /* Register notification callbacks */
+ err = dpaa2_io_service_register(NULL, nctx);
+ if (unlikely(err)) {
+ dev_err(dev, "notification register failed\n");
+ nctx->cb = NULL;
+ goto err;
+ }
+
+ ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
+ dev);
+ if (unlikely(!ppriv->store)) {
+ dev_err(dev, "dpaa2_io_store_create() failed\n");
+ goto err;
+ }
+
+ if (++i == priv->num_pairs)
+ break;
+ }
+
+ return 0;
+
+err:
+ for_each_online_cpu(cpu) {
+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
+ if (!ppriv->nctx.cb)
+ break;
+ dpaa2_io_service_deregister(NULL, &ppriv->nctx);
+ }
+
+ for_each_online_cpu(cpu) {
+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
+ if (!ppriv->store)
+ break;
+ dpaa2_io_store_destroy(ppriv->store);
+ }
+
+ return err;
+}
+
+static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
+{
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ int i = 0, cpu;
+
+ for_each_online_cpu(cpu) {
+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
+ dpaa2_io_service_deregister(NULL, &ppriv->nctx);
+ dpaa2_io_store_destroy(ppriv->store);
+
+ if (++i == priv->num_pairs)
+ return;
+ }
+}
+
+static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
+{
+ struct dpseci_rx_queue_cfg rx_queue_cfg;
+ struct device *dev = priv->dev;
+ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ int err = 0, i = 0, cpu;
+
+ /* Configure Rx queues */
+ for_each_online_cpu(cpu) {
+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
+
+ rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
+ DPSECI_QUEUE_OPT_USER_CTX;
+ rx_queue_cfg.order_preservation_en = 0;
+ rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
+ rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
+ /*
+ * Rx priority (WQ) doesn't really matter, since we use
+ * pull mode, i.e. volatile dequeues from specific FQs
+ */
+ rx_queue_cfg.dest_cfg.priority = 0;
+ rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
+
+ err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
+ &rx_queue_cfg);
+ if (err) {
+ dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
+ err);
+ return err;
+ }
+
+ if (++i == priv->num_pairs)
+ break;
+ }
+
+ return err;
+}
+
+static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
+{
+ struct device *dev = priv->dev;
+
+ if (!priv->cscn_mem)
+ return;
+
+ dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
+ kfree(priv->cscn_mem);
+}
+
+static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
+{
+ struct device *dev = priv->dev;
+ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
+
+ dpaa2_dpseci_congestion_free(priv);
+ dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
+}
+
+static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
+ const struct dpaa2_fd *fd)
+{
+ struct caam_request *req;
+ u32 fd_err;
+
+ if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
+ dev_err(priv->dev, "Only Frame List FD format is supported!\n");
+ return;
+ }
+
+ fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
+ if (unlikely(fd_err))
+ dev_err(priv->dev, "FD error: %08x\n", fd_err);
+
+ /*
+ * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
+ * in FD[ERR] or FD[FRC].
+ */
+ req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
+ dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
+ DMA_BIDIRECTIONAL);
+ req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
+}
+
+static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
+{
+ int err;
+
+ /* Retry while portal is busy */
+ do {
+ err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
+ ppriv->store);
+ } while (err == -EBUSY);
+
+ if (unlikely(err))
+ dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d", err);
+
+ return err;
+}
+
+static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
+{
+ struct dpaa2_dq *dq;
+ int cleaned = 0, is_last;
+
+ do {
+ dq = dpaa2_io_store_next(ppriv->store, &is_last);
+ if (unlikely(!dq)) {
+ if (unlikely(!is_last)) {
+ dev_dbg(ppriv->priv->dev,
+ "FQ %d returned no valid frames\n",
+ ppriv->rsp_fqid);
+ /*
+ * MUST retry until we get some sort of
+ * valid response token (be it "empty dequeue"
+ * or a valid frame).
+ */
+ continue;
+ }
+ break;
+ }
+
+ /* Process FD */
+ dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
+ cleaned++;
+ } while (!is_last);
+
+ return cleaned;
+}
+
+static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
+{
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ struct dpaa2_caam_priv *priv;
+ int err, cleaned = 0, store_cleaned;
+
+ ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
+ priv = ppriv->priv;
+
+ if (unlikely(dpaa2_caam_pull_fq(ppriv)))
+ return 0;
+
+ do {
+ store_cleaned = dpaa2_caam_store_consume(ppriv);
+ cleaned += store_cleaned;
+
+ if (store_cleaned == 0 ||
+ cleaned > budget - DPAA2_CAAM_STORE_SIZE)
+ break;
+
+ /* Try to dequeue some more */
+ err = dpaa2_caam_pull_fq(ppriv);
+ if (unlikely(err))
+ break;
+ } while (1);
+
+ if (cleaned < budget) {
+ napi_complete_done(napi, cleaned);
+ err = dpaa2_io_service_rearm(NULL, &ppriv->nctx);
+ if (unlikely(err))
+ dev_err(priv->dev, "Notification rearm failed: %d\n",
+ err);
+ }
+
+ return cleaned;
+}
+
+static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
+ u16 token)
+{
+ struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
+ struct device *dev = priv->dev;
+ int err;
+
+ /*
+ * Congestion group feature supported starting with DPSECI API v5.1
+ * and only when object has been created with this capability.
+ */
+ if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
+ !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
+ return 0;
+
+ priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
+ GFP_KERNEL | GFP_DMA);
+ if (!priv->cscn_mem)
+ return -ENOMEM;
+
+ priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
+ priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
+ DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, priv->cscn_dma)) {
+ dev_err(dev, "Error mapping CSCN memory area\n");
+ err = -ENOMEM;
+ goto err_dma_map;
+ }
+
+ cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
+ cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
+ cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
+ cong_notif_cfg.message_ctx = (u64)priv;
+ cong_notif_cfg.message_iova = priv->cscn_dma;
+ cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
+ DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
+ DPSECI_CGN_MODE_COHERENT_WRITE;
+
+ err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
+ &cong_notif_cfg);
+ if (err) {
+ dev_err(dev, "dpseci_set_congestion_notification failed\n");
+ goto err_set_cong;
+ }
+
+ return 0;
+
+err_set_cong:
+ dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
+err_dma_map:
+ kfree(priv->cscn_mem);
+
+ return err;
+}
+
+static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
+{
+ struct device *dev = &ls_dev->dev;
+ struct dpaa2_caam_priv *priv;
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ int err, cpu;
+ u8 i;
+
+ priv = dev_get_drvdata(dev);
+
+ priv->dev = dev;
+ priv->dpsec_id = ls_dev->obj_desc.id;
+
+ /* Get a handle for the DPSECI this interface is associated with */
+ err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpsec_open() failed: %d\n", err);
+ goto err_open;
+ }
+
+ dev_info(dev, "Opened dpseci object successfully\n");
+
+ err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
+ &priv->minor_ver);
+ if (err) {
+ dev_err(dev, "dpseci_get_api_version() failed\n");
+ goto err_get_vers;
+ }
+
+ err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
+ &priv->dpseci_attr);
+ if (err) {
+ dev_err(dev, "dpseci_get_attributes() failed\n");
+ goto err_get_vers;
+ }
+
+ err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
+ &priv->sec_attr);
+ if (err) {
+ dev_err(dev, "dpseci_get_sec_attr() failed\n");
+ goto err_get_vers;
+ }
+
+ err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "setup_congestion() failed\n");
+ goto err_get_vers;
+ }
+
+ priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
+ priv->dpseci_attr.num_tx_queues);
+ if (priv->num_pairs > num_online_cpus()) {
+ dev_warn(dev, "%d queues won't be used\n",
+ priv->num_pairs - num_online_cpus());
+ priv->num_pairs = num_online_cpus();
+ }
+
+ for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
+ err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
+ &priv->rx_queue_attr[i]);
+ if (err) {
+ dev_err(dev, "dpseci_get_rx_queue() failed\n");
+ goto err_get_rx_queue;
+ }
+ }
+
+ for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
+ err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
+ &priv->tx_queue_attr[i]);
+ if (err) {
+ dev_err(dev, "dpseci_get_tx_queue() failed\n");
+ goto err_get_rx_queue;
+ }
+ }
+
+ i = 0;
+ for_each_online_cpu(cpu) {
+ dev_info(dev, "prio %d: rx queue %d, tx queue %d\n", i,
+ priv->rx_queue_attr[i].fqid,
+ priv->tx_queue_attr[i].fqid);
+
+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
+ ppriv->req_fqid = priv->tx_queue_attr[i].fqid;
+ ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
+ ppriv->prio = i;
+
+ ppriv->net_dev.dev = *dev;
+ INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
+ netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
+ DPAA2_CAAM_NAPI_WEIGHT);
+ if (++i == priv->num_pairs)
+ break;
+ }
+
+ return 0;
+
+err_get_rx_queue:
+ dpaa2_dpseci_congestion_free(priv);
+err_get_vers:
+ dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
+err_open:
+ return err;
+}
+
+static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
+{
+ struct device *dev = priv->dev;
+ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ int err, i;
+
+ for (i = 0; i < priv->num_pairs; i++) {
+ ppriv = per_cpu_ptr(priv->ppriv, i);
+ napi_enable(&ppriv->napi);
+ }
+
+ err = dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpseci_enable() failed\n");
+ return err;
+ }
+
+ dev_info(dev, "DPSECI version %d.%d\n",
+ priv->major_ver,
+ priv->minor_ver);
+
+ return 0;
+}
+
+static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
+{
+ struct device *dev = priv->dev;
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
+ int i, err = 0, enabled;
+
+ err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpseci_disable() failed\n");
+ return err;
+ }
+
+ err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
+ if (err) {
+ dev_err(dev, "dpseci_is_enabled() failed\n");
+ return err;
+ }
+
+ dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
+
+ for (i = 0; i < priv->num_pairs; i++) {
+ ppriv = per_cpu_ptr(priv->ppriv, i);
+ napi_disable(&ppriv->napi);
+ netif_napi_del(&ppriv->napi);
+ }
+
+ return 0;
+}
+
+static struct list_head alg_list;
+
+static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
+{
+ struct device *dev;
+ struct dpaa2_caam_priv *priv;
+ int i, err = 0;
+ bool registered = false;
+
+ /*
+ * There is no way to get CAAM endianness - there is no direct register
+ * space access and MC f/w does not provide this attribute.
+ * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
+ * property.
+ */
+ caam_little_end = true;
+
+ caam_imx = false;
+
+ dev = &dpseci_dev->dev;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, priv);
+
+ priv->domain = iommu_get_domain_for_dev(dev);
+
+ qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
+ 0, SLAB_CACHE_DMA, NULL);
+ if (!qi_cache) {
+ dev_err(dev, "Can't allocate SEC cache\n");
+ err = -ENOMEM;
+ goto err_qicache;
+ }
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
+ if (err) {
+ dev_err(dev, "dma_set_mask_and_coherent() failed\n");
+ goto err_dma_mask;
+ }
+
+ /* Obtain a MC portal */
+ err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
+ if (err) {
+ dev_err(dev, "MC portal allocation failed\n");
+ goto err_dma_mask;
+ }
+
+ priv->ppriv = alloc_percpu(*priv->ppriv);
+ if (!priv->ppriv) {
+ dev_err(dev, "alloc_percpu() failed\n");
+ err = -ENOMEM;
+ goto err_alloc_ppriv;
+ }
+
+ /* DPSECI initialization */
+ err = dpaa2_dpseci_setup(dpseci_dev);
+ if (err < 0) {
+ dev_err(dev, "dpaa2_dpseci_setup() failed\n");
+ goto err_dpseci_setup;
+ }
+
+ /* DPIO */
+ err = dpaa2_dpseci_dpio_setup(priv);
+ if (err) {
+ dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
+ goto err_dpio_setup;
+ }
+
+ /* DPSECI binding to DPIO */
+ err = dpaa2_dpseci_bind(priv);
+ if (err) {
+ dev_err(dev, "dpaa2_dpseci_bind() failed\n");
+ goto err_bind;
+ }
+
+ /* DPSECI enable */
+ err = dpaa2_dpseci_enable(priv);
+ if (err) {
+ dev_err(dev, "dpaa2_dpseci_enable() failed");
+ goto err_bind;
+ }
+
+ /* register crypto algorithms the device supports */
+ INIT_LIST_HEAD(&alg_list);
+ for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+ struct caam_crypto_alg *t_alg;
+ struct caam_alg_template *alg = driver_algs + i;
+ u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
+
+ /* Skip DES algorithms if not supported by device */
+ if (!priv->sec_attr.des_acc_num &&
+ ((alg_sel == OP_ALG_ALGSEL_3DES) ||
+ (alg_sel == OP_ALG_ALGSEL_DES)))
+ continue;
+
+ /* Skip AES algorithms if not supported by device */
+ if (!priv->sec_attr.aes_acc_num &&
+ (alg_sel == OP_ALG_ALGSEL_AES))
+ continue;
+
+ t_alg = caam_alg_alloc(alg);
+ if (IS_ERR(t_alg)) {
+ err = PTR_ERR(t_alg);
+ dev_warn(dev, "%s alg allocation failed: %d\n",
+ alg->driver_name, err);
+ continue;
+ }
+ t_alg->caam.dev = dev;
+
+ err = crypto_register_alg(&t_alg->crypto_alg);
+ if (err) {
+ dev_warn(dev, "%s alg registration failed: %d\n",
+ t_alg->crypto_alg.cra_driver_name, err);
+ kfree(t_alg);
+ continue;
+ }
+
+ list_add_tail(&t_alg->entry, &alg_list);
+ registered = true;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
+ struct caam_aead_alg *t_alg = driver_aeads + i;
+ u32 c1_alg_sel = t_alg->caam.class1_alg_type &
+ OP_ALG_ALGSEL_MASK;
+ u32 c2_alg_sel = t_alg->caam.class2_alg_type &
+ OP_ALG_ALGSEL_MASK;
+
+ /* Skip DES algorithms if not supported by device */
+ if (!priv->sec_attr.des_acc_num &&
+ ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
+ (c1_alg_sel == OP_ALG_ALGSEL_DES)))
+ continue;
+
+ /* Skip AES algorithms if not supported by device */
+ if (!priv->sec_attr.aes_acc_num &&
+ (c1_alg_sel == OP_ALG_ALGSEL_AES))
+ continue;
+
+ /*
+ * Skip algorithms requiring message digests
+ * if MD not supported by device.
+ */
+ if (!priv->sec_attr.md_acc_num && c2_alg_sel)
+ continue;
+
+ t_alg->caam.dev = dev;
+ caam_aead_alg_init(t_alg);
+
+ err = crypto_register_aead(&t_alg->aead);
+ if (err) {
+ dev_warn(dev, "%s alg registration failed: %d\n",
+ t_alg->aead.base.cra_driver_name, err);
+ continue;
+ }
+
+ t_alg->registered = true;
+ registered = true;
+ }
+ if (registered)
+ dev_info(dev, "algorithms registered in /proc/crypto\n");
+
+ return err;
+
+err_bind:
+ dpaa2_dpseci_dpio_free(priv);
+err_dpio_setup:
+ dpaa2_dpseci_free(priv);
+err_dpseci_setup:
+ free_percpu(priv->ppriv);
+err_alloc_ppriv:
+ fsl_mc_portal_free(priv->mc_io);
+err_dma_mask:
+ kmem_cache_destroy(qi_cache);
+err_qicache:
+ dev_set_drvdata(dev, NULL);
+
+ return err;
+}
+
+static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
+{
+ struct device *dev;
+ struct dpaa2_caam_priv *priv;
+ int i;
+
+ dev = &ls_dev->dev;
+ priv = dev_get_drvdata(dev);
+
+ for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
+ struct caam_aead_alg *t_alg = driver_aeads + i;
+
+ if (t_alg->registered)
+ crypto_unregister_aead(&t_alg->aead);
+ }
+
+ if (alg_list.next) {
+ struct caam_crypto_alg *t_alg, *n;
+
+ list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
+ crypto_unregister_alg(&t_alg->crypto_alg);
+ list_del(&t_alg->entry);
+ kfree(t_alg);
+ }
+ }
+
+ dpaa2_dpseci_disable(priv);
+ dpaa2_dpseci_dpio_free(priv);
+ dpaa2_dpseci_free(priv);
+ free_percpu(priv->ppriv);
+ fsl_mc_portal_free(priv->mc_io);
+ dev_set_drvdata(dev, NULL);
+ kmem_cache_destroy(qi_cache);
+
+ return 0;
+}
+
+int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
+{
+ struct dpaa2_fd fd;
+ struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
+ int err = 0, i, id;
+
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ if (priv->cscn_mem) {
+ dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
+ DPAA2_CSCN_SIZE,
+ DMA_FROM_DEVICE);
+ if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
+ dev_dbg_ratelimited(dev, "Dropping request\n");
+ return -EBUSY;
+ }
+ }
+
+ dpaa2_fl_set_flc(&req->fd_flt[1], req->flc->flc_dma);
+
+ req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, req->fd_flt_dma)) {
+ dev_err(dev, "DMA mapping error for QI enqueue request\n");
+ return -EIO;
+ }
+
+ memset(&fd, 0, sizeof(fd));
+ dpaa2_fd_set_format(&fd, dpaa2_fd_list);
+ dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
+ dpaa2_fd_set_len(&fd, req->fd_flt[1].len);
+ dpaa2_fd_set_flc(&fd, req->flc->flc_dma);
+
+ /*
+ * There is no guarantee that preemption is disabled here,
+ * thus take action.
+ */
+ preempt_disable();
+ id = smp_processor_id() % priv->dpseci_attr.num_tx_queues;
+ for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
+ err = dpaa2_io_service_enqueue_fq(NULL,
+ priv->tx_queue_attr[id].fqid,
+ &fd);
+ if (err != -EBUSY)
+ break;
+ }
+ preempt_enable();
+
+ if (unlikely(err < 0)) {
+ dev_err(dev, "Error enqueuing frame: %d\n", err);
+ goto err_out;
+ }
+
+ return -EINPROGRESS;
+
+err_out:
+ dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
+ DMA_BIDIRECTIONAL);
+ return -EIO;
+}
+EXPORT_SYMBOL(dpaa2_caam_enqueue);
+
+const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpseci",
+ },
+ { .vendor = 0x0 }
+};
+
+static struct fsl_mc_driver dpaa2_caam_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = dpaa2_caam_probe,
+ .remove = dpaa2_caam_remove,
+ .match_id_table = dpaa2_caam_match_id_table
+};
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Freescale Semiconductor, Inc");
+MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");
+
+module_fsl_mc_driver(dpaa2_caam_driver);
diff --git a/drivers/crypto/caam/caamalg_qi2.h b/drivers/crypto/caam/caamalg_qi2.h
new file mode 100644
index 0000000..2ba179d
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_qi2.h
@@ -0,0 +1,265 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the names of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CAAMALG_QI2_H_
+#define _CAAMALG_QI2_H_
+
+#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
+#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
+#include <linux/threads.h>
+#include "dpseci.h"
+#include "desc_constr.h"
+
+#define DPAA2_CAAM_STORE_SIZE 16
+/* NAPI weight *must* be a multiple of the store size. */
+#define DPAA2_CAAM_NAPI_WEIGHT 64
+
+/* The congestion entrance threshold was chosen so that on LS2088
+ * we support the maximum throughput for the available memory
+ */
+#define DPAA2_SEC_CONG_ENTRY_THRESH (128 * 1024 * 1024)
+#define DPAA2_SEC_CONG_EXIT_THRESH (DPAA2_SEC_CONG_ENTRY_THRESH * 9 / 10)
+
+/**
+ * dpaa2_caam_priv - driver private data
+ * @dpsec_id: DPSECI object unique ID
+ * @major_ver: DPSECI major version
+ * @minor_ver: DPSECI minor version
+ * @dpseci_attr: DPSECI attributes
+ * @sec_attr: SEC engine attributes
+ * @rx_queue_attr: array of Rx queue attributes
+ * @tx_queue_attr: array of Tx queue attributes
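+ * @num_pairs: number of Rx/Tx queue pairs actually used,
+ * capped at the number of online CPUs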
+ * @cscn_mem: pointer to memory region containing the
+ * dpaa2_cscn struct; its size is larger than
+ * sizeof(struct dpaa2_cscn) to accommodate alignment
+ * @cscn_mem_aligned: pointer to struct dpaa2_cscn; it is computed
+ * as PTR_ALIGN(cscn_mem, DPAA2_CSCN_ALIGN)
+ * @cscn_dma: dma address used by the QMAN to write CSCN messages
+ * @dev: device associated with the DPSECI object
+ * @mc_io: pointer to MC portal's I/O object
+ * @domain: IOMMU domain
+ * @ppriv: per-CPU pointers to private data
+ */
+struct dpaa2_caam_priv {
+ int dpsec_id;
+
+ u16 major_ver;
+ u16 minor_ver;
+
+ struct dpseci_attr dpseci_attr;
+ struct dpseci_sec_attr sec_attr;
+ struct dpseci_rx_queue_attr rx_queue_attr[DPSECI_PRIO_NUM];
+ struct dpseci_tx_queue_attr tx_queue_attr[DPSECI_PRIO_NUM];
+ int num_pairs;
+
+ /* congestion */
+ void *cscn_mem;
+ void *cscn_mem_aligned;
+ dma_addr_t cscn_dma;
+
+ struct device *dev;
+ struct fsl_mc_io *mc_io;
+ struct iommu_domain *domain;
+
+ struct dpaa2_caam_priv_per_cpu __percpu *ppriv;
+};
+
+/**
+ * dpaa2_caam_priv_per_cpu - per CPU private data
+ * @napi: napi structure
+ * @net_dev: netdev used by napi
+ * @req_fqid: (virtual) request (Tx / enqueue) FQID
+ * @rsp_fqid: (virtual) response (Rx / dequeue) FQID
+ * @prio: internal queue number - index for dpaa2_caam_priv.*_queue_attr
+ * @nctx: notification context of response FQ
+ * @store: where dequeued frames are stored
+ * @priv: backpointer to dpaa2_caam_priv
+ */
+struct dpaa2_caam_priv_per_cpu {
+ struct napi_struct napi;
+ struct net_device net_dev;
+ int req_fqid;
+ int rsp_fqid;
+ int prio;
+ struct dpaa2_io_notification_ctx nctx;
+ struct dpaa2_io_store *store;
+ struct dpaa2_caam_priv *priv;
+};
+
+/*
+ * The CAAM QI hardware constructs a job descriptor which points
+ * to the shared descriptor (as pointed to by context_a of the FQ to CAAM).
+ * When the job descriptor is executed by the DECO, the whole job
+ * descriptor together with the shared descriptor gets loaded into
+ * the DECO buffer, which is 64 words long (each word 32 bits).
+ *
+ * The job descriptor constructed by QI hardware has layout:
+ *
+ * HEADER (1 word)
+ * Shdesc ptr (1 or 2 words)
+ * SEQ_OUT_PTR (1 word)
+ * Out ptr (1 or 2 words)
+ * Out length (1 word)
+ * SEQ_IN_PTR (1 word)
+ * In ptr (1 or 2 words)
+ * In length (1 word)
+ *
+ * The shdesc ptr is used to fetch the shared descriptor contents
+ * into the DECO buffer.
+ *
+ * Apart from the shdesc contents, the total number of words that
+ * get loaded into the DECO buffer is 8 or 11. The remaining words
+ * in the DECO buffer can be used for storing the shared descriptor.
+ */
+#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
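
As a quick cross-check of the arithmetic above, the self-contained user-space snippet below restates the constants implied by the comment (4-byte command words, a 64-word DECO buffer, an 11-word worst-case job descriptor with 64-bit pointers). These values are assumptions restated here purely for illustration; the driver takes the real definitions from its descriptor headers.

/*
 * Standalone sanity check of the MAX_SDLEN arithmetic: 64 DECO words minus
 * the 11-word worst-case job descriptor leaves 53 words for the shdesc.
 */
#include <assert.h>
#include <stdio.h>

#define CAAM_CMD_SZ		4			/* bytes per descriptor word (assumed) */
#define CAAM_DESC_BYTES_MAX	(CAAM_CMD_SZ * 64)	/* 64-word DECO buffer */
#define DESC_JOB_IO_LEN		(11 * CAAM_CMD_SZ)	/* worst-case JD size */
#define MAX_SDLEN	((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)

int main(void)
{
	assert(MAX_SDLEN == 64 - 11);
	printf("MAX_SDLEN = %d words (%d bytes)\n",
	       MAX_SDLEN, MAX_SDLEN * CAAM_CMD_SZ);
	return 0;
}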
+
+/* Length of a single buffer in the QI driver memory cache */
+#define CAAM_QI_MEMCACHE_SIZE 512
+
+/*
+ * aead_edesc - s/w-extended aead descriptor
+ * @src_nents: number of segments in input scatterlist
+ * @dst_nents: number of segments in output scatterlist
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @qm_sg_bytes: length of dma mapped h/w link table
+ * @qm_sg_dma: bus physical mapped address of h/w link table
+ * @assoclen_dma: bus physical mapped address of req->assoclen
+ * @sgt: the h/w link table
+ */
+struct aead_edesc {
+ int src_nents;
+ int dst_nents;
+ dma_addr_t iv_dma;
+ int qm_sg_bytes;
+ dma_addr_t qm_sg_dma;
+ dma_addr_t assoclen_dma;
+#define CAAM_QI_MAX_AEAD_SG \
+ ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) / \
+ sizeof(struct dpaa2_sg_entry))
+ struct dpaa2_sg_entry sgt[0];
+};
+
+/*
+ * tls_edesc - s/w-extended tls descriptor
+ * @src_nents: number of segments in input scatterlist
+ * @dst_nents: number of segments in output scatterlist
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @qm_sg_bytes: length of dma mapped h/w link table
+ * @qm_sg_dma: bus physical mapped address of h/w link table
+ * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
+ * @dst: pointer to output scatterlist, useful for unmapping
+ * @sgt: the h/w link table
+ */
+struct tls_edesc {
+ int src_nents;
+ int dst_nents;
+ dma_addr_t iv_dma;
+ int qm_sg_bytes;
+ dma_addr_t qm_sg_dma;
+ struct scatterlist tmp[2];
+ struct scatterlist *dst;
+ struct dpaa2_sg_entry sgt[0];
+};
+
+/*
+ * ablkcipher_edesc - s/w-extended ablkcipher descriptor
+ * @src_nents: number of segments in input scatterlist
+ * @dst_nents: number of segments in output scatterlist
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @qm_sg_bytes: length of dma mapped qm_sg space
+ * @qm_sg_dma: I/O virtual address of h/w link table
+ * @sgt: the h/w link table
+ */
+struct ablkcipher_edesc {
+ int src_nents;
+ int dst_nents;
+ dma_addr_t iv_dma;
+ int qm_sg_bytes;
+ dma_addr_t qm_sg_dma;
+#define CAAM_QI_MAX_ABLKCIPHER_SG \
+ ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \
+ sizeof(struct dpaa2_sg_entry))
+ struct dpaa2_sg_entry sgt[0];
+};
+
+/**
+ * caam_flc - Flow Context (FLC)
+ * @flc: Flow Context options
+ * @sh_desc: Shared Descriptor
+ * @flc_dma: DMA address of the Flow Context
+ */
+struct caam_flc {
+ u32 flc[16];
+ u32 sh_desc[MAX_SDLEN];
+ dma_addr_t flc_dma;
+} ____cacheline_aligned;
+
+enum optype {
+ ENCRYPT = 0,
+ DECRYPT,
+ GIVENCRYPT,
+ NUM_OP
+};
+
+/**
+ * caam_request - the request structure the driver application should fill while
+ * submitting a job to the driver.
+ * @fd_flt: Frame list table defining input and output
+ * fd_flt[0] - FLE pointing to output buffer
+ * fd_flt[1] - FLE pointing to input buffer
+ * @fd_flt_dma: DMA address for the frame list table
+ * @flc: Flow Context
+ * @op_type: operation type
+ * @cbk: Callback function to invoke when job is completed
+ * @ctx: arbitrary context attached to the request by the application
+ * @edesc: extended descriptor; points to one of {ablkcipher,tls,aead}_edesc
+ */
+struct caam_request {
+ struct dpaa2_fl_entry fd_flt[2];
+ dma_addr_t fd_flt_dma;
+ struct caam_flc *flc;
+ enum optype op_type;
+ void (*cbk)(void *ctx, u32 err);
+ void *ctx;
+ void *edesc;
+};
+
+/**
+ * dpaa2_caam_enqueue() - enqueue a crypto request
+ * @dev: device associated with the DPSECI object
+ * @req: pointer to caam_request
+ */
+int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req);
+
+#endif /* _CAAMALG_QI2_H_ */
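
To tie the pieces of this header together, here is a minimal sketch of how a backend such as caamalg_qi2.c might fill a caam_request and hand it to dpaa2_caam_enqueue(). It is kernel-only code that is not compilable outside the tree, my_submit()/my_done() are hypothetical names, and building the two frame-list entries from the extended descriptor is elided; only fields documented above are touched.

/* Hypothetical caller of the submission API declared above. */
#include "caamalg_qi2.h"

static void my_done(void *ctx, u32 status)
{
	/* status is FD[FRC] as delivered by the response-queue handler */
}

static int my_submit(struct device *dev, struct caam_request *req,
		     struct caam_flc *flc, void *edesc)
{
	/*
	 * fd_flt[0] must describe the output buffer and fd_flt[1] the input
	 * buffer; filling those two frame-list entries from the extended
	 * descriptor's S/G table is elided here.
	 */
	req->flc = flc;		/* flow context holding the shared descriptor */
	req->op_type = ENCRYPT;
	req->cbk = my_done;
	req->ctx = edesc;	/* handed back to my_done() as 'ctx' */
	req->edesc = edesc;

	/*
	 * -EINPROGRESS: accepted, completion arrives through my_done();
	 * -EBUSY: DPSECI congested; -EIO: the enqueue itself failed.
	 */
	return dpaa2_caam_enqueue(dev, req);
}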
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 631337c..698580b 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -72,7 +72,7 @@
#define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
/* length of descriptors text */
-#define DESC_AHASH_BASE (4 * CAAM_CMD_SZ)
+#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
@@ -103,20 +103,14 @@ struct caam_hash_ctx {
u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
- u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
dma_addr_t sh_desc_update_dma ____cacheline_aligned;
dma_addr_t sh_desc_update_first_dma;
dma_addr_t sh_desc_fin_dma;
dma_addr_t sh_desc_digest_dma;
- dma_addr_t sh_desc_finup_dma;
struct device *jrdev;
- u32 alg_type;
- u32 alg_op;
u8 key[CAAM_MAX_HASH_KEY_SIZE];
- dma_addr_t key_dma;
int ctx_len;
- unsigned int split_key_len;
- unsigned int split_key_pad_len;
+ struct alginfo adata;
};
/* ahash state */
@@ -143,6 +137,31 @@ struct caam_export_state {
int (*finup)(struct ahash_request *req);
};
+static inline void switch_buf(struct caam_hash_state *state)
+{
+ state->current_buf ^= 1;
+}
+
+static inline u8 *current_buf(struct caam_hash_state *state)
+{
+ return state->current_buf ? state->buf_1 : state->buf_0;
+}
+
+static inline u8 *alt_buf(struct caam_hash_state *state)
+{
+ return state->current_buf ? state->buf_0 : state->buf_1;
+}
+
+static inline int *current_buflen(struct caam_hash_state *state)
+{
+ return state->current_buf ? &state->buflen_1 : &state->buflen_0;
+}
+
+static inline int *alt_buflen(struct caam_hash_state *state)
+{
+ return state->current_buf ? &state->buflen_0 : &state->buflen_1;
+}
+
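
The new current_buf()/alt_buf() helpers implement a simple two-buffer ping-pong: leftover bytes for the next job are staged in the alternate buffer and become current once the completion callback (ahash_done_bi, later in this patch) calls switch_buf(). A self-contained user-space illustration follows, with placeholder buffer sizes rather than the driver's real state layout:

#include <assert.h>
#include <string.h>

struct demo_state {
	unsigned char buf_0[64];
	unsigned char buf_1[64];
	int buflen_0, buflen_1;
	int current_buf;
};

static unsigned char *current_buf(struct demo_state *s)
{ return s->current_buf ? s->buf_1 : s->buf_0; }

static unsigned char *alt_buf(struct demo_state *s)
{ return s->current_buf ? s->buf_0 : s->buf_1; }

static int *current_buflen(struct demo_state *s)
{ return s->current_buf ? &s->buflen_1 : &s->buflen_0; }

static int *alt_buflen(struct demo_state *s)
{ return s->current_buf ? &s->buflen_0 : &s->buflen_1; }

static void switch_buf(struct demo_state *s)
{ s->current_buf ^= 1; }

int main(void)
{
	struct demo_state s = { .current_buf = 0 };

	/* stage leftover bytes for the *next* job in the alternate buffer */
	memcpy(alt_buf(&s), "leftover", 8);
	*alt_buflen(&s) = 8;

	/* the completion path flips the roles of the two buffers... */
	switch_buf(&s);

	/* ...so the staged bytes are now the current buffer of the next job */
	assert(current_buf(&s) == s.buf_1);
	assert(*current_buflen(&s) == 8);
	return 0;
}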
/* Common job descriptor seq in/out ptr routines */
/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
@@ -175,36 +194,27 @@ static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
return dst_dma;
}
-/* Map current buffer in state and put it in link table */
-static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
- struct sec4_sg_entry *sec4_sg,
- u8 *buf, int buflen)
+/* Map current buffer in state (if length > 0) and put it in link table */
+static inline int buf_map_to_sec4_sg(struct device *jrdev,
+ struct sec4_sg_entry *sec4_sg,
+ struct caam_hash_state *state)
{
- dma_addr_t buf_dma;
+ int buflen = *current_buflen(state);
- buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
- dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);
+ if (!buflen)
+ return 0;
- return buf_dma;
-}
+ state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, state->buf_dma)) {
+ dev_err(jrdev, "unable to map buf\n");
+ state->buf_dma = 0;
+ return -ENOMEM;
+ }
-/*
- * Only put buffer in link table if it contains data, which is possible,
- * since a buffer has previously been used, and needs to be unmapped,
- */
-static inline dma_addr_t
-try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
- u8 *buf, dma_addr_t buf_dma, int buflen,
- int last_buflen)
-{
- if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
- dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
- if (buflen)
- buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
- else
- buf_dma = 0;
-
- return buf_dma;
+ dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);
+
+ return 0;
}
/* Map state->caam_ctx, and add it to link table */
@@ -224,89 +234,54 @@ static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
return 0;
}
-/* Common shared descriptor commands */
-static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
-{
- append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
- ctx->split_key_len, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
-}
-
-/* Append key if it has been set */
-static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
-{
- u32 *key_jump_cmd;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- if (ctx->split_key_len) {
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- append_key_ahash(desc, ctx);
-
- set_jump_tgt_here(desc, key_jump_cmd);
- }
-
- /* Propagate errors from shared to job descriptor */
- append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
-}
-
/*
- * For ahash read data from seqin following state->caam_ctx,
- * and write resulting class2 context to seqout, which may be state->caam_ctx
- * or req->result
+ * For ahash update, final and finup (import_ctx = true)
+ * import context, read and write to seqout
+ * For ahash firsts and digest (import_ctx = false)
+ * read and write to seqout
*/
-static inline void ahash_append_load_str(u32 *desc, int digestsize)
+static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
+ struct caam_hash_ctx *ctx, bool import_ctx)
{
- /* Calculate remaining bytes to read */
- append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-
- /* Read remaining bytes */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
- FIFOLD_TYPE_MSG | KEY_VLF);
+ u32 op = ctx->adata.algtype;
+ u32 *skip_key_load;
- /* Store class2 context bytes */
- append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
-}
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
-/*
- * For ahash update, final and finup, import context, read and write to seqout
- */
-static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
- int digestsize,
- struct caam_hash_ctx *ctx)
-{
- init_sh_desc_key_ahash(desc, ctx);
+ /* Append key if it has been set; ahash update excluded */
+ if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) {
+ /* Skip key loading if already shared */
+ skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
- /* Import context from software */
- append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_2_CCB | ctx->ctx_len);
+ append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
+ ctx->adata.keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
- /* Class 2 operation */
- append_operation(desc, op | state | OP_ALG_ENCRYPT);
+ set_jump_tgt_here(desc, skip_key_load);
- /*
- * Load from buf and/or src and write to req->result or state->context
- */
- ahash_append_load_str(desc, digestsize);
-}
+ op |= OP_ALG_AAI_HMAC_PRECOMP;
+ }
-/* For ahash firsts and digest, read and write to seqout */
-static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
- int digestsize, struct caam_hash_ctx *ctx)
-{
- init_sh_desc_key_ahash(desc, ctx);
+ /* If needed, import context from software */
+ if (import_ctx)
+ append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
/* Class 2 operation */
append_operation(desc, op | state | OP_ALG_ENCRYPT);
/*
* Load from buf and/or src and write to req->result or state->context
+ * Calculate remaining bytes to read
*/
- ahash_append_load_str(desc, digestsize);
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ /* Read remaining bytes */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
+ FIFOLD_TYPE_MSG | KEY_VLF);
+ /* Store class2 context bytes */
+ append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
}
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
@@ -314,34 +289,13 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
int digestsize = crypto_ahash_digestsize(ahash);
struct device *jrdev = ctx->jrdev;
- u32 have_key = 0;
u32 *desc;
- if (ctx->split_key_len)
- have_key = OP_ALG_AAI_HMAC_PRECOMP;
-
/* ahash_update shared descriptor */
desc = ctx->sh_desc_update;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Import context from software */
- append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_2_CCB | ctx->ctx_len);
-
- /* Class 2 operation */
- append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
- OP_ALG_ENCRYPT);
-
- /* Load data and write to result or context */
- ahash_append_load_str(desc, ctx->ctx_len);
-
- ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
- DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
- dev_err(jrdev, "unable to map shared descriptor\n");
- return -ENOMEM;
- }
+ ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
"ahash update shdesc@"__stringify(__LINE__)": ",
@@ -350,17 +304,9 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_update_first shared descriptor */
desc = ctx->sh_desc_update_first;
-
- ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
- ctx->ctx_len, ctx);
-
- ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
- desc_bytes(desc),
- DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
- dev_err(jrdev, "unable to map shared descriptor\n");
- return -ENOMEM;
- }
+ ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
"ahash update first shdesc@"__stringify(__LINE__)": ",
@@ -369,53 +315,20 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_final shared descriptor */
desc = ctx->sh_desc_fin;
-
- ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
- OP_ALG_AS_FINALIZE, digestsize, ctx);
-
- ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
- DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
- dev_err(jrdev, "unable to map shared descriptor\n");
- return -ENOMEM;
- }
+ ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc,
desc_bytes(desc), 1);
#endif
- /* ahash_finup shared descriptor */
- desc = ctx->sh_desc_finup;
-
- ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
- OP_ALG_AS_FINALIZE, digestsize, ctx);
-
- ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
- DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
- dev_err(jrdev, "unable to map shared descriptor\n");
- return -ENOMEM;
- }
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
-
/* ahash_digest shared descriptor */
desc = ctx->sh_desc_digest;
-
- ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
- digestsize, ctx);
-
- ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
- desc_bytes(desc),
- DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
- dev_err(jrdev, "unable to map shared descriptor\n");
- return -ENOMEM;
- }
+ ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
"ahash digest shdesc@"__stringify(__LINE__)": ",
@@ -426,14 +339,6 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
return 0;
}
-static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
- u32 keylen)
-{
- return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
- ctx->split_key_pad_len, key_in, keylen,
- ctx->alg_op);
-}
-
/* Digest hash size if it is too large */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
u32 *keylen, u8 *key_out, u32 digestsize)
@@ -469,7 +374,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
}
/* Job descriptor to perform unkeyed hash on key_in */
- append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
+ append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
OP_ALG_AS_INITFINAL);
append_seq_in_ptr(desc, src_dma, *keylen, 0);
append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
@@ -513,10 +418,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
static int ahash_setkey(struct crypto_ahash *ahash,
const u8 *key, unsigned int keylen)
{
- /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
- static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct device *jrdev = ctx->jrdev;
int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
int digestsize = crypto_ahash_digestsize(ahash);
int ret;
@@ -539,43 +441,19 @@ static int ahash_setkey(struct crypto_ahash *ahash,
key = hashed_key;
}
- /* Pick class 2 key length from algorithm submask */
- ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
- OP_ALG_ALGSEL_SHIFT] * 2;
- ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
-
-#ifdef DEBUG
- printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
- ctx->split_key_len, ctx->split_key_pad_len);
- print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
-
- ret = gen_split_hash_key(ctx, key, keylen);
+ ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen,
+ CAAM_MAX_HASH_KEY_SIZE);
if (ret)
goto bad_free_key;
- ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->key_dma)) {
- dev_err(jrdev, "unable to map key i/o memory\n");
- ret = -ENOMEM;
- goto error_free_key;
- }
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
- ctx->split_key_pad_len, 1);
+ ctx->adata.keylen_pad, 1);
#endif
- ret = ahash_set_sh_desc(ahash);
- if (ret) {
- dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
- DMA_TO_DEVICE);
- }
- error_free_key:
kfree(hashed_key);
- return ret;
+ return ahash_set_sh_desc(ahash);
bad_free_key:
kfree(hashed_key);
crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
@@ -604,6 +482,8 @@ static inline void ahash_unmap(struct device *dev,
struct ahash_edesc *edesc,
struct ahash_request *req, int dst_len)
{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
if (edesc->src_nents)
dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
if (edesc->dst_dma)
@@ -612,6 +492,12 @@ static inline void ahash_unmap(struct device *dev,
if (edesc->sec4_sg_bytes)
dma_unmap_single(dev, edesc->sec4_sg_dma,
edesc->sec4_sg_bytes, DMA_TO_DEVICE);
+
+ if (state->buf_dma) {
+ dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
+ DMA_TO_DEVICE);
+ state->buf_dma = 0;
+ }
}
static inline void ahash_unmap_ctx(struct device *dev,
@@ -643,8 +529,7 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = (struct ahash_edesc *)((char *)desc -
- offsetof(struct ahash_edesc, hw_desc));
+ edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
@@ -671,19 +556,19 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
struct ahash_edesc *edesc;
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
-#ifdef DEBUG
struct caam_hash_state *state = ahash_request_ctx(req);
+#ifdef DEBUG
int digestsize = crypto_ahash_digestsize(ahash);
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = (struct ahash_edesc *)((char *)desc -
- offsetof(struct ahash_edesc, hw_desc));
+ edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
+ switch_buf(state);
kfree(edesc);
#ifdef DEBUG
@@ -713,8 +598,7 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = (struct ahash_edesc *)((char *)desc -
- offsetof(struct ahash_edesc, hw_desc));
+ edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
@@ -741,19 +625,19 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
struct ahash_edesc *edesc;
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
-#ifdef DEBUG
struct caam_hash_state *state = ahash_request_ctx(req);
+#ifdef DEBUG
int digestsize = crypto_ahash_digestsize(ahash);
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = (struct ahash_edesc *)((char *)desc -
- offsetof(struct ahash_edesc, hw_desc));
+ edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
+ switch_buf(state);
kfree(edesc);
#ifdef DEBUG
@@ -835,13 +719,12 @@ static int ahash_update_ctx(struct ahash_request *req)
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
- gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
- CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
- u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
- int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
- u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
- int *next_buflen = state->current_buf ? &state->buflen_0 :
- &state->buflen_1, last_buflen;
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ u8 *buf = current_buf(state);
+ int *buflen = current_buflen(state);
+ u8 *next_buf = alt_buf(state);
+ int *next_buflen = alt_buflen(state), last_buflen;
int in_len = *buflen + req->nbytes, to_hash;
u32 *desc;
int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
@@ -895,10 +778,9 @@ static int ahash_update_ctx(struct ahash_request *req)
if (ret)
goto unmap_ctx;
- state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
- edesc->sec4_sg + 1,
- buf, state->buf_dma,
- *buflen, last_buflen);
+ ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
+ if (ret)
+ goto unmap_ctx;
if (mapped_nents) {
sg_to_sec4_sg_last(req->src, mapped_nents,
@@ -909,12 +791,10 @@ static int ahash_update_ctx(struct ahash_request *req)
to_hash - *buflen,
*next_buflen, 0);
} else {
- (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
- cpu_to_caam32(SEC4_SG_LEN_FIN);
+ sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
+ 1);
}
- state->current_buf = !state->current_buf;
-
desc = edesc->hw_desc;
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
@@ -969,12 +849,9 @@ static int ahash_final_ctx(struct ahash_request *req)
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
- gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
- CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
- u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
- int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
- int last_buflen = state->current_buf ? state->buflen_0 :
- state->buflen_1;
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int buflen = *current_buflen(state);
u32 *desc;
int sec4_sg_bytes, sec4_sg_src_index;
int digestsize = crypto_ahash_digestsize(ahash);
@@ -1001,11 +878,11 @@ static int ahash_final_ctx(struct ahash_request *req)
if (ret)
goto unmap_ctx;
- state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
- buf, state->buf_dma, buflen,
- last_buflen);
- (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
- cpu_to_caam32(SEC4_SG_LEN_FIN);
+ ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
+ if (ret)
+ goto unmap_ctx;
+
+ sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1);
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes, DMA_TO_DEVICE);
@@ -1048,12 +925,9 @@ static int ahash_finup_ctx(struct ahash_request *req)
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
- gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
- CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
- u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
- int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
- int last_buflen = state->current_buf ? state->buflen_0 :
- state->buflen_1;
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int buflen = *current_buflen(state);
u32 *desc;
int sec4_sg_src_index;
int src_nents, mapped_nents;
@@ -1082,7 +956,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
/* allocate space for base edesc and hw desc commands, link tables */
edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
- ctx->sh_desc_finup, ctx->sh_desc_finup_dma,
+ ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
flags);
if (!edesc) {
dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
@@ -1098,9 +972,9 @@ static int ahash_finup_ctx(struct ahash_request *req)
if (ret)
goto unmap_ctx;
- state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
- buf, state->buf_dma, buflen,
- last_buflen);
+ ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
+ if (ret)
+ goto unmap_ctx;
ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
sec4_sg_src_index, ctx->ctx_len + buflen,
@@ -1136,15 +1010,18 @@ static int ahash_digest(struct ahash_request *req)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
- gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
- CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
u32 *desc;
int digestsize = crypto_ahash_digestsize(ahash);
int src_nents, mapped_nents;
struct ahash_edesc *edesc;
int ret;
+ state->buf_dma = 0;
+
src_nents = sg_nents_for_len(req->src, req->nbytes);
if (src_nents < 0) {
dev_err(jrdev, "Invalid number of src SG.\n");
@@ -1215,10 +1092,10 @@ static int ahash_final_no_ctx(struct ahash_request *req)
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
- gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
- CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
- u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
- int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ u8 *buf = current_buf(state);
+ int buflen = *current_buflen(state);
u32 *desc;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
@@ -1276,13 +1153,12 @@ static int ahash_update_no_ctx(struct ahash_request *req)
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
- gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
- CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
- u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
- int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
- u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
- int *next_buflen = state->current_buf ? &state->buflen_0 :
- &state->buflen_1;
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ u8 *buf = current_buf(state);
+ int *buflen = current_buflen(state);
+ u8 *next_buf = alt_buf(state);
+ int *next_buflen = alt_buflen(state);
int in_len = *buflen + req->nbytes, to_hash;
int sec4_sg_bytes, src_nents, mapped_nents;
struct ahash_edesc *edesc;
@@ -1331,8 +1207,10 @@ static int ahash_update_no_ctx(struct ahash_request *req)
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->dst_dma = 0;
- state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
- buf, *buflen);
+ ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
+ if (ret)
+ goto unmap_ctx;
+
sg_to_sec4_sg_last(req->src, mapped_nents,
edesc->sec4_sg + 1, 0);
@@ -1342,8 +1220,6 @@ static int ahash_update_no_ctx(struct ahash_request *req)
*next_buflen, 0);
}
- state->current_buf = !state->current_buf;
-
desc = edesc->hw_desc;
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
@@ -1403,12 +1279,9 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
- gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
- CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
- u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
- int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
- int last_buflen = state->current_buf ? state->buflen_0 :
- state->buflen_1;
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int buflen = *current_buflen(state);
u32 *desc;
int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
int digestsize = crypto_ahash_digestsize(ahash);
@@ -1450,9 +1323,9 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
edesc->src_nents = src_nents;
edesc->sec4_sg_bytes = sec4_sg_bytes;
- state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
- state->buf_dma, buflen,
- last_buflen);
+ ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
+ if (ret)
+ goto unmap;
ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
req->nbytes);
@@ -1496,11 +1369,10 @@ static int ahash_update_first(struct ahash_request *req)
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
- gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
- CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
- u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
- int *next_buflen = state->current_buf ?
- &state->buflen_1 : &state->buflen_0;
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ u8 *next_buf = alt_buf(state);
+ int *next_buflen = alt_buflen(state);
int to_hash;
u32 *desc;
int src_nents, mapped_nents;
@@ -1582,6 +1454,7 @@ static int ahash_update_first(struct ahash_request *req)
state->final = ahash_final_no_ctx;
scatterwalk_map_and_copy(next_buf, req->src, 0,
req->nbytes, 0);
+ switch_buf(state);
}
#ifdef DEBUG
print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
@@ -1688,7 +1561,6 @@ struct caam_hash_template {
unsigned int blocksize;
struct ahash_alg template_ahash;
u32 alg_type;
- u32 alg_op;
};
/* ahash descriptors */
@@ -1714,7 +1586,6 @@ static struct caam_hash_template driver_hash[] = {
},
},
.alg_type = OP_ALG_ALGSEL_SHA1,
- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
}, {
.name = "sha224",
.driver_name = "sha224-caam",
@@ -1736,7 +1607,6 @@ static struct caam_hash_template driver_hash[] = {
},
},
.alg_type = OP_ALG_ALGSEL_SHA224,
- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
}, {
.name = "sha256",
.driver_name = "sha256-caam",
@@ -1758,7 +1628,6 @@ static struct caam_hash_template driver_hash[] = {
},
},
.alg_type = OP_ALG_ALGSEL_SHA256,
- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
}, {
.name = "sha384",
.driver_name = "sha384-caam",
@@ -1780,7 +1649,6 @@ static struct caam_hash_template driver_hash[] = {
},
},
.alg_type = OP_ALG_ALGSEL_SHA384,
- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
}, {
.name = "sha512",
.driver_name = "sha512-caam",
@@ -1802,7 +1670,6 @@ static struct caam_hash_template driver_hash[] = {
},
},
.alg_type = OP_ALG_ALGSEL_SHA512,
- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
}, {
.name = "md5",
.driver_name = "md5-caam",
@@ -1824,14 +1691,12 @@ static struct caam_hash_template driver_hash[] = {
},
},
.alg_type = OP_ALG_ALGSEL_MD5,
- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
},
};
struct caam_hash_alg {
struct list_head entry;
int alg_type;
- int alg_op;
struct ahash_alg ahash_alg;
};
@@ -1853,6 +1718,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
HASH_MSG_LEN + SHA256_DIGEST_SIZE,
HASH_MSG_LEN + 64,
HASH_MSG_LEN + SHA512_DIGEST_SIZE };
+ dma_addr_t dma_addr;
/*
* Get a Job ring from Job Ring driver to ensure in-order
@@ -1863,11 +1729,31 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
pr_err("Job Ring Device allocation for transform failed\n");
return PTR_ERR(ctx->jrdev);
}
+
+ dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
+ offsetof(struct caam_hash_ctx,
+ sh_desc_update_dma),
+ DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(ctx->jrdev, dma_addr)) {
+ dev_err(ctx->jrdev, "unable to map shared descriptors\n");
+ caam_jr_free(ctx->jrdev);
+ return -ENOMEM;
+ }
+
+ ctx->sh_desc_update_dma = dma_addr;
+ ctx->sh_desc_update_first_dma = dma_addr +
+ offsetof(struct caam_hash_ctx,
+ sh_desc_update_first);
+ ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
+ sh_desc_fin);
+ ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
+ sh_desc_digest);
+
/* copy descriptor header template value */
- ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
- ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;
+ ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
- ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
+ ctx->ctx_len = runninglen[(ctx->adata.algtype &
+ OP_ALG_ALGSEL_SUBMASK) >>
OP_ALG_ALGSEL_SHIFT];
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
@@ -1879,30 +1765,10 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
- if (ctx->sh_desc_update_dma &&
- !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
- dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
- desc_bytes(ctx->sh_desc_update),
- DMA_TO_DEVICE);
- if (ctx->sh_desc_update_first_dma &&
- !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
- dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
- desc_bytes(ctx->sh_desc_update_first),
- DMA_TO_DEVICE);
- if (ctx->sh_desc_fin_dma &&
- !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
- dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
- desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
- if (ctx->sh_desc_digest_dma &&
- !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
- dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
- desc_bytes(ctx->sh_desc_digest),
- DMA_TO_DEVICE);
- if (ctx->sh_desc_finup_dma &&
- !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
- dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
- desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
-
+ dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
+ offsetof(struct caam_hash_ctx,
+ sh_desc_update_dma),
+ DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
caam_jr_free(ctx->jrdev);
}
@@ -1961,7 +1827,6 @@ caam_hash_alloc(struct caam_hash_template *template,
alg->cra_type = &crypto_ahash_type;
t_alg->alg_type = template->alg_type;
- t_alg->alg_op = template->alg_op;
return t_alg;
}
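
The caamhash rework above maps the whole run of shared descriptors once at cra_init time and afterwards only calls dma_sync_single_for_device() when a descriptor is rewritten, instead of mapping and unmapping each descriptor separately. Below is a minimal sketch of that map-once/sync-per-update pattern, assuming a hypothetical my_ctx layout in which the descriptor buffers sit contiguously in front of their DMA addresses; the names and sizes are illustrative, not taken from the driver.

/*
 * Sketch only: a hypothetical context layout mirroring the map-once /
 * sync-per-update scheme used by caamhash.c above.
 */
#include <linux/dma-mapping.h>

#define SH_DESC_MAX_LEN 64			/* illustrative size, in u32 words */

struct my_ctx {
	u32 sh_desc_a[SH_DESC_MAX_LEN];		/* descriptors laid out back to back */
	u32 sh_desc_b[SH_DESC_MAX_LEN];
	dma_addr_t sh_desc_a_dma;		/* DMA addresses follow the descriptors */
	dma_addr_t sh_desc_b_dma;
	struct device *dev;
};

static int my_ctx_map_descs(struct my_ctx *ctx)
{
	dma_addr_t dma;

	/* One mapping covers every descriptor; the initial CPU sync is skipped. */
	dma = dma_map_single_attrs(ctx->dev, ctx->sh_desc_a,
				   offsetof(struct my_ctx, sh_desc_a_dma),
				   DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->dev, dma))
		return -ENOMEM;

	ctx->sh_desc_a_dma = dma;
	ctx->sh_desc_b_dma = dma + offsetof(struct my_ctx, sh_desc_b);
	return 0;
}

/* After rewriting sh_desc_a in place, push the new contents to the device. */
static void my_ctx_update_desc_a(struct my_ctx *ctx, unsigned int len_bytes)
{
	dma_sync_single_for_device(ctx->dev, ctx->sh_desc_a_dma, len_bytes,
				   DMA_TO_DEVICE);
}

Teardown then needs a single dma_unmap_single_attrs() over the same range, which is what the reworked caam_hash_cra_exit() above does.
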
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 354a16a..4fcb378 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -18,6 +18,10 @@
#define DESC_RSA_PUB_LEN (2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb))
#define DESC_RSA_PRIV_F1_LEN (2 * CAAM_CMD_SZ + \
sizeof(struct rsa_priv_f1_pdb))
+#define DESC_RSA_PRIV_F2_LEN (2 * CAAM_CMD_SZ + \
+ sizeof(struct rsa_priv_f2_pdb))
+#define DESC_RSA_PRIV_F3_LEN (2 * CAAM_CMD_SZ + \
+ sizeof(struct rsa_priv_f3_pdb))
static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
struct akcipher_request *req)
@@ -54,6 +58,42 @@ static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
}
+static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
+ struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_key *key = &ctx->key;
+ struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
+ size_t p_sz = key->p_sz;
+ size_t q_sz = key->q_sz;
+
+ dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
+ dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
+ dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
+ dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
+ dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
+}
+
+static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
+ struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_key *key = &ctx->key;
+ struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
+ size_t p_sz = key->p_sz;
+ size_t q_sz = key->q_sz;
+
+ dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
+ dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
+ dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
+ dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
+ dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
+ dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
+ dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
+}
+
/* RSA Job Completion handler */
static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
{
@@ -90,6 +130,42 @@ static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err,
akcipher_request_complete(req, err);
}
+static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
+ void *context)
+{
+ struct akcipher_request *req = context;
+ struct rsa_edesc *edesc;
+
+ if (err)
+ caam_jr_strstatus(dev, err);
+
+ edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
+
+ rsa_priv_f2_unmap(dev, edesc, req);
+ rsa_io_unmap(dev, edesc, req);
+ kfree(edesc);
+
+ akcipher_request_complete(req, err);
+}
+
+static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
+ void *context)
+{
+ struct akcipher_request *req = context;
+ struct rsa_edesc *edesc;
+
+ if (err)
+ caam_jr_strstatus(dev, err);
+
+ edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
+
+ rsa_priv_f3_unmap(dev, edesc, req);
+ rsa_io_unmap(dev, edesc, req);
+ kfree(edesc);
+
+ akcipher_request_complete(req, err);
+}
+
static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
size_t desclen)
{
@@ -97,8 +173,8 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
struct device *dev = ctx->dev;
struct rsa_edesc *edesc;
- gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
- CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
int sgc;
int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
int src_nents, dst_nents;
@@ -258,6 +334,172 @@ static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
return 0;
}
+static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
+ struct rsa_edesc *edesc)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_key *key = &ctx->key;
+ struct device *dev = ctx->dev;
+ struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
+ int sec4_sg_index = 0;
+ size_t p_sz = key->p_sz;
+ size_t q_sz = key->q_sz;
+
+ pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, pdb->d_dma)) {
+ dev_err(dev, "Unable to map RSA private exponent memory\n");
+ return -ENOMEM;
+ }
+
+ pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, pdb->p_dma)) {
+ dev_err(dev, "Unable to map RSA prime factor p memory\n");
+ goto unmap_d;
+ }
+
+ pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, pdb->q_dma)) {
+ dev_err(dev, "Unable to map RSA prime factor q memory\n");
+ goto unmap_p;
+ }
+
+ pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, pdb->tmp1_dma)) {
+ dev_err(dev, "Unable to map RSA tmp1 memory\n");
+ goto unmap_q;
+ }
+
+ pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, pdb->tmp2_dma)) {
+ dev_err(dev, "Unable to map RSA tmp2 memory\n");
+ goto unmap_tmp1;
+ }
+
+ if (edesc->src_nents > 1) {
+ pdb->sgf |= RSA_PRIV_PDB_SGF_G;
+ pdb->g_dma = edesc->sec4_sg_dma;
+ sec4_sg_index += edesc->src_nents;
+ } else {
+ pdb->g_dma = sg_dma_address(req->src);
+ }
+
+ if (edesc->dst_nents > 1) {
+ pdb->sgf |= RSA_PRIV_PDB_SGF_F;
+ pdb->f_dma = edesc->sec4_sg_dma +
+ sec4_sg_index * sizeof(struct sec4_sg_entry);
+ } else {
+ pdb->f_dma = sg_dma_address(req->dst);
+ }
+
+ pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
+ pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;
+
+ return 0;
+
+unmap_tmp1:
+ dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
+unmap_q:
+ dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
+unmap_p:
+ dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
+unmap_d:
+ dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
+
+ return -ENOMEM;
+}
+
+static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
+ struct rsa_edesc *edesc)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_key *key = &ctx->key;
+ struct device *dev = ctx->dev;
+ struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
+ int sec4_sg_index = 0;
+ size_t p_sz = key->p_sz;
+ size_t q_sz = key->q_sz;
+
+ pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, pdb->p_dma)) {
+ dev_err(dev, "Unable to map RSA prime factor p memory\n");
+ return -ENOMEM;
+ }
+
+ pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, pdb->q_dma)) {
+ dev_err(dev, "Unable to map RSA prime factor q memory\n");
+ goto unmap_p;
+ }
+
+ pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, pdb->dp_dma)) {
+ dev_err(dev, "Unable to map RSA exponent dp memory\n");
+ goto unmap_q;
+ }
+
+ pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, pdb->dq_dma)) {
+ dev_err(dev, "Unable to map RSA exponent dq memory\n");
+ goto unmap_dp;
+ }
+
+ pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, pdb->c_dma)) {
+ dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
+ goto unmap_dq;
+ }
+
+ pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, pdb->tmp1_dma)) {
+ dev_err(dev, "Unable to map RSA tmp1 memory\n");
+ goto unmap_qinv;
+ }
+
+ pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, pdb->tmp2_dma)) {
+ dev_err(dev, "Unable to map RSA tmp2 memory\n");
+ goto unmap_tmp1;
+ }
+
+ if (edesc->src_nents > 1) {
+ pdb->sgf |= RSA_PRIV_PDB_SGF_G;
+ pdb->g_dma = edesc->sec4_sg_dma;
+ sec4_sg_index += edesc->src_nents;
+ } else {
+ pdb->g_dma = sg_dma_address(req->src);
+ }
+
+ if (edesc->dst_nents > 1) {
+ pdb->sgf |= RSA_PRIV_PDB_SGF_F;
+ pdb->f_dma = edesc->sec4_sg_dma +
+ sec4_sg_index * sizeof(struct sec4_sg_entry);
+ } else {
+ pdb->f_dma = sg_dma_address(req->dst);
+ }
+
+ pdb->sgf |= key->n_sz;
+ pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;
+
+ return 0;
+
+unmap_tmp1:
+ dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
+unmap_qinv:
+ dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
+unmap_dq:
+ dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
+unmap_dp:
+ dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
+unmap_q:
+ dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
+unmap_p:
+ dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
+
+ return -ENOMEM;
+}
+
static int caam_rsa_enc(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
@@ -301,24 +543,14 @@ init_fail:
return ret;
}
-static int caam_rsa_dec(struct akcipher_request *req)
+static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
- struct caam_rsa_key *key = &ctx->key;
struct device *jrdev = ctx->dev;
struct rsa_edesc *edesc;
int ret;
- if (unlikely(!key->n || !key->d))
- return -EINVAL;
-
- if (req->dst_len < key->n_sz) {
- req->dst_len = key->n_sz;
- dev_err(jrdev, "Output buffer length less than parameter n\n");
- return -EOVERFLOW;
- }
-
/* Allocate extended descriptor */
edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
if (IS_ERR(edesc))
@@ -344,17 +576,147 @@ init_fail:
return ret;
}
+static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct device *jrdev = ctx->dev;
+ struct rsa_edesc *edesc;
+ int ret;
+
+ /* Allocate extended descriptor */
+ edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ /* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
+ ret = set_rsa_priv_f2_pdb(req, edesc);
+ if (ret)
+ goto init_fail;
+
+ /* Initialize Job Descriptor */
+ init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);
+
+ ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f2_done, req);
+ if (!ret)
+ return -EINPROGRESS;
+
+ rsa_priv_f2_unmap(jrdev, edesc, req);
+
+init_fail:
+ rsa_io_unmap(jrdev, edesc, req);
+ kfree(edesc);
+ return ret;
+}
+
+static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct device *jrdev = ctx->dev;
+ struct rsa_edesc *edesc;
+ int ret;
+
+ /* Allocate extended descriptor */
+ edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ /* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
+ ret = set_rsa_priv_f3_pdb(req, edesc);
+ if (ret)
+ goto init_fail;
+
+ /* Initialize Job Descriptor */
+ init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);
+
+ ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f3_done, req);
+ if (!ret)
+ return -EINPROGRESS;
+
+ rsa_priv_f3_unmap(jrdev, edesc, req);
+
+init_fail:
+ rsa_io_unmap(jrdev, edesc, req);
+ kfree(edesc);
+ return ret;
+}
+
+static int caam_rsa_dec(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_key *key = &ctx->key;
+ int ret;
+
+ if (unlikely(!key->n || !key->d))
+ return -EINVAL;
+
+ if (req->dst_len < key->n_sz) {
+ req->dst_len = key->n_sz;
+ dev_err(ctx->dev, "Output buffer length less than parameter n\n");
+ return -EOVERFLOW;
+ }
+
+ if (key->priv_form == FORM3)
+ ret = caam_rsa_dec_priv_f3(req);
+ else if (key->priv_form == FORM2)
+ ret = caam_rsa_dec_priv_f2(req);
+ else
+ ret = caam_rsa_dec_priv_f1(req);
+
+ return ret;
+}
+
static void caam_rsa_free_key(struct caam_rsa_key *key)
{
kzfree(key->d);
+ kzfree(key->p);
+ kzfree(key->q);
+ kzfree(key->dp);
+ kzfree(key->dq);
+ kzfree(key->qinv);
+ kzfree(key->tmp1);
+ kzfree(key->tmp2);
kfree(key->e);
kfree(key->n);
- key->d = NULL;
- key->e = NULL;
- key->n = NULL;
- key->d_sz = 0;
- key->e_sz = 0;
- key->n_sz = 0;
+ memset(key, 0, sizeof(*key));
+}
+
+static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
+{
+ while (!**ptr && *nbytes) {
+ (*ptr)++;
+ (*nbytes)--;
+ }
+}
+
+/**
+ * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
+ * dP, dQ and qInv could decode to less than the corresponding p, q length, as the
+ * BER-encoding requires that the minimum number of bytes be used to encode the
+ * integer. dP, dQ, qInv decoded values have to be zero-padded to appropriate
+ * length.
+ *
+ * @ptr : pointer to {dP, dQ, qInv} CRT member
+ * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
+ * @dstlen: length in bytes of corresponding p or q prime factor
+ */
+static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
+{
+ u8 *dst;
+
+ caam_rsa_drop_leading_zeros(&ptr, &nbytes);
+ if (!nbytes)
+ return NULL;
+
+ dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL);
+ if (!dst)
+ return NULL;
+
+ memcpy(dst + (dstlen - nbytes), ptr, nbytes);
+
+ return dst;
}
/**
@@ -370,10 +732,9 @@ static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
{
u8 *val;
- while (!*buf && *nbytes) {
- buf++;
- (*nbytes)--;
- }
+ caam_rsa_drop_leading_zeros(&buf, nbytes);
+ if (!*nbytes)
+ return NULL;
val = kzalloc(*nbytes, GFP_DMA | GFP_KERNEL);
if (!val)
@@ -395,7 +756,7 @@ static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
unsigned int keylen)
{
struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
- struct rsa_key raw_key = {0};
+ struct rsa_key raw_key = {NULL};
struct caam_rsa_key *rsa_key = &ctx->key;
int ret;
@@ -437,11 +798,69 @@ err:
return -ENOMEM;
}
+static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
+ struct rsa_key *raw_key)
+{
+ struct caam_rsa_key *rsa_key = &ctx->key;
+ size_t p_sz = raw_key->p_sz;
+ size_t q_sz = raw_key->q_sz;
+
+ rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
+ if (!rsa_key->p)
+ return;
+ rsa_key->p_sz = p_sz;
+
+ rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
+ if (!rsa_key->q)
+ goto free_p;
+ rsa_key->q_sz = q_sz;
+
+ rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
+ if (!rsa_key->tmp1)
+ goto free_q;
+
+ rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
+ if (!rsa_key->tmp2)
+ goto free_tmp1;
+
+ rsa_key->priv_form = FORM2;
+
+ rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
+ if (!rsa_key->dp)
+ goto free_tmp2;
+
+ rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
+ if (!rsa_key->dq)
+ goto free_dp;
+
+ rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
+ q_sz);
+ if (!rsa_key->qinv)
+ goto free_dq;
+
+ rsa_key->priv_form = FORM3;
+
+ return;
+
+free_dq:
+ kzfree(rsa_key->dq);
+free_dp:
+ kzfree(rsa_key->dp);
+free_tmp2:
+ kzfree(rsa_key->tmp2);
+free_tmp1:
+ kzfree(rsa_key->tmp1);
+free_q:
+ kzfree(rsa_key->q);
+free_p:
+ kzfree(rsa_key->p);
+}
+
static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
unsigned int keylen)
{
struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
- struct rsa_key raw_key = {0};
+ struct rsa_key raw_key = {NULL};
struct caam_rsa_key *rsa_key = &ctx->key;
int ret;
@@ -483,6 +902,8 @@ static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
memcpy(rsa_key->d, raw_key.d, raw_key.d_sz);
memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);
+ caam_rsa_set_priv_key_form(ctx, &raw_key);
+
return 0;
err:
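
caam_read_rsa_crt() above strips the leading zero octets that BER encoding allows and then left-pads the dP, dQ and qInv values to the length of the corresponding prime, since the CAAM protocol data block expects fixed-size operands. A self-contained sketch of that padding step follows; the helper name is made up for illustration.

/*
 * Sketch only: left-pad a BER-decoded CRT member into a freshly
 * allocated, DMA-able buffer of the prime factor's length.
 */
#include <linux/slab.h>
#include <linux/string.h>

static u8 *pad_crt_member(const u8 *src, size_t src_len, size_t dst_len)
{
	u8 *dst;

	/* Drop leading zero octets left over from the BER encoding. */
	while (src_len && !*src) {
		src++;
		src_len--;
	}
	if (!src_len || src_len > dst_len)
		return NULL;

	dst = kzalloc(dst_len, GFP_DMA | GFP_KERNEL);
	if (!dst)
		return NULL;

	/* kzalloc() zeroed the buffer, so only the tail needs copying. */
	memcpy(dst + (dst_len - src_len), src, src_len);
	return dst;
}
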
diff --git a/drivers/crypto/caam/caampkc.h b/drivers/crypto/caam/caampkc.h
index f595d15..87ab75e 100644
--- a/drivers/crypto/caam/caampkc.h
+++ b/drivers/crypto/caam/caampkc.h
@@ -13,21 +13,75 @@
#include "pdb.h"
/**
+ * caam_priv_key_form - CAAM RSA private key representation
+ * CAAM RSA private key may have either of three forms.
+ *
+ * 1. The first representation consists of the pair (n, d), where the
+ * components have the following meanings:
+ * n the RSA modulus
+ * d the RSA private exponent
+ *
+ * 2. The second representation consists of the triplet (p, q, d), where the
+ * components have the following meanings:
+ * p the first prime factor of the RSA modulus n
+ * q the second prime factor of the RSA modulus n
+ * d the RSA private exponent
+ *
+ * 3. The third representation consists of the quintuple (p, q, dP, dQ, qInv),
+ * where the components have the following meanings:
+ * p the first prime factor of the RSA modulus n
+ * q the second prime factor of the RSA modulus n
+ * dP the first factor's CRT exponent
+ * dQ the second factor's CRT exponent
+ * qInv the (first) CRT coefficient
+ *
+ * The benefit of using the third or the second key form is lower computational
+ * cost for the decryption and signature operations.
+ */
+enum caam_priv_key_form {
+ FORM1,
+ FORM2,
+ FORM3
+};
+
+/**
* caam_rsa_key - CAAM RSA key structure. Keys are allocated in DMA zone.
* @n : RSA modulus raw byte stream
* @e : RSA public exponent raw byte stream
* @d : RSA private exponent raw byte stream
+ * @p : RSA prime factor p of RSA modulus n
+ * @q : RSA prime factor q of RSA modulus n
+ * @dp : RSA CRT exponent of p
+ * @dq : RSA CRT exponent of q
+ * @qinv : RSA CRT coefficient
+ * @tmp1 : CAAM uses this temporary buffer as internal state buffer.
+ * It is assumed to be as long as p.
+ * @tmp2 : CAAM uses this temporary buffer as internal state buffer.
+ * It is assumed to be as long as q.
* @n_sz : length in bytes of RSA modulus n
* @e_sz : length in bytes of RSA public exponent
* @d_sz : length in bytes of RSA private exponent
+ * @p_sz : length in bytes of RSA prime factor p of RSA modulus n
+ * @q_sz : length in bytes of RSA prime factor q of RSA modulus n
+ * @priv_form : CAAM RSA private key representation
*/
struct caam_rsa_key {
u8 *n;
u8 *e;
u8 *d;
+ u8 *p;
+ u8 *q;
+ u8 *dp;
+ u8 *dq;
+ u8 *qinv;
+ u8 *tmp1;
+ u8 *tmp2;
size_t n_sz;
size_t e_sz;
size_t d_sz;
+ size_t p_sz;
+ size_t q_sz;
+ enum caam_priv_key_form priv_form;
};
/**
@@ -59,6 +113,8 @@ struct rsa_edesc {
union {
struct rsa_pub_pdb pub;
struct rsa_priv_f1_pdb priv_f1;
+ struct rsa_priv_f2_pdb priv_f2;
+ struct rsa_priv_f3_pdb priv_f3;
} pdb;
u32 hw_desc[];
};
@@ -66,5 +122,7 @@ struct rsa_edesc {
/* Descriptor construction primitives. */
void init_rsa_pub_desc(u32 *desc, struct rsa_pub_pdb *pdb);
void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb);
+void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb);
+void init_rsa_priv_f3_desc(u32 *desc, struct rsa_priv_f3_pdb *pdb);
#endif
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index 9b92af2..fde07d4 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -52,7 +52,7 @@
/* length of descriptors */
#define DESC_JOB_O_LEN (CAAM_CMD_SZ * 2 + CAAM_PTR_SZ * 2)
-#define DESC_RNG_LEN (4 * CAAM_CMD_SZ)
+#define DESC_RNG_LEN (3 * CAAM_CMD_SZ)
/* Buffer, its dma address and lock */
struct buf_data {
@@ -100,8 +100,7 @@ static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context)
{
struct buf_data *bd;
- bd = (struct buf_data *)((char *)desc -
- offsetof(struct buf_data, hw_desc));
+ bd = container_of(desc, struct buf_data, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
@@ -196,9 +195,6 @@ static inline int rng_create_sh_desc(struct caam_rng_ctx *ctx)
init_sh_desc(desc, HDR_SHARE_SERIAL);
- /* Propagate errors from shared to job descriptor */
- append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
-
/* Generate random bytes */
append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG);
@@ -289,11 +285,7 @@ static int caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev)
if (err)
return err;
- err = caam_init_buf(ctx, 1);
- if (err)
- return err;
-
- return 0;
+ return caam_init_buf(ctx, 1);
}
static struct hwrng caam_rng = {
@@ -351,7 +343,7 @@ static int __init caam_rng_init(void)
pr_err("Job Ring Device allocation for transform failed\n");
return PTR_ERR(dev);
}
- rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA);
+ rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL);
if (!rng_ctx) {
err = -ENOMEM;
goto free_caam_alloc;
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index 7149cd2..4e084f5 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -16,6 +16,7 @@
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
+#include <linux/iommu.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/in.h>
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 98468b9..8f9642c 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -2,40 +2,41 @@
* Controller-level driver, kernel property detection, initialization
*
* Copyright 2008-2012 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
*/
#include <linux/device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/sys_soc.h>
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
#include "desc_constr.h"
-#include "error.h"
#include "ctrl.h"
bool caam_little_end;
EXPORT_SYMBOL(caam_little_end);
+bool caam_imx;
+EXPORT_SYMBOL(caam_imx);
+bool caam_dpaa2;
+EXPORT_SYMBOL(caam_dpaa2);
+
+#ifdef CONFIG_CAAM_QI
+#include "qi.h"
+#endif
/*
* i.MX targets tend to have clock control subsystems that can
* enable/disable clocking to our device.
*/
-#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
-static inline struct clk *caam_drv_identify_clk(struct device *dev,
- char *clk_name)
-{
- return devm_clk_get(dev, clk_name);
-}
-#else
static inline struct clk *caam_drv_identify_clk(struct device *dev,
char *clk_name)
{
- return NULL;
+ return caam_imx ? devm_clk_get(dev, clk_name) : NULL;
}
-#endif
/*
* Descriptor to instantiate RNG State Handle 0 in normal mode and
@@ -270,7 +271,7 @@ static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
/*
* If the corresponding bit is set, then it means the state
* handle was initialized by us, and thus it needs to be
- * deintialized as well
+ * deinitialized as well
*/
if ((1 << sh_idx) & state_handle_mask) {
/*
@@ -303,20 +304,24 @@ static int caam_remove(struct platform_device *pdev)
struct device *ctrldev;
struct caam_drv_private *ctrlpriv;
struct caam_ctrl __iomem *ctrl;
- int ring;
ctrldev = &pdev->dev;
ctrlpriv = dev_get_drvdata(ctrldev);
ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
- /* Remove platform devices for JobRs */
- for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
- if (ctrlpriv->jrpdev[ring])
- of_device_unregister(ctrlpriv->jrpdev[ring]);
- }
+ /* Remove platform devices under the crypto node */
+ of_platform_depopulate(ctrldev);
+
+#ifdef CONFIG_CAAM_QI
+ if (ctrlpriv->qidev)
+ caam_qi_shutdown(ctrlpriv->qidev);
+#endif
- /* De-initialize RNG state handles initialized by this driver. */
- if (ctrlpriv->rng4_sh_init)
+ /*
+ * De-initialize RNG state handles initialized by this driver.
+ * In case of DPAA 2.x, RNG is managed by MC firmware.
+ */
+ if (!caam_dpaa2 && ctrlpriv->rng4_sh_init)
deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
/* Shut down debug views */
@@ -331,8 +336,8 @@ static int caam_remove(struct platform_device *pdev)
clk_disable_unprepare(ctrlpriv->caam_ipg);
clk_disable_unprepare(ctrlpriv->caam_mem);
clk_disable_unprepare(ctrlpriv->caam_aclk);
- clk_disable_unprepare(ctrlpriv->caam_emi_slow);
-
+ if (ctrlpriv->caam_emi_slow)
+ clk_disable_unprepare(ctrlpriv->caam_emi_slow);
return 0;
}
@@ -366,11 +371,8 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
*/
val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
>> RTSDCTL_ENT_DLY_SHIFT;
- if (ent_delay <= val) {
- /* put RNG4 into run mode */
- clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, 0);
- return;
- }
+ if (ent_delay <= val)
+ goto start_rng;
val = rd_reg32(&r4tst->rtsdctl);
val = (val & ~RTSDCTL_ENT_DLY_MASK) |
@@ -382,15 +384,12 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);
/* read the control register */
val = rd_reg32(&r4tst->rtmctl);
+start_rng:
/*
* select raw sampling in both entropy shifter
- * and statistical checker
+ * and statistical checker; put RNG4 into run mode
*/
- clrsetbits_32(&val, 0, RTMCTL_SAMP_MODE_RAW_ES_SC);
- /* put RNG4 into run mode */
- clrsetbits_32(&val, RTMCTL_PRGM, 0);
- /* write back the control register */
- wr_reg32(&r4tst->rtmctl, val);
+ clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, RTMCTL_SAMP_MODE_RAW_ES_SC);
}
/**
@@ -411,28 +410,26 @@ int caam_get_era(void)
}
EXPORT_SYMBOL(caam_get_era);
-#ifdef CONFIG_DEBUG_FS
-static int caam_debugfs_u64_get(void *data, u64 *val)
-{
- *val = caam64_to_cpu(*(u64 *)data);
- return 0;
-}
-
-static int caam_debugfs_u32_get(void *data, u64 *val)
-{
- *val = caam32_to_cpu(*(u32 *)data);
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
-DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
-#endif
+static const struct of_device_id caam_match[] = {
+ {
+ .compatible = "fsl,sec-v4.0",
+ },
+ {
+ .compatible = "fsl,sec4.0",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, caam_match);
/* Probe routine for CAAM top (controller) level */
static int caam_probe(struct platform_device *pdev)
{
- int ret, ring, rspec, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
+ int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
u64 caam_id;
+ static const struct soc_device_attribute imx_soc[] = {
+ {.family = "Freescale i.MX"},
+ {},
+ };
struct device *dev;
struct device_node *nprop, *np;
struct caam_ctrl __iomem *ctrl;
@@ -452,9 +449,10 @@ static int caam_probe(struct platform_device *pdev)
dev = &pdev->dev;
dev_set_drvdata(dev, ctrlpriv);
- ctrlpriv->pdev = pdev;
nprop = pdev->dev.of_node;
+ caam_imx = (bool)soc_device_match(imx_soc);
+
/* Enable clocking */
clk = caam_drv_identify_clk(&pdev->dev, "ipg");
if (IS_ERR(clk)) {
@@ -483,14 +481,16 @@ static int caam_probe(struct platform_device *pdev)
}
ctrlpriv->caam_aclk = clk;
- clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- dev_err(&pdev->dev,
- "can't identify CAAM emi_slow clk: %d\n", ret);
- return ret;
+ if (!of_machine_is_compatible("fsl,imx6ul")) {
+ clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err(&pdev->dev,
+ "can't identify CAAM emi_slow clk: %d\n", ret);
+ return ret;
+ }
+ ctrlpriv->caam_emi_slow = clk;
}
- ctrlpriv->caam_emi_slow = clk;
ret = clk_prepare_enable(ctrlpriv->caam_ipg);
if (ret < 0) {
@@ -511,11 +511,13 @@ static int caam_probe(struct platform_device *pdev)
goto disable_caam_mem;
}
- ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
- if (ret < 0) {
- dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
- ret);
- goto disable_caam_aclk;
+ if (ctrlpriv->caam_emi_slow) {
+ ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
+ ret);
+ goto disable_caam_aclk;
+ }
}
/* Get configuration properties from device tree */
@@ -542,13 +544,13 @@ static int caam_probe(struct platform_device *pdev)
else
BLOCK_OFFSET = PG_SIZE_64K;
- ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl;
- ctrlpriv->assure = (struct caam_assurance __force *)
- ((uint8_t *)ctrl +
+ ctrlpriv->ctrl = (struct caam_ctrl __iomem __force *)ctrl;
+ ctrlpriv->assure = (struct caam_assurance __iomem __force *)
+ ((__force uint8_t *)ctrl +
BLOCK_OFFSET * ASSURE_BLOCK_NUMBER
);
- ctrlpriv->deco = (struct caam_deco __force *)
- ((uint8_t *)ctrl +
+ ctrlpriv->deco = (struct caam_deco __iomem __force *)
+ ((__force uint8_t *)ctrl +
BLOCK_OFFSET * DECO_BLOCK_NUMBER
);
@@ -557,12 +559,17 @@ static int caam_probe(struct platform_device *pdev)
/*
* Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
- * long pointers in master configuration register
+ * long pointers in master configuration register.
+ * In case of DPAA 2.x, Management Complex firmware performs
+ * the configuration.
*/
- clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
- MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
- MCFGR_WDENABLE | MCFGR_LARGE_BURST |
- (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
+ caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
+ if (!caam_dpaa2)
+ clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
+ MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
+ MCFGR_WDENABLE | MCFGR_LARGE_BURST |
+ (sizeof(dma_addr_t) == sizeof(u64) ?
+ MCFGR_LONG_PTR : 0));
/*
* Read the Compile Time parameters and SCFGR to determine
@@ -590,64 +597,67 @@ static int caam_probe(struct platform_device *pdev)
JRSTART_JR1_START | JRSTART_JR2_START |
JRSTART_JR3_START);
- if (sizeof(dma_addr_t) == sizeof(u64))
- if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
- dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
+ if (sizeof(dma_addr_t) == sizeof(u64)) {
+ if (caam_dpaa2)
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
+ else if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
else
- dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
- else
- dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
-
- /*
- * Detect and enable JobRs
- * First, find out how many ring spec'ed, allocate references
- * for all, then go probe each one.
- */
- rspec = 0;
- for_each_available_child_of_node(nprop, np)
- if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
- of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
- rspec++;
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
+ } else {
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ }
+ if (ret) {
+ dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
+ goto iounmap_ctrl;
+ }
- ctrlpriv->jrpdev = devm_kcalloc(&pdev->dev, rspec,
- sizeof(*ctrlpriv->jrpdev), GFP_KERNEL);
- if (ctrlpriv->jrpdev == NULL) {
- ret = -ENOMEM;
+ ret = of_platform_populate(nprop, caam_match, NULL, dev);
+ if (ret) {
+ dev_err(dev, "JR platform devices creation error\n");
goto iounmap_ctrl;
}
+#ifdef CONFIG_DEBUG_FS
+ /*
+ * FIXME: needs better naming distinction, as some amalgamation of
+ * "caam" and nprop->full_name. The OF name isn't distinctive,
+ * but does separate instances
+ */
+ perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
+
+ ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
+ ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
+#endif
ring = 0;
- ctrlpriv->total_jobrs = 0;
for_each_available_child_of_node(nprop, np)
if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
- ctrlpriv->jrpdev[ring] =
- of_platform_device_create(np, NULL, dev);
- if (!ctrlpriv->jrpdev[ring]) {
- pr_warn("JR%d Platform device creation error\n",
- ring);
- continue;
- }
- ctrlpriv->jr[ring] = (struct caam_job_ring __force *)
- ((uint8_t *)ctrl +
+ ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
+ ((__force uint8_t *)ctrl +
(ring + JR_BLOCK_NUMBER) *
BLOCK_OFFSET
);
ctrlpriv->total_jobrs++;
ring++;
- }
+ }
- /* Check to see if QI present. If so, enable */
- ctrlpriv->qi_present =
- !!(rd_reg32(&ctrl->perfmon.comp_parms_ms) &
- CTPR_MS_QI_MASK);
- if (ctrlpriv->qi_present) {
- ctrlpriv->qi = (struct caam_queue_if __force *)
- ((uint8_t *)ctrl +
+ /* Check to see if (DPAA 1.x) QI present. If so, enable */
+ ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
+ if (ctrlpriv->qi_present && !caam_dpaa2) {
+ ctrlpriv->qi = (struct caam_queue_if __iomem __force *)
+ ((__force uint8_t *)ctrl +
BLOCK_OFFSET * QI_BLOCK_NUMBER
);
/* This is all that's required to physically enable QI */
wr_reg32(&ctrlpriv->qi->qi_control_lo, QICTL_DQEN);
+
+ /* If QMAN driver is present, init CAAM-QI backend */
+#ifdef CONFIG_CAAM_QI
+ ret = caam_qi_init(pdev);
+ if (ret)
+ dev_err(dev, "caam qi i/f init failed: %d\n", ret);
+#endif
}
/* If no QI and no rings specified, quit and go home */
@@ -662,8 +672,10 @@ static int caam_probe(struct platform_device *pdev)
/*
* If SEC has RNG version >= 4 and RNG state handle has not been
* already instantiated, do RNG instantiation
+ * In case of DPAA 2.x, RNG is managed by MC firmware.
*/
- if ((cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
+ if (!caam_dpaa2 &&
+ (cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
ctrlpriv->rng4_sh_init =
rd_reg32(&ctrl->r4tst[0].rdsta);
/*
@@ -731,77 +743,46 @@ static int caam_probe(struct platform_device *pdev)
/* Report "alive" for developer to see */
dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
caam_get_era());
- dev_info(dev, "job rings = %d, qi = %d\n",
- ctrlpriv->total_jobrs, ctrlpriv->qi_present);
+ dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n",
+ ctrlpriv->total_jobrs, ctrlpriv->qi_present,
+ caam_dpaa2 ? "yes" : "no");
#ifdef CONFIG_DEBUG_FS
- /*
- * FIXME: needs better naming distinction, as some amalgamation of
- * "caam" and nprop->full_name. The OF name isn't distinctive,
- * but does separate instances
- */
- perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
-
- ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
- ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
-
- /* Controller-level - performance monitor counters */
-
- ctrlpriv->ctl_rq_dequeued =
- debugfs_create_file("rq_dequeued",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->req_dequeued,
- &caam_fops_u64_ro);
- ctrlpriv->ctl_ob_enc_req =
- debugfs_create_file("ob_rq_encrypted",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ob_enc_req,
- &caam_fops_u64_ro);
- ctrlpriv->ctl_ib_dec_req =
- debugfs_create_file("ib_rq_decrypted",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ib_dec_req,
- &caam_fops_u64_ro);
- ctrlpriv->ctl_ob_enc_bytes =
- debugfs_create_file("ob_bytes_encrypted",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ob_enc_bytes,
- &caam_fops_u64_ro);
- ctrlpriv->ctl_ob_prot_bytes =
- debugfs_create_file("ob_bytes_protected",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ob_prot_bytes,
- &caam_fops_u64_ro);
- ctrlpriv->ctl_ib_dec_bytes =
- debugfs_create_file("ib_bytes_decrypted",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ib_dec_bytes,
- &caam_fops_u64_ro);
- ctrlpriv->ctl_ib_valid_bytes =
- debugfs_create_file("ib_bytes_validated",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ib_valid_bytes,
- &caam_fops_u64_ro);
+ debugfs_create_file("rq_dequeued", S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->req_dequeued,
+ &caam_fops_u64_ro);
+ debugfs_create_file("ob_rq_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->ob_enc_req,
+ &caam_fops_u64_ro);
+ debugfs_create_file("ib_rq_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->ib_dec_req,
+ &caam_fops_u64_ro);
+ debugfs_create_file("ob_bytes_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->ob_enc_bytes,
+ &caam_fops_u64_ro);
+ debugfs_create_file("ob_bytes_protected", S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->ob_prot_bytes,
+ &caam_fops_u64_ro);
+ debugfs_create_file("ib_bytes_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->ib_dec_bytes,
+ &caam_fops_u64_ro);
+ debugfs_create_file("ib_bytes_validated", S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->ib_valid_bytes,
+ &caam_fops_u64_ro);
/* Controller level - global status values */
- ctrlpriv->ctl_faultaddr =
- debugfs_create_file("fault_addr",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->faultaddr,
- &caam_fops_u32_ro);
- ctrlpriv->ctl_faultdetail =
- debugfs_create_file("fault_detail",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->faultdetail,
- &caam_fops_u32_ro);
- ctrlpriv->ctl_faultstatus =
- debugfs_create_file("fault_status",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->status,
- &caam_fops_u32_ro);
+ debugfs_create_file("fault_addr", S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->faultaddr,
+ &caam_fops_u32_ro);
+ debugfs_create_file("fault_detail", S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->faultdetail,
+ &caam_fops_u32_ro);
+ debugfs_create_file("fault_status", S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->status,
+ &caam_fops_u32_ro);
/* Internal covering keys (useful in non-secure mode only) */
- ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0];
+ ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0];
ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
ctrlpriv->ctl_kek = debugfs_create_blob("kek",
S_IRUSR |
@@ -809,7 +790,7 @@ static int caam_probe(struct platform_device *pdev)
ctrlpriv->ctl,
&ctrlpriv->ctl_kek_wrap);
- ctrlpriv->ctl_tkek_wrap.data = &ctrlpriv->ctrl->tkek[0];
+ ctrlpriv->ctl_tkek_wrap.data = (__force void *)&ctrlpriv->ctrl->tkek[0];
ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
S_IRUSR |
@@ -817,7 +798,7 @@ static int caam_probe(struct platform_device *pdev)
ctrlpriv->ctl,
&ctrlpriv->ctl_tkek_wrap);
- ctrlpriv->ctl_tdsk_wrap.data = &ctrlpriv->ctrl->tdsk[0];
+ ctrlpriv->ctl_tdsk_wrap.data = (__force void *)&ctrlpriv->ctrl->tdsk[0];
ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
S_IRUSR |
@@ -828,13 +809,17 @@ static int caam_probe(struct platform_device *pdev)
return 0;
caam_remove:
+#ifdef CONFIG_DEBUG_FS
+ debugfs_remove_recursive(ctrlpriv->dfs_root);
+#endif
caam_remove(pdev);
return ret;
iounmap_ctrl:
iounmap(ctrl);
disable_caam_emi_slow:
- clk_disable_unprepare(ctrlpriv->caam_emi_slow);
+ if (ctrlpriv->caam_emi_slow)
+ clk_disable_unprepare(ctrlpriv->caam_emi_slow);
disable_caam_aclk:
clk_disable_unprepare(ctrlpriv->caam_aclk);
disable_caam_mem:
@@ -844,17 +829,6 @@ disable_caam_ipg:
return ret;
}
-static struct of_device_id caam_match[] = {
- {
- .compatible = "fsl,sec-v4.0",
- },
- {
- .compatible = "fsl,sec4.0",
- },
- {},
-};
-MODULE_DEVICE_TABLE(of, caam_match);
-
static struct platform_driver caam_driver = {
.driver = {
.name = "caam",
diff --git a/drivers/crypto/caam/ctrl.h b/drivers/crypto/caam/ctrl.h
index cac5402..7e7bf68 100644
--- a/drivers/crypto/caam/ctrl.h
+++ b/drivers/crypto/caam/ctrl.h
@@ -10,4 +10,6 @@
/* Prototypes for backend-level services exposed to APIs */
int caam_get_era(void);
+extern bool caam_dpaa2;
+
#endif /* CTRL_H */
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index 513b664..a8c3be7 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -22,12 +22,6 @@
#define SEC4_SG_LEN_MASK 0x3fffffff /* Excludes EXT and FINAL */
#define SEC4_SG_OFFSET_MASK 0x00001fff
-struct sec4_sg_entry {
- u64 ptr;
- u32 len;
- u32 bpid_offset;
-};
-
/* Max size of any CAAM descriptor in 32-bit words, inclusive of header */
#define MAX_CAAM_DESCSIZE 64
@@ -47,6 +41,7 @@ struct sec4_sg_entry {
#define CMD_SEQ_LOAD (0x03 << CMD_SHIFT)
#define CMD_FIFO_LOAD (0x04 << CMD_SHIFT)
#define CMD_SEQ_FIFO_LOAD (0x05 << CMD_SHIFT)
+#define CMD_MOVEB (0x07 << CMD_SHIFT)
#define CMD_STORE (0x0a << CMD_SHIFT)
#define CMD_SEQ_STORE (0x0b << CMD_SHIFT)
#define CMD_FIFO_STORE (0x0c << CMD_SHIFT)
@@ -90,8 +85,8 @@ struct sec4_sg_entry {
#define HDR_ZRO 0x00008000
/* Start Index or SharedDesc Length */
-#define HDR_START_IDX_MASK 0x3f
#define HDR_START_IDX_SHIFT 16
+#define HDR_START_IDX_MASK (0x3f << HDR_START_IDX_SHIFT)
/* If shared descriptor header, 6-bit length */
#define HDR_DESCLEN_SHR_MASK 0x3f
@@ -121,10 +116,10 @@ struct sec4_sg_entry {
#define HDR_PROP_DNR 0x00000800
/* JobDesc/SharedDesc share property */
-#define HDR_SD_SHARE_MASK 0x03
#define HDR_SD_SHARE_SHIFT 8
-#define HDR_JD_SHARE_MASK 0x07
+#define HDR_SD_SHARE_MASK (0x03 << HDR_SD_SHARE_SHIFT)
#define HDR_JD_SHARE_SHIFT 8
+#define HDR_JD_SHARE_MASK (0x07 << HDR_JD_SHARE_SHIFT)
#define HDR_SHARE_NEVER (0x00 << HDR_SD_SHARE_SHIFT)
#define HDR_SHARE_WAIT (0x01 << HDR_SD_SHARE_SHIFT)
@@ -235,7 +230,7 @@ struct sec4_sg_entry {
#define LDST_SRCDST_WORD_DECO_MATH2 (0x0a << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_DECO_AAD_SZ (0x0b << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_DECO_MATH3 (0x0b << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_CLASS1_ICV_SZ (0x0c << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CLASS1_IV_SZ (0x0c << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_ALTDS_CLASS1 (0x0f << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_PKHA_A_SZ (0x10 << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_PKHA_B_SZ (0x11 << LDST_SRCDST_SHIFT)
@@ -360,6 +355,7 @@ struct sec4_sg_entry {
#define FIFOLD_TYPE_PK_N (0x08 << FIFOLD_TYPE_SHIFT)
#define FIFOLD_TYPE_PK_A (0x0c << FIFOLD_TYPE_SHIFT)
#define FIFOLD_TYPE_PK_B (0x0d << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_IFIFO (0x0f << FIFOLD_TYPE_SHIFT)
/* Other types. Need to OR in last/flush bits as desired */
#define FIFOLD_TYPE_MSG_MASK (0x38 << FIFOLD_TYPE_SHIFT)
@@ -400,7 +396,7 @@ struct sec4_sg_entry {
#define FIFOST_TYPE_PKHA_N (0x08 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_A (0x0c << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_B (0x0d << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_AF_SBOX_JKEK (0x10 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_AF_SBOX_JKEK (0x20 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_E_JKEK (0x22 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_E_TKEK (0x23 << FIFOST_TYPE_SHIFT)
@@ -413,6 +409,7 @@ struct sec4_sg_entry {
#define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_RNGSTORE (0x34 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_RNGFIFO (0x35 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_METADATA (0x3e << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_SKIP (0x3f << FIFOST_TYPE_SHIFT)
/*
@@ -1107,8 +1104,8 @@ struct sec4_sg_entry {
/* For non-protocol/alg-only op commands */
#define OP_ALG_TYPE_SHIFT 24
#define OP_ALG_TYPE_MASK (0x7 << OP_ALG_TYPE_SHIFT)
-#define OP_ALG_TYPE_CLASS1 2
-#define OP_ALG_TYPE_CLASS2 4
+#define OP_ALG_TYPE_CLASS1 (2 << OP_ALG_TYPE_SHIFT)
+#define OP_ALG_TYPE_CLASS2 (4 << OP_ALG_TYPE_SHIFT)
#define OP_ALG_ALGSEL_SHIFT 16
#define OP_ALG_ALGSEL_MASK (0xff << OP_ALG_ALGSEL_SHIFT)
@@ -1249,7 +1246,7 @@ struct sec4_sg_entry {
#define OP_ALG_PKMODE_MOD_PRIMALITY 0x00f
/* PKHA mode copy-memory functions */
-#define OP_ALG_PKMODE_SRC_REG_SHIFT 13
+#define OP_ALG_PKMODE_SRC_REG_SHIFT 17
#define OP_ALG_PKMODE_SRC_REG_MASK (7 << OP_ALG_PKMODE_SRC_REG_SHIFT)
#define OP_ALG_PKMODE_DST_REG_SHIFT 10
#define OP_ALG_PKMODE_DST_REG_MASK (7 << OP_ALG_PKMODE_DST_REG_SHIFT)
@@ -1445,10 +1442,11 @@ struct sec4_sg_entry {
#define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT)
#define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT)
#define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT)
-#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC0_SHIFT)
+#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC1_SHIFT)
#define MATH_SRC1_INFIFO (0x0a << MATH_SRC1_SHIFT)
#define MATH_SRC1_OUTFIFO (0x0b << MATH_SRC1_SHIFT)
#define MATH_SRC1_ONE (0x0c << MATH_SRC1_SHIFT)
+#define MATH_SRC1_ZERO (0x0f << MATH_SRC1_SHIFT)
/* Destination selectors */
#define MATH_DEST_SHIFT 8
@@ -1629,4 +1627,31 @@ struct sec4_sg_entry {
/* Frame Descriptor Command for Replacement Job Descriptor */
#define FD_CMD_REPLACE_JOB_DESC 0x20000000
+/* CHA Control Register bits */
+#define CCTRL_RESET_CHA_ALL 0x1
+#define CCTRL_RESET_CHA_AESA 0x2
+#define CCTRL_RESET_CHA_DESA 0x4
+#define CCTRL_RESET_CHA_AFHA 0x8
+#define CCTRL_RESET_CHA_KFHA 0x10
+#define CCTRL_RESET_CHA_SF8A 0x20
+#define CCTRL_RESET_CHA_PKHA 0x40
+#define CCTRL_RESET_CHA_MDHA 0x80
+#define CCTRL_RESET_CHA_CRCA 0x100
+#define CCTRL_RESET_CHA_RNG 0x200
+#define CCTRL_RESET_CHA_SF9A 0x400
+#define CCTRL_RESET_CHA_ZUCE 0x800
+#define CCTRL_RESET_CHA_ZUCA 0x1000
+#define CCTRL_UNLOAD_PK_A0 0x10000
+#define CCTRL_UNLOAD_PK_A1 0x20000
+#define CCTRL_UNLOAD_PK_A2 0x40000
+#define CCTRL_UNLOAD_PK_A3 0x80000
+#define CCTRL_UNLOAD_PK_B0 0x100000
+#define CCTRL_UNLOAD_PK_B1 0x200000
+#define CCTRL_UNLOAD_PK_B2 0x400000
+#define CCTRL_UNLOAD_PK_B3 0x800000
+#define CCTRL_UNLOAD_PK_N 0x1000000
+#define CCTRL_UNLOAD_PK_A 0x4000000
+#define CCTRL_UNLOAD_PK_B 0x8000000
+#define CCTRL_UNLOAD_SBOX 0x10000000
+
#endif /* DESC_H */
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index a8cd8a7..2d9dbec 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -4,6 +4,9 @@
* Copyright 2008-2012 Freescale Semiconductor, Inc.
*/
+#ifndef DESC_CONSTR_H
+#define DESC_CONSTR_H
+
#include "desc.h"
#include "regs.h"
@@ -33,38 +36,39 @@
extern bool caam_little_end;
-static inline int desc_len(u32 *desc)
+static inline int desc_len(u32 * const desc)
{
return caam32_to_cpu(*desc) & HDR_DESCLEN_MASK;
}
-static inline int desc_bytes(void *desc)
+static inline int desc_bytes(void * const desc)
{
return desc_len(desc) * CAAM_CMD_SZ;
}
-static inline u32 *desc_end(u32 *desc)
+static inline u32 *desc_end(u32 * const desc)
{
return desc + desc_len(desc);
}
-static inline void *sh_desc_pdb(u32 *desc)
+static inline void *sh_desc_pdb(u32 * const desc)
{
return desc + 1;
}
-static inline void init_desc(u32 *desc, u32 options)
+static inline void init_desc(u32 * const desc, u32 options)
{
*desc = cpu_to_caam32((options | HDR_ONE) + 1);
}
-static inline void init_sh_desc(u32 *desc, u32 options)
+static inline void init_sh_desc(u32 * const desc, u32 options)
{
PRINT_POS;
init_desc(desc, CMD_SHARED_DESC_HDR | options);
}
-static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
+static inline void init_sh_desc_pdb(u32 * const desc, u32 options,
+ size_t pdb_bytes)
{
u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
@@ -72,19 +76,20 @@ static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
options);
}
-static inline void init_job_desc(u32 *desc, u32 options)
+static inline void init_job_desc(u32 * const desc, u32 options)
{
init_desc(desc, CMD_DESC_HDR | options);
}
-static inline void init_job_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
+static inline void init_job_desc_pdb(u32 * const desc, u32 options,
+ size_t pdb_bytes)
{
u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
init_job_desc(desc, (((pdb_len + 1) << HDR_START_IDX_SHIFT)) | options);
}
-static inline void append_ptr(u32 *desc, dma_addr_t ptr)
+static inline void append_ptr(u32 * const desc, dma_addr_t ptr)
{
dma_addr_t *offset = (dma_addr_t *)desc_end(desc);
@@ -94,8 +99,8 @@ static inline void append_ptr(u32 *desc, dma_addr_t ptr)
CAAM_PTR_SZ / CAAM_CMD_SZ);
}
-static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len,
- u32 options)
+static inline void init_job_desc_shared(u32 * const desc, dma_addr_t ptr,
+ int len, u32 options)
{
PRINT_POS;
init_job_desc(desc, HDR_SHARED | options |
@@ -103,7 +108,7 @@ static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len,
append_ptr(desc, ptr);
}
-static inline void append_data(u32 *desc, void *data, int len)
+static inline void append_data(u32 * const desc, void *data, int len)
{
u32 *offset = desc_end(desc);
@@ -114,7 +119,7 @@ static inline void append_data(u32 *desc, void *data, int len)
(len + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ);
}
-static inline void append_cmd(u32 *desc, u32 command)
+static inline void append_cmd(u32 * const desc, u32 command)
{
u32 *cmd = desc_end(desc);
@@ -125,7 +130,7 @@ static inline void append_cmd(u32 *desc, u32 command)
#define append_u32 append_cmd
-static inline void append_u64(u32 *desc, u64 data)
+static inline void append_u64(u32 * const desc, u64 data)
{
u32 *offset = desc_end(desc);
@@ -142,14 +147,14 @@ static inline void append_u64(u32 *desc, u64 data)
}
/* Write command without affecting header, and return pointer to next word */
-static inline u32 *write_cmd(u32 *desc, u32 command)
+static inline u32 *write_cmd(u32 * const desc, u32 command)
{
*desc = cpu_to_caam32(command);
return desc + 1;
}
-static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len,
+static inline void append_cmd_ptr(u32 * const desc, dma_addr_t ptr, int len,
u32 command)
{
append_cmd(desc, command | len);
@@ -157,7 +162,7 @@ static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len,
}
/* Write length after pointer, rather than inside command */
-static inline void append_cmd_ptr_extlen(u32 *desc, dma_addr_t ptr,
+static inline void append_cmd_ptr_extlen(u32 * const desc, dma_addr_t ptr,
unsigned int len, u32 command)
{
append_cmd(desc, command);
@@ -166,7 +171,7 @@ static inline void append_cmd_ptr_extlen(u32 *desc, dma_addr_t ptr,
append_cmd(desc, len);
}
-static inline void append_cmd_data(u32 *desc, void *data, int len,
+static inline void append_cmd_data(u32 * const desc, void *data, int len,
u32 command)
{
append_cmd(desc, command | IMMEDIATE | len);
@@ -174,7 +179,7 @@ static inline void append_cmd_data(u32 *desc, void *data, int len,
}
#define APPEND_CMD_RET(cmd, op) \
-static inline u32 *append_##cmd(u32 *desc, u32 options) \
+static inline u32 *append_##cmd(u32 * const desc, u32 options) \
{ \
u32 *cmd = desc_end(desc); \
PRINT_POS; \
@@ -183,14 +188,15 @@ static inline u32 *append_##cmd(u32 *desc, u32 options) \
}
APPEND_CMD_RET(jump, JUMP)
APPEND_CMD_RET(move, MOVE)
+APPEND_CMD_RET(moveb, MOVEB)
-static inline void set_jump_tgt_here(u32 *desc, u32 *jump_cmd)
+static inline void set_jump_tgt_here(u32 * const desc, u32 *jump_cmd)
{
*jump_cmd = cpu_to_caam32(caam32_to_cpu(*jump_cmd) |
(desc_len(desc) - (jump_cmd - desc)));
}
-static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd)
+static inline void set_move_tgt_here(u32 * const desc, u32 *move_cmd)
{
u32 val = caam32_to_cpu(*move_cmd);
@@ -200,7 +206,7 @@ static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd)
}
#define APPEND_CMD(cmd, op) \
-static inline void append_##cmd(u32 *desc, u32 options) \
+static inline void append_##cmd(u32 * const desc, u32 options) \
{ \
PRINT_POS; \
append_cmd(desc, CMD_##op | options); \
@@ -208,7 +214,8 @@ static inline void append_##cmd(u32 *desc, u32 options) \
APPEND_CMD(operation, OPERATION)
#define APPEND_CMD_LEN(cmd, op) \
-static inline void append_##cmd(u32 *desc, unsigned int len, u32 options) \
+static inline void append_##cmd(u32 * const desc, unsigned int len, \
+ u32 options) \
{ \
PRINT_POS; \
append_cmd(desc, CMD_##op | len | options); \
@@ -220,8 +227,8 @@ APPEND_CMD_LEN(seq_fifo_load, SEQ_FIFO_LOAD)
APPEND_CMD_LEN(seq_fifo_store, SEQ_FIFO_STORE)
#define APPEND_CMD_PTR(cmd, op) \
-static inline void append_##cmd(u32 *desc, dma_addr_t ptr, unsigned int len, \
- u32 options) \
+static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \
+ unsigned int len, u32 options) \
{ \
PRINT_POS; \
append_cmd_ptr(desc, ptr, len, CMD_##op | options); \
@@ -231,8 +238,8 @@ APPEND_CMD_PTR(load, LOAD)
APPEND_CMD_PTR(fifo_load, FIFO_LOAD)
APPEND_CMD_PTR(fifo_store, FIFO_STORE)
-static inline void append_store(u32 *desc, dma_addr_t ptr, unsigned int len,
- u32 options)
+static inline void append_store(u32 * const desc, dma_addr_t ptr,
+ unsigned int len, u32 options)
{
u32 cmd_src;
@@ -249,7 +256,8 @@ static inline void append_store(u32 *desc, dma_addr_t ptr, unsigned int len,
}
#define APPEND_SEQ_PTR_INTLEN(cmd, op) \
-static inline void append_seq_##cmd##_ptr_intlen(u32 *desc, dma_addr_t ptr, \
+static inline void append_seq_##cmd##_ptr_intlen(u32 * const desc, \
+ dma_addr_t ptr, \
unsigned int len, \
u32 options) \
{ \
@@ -263,7 +271,7 @@ APPEND_SEQ_PTR_INTLEN(in, IN)
APPEND_SEQ_PTR_INTLEN(out, OUT)
#define APPEND_CMD_PTR_TO_IMM(cmd, op) \
-static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
+static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
unsigned int len, u32 options) \
{ \
PRINT_POS; \
@@ -273,7 +281,7 @@ APPEND_CMD_PTR_TO_IMM(load, LOAD);
APPEND_CMD_PTR_TO_IMM(fifo_load, FIFO_LOAD);
#define APPEND_CMD_PTR_EXTLEN(cmd, op) \
-static inline void append_##cmd##_extlen(u32 *desc, dma_addr_t ptr, \
+static inline void append_##cmd##_extlen(u32 * const desc, dma_addr_t ptr, \
unsigned int len, u32 options) \
{ \
PRINT_POS; \
@@ -287,7 +295,7 @@ APPEND_CMD_PTR_EXTLEN(seq_out_ptr, SEQ_OUT_PTR)
* the size of its type
*/
#define APPEND_CMD_PTR_LEN(cmd, op, type) \
-static inline void append_##cmd(u32 *desc, dma_addr_t ptr, \
+static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \
type len, u32 options) \
{ \
PRINT_POS; \
@@ -304,7 +312,7 @@ APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_PTR, u32)
* from length of immediate data provided, e.g., split keys
*/
#define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
-static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
+static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
unsigned int data_len, \
unsigned int len, u32 options) \
{ \
@@ -315,7 +323,7 @@ static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
APPEND_CMD_PTR_TO_IMM2(key, KEY);
#define APPEND_CMD_RAW_IMM(cmd, op, type) \
-static inline void append_##cmd##_imm_##type(u32 *desc, type immediate, \
+static inline void append_##cmd##_imm_##type(u32 * const desc, type immediate, \
u32 options) \
{ \
PRINT_POS; \
@@ -426,3 +434,66 @@ do { \
APPEND_MATH_IMM_u64(LSHIFT, desc, dest, src0, src1, data)
#define append_math_rshift_imm_u64(desc, dest, src0, src1, data) \
APPEND_MATH_IMM_u64(RSHIFT, desc, dest, src0, src1, data)
+
+/**
+ * struct alginfo - Container for algorithm details
+ * @algtype: algorithm selector; for valid values, see documentation of the
+ * functions where it is used.
+ * @keylen: length of the provided algorithm key, in bytes
+ * @keylen_pad: padded length of the provided algorithm key, in bytes
+ * @key: address where algorithm key resides; virtual address if key_inline
+ * is true, dma (bus) address if key_inline is false.
+ * @key_inline: true - key can be inlined in the descriptor; false - key is
+ * referenced by the descriptor
+ */
+struct alginfo {
+ u32 algtype;
+ unsigned int keylen;
+ unsigned int keylen_pad;
+ union {
+ dma_addr_t key_dma;
+ void *key_virt;
+ };
+ bool key_inline;
+};
+
+/**
+ * desc_inline_query() - Provide indications on which data items can be inlined
+ * and which shall be referenced in a shared descriptor.
+ * @sd_base_len: Shared descriptor base length - bytes consumed by the commands,
+ * excluding the data items to be inlined (or corresponding
+ * pointer if an item is not inlined). Each cnstr_* function that
+ * generates descriptors should have a define mentioning
+ * corresponding length.
+ * @jd_len: Maximum length of the job descriptor(s) that will be used
+ * together with the shared descriptor.
+ * @data_len: Array of lengths of the data items to be inlined
+ * @inl_mask: 32bit mask with bit x = 1 if data item x can be inlined, 0
+ * otherwise.
+ * @count: Number of data items (size of @data_len array); must be <= 32
+ *
+ * Return: 0 if data can be inlined / referenced, negative value if not. If 0,
+ * check @inl_mask for details.
+ */
+static inline int desc_inline_query(unsigned int sd_base_len,
+ unsigned int jd_len, unsigned int *data_len,
+ u32 *inl_mask, unsigned int count)
+{
+ int rem_bytes = (int)(CAAM_DESC_BYTES_MAX - sd_base_len - jd_len);
+ unsigned int i;
+
+ *inl_mask = 0;
+ for (i = 0; (i < count) && (rem_bytes > 0); i++) {
+ if (rem_bytes - (int)(data_len[i] +
+ (count - i - 1) * CAAM_PTR_SZ) >= 0) {
+ rem_bytes -= data_len[i];
+ *inl_mask |= (1 << i);
+ } else {
+ rem_bytes -= CAAM_PTR_SZ;
+ }
+ }
+
+ return (rem_bytes >= 0) ? 0 : -1;
+}
+
+#endif /* DESC_CONSTR_H */
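/*
 * Illustrative usage sketch for desc_inline_query() and struct alginfo: a
 * cnstr_* routine could decide at runtime whether a key is placed inline in
 * the shared descriptor or referenced by bus address. The function name and
 * parameters below are hypothetical; sd_base_len and jd_len are assumed to
 * come from the descriptor-specific length defines mentioned above.
 */
static void example_pick_key_location(struct alginfo *adata, void *key_virt,
				      dma_addr_t key_dma,
				      unsigned int sd_base_len,
				      unsigned int jd_len)
{
	unsigned int data_len = adata->keylen_pad;
	u32 inl_mask;

	/* negative return: the key does not fit even when referenced */
	if (desc_inline_query(sd_base_len, jd_len, &data_len, &inl_mask, 1))
		return;

	adata->key_inline = !!(inl_mask & 1);
	if (adata->key_inline)
		adata->key_virt = key_virt;	/* copied into the descriptor */
	else
		adata->key_dma = key_dma;	/* referenced via a pointer */
}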
diff --git a/drivers/crypto/caam/dpseci.c b/drivers/crypto/caam/dpseci.c
new file mode 100644
index 0000000..410cd79
--- /dev/null
+++ b/drivers/crypto/caam/dpseci.c
@@ -0,0 +1,859 @@
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the names of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "../../../drivers/staging/fsl-mc/include/mc-sys.h"
+#include "../../../drivers/staging/fsl-mc/include/mc-cmd.h"
+#include "../../../drivers/staging/fsl-mc/include/dpopr.h"
+#include "dpseci.h"
+#include "dpseci_cmd.h"
+
+/**
+ * dpseci_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpseci_id: DPSECI unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an already created
+ * object; an object may have been declared in the DPL or by calling the
+ * dpseci_create() function.
+ * This function returns a unique authentication token, associated with the
+ * specific object ID and the specific MC portal; this token must be used in all
+ * subsequent commands for this specific object.
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
+ u16 *token)
+{
+ struct mc_command cmd = { 0 };
+ struct dpseci_cmd_open *cmd_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ cmd_params = (struct dpseci_cmd_open *)cmd.params;
+ cmd_params->dpseci_id = cpu_to_le32(dpseci_id);
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+
+/**
+ * dpseci_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ *
+ * After this function is called, no further operations are allowed on the
+ * object without opening a new control session.
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLOSE,
+ cmd_flags,
+ token);
+ return mc_send_command(mc_io, &cmd);
+}
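/*
 * Illustrative usage sketch: a typical DPSECI control session opens the
 * object, performs its queries/configuration, then closes the token. The
 * function name is hypothetical; mc_io and dpseci_id would normally come
 * from the fsl-mc bus probe path, and cmd_flags is left at 0 here.
 */
static int example_query_dpseci(struct fsl_mc_io *mc_io, int dpseci_id)
{
	struct dpseci_attr attr;
	u16 token;
	int err;

	err = dpseci_open(mc_io, 0, dpseci_id, &token);
	if (err)
		return err;

	err = dpseci_get_attributes(mc_io, 0, token, &attr);
	if (!err)
		pr_info("dpseci.%d: %d Tx / %d Rx queues\n", attr.id,
			attr.num_tx_queues, attr.num_rx_queues);

	dpseci_close(mc_io, 0, token);
	return err;
}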
+
+/**
+ * dpseci_create() - Create the DPSECI object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @cfg: Configuration structure
+ * @obj_id: returned object id
+ *
+ * Create the DPSECI object, allocate required resources and perform required
+ * initialization.
+ *
+ * The object can be created either by declaring it in the DPL file, or by
+ * calling this function.
+ *
+ * The function accepts an authentication token of a parent container that this
+ * object should be assigned to. The token can be '0' so the object will be
+ * assigned to the default container.
+ * The newly created object can be opened with the returned object id and using
+ * the container's associated tokens and MC portals.
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_create(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
+ const struct dpseci_cfg *cfg, u32 *obj_id)
+{
+ struct mc_command cmd = { 0 };
+ struct dpseci_cmd_create *cmd_params;
+ int i, err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CREATE,
+ cmd_flags,
+ dprc_token);
+ cmd_params = (struct dpseci_cmd_create *)cmd.params;
+ for (i = 0; i < 8; i++)
+ cmd_params->priorities[i] = cfg->priorities[i];
+ cmd_params->num_tx_queues = cfg->num_tx_queues;
+ cmd_params->num_rx_queues = cfg->num_rx_queues;
+ cmd_params->options = cpu_to_le32(cfg->options);
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ *obj_id = mc_cmd_read_object_id(&cmd);
+
+ return 0;
+}
+
+/**
+ * dpseci_destroy() - Destroy the DPSECI object and release all its resources
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @object_id: The object id; it must be a valid id within the container that
+ * created this object
+ *
+ * The function accepts the authentication token of the parent container that
+ * created the object (not the one that currently owns the object). The object
+ * is searched within parent using the provided 'object_id'.
+ * All tokens to the object must be closed before calling destroy.
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_destroy(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
+ u32 object_id)
+{
+ struct mc_command cmd = { 0 };
+ struct dpseci_cmd_destroy *cmd_params;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DESTROY,
+ cmd_flags,
+ dprc_token);
+ cmd_params = (struct dpseci_cmd_destroy *)cmd.params;
+ cmd_params->object_id = cpu_to_le32(object_id);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpseci_enable() - Enable the DPSECI, allow sending and receiving frames
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_ENABLE,
+ cmd_flags,
+ token);
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpseci_disable() - Disable the DPSECI, stop sending and receiving frames
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DISABLE,
+ cmd_flags,
+ token);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpseci_is_enabled() - Check if the DPSECI is enabled.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @en: Returns '1' if object is enabled; '0' otherwise
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ int *en)
+{
+ struct mc_command cmd = { 0 };
+ struct dpseci_rsp_is_enabled *rsp_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_IS_ENABLED,
+ cmd_flags,
+ token);
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpseci_rsp_is_enabled *)cmd.params;
+ *en = le32_to_cpu(rsp_params->is_enabled);
+
+ return 0;
+}
+
+/**
+ * dpseci_reset() - Reset the DPSECI, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_RESET,
+ cmd_flags,
+ token);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpseci_get_irq_enable() - Get overall interrupt state
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @irq_index: The interrupt index to configure
+ * @en: Returned Interrupt state - enable = 1, disable = 0
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u8 *en)
+{
+ struct mc_command cmd = { 0 };
+ struct dpseci_cmd_irq_enable *cmd_params;
+ struct dpseci_rsp_get_irq_enable *rsp_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_ENABLE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpseci_cmd_irq_enable *)cmd.params;
+ cmd_params->irq_index = irq_index;
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpseci_rsp_get_irq_enable *)cmd.params;
+ *en = rsp_params->enable_state;
+
+ return 0;
+}
+
+/**
+ * dpseci_set_irq_enable() - Set overall interrupt state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @irq_index: The interrupt index to configure
+ * @en: Interrupt state - enable = 1, disable = 0
+ *
+ * Allows GPP software to control when interrupts are generated.
+ * Each interrupt can have up to 32 causes. The enable/disable controls the
+ * overall interrupt state. If the interrupt is disabled, no causes will cause
+ * an interrupt.
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u8 en)
+{
+ struct mc_command cmd = { 0 };
+ struct dpseci_cmd_irq_enable *cmd_params;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_ENABLE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpseci_cmd_irq_enable *)cmd.params;
+ cmd_params->irq_index = irq_index;
+ cmd_params->enable_state = en;
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpseci_get_irq_mask() - Get interrupt mask.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @irq_index: The interrupt index to configure
+ * @mask: Returned event mask to trigger interrupt
+ *
+ * Every interrupt can have up to 32 causes and the interrupt model supports
+ * masking/unmasking each cause independently.
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u32 *mask)
+{
+ struct mc_command cmd = { 0 };
+ struct dpseci_cmd_irq_mask *cmd_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_MASK,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpseci_cmd_irq_mask *)cmd.params;
+ cmd_params->irq_index = irq_index;
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ *mask = le32_to_cpu(cmd_params->mask);
+
+ return 0;
+}
+
+/**
+ * dpseci_set_irq_mask() - Set interrupt mask.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @irq_index: The interrupt index to configure
+ * @mask: event mask to trigger interrupt;
+ * each bit:
+ * 0 = ignore event
+ * 1 = consider event for asserting IRQ
+ *
+ * Every interrupt can have up to 32 causes and the interrupt model supports
+ * masking/unmasking each cause independently
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u32 mask)
+{
+ struct mc_command cmd = { 0 };
+ struct dpseci_cmd_irq_mask *cmd_params;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_MASK,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpseci_cmd_irq_mask *)cmd.params;
+ cmd_params->mask = cpu_to_le32(mask);
+ cmd_params->irq_index = irq_index;
+
+ return mc_send_command(mc_io, &cmd);
+}
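/*
 * Illustrative usage sketch: unmask all 32 possible causes on interrupt
 * index 0, then turn on the overall interrupt enable. The function name,
 * the index and the all-ones mask are assumptions for illustration.
 */
static int example_enable_dpseci_irq(struct fsl_mc_io *mc_io, u16 token)
{
	int err;

	err = dpseci_set_irq_mask(mc_io, 0, token, 0, 0xffffffff);
	if (err)
		return err;

	/* no cause can raise an interrupt until the overall enable is set */
	return dpseci_set_irq_enable(mc_io, 0, token, 0, 1);
}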
+
+/**
+ * dpseci_get_irq_status() - Get the current status of any pending interrupts
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @irq_index: The interrupt index to configure
+ * @status: Returned interrupts status - one bit per cause:
+ * 0 = no interrupt pending
+ * 1 = interrupt pending
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u32 *status)
+{
+ struct mc_command cmd = { 0 };
+ struct dpseci_cmd_irq_status *cmd_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_STATUS,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpseci_cmd_irq_status *)cmd.params;
+ cmd_params->status = cpu_to_le32(*status);
+ cmd_params->irq_index = irq_index;
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ *status = le32_to_cpu(cmd_params->status);
+
+ return 0;
+}
+
+/**
+ * dpseci_clear_irq_status() - Clear a pending interrupt's status
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @irq_index: The interrupt index to configure
+ * @status: bits to clear (W1C) - one bit per cause:
+ * 0 = don't change
+ * 1 = clear status bit
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u32 status)
+{
+ struct mc_command cmd = { 0 };
+ struct dpseci_cmd_irq_status *cmd_params;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLEAR_IRQ_STATUS,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpseci_cmd_irq_status *)cmd.params;
+ cmd_params->status = cpu_to_le32(status);
+ cmd_params->irq_index = irq_index;
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpseci_get_attributes() - Retrieve DPSECI attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @attr: Returned object's attributes
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ struct dpseci_attr *attr)
+{
+ struct mc_command cmd = { 0 };
+ struct dpseci_rsp_get_attributes *rsp_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpseci_rsp_get_attributes *)cmd.params;
+ attr->id = le32_to_cpu(rsp_params->id);
+ attr->num_tx_queues = rsp_params->num_tx_queues;
+ attr->num_rx_queues = rsp_params->num_rx_queues;
+ attr->options = le32_to_cpu(rsp_params->options);
+
+ return 0;
+}
+
+/**
+ * dpseci_set_rx_queue() - Set Rx queue configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @queue: Select the queue relative to number of priorities configured at
+ * DPSECI creation; use DPSECI_ALL_QUEUES to configure all
+ * Rx queues identically.
+ * @cfg: Rx queue configuration
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 queue, const struct dpseci_rx_queue_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+ struct dpseci_cmd_queue *cmd_params;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_RX_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpseci_cmd_queue *)cmd.params;
+ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
+ cmd_params->priority = cfg->dest_cfg.priority;
+ cmd_params->queue = queue;
+ cmd_params->dest_type = cfg->dest_cfg.dest_type;
+ cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
+ cmd_params->options = cpu_to_le32(cfg->options);
+ cmd_params->order_preservation_en =
+ cpu_to_le32(cfg->order_preservation_en);
+
+ return mc_send_command(mc_io, &cmd);
+}
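/*
 * Illustrative usage sketch: point every Rx queue at a DPIO channel so that
 * dequeues are driven by FQDAN notifications. The function name, DPIO id and
 * priority are placeholders; a real caller would take them from its DPIO
 * setup.
 */
static int example_setup_rx_queues(struct fsl_mc_io *mc_io, u16 token,
				   int dpio_id)
{
	struct dpseci_rx_queue_cfg cfg = { 0 };

	cfg.options = DPSECI_QUEUE_OPT_DEST | DPSECI_QUEUE_OPT_USER_CTX;
	cfg.user_ctx = 0;			/* e.g. a per-queue cookie */
	cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
	cfg.dest_cfg.dest_id = dpio_id;
	cfg.dest_cfg.priority = 0;

	/* DPSECI_ALL_QUEUES applies the same settings to every Rx queue */
	return dpseci_set_rx_queue(mc_io, 0, token, DPSECI_ALL_QUEUES, &cfg);
}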
+
+/**
+ * dpseci_get_rx_queue() - Retrieve Rx queue attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @queue: Select the queue relative to number of priorities configured at
+ * DPSECI creation
+ * @attr: Returned Rx queue attributes
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 queue, struct dpseci_rx_queue_attr *attr)
+{
+ struct mc_command cmd = { 0 };
+ struct dpseci_cmd_queue *cmd_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_RX_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpseci_cmd_queue *)cmd.params;
+ cmd_params->queue = queue;
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id);
+ attr->dest_cfg.priority = cmd_params->priority;
+ attr->dest_cfg.dest_type = cmd_params->dest_type;
+ attr->user_ctx = le64_to_cpu(cmd_params->user_ctx);
+ attr->fqid = le32_to_cpu(cmd_params->fqid);
+ attr->order_preservation_en =
+ le32_to_cpu(cmd_params->order_preservation_en);
+
+ return 0;
+}
+
+/**
+ * dpseci_get_tx_queue() - Retrieve Tx queue attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @queue: Select the queue relative to number of priorities configured at
+ * DPSECI creation
+ * @attr: Returned Tx queue attributes
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 queue, struct dpseci_tx_queue_attr *attr)
+{
+ struct mc_command cmd = { 0 };
+ struct dpseci_cmd_queue *cmd_params;
+ struct dpseci_rsp_get_tx_queue *rsp_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_TX_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpseci_cmd_queue *)cmd.params;
+ cmd_params->queue = queue;
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpseci_rsp_get_tx_queue *)cmd.params;
+ attr->fqid = le32_to_cpu(rsp_params->fqid);
+ attr->priority = rsp_params->priority;
+
+ return 0;
+}
+
+/**
+ * dpseci_get_sec_attr() - Retrieve SEC accelerator attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @attr: Returned SEC attributes
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ struct dpseci_sec_attr *attr)
+{
+ struct mc_command cmd = { 0 };
+ struct dpseci_rsp_get_sec_attr *rsp_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_ATTR,
+ cmd_flags,
+ token);
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpseci_rsp_get_sec_attr *)cmd.params;
+ attr->ip_id = le16_to_cpu(rsp_params->ip_id);
+ attr->major_rev = rsp_params->major_rev;
+ attr->minor_rev = rsp_params->minor_rev;
+ attr->era = rsp_params->era;
+ attr->deco_num = rsp_params->deco_num;
+ attr->zuc_auth_acc_num = rsp_params->zuc_auth_acc_num;
+ attr->zuc_enc_acc_num = rsp_params->zuc_enc_acc_num;
+ attr->snow_f8_acc_num = rsp_params->snow_f8_acc_num;
+ attr->snow_f9_acc_num = rsp_params->snow_f9_acc_num;
+ attr->crc_acc_num = rsp_params->crc_acc_num;
+ attr->pk_acc_num = rsp_params->pk_acc_num;
+ attr->kasumi_acc_num = rsp_params->kasumi_acc_num;
+ attr->rng_acc_num = rsp_params->rng_acc_num;
+ attr->md_acc_num = rsp_params->md_acc_num;
+ attr->arc4_acc_num = rsp_params->arc4_acc_num;
+ attr->des_acc_num = rsp_params->des_acc_num;
+ attr->aes_acc_num = rsp_params->aes_acc_num;
+
+ return 0;
+}
+
+/**
+ * dpseci_get_sec_counters() - Retrieve SEC accelerator counters
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @counters: Returned SEC counters
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ struct dpseci_sec_counters *counters)
+{
+ struct mc_command cmd = { 0 };
+ struct dpseci_rsp_get_sec_counters *rsp_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_COUNTERS,
+ cmd_flags,
+ token);
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpseci_rsp_get_sec_counters *)cmd.params;
+ counters->dequeued_requests =
+ le64_to_cpu(rsp_params->dequeued_requests);
+ counters->ob_enc_requests = le64_to_cpu(rsp_params->ob_enc_requests);
+ counters->ib_dec_requests = le64_to_cpu(rsp_params->ib_dec_requests);
+ counters->ob_enc_bytes = le64_to_cpu(rsp_params->ob_enc_bytes);
+ counters->ob_prot_bytes = le64_to_cpu(rsp_params->ob_prot_bytes);
+ counters->ib_dec_bytes = le64_to_cpu(rsp_params->ib_dec_bytes);
+ counters->ib_valid_bytes = le64_to_cpu(rsp_params->ib_valid_bytes);
+
+ return 0;
+}
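/*
 * Illustrative usage sketch: dump a subset of the global SEC counters, e.g.
 * from a debugfs show callback. The function name is hypothetical.
 */
static void example_dump_sec_counters(struct fsl_mc_io *mc_io, u16 token)
{
	struct dpseci_sec_counters cnt;

	if (dpseci_get_sec_counters(mc_io, 0, token, &cnt))
		return;

	pr_info("dequeued %llu reqs, ob enc %llu bytes, ib dec %llu bytes\n",
		cnt.dequeued_requests, cnt.ob_enc_bytes, cnt.ib_dec_bytes);
}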
+
+/**
+ * dpseci_get_api_version() - Get Data Path SEC Interface API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of data path sec API
+ * @minor_ver: Minor version of data path sec API
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 *major_ver, u16 *minor_ver)
+{
+ struct mc_command cmd = { 0 };
+ struct dpseci_rsp_get_api_version *rsp_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_API_VERSION,
+ cmd_flags, 0);
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpseci_rsp_get_api_version *)cmd.params;
+ *major_ver = le16_to_cpu(rsp_params->major);
+ *minor_ver = le16_to_cpu(rsp_params->minor);
+
+ return 0;
+}
+
+/**
+ * dpseci_set_opr() - Set Order Restoration configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @index: The queue index
+ * @options: Configuration mode options; can be OPR_OPT_CREATE or
+ * OPR_OPT_RETIRE
+ * @cfg: Configuration options for the OPR
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_set_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
+ u8 options, struct opr_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+ struct dpseci_cmd_opr *cmd_params;
+
+ cmd.header = mc_encode_cmd_header(
+ DPSECI_CMDID_SET_OPR,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpseci_cmd_opr *)cmd.params;
+ cmd_params->index = index;
+ cmd_params->options = options;
+ cmd_params->oloe = cfg->oloe;
+ cmd_params->oeane = cfg->oeane;
+ cmd_params->olws = cfg->olws;
+ cmd_params->oa = cfg->oa;
+ cmd_params->oprrws = cfg->oprrws;
+
+ return mc_send_command(mc_io, &cmd);
+}
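/*
 * Illustrative usage sketch: create an order restoration point on queue 0.
 * struct opr_cfg comes from the dpopr.h header included above; the window
 * settings are placeholder values and OPR_OPT_CREATE is the creation option
 * mentioned in the kernel-doc above.
 */
static int example_create_opr(struct fsl_mc_io *mc_io, u16 token)
{
	struct opr_cfg cfg = { 0 };

	cfg.oprrws = 3;		/* placeholder window size encoding */
	cfg.olws = 0;		/* placeholder */
	cfg.oa = 0;		/* placeholder */

	return dpseci_set_opr(mc_io, 0, token, 0, OPR_OPT_CREATE, &cfg);
}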
+
+/**
+ * dpseci_get_opr() - Retrieve Order Restoration config and query
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @index: The queue index
+ * @cfg: Returned OPR configuration
+ * @qry: Returned OPR query
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_get_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
+ struct opr_cfg *cfg, struct opr_qry *qry)
+{
+ struct mc_command cmd = { 0 };
+ struct dpseci_cmd_opr *cmd_params;
+ struct dpseci_rsp_get_opr *rsp_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_OPR,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpseci_cmd_opr *)cmd.params;
+ cmd_params->index = index;
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpseci_rsp_get_opr *)cmd.params;
+ qry->rip = dpseci_get_field(rsp_params->rip_enable, OPR_RIP);
+ qry->enable = dpseci_get_field(rsp_params->rip_enable, OPR_ENABLE);
+ cfg->oloe = rsp_params->oloe;
+ cfg->oeane = rsp_params->oeane;
+ cfg->olws = rsp_params->olws;
+ cfg->oa = rsp_params->oa;
+ cfg->oprrws = rsp_params->oprrws;
+ qry->nesn = le16_to_cpu(rsp_params->nesn);
+ qry->ndsn = le16_to_cpu(rsp_params->ndsn);
+ qry->ea_tseq = le16_to_cpu(rsp_params->ea_tseq);
+ qry->tseq_nlis = dpseci_get_field(rsp_params->tseq_nlis, OPR_TSEQ_NLIS);
+ qry->ea_hseq = le16_to_cpu(rsp_params->ea_hseq);
+ qry->hseq_nlis = dpseci_get_field(rsp_params->tseq_nlis, OPR_HSEQ_NLIS);
+ qry->ea_hptr = le16_to_cpu(rsp_params->ea_hptr);
+ qry->ea_tptr = le16_to_cpu(rsp_params->ea_tptr);
+ qry->opr_vid = le16_to_cpu(rsp_params->opr_vid);
+ qry->opr_id = le16_to_cpu(rsp_params->opr_id);
+
+ return 0;
+}
+
+/**
+ * dpseci_set_congestion_notification() - Set congestion group
+ * notification configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @cfg: congestion notification configuration
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, const struct dpseci_congestion_notification_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+ struct dpseci_cmd_congestion_notification *cmd_params;
+
+ cmd.header = mc_encode_cmd_header(
+ DPSECI_CMDID_SET_CONGESTION_NOTIFICATION,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
+ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
+ cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
+ cmd_params->priority = cfg->dest_cfg.priority;
+ dpseci_set_field(cmd_params->options, CGN_DEST_TYPE,
+ cfg->dest_cfg.dest_type);
+ dpseci_set_field(cmd_params->options, CGN_UNITS, cfg->units);
+ cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
+ cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
+ cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
+ cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
+
+ return mc_send_command(mc_io, &cmd);
+}
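/*
 * Illustrative usage sketch: request a CSCN message write when the DPSECI
 * congestion group crosses a frame-count threshold. The function name,
 * thresholds and iova argument are placeholders; message_iova must point to
 * 16-byte-aligned DMA-able memory.
 */
static int example_setup_cscn(struct fsl_mc_io *mc_io, u16 token,
			      dma_addr_t cscn_iova)
{
	struct dpseci_congestion_notification_cfg cfg = { 0 };

	cfg.units = DPSECI_CONGESTION_UNIT_FRAMES;
	cfg.threshold_entry = 1024;	/* enter congestion at 1024 frames */
	cfg.threshold_exit = 512;	/* leave congestion at 512 frames */
	cfg.message_iova = cscn_iova;
	cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
				DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT;

	return dpseci_set_congestion_notification(mc_io, 0, token, &cfg);
}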
+
+/**
+ * dpseci_get_congestion_notification() - Get congestion group notification
+ * configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @cfg: congestion notification configuration
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, struct dpseci_congestion_notification_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+ struct dpseci_cmd_congestion_notification *rsp_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(
+ DPSECI_CMDID_GET_CONGESTION_NOTIFICATION,
+ cmd_flags,
+ token);
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
+ cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
+ cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode);
+ cfg->dest_cfg.priority = rsp_params->priority;
+ cfg->dest_cfg.dest_type = dpseci_get_field(rsp_params->options,
+ CGN_DEST_TYPE);
+ cfg->units = dpseci_get_field(rsp_params->options, CGN_UNITS);
+ cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
+ cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
+ cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry);
+ cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit);
+
+ return 0;
+}
diff --git a/drivers/crypto/caam/dpseci.h b/drivers/crypto/caam/dpseci.h
new file mode 100644
index 0000000..d37489c
--- /dev/null
+++ b/drivers/crypto/caam/dpseci.h
@@ -0,0 +1,395 @@
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the names of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _DPSECI_H_
+#define _DPSECI_H_
+
+/*
+ * Data Path SEC Interface API
+ * Contains initialization APIs and runtime control APIs for DPSECI
+ */
+
+struct fsl_mc_io;
+struct opr_cfg;
+struct opr_qry;
+
+/**
+ * General DPSECI macros
+ */
+
+/**
+ * Maximum number of Tx/Rx priorities per DPSECI object
+ */
+#define DPSECI_PRIO_NUM 8
+
+/**
+ * All queues considered; see dpseci_set_rx_queue()
+ */
+#define DPSECI_ALL_QUEUES (u8)(-1)
+
+int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
+ u16 *token);
+
+int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+
+/**
+ * Enable the Congestion Group support
+ */
+#define DPSECI_OPT_HAS_CG 0x000020
+
+/**
+ * Enable the Order Restoration support
+ */
+#define DPSECI_OPT_HAS_OPR 0x000040
+
+/**
+ * Order Point Records are shared for the entire DPSECI
+ */
+#define DPSECI_OPT_OPR_SHARED 0x000080
+
+/**
+ * struct dpseci_cfg - Structure representing DPSECI configuration
+ * @options: Any combination of the following options:
+ * DPSECI_OPT_HAS_CG
+ * DPSECI_OPT_HAS_OPR
+ * DPSECI_OPT_OPR_SHARED
+ * @num_tx_queues: number of queues towards the SEC
+ * @num_rx_queues: number of queues back from the SEC
+ * @priorities: Priorities for the SEC hardware processing;
+ * each place in the array is the priority of the tx queue
+ * towards the SEC;
+ * valid priorities are configured with values 1-8;
+ */
+struct dpseci_cfg {
+ u32 options;
+ u8 num_tx_queues;
+ u8 num_rx_queues;
+ u8 priorities[DPSECI_PRIO_NUM];
+};
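/*
 * Illustrative usage sketch: fill a dpseci_cfg for a DPSECI with two Tx/Rx
 * queue pairs and create the object in the default container. The function
 * name, priorities and option flags are placeholder choices.
 */
static int example_create_dpseci(struct fsl_mc_io *mc_io, u32 *obj_id)
{
	struct dpseci_cfg cfg = { 0 };

	cfg.options = DPSECI_OPT_HAS_CG;
	cfg.num_tx_queues = 2;
	cfg.num_rx_queues = 2;
	cfg.priorities[0] = 1;		/* valid priorities are 1-8 */
	cfg.priorities[1] = 2;

	/* dprc_token 0 assigns the new object to the default container */
	return dpseci_create(mc_io, 0, 0, &cfg, obj_id);
}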
+
+int dpseci_create(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
+ const struct dpseci_cfg *cfg, u32 *obj_id);
+
+int dpseci_destroy(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
+ u32 object_id);
+
+int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+
+int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+
+int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ int *en);
+
+int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+
+int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u8 *en);
+
+int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u8 en);
+
+int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u32 *mask);
+
+int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u32 mask);
+
+int dpseci_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u32 *status);
+
+int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u32 status);
+
+/**
+ * struct dpseci_attr - Structure representing DPSECI attributes
+ * @id: DPSECI object ID
+ * @num_tx_queues: number of queues towards the SEC
+ * @num_rx_queues: number of queues back from the SEC
+ * @options: any combination of the following options:
+ * DPSECI_OPT_HAS_CG
+ * DPSECI_OPT_HAS_OPR
+ * DPSECI_OPT_OPR_SHARED
+ */
+struct dpseci_attr {
+ int id;
+ u8 num_tx_queues;
+ u8 num_rx_queues;
+ u32 options;
+};
+
+int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ struct dpseci_attr *attr);
+
+/**
+ * enum dpseci_dest - DPSECI destination types
+ * @DPSECI_DEST_NONE: Unassigned destination; The queue is set in parked mode
+ * and does not generate FQDAN notifications; user is expected to dequeue
+ * from the queue based on polling or other user-defined method
+ * @DPSECI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
+ * notifications to the specified DPIO; user is expected to dequeue from
+ * the queue only after notification is received
+ * @DPSECI_DEST_DPCON: The queue is set in schedule mode and does not generate
+ * FQDAN notifications, but is connected to the specified DPCON object;
+ * user is expected to dequeue from the DPCON channel
+ */
+enum dpseci_dest {
+ DPSECI_DEST_NONE = 0,
+ DPSECI_DEST_DPIO,
+ DPSECI_DEST_DPCON
+};
+
+/**
+ * struct dpseci_dest_cfg - Structure representing DPSECI destination parameters
+ * @dest_type: Destination type
+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
+ * @priority: Priority selection within the DPIO or DPCON channel; valid values
+ * are 0-1 or 0-7, depending on the number of priorities in that channel;
+ * not relevant for 'DPSECI_DEST_NONE' option
+ */
+struct dpseci_dest_cfg {
+ enum dpseci_dest dest_type;
+ int dest_id;
+ u8 priority;
+};
+
+/**
+ * DPSECI queue modification options
+ */
+
+/**
+ * Select to modify the user's context associated with the queue
+ */
+#define DPSECI_QUEUE_OPT_USER_CTX 0x00000001
+
+/**
+ * Select to modify the queue's destination
+ */
+#define DPSECI_QUEUE_OPT_DEST 0x00000002
+
+/**
+ * Select to modify the queue's order preservation
+ */
+#define DPSECI_QUEUE_OPT_ORDER_PRESERVATION 0x00000004
+
+/**
+ * struct dpseci_rx_queue_cfg - DPSECI RX queue configuration
+ * @options: Flags representing the suggested modifications to the queue;
+ * Use any combination of 'DPSECI_QUEUE_OPT_<X>' flags
+ * @order_preservation_en: order preservation configuration for the Rx queue;
+ * valid only if 'DPSECI_QUEUE_OPT_ORDER_PRESERVATION' is contained in 'options'
+ * @user_ctx: User context value provided in the frame descriptor of each
+ * dequeued frame; valid only if 'DPSECI_QUEUE_OPT_USER_CTX' is contained
+ * in 'options'
+ * @dest_cfg: Queue destination parameters; valid only if
+ * 'DPSECI_QUEUE_OPT_DEST' is contained in 'options'
+ */
+struct dpseci_rx_queue_cfg {
+ u32 options;
+ int order_preservation_en;
+ u64 user_ctx;
+ struct dpseci_dest_cfg dest_cfg;
+};
+
+int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 queue, const struct dpseci_rx_queue_cfg *cfg);
+
+/**
+ * struct dpseci_rx_queue_attr - Structure representing attributes of Rx queues
+ * @user_ctx: User context value provided in the frame descriptor of each
+ * dequeued frame
+ * @order_preservation_en: Status of the order preservation configuration on the
+ * queue
+ * @dest_cfg: Queue destination configuration
+ * @fqid: Virtual FQID value to be used for dequeue operations
+ */
+struct dpseci_rx_queue_attr {
+ u64 user_ctx;
+ int order_preservation_en;
+ struct dpseci_dest_cfg dest_cfg;
+ u32 fqid;
+};
+
+int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 queue, struct dpseci_rx_queue_attr *attr);
+
+/**
+ * struct dpseci_tx_queue_attr - Structure representing attributes of Tx queues
+ * @fqid: Virtual FQID to be used for sending frames to SEC hardware
+ * @priority: SEC hardware processing priority for the queue
+ */
+struct dpseci_tx_queue_attr {
+ u32 fqid;
+ u8 priority;
+};
+
+int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 queue, struct dpseci_tx_queue_attr *attr);
+
+/**
+ * struct dpseci_sec_attr - Structure representing attributes of the SEC
+ * hardware accelerator
+ * @ip_id: ID for SEC
+ * @major_rev: Major revision number for SEC
+ * @minor_rev: Minor revision number for SEC
+ * @era: SEC Era
+ * @deco_num: The number of copies of the DECO that are implemented in this
+ * version of SEC
+ * @zuc_auth_acc_num: The number of copies of ZUCA that are implemented in this
+ * version of SEC
+ * @zuc_enc_acc_num: The number of copies of ZUCE that are implemented in this
+ * version of SEC
+ * @snow_f8_acc_num: The number of copies of the SNOW-f8 module that are
+ * implemented in this version of SEC
+ * @snow_f9_acc_num: The number of copies of the SNOW-f9 module that are
+ * implemented in this version of SEC
+ * @crc_acc_num: The number of copies of the CRC module that are implemented in
+ * this version of SEC
+ * @pk_acc_num: The number of copies of the Public Key module that are
+ * implemented in this version of SEC
+ * @kasumi_acc_num: The number of copies of the Kasumi module that are
+ * implemented in this version of SEC
+ * @rng_acc_num: The number of copies of the Random Number Generator that are
+ * implemented in this version of SEC
+ * @md_acc_num: The number of copies of the MDHA (Hashing module) that are
+ * implemented in this version of SEC
+ * @arc4_acc_num: The number of copies of the ARC4 module that are implemented
+ * in this version of SEC
+ * @des_acc_num: The number of copies of the DES module that are implemented in
+ * this version of SEC
+ * @aes_acc_num: The number of copies of the AES module that are implemented in
+ * this version of SEC
+ */
+struct dpseci_sec_attr {
+ u16 ip_id;
+ u8 major_rev;
+ u8 minor_rev;
+ u8 era;
+ u8 deco_num;
+ u8 zuc_auth_acc_num;
+ u8 zuc_enc_acc_num;
+ u8 snow_f8_acc_num;
+ u8 snow_f9_acc_num;
+ u8 crc_acc_num;
+ u8 pk_acc_num;
+ u8 kasumi_acc_num;
+ u8 rng_acc_num;
+ u8 md_acc_num;
+ u8 arc4_acc_num;
+ u8 des_acc_num;
+ u8 aes_acc_num;
+};
+
+int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ struct dpseci_sec_attr *attr);
+
+/**
+ * struct dpseci_sec_counters - Structure representing global SEC counters
+ *				(not per-dpseci counters)
+ * @dequeued_requests: Number of Requests Dequeued
+ * @ob_enc_requests: Number of Outbound Encrypt Requests
+ * @ib_dec_requests: Number of Inbound Decrypt Requests
+ * @ob_enc_bytes: Number of Outbound Bytes Encrypted
+ * @ob_prot_bytes: Number of Outbound Bytes Protected
+ * @ib_dec_bytes: Number of Inbound Bytes Decrypted
+ * @ib_valid_bytes: Number of Inbound Bytes Validated
+ */
+struct dpseci_sec_counters {
+ u64 dequeued_requests;
+ u64 ob_enc_requests;
+ u64 ib_dec_requests;
+ u64 ob_enc_bytes;
+ u64 ob_prot_bytes;
+ u64 ib_dec_bytes;
+ u64 ib_valid_bytes;
+};
+
+int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ struct dpseci_sec_counters *counters);
+
+int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 *major_ver, u16 *minor_ver);
+
+int dpseci_set_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
+ u8 options, struct opr_cfg *cfg);
+
+int dpseci_get_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
+ struct opr_cfg *cfg, struct opr_qry *qry);
+
+/**
+ * enum dpseci_congestion_unit - DPSECI congestion units
+ * @DPSECI_CONGESTION_UNIT_BYTES: bytes units
+ * @DPSECI_CONGESTION_UNIT_FRAMES: frames units
+ */
+enum dpseci_congestion_unit {
+ DPSECI_CONGESTION_UNIT_BYTES = 0,
+ DPSECI_CONGESTION_UNIT_FRAMES
+};
+
+#define DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER 0x00000001
+#define DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT 0x00000002
+#define DPSECI_CGN_MODE_COHERENT_WRITE 0x00000004
+#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_ENTER 0x00000008
+#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_EXIT 0x00000010
+#define DPSECI_CGN_MODE_INTR_COALESCING_DISABLED 0x00000020
+
+/**
+ * struct dpseci_congestion_notification_cfg - congestion notification
+ * configuration
+ * @units: units type
+ * @threshold_entry: above this threshold we enter a congestion state.
+ * set it to '0' to disable it
+ * @threshold_exit: below this threshold we exit the congestion state.
+ * @message_ctx: The context that will be part of the CSCN message
+ * @message_iova: I/O virtual address (must be in DMA-able memory),
+ * must be 16B aligned;
+ * @dest_cfg: CSCN can be sent to either DPIO or DPCON WQ channel
+ * @notification_mode: Mask of available options; use 'DPSECI_CGN_MODE_<X>'
+ * values
+ */
+struct dpseci_congestion_notification_cfg {
+ enum dpseci_congestion_unit units;
+ u32 threshold_entry;
+ u32 threshold_exit;
+ u64 message_ctx;
+ u64 message_iova;
+ struct dpseci_dest_cfg dest_cfg;
+ u16 notification_mode;
+};
+
+int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, const struct dpseci_congestion_notification_cfg *cfg);
+
+int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, struct dpseci_congestion_notification_cfg *cfg);
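A hedged sketch of enabling congestion state change notifications with the two calls above, assuming the caller already holds an open DPSECI token and a 16-byte-aligned, DMA-able buffer for the CSCN message (the helper name and threshold values are illustrative only):

static int example_enable_cscn(struct fsl_mc_io *mc_io, u16 token,
			       dma_addr_t msg_iova, u64 msg_ctx)
{
	struct dpseci_congestion_notification_cfg cong = { 0 };

	cong.units = DPSECI_CONGESTION_UNIT_FRAMES;
	cong.threshold_entry = 1024;	/* enter congestion above 1024 frames */
	cong.threshold_exit = 512;	/* exit congestion below 512 frames */
	cong.message_iova = msg_iova;	/* 16B-aligned, DMA-able memory */
	cong.message_ctx = msg_ctx;
	cong.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
				 DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT;

	return dpseci_set_congestion_notification(mc_io, 0, token, &cong);
}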
+
+#endif /* _DPSECI_H_ */
diff --git a/drivers/crypto/caam/dpseci_cmd.h b/drivers/crypto/caam/dpseci_cmd.h
new file mode 100644
index 0000000..7624315
--- /dev/null
+++ b/drivers/crypto/caam/dpseci_cmd.h
@@ -0,0 +1,261 @@
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the names of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _DPSECI_CMD_H_
+#define _DPSECI_CMD_H_
+
+/* DPSECI Version */
+#define DPSECI_VER_MAJOR 5
+#define DPSECI_VER_MINOR 1
+
+#define DPSECI_VER(maj, min) (((maj) << 16) | (min))
+#define DPSECI_VERSION DPSECI_VER(DPSECI_VER_MAJOR, DPSECI_VER_MINOR)
+
+/* Command IDs */
+
+#define DPSECI_CMDID_CLOSE 0x8001
+#define DPSECI_CMDID_OPEN 0x8091
+#define DPSECI_CMDID_CREATE 0x9092
+#define DPSECI_CMDID_DESTROY 0x9891
+#define DPSECI_CMDID_GET_API_VERSION 0xa091
+
+#define DPSECI_CMDID_ENABLE 0x0021
+#define DPSECI_CMDID_DISABLE 0x0031
+#define DPSECI_CMDID_GET_ATTR 0x0041
+#define DPSECI_CMDID_RESET 0x0051
+#define DPSECI_CMDID_IS_ENABLED 0x0061
+
+#define DPSECI_CMDID_SET_IRQ_ENABLE 0x0121
+#define DPSECI_CMDID_GET_IRQ_ENABLE 0x0131
+#define DPSECI_CMDID_SET_IRQ_MASK 0x0141
+#define DPSECI_CMDID_GET_IRQ_MASK 0x0151
+#define DPSECI_CMDID_GET_IRQ_STATUS 0x0161
+#define DPSECI_CMDID_CLEAR_IRQ_STATUS 0x0171
+
+#define DPSECI_CMDID_SET_RX_QUEUE 0x1941
+#define DPSECI_CMDID_GET_RX_QUEUE 0x1961
+#define DPSECI_CMDID_GET_TX_QUEUE 0x1971
+#define DPSECI_CMDID_GET_SEC_ATTR 0x1981
+#define DPSECI_CMDID_GET_SEC_COUNTERS 0x1991
+#define DPSECI_CMDID_SET_OPR 0x19A1
+#define DPSECI_CMDID_GET_OPR 0x19B1
+
+#define DPSECI_CMDID_SET_CONGESTION_NOTIFICATION 0x1701
+#define DPSECI_CMDID_GET_CONGESTION_NOTIFICATION 0x1711
+
+/* Macros for accessing command fields smaller than 1 byte */
+#define DPSECI_MASK(field) \
+ GENMASK(DPSECI_##field##_SHIFT + DPSECI_##field##_SIZE - 1, \
+ DPSECI_##field##_SHIFT)
+
+#define dpseci_set_field(var, field, val) \
+ ((var) |= (((val) << DPSECI_##field##_SHIFT) & DPSECI_MASK(field)))
+
+#define dpseci_get_field(var, field) \
+ (((var) & DPSECI_MASK(field)) >> DPSECI_##field##_SHIFT)
+
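For illustration, this is how the accessors above pack and unpack two sub-byte fields that share a single command byte; the DPSECI_CGN_DEST_TYPE / DPSECI_CGN_UNITS shift and size definitions used here appear further down in this header (dest_type in bits 0-3, units in bits 4-5), and the helper names are hypothetical:

static inline u8 example_pack_type_units(u8 dest_type, u8 units)
{
	u8 val = 0;

	dpseci_set_field(val, CGN_DEST_TYPE, dest_type);	/* bits 0-3 */
	dpseci_set_field(val, CGN_UNITS, units);		/* bits 4-5 */

	return val;
}

static inline u8 example_unpack_units(u8 val)
{
	return dpseci_get_field(val, CGN_UNITS);
}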
+struct dpseci_cmd_open {
+ __le32 dpseci_id;
+};
+
+struct dpseci_cmd_create {
+ u8 priorities[8];
+ u8 num_tx_queues;
+ u8 num_rx_queues;
+ __le16 pad;
+ __le32 options;
+};
+
+struct dpseci_cmd_destroy {
+ __le32 object_id;
+};
+
+struct dpseci_rsp_is_enabled {
+ __le32 is_enabled;
+};
+
+struct dpseci_cmd_irq_enable {
+ u8 enable_state;
+ u8 pad[3];
+ u8 irq_index;
+};
+
+struct dpseci_rsp_get_irq_enable {
+ u8 enable_state;
+};
+
+struct dpseci_cmd_irq_mask {
+ __le32 mask;
+ u8 irq_index;
+};
+
+struct dpseci_cmd_irq_status {
+ __le32 status;
+ u8 irq_index;
+};
+
+struct dpseci_rsp_get_attributes {
+ __le32 id;
+ __le32 pad0;
+ u8 num_tx_queues;
+ u8 num_rx_queues;
+ u8 pad1[6];
+ __le32 options;
+};
+
+struct dpseci_cmd_queue {
+ __le32 dest_id;
+ u8 priority;
+ u8 queue;
+ u8 dest_type;
+ u8 pad;
+ __le64 user_ctx;
+ union {
+ __le32 options;
+ __le32 fqid;
+ };
+ __le32 order_preservation_en;
+};
+
+struct dpseci_rsp_get_tx_queue {
+ __le32 pad;
+ __le32 fqid;
+ u8 priority;
+};
+
+struct dpseci_rsp_get_sec_attr {
+ __le16 ip_id;
+ u8 major_rev;
+ u8 minor_rev;
+ u8 era;
+ u8 pad0[3];
+ u8 deco_num;
+ u8 zuc_auth_acc_num;
+ u8 zuc_enc_acc_num;
+ u8 pad1;
+ u8 snow_f8_acc_num;
+ u8 snow_f9_acc_num;
+ u8 crc_acc_num;
+ u8 pad2;
+ u8 pk_acc_num;
+ u8 kasumi_acc_num;
+ u8 rng_acc_num;
+ u8 pad3;
+ u8 md_acc_num;
+ u8 arc4_acc_num;
+ u8 des_acc_num;
+ u8 aes_acc_num;
+};
+
+struct dpseci_rsp_get_sec_counters {
+ __le64 dequeued_requests;
+ __le64 ob_enc_requests;
+ __le64 ib_dec_requests;
+ __le64 ob_enc_bytes;
+ __le64 ob_prot_bytes;
+ __le64 ib_dec_bytes;
+ __le64 ib_valid_bytes;
+};
+
+struct dpseci_rsp_get_api_version {
+ __le16 major;
+ __le16 minor;
+};
+
+struct dpseci_cmd_opr {
+ __le16 pad;
+ u8 index;
+ u8 options;
+ u8 pad1[7];
+ u8 oloe;
+ u8 oeane;
+ u8 olws;
+ u8 oa;
+ u8 oprrws;
+};
+
+#define DPSECI_OPR_RIP_SHIFT 0
+#define DPSECI_OPR_RIP_SIZE 1
+#define DPSECI_OPR_ENABLE_SHIFT 1
+#define DPSECI_OPR_ENABLE_SIZE 1
+#define DPSECI_OPR_TSEQ_NLIS_SHIFT 1
+#define DPSECI_OPR_TSEQ_NLIS_SIZE 1
+#define DPSECI_OPR_HSEQ_NLIS_SHIFT 1
+#define DPSECI_OPR_HSEQ_NLIS_SIZE 1
+
+struct dpseci_rsp_get_opr {
+ __le64 pad;
+ u8 rip_enable;
+ u8 pad0[2];
+ u8 oloe;
+ u8 oeane;
+ u8 olws;
+ u8 oa;
+ u8 oprrws;
+ __le16 nesn;
+ __le16 pad1;
+ __le16 ndsn;
+ __le16 pad2;
+ __le16 ea_tseq;
+ u8 tseq_nlis;
+ u8 pad3;
+ __le16 ea_hseq;
+ u8 hseq_nlis;
+ u8 pad4;
+ __le16 ea_hptr;
+ __le16 pad5;
+ __le16 ea_tptr;
+ __le16 pad6;
+ __le16 opr_vid;
+ __le16 pad7;
+ __le16 opr_id;
+};
+
+#define DPSECI_CGN_DEST_TYPE_SHIFT 0
+#define DPSECI_CGN_DEST_TYPE_SIZE 4
+#define DPSECI_CGN_UNITS_SHIFT 4
+#define DPSECI_CGN_UNITS_SIZE 2
+
+struct dpseci_cmd_congestion_notification {
+ __le32 dest_id;
+ __le16 notification_mode;
+ u8 priority;
+ u8 options;
+ __le64 message_iova;
+ __le64 message_ctx;
+ __le32 threshold_entry;
+ __le32 threshold_exit;
+};
+
+#endif /* _DPSECI_CMD_H_ */
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 33e41ea..3196339 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -6,11 +6,54 @@
#include "compat.h"
#include "regs.h"
-#include "intern.h"
#include "desc.h"
-#include "jr.h"
#include "error.h"
+#ifdef DEBUG
+
+#include <linux/highmem.h>
+
+void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
+ int rowsize, int groupsize, struct scatterlist *sg,
+ size_t tlen, bool ascii)
+{
+ struct scatterlist *it;
+ void *it_page;
+ size_t len;
+ void *buf;
+
+	for (it = sg; it && tlen > 0; it = sg_next(it)) {
+ /*
+ * make sure the scatterlist's page
+ * has a valid virtual memory mapping
+ */
+ it_page = kmap_atomic(sg_page(it));
+ if (unlikely(!it_page)) {
+ pr_err("caam_dump_sg: kmap failed\n");
+ return;
+ }
+
+ buf = it_page + it->offset;
+ len = min_t(size_t, tlen, it->length);
+ print_hex_dump(level, prefix_str, prefix_type, rowsize,
+ groupsize, buf, len, ascii);
+ tlen -= len;
+
+ kunmap_atomic(it_page);
+ }
+}
+
+#else
+
+void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
+ int rowsize, int groupsize, struct scatterlist *sg,
+ size_t tlen, bool ascii)
+{}
+
+#endif
+
+EXPORT_SYMBOL(caam_dump_sg);
+
static const struct {
u8 value;
const char *error_text;
@@ -69,6 +112,54 @@ static const struct {
{ 0xF1, "3GPP HFN matches or exceeds the Threshold" },
};
+static const struct {
+ u8 value;
+ const char *error_text;
+} qi_error_list[] = {
+ { 0x1F, "Job terminated by FQ or ICID flush" },
+ { 0x20, "FD format error"},
+ { 0x21, "FD command format error"},
+ { 0x23, "FL format error"},
+ { 0x25, "CRJD specified in FD, but not enabled in FLC"},
+ { 0x30, "Max. buffer size too small"},
+ { 0x31, "DHR exceeds max. buffer size (allocate mode, S/G format)"},
+	{ 0x32, "SGT exceeds max. buffer size (allocate mode, S/G format)"},
+ { 0x33, "Size over/underflow (allocate mode)"},
+ { 0x34, "Size over/underflow (reuse mode)"},
+	{ 0x35, "Length exceeds max. short length (allocate mode, S/G format)"},
+	{ 0x36, "Memory footprint exceeds max. value (allocate mode, S/G format)"},
+ { 0x41, "SBC frame format not supported (allocate mode)"},
+ { 0x42, "Pool 0 invalid / pool 1 size < pool 0 size (allocate mode)"},
+ { 0x43, "Annotation output enabled but ASAR = 0 (allocate mode)"},
+ { 0x44, "Unsupported or reserved frame format or SGHR = 1 (reuse mode)"},
+ { 0x45, "DHR correction underflow (reuse mode, single buffer format)"},
+ { 0x46, "Annotation length exceeds offset (reuse mode)"},
+ { 0x48, "Annotation output enabled but ASA limited by ASAR (reuse mode)"},
+ { 0x49, "Data offset correction exceeds input frame data length (reuse mode)"},
+	{ 0x4B, "Annotation output enabled but ASA cannot be expanded (frame list)"},
+ { 0x51, "Unsupported IF reuse mode"},
+ { 0x52, "Unsupported FL use mode"},
+ { 0x53, "Unsupported RJD use mode"},
+ { 0x54, "Unsupported inline descriptor use mode"},
+ { 0xC0, "Table buffer pool 0 depletion"},
+ { 0xC1, "Table buffer pool 1 depletion"},
+ { 0xC2, "Data buffer pool 0 depletion, no OF allocated"},
+ { 0xC3, "Data buffer pool 1 depletion, no OF allocated"},
+ { 0xC4, "Data buffer pool 0 depletion, partial OF allocated"},
+ { 0xC5, "Data buffer pool 1 depletion, partial OF allocated"},
+ { 0xD0, "FLC read error"},
+ { 0xD1, "FL read error"},
+ { 0xD2, "FL write error"},
+ { 0xD3, "OF SGT write error"},
+ { 0xD4, "PTA read error"},
+ { 0xD5, "PTA write error"},
+ { 0xD6, "OF SGT F-bit write error"},
+ { 0xD7, "ASA write error"},
+ { 0xE1, "FLC[ICR]=0 ICID error"},
+ { 0xE2, "FLC[ICR]=1 ICID error"},
+ { 0xE4, "source of ICID flush not trusted (BDI = 0)"},
+};
+
static const char * const cha_id_list[] = {
"",
"AES",
@@ -146,10 +237,9 @@ static void report_ccb_status(struct device *jrdev, const u32 status,
strlen(rng_err_id_list[err_id])) {
/* RNG-only error */
err_str = rng_err_id_list[err_id];
- } else if (err_id < ARRAY_SIZE(err_id_list))
+ } else {
err_str = err_id_list[err_id];
- else
- snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
+ }
/*
* CCB ICV check failures are part of normal operation life;
@@ -198,6 +288,27 @@ static void report_deco_status(struct device *jrdev, const u32 status,
status, error, idx_str, idx, err_str, err_err_code);
}
+static void report_qi_status(struct device *qidev, const u32 status,
+ const char *error)
+{
+ u8 err_id = status & JRSTA_QIERR_ERROR_MASK;
+ const char *err_str = "unidentified error value 0x";
+ char err_err_code[3] = { 0 };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qi_error_list); i++)
+ if (qi_error_list[i].value == err_id)
+ break;
+
+ if (i != ARRAY_SIZE(qi_error_list) && qi_error_list[i].error_text)
+ err_str = qi_error_list[i].error_text;
+ else
+ snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
+
+ dev_err(qidev, "%08x: %s: %s%s\n",
+ status, error, err_str, err_err_code);
+}
+
static void report_jr_status(struct device *jrdev, const u32 status,
const char *error)
{
@@ -212,7 +323,7 @@ static void report_cond_code_status(struct device *jrdev, const u32 status,
status, error, __func__);
}
-void caam_jr_strstatus(struct device *jrdev, u32 status)
+void caam_strstatus(struct device *jrdev, u32 status, bool qi_v2)
{
static const struct stat_src {
void (*report_ssed)(struct device *jrdev, const u32 status,
@@ -224,7 +335,7 @@ void caam_jr_strstatus(struct device *jrdev, u32 status)
{ report_ccb_status, "CCB" },
{ report_jump_status, "Jump" },
{ report_deco_status, "DECO" },
- { NULL, "Queue Manager Interface" },
+ { report_qi_status, "Queue Manager Interface" },
{ report_jr_status, "Job Ring" },
{ report_cond_code_status, "Condition Code" },
{ NULL, NULL },
@@ -250,4 +361,4 @@ void caam_jr_strstatus(struct device *jrdev, u32 status)
else
dev_err(jrdev, "%d: unknown error source\n", ssrc);
}
-EXPORT_SYMBOL(caam_jr_strstatus);
+EXPORT_SYMBOL(caam_strstatus);
diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h
index b6350b0..751ddca 100644
--- a/drivers/crypto/caam/error.h
+++ b/drivers/crypto/caam/error.h
@@ -7,5 +7,13 @@
#ifndef CAAM_ERROR_H
#define CAAM_ERROR_H
#define CAAM_ERROR_STR_MAX 302
-void caam_jr_strstatus(struct device *jrdev, u32 status);
+
+void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
+
+#define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false)
+#define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true)
+
+void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
+ int rowsize, int groupsize, struct scatterlist *sg,
+ size_t tlen, bool ascii);
#endif /* CAAM_ERROR_H */
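As a small usage sketch, a job ring completion callback would typically decode a non-zero CAAM status through the caam_jr_strstatus() wrapper; the completion-based context and the function name are assumptions for illustration:

static void example_jr_done(struct device *jrdev, u32 *desc, u32 status,
			    void *context)
{
	struct completion *done = context;	/* assumed caller-provided */

	if (status)
		caam_jr_strstatus(jrdev, status);	/* logs a decoded error */

	complete(done);
}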
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 5d4c050..a523612 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -41,6 +41,7 @@ struct caam_drv_private_jr {
struct device *dev;
int ridx;
struct caam_job_ring __iomem *rregs; /* JobR's register space */
+ struct tasklet_struct irqtask;
int irq; /* One per queue */
/* Number of scatterlist crypt transforms active on the JobR */
@@ -63,10 +64,9 @@ struct caam_drv_private_jr {
* Driver-private storage for a single CAAM block instance
*/
struct caam_drv_private {
-
- struct device *dev;
- struct platform_device **jrpdev; /* Alloc'ed array per sub-device */
- struct platform_device *pdev;
+#ifdef CONFIG_CAAM_QI
+ struct device *qidev;
+#endif
/* Physical-presence section */
struct caam_ctrl __iomem *ctrl; /* controller region */
@@ -102,11 +102,6 @@ struct caam_drv_private {
#ifdef CONFIG_DEBUG_FS
struct dentry *dfs_root;
struct dentry *ctl; /* controller dir */
- struct dentry *ctl_rq_dequeued, *ctl_ob_enc_req, *ctl_ib_dec_req;
- struct dentry *ctl_ob_enc_bytes, *ctl_ob_prot_bytes;
- struct dentry *ctl_ib_dec_bytes, *ctl_ib_valid_bytes;
- struct dentry *ctl_faultaddr, *ctl_faultdetail, *ctl_faultstatus;
-
struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap;
struct dentry *ctl_kek, *ctl_tkek, *ctl_tdsk;
#endif
@@ -114,4 +109,22 @@ struct caam_drv_private {
void caam_jr_algapi_init(struct device *dev);
void caam_jr_algapi_remove(struct device *dev);
+
+#ifdef CONFIG_DEBUG_FS
+static int caam_debugfs_u64_get(void *data, u64 *val)
+{
+ *val = caam64_to_cpu(*(u64 *)data);
+ return 0;
+}
+
+static int caam_debugfs_u32_get(void *data, u64 *val)
+{
+ *val = caam32_to_cpu(*(u32 *)data);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
+#endif
+
#endif /* INTERN_H */
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 757c27f..00e8709 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -9,6 +9,7 @@
#include <linux/of_address.h>
#include "compat.h"
+#include "ctrl.h"
#include "regs.h"
#include "jr.h"
#include "desc.h"
@@ -22,6 +23,14 @@ struct jr_driver_data {
static struct jr_driver_data driver_data;
+static int jr_driver_probed;
+
+int caam_jr_driver_probed(void)
+{
+ return jr_driver_probed;
+}
+EXPORT_SYMBOL(caam_jr_driver_probed);
+
static int caam_reset_hw_jr(struct device *dev)
{
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
@@ -73,6 +82,8 @@ static int caam_jr_shutdown(struct device *dev)
ret = caam_reset_hw_jr(dev);
+ tasklet_kill(&jrp->irqtask);
+
/* Release interrupt */
free_irq(jrp->irq, dev);
@@ -116,6 +127,8 @@ static int caam_jr_remove(struct platform_device *pdev)
dev_err(jrdev, "Failed to shut down job ring\n");
irq_dispose_mapping(jrpriv->irq);
+ jr_driver_probed--;
+
return ret;
}
@@ -128,7 +141,7 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
/*
* Check the output ring for ready responses, kick
- * the threaded irq if jobs done.
+ * tasklet if jobs done.
*/
irqstate = rd_reg32(&jrp->rregs->jrintstatus);
if (!irqstate)
@@ -150,13 +163,18 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
/* Have valid interrupt at this point, just ACK and trigger */
wr_reg32(&jrp->rregs->jrintstatus, irqstate);
- return IRQ_WAKE_THREAD;
+ preempt_disable();
+ tasklet_schedule(&jrp->irqtask);
+ preempt_enable();
+
+ return IRQ_HANDLED;
}
-static irqreturn_t caam_jr_threadirq(int irq, void *st_dev)
+/* Deferred service handler, run as interrupt-fired tasklet */
+static void caam_jr_dequeue(unsigned long devarg)
{
int hw_idx, sw_idx, i, head, tail;
- struct device *dev = st_dev;
+ struct device *dev = (struct device *)devarg;
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
u32 *userdesc, userstatus;
@@ -230,8 +248,6 @@ static irqreturn_t caam_jr_threadirq(int irq, void *st_dev)
/* reenable / unmask IRQs */
clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
-
- return IRQ_HANDLED;
}
/**
@@ -275,6 +291,36 @@ struct device *caam_jr_alloc(void)
EXPORT_SYMBOL(caam_jr_alloc);
/**
+ * caam_jridx_alloc() - Allocate a specific job ring based on its index.
+ *
+ * Returns: pointer to the newly allocated physical JobR device if
+ *	    successful, or ERR_PTR(-ENODEV) if that job ring is unavailable.
+ **/
+struct device *caam_jridx_alloc(int idx)
+{
+ struct caam_drv_private_jr *jrpriv;
+ struct device *dev = ERR_PTR(-ENODEV);
+
+ spin_lock(&driver_data.jr_alloc_lock);
+
+ if (list_empty(&driver_data.jr_list))
+ goto end;
+
+ list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) {
+ if (jrpriv->ridx == idx) {
+ atomic_inc(&jrpriv->tfm_count);
+ dev = jrpriv->dev;
+ break;
+ }
+ }
+
+end:
+ spin_unlock(&driver_data.jr_alloc_lock);
+ return dev;
+}
+EXPORT_SYMBOL(caam_jridx_alloc);
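A minimal sketch of how a caller might use the new helper, falling back to the least-used ring when the requested index is unavailable (the function name is illustrative):

static int example_use_ring0(void)
{
	struct device *jrdev;

	jrdev = caam_jridx_alloc(0);		/* ask for job ring 0 specifically */
	if (IS_ERR(jrdev))
		jrdev = caam_jr_alloc();	/* fall back to the least-used ring */
	if (IS_ERR(jrdev))
		return PTR_ERR(jrdev);

	/* ... build descriptors and submit them via caam_jr_enqueue() ... */

	caam_jr_free(jrdev);

	return 0;
}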
+
+/**
* caam_jr_free() - Free the Job Ring
* @rdev - points to the dev that identifies the Job ring to
* be released.
@@ -389,10 +435,11 @@ static int caam_jr_init(struct device *dev)
jrp = dev_get_drvdata(dev);
+ tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);
+
/* Connect job ring interrupt handler. */
- error = request_threaded_irq(jrp->irq, caam_jr_interrupt,
- caam_jr_threadirq, IRQF_SHARED,
- dev_name(dev), dev);
+ error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
+ dev_name(dev), dev);
if (error) {
dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
jrp->ridx, jrp->irq);
@@ -454,6 +501,7 @@ out_free_inpring:
out_free_irq:
free_irq(jrp->irq, dev);
out_kill_deq:
+ tasklet_kill(&jrp->irqtask);
return error;
}
@@ -489,15 +537,28 @@ static int caam_jr_probe(struct platform_device *pdev)
return -ENOMEM;
}
- jrpriv->rregs = (struct caam_job_ring __force *)ctrl;
+ jrpriv->rregs = (struct caam_job_ring __iomem __force *)ctrl;
- if (sizeof(dma_addr_t) == sizeof(u64))
- if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring"))
- dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(40));
+ if (sizeof(dma_addr_t) == sizeof(u64)) {
+ if (caam_dpaa2)
+ error = dma_set_mask_and_coherent(jrdev,
+ DMA_BIT_MASK(49));
+ else if (of_device_is_compatible(nprop,
+ "fsl,sec-v5.0-job-ring"))
+ error = dma_set_mask_and_coherent(jrdev,
+ DMA_BIT_MASK(40));
else
- dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(36));
- else
- dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32));
+ error = dma_set_mask_and_coherent(jrdev,
+ DMA_BIT_MASK(36));
+ } else {
+ error = dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32));
+ }
+ if (error) {
+ dev_err(jrdev, "dma_set_mask_and_coherent failed (%d)\n",
+ error);
+ iounmap(ctrl);
+ return error;
+ }
/* Identify the interrupt */
jrpriv->irq = irq_of_parse_and_map(nprop, 0);
@@ -517,10 +578,12 @@ static int caam_jr_probe(struct platform_device *pdev)
atomic_set(&jrpriv->tfm_count, 0);
+ jr_driver_probed++;
+
return 0;
}
-static struct of_device_id caam_jr_match[] = {
+static const struct of_device_id caam_jr_match[] = {
{
.compatible = "fsl,sec-v4.0-job-ring",
},
diff --git a/drivers/crypto/caam/jr.h b/drivers/crypto/caam/jr.h
index 97113a6..ee4d31c 100644
--- a/drivers/crypto/caam/jr.h
+++ b/drivers/crypto/caam/jr.h
@@ -8,7 +8,9 @@
#define JR_H
/* Prototypes for backend-level services exposed to APIs */
+int caam_jr_driver_probed(void);
struct device *caam_jr_alloc(void);
+struct device *caam_jridx_alloc(int idx);
void caam_jr_free(struct device *rdev);
int caam_jr_enqueue(struct device *dev, u32 *desc,
void (*cbk)(struct device *dev, u32 *desc, u32 status,
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index 3ce1d5c..a523ed7 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -41,15 +41,29 @@ Split key generation-----------------------------------------------
[06] 0x64260028 fifostr: class2 mdsplit-jdk len=40
@0xffe04000
*/
-int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
- int split_key_pad_len, const u8 *key_in, u32 keylen,
- u32 alg_op)
+int gen_split_key(struct device *jrdev, u8 *key_out,
+ struct alginfo * const adata, const u8 *key_in, u32 keylen,
+ int max_keylen)
{
u32 *desc;
struct split_key_result result;
dma_addr_t dma_addr_in, dma_addr_out;
int ret = -ENOMEM;
+ adata->keylen = split_key_len(adata->algtype & OP_ALG_ALGSEL_MASK);
+ adata->keylen_pad = split_key_pad_len(adata->algtype &
+ OP_ALG_ALGSEL_MASK);
+
+#ifdef DEBUG
+ dev_err(jrdev, "split keylen %d split keylen padded %d\n",
+ adata->keylen, adata->keylen_pad);
+ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
+#endif
+
+ if (adata->keylen_pad > max_keylen)
+ return -EINVAL;
+
desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
if (!desc) {
dev_err(jrdev, "unable to allocate key input memory\n");
@@ -63,7 +77,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
goto out_free;
}
- dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
+ dma_addr_out = dma_map_single(jrdev, key_out, adata->keylen_pad,
DMA_FROM_DEVICE);
if (dma_mapping_error(jrdev, dma_addr_out)) {
dev_err(jrdev, "unable to map key output memory\n");
@@ -74,7 +88,9 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
/* Sets MDHA up into an HMAC-INIT */
- append_operation(desc, alg_op | OP_ALG_DECRYPT | OP_ALG_AS_INIT);
+ append_operation(desc, (adata->algtype & OP_ALG_ALGSEL_MASK) |
+ OP_ALG_AAI_HMAC | OP_TYPE_CLASS2_ALG | OP_ALG_DECRYPT |
+ OP_ALG_AS_INIT);
/*
* do a FIFO_LOAD of zero, this will trigger the internal key expansion
@@ -87,7 +103,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
* FIFO_STORE with the explicit split-key content store
* (0x26 output type)
*/
- append_fifo_store(desc, dma_addr_out, split_key_len,
+ append_fifo_store(desc, dma_addr_out, adata->keylen,
LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
#ifdef DEBUG
@@ -108,11 +124,11 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key_out,
- split_key_pad_len, 1);
+ adata->keylen_pad, 1);
#endif
}
- dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len,
+ dma_unmap_single(jrdev, dma_addr_out, adata->keylen_pad,
DMA_FROM_DEVICE);
out_unmap_in:
dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
diff --git a/drivers/crypto/caam/key_gen.h b/drivers/crypto/caam/key_gen.h
index c5588f6..851a7c86 100644
--- a/drivers/crypto/caam/key_gen.h
+++ b/drivers/crypto/caam/key_gen.h
@@ -5,6 +5,36 @@
*
*/
+/**
+ * split_key_len - Compute MDHA split key length for a given algorithm
+ * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
+ * SHA224, SHA384, SHA512.
+ *
+ * Return: MDHA split key length
+ */
+static inline u32 split_key_len(u32 hash)
+{
+ /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
+ static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
+ u32 idx;
+
+ idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
+
+ return (u32)(mdpadlen[idx] * 2);
+}
+
+/**
+ * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
+ * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
+ * SHA224, SHA384, SHA512.
+ *
+ * Return: MDHA split key pad length
+ */
+static inline u32 split_key_pad_len(u32 hash)
+{
+ return ALIGN(split_key_len(hash), 16);
+}
+
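As a concrete worked example (assuming the standard OP_ALG_ALGSEL_* selectors from desc.h): for HMAC-SHA1 the MDHA pad is 20 bytes, so split_key_len() returns 2 * 20 = 40 and split_key_pad_len() returns ALIGN(40, 16) = 48; for HMAC-SHA256 the pad is 32 bytes, so both helpers return 64. gen_split_key() in key_gen.c uses exactly these values to size the DMA mapping of the output key.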
struct split_key_result {
struct completion completion;
int err;
@@ -12,6 +42,6 @@ struct split_key_result {
void split_key_done(struct device *dev, u32 *desc, u32 err, void *context);
-int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
- int split_key_pad_len, const u8 *key_in, u32 keylen,
- u32 alg_op);
+int gen_split_key(struct device *jrdev, u8 *key_out,
+ struct alginfo * const adata, const u8 *key_in, u32 keylen,
+ int max_keylen);
diff --git a/drivers/crypto/caam/pdb.h b/drivers/crypto/caam/pdb.h
index aaa00dd..31e5996 100644
--- a/drivers/crypto/caam/pdb.h
+++ b/drivers/crypto/caam/pdb.h
@@ -483,6 +483,8 @@ struct dsa_verify_pdb {
#define RSA_PDB_E_MASK (0xFFF << RSA_PDB_E_SHIFT)
#define RSA_PDB_D_SHIFT 12
#define RSA_PDB_D_MASK (0xFFF << RSA_PDB_D_SHIFT)
+#define RSA_PDB_Q_SHIFT 12
+#define RSA_PDB_Q_MASK (0xFFF << RSA_PDB_Q_SHIFT)
#define RSA_PDB_SGF_F (0x8 << RSA_PDB_SGF_SHIFT)
#define RSA_PDB_SGF_G (0x4 << RSA_PDB_SGF_SHIFT)
@@ -490,6 +492,8 @@ struct dsa_verify_pdb {
#define RSA_PRIV_PDB_SGF_G (0x8 << RSA_PDB_SGF_SHIFT)
#define RSA_PRIV_KEY_FRM_1 0
+#define RSA_PRIV_KEY_FRM_2 1
+#define RSA_PRIV_KEY_FRM_3 2
/**
* RSA Encrypt Protocol Data Block
@@ -525,4 +529,62 @@ struct rsa_priv_f1_pdb {
dma_addr_t d_dma;
} __packed;
+/**
+ * RSA Decrypt PDB - Private Key Form #2
+ * @sgf : scatter-gather field
+ * @g_dma : dma address of encrypted input data
+ * @f_dma : dma address of output data
+ * @d_dma : dma address of RSA private exponent
+ * @p_dma : dma address of RSA prime factor p of RSA modulus n
+ * @q_dma : dma address of RSA prime factor q of RSA modulus n
+ * @tmp1_dma: dma address of temporary buffer. CAAM uses this temporary buffer
+ * as internal state buffer. It is assumed to be as long as p.
+ * @tmp2_dma: dma address of temporary buffer. CAAM uses this temporary buffer
+ * as internal state buffer. It is assumed to be as long as q.
+ * @p_q_len : length in bytes of first two prime factors of the RSA modulus n
+ */
+struct rsa_priv_f2_pdb {
+ u32 sgf;
+ dma_addr_t g_dma;
+ dma_addr_t f_dma;
+ dma_addr_t d_dma;
+ dma_addr_t p_dma;
+ dma_addr_t q_dma;
+ dma_addr_t tmp1_dma;
+ dma_addr_t tmp2_dma;
+ u32 p_q_len;
+} __packed;
+
+/**
+ * RSA Decrypt PDB - Private Key Form #3
+ * This is the RSA Chinese Remainder Theorem (CRT) form for two prime factors of
+ * the RSA modulus.
+ * @sgf : scatter-gather field
+ * @g_dma : dma address of encrypted input data
+ * @f_dma : dma address of output data
+ * @c_dma : dma address of RSA CRT coefficient
+ * @p_dma : dma address of RSA prime factor p of RSA modulus n
+ * @q_dma : dma address of RSA prime factor q of RSA modulus n
+ * @dp_dma : dma address of RSA CRT exponent of RSA prime factor p
+ * @dq_dma   : dma address of RSA CRT exponent of RSA prime factor q
+ * @tmp1_dma: dma address of temporary buffer. CAAM uses this temporary buffer
+ * as internal state buffer. It is assumed to be as long as p.
+ * @tmp2_dma: dma address of temporary buffer. CAAM uses this temporary buffer
+ * as internal state buffer. It is assumed to be as long as q.
+ * @p_q_len : length in bytes of first two prime factors of the RSA modulus n
+ */
+struct rsa_priv_f3_pdb {
+ u32 sgf;
+ dma_addr_t g_dma;
+ dma_addr_t f_dma;
+ dma_addr_t c_dma;
+ dma_addr_t p_dma;
+ dma_addr_t q_dma;
+ dma_addr_t dp_dma;
+ dma_addr_t dq_dma;
+ dma_addr_t tmp1_dma;
+ dma_addr_t tmp2_dma;
+ u32 p_q_len;
+} __packed;
+
#endif
diff --git a/drivers/crypto/caam/pkc_desc.c b/drivers/crypto/caam/pkc_desc.c
index 4e4183e..9e2ce6f 100644
--- a/drivers/crypto/caam/pkc_desc.c
+++ b/drivers/crypto/caam/pkc_desc.c
@@ -34,3 +34,39 @@ void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb)
append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
RSA_PRIV_KEY_FRM_1);
}
+
+/* Descriptor for RSA Private operation - Private Key Form #2 */
+void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb)
+{
+ init_job_desc_pdb(desc, 0, sizeof(*pdb));
+ append_cmd(desc, pdb->sgf);
+ append_ptr(desc, pdb->g_dma);
+ append_ptr(desc, pdb->f_dma);
+ append_ptr(desc, pdb->d_dma);
+ append_ptr(desc, pdb->p_dma);
+ append_ptr(desc, pdb->q_dma);
+ append_ptr(desc, pdb->tmp1_dma);
+ append_ptr(desc, pdb->tmp2_dma);
+ append_cmd(desc, pdb->p_q_len);
+ append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
+ RSA_PRIV_KEY_FRM_2);
+}
+
+/* Descriptor for RSA Private operation - Private Key Form #3 */
+void init_rsa_priv_f3_desc(u32 *desc, struct rsa_priv_f3_pdb *pdb)
+{
+ init_job_desc_pdb(desc, 0, sizeof(*pdb));
+ append_cmd(desc, pdb->sgf);
+ append_ptr(desc, pdb->g_dma);
+ append_ptr(desc, pdb->f_dma);
+ append_ptr(desc, pdb->c_dma);
+ append_ptr(desc, pdb->p_dma);
+ append_ptr(desc, pdb->q_dma);
+ append_ptr(desc, pdb->dp_dma);
+ append_ptr(desc, pdb->dq_dma);
+ append_ptr(desc, pdb->tmp1_dma);
+ append_ptr(desc, pdb->tmp2_dma);
+ append_cmd(desc, pdb->p_q_len);
+ append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
+ RSA_PRIV_KEY_FRM_3);
+}
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
new file mode 100644
index 0000000..48185d5
--- /dev/null
+++ b/drivers/crypto/caam/qi.c
@@ -0,0 +1,797 @@
+/*
+ * CAAM/SEC 4.x QI transport/backend driver
+ * Queue Interface backend functionality
+ *
+ * Copyright 2013-2016 Freescale Semiconductor, Inc.
+ * Copyright 2016-2017 NXP
+ */
+
+#include <linux/cpumask.h>
+#include <linux/kthread.h>
+#include <linux/fsl_qman.h>
+
+#include "regs.h"
+#include "qi.h"
+#include "desc.h"
+#include "intern.h"
+#include "desc_constr.h"
+
+#define PREHDR_RSLS_SHIFT 31
+
+/*
+ * Use a reasonable backlog of frames (per CPU) as congestion threshold,
+ * so that resources used by the in-flight buffers do not become a memory hog.
+ */
+#define MAX_RSP_FQ_BACKLOG_PER_CPU 256
+
+#define CAAM_QI_ENQUEUE_RETRIES 10000
+
+#define CAAM_NAPI_WEIGHT 63
+
+/*
+ * caam_napi - struct holding CAAM NAPI-related params
+ * @irqtask: IRQ task for QI backend
+ * @p: QMan portal
+ */
+struct caam_napi {
+ struct napi_struct irqtask;
+ struct qman_portal *p;
+};
+
+/*
+ * caam_qi_pcpu_priv - percpu private data structure used to maintain the list
+ *                     of pending responses expected on each cpu.
+ * @caam_napi: CAAM NAPI params
+ * @net_dev: netdev used by NAPI
+ * @rsp_fq: response FQ from CAAM
+ */
+struct caam_qi_pcpu_priv {
+ struct caam_napi caam_napi;
+ struct net_device net_dev;
+ struct qman_fq *rsp_fq;
+} ____cacheline_aligned;
+
+static DEFINE_PER_CPU(struct caam_qi_pcpu_priv, pcpu_qipriv);
+static DEFINE_PER_CPU(int, last_cpu);
+
+/*
+ * caam_qi_priv - CAAM QI backend private params
+ * @cgr: QMan congestion group
+ * @qi_pdev: platform device for QI backend
+ */
+struct caam_qi_priv {
+ struct qman_cgr cgr;
+ struct platform_device *qi_pdev;
+};
+
+static struct caam_qi_priv qipriv ____cacheline_aligned;
+
+/*
+ * This is written by only one core - the one that initialized the CGR - and
+ * read by multiple cores (all the others).
+ */
+bool caam_congested __read_mostly;
+EXPORT_SYMBOL(caam_congested);
+
+#ifdef CONFIG_DEBUG_FS
+/*
+ * This is a counter for the number of times the congestion group (where all
+ * the request and response queueus are) reached congestion. Incremented
+ * each time the congestion callback is called with congested == true.
+ */
+static u64 times_congested;
+#endif
+
+/*
+ * CPU on which the module was initialised. This is required because the QMan
+ * driver requires CGRs to be removed from the same CPU on which they were
+ * originally allocated.
+ */
+static int mod_init_cpu;
+
+/*
+ * This is a cache of buffers, from which the users of CAAM QI driver
+ * can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than
+ * doing malloc on the hotpath.
+ * NOTE: A more elegant solution would be to have some headroom in the frames
+ * being processed. This could be added by the dpaa-ethernet driver.
+ * This would pose a problem for userspace application processing which
+ * cannot know of this limitation. So for now, this will work.
+ * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here.
+ */
+static struct kmem_cache *qi_cache;
+
+int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
+{
+ struct qm_fd fd;
+ int ret;
+ int num_retries = 0;
+
+ fd.cmd = 0;
+ fd.format = qm_fd_compound;
+ fd.cong_weight = req->fd_sgt[1].length;
+ fd.addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(qidev, fd.addr)) {
+ dev_err(qidev, "DMA mapping error for QI enqueue request\n");
+ return -EIO;
+ }
+
+ do {
+ ret = qman_enqueue(req->drv_ctx->req_fq, &fd, 0);
+ if (likely(!ret))
+ return 0;
+
+ if (ret != -EBUSY)
+ break;
+ num_retries++;
+ } while (num_retries < CAAM_QI_ENQUEUE_RETRIES);
+
+ dev_err(qidev, "qman_enqueue failed: %d\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL(caam_qi_enqueue);
+
+static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
+ const struct qm_mr_entry *msg)
+{
+ const struct qm_fd *fd;
+ struct caam_drv_req *drv_req;
+ struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
+
+ fd = &msg->ern.fd;
+
+ if (fd->format != qm_fd_compound) {
+ dev_err(qidev, "Non-compound FD from CAAM\n");
+ return;
+ }
+
+ drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
+ if (!drv_req) {
+ dev_err(qidev,
+ "Can't find original request for CAAM response\n");
+ return;
+ }
+
+ dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
+ sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
+
+ drv_req->cbk(drv_req, -EIO);
+}
+
+static struct qman_fq *create_caam_req_fq(struct device *qidev,
+ struct qman_fq *rsp_fq,
+ dma_addr_t hwdesc,
+ int fq_sched_flag)
+{
+ int ret;
+ struct qman_fq *req_fq;
+ struct qm_mcc_initfq opts;
+
+ req_fq = kzalloc(sizeof(*req_fq), GFP_ATOMIC);
+ if (!req_fq)
+ return ERR_PTR(-ENOMEM);
+
+ req_fq->cb.ern = caam_fq_ern_cb;
+ req_fq->cb.fqs = NULL;
+
+ ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
+ QMAN_FQ_FLAG_TO_DCPORTAL | QMAN_FQ_FLAG_LOCKED,
+ req_fq);
+ if (ret) {
+ dev_err(qidev, "Failed to create session req FQ\n");
+ goto create_req_fq_fail;
+ }
+
+ opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
+ QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA |
+ QM_INITFQ_WE_CGID;
+ opts.fqd.fq_ctrl = QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE;
+ opts.fqd.dest.channel = qm_channel_caam;
+ opts.fqd.dest.wq = 2;
+ opts.fqd.context_b = qman_fq_fqid(rsp_fq);
+ opts.fqd.context_a.hi = upper_32_bits(hwdesc);
+ opts.fqd.context_a.lo = lower_32_bits(hwdesc);
+ opts.fqd.cgid = qipriv.cgr.cgrid;
+
+ ret = qman_init_fq(req_fq, fq_sched_flag, &opts);
+ if (ret) {
+ dev_err(qidev, "Failed to init session req FQ\n");
+ goto init_req_fq_fail;
+ }
+
+ dev_dbg(qidev, "Allocated request FQ %u for CPU %u\n", req_fq->fqid,
+ smp_processor_id());
+ return req_fq;
+
+init_req_fq_fail:
+ qman_destroy_fq(req_fq, 0);
+create_req_fq_fail:
+ kfree(req_fq);
+ return ERR_PTR(ret);
+}
+
+static int empty_retired_fq(struct device *qidev, struct qman_fq *fq)
+{
+ int ret;
+
+ ret = qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT_INT |
+ QMAN_VOLATILE_FLAG_FINISH,
+ QM_VDQCR_PRECEDENCE_VDQCR |
+ QM_VDQCR_NUMFRAMES_TILLEMPTY);
+ if (ret) {
+ dev_err(qidev, "Volatile dequeue fail for FQ: %u\n", fq->fqid);
+ return ret;
+ }
+
+ do {
+ struct qman_portal *p;
+
+ p = qman_get_affine_portal(smp_processor_id());
+ qman_p_poll_dqrr(p, 16);
+ } while (fq->flags & QMAN_FQ_STATE_NE);
+
+ return 0;
+}
+
+static int kill_fq(struct device *qidev, struct qman_fq *fq)
+{
+ u32 flags;
+ int ret;
+
+ ret = qman_retire_fq(fq, &flags);
+ if (ret < 0) {
+ dev_err(qidev, "qman_retire_fq failed: %d\n", ret);
+ return ret;
+ }
+
+ if (!ret)
+ goto empty_fq;
+
+ /* Async FQ retirement condition */
+ if (ret == 1) {
+ /* Retry till FQ gets in retired state */
+ do {
+ msleep(20);
+ } while (fq->state != qman_fq_state_retired);
+
+ WARN_ON(fq->flags & QMAN_FQ_STATE_BLOCKOOS);
+ WARN_ON(fq->flags & QMAN_FQ_STATE_ORL);
+ }
+
+empty_fq:
+ if (fq->flags & QMAN_FQ_STATE_NE) {
+ ret = empty_retired_fq(qidev, fq);
+ if (ret) {
+ dev_err(qidev, "empty_retired_fq fail for FQ: %u\n",
+ fq->fqid);
+ return ret;
+ }
+ }
+
+ ret = qman_oos_fq(fq);
+ if (ret)
+ dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid);
+
+ qman_destroy_fq(fq, 0);
+ kfree(fq);
+
+ return ret;
+}
+
+static int empty_caam_fq(struct qman_fq *fq)
+{
+ int ret;
+ struct qm_mcr_queryfq_np np;
+
+	/* Wait until the older CAAM FQ gets empty */
+ do {
+ ret = qman_query_fq_np(fq, &np);
+ if (ret)
+ return ret;
+
+ if (!np.frm_cnt)
+ break;
+
+ msleep(20);
+ } while (1);
+
+ /*
+ * Give extra time for pending jobs from this FQ in holding tanks
+ * to get processed
+ */
+ msleep(20);
+ return 0;
+}
+
+int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
+{
+ int ret;
+ u32 num_words;
+ struct qman_fq *new_fq, *old_fq;
+ struct device *qidev = drv_ctx->qidev;
+
+ num_words = desc_len(sh_desc);
+ if (num_words > MAX_SDLEN) {
+ dev_err(qidev, "Invalid descriptor len: %d words\n", num_words);
+ return -EINVAL;
+ }
+
+ /* Note down older req FQ */
+ old_fq = drv_ctx->req_fq;
+
+ /* Create a new req FQ in parked state */
+ new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq,
+ drv_ctx->context_a, 0);
+ if (unlikely(IS_ERR_OR_NULL(new_fq))) {
+ dev_err(qidev, "FQ allocation for shdesc update failed\n");
+ return PTR_ERR(new_fq);
+ }
+
+ /* Hook up new FQ to context so that new requests keep queuing */
+ drv_ctx->req_fq = new_fq;
+
+ /* Empty and remove the older FQ */
+ ret = empty_caam_fq(old_fq);
+ if (ret) {
+ dev_err(qidev, "Old CAAM FQ empty failed: %d\n", ret);
+
+ /* We can revert to older FQ */
+ drv_ctx->req_fq = old_fq;
+
+ if (kill_fq(qidev, new_fq))
+ dev_warn(qidev, "New CAAM FQ kill failed\n");
+
+ return ret;
+ }
+
+ /*
+ * Re-initialise pre-header. Set RSLS and SDLEN.
+ * Update the shared descriptor for driver context.
+ */
+ drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
+ num_words);
+ memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
+ dma_sync_single_for_device(qidev, drv_ctx->context_a,
+ sizeof(drv_ctx->sh_desc) +
+ sizeof(drv_ctx->prehdr),
+ DMA_BIDIRECTIONAL);
+
+ /* Put the new FQ in scheduled state */
+ ret = qman_schedule_fq(new_fq);
+ if (ret) {
+ dev_err(qidev, "Fail to sched new CAAM FQ, ecode = %d\n", ret);
+
+ /*
+		 * We can kill the new FQ and revert to the old FQ.
+		 * Since the desc is already modified, this is treated as success.
+ */
+
+ drv_ctx->req_fq = old_fq;
+
+ if (kill_fq(qidev, new_fq))
+ dev_warn(qidev, "New CAAM FQ kill failed\n");
+ } else if (kill_fq(qidev, old_fq)) {
+ dev_warn(qidev, "Old CAAM FQ kill failed\n");
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(caam_drv_ctx_update);
+
+struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
+ int *cpu,
+ u32 *sh_desc)
+{
+ size_t size;
+ u32 num_words;
+ dma_addr_t hwdesc;
+ struct caam_drv_ctx *drv_ctx;
+ const cpumask_t *cpus = qman_affine_cpus();
+
+ num_words = desc_len(sh_desc);
+ if (num_words > MAX_SDLEN) {
+ dev_err(qidev, "Invalid descriptor len: %d words\n",
+ num_words);
+ return ERR_PTR(-EINVAL);
+ }
+
+ drv_ctx = kzalloc(sizeof(*drv_ctx), GFP_ATOMIC);
+ if (!drv_ctx)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * Initialise pre-header - set RSLS and SDLEN - and shared descriptor
+ * and dma-map them.
+ */
+ drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
+ num_words);
+ memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
+ size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc);
+ hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(qidev, hwdesc)) {
+ dev_err(qidev, "DMA map error for preheader + shdesc\n");
+ kfree(drv_ctx);
+ return ERR_PTR(-ENOMEM);
+ }
+ drv_ctx->context_a = hwdesc;
+
+ /* If given CPU does not own the portal, choose another one that does */
+ if (!cpumask_test_cpu(*cpu, cpus)) {
+ int *pcpu = &get_cpu_var(last_cpu);
+
+ *pcpu = cpumask_next(*pcpu, cpus);
+ if (*pcpu >= nr_cpu_ids)
+ *pcpu = cpumask_first(cpus);
+ *cpu = *pcpu;
+
+ put_cpu_var(last_cpu);
+ }
+ drv_ctx->cpu = *cpu;
+
+ /* Find response FQ hooked with this CPU */
+ drv_ctx->rsp_fq = per_cpu(pcpu_qipriv.rsp_fq, drv_ctx->cpu);
+
+ /* Attach request FQ */
+ drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc,
+ QMAN_INITFQ_FLAG_SCHED);
+ if (unlikely(IS_ERR_OR_NULL(drv_ctx->req_fq))) {
+ dev_err(qidev, "create_caam_req_fq failed\n");
+ dma_unmap_single(qidev, hwdesc, size, DMA_BIDIRECTIONAL);
+ kfree(drv_ctx);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ drv_ctx->qidev = qidev;
+ return drv_ctx;
+}
+EXPORT_SYMBOL(caam_drv_ctx_init);
+
+void *qi_cache_alloc(gfp_t flags)
+{
+ return kmem_cache_alloc(qi_cache, flags);
+}
+EXPORT_SYMBOL(qi_cache_alloc);
+
+void qi_cache_free(void *obj)
+{
+ kmem_cache_free(qi_cache, obj);
+}
+EXPORT_SYMBOL(qi_cache_free);
+
+static int caam_qi_poll(struct napi_struct *napi, int budget)
+{
+ struct caam_napi *np = container_of(napi, struct caam_napi, irqtask);
+
+ int cleaned = qman_p_poll_dqrr(np->p, budget);
+
+ if (cleaned < budget) {
+ napi_complete(napi);
+ qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
+ }
+
+ return cleaned;
+}
+
+void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx)
+{
+ if (IS_ERR_OR_NULL(drv_ctx))
+ return;
+
+ /* Remove request FQ */
+ if (kill_fq(drv_ctx->qidev, drv_ctx->req_fq))
+ dev_err(drv_ctx->qidev, "Crypto session req FQ kill failed\n");
+
+ dma_unmap_single(drv_ctx->qidev, drv_ctx->context_a,
+ sizeof(drv_ctx->sh_desc) + sizeof(drv_ctx->prehdr),
+ DMA_BIDIRECTIONAL);
+ kfree(drv_ctx);
+}
+EXPORT_SYMBOL(caam_drv_ctx_rel);
+
+int caam_qi_shutdown(struct device *qidev)
+{
+ int i, ret;
+ struct caam_qi_priv *priv = dev_get_drvdata(qidev);
+ const cpumask_t *cpus = qman_affine_cpus();
+ struct cpumask old_cpumask = current->cpus_allowed;
+
+ for_each_cpu(i, cpus) {
+ struct napi_struct *irqtask;
+
+ irqtask = &per_cpu_ptr(&pcpu_qipriv.caam_napi, i)->irqtask;
+ napi_disable(irqtask);
+ netif_napi_del(irqtask);
+
+ if (kill_fq(qidev, per_cpu(pcpu_qipriv.rsp_fq, i)))
+ dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
+ }
+
+ /*
+	 * The QMan driver requires CGRs to be deleted from the same CPU on
+	 * which they were instantiated. Hence we make the module removal
+	 * execute on the same CPU on which it was originally inserted.
+ */
+ set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
+
+ ret = qman_delete_cgr(&priv->cgr);
+ if (ret)
+ dev_err(qidev, "Deletion of CGR failed: %d\n", ret);
+ else
+ qman_release_cgrid(priv->cgr.cgrid);
+
+ kmem_cache_destroy(qi_cache);
+
+ /* Now that we're done with the CGRs, restore the cpus allowed mask */
+ set_cpus_allowed_ptr(current, &old_cpumask);
+
+ platform_device_unregister(priv->qi_pdev);
+ return ret;
+}
+
+static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
+{
+ caam_congested = congested;
+
+ if (congested) {
+#ifdef CONFIG_DEBUG_FS
+ times_congested++;
+#endif
+ pr_debug_ratelimited("CAAM entered congestion\n");
+
+ } else {
+ pr_debug_ratelimited("CAAM exited congestion\n");
+ }
+}
+
+static int caam_qi_napi_schedule(struct qman_portal *p, struct caam_napi *np)
+{
+ /*
+ * In case of threaded ISR, for RT kernels in_irq() does not return
+ * appropriate value, so use in_serving_softirq to distinguish between
+ * softirq and irq contexts.
+ */
+ if (unlikely(in_irq() || !in_serving_softirq())) {
+ /* Disable QMan IRQ source and invoke NAPI */
+ qman_p_irqsource_remove(p, QM_PIRQ_DQRI);
+ np->p = p;
+ napi_schedule(&np->irqtask);
+ return 1;
+ }
+ return 0;
+}
+
+static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
+ struct qman_fq *rsp_fq,
+ const struct qm_dqrr_entry *dqrr)
+{
+ struct caam_napi *caam_napi = raw_cpu_ptr(&pcpu_qipriv.caam_napi);
+ struct caam_drv_req *drv_req;
+ const struct qm_fd *fd;
+ struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
+
+ if (caam_qi_napi_schedule(p, caam_napi))
+ return qman_cb_dqrr_stop;
+
+ fd = &dqrr->fd;
+ if (unlikely(fd->status))
+ dev_err(qidev, "Error: %#x in CAAM response FD\n", fd->status);
+
+	if (unlikely(fd->format != qm_fd_compound)) {
+ dev_err(qidev, "Non-compound FD from CAAM\n");
+ return qman_cb_dqrr_consume;
+ }
+
+ drv_req = (struct caam_drv_req *)phys_to_virt(fd->addr);
+ if (unlikely(!drv_req)) {
+ dev_err(qidev,
+ "Can't find original request for caam response\n");
+ return qman_cb_dqrr_consume;
+ }
+
+ dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
+ sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
+
+ drv_req->cbk(drv_req, fd->status);
+ return qman_cb_dqrr_consume;
+}
+
+static int alloc_rsp_fq_cpu(struct device *qidev, unsigned int cpu)
+{
+ struct qm_mcc_initfq opts;
+ struct qman_fq *fq;
+ int ret;
+
+ fq = kzalloc(sizeof(*fq), GFP_KERNEL | GFP_DMA);
+ if (!fq)
+ return -ENOMEM;
+
+ fq->cb.dqrr = caam_rsp_fq_dqrr_cb;
+
+ ret = qman_create_fq(0, QMAN_FQ_FLAG_NO_ENQUEUE |
+ QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
+ if (ret) {
+ dev_err(qidev, "Rsp FQ create failed\n");
+ kfree(fq);
+ return -ENODEV;
+ }
+
+ opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
+ QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA |
+ QM_INITFQ_WE_CGID;
+ opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING | QM_FQCTRL_CPCSTASH |
+ QM_FQCTRL_CGE;
+ opts.fqd.dest.channel = qman_affine_channel(cpu);
+ opts.fqd.dest.wq = 3;
+ opts.fqd.cgid = qipriv.cgr.cgrid;
+ opts.fqd.context_a.stashing.exclusive = QM_STASHING_EXCL_CTX |
+ QM_STASHING_EXCL_DATA;
+ opts.fqd.context_a.stashing.data_cl = 1;
+ opts.fqd.context_a.stashing.context_cl = 1;
+
+ ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
+ if (ret) {
+ dev_err(qidev, "Rsp FQ init failed\n");
+ kfree(fq);
+ return -ENODEV;
+ }
+
+ per_cpu(pcpu_qipriv.rsp_fq, cpu) = fq;
+
+ dev_dbg(qidev, "Allocated response FQ %u for CPU %u", fq->fqid, cpu);
+ return 0;
+}
+
+static int init_cgr(struct device *qidev)
+{
+ int ret;
+ struct qm_mcc_initcgr opts;
+ const u64 cpus = *(u64 *)qman_affine_cpus();
+ const int num_cpus = hweight64(cpus);
+ const u64 val = num_cpus * MAX_RSP_FQ_BACKLOG_PER_CPU;
+
+ ret = qman_alloc_cgrid(&qipriv.cgr.cgrid);
+ if (ret) {
+ dev_err(qidev, "CGR alloc failed for rsp FQs: %d\n", ret);
+ return ret;
+ }
+
+ qipriv.cgr.cb = cgr_cb;
+ memset(&opts, 0, sizeof(opts));
+ opts.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES | QM_CGR_WE_MODE;
+ opts.cgr.cscn_en = QM_CGR_EN;
+ opts.cgr.mode = QMAN_CGR_MODE_FRAME;
+ qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, val, 1);
+
+ ret = qman_create_cgr(&qipriv.cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
+ if (ret) {
+ dev_err(qidev, "Error %d creating CAAM CGRID: %u\n", ret,
+ qipriv.cgr.cgrid);
+ return ret;
+ }
+
+ dev_dbg(qidev, "Congestion threshold set to %llu\n", val);
+ return 0;
+}
+
+static int alloc_rsp_fqs(struct device *qidev)
+{
+ int ret, i;
+ const cpumask_t *cpus = qman_affine_cpus();
+
+	/* Now create the response FQs */
+ for_each_cpu(i, cpus) {
+ ret = alloc_rsp_fq_cpu(qidev, i);
+ if (ret) {
+ dev_err(qidev, "CAAM rsp FQ alloc failed, cpu: %u", i);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void free_rsp_fqs(void)
+{
+ int i;
+ const cpumask_t *cpus = qman_affine_cpus();
+
+ for_each_cpu(i, cpus)
+ kfree(per_cpu(pcpu_qipriv.rsp_fq, i));
+}
+
+int caam_qi_init(struct platform_device *caam_pdev)
+{
+ int err, i;
+ struct platform_device *qi_pdev;
+ struct device *ctrldev = &caam_pdev->dev, *qidev;
+ struct caam_drv_private *ctrlpriv;
+ const cpumask_t *cpus = qman_affine_cpus();
+ struct cpumask old_cpumask = current->cpus_allowed;
+ static struct platform_device_info qi_pdev_info = {
+ .name = "caam_qi",
+ .id = PLATFORM_DEVID_NONE
+ };
+
+ /*
+	 * QMan requires CGRs to be removed from the same CPU + portal on which
+	 * they were originally allocated. Hence we need to note down the
+	 * initialisation CPU and use the same CPU for module exit.
+	 * We select the first CPU from the list of portal-owning CPUs and pin
+	 * module init to it.
+ */
+ mod_init_cpu = cpumask_first(cpus);
+ set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
+
+ qi_pdev_info.parent = ctrldev;
+ qi_pdev_info.dma_mask = dma_get_mask(ctrldev);
+ qi_pdev = platform_device_register_full(&qi_pdev_info);
+ if (IS_ERR(qi_pdev))
+ return PTR_ERR(qi_pdev);
+ arch_setup_dma_ops(&qi_pdev->dev, 0, 0, NULL, true);
+
+ ctrlpriv = dev_get_drvdata(ctrldev);
+ qidev = &qi_pdev->dev;
+
+ qipriv.qi_pdev = qi_pdev;
+ dev_set_drvdata(qidev, &qipriv);
+
+ /* Initialize the congestion detection */
+ err = init_cgr(qidev);
+ if (err) {
+ dev_err(qidev, "CGR initialization failed: %d\n", err);
+ platform_device_unregister(qi_pdev);
+ return err;
+ }
+
+ /* Initialise response FQs */
+ err = alloc_rsp_fqs(qidev);
+ if (err) {
+ dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err);
+ free_rsp_fqs();
+ platform_device_unregister(qi_pdev);
+ return err;
+ }
+
+ /*
+ * Enable the NAPI contexts on each of the core which has an affine
+ * portal.
+ */
+ for_each_cpu(i, cpus) {
+ struct caam_qi_pcpu_priv *priv = per_cpu_ptr(&pcpu_qipriv, i);
+ struct caam_napi *caam_napi = &priv->caam_napi;
+ struct napi_struct *irqtask = &caam_napi->irqtask;
+ struct net_device *net_dev = &priv->net_dev;
+
+ net_dev->dev = *qidev;
+ INIT_LIST_HEAD(&net_dev->napi_list);
+
+ netif_napi_add(net_dev, irqtask, caam_qi_poll,
+ CAAM_NAPI_WEIGHT);
+
+ napi_enable(irqtask);
+ }
+
+ /* Hook up QI device to parent controlling caam device */
+ ctrlpriv->qidev = qidev;
+
+ qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0,
+ SLAB_CACHE_DMA, NULL);
+ if (!qi_cache) {
+ dev_err(qidev, "Can't allocate CAAM cache\n");
+ free_rsp_fqs();
+ platform_device_unregister(qi_pdev);
+ return -ENOMEM;
+ }
+
+ /* Done with the CGRs; restore the cpus allowed mask */
+ set_cpus_allowed_ptr(current, &old_cpumask);
+#ifdef CONFIG_DEBUG_FS
+ debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl,
+ &times_congested, &caam_fops_u64_ro);
+#endif
+ dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
+ return 0;
+}
diff --git a/drivers/crypto/caam/qi.h b/drivers/crypto/caam/qi.h
new file mode 100644
index 0000000..0c2e68b
--- /dev/null
+++ b/drivers/crypto/caam/qi.h
@@ -0,0 +1,204 @@
+/*
+ * Public definitions for the CAAM/QI (Queue Interface) backend.
+ *
+ * Copyright 2013-2016 Freescale Semiconductor, Inc.
+ * Copyright 2016-2017 NXP
+ */
+
+#ifndef __QI_H__
+#define __QI_H__
+
+#include <linux/fsl_qman.h>
+#include "compat.h"
+#include "desc.h"
+#include "desc_constr.h"
+
+/*
+ * CAAM hardware constructs a job descriptor which points to a shared descriptor
+ * (as pointed to by context_a of the to-CAAM FQ).
+ * When the job descriptor is executed by DECO, the whole job descriptor
+ * together with shared descriptor gets loaded in DECO buffer, which is
+ * 64 words (each 32-bit) long.
+ *
+ * The job descriptor constructed by CAAM hardware has the following layout:
+ *
+ * HEADER (1 word)
+ * Shdesc ptr (1 or 2 words)
+ * SEQ_OUT_PTR (1 word)
+ * Out ptr (1 or 2 words)
+ * Out length (1 word)
+ * SEQ_IN_PTR (1 word)
+ * In ptr (1 or 2 words)
+ * In length (1 word)
+ *
+ * The shdesc ptr is used to fetch shared descriptor contents into DECO buffer.
+ *
+ * Apart from the shdesc contents, the total number of words that get loaded
+ * in the DECO buffer is either 8 or 11. The remaining words in the DECO
+ * buffer can be used for storing the shared descriptor.
+ */
+#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
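As a worked example (assuming CAAM_CMD_SZ is the 4-byte command word size): with the 64-word (256-byte) DECO buffer and the worst-case 11-word (44-byte) job descriptor described above, MAX_SDLEN comes out to (256 - 44) / 4 = 53 words; with 1-word pointers (the 8-word case) it would be (256 - 32) / 4 = 56 words available for the shared descriptor.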
+
+/* Length of a single buffer in the QI driver memory cache */
+#define CAAM_QI_MEMCACHE_SIZE 768
+
+extern bool caam_congested __read_mostly;
+
+/*
+ * This is the request structure the driver application should fill while
+ * submitting a job to driver.
+ */
+struct caam_drv_req;
+
+/*
+ * caam_qi_cbk - application's callback function invoked by the driver when the
+ * request has been successfully processed.
+ * @drv_req: original request that was submitted
+ * @status: completion status of request (0 - success, non-zero - error code)
+ */
+typedef void (*caam_qi_cbk)(struct caam_drv_req *drv_req, u32 status);
+
+enum optype {
+ ENCRYPT,
+ DECRYPT,
+ GIVENCRYPT,
+ NUM_OP
+};
+
+/**
+ * caam_drv_ctx - CAAM/QI backend driver context
+ *
+ * Jobs are processed by the driver against a driver context.
+ * A driver context is attached to every cryptographic context.
+ * The driver context contains data for private use by the driver.
+ * To the application, it is an opaque structure.
+ *
+ * @prehdr: preheader placed before the shared descriptor
+ * @sh_desc: shared descriptor
+ * @context_a: shared descriptor dma address
+ * @req_fq: to-CAAM request frame queue
+ * @rsp_fq: from-CAAM response frame queue
+ * @cpu: cpu on which to receive CAAM response
+ * @op_type: operation type
+ * @qidev: device pointer for CAAM/QI backend
+ */
+struct caam_drv_ctx {
+ u32 prehdr[2];
+ u32 sh_desc[MAX_SDLEN];
+ dma_addr_t context_a;
+ struct qman_fq *req_fq;
+ struct qman_fq *rsp_fq;
+ int cpu;
+ enum optype op_type;
+ struct device *qidev;
+} ____cacheline_aligned;
+
+/**
+ * caam_drv_req - The request structure a driver application fills in when
+ * submitting a job to the driver.
+ * @fd_sgt: QMan S/G entries pointing to the output (fd_sgt[0]) and input
+ * (fd_sgt[1]) buffers
+ * @drv_ctx: driver context against which the request is submitted
+ * @cbk: callback function to invoke when the job is completed
+ * @app_ctx: arbitrary context attached to the request by the application
+ */
+struct caam_drv_req {
+ struct qm_sg_entry fd_sgt[2];
+ struct caam_drv_ctx *drv_ctx;
+ caam_qi_cbk cbk;
+ void *app_ctx;
+} ____cacheline_aligned;
+
+/**
+ * caam_drv_ctx_init - Initialise a CAAM/QI driver context
+ *
+ * A CAAM/QI driver context must be attached to each cryptographic context.
+ * This function allocates memory for a CAAM/QI driver context and returns a
+ * handle to the application. The application must pass this handle along
+ * with every request it enqueues to the driver.
+ *
+ * @qidev: device pointer for the CAAM/QI backend
+ * @cpu: CPU on which the application prefers the driver to receive CAAM
+ * responses; the request completion callback is issued from this CPU
+ * @sh_desc: shared descriptor to be attached to the CAAM/QI driver
+ * context
+ *
+ * Returns a driver context on success or an ERR_PTR() encoding a negative
+ * error code on failure.
+ */
+struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev, int *cpu,
+ u32 *sh_desc);
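A hedged sketch of context creation (names are illustrative; building sh_desc with the desc_constr.h inlines is out of scope, and it is only assumed to fit within MAX_SDLEN words):

/* 'qidev' is the CAAM/QI backend device, 'sh_desc' a fully built shared descriptor. */
static struct caam_drv_ctx *my_ctx_create(struct device *qidev, u32 *sh_desc)
{
	int cpu = raw_smp_processor_id();	/* preferred response CPU; the backend may pick another */

	return caam_drv_ctx_init(qidev, &cpu, sh_desc);
}

The returned handle (checked with IS_ERR()) accompanies every caam_qi_enqueue() call; caam_drv_ctx_update() refreshes the shared descriptor on rekeying, and caam_drv_ctx_rel() releases the context.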
+
+/**
+ * caam_qi_enqueue - Submit a request to QI backend driver.
+ *
+ * The request structure must be properly filled as described above.
+ *
+ * @qidev: device pointer for QI backend
+ * @req: CAAM QI request structure
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req);
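Building on the hypothetical my_req/my_req_done sketch above, a simplified synchronous submission path might look as follows; DMA mapping of dst/src and error unwinding are elided, and the S/G helpers used are the ones added by sg_sw_qm.h later in this patch:

static int my_submit(struct device *qidev, struct caam_drv_ctx *drv_ctx,
		     struct my_req *req, dma_addr_t dst, u32 dst_len,
		     dma_addr_t src, u32 src_len)
{
	int ret;

	if (caam_drv_ctx_busy(drv_ctx))
		return -EBUSY;	/* back off while CAAM/QI is congested */

	/* Compound frame: fd_sgt[0] describes the output, fd_sgt[1] the input. */
	dma_to_qm_sg_one(&req->drv_req.fd_sgt[0], dst, dst_len, 0);
	dma_to_qm_sg_one_last(&req->drv_req.fd_sgt[1], src, src_len, 0);

	req->drv_req.drv_ctx = drv_ctx;
	req->drv_req.cbk = my_req_done;
	req->drv_req.app_ctx = req;
	init_completion(&req->done);

	ret = caam_qi_enqueue(qidev, &req->drv_req);
	if (ret)
		return ret;

	wait_for_completion(&req->done);
	return req->status;
}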
+
+/**
+ * caam_drv_ctx_busy - Check whether there are too many jobs pending with CAAM
+ * or too many CAAM responses pending to be processed
+ * @drv_ctx: driver context for which a job is to be submitted
+ *
+ * Returns 'true' if the CAAM/QI backend is congested, 'false' otherwise.
+ */
+bool caam_drv_ctx_busy(struct caam_drv_ctx *drv_ctx);
+
+/**
+ * caam_drv_ctx_update - Update QI driver context
+ *
+ * Invoked when the shared descriptor attached to the driver context needs to
+ * be changed.
+ *
+ * @drv_ctx: driver context to be updated
+ * @sh_desc: new shared descriptor to be set in the QI driver context
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc);
+
+/**
+ * caam_drv_ctx_rel - Release a QI driver context
+ * @drv_ctx: context to be released
+ */
+void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx);
+
+int caam_qi_init(struct platform_device *pdev);
+int caam_qi_shutdown(struct device *dev);
+
+/**
+ * qi_cache_alloc - Allocate buffers from CAAM-QI cache
+ *
+ * Invoked when a user of the CAAM-QI backend (e.g. caamalg-qi) needs memory
+ * allocated on the hotpath. Instead of using kmalloc, one can use the
+ * services of the CAAM QI memory cache (backed by a kmem_cache). The buffers
+ * have a size of CAAM_QI_MEMCACHE_SIZE bytes, which is sufficient for
+ * hosting 16 SG entries.
+ *
+ * @flags: flags that would be used for the equivalent kmalloc(..) call
+ *
+ * Returns a pointer to a retrieved buffer on success or NULL on failure.
+ */
+void *qi_cache_alloc(gfp_t flags);
+
+/**
+ * qi_cache_free - Free a buffer allocated from the CAAM-QI cache
+ *
+ * Invoked when a user of the CAAM-QI backend (e.g. caamalg-qi) no longer
+ * needs a buffer previously allocated by a qi_cache_alloc() call.
+ * No checking is done; the call is passed straight through to
+ * kmem_cache_free(...).
+ *
+ * @obj: object previously allocated using qi_cache_alloc()
+ */
+void qi_cache_free(void *obj);
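For illustration only (struct my_edesc is a hypothetical extended-descriptor type), allocation and release are expected to be paired like this:

#include <linux/string.h>

struct my_edesc {
	struct caam_drv_req drv_req;
	struct qm_sg_entry sgt[16];	/* fits in one CAAM_QI_MEMCACHE_SIZE buffer */
};

static struct my_edesc *my_edesc_alloc(gfp_t flags)
{
	/* Backed by the "caamqicache" kmem_cache created in caam_qi_init(). */
	struct my_edesc *edesc = qi_cache_alloc(flags);

	if (edesc)
		memset(edesc, 0, sizeof(*edesc));
	return edesc;
}

static void my_edesc_free(struct my_edesc *edesc)
{
	qi_cache_free(edesc);	/* passthrough to kmem_cache_free() */
}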
+
+#endif /* __QI_H__ */
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 84d2f83..74eb8c6 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -2,6 +2,7 @@
* CAAM hardware register-level view
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
*/
#ifndef REGS_H
@@ -67,6 +68,7 @@
*/
extern bool caam_little_end;
+extern bool caam_imx;
#define caam_to_cpu(len) \
static inline u##len caam##len ## _to_cpu(u##len val) \
@@ -154,13 +156,10 @@ static inline u64 rd_reg64(void __iomem *reg)
#else /* CONFIG_64BIT */
static inline void wr_reg64(void __iomem *reg, u64 data)
{
-#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
- if (caam_little_end) {
+ if (!caam_imx && caam_little_end) {
wr_reg32((u32 __iomem *)(reg) + 1, data >> 32);
wr_reg32((u32 __iomem *)(reg), data);
- } else
-#endif
- {
+ } else {
wr_reg32((u32 __iomem *)(reg), data >> 32);
wr_reg32((u32 __iomem *)(reg) + 1, data);
}
@@ -168,41 +167,40 @@ static inline void wr_reg64(void __iomem *reg, u64 data)
static inline u64 rd_reg64(void __iomem *reg)
{
-#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
- if (caam_little_end)
+ if (!caam_imx && caam_little_end)
return ((u64)rd_reg32((u32 __iomem *)(reg) + 1) << 32 |
(u64)rd_reg32((u32 __iomem *)(reg)));
- else
-#endif
- return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 |
- (u64)rd_reg32((u32 __iomem *)(reg) + 1));
+
+ return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 |
+ (u64)rd_reg32((u32 __iomem *)(reg) + 1));
}
#endif /* CONFIG_64BIT */
+static inline u64 cpu_to_caam_dma64(dma_addr_t value)
+{
+ if (caam_imx)
+ return (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) |
+ (u64)cpu_to_caam32(upper_32_bits(value)));
+
+ return cpu_to_caam64(value);
+}
+
+static inline u64 caam_dma64_to_cpu(u64 value)
+{
+ if (caam_imx)
+ return (((u64)caam32_to_cpu(lower_32_bits(value)) << 32) |
+ (u64)caam32_to_cpu(upper_32_bits(value)));
+
+ return caam64_to_cpu(value);
+}
+
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-#ifdef CONFIG_SOC_IMX7D
-#define cpu_to_caam_dma(value) \
- (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \
- (u64)cpu_to_caam32(upper_32_bits(value)))
-#define caam_dma_to_cpu(value) \
- (((u64)caam32_to_cpu(lower_32_bits(value)) << 32) | \
- (u64)caam32_to_cpu(upper_32_bits(value)))
-#else
-#define cpu_to_caam_dma(value) cpu_to_caam64(value)
-#define caam_dma_to_cpu(value) caam64_to_cpu(value)
-#endif /* CONFIG_SOC_IMX7D */
+#define cpu_to_caam_dma(value) cpu_to_caam_dma64(value)
+#define caam_dma_to_cpu(value) caam_dma64_to_cpu(value)
#else
#define cpu_to_caam_dma(value) cpu_to_caam32(value)
#define caam_dma_to_cpu(value) caam32_to_cpu(value)
-#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */
-
-#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
-#define cpu_to_caam_dma64(value) \
- (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \
- (u64)cpu_to_caam32(upper_32_bits(value)))
-#else
-#define cpu_to_caam_dma64(value) cpu_to_caam64(value)
-#endif
+#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */
/*
* jr_outentry
@@ -293,6 +291,7 @@ struct caam_perfmon {
u32 cha_rev_ls; /* CRNR - CHA Rev No. Least significant half*/
#define CTPR_MS_QI_SHIFT 25
#define CTPR_MS_QI_MASK (0x1ull << CTPR_MS_QI_SHIFT)
+#define CTPR_MS_DPAA2 BIT(13)
#define CTPR_MS_VIRT_EN_INCL 0x00000001
#define CTPR_MS_VIRT_EN_POR 0x00000002
#define CTPR_MS_PG_SZ_MASK 0x10
@@ -628,6 +627,8 @@ struct caam_job_ring {
#define JRSTA_DECOERR_INVSIGN 0x86
#define JRSTA_DECOERR_DSASIGN 0x87
+#define JRSTA_QIERR_ERROR_MASK 0x00ff
+
#define JRSTA_CCBERR_JUMP 0x08000000
#define JRSTA_CCBERR_INDEX_MASK 0xff00
#define JRSTA_CCBERR_INDEX_SHIFT 8
diff --git a/drivers/crypto/caam/sg_sw_qm.h b/drivers/crypto/caam/sg_sw_qm.h
new file mode 100644
index 0000000..3b3cabc
--- /dev/null
+++ b/drivers/crypto/caam/sg_sw_qm.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright 2013-2016 Freescale Semiconductor, Inc.
+ * Copyright 2016-2017 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __SG_SW_QM_H
+#define __SG_SW_QM_H
+
+#include <linux/fsl_qman.h>
+#include "regs.h"
+
+static inline void cpu_to_hw_sg(struct qm_sg_entry *qm_sg_ptr)
+{
+ dma_addr_t addr = qm_sg_ptr->opaque;
+
+ qm_sg_ptr->opaque = cpu_to_caam64(addr);
+ qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl);
+}
+
+static inline void __dma_to_qm_sg(struct qm_sg_entry *qm_sg_ptr, dma_addr_t dma,
+ u32 len, u16 offset)
+{
+ qm_sg_ptr->addr = dma;
+ qm_sg_ptr->length = len;
+ qm_sg_ptr->__reserved2 = 0;
+ qm_sg_ptr->bpid = 0;
+ qm_sg_ptr->__reserved3 = 0;
+ qm_sg_ptr->offset = offset & QM_SG_OFFSET_MASK;
+
+ cpu_to_hw_sg(qm_sg_ptr);
+}
+
+static inline void dma_to_qm_sg_one(struct qm_sg_entry *qm_sg_ptr,
+ dma_addr_t dma, u32 len, u16 offset)
+{
+ qm_sg_ptr->extension = 0;
+ qm_sg_ptr->final = 0;
+ __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
+}
+
+static inline void dma_to_qm_sg_one_last(struct qm_sg_entry *qm_sg_ptr,
+ dma_addr_t dma, u32 len, u16 offset)
+{
+ qm_sg_ptr->extension = 0;
+ qm_sg_ptr->final = 1;
+ __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
+}
+
+static inline void dma_to_qm_sg_one_ext(struct qm_sg_entry *qm_sg_ptr,
+ dma_addr_t dma, u32 len, u16 offset)
+{
+ qm_sg_ptr->extension = 1;
+ qm_sg_ptr->final = 0;
+ __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
+}
+
+static inline void dma_to_qm_sg_one_last_ext(struct qm_sg_entry *qm_sg_ptr,
+ dma_addr_t dma, u32 len,
+ u16 offset)
+{
+ qm_sg_ptr->extension = 1;
+ qm_sg_ptr->final = 1;
+ __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
+}
+
+/*
+ * Convert a scatterlist to the h/w link table format, but do not set the
+ * final bit; instead, return the last entry so the caller can set it.
+ */
+static inline struct qm_sg_entry *
+sg_to_qm_sg(struct scatterlist *sg, int sg_count,
+ struct qm_sg_entry *qm_sg_ptr, u16 offset)
+{
+ while (sg_count && sg) {
+ dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
+ sg_dma_len(sg), offset);
+ qm_sg_ptr++;
+ sg = sg_next(sg);
+ sg_count--;
+ }
+ return qm_sg_ptr - 1;
+}
+
+/*
+ * Convert a scatterlist to the h/w link table format and set the final bit
+ * on the last entry. The scatterlist must have been previously DMA mapped.
+ */
+static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
+ struct qm_sg_entry *qm_sg_ptr, u16 offset)
+{
+ qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
+
+ qm_sg_ptr->sgt_efl = caam32_to_cpu(qm_sg_ptr->sgt_efl);
+ qm_sg_ptr->final = 1;
+ qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl);
+}
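A short usage sketch (names are illustrative), assuming the caller sized qm_sgt for at least nents entries:

static int my_map_src(struct device *dev, struct scatterlist *src, int nents,
		      struct qm_sg_entry *qm_sgt)
{
	int mapped = dma_map_sg(dev, src, nents, DMA_TO_DEVICE);

	if (!mapped)
		return -ENOMEM;

	/*
	 * Entries are written in CAAM endianness; the last one carries the
	 * final bit so the hardware knows where the table ends.
	 */
	sg_to_qm_sg_last(src, mapped, qm_sgt, 0);
	return mapped;
}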
+
+#endif /* __SG_SW_QM_H */
diff --git a/drivers/crypto/caam/sg_sw_qm2.h b/drivers/crypto/caam/sg_sw_qm2.h
new file mode 100644
index 0000000..31b4407
--- /dev/null
+++ b/drivers/crypto/caam/sg_sw_qm2.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the names of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SG_SW_QM2_H_
+#define _SG_SW_QM2_H_
+
+#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
+
+static inline void dma_to_qm_sg_one(struct dpaa2_sg_entry *qm_sg_ptr,
+ dma_addr_t dma, u32 len, u16 offset)
+{
+ dpaa2_sg_set_addr(qm_sg_ptr, dma);
+ dpaa2_sg_set_format(qm_sg_ptr, dpaa2_sg_single);
+ dpaa2_sg_set_final(qm_sg_ptr, false);
+ dpaa2_sg_set_len(qm_sg_ptr, len);
+ dpaa2_sg_set_bpid(qm_sg_ptr, 0);
+ dpaa2_sg_set_offset(qm_sg_ptr, offset);
+}
+
+/*
+ * Convert a scatterlist to the h/w link table format, but do not set the
+ * final bit; instead, return the last entry so the caller can set it.
+ */
+static inline struct dpaa2_sg_entry *
+sg_to_qm_sg(struct scatterlist *sg, int sg_count,
+ struct dpaa2_sg_entry *qm_sg_ptr, u16 offset)
+{
+ while (sg_count && sg) {
+ dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
+ sg_dma_len(sg), offset);
+ qm_sg_ptr++;
+ sg = sg_next(sg);
+ sg_count--;
+ }
+ return qm_sg_ptr - 1;
+}
+
+/*
+ * Convert a scatterlist to the h/w link table format and set the final bit
+ * on the last entry. The scatterlist must have been previously DMA mapped.
+ */
+static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
+ struct dpaa2_sg_entry *qm_sg_ptr,
+ u16 offset)
+{
+ qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
+ dpaa2_sg_set_final(qm_sg_ptr, true);
+}
+
+#endif /* _SG_SW_QM2_H_ */
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
index 41cd5a3..936b1b6 100644
--- a/drivers/crypto/caam/sg_sw_sec4.h
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -5,9 +5,19 @@
*
*/
+#ifndef _SG_SW_SEC4_H_
+#define _SG_SW_SEC4_H_
+
+#include "ctrl.h"
#include "regs.h"
+#include "sg_sw_qm2.h"
+#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
-struct sec4_sg_entry;
+struct sec4_sg_entry {
+ u64 ptr;
+ u32 len;
+ u32 bpid_offset;
+};
/*
* convert single dma address to h/w link table format
@@ -15,9 +25,15 @@ struct sec4_sg_entry;
static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
dma_addr_t dma, u32 len, u16 offset)
{
- sec4_sg_ptr->ptr = cpu_to_caam_dma64(dma);
- sec4_sg_ptr->len = cpu_to_caam32(len);
- sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset & SEC4_SG_OFFSET_MASK);
+ if (caam_dpaa2) {
+ dma_to_qm_sg_one((struct dpaa2_sg_entry *)sec4_sg_ptr, dma, len,
+ offset);
+ } else {
+ sec4_sg_ptr->ptr = cpu_to_caam_dma64(dma);
+ sec4_sg_ptr->len = cpu_to_caam32(len);
+ sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset &
+ SEC4_SG_OFFSET_MASK);
+ }
#ifdef DEBUG
print_hex_dump(KERN_ERR, "sec4_sg_ptr@: ",
DUMP_PREFIX_ADDRESS, 16, 4, sec4_sg_ptr,
@@ -43,6 +59,14 @@ sg_to_sec4_sg(struct scatterlist *sg, int sg_count,
return sec4_sg_ptr - 1;
}
+static inline void sg_to_sec4_set_last(struct sec4_sg_entry *sec4_sg_ptr)
+{
+ if (caam_dpaa2)
+ dpaa2_sg_set_final((struct dpaa2_sg_entry *)sec4_sg_ptr, true);
+ else
+ sec4_sg_ptr->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
+}
+
/*
* convert scatterlist to h/w link table format
* scatterlist must have been previously dma mapped
@@ -52,31 +76,7 @@ static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int sg_count,
u16 offset)
{
sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset);
- sec4_sg_ptr->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
-}
-
-static inline struct sec4_sg_entry *sg_to_sec4_sg_len(
- struct scatterlist *sg, unsigned int total,
- struct sec4_sg_entry *sec4_sg_ptr)
-{
- do {
- unsigned int len = min(sg_dma_len(sg), total);
-
- dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg), len, 0);
- sec4_sg_ptr++;
- sg = sg_next(sg);
- total -= len;
- } while (total);
- return sec4_sg_ptr - 1;
+ sg_to_sec4_set_last(sec4_sg_ptr);
}
-/* derive number of elements in scatterlist, but return 0 for 1 */
-static inline int sg_count(struct scatterlist *sg_list, int nbytes)
-{
- int sg_nents = sg_nents_for_len(sg_list, nbytes);
-
- if (likely(sg_nents == 1))
- return 0;
-
- return sg_nents;
-}
+#endif /* _SG_SW_SEC4_H_ */
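The caam_dpaa2 branch in dma_to_sec4_sg_one() reinterprets a sec4_sg_entry table as dpaa2_sg_entry in place; a compile-time check along these lines (hypothetical, not part of the patch) would make the assumed common 16-byte layout explicit:

	/* Could live in any function that sees both definitions, e.g. an init
	 * routine of a user of this header; purely illustrative.
	 */
	BUILD_BUG_ON(sizeof(struct sec4_sg_entry) !=
		     sizeof(struct dpaa2_sg_entry));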