summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorYashpal Dutta <yashpal.dutta@freescale.com>2014-03-22 22:59:34 (GMT)
committerJose Rivera <German.Rivera@freescale.com>2014-03-24 16:09:35 (GMT)
commitfc281aa59e4c6f2e1574d4a6812438a6e06be0a9 (patch)
treefa46b059eae0492f4a78bf3d65bc04560f9397c2
parent67659d841becfc172216df76f058db25588f87cf (diff)
downloadlinux-fsl-qoriq-fc281aa59e4c6f2e1574d4a6812438a6e06be0a9.tar.xz
added support for PKC keygen
As a part of PKC support, RSA, DSA, DH, ECDH and ECDSA require key generation. The patch adds key generation support for DSA, ECDSA, DH and ECDH. The patch also adds DH operation support. Signed-off-by: Yashpal Dutta <yashpal.dutta@freescale.com> Change-Id: I0dc9c144a23e2248bf8974a1615363341dc4886e Reviewed-on: http://git.am.freescale.net:8181/5867 Tested-by: Review Code-CDREVIEW <CDREVIEW@freescale.com> Reviewed-by: Geanta Neag Horia Ioan-B05471 <horia.geanta@freescale.com> Reviewed-by: Rivera Jose-B46482 <German.Rivera@freescale.com> Reviewed-on: http://git.am.freescale.net:8181/9551 Reviewed-by: Ruchika Gupta <ruchika.gupta@freescale.com> Reviewed-by: Jose Rivera <German.Rivera@freescale.com>
-rw-r--r--drivers/crypto/caam/caampkc.c408
-rw-r--r--drivers/crypto/caam/pdb.h47
-rw-r--r--drivers/crypto/caam/pkc_desc.c132
-rw-r--r--drivers/crypto/caam/pkc_desc.h21
4 files changed, 557 insertions, 51 deletions
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index e1bf0db..8023e55 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -154,55 +154,106 @@ static void rsa_op_done(struct device *dev, u32 *desc, u32 err, void *context)
pkc_request_complete(req, err);
}
+static void dh_unmap(struct device *dev,
+ struct dh_edesc_s *edesc, struct pkc_request *req)
+{
+ struct dh_key_req_s *dh_req = &req->req_u.dh_req;
+ struct dh_key_desc_s *dh_desc =
+ (struct dh_key_desc_s *)edesc->hw_desc;
+ dma_unmap_single(dev, dh_desc->q_dma,
+ dh_req->q_len, DMA_TO_DEVICE);
+ dma_unmap_single(dev, dh_desc->w_dma,
+ dh_req->pub_key_len, DMA_TO_DEVICE);
+ dma_unmap_single(dev, dh_desc->s_dma,
+ dh_req->s_len, DMA_TO_DEVICE);
+ dma_unmap_single(dev, dh_desc->z_dma,
+ dh_req->z_len, DMA_FROM_DEVICE);
+ if (edesc->req_type == ECDH_COMPUTE_KEY)
+ dma_unmap_single(dev, dh_desc->ab_dma,
+ dh_req->ab_len, DMA_TO_DEVICE);
+}
+
static void dsa_unmap(struct device *dev,
struct dsa_edesc_s *edesc, struct pkc_request *req)
{
switch (req->type) {
case DSA_SIGN:
- {
- struct dsa_sign_req_s *dsa_req = &req->req_u.dsa_sign;
- struct dsa_sign_desc_s *dsa_desc =
- (struct dsa_sign_desc_s *)edesc->hw_desc;
- dma_unmap_single(dev, dsa_desc->q_dma,
- dsa_req->q_len, DMA_TO_DEVICE);
- dma_unmap_single(dev, dsa_desc->r_dma,
- dsa_req->r_len, DMA_TO_DEVICE);
- dma_unmap_single(dev, dsa_desc->g_dma,
- dsa_req->g_len, DMA_TO_DEVICE);
- dma_unmap_single(dev, dsa_desc->s_dma,
- dsa_req->priv_key_len, DMA_TO_DEVICE);
- dma_unmap_single(dev, dsa_desc->f_dma,
- dsa_req->m_len, DMA_TO_DEVICE);
- dma_unmap_single(dev, dsa_desc->c_dma,
- dsa_req->d_len, DMA_FROM_DEVICE);
- dma_unmap_single(dev, dsa_desc->d_dma,
- dsa_req->d_len, DMA_FROM_DEVICE);
- }
+ case ECDSA_SIGN:
+ {
+ struct dsa_sign_req_s *dsa_req = &req->req_u.dsa_sign;
+ struct dsa_sign_desc_s *dsa_desc =
+ (struct dsa_sign_desc_s *)edesc->hw_desc;
+ dma_unmap_single(dev, dsa_desc->q_dma,
+ dsa_req->q_len, DMA_TO_DEVICE);
+ dma_unmap_single(dev, dsa_desc->r_dma,
+ dsa_req->r_len, DMA_TO_DEVICE);
+ dma_unmap_single(dev, dsa_desc->g_dma,
+ dsa_req->g_len, DMA_TO_DEVICE);
+ dma_unmap_single(dev, dsa_desc->s_dma,
+ dsa_req->priv_key_len, DMA_TO_DEVICE);
+ dma_unmap_single(dev, dsa_desc->f_dma,
+ dsa_req->m_len, DMA_TO_DEVICE);
+ dma_unmap_single(dev, dsa_desc->c_dma,
+ dsa_req->d_len, DMA_FROM_DEVICE);
+ dma_unmap_single(dev, dsa_desc->d_dma,
+ dsa_req->d_len, DMA_FROM_DEVICE);
+ if (req->type == ECDSA_SIGN)
+ dma_unmap_single(dev, edesc->ab_dma,
+ dsa_req->ab_len, DMA_TO_DEVICE);
+ }
break;
case DSA_VERIFY:
- {
- struct dsa_verify_req_s *dsa_req =
- &req->req_u.dsa_verify;
- struct dsa_verify_desc_s *dsa_desc =
- (struct dsa_verify_desc_s *)edesc->hw_desc;
- dma_unmap_single(dev, dsa_desc->q_dma,
- dsa_req->q_len, DMA_TO_DEVICE);
- dma_unmap_single(dev, dsa_desc->r_dma,
- dsa_req->r_len, DMA_TO_DEVICE);
- dma_unmap_single(dev, dsa_desc->g_dma,
- dsa_req->g_len, DMA_TO_DEVICE);
- dma_unmap_single(dev, dsa_desc->w_dma,
- dsa_req->pub_key_len, DMA_TO_DEVICE);
- dma_unmap_single(dev, dsa_desc->f_dma,
- dsa_req->m_len, DMA_TO_DEVICE);
- dma_unmap_single(dev, dsa_desc->c_dma,
- dsa_req->d_len, DMA_TO_DEVICE);
- dma_unmap_single(dev, dsa_desc->d_dma,
- dsa_req->d_len, DMA_TO_DEVICE);
+ case ECDSA_VERIFY:
+ {
+ struct dsa_verify_req_s *dsa_req = &req->req_u.dsa_verify;
+ struct dsa_verify_desc_s *dsa_desc =
+ (struct dsa_verify_desc_s *)edesc->hw_desc;
+ dma_unmap_single(dev, dsa_desc->q_dma,
+ dsa_req->q_len, DMA_TO_DEVICE);
+ dma_unmap_single(dev, dsa_desc->r_dma,
+ dsa_req->r_len, DMA_TO_DEVICE);
+ dma_unmap_single(dev, dsa_desc->g_dma,
+ dsa_req->g_len, DMA_TO_DEVICE);
+ dma_unmap_single(dev, dsa_desc->w_dma,
+ dsa_req->pub_key_len, DMA_TO_DEVICE);
+ dma_unmap_single(dev, dsa_desc->f_dma,
+ dsa_req->m_len, DMA_TO_DEVICE);
+ dma_unmap_single(dev, dsa_desc->c_dma,
+ dsa_req->d_len, DMA_TO_DEVICE);
+ dma_unmap_single(dev, dsa_desc->d_dma,
+ dsa_req->d_len, DMA_TO_DEVICE);
+ if (req->type == ECDSA_VERIFY) {
+ dma_unmap_single(dev, dsa_desc->tmp_dma,
+ 2*edesc->l_len, DMA_BIDIRECTIONAL);
+ dma_unmap_single(dev, edesc->ab_dma,
+ dsa_req->ab_len, DMA_TO_DEVICE);
+ } else {
dma_unmap_single(dev, dsa_desc->tmp_dma,
- edesc->l_len, DMA_BIDIRECTIONAL);
- kfree(edesc->tmp);
+ edesc->l_len, DMA_BIDIRECTIONAL);
}
+ kfree(edesc->tmp);
+ }
+ break;
+ case DLC_KEYGEN:
+ case ECC_KEYGEN:
+ {
+ struct keygen_req_s *key_req = &req->req_u.keygen;
+ struct dlc_keygen_desc_s *key_desc =
+ (struct dlc_keygen_desc_s *)edesc->hw_desc;
+ dma_unmap_single(dev, key_desc->q_dma,
+ key_req->q_len, DMA_TO_DEVICE);
+ dma_unmap_single(dev, key_desc->r_dma,
+ key_req->r_len, DMA_TO_DEVICE);
+ dma_unmap_single(dev, key_desc->g_dma,
+ key_req->g_len, DMA_TO_DEVICE);
+ dma_unmap_single(dev, key_desc->s_dma,
+ key_req->priv_key_len, DMA_FROM_DEVICE);
+ dma_unmap_single(dev, key_desc->w_dma,
+ key_req->pub_key_len, DMA_FROM_DEVICE);
+ if (req->type == ECC_KEYGEN)
+ dma_unmap_single(dev, edesc->ab_dma,
+ key_req->ab_len, DMA_TO_DEVICE);
+ }
break;
default:
dev_err(dev, "Unable to find request type\n");
@@ -239,6 +290,7 @@ static int caam_dsa_sign_edesc(struct pkc_request *req,
edesc->l_len = dsa_req->q_len;
edesc->n_len = dsa_req->r_len;
edesc->req_type = req->type;
+ edesc->curve_type = req->curve_type;
edesc->q_dma = dma_map_single(dev, dsa_req->q, dsa_req->q_len,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, edesc->q_dma)) {
@@ -287,6 +339,7 @@ static int caam_dsa_sign_edesc(struct pkc_request *req,
dev_err(dev, "Unable to map memory\n");
goto d_map_fail;
}
+
if (edesc->req_type == ECDSA_SIGN) {
edesc->ab_dma = dma_map_single(dev, dsa_req->ab,
dsa_req->ab_len, DMA_TO_DEVICE);
@@ -316,24 +369,32 @@ r_map_fail:
q_map_fail:
return -EINVAL;
}
+
static int caam_dsa_verify_edesc(struct pkc_request *req,
struct dsa_edesc_s *edesc)
{
struct crypto_pkc *tfm = crypto_pkc_reqtfm(req);
struct caam_pkc_context_s *ctxt = crypto_pkc_ctx(tfm);
struct device *dev = ctxt->dev;
+ uint32_t tmp_len;
struct dsa_verify_req_s *dsa_req = &req->req_u.dsa_verify;
edesc->l_len = dsa_req->q_len;
edesc->n_len = dsa_req->r_len;
- edesc->tmp = kzalloc(dsa_req->q_len, GFP_DMA);
edesc->req_type = req->type;
+ edesc->curve_type = req->curve_type;
+ if (edesc->req_type == ECDSA_VERIFY)
+ tmp_len = 2*dsa_req->q_len;
+ else
+ tmp_len = dsa_req->q_len;
+
+ edesc->tmp = kzalloc(tmp_len, GFP_DMA);
if (!edesc->tmp) {
pr_debug("Failed to allocate temp buffer for DSA Verify\n");
return -ENOMEM;
}
- edesc->tmp_dma = dma_map_single(dev, edesc->tmp, dsa_req->q_len,
+ edesc->tmp_dma = dma_map_single(dev, edesc->tmp, tmp_len,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, edesc->tmp_dma)) {
dev_err(dev, "Unable to map memory\n");
@@ -374,6 +435,7 @@ static int caam_dsa_verify_edesc(struct pkc_request *req,
dev_err(dev, "Unable to map memory\n");
goto key_map_fail;
}
+
edesc->c_dma = dma_map_single(dev, dsa_req->c, dsa_req->d_len,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, edesc->c_dma)) {
@@ -387,6 +449,7 @@ static int caam_dsa_verify_edesc(struct pkc_request *req,
dev_err(dev, "Unable to map memory\n");
goto d_map_fail;
}
+
if (edesc->req_type == ECDSA_VERIFY) {
edesc->ab_dma = dma_map_single(dev, dsa_req->ab,
dsa_req->ab_len, DMA_TO_DEVICE);
@@ -414,13 +477,87 @@ g_map_fail:
r_map_fail:
dma_unmap_single(dev, edesc->q_dma, dsa_req->q_len, DMA_TO_DEVICE);
q_map_fail:
- dma_unmap_single(dev, edesc->tmp_dma, dsa_req->q_len,
- DMA_BIDIRECTIONAL);
+ dma_unmap_single(dev, edesc->tmp_dma, tmp_len, DMA_BIDIRECTIONAL);
tmp_map_fail:
kfree(edesc->tmp);
return -EINVAL;
}
+static int caam_keygen_edesc(struct pkc_request *req,
+ struct dsa_edesc_s *edesc)
+{
+ struct crypto_pkc *tfm = crypto_pkc_reqtfm(req);
+ struct caam_pkc_context_s *ctxt = crypto_pkc_ctx(tfm);
+ struct device *dev = ctxt->dev;
+ struct keygen_req_s *key_req = &req->req_u.keygen;
+
+ edesc->l_len = key_req->q_len;
+ edesc->n_len = key_req->r_len;
+ edesc->req_type = req->type;
+ edesc->curve_type = req->curve_type;
+
+ edesc->q_dma = dma_map_single(dev, key_req->q, key_req->q_len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, edesc->q_dma)) {
+ dev_err(dev, "Unable to map memory\n");
+ goto q_map_fail;
+ }
+
+ edesc->r_dma = dma_map_single(dev, key_req->r, key_req->r_len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, edesc->r_dma)) {
+ dev_err(dev, "Unable to map memory\n");
+ goto r_map_fail;
+ }
+
+ edesc->g_dma = dma_map_single(dev, key_req->g, key_req->g_len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, edesc->g_dma)) {
+ dev_err(dev, "Unable to map memory\n");
+ goto g_map_fail;
+ }
+
+ edesc->key_dma = dma_map_single(dev, key_req->pub_key,
+ key_req->pub_key_len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, edesc->key_dma)) {
+ dev_err(dev, "Unable to map memory\n");
+ goto key_map_fail;
+ }
+
+ edesc->s_dma = dma_map_single(dev, key_req->priv_key,
+ key_req->priv_key_len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, edesc->s_dma)) {
+ dev_err(dev, "Unable to map memory\n");
+ goto s_map_fail;
+ }
+
+ if (edesc->req_type == ECC_KEYGEN) {
+ edesc->ab_dma = dma_map_single(dev, key_req->ab,
+ key_req->ab_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, edesc->ab_dma)) {
+ dev_err(dev, "Unable to map memory\n");
+ goto ab_map_fail;
+ }
+ }
+
+ return 0;
+ab_map_fail:
+ if (edesc->req_type == ECC_KEYGEN)
+ dma_unmap_single(dev, edesc->s_dma, key_req->priv_key_len,
+ DMA_FROM_DEVICE);
+s_map_fail:
+ dma_unmap_single(dev, edesc->key_dma, key_req->pub_key_len,
+ DMA_FROM_DEVICE);
+key_map_fail:
+ dma_unmap_single(dev, edesc->g_dma, key_req->g_len, DMA_TO_DEVICE);
+g_map_fail:
+ dma_unmap_single(dev, edesc->r_dma, key_req->r_len, DMA_TO_DEVICE);
+r_map_fail:
+ dma_unmap_single(dev, edesc->q_dma, key_req->q_len, DMA_TO_DEVICE);
+q_map_fail:
+ return -EINVAL;
+}
+
static int caam_rsa_pub_edesc(struct pkc_request *req, struct rsa_edesc *edesc)
{
struct crypto_pkc *tfm = crypto_pkc_reqtfm(req);
@@ -877,7 +1014,6 @@ static void *caam_dsa_desc_init(struct pkc_request *req)
}
desc = caam_dsa_sign_desc(edesc);
- break;
}
break;
case DSA_VERIFY:
@@ -894,7 +1030,22 @@ static void *caam_dsa_desc_init(struct pkc_request *req)
}
desc = caam_dsa_verify_desc(edesc);
- break;
+ }
+ break;
+ case DLC_KEYGEN:
+ {
+ edesc = kzalloc(sizeof(*edesc) +
+ sizeof(struct dlc_keygen_desc_s),
+ GFP_DMA);
+ if (!edesc)
+ return NULL;
+
+ if (caam_keygen_edesc(req, edesc)) {
+ kfree(edesc);
+ return NULL;
+ }
+
+ desc = caam_keygen_desc(edesc);
}
break;
case ECDSA_SIGN:
@@ -911,9 +1062,7 @@ static void *caam_dsa_desc_init(struct pkc_request *req)
}
desc = caam_dsa_sign_desc(edesc);
- break;
}
-
break;
case ECDSA_VERIFY:
{
@@ -929,17 +1078,91 @@ static void *caam_dsa_desc_init(struct pkc_request *req)
}
desc = caam_dsa_verify_desc(edesc);
- break;
}
+ break;
+ case ECC_KEYGEN:
+ {
+ edesc = kzalloc(sizeof(*edesc) +
+ sizeof(struct ecc_keygen_desc_s),
+ GFP_DMA);
+ if (!edesc)
+ return NULL;
+
+ if (caam_keygen_edesc(req, edesc)) {
+ kfree(edesc);
+ return NULL;
+ }
+
+ desc = caam_keygen_desc(edesc);
+ }
+ break;
default:
pr_debug("Unknown DSA Desc init request\n");
return NULL;
}
-
edesc->req_type = req->type;
return desc;
}
+static int caam_dh_key_edesc(struct pkc_request *req, struct dh_edesc_s *edesc)
+{
+ struct crypto_pkc *tfm = crypto_pkc_reqtfm(req);
+ struct caam_pkc_context_s *ctxt = crypto_pkc_ctx(tfm);
+ struct device *dev = ctxt->dev;
+ struct dh_key_req_s *dh_req = &req->req_u.dh_req;
+
+ edesc->l_len = dh_req->q_len;
+ edesc->n_len = dh_req->s_len;
+ edesc->req_type = req->type;
+ edesc->curve_type = req->curve_type;
+ edesc->q_dma = dma_map_single(dev, dh_req->q, dh_req->q_len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, edesc->q_dma)) {
+ dev_err(dev, "Unable to map memory\n");
+ goto q_map_fail;
+ }
+
+ edesc->w_dma = dma_map_single(dev, dh_req->pub_key, dh_req->pub_key_len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, edesc->w_dma)) {
+ dev_err(dev, "Unable to map memory\n");
+ goto w_map_fail;
+ }
+
+ edesc->s_dma = dma_map_single(dev, dh_req->s, dh_req->s_len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, edesc->s_dma)) {
+ dev_err(dev, "Unable to map memory\n");
+ goto s_map_fail;
+ }
+
+ edesc->z_dma = dma_map_single(dev, dh_req->z, dh_req->z_len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, edesc->z_dma)) {
+ dev_err(dev, "Unable to map memory\n");
+ goto z_map_fail;
+ }
+ if (req->type == ECDH_COMPUTE_KEY) {
+ edesc->ab_dma = dma_map_single(dev, dh_req->ab, dh_req->ab_len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, edesc->ab_dma)) {
+ dev_err(dev, "Unable to map memory\n");
+ goto ab_map_fail;
+ }
+ }
+ return 0;
+ab_map_fail:
+ dma_unmap_single(dev, edesc->z_dma, dh_req->z_len, DMA_FROM_DEVICE);
+z_map_fail:
+ dma_unmap_single(dev, edesc->s_dma, dh_req->s_len, DMA_TO_DEVICE);
+s_map_fail:
+ dma_unmap_single(dev, edesc->w_dma, dh_req->pub_key_len, DMA_TO_DEVICE);
+w_map_fail:
+ dma_unmap_single(dev, edesc->q_dma, dh_req->q_len, DMA_TO_DEVICE);
+q_map_fail:
+ return -EINVAL;
+}
+
/* DSA operation Handler */
static int dsa_op(struct pkc_request *req)
{
@@ -962,6 +1185,77 @@ static int dsa_op(struct pkc_request *req)
return ret;
}
+/* CAAM Descriptor creator for DH Public Key operations */
+static void *caam_dh_desc_init(struct pkc_request *req)
+{
+ void *desc = NULL;
+ struct dh_edesc_s *edesc = NULL;
+
+ switch (req->type) {
+ case DH_COMPUTE_KEY:
+ case ECDH_COMPUTE_KEY:
+ {
+ edesc = kzalloc(sizeof(*edesc) +
+ sizeof(struct dh_key_desc_s),
+ GFP_DMA);
+ if (!edesc)
+ return NULL;
+
+ if (caam_dh_key_edesc(req, edesc)) {
+ kfree(edesc);
+ return NULL;
+ }
+ desc = caam_dh_key_desc(edesc);
+ }
+ break;
+ default:
+ pr_debug("Unknown DH Desc init request\n");
+ return NULL;
+ }
+ edesc->req_type = req->type;
+ return desc;
+}
+
+/* DH Job Completion handler */
+static void dh_op_done(struct device *dev, u32 *desc, u32 err, void *context)
+{
+ struct pkc_request *req = context;
+ struct dh_edesc_s *edesc;
+
+ edesc = (struct dh_edesc_s *)((char *)desc -
+ offsetof(struct dh_edesc_s, hw_desc));
+
+ if (err) {
+ char tmp[CAAM_ERROR_STR_MAX];
+ dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
+ }
+
+ dh_unmap(dev, edesc, req);
+ kfree(edesc);
+
+ pkc_request_complete(req, err);
+}
+
+static int dh_op(struct pkc_request *req)
+{
+ struct crypto_pkc *pkc_tfm = crypto_pkc_reqtfm(req);
+ struct caam_pkc_context_s *ctxt = crypto_pkc_ctx(pkc_tfm);
+ struct device *dev = ctxt->dev;
+ int ret = 0;
+ void *desc = NULL;
+ desc = caam_dh_desc_init(req);
+ if (!desc) {
+ dev_err(dev, "Unable to allocate descriptor\n");
+ return -ENOMEM;
+ }
+
+ ret = caam_jr_enqueue(dev, desc, dh_op_done, req);
+ if (!ret)
+ ret = -EINPROGRESS;
+
+ return ret;
+}
+
/* RSA operation Handler */
static int rsa_op(struct pkc_request *req)
{
@@ -1020,6 +1314,19 @@ static struct caam_pkc_template driver_pkc[] = {
.min_keysize = 512,
.max_keysize = 4096,
},
+ },
+ /* DH driver registration hooks */
+ {
+ .name = "dh",
+ .driver_name = "dh-caam",
+ .pkc_name = "pkc(dh)",
+ .pkc_driver_name = "pkc-dh-caam",
+ .type = CRYPTO_ALG_TYPE_PKC_DH,
+ .template_pkc = {
+ .pkc_op = dh_op,
+ .min_keysize = 512,
+ .max_keysize = 4096,
+ },
}
};
@@ -1061,6 +1368,7 @@ static struct caam_pkc_alg *caam_pkc_alloc(struct device *ctrldev,
dev_err(ctrldev, "failed to allocate t_alg\n");
return NULL;
}
+
alg = &t_alg->crypto_alg;
alg->cra_pkc = template->template_pkc;
diff --git a/drivers/crypto/caam/pdb.h b/drivers/crypto/caam/pdb.h
index 7cf2662..0680b30 100644
--- a/drivers/crypto/caam/pdb.h
+++ b/drivers/crypto/caam/pdb.h
@@ -400,6 +400,17 @@ struct dsa_verify_desc_s {
uint32_t op;
} __packed;
+struct dlc_keygen_desc_s {
+ uint32_t desc_hdr;
+ uint32_t sgf_ln;
+ dma_addr_t q_dma;
+ dma_addr_t r_dma;
+ dma_addr_t g_dma;
+ dma_addr_t s_dma;
+ dma_addr_t w_dma;
+ uint32_t op;
+} __packed;
+
struct ecdsa_sign_desc_s {
uint32_t desc_hdr;
uint32_t sgf_ln; /* Use ECDSA_PDB_ definitions per above */
@@ -429,6 +440,42 @@ struct ecdsa_verify_desc_s {
uint32_t op;
} __packed;
+struct ecc_keygen_desc_s {
+ uint32_t desc_hdr;
+ uint32_t sgf_ln;
+ dma_addr_t q_dma;
+ dma_addr_t r_dma;
+ dma_addr_t g_dma;
+ dma_addr_t s_dma;
+ dma_addr_t w_dma;
+ dma_addr_t ab_dma;
+ uint32_t op;
+} __packed;
+
+#define DH_PDB_L_SHIFT 7
+#define DH_PDB_L_MASK (0x3ff << DH_PDB_L_SHIFT)
+#define DH_PDB_N_MASK 0x7f
+#define DH_PDB_SGF_SHIFT 24
+#define DH_PDB_SGF_MASK (0xff << DH_PDB_SGF_SHIFT)
+#define DH_PDB_SGF_Q (0x80 << DH_PDB_SGF_SHIFT)
+#define DH_PDB_SGF_R (0x40 << DH_PDB_SGF_SHIFT)
+#define DH_PDB_SGF_W (0x20 << DH_PDB_SGF_SHIFT)
+#define DH_PDB_SGF_S (0x10 << DH_PDB_SGF_SHIFT)
+#define DH_PDB_SGF_Z (0x08 << DH_PDB_SGF_SHIFT)
+#define DH_PDB_SGF_AB (0x04 << DH_PDB_SGF_SHIFT)
+
+struct dh_key_desc_s {
+ uint32_t desc_hdr;
+ uint32_t sgf_ln; /* Use DH_PDB_ definitions per above */
+ dma_addr_t q_dma;
+ dma_addr_t r_dma;
+ dma_addr_t w_dma;
+ dma_addr_t s_dma;
+ dma_addr_t z_dma;
+ dma_addr_t ab_dma;
+ uint32_t op;
+} __packed;
+
/* DSA/ECDSA Protocol Data Blocks */
#define RSA_PDB_SGF_SHIFT 28
#define RSA_PDB_MSG_FMT_SHIFT 12
diff --git a/drivers/crypto/caam/pkc_desc.c b/drivers/crypto/caam/pkc_desc.c
index a607dd4..5bcf5d3 100644
--- a/drivers/crypto/caam/pkc_desc.c
+++ b/drivers/crypto/caam/pkc_desc.c
@@ -142,11 +142,56 @@ void *caam_rsa_priv_f3_desc(struct rsa_edesc *edesc)
return rsa_priv_desc;
}
+/* DH/ECDH shared-secret (key agreement) CAAM descriptor */
+void *caam_dh_key_desc(struct dh_edesc_s *edesc)
+{
+ u32 start_idx, desc_size;
+ void *desc;
+#ifdef CAAM_DEBUG
+ uint32_t i;
+ uint32_t *buf;
+#endif
+ struct dh_key_desc_s *dh_desc =
+ (struct dh_key_desc_s *)edesc->hw_desc;
+ desc_size = sizeof(struct dh_key_desc_s) / sizeof(u32);
+ start_idx = desc_size - 1;
+ start_idx &= HDR_START_IDX_MASK;
+ init_job_desc(edesc->hw_desc, (start_idx << HDR_START_IDX_SHIFT) |
+ (start_idx & HDR_DESCLEN_MASK) | HDR_ONE);
+ dh_desc->sgf_ln = (edesc->l_len << DH_PDB_L_SHIFT) |
+ ((edesc->n_len & DH_PDB_N_MASK));
+ dh_desc->q_dma = edesc->q_dma;
+ dh_desc->w_dma = edesc->w_dma;
+ dh_desc->s_dma = edesc->s_dma;
+ dh_desc->z_dma = edesc->z_dma;
+ dh_desc->op = CMD_OPERATION | OP_TYPE_UNI_PROTOCOL |
+ OP_PCLID_DH;
+ if (edesc->req_type == ECDH_COMPUTE_KEY) {
+ dh_desc->ab_dma = edesc->ab_dma;
+ dh_desc->op |= OP_PCL_PKPROT_ECC;
+ if (edesc->curve_type == ECC_BINARY)
+ dh_desc->op |= OP_PCL_PKPROT_F2M;
+ }
+
+ desc = dh_desc;
+#ifdef CAAM_DEBUG
+ buf = desc;
+ pr_debug("%d DH Descriptor is:\n", desc_size);
+ for (i = 0; i < desc_size; i++)
+ pr_debug("[%d] %x\n", i, buf[i]);
+#endif
+ return desc;
+}
+
/* DSA sign CAAM descriptor */
void *caam_dsa_sign_desc(struct dsa_edesc_s *edesc)
{
u32 start_idx, desc_size;
void *desc;
+#ifdef CAAM_DEBUG
+ uint32_t i;
+ uint32_t *buf;
+#endif
if (edesc->req_type == ECDSA_SIGN) {
struct ecdsa_sign_desc_s *ecdsa_desc =
@@ -169,6 +214,9 @@ void *caam_dsa_sign_desc(struct dsa_edesc_s *edesc)
ecdsa_desc->ab_dma = edesc->ab_dma;
ecdsa_desc->op = CMD_OPERATION | OP_TYPE_UNI_PROTOCOL |
OP_PCLID_DSASIGN | OP_PCL_PKPROT_ECC;
+ if (edesc->curve_type == ECC_BINARY)
+ ecdsa_desc->op |= OP_PCL_PKPROT_F2M;
+
desc = ecdsa_desc;
} else {
struct dsa_sign_desc_s *dsa_desc =
@@ -192,6 +240,77 @@ void *caam_dsa_sign_desc(struct dsa_edesc_s *edesc)
OP_PCLID_DSASIGN;
desc = dsa_desc;
}
+#ifdef CAAM_DEBUG
+ buf = desc;
+ pr_debug("DSA Descriptor is:");
+ for (i = 0; i < desc_size; i++)
+ pr_debug("[%d] %x ", i, buf[i]);
+ pr_debug("\n");
+#endif
+
+ return desc;
+}
+
+/* DSA/ECDSA/DH/ECDH keygen CAAM descriptor */
+void *caam_keygen_desc(struct dsa_edesc_s *edesc)
+{
+ u32 start_idx, desc_size;
+ void *desc;
+#ifdef CAAM_DEBUG
+ uint32_t i;
+ uint32_t *buf;
+#endif
+
+ if (edesc->req_type == ECC_KEYGEN) {
+ struct ecc_keygen_desc_s *ecc_desc =
+ (struct ecc_keygen_desc_s *)edesc->hw_desc;
+ desc_size = sizeof(struct ecc_keygen_desc_s) / sizeof(u32);
+ start_idx = desc_size - 1;
+ start_idx &= HDR_START_IDX_MASK;
+ init_job_desc(edesc->hw_desc,
+ (start_idx << HDR_START_IDX_SHIFT) |
+ (start_idx & HDR_DESCLEN_MASK) | HDR_ONE);
+ ecc_desc->sgf_ln = (edesc->l_len << DSA_PDB_L_SHIFT) |
+ ((edesc->n_len & DSA_PDB_N_MASK));
+ ecc_desc->q_dma = edesc->q_dma;
+ ecc_desc->r_dma = edesc->r_dma;
+ ecc_desc->g_dma = edesc->g_dma;
+ ecc_desc->s_dma = edesc->s_dma;
+ ecc_desc->w_dma = edesc->key_dma;
+ ecc_desc->ab_dma = edesc->ab_dma;
+ ecc_desc->op = CMD_OPERATION | OP_TYPE_UNI_PROTOCOL |
+ OP_PCLID_PUBLICKEYPAIR | OP_PCL_PKPROT_ECC;
+ if (edesc->curve_type == ECC_BINARY)
+ ecc_desc->op |= OP_PCL_PKPROT_F2M;
+
+ desc = ecc_desc;
+ } else {
+ struct dlc_keygen_desc_s *key_desc =
+ (struct dlc_keygen_desc_s *)edesc->hw_desc;
+ desc_size = sizeof(struct dlc_keygen_desc_s) / sizeof(u32);
+ start_idx = desc_size - 1;
+ start_idx &= HDR_START_IDX_MASK;
+ init_job_desc(edesc->hw_desc,
+ (start_idx << HDR_START_IDX_SHIFT) |
+ (start_idx & HDR_DESCLEN_MASK) | HDR_ONE);
+ key_desc->sgf_ln = (edesc->l_len << DSA_PDB_L_SHIFT) |
+ ((edesc->n_len & DSA_PDB_N_MASK));
+ key_desc->q_dma = edesc->q_dma;
+ key_desc->r_dma = edesc->r_dma;
+ key_desc->g_dma = edesc->g_dma;
+ key_desc->s_dma = edesc->s_dma;
+ key_desc->w_dma = edesc->key_dma;
+ key_desc->op = CMD_OPERATION | OP_TYPE_UNI_PROTOCOL |
+ OP_PCLID_PUBLICKEYPAIR;
+ desc = key_desc;
+ }
+#ifdef CAAM_DEBUG
+ buf = desc;
+ pr_debug("DSA Keygen Descriptor is:");
+ for (i = 0; i < desc_size; i++)
+ pr_debug("[%d] %x ", i, buf[i]);
+ pr_debug("\n");
+#endif
return desc;
}
@@ -201,6 +320,10 @@ void *caam_dsa_verify_desc(struct dsa_edesc_s *edesc)
{
u32 start_idx, desc_size;
void *desc;
+#ifdef CAAM_DEBUG
+ uint32_t i;
+ uint32_t *buf;
+#endif
if (edesc->req_type == ECDSA_VERIFY) {
struct ecdsa_verify_desc_s *ecdsa_desc =
@@ -224,8 +347,9 @@ void *caam_dsa_verify_desc(struct dsa_edesc_s *edesc)
ecdsa_desc->ab_dma = edesc->ab_dma;
ecdsa_desc->op = CMD_OPERATION | OP_TYPE_UNI_PROTOCOL |
OP_PCLID_DSAVERIFY | OP_PCL_PKPROT_ECC;
+ if (edesc->curve_type == ECC_BINARY)
+ ecdsa_desc->op |= OP_PCL_PKPROT_F2M;
desc = ecdsa_desc;
-
} else {
struct dsa_verify_desc_s *dsa_desc =
(struct dsa_verify_desc_s *)edesc->hw_desc;
@@ -249,5 +373,11 @@ void *caam_dsa_verify_desc(struct dsa_edesc_s *edesc)
OP_PCLID_DSAVERIFY;
desc = dsa_desc;
}
+#ifdef CAAM_DEBUG
+ buf = desc;
+ pr_debug("DSA Descriptor is:\n");
+ for (i = 0; i < desc_size; i++)
+ pr_debug("[%d] %x\n", i, buf[i]);
+#endif
return desc;
}
diff --git a/drivers/crypto/caam/pkc_desc.h b/drivers/crypto/caam/pkc_desc.h
index 56fc2ee..f73ad07 100644
--- a/drivers/crypto/caam/pkc_desc.h
+++ b/drivers/crypto/caam/pkc_desc.h
@@ -176,9 +176,11 @@ struct rsa_edesc {
*/
struct dsa_edesc_s {
enum pkc_req_type req_type;
+ enum curve_t curve_type;
uint32_t l_len;
uint32_t n_len;
dma_addr_t key_dma;
+ dma_addr_t s_dma;
dma_addr_t f_dma;
dma_addr_t q_dma;
dma_addr_t r_dma;
@@ -191,11 +193,30 @@ struct dsa_edesc_s {
u32 hw_desc[];
};
+/*
+ * dh_edesc - s/w-extended for dh and ecdh descriptors
+ * @hw_desc: the h/w job descriptor
+ */
+struct dh_edesc_s {
+ enum pkc_req_type req_type;
+ enum curve_t curve_type;
+ uint32_t l_len;
+ uint32_t n_len;
+ dma_addr_t q_dma;
+ dma_addr_t ab_dma;
+ dma_addr_t w_dma;
+ dma_addr_t s_dma;
+ dma_addr_t z_dma;
+ u32 hw_desc[];
+};
+
void *caam_rsa_pub_desc(struct rsa_edesc *);
void *caam_rsa_priv_f1_desc(struct rsa_edesc *);
void *caam_rsa_priv_f2_desc(struct rsa_edesc *);
void *caam_rsa_priv_f3_desc(struct rsa_edesc *);
void *caam_dsa_sign_desc(struct dsa_edesc_s *);
void *caam_dsa_verify_desc(struct dsa_edesc_s *);
+void *caam_keygen_desc(struct dsa_edesc_s *);
+void *caam_dh_key_desc(struct dh_edesc_s *);
#endif