From b0c8bc1b9d8027093f8506266ab06c5cf0e5199f Mon Sep 17 00:00:00 2001 From: Tadeusz Struk Date: Tue, 20 Oct 2015 12:50:03 -0700 Subject: crypto: qat - when stopping all devices make sure VFs are stopped first When stopping all devices, make sure VFs are stopped before the corresponding PF. VFs always come after their PF in the device list, so it is enough to walk the list backwards. Signed-off-by: Tadeusz Struk Signed-off-by: Herbert Xu
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c index 03856ad..bd8dfa1 100644 --- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c +++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c @@ -278,7 +278,7 @@ static int adf_ctl_stop_devices(uint32_t id) struct list_head *itr, *head = adf_devmgr_get_head(); int ret = 0; - list_for_each(itr, head) { + list_for_each_prev(itr, head) { struct adf_accel_dev *accel_dev = list_entry(itr, struct adf_accel_dev, list); if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
-- cgit v0.10.2
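The fix relies on an ordering invariant: VFs are registered after their parent PF, so walking the device list in reverse order tears down every VF before its PF. A standalone sketch of that argument (the dev_rec structure and device names are illustrative, not taken from the driver):

```c
#include <stdio.h>

/* Hypothetical registration record: a PF is added first, then its
 * VFs, so VFs always appear after their PF in registration order. */
struct dev_rec {
	const char *name;
	int is_vf;
};

int main(void)
{
	struct dev_rec devs[] = {
		{ "pf0", 0 }, { "pf0-vf0", 1 }, { "pf0-vf1", 1 },
		{ "pf1", 0 }, { "pf1-vf0", 1 },
	};
	int n = sizeof(devs) / sizeof(devs[0]);
	int i;

	/* Iterating backwards -- what list_for_each_prev() does in the
	 * patch -- stops every VF before its parent PF. */
	for (i = n - 1; i >= 0; i--)
		printf("stopping %s\n", devs[i].name);
	return 0;
}
```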
From 276a2ff1aea2d86681ccdcc149c9297dd639f493 Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Thu, 22 Oct 2015 08:51:50 +0200 Subject: crypto: hifn_795x - remove the hifn_test function The hifn_test function is redundant with the self-test done at registration time by the crypto API, so remove it. Signed-off-by: LABBE Corentin Signed-off-by: Herbert Xu
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c index ca5c71a..0bcc15e 100644 --- a/drivers/crypto/hifn_795x.c +++ b/drivers/crypto/hifn_795x.c @@ -1640,60 +1640,6 @@ err_out_exit: return err; } -static int hifn_test(struct hifn_device *dev, int encdec, u8 snum) -{ - int n, err; - u8 src[16]; - struct hifn_context ctx; - struct hifn_request_context rctx; - u8 fips_aes_ecb_from_zero[16] = { - 0x66, 0xE9, 0x4B, 0xD4, - 0xEF, 0x8A, 0x2C, 0x3B, - 0x88, 0x4C, 0xFA, 0x59, - 0xCA, 0x34, 0x2B, 0x2E}; - struct scatterlist sg; - - memset(src, 0, sizeof(src)); - memset(ctx.key, 0, sizeof(ctx.key)); - - ctx.dev = dev; - ctx.keysize = 16; - rctx.ivsize = 0; - rctx.iv = NULL; - rctx.op = (encdec)?ACRYPTO_OP_ENCRYPT:ACRYPTO_OP_DECRYPT; - rctx.mode = ACRYPTO_MODE_ECB; - rctx.type = ACRYPTO_TYPE_AES_128; - rctx.walk.cache[0].length = 0; - - sg_init_one(&sg, &src, sizeof(src)); - - err = hifn_setup_dma(dev, &ctx, &rctx, &sg, &sg, sizeof(src), NULL); - if (err) - goto err_out; - - dev->started = 0; - msleep(200); - - dprintk("%s: decoded: ", dev->name); - for (n=0; nname); - for (n=0; nname); - return 0; - } - -err_out: - printk(KERN_INFO "%s: AES 128 ECB test has been failed.\n", dev->name); - return -1; -} - static int hifn_start_device(struct hifn_device *dev) { int err; @@ -2646,10 +2592,6 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (err) goto err_out_free_irq; - err = hifn_test(dev, 1, 0); - if (err) - goto err_out_stop_device; - err = hifn_register_rng(dev); if (err) goto err_out_stop_device;
-- cgit v0.10.2
From cfeecab44c8d9682bea8c65425920ec3b9ff8149 Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Thu, 22 Oct 2015 08:51:51 +0200 Subject: crypto: hifn_795x - use dev_xx/pr_xx instead of printk This patch replaces all printk calls with their dev_xx/pr_xx counterparts. It also replaces the custom dprintk macro with pr_debug/dev_dbg. Signed-off-by: LABBE Corentin Signed-off-by: Herbert Xu
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c index 0bcc15e..e0ecddc 100644 --- a/drivers/crypto/hifn_795x.c +++ b/drivers/crypto/hifn_795x.c @@ -36,14 +36,6 @@ #include #include -//#define HIFN_DEBUG - -#ifdef HIFN_DEBUG -#define dprintk(f, a...) printk(f, ##a) -#else -#define dprintk(f, a...) do {} while (0) -#endif - static char hifn_pll_ref[sizeof("extNNN")] = "ext"; module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444); MODULE_PARM_DESC(hifn_pll_ref, @@ -702,7 +694,7 @@ static void hifn_wait_puc(struct hifn_device *dev) } if (!i) - dprintk("%s: Failed to reset PUC unit.\n", dev->name); + dev_err(&dev->pdev->dev, "Failed to reset PUC unit.\n"); } static void hifn_reset_puc(struct hifn_device *dev) @@ -854,15 +846,13 @@ static int hifn_init_pubrng(struct hifn_device *dev) } if (!i) - dprintk("Chip %s: Failed to initialise public key engine.\n", - dev->name); + dev_err(&dev->pdev->dev, "Failed to initialise public key engine.\n"); else { hifn_write_1(dev, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE); dev->dmareg |= HIFN_DMAIER_PUBDONE; hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg); - dprintk("Chip %s: Public key engine has been successfully " - "initialised.\n", dev->name); + dev_dbg(&dev->pdev->dev, "Public key engine has been successfully initialised.\n"); } /* @@ -871,8 +861,7 @@ static int hifn_init_pubrng(struct hifn_device *dev) hifn_write_1(dev, HIFN_1_RNG_CONFIG, hifn_read_1(dev, HIFN_1_RNG_CONFIG) | HIFN_RNGCFG_ENA); - dprintk("Chip %s: RNG engine has been successfully initialised.\n", - dev->name); + dev_dbg(&dev->pdev->dev, "RNG engine has been successfully initialised.\n"); #ifdef CONFIG_CRYPTO_DEV_HIFN_795X_RNG /* First value must be discarded */ @@ -897,7 +886,7 @@ static int hifn_enable_crypto(struct hifn_device *dev) } if (offtbl == NULL) { - dprintk("Chip %s: Unknown card!\n", dev->name); + dev_err(&dev->pdev->dev, "Unknown card!\n"); return -ENODEV; } @@ -920,7 +909,7 @@ static int hifn_enable_crypto(struct hifn_device *dev) } hifn_write_1(dev, HIFN_1_DMA_CNFG, dmacfg); - dprintk("Chip %s: %s.\n", dev->name, pci_name(dev->pdev)); + dev_dbg(&dev->pdev->dev, "%s %s.\n", dev->name, pci_name(dev->pdev)); return 0; } @@ -984,9 +973,8 @@ static void hifn_init_pll(struct hifn_device *dev) freq = simple_strtoul(hifn_pll_ref + 3, NULL, 10); else { freq = 66; - printk(KERN_INFO "hifn795x: assuming %uMHz clock speed, " - "override with hifn_pll_ref=%.3s\n", - freq, hifn_pll_ref); + dev_info(&dev->pdev->dev, "assuming %uMHz clock speed, override with hifn_pll_ref=%.3s\n", + freq, hifn_pll_ref); } m = HIFN_PLL_FCK_MAX / freq; @@ -1471,8 +1459,8 @@ static int ablkcipher_add(unsigned int *drestp, struct scatterlist *dst, drest -= copy; nbytes -= copy; - dprintk("%s: copy: %u, size: %u, drest: %u, nbytes: %u.\n", - __func__, copy, size, drest, nbytes); + pr_debug("%s: copy: %u, size: %u, drest: %u, nbytes: %u.\n", + __func__, copy, size, drest, nbytes); dst++; idx++; @@ -1499,8 +1487,8 @@ static int hifn_cipher_walk(struct ablkcipher_request *req, dst = &req->dst[idx]; - dprintk("\n%s: dlen: %u, doff: %u, offset: %u, nbytes: %u.\n", - __func__, dst->length, dst->offset, offset, nbytes); + pr_debug("\n%s: dlen: %u, doff: %u, offset: %u, nbytes: %u.\n", - __func__, dst->length, dst->offset, offset, nbytes); wait + __func__, dst->length, dst->offset, offset, nbytes); if (!IS_ALIGNED(dst->offset, HIFN_D_DST_DALIGN) || !IS_ALIGNED(dst->length, HIFN_D_DST_DALIGN) || @@ -1536,14 +1524,13 @@ static
int hifn_cipher_walk(struct ablkcipher_request *req, * Temporary of course... * Kick author if you will catch this one. */ - printk(KERN_ERR "%s: dlen: %u, nbytes: %u," - "slen: %u, offset: %u.\n", - __func__, dlen, nbytes, slen, offset); - printk(KERN_ERR "%s: please contact author to fix this " - "issue, generally you should not catch " - "this path under any condition but who " - "knows how did you use crypto code.\n" - "Thank you.\n", __func__); + pr_err("%s: dlen: %u, nbytes: %u, slen: %u, offset: %u.\n", + __func__, dlen, nbytes, slen, offset); + pr_err("%s: please contact author to fix this " + "issue, generally you should not catch " + "this path under any condition but who " + "knows how did you use crypto code.\n" + "Thank you.\n", __func__); BUG(); } else { copy += diff + nbytes; @@ -1630,11 +1617,11 @@ err_out: spin_unlock_irqrestore(&dev->lock, flags); err_out_exit: if (err) { - printk("%s: iv: %p [%d], key: %p [%d], mode: %u, op: %u, " - "type: %u, err: %d.\n", - dev->name, rctx->iv, rctx->ivsize, - ctx->key, ctx->keysize, - rctx->mode, rctx->op, rctx->type, err); + dev_info(&dev->pdev->dev, "iv: %p [%d], key: %p [%d], mode: %u, op: %u, " + "type: %u, err: %d.\n", + rctx->iv, rctx->ivsize, + ctx->key, ctx->keysize, + rctx->mode, rctx->op, rctx->type, err); } return err; @@ -1685,8 +1672,8 @@ static int ablkcipher_get(void *saddr, unsigned int *srestp, unsigned int offset saddr += copy; offset = 0; - dprintk("%s: copy: %u, size: %u, srest: %u, nbytes: %u.\n", - __func__, copy, size, srest, nbytes); + pr_debug("%s: copy: %u, size: %u, srest: %u, nbytes: %u.\n", + __func__, copy, size, srest, nbytes); dst++; idx++; @@ -1706,7 +1693,8 @@ static inline void hifn_complete_sa(struct hifn_device *dev, int i) dev->sa[i] = NULL; dev->started--; if (dev->started < 0) - printk("%s: started: %d.\n", __func__, dev->started); + dev_info(&dev->pdev->dev, "%s: started: %d.\n", __func__, + dev->started); spin_unlock_irqrestore(&dev->lock, flags); BUG_ON(dev->started < 0); } @@ -1725,7 +1713,7 @@ static void hifn_process_ready(struct ablkcipher_request *req, int error) t = &rctx->walk.cache[idx]; dst = &req->dst[idx]; - dprintk("\n%s: sg_page(t): %p, t->length: %u, " + pr_debug("\n%s: sg_page(t): %p, t->length: %u, " "sg_page(dst): %p, dst->length: %u, " "nbytes: %u.\n", __func__, sg_page(t), t->length, @@ -1761,9 +1749,8 @@ static void hifn_clear_rings(struct hifn_device *dev, int error) struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; int i, u; - dprintk("%s: ring cleanup 1: i: %d.%d.%d.%d, u: %d.%d.%d.%d, " + dev_dbg(&dev->pdev->dev, "ring cleanup 1: i: %d.%d.%d.%d, u: %d.%d.%d.%d, " "k: %d.%d.%d.%d.\n", - dev->name, dma->cmdi, dma->srci, dma->dsti, dma->resi, dma->cmdu, dma->srcu, dma->dstu, dma->resu, dma->cmdk, dma->srck, dma->dstk, dma->resk); @@ -1816,9 +1803,8 @@ static void hifn_clear_rings(struct hifn_device *dev, int error) } dma->dstk = i; dma->dstu = u; - dprintk("%s: ring cleanup 2: i: %d.%d.%d.%d, u: %d.%d.%d.%d, " + dev_dbg(&dev->pdev->dev, "ring cleanup 2: i: %d.%d.%d.%d, u: %d.%d.%d.%d, " "k: %d.%d.%d.%d.\n", - dev->name, dma->cmdi, dma->srci, dma->dsti, dma->resi, dma->cmdu, dma->srcu, dma->dstu, dma->resu, dma->cmdk, dma->srck, dma->dstk, dma->resk); @@ -1867,21 +1853,22 @@ static void hifn_work(struct work_struct *work) int i; struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; - printk("%s: r: %08x, active: %d, started: %d, " - "success: %lu: qlen: %u/%u, reset: %d.\n", - dev->name, r, dev->active, dev->started, - dev->success, dev->queue.qlen, 
dev->queue.max_qlen, - reset); + dev_info(&dev->pdev->dev, + "r: %08x, active: %d, started: %d, " + "success: %lu: qlen: %u/%u, reset: %d.\n", + r, dev->active, dev->started, + dev->success, dev->queue.qlen, dev->queue.max_qlen, + reset); - printk("%s: res: ", __func__); + dev_info(&dev->pdev->dev, "%s: res: ", __func__); for (i=0; iresr[i].l, dev->sa[i]); + pr_info("%x.%p ", dma->resr[i].l, dev->sa[i]); if (dev->sa[i]) { hifn_process_ready(dev->sa[i], -ENODEV); hifn_complete_sa(dev, i); } } - printk("\n"); + pr_info("\n"); hifn_reset_dma(dev, 1); hifn_stop_device(dev); @@ -1903,9 +1890,9 @@ static irqreturn_t hifn_interrupt(int irq, void *data) dmacsr = hifn_read_1(dev, HIFN_1_DMA_CSR); - dprintk("%s: 1 dmacsr: %08x, dmareg: %08x, res: %08x [%d], " + dev_dbg(&dev->pdev->dev, "1 dmacsr: %08x, dmareg: %08x, res: %08x [%d], " "i: %d.%d.%d.%d, u: %d.%d.%d.%d.\n", - dev->name, dmacsr, dev->dmareg, dmacsr & dev->dmareg, dma->cmdi, + dmacsr, dev->dmareg, dmacsr & dev->dmareg, dma->cmdi, dma->cmdi, dma->srci, dma->dsti, dma->resi, dma->cmdu, dma->srcu, dma->dstu, dma->resu); @@ -1924,9 +1911,9 @@ static irqreturn_t hifn_interrupt(int irq, void *data) if (restart) { u32 puisr = hifn_read_0(dev, HIFN_0_PUISR); - printk(KERN_WARNING "%s: overflow: r: %d, d: %d, puisr: %08x, d: %u.\n", - dev->name, !!(dmacsr & HIFN_DMACSR_R_OVER), - !!(dmacsr & HIFN_DMACSR_D_OVER), + dev_warn(&dev->pdev->dev, "overflow: r: %d, d: %d, puisr: %08x, d: %u.\n", + !!(dmacsr & HIFN_DMACSR_R_OVER), + !!(dmacsr & HIFN_DMACSR_D_OVER), puisr, !!(puisr & HIFN_PUISR_DSTOVER)); if (!!(puisr & HIFN_PUISR_DSTOVER)) hifn_write_0(dev, HIFN_0_PUISR, HIFN_PUISR_DSTOVER); @@ -1937,18 +1924,18 @@ static irqreturn_t hifn_interrupt(int irq, void *data) restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT | HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT); if (restart) { - printk(KERN_WARNING "%s: abort: c: %d, s: %d, d: %d, r: %d.\n", - dev->name, !!(dmacsr & HIFN_DMACSR_C_ABORT), - !!(dmacsr & HIFN_DMACSR_S_ABORT), - !!(dmacsr & HIFN_DMACSR_D_ABORT), - !!(dmacsr & HIFN_DMACSR_R_ABORT)); + dev_warn(&dev->pdev->dev, "abort: c: %d, s: %d, d: %d, r: %d.\n", + !!(dmacsr & HIFN_DMACSR_C_ABORT), + !!(dmacsr & HIFN_DMACSR_S_ABORT), + !!(dmacsr & HIFN_DMACSR_D_ABORT), + !!(dmacsr & HIFN_DMACSR_R_ABORT)); hifn_reset_dma(dev, 1); hifn_init_dma(dev); hifn_init_registers(dev); } if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->cmdu == 0)) { - dprintk("%s: wait on command.\n", dev->name); + dev_dbg(&dev->pdev->dev, "wait on command.\n"); dev->dmareg &= ~(HIFN_DMAIER_C_WAIT); hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg); } @@ -2530,8 +2517,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (pci_resource_len(pdev, 0) < HIFN_BAR0_SIZE || pci_resource_len(pdev, 1) < HIFN_BAR1_SIZE || pci_resource_len(pdev, 2) < HIFN_BAR2_SIZE) { - dprintk("%s: Broken hardware - I/O regions are too small.\n", - pci_name(pdev)); + dev_err(&pdev->dev, "Broken hardware - I/O regions are too small.\n"); err = -ENODEV; goto err_out_free_regions; } @@ -2564,7 +2550,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id) dev->desc_virt = pci_zalloc_consistent(pdev, sizeof(struct hifn_dma), &dev->desc_dma); if (!dev->desc_virt) { - dprintk("Failed to allocate descriptor rings.\n"); + dev_err(&pdev->dev, "Failed to allocate descriptor rings.\n"); err = -ENOMEM; goto err_out_unmap_bars; } @@ -2583,7 +2569,8 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id) err = request_irq(dev->irq, hifn_interrupt, 
IRQF_SHARED, dev->name, dev); if (err) { - dprintk("Failed to request IRQ%d: err: %d.\n", dev->irq, err); + dev_err(&pdev->dev, "Failed to request IRQ%d: err: %d.\n", + dev->irq, err); dev->irq = 0; goto err_out_free_desc; } @@ -2603,9 +2590,9 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id) INIT_DELAYED_WORK(&dev->work, hifn_work); schedule_delayed_work(&dev->work, HZ); - dprintk("HIFN crypto accelerator card at %s has been " - "successfully registered as %s.\n", - pci_name(pdev), dev->name); + dev_dbg(&pdev->dev, "HIFN crypto accelerator card at %s has been " + "successfully registered as %s.\n", + pci_name(pdev), dev->name); return 0; @@ -2692,8 +2679,7 @@ static int __init hifn_init(void) if (strncmp(hifn_pll_ref, "ext", 3) && strncmp(hifn_pll_ref, "pci", 3)) { - printk(KERN_ERR "hifn795x: invalid hifn_pll_ref clock, " - "must be pci or ext"); + pr_err("hifn795x: invalid hifn_pll_ref clock, must be pci or ext"); return -EINVAL; } @@ -2705,22 +2691,21 @@ static int __init hifn_init(void) if (hifn_pll_ref[3] != '\0') { freq = simple_strtoul(hifn_pll_ref + 3, NULL, 10); if (freq < 20 || freq > 100) { - printk(KERN_ERR "hifn795x: invalid hifn_pll_ref " - "frequency, must be in the range " - "of 20-100"); + pr_err("hifn795x: invalid hifn_pll_ref frequency, must" + "be in the range of 20-100"); return -EINVAL; } } err = pci_register_driver(&hifn_pci_driver); if (err < 0) { - dprintk("Failed to register PCI driver for %s device.\n", - hifn_pci_driver.name); + pr_err("Failed to register PCI driver for %s device.\n", + hifn_pci_driver.name); return -ENODEV; } - printk(KERN_INFO "Driver for HIFN 795x crypto accelerator chip " - "has been successfully registered.\n"); + pr_info("Driver for HIFN 795x crypto accelerator chip " + "has been successfully registered.\n"); return 0; } @@ -2729,8 +2714,8 @@ static void __exit hifn_fini(void) { pci_unregister_driver(&hifn_pci_driver); - printk(KERN_INFO "Driver for HIFN 795x crypto accelerator chip " - "has been successfully unregistered.\n"); + pr_info("Driver for HIFN 795x crypto accelerator chip " + "has been successfully unregistered.\n"); } module_init(hifn_init);
-- cgit v0.10.2
From 16f56e8b751f72e0e105abc1d8f5912c340a5623 Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Thu, 22 Oct 2015 08:51:52 +0200 Subject: crypto: hifn_795x - fix coding style The hifn_795x driver is old and has lots of style issues. This patch tries to fix the easy ones. Signed-off-by: LABBE Corentin Signed-off-by: Herbert Xu
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c index e0ecddc..201e57d 100644 --- a/drivers/crypto/hifn_795x.c +++ b/drivers/crypto/hifn_795x.c @@ -11,10 +11,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details.
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include @@ -71,12 +67,12 @@ static atomic_t hifn_dev_number; /* DMA registres */ -#define HIFN_DMA_CRA 0x0C /* DMA Command Ring Address */ -#define HIFN_DMA_SDRA 0x1C /* DMA Source Data Ring Address */ +#define HIFN_DMA_CRA 0x0C /* DMA Command Ring Address */ +#define HIFN_DMA_SDRA 0x1C /* DMA Source Data Ring Address */ #define HIFN_DMA_RRA 0x2C /* DMA Result Ring Address */ #define HIFN_DMA_DDRA 0x3C /* DMA Destination Data Ring Address */ #define HIFN_DMA_STCTL 0x40 /* DMA Status and Control */ -#define HIFN_DMA_INTREN 0x44 /* DMA Interrupt Enable */ +#define HIFN_DMA_INTREN 0x44 /* DMA Interrupt Enable */ #define HIFN_DMA_CFG1 0x48 /* DMA Configuration #1 */ #define HIFN_DMA_CFG2 0x6C /* DMA Configuration #2 */ #define HIFN_CHIP_ID 0x98 /* Chip ID */ @@ -350,10 +346,10 @@ static atomic_t hifn_dev_number; #define HIFN_NAMESIZE 32 #define HIFN_MAX_RESULT_ORDER 5 -#define HIFN_D_CMD_RSIZE 24*1 -#define HIFN_D_SRC_RSIZE 80*1 -#define HIFN_D_DST_RSIZE 80*1 -#define HIFN_D_RES_RSIZE 24*1 +#define HIFN_D_CMD_RSIZE (24 * 1) +#define HIFN_D_SRC_RSIZE (80 * 1) +#define HIFN_D_DST_RSIZE (80 * 1) +#define HIFN_D_RES_RSIZE (24 * 1) #define HIFN_D_DST_DALIGN 4 @@ -378,17 +374,16 @@ static atomic_t hifn_dev_number; #define HIFN_MAX_RESULT (8 + 4 + 4 + 20 + 4) #define HIFN_USED_RESULT 12 -struct hifn_desc -{ +struct hifn_desc { volatile __le32 l; volatile __le32 p; }; struct hifn_dma { - struct hifn_desc cmdr[HIFN_D_CMD_RSIZE+1]; - struct hifn_desc srcr[HIFN_D_SRC_RSIZE+1]; - struct hifn_desc dstr[HIFN_D_DST_RSIZE+1]; - struct hifn_desc resr[HIFN_D_RES_RSIZE+1]; + struct hifn_desc cmdr[HIFN_D_CMD_RSIZE + 1]; + struct hifn_desc srcr[HIFN_D_SRC_RSIZE + 1]; + struct hifn_desc dstr[HIFN_D_DST_RSIZE + 1]; + struct hifn_desc resr[HIFN_D_RES_RSIZE + 1]; u8 command_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_COMMAND]; u8 result_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_RESULT]; @@ -402,16 +397,15 @@ struct hifn_dma { int cmdk, srck, dstk, resk; }; -#define HIFN_FLAG_CMD_BUSY (1<<0) -#define HIFN_FLAG_SRC_BUSY (1<<1) -#define HIFN_FLAG_DST_BUSY (1<<2) -#define HIFN_FLAG_RES_BUSY (1<<3) -#define HIFN_FLAG_OLD_KEY (1<<4) +#define HIFN_FLAG_CMD_BUSY (1 << 0) +#define HIFN_FLAG_SRC_BUSY (1 << 1) +#define HIFN_FLAG_DST_BUSY (1 << 2) +#define HIFN_FLAG_RES_BUSY (1 << 3) +#define HIFN_FLAG_OLD_KEY (1 << 4) #define HIFN_DEFAULT_ACTIVE_NUM 5 -struct hifn_device -{ +struct hifn_device { char name[HIFN_NAMESIZE]; int irq; @@ -424,7 +418,7 @@ struct hifn_device u32 dmareg; - void *sa[HIFN_D_RES_RSIZE]; + void *sa[HIFN_D_RES_RSIZE]; spinlock_t lock; @@ -439,7 +433,7 @@ struct hifn_device struct tasklet_struct tasklet; - struct crypto_queue queue; + struct crypto_queue queue; struct list_head alg_list; unsigned int pk_clk_freq; @@ -460,8 +454,7 @@ struct hifn_device #define HIFN_D_JUMP 0x40000000 #define HIFN_D_VALID 0x80000000 -struct hifn_base_command -{ +struct hifn_base_command { volatile __le16 masks; volatile __le16 session_num; volatile __le16 total_source_count; @@ -483,12 +476,11 @@ struct hifn_base_command /* * Structure to help build up the command data structure. 
*/ -struct hifn_crypt_command -{ - volatile __le16 masks; - volatile __le16 header_skip; - volatile __le16 source_count; - volatile __le16 reserved; +struct hifn_crypt_command { + volatile __le16 masks; + volatile __le16 header_skip; + volatile __le16 source_count; + volatile __le16 reserved; }; #define HIFN_CRYPT_CMD_ALG_MASK 0x0003 /* algorithm: */ @@ -514,12 +506,11 @@ struct hifn_crypt_command /* * Structure to help build up the command data structure. */ -struct hifn_mac_command -{ - volatile __le16 masks; - volatile __le16 header_skip; - volatile __le16 source_count; - volatile __le16 reserved; +struct hifn_mac_command { + volatile __le16 masks; + volatile __le16 header_skip; + volatile __le16 source_count; + volatile __le16 reserved; }; #define HIFN_MAC_CMD_ALG_MASK 0x0001 @@ -543,12 +534,11 @@ struct hifn_mac_command #define HIFN_MAC_CMD_POS_IPSEC 0x0200 #define HIFN_MAC_CMD_NEW_KEY 0x0800 -struct hifn_comp_command -{ - volatile __le16 masks; - volatile __le16 header_skip; - volatile __le16 source_count; - volatile __le16 reserved; +struct hifn_comp_command { + volatile __le16 masks; + volatile __le16 header_skip; + volatile __le16 source_count; + volatile __le16 reserved; }; #define HIFN_COMP_CMD_SRCLEN_M 0xc000 @@ -562,12 +552,11 @@ struct hifn_comp_command #define HIFN_COMP_CMD_ALG_MPPC 0x0001 /* MPPC */ #define HIFN_COMP_CMD_ALG_LZS 0x0000 /* LZS */ -struct hifn_base_result -{ - volatile __le16 flags; - volatile __le16 session; - volatile __le16 src_cnt; /* 15:0 of source count */ - volatile __le16 dst_cnt; /* 15:0 of dest count */ +struct hifn_base_result { + volatile __le16 flags; + volatile __le16 session; + volatile __le16 src_cnt; /* 15:0 of source count */ + volatile __le16 dst_cnt; /* 15:0 of dest count */ }; #define HIFN_BASE_RES_DSTOVERRUN 0x0200 /* destination overrun */ @@ -576,8 +565,7 @@ struct hifn_base_result #define HIFN_BASE_RES_DSTLEN_M 0x3000 /* 17:16 of dest count */ #define HIFN_BASE_RES_DSTLEN_S 12 -struct hifn_comp_result -{ +struct hifn_comp_result { volatile __le16 flags; volatile __le16 crc; }; @@ -588,18 +576,16 @@ struct hifn_comp_result #define HIFN_COMP_RES_ENDMARKER 0x0002 /* LZS: end marker seen */ #define HIFN_COMP_RES_SRC_NOTZERO 0x0001 /* source expired */ -struct hifn_mac_result -{ - volatile __le16 flags; - volatile __le16 reserved; +struct hifn_mac_result { + volatile __le16 flags; + volatile __le16 reserved; /* followed by 0, 6, 8, or 10 u16's of the MAC, then crypt */ }; #define HIFN_MAC_RES_MISCOMPARE 0x0002 /* compare failed */ #define HIFN_MAC_RES_SRC_NOTZERO 0x0001 /* source expired */ -struct hifn_crypt_result -{ +struct hifn_crypt_result { volatile __le16 flags; volatile __le16 reserved; }; @@ -614,11 +600,10 @@ struct hifn_crypt_result #define HIFN_POLL_SCALAR 0x0 #endif -#define HIFN_MAX_SEGLEN 0xffff /* maximum dma segment len */ +#define HIFN_MAX_SEGLEN 0xffff /* maximum dma segment len */ #define HIFN_MAX_DMALEN 0x3ffff /* maximum dma length */ -struct hifn_crypto_alg -{ +struct hifn_crypto_alg { struct list_head entry; struct crypto_alg alg; struct hifn_device *dev; @@ -626,24 +611,21 @@ struct hifn_crypto_alg #define ASYNC_SCATTERLIST_CACHE 16 -#define ASYNC_FLAGS_MISALIGNED (1<<0) +#define ASYNC_FLAGS_MISALIGNED (1 << 0) -struct hifn_cipher_walk -{ +struct hifn_cipher_walk { struct scatterlist cache[ASYNC_SCATTERLIST_CACHE]; u32 flags; int num; }; -struct hifn_context -{ +struct hifn_context { u8 key[HIFN_MAX_CRYPT_KEY_LENGTH]; struct hifn_device *dev; unsigned int keysize; }; -struct hifn_request_context -{ +struct 
hifn_request_context { u8 *iv; unsigned int ivsize; u8 op, type, mode, unused; @@ -685,7 +667,7 @@ static void hifn_wait_puc(struct hifn_device *dev) int i; u32 ret; - for (i=10000; i > 0; --i) { + for (i = 10000; i > 0; --i) { ret = hifn_read_0(dev, HIFN_0_PUCTRL); if (!(ret & HIFN_PUCTRL_RESET)) break; @@ -741,13 +723,12 @@ static void hifn_reset_dma(struct hifn_device *dev, int full) hifn_reset_puc(dev); } -static u32 hifn_next_signature(u_int32_t a, u_int cnt) +static u32 hifn_next_signature(u32 a, u_int cnt) { int i; u32 v; for (i = 0; i < cnt; i++) { - /* get the parity */ v = a & 0x80080125; v ^= v >> 16; @@ -838,16 +819,16 @@ static int hifn_init_pubrng(struct hifn_device *dev) hifn_write_1(dev, HIFN_1_PUB_RESET, hifn_read_1(dev, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET); - for (i=100; i > 0; --i) { + for (i = 100; i > 0; --i) { mdelay(1); if ((hifn_read_1(dev, HIFN_1_PUB_RESET) & HIFN_PUBRST_RESET) == 0) break; } - if (!i) + if (!i) { dev_err(&dev->pdev->dev, "Failed to initialise public key engine.\n"); - else { + } else { hifn_write_1(dev, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE); dev->dmareg |= HIFN_DMAIER_PUBDONE; hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg); @@ -855,9 +836,7 @@ static int hifn_init_pubrng(struct hifn_device *dev) dev_dbg(&dev->pdev->dev, "Public key engine has been successfully initialised.\n"); } - /* - * Enable RNG engine. - */ + /* Enable RNG engine. */ hifn_write_1(dev, HIFN_1_RNG_CONFIG, hifn_read_1(dev, HIFN_1_RNG_CONFIG) | HIFN_RNGCFG_ENA); @@ -885,7 +864,7 @@ static int hifn_enable_crypto(struct hifn_device *dev) } } - if (offtbl == NULL) { + if (!offtbl) { dev_err(&dev->pdev->dev, "Unknown card!\n"); return -ENODEV; } @@ -901,7 +880,7 @@ static int hifn_enable_crypto(struct hifn_device *dev) hifn_write_1(dev, HIFN_1_UNLOCK_SECRET2, 0); mdelay(1); - for (i=0; i<12; ++i) { + for (i = 0; i < 12; ++i) { addr = hifn_next_signature(addr, offtbl[i] + 0x101); hifn_write_1(dev, HIFN_1_UNLOCK_SECRET2, addr); @@ -920,16 +899,14 @@ static void hifn_init_dma(struct hifn_device *dev) u32 dptr = dev->desc_dma; int i; - for (i=0; icmdr[i].p = __cpu_to_le32(dptr + offsetof(struct hifn_dma, command_bufs[i][0])); - for (i=0; iresr[i].p = __cpu_to_le32(dptr + offsetof(struct hifn_dma, result_bufs[i][0])); - /* - * Setup LAST descriptors. - */ + /* Setup LAST descriptors. */ dma->cmdr[HIFN_D_CMD_RSIZE].p = __cpu_to_le32(dptr + offsetof(struct hifn_dma, cmdr[0])); dma->srcr[HIFN_D_SRC_RSIZE].p = __cpu_to_le32(dptr + @@ -949,7 +926,7 @@ static void hifn_init_dma(struct hifn_device *dev) * to calculate the optimal multiplier. For PCI we assume 66MHz, since that * allows us to operate without the risk of overclocking the chip. If it * actually uses 33MHz, the chip will operate at half the speed, this can be - * overriden by specifying the frequency as module parameter (pci33). + * overridden by specifying the frequency as module parameter (pci33). 
* * Unfortunately the PCI clock is not very suitable since the HIFN needs a * stable clock and the PCI clock frequency may vary, so the default is the @@ -1162,17 +1139,17 @@ static int hifn_setup_cmd_desc(struct hifn_device *dev, mask = 0; switch (rctx->op) { - case ACRYPTO_OP_DECRYPT: - mask = HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE; - break; - case ACRYPTO_OP_ENCRYPT: - mask = HIFN_BASE_CMD_CRYPT; - break; - case ACRYPTO_OP_HMAC: - mask = HIFN_BASE_CMD_MAC; - break; - default: - goto err_out; + case ACRYPTO_OP_DECRYPT: + mask = HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE; + break; + case ACRYPTO_OP_ENCRYPT: + mask = HIFN_BASE_CMD_CRYPT; + break; + case ACRYPTO_OP_HMAC: + mask = HIFN_BASE_CMD_MAC; + break; + default: + goto err_out; } buf_pos += hifn_setup_base_command(dev, buf_pos, nbytes, @@ -1187,53 +1164,53 @@ static int hifn_setup_cmd_desc(struct hifn_device *dev, md |= HIFN_CRYPT_CMD_NEW_IV; switch (rctx->mode) { - case ACRYPTO_MODE_ECB: - md |= HIFN_CRYPT_CMD_MODE_ECB; - break; - case ACRYPTO_MODE_CBC: - md |= HIFN_CRYPT_CMD_MODE_CBC; - break; - case ACRYPTO_MODE_CFB: - md |= HIFN_CRYPT_CMD_MODE_CFB; - break; - case ACRYPTO_MODE_OFB: - md |= HIFN_CRYPT_CMD_MODE_OFB; - break; - default: - goto err_out; + case ACRYPTO_MODE_ECB: + md |= HIFN_CRYPT_CMD_MODE_ECB; + break; + case ACRYPTO_MODE_CBC: + md |= HIFN_CRYPT_CMD_MODE_CBC; + break; + case ACRYPTO_MODE_CFB: + md |= HIFN_CRYPT_CMD_MODE_CFB; + break; + case ACRYPTO_MODE_OFB: + md |= HIFN_CRYPT_CMD_MODE_OFB; + break; + default: + goto err_out; } switch (rctx->type) { - case ACRYPTO_TYPE_AES_128: - if (ctx->keysize != 16) - goto err_out; - md |= HIFN_CRYPT_CMD_KSZ_128 | - HIFN_CRYPT_CMD_ALG_AES; - break; - case ACRYPTO_TYPE_AES_192: - if (ctx->keysize != 24) - goto err_out; - md |= HIFN_CRYPT_CMD_KSZ_192 | - HIFN_CRYPT_CMD_ALG_AES; - break; - case ACRYPTO_TYPE_AES_256: - if (ctx->keysize != 32) - goto err_out; - md |= HIFN_CRYPT_CMD_KSZ_256 | - HIFN_CRYPT_CMD_ALG_AES; - break; - case ACRYPTO_TYPE_3DES: - if (ctx->keysize != 24) - goto err_out; - md |= HIFN_CRYPT_CMD_ALG_3DES; - break; - case ACRYPTO_TYPE_DES: - if (ctx->keysize != 8) - goto err_out; - md |= HIFN_CRYPT_CMD_ALG_DES; - break; - default: + case ACRYPTO_TYPE_AES_128: + if (ctx->keysize != 16) goto err_out; + md |= HIFN_CRYPT_CMD_KSZ_128 | + HIFN_CRYPT_CMD_ALG_AES; + break; + case ACRYPTO_TYPE_AES_192: + if (ctx->keysize != 24) + goto err_out; + md |= HIFN_CRYPT_CMD_KSZ_192 | + HIFN_CRYPT_CMD_ALG_AES; + break; + case ACRYPTO_TYPE_AES_256: + if (ctx->keysize != 32) + goto err_out; + md |= HIFN_CRYPT_CMD_KSZ_256 | + HIFN_CRYPT_CMD_ALG_AES; + break; + case ACRYPTO_TYPE_3DES: + if (ctx->keysize != 24) + goto err_out; + md |= HIFN_CRYPT_CMD_ALG_3DES; + break; + case ACRYPTO_TYPE_DES: + if (ctx->keysize != 8) + goto err_out; + md |= HIFN_CRYPT_CMD_ALG_DES; + break; + default: + goto err_out; } buf_pos += hifn_setup_crypto_command(dev, buf_pos, @@ -1253,8 +1230,9 @@ static int hifn_setup_cmd_desc(struct hifn_device *dev, HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ | HIFN_D_JUMP); dma->cmdi = 0; - } else - dma->cmdr[dma->cmdi-1].l |= __cpu_to_le32(HIFN_D_VALID); + } else { + dma->cmdr[dma->cmdi - 1].l |= __cpu_to_le32(HIFN_D_VALID); + } if (!(dev->flags & HIFN_FLAG_CMD_BUSY)) { hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA); @@ -1412,7 +1390,7 @@ static int hifn_cipher_walk_init(struct hifn_cipher_walk *w, sg_init_table(w->cache, num); w->num = 0; - for (i=0; inum; ++i) { + for (i = 0; i < w->num; ++i) { struct scatterlist *s = &w->cache[i]; 
__free_page(sg_page(s)); @@ -1513,10 +1491,10 @@ static int hifn_cipher_walk(struct ablkcipher_request *req, * to put there additional blocksized chunk, * so we mark that page as containing only * blocksize aligned chunks: - * t->length = (slen & ~(HIFN_D_DST_DALIGN - 1)); + * t->length = (slen & ~(HIFN_D_DST_DALIGN - 1)); * and increase number of bytes to be processed * in next chunk: - * nbytes += diff; + * nbytes += diff; */ nbytes += diff; @@ -1861,7 +1839,7 @@ static void hifn_work(struct work_struct *work) reset); dev_info(&dev->pdev->dev, "%s: res: ", __func__); - for (i=0; iresr[i].l, dev->sa[i]); if (dev->sa[i]) { hifn_process_ready(dev->sa[i], -ENODEV); @@ -1953,12 +1931,12 @@ static void hifn_flush(struct hifn_device *dev) struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt; int i; - for (i=0; iresr[i]; if (dev->sa[i]) { hifn_process_ready(dev->sa[i], - (d->l & __cpu_to_le32(HIFN_D_VALID))?-ENODEV:0); + (d->l & __cpu_to_le32(HIFN_D_VALID)) ? -ENODEV : 0); hifn_complete_sa(dev, i); } } @@ -1990,7 +1968,7 @@ static int hifn_setkey(struct crypto_ablkcipher *cipher, const u8 *key, if (len == HIFN_DES_KEY_LENGTH) { u32 tmp[DES_EXPKEY_WORDS]; int ret = des_ekey(tmp, key); - + if (unlikely(ret == 0) && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) { tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; return -EINVAL; @@ -2231,9 +2209,7 @@ static inline int hifn_encrypt_3des_ofb(struct ablkcipher_request *req) ACRYPTO_TYPE_3DES, ACRYPTO_MODE_OFB); } -/* - * 3DES decryption functions. - */ +/* 3DES decryption functions. */ static inline int hifn_decrypt_3des_ecb(struct ablkcipher_request *req) { return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT, @@ -2255,8 +2231,7 @@ static inline int hifn_decrypt_3des_ofb(struct ablkcipher_request *req) ACRYPTO_TYPE_3DES, ACRYPTO_MODE_OFB); } -struct hifn_alg_template -{ +struct hifn_alg_template { char name[CRYPTO_MAX_ALG_NAME]; char drv_name[CRYPTO_MAX_ALG_NAME]; unsigned int bsize; @@ -2416,7 +2391,7 @@ static int hifn_alg_alloc(struct hifn_device *dev, struct hifn_alg_template *t) struct hifn_crypto_alg *alg; int err; - alg = kzalloc(sizeof(struct hifn_crypto_alg), GFP_KERNEL); + alg = kzalloc(sizeof(*alg), GFP_KERNEL); if (!alg) return -ENOMEM; @@ -2463,7 +2438,7 @@ static int hifn_register_alg(struct hifn_device *dev) { int i, err; - for (i=0; iname, sizeof(dev->name), "%s", name); spin_lock_init(&dev->lock); - for (i=0; i<3; ++i) { + for (i = 0; i < 3; ++i) { unsigned long addr, size; addr = pci_resource_start(pdev, i); @@ -2558,7 +2533,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id) dev->pdev = pdev; dev->irq = pdev->irq; - for (i=0; isa[i] = NULL; pci_set_drvdata(pdev, dev); @@ -2609,7 +2584,7 @@ err_out_free_desc: dev->desc_virt, dev->desc_dma); err_out_unmap_bars: - for (i=0; i<3; ++i) + for (i = 0; i < 3; ++i) if (dev->bar[i]) iounmap(dev->bar[i]); @@ -2644,7 +2619,7 @@ static void hifn_remove(struct pci_dev *pdev) pci_free_consistent(pdev, sizeof(struct hifn_dma), dev->desc_virt, dev->desc_dma); - for (i=0; i<3; ++i) + for (i = 0; i < 3; ++i) if (dev->bar[i]) iounmap(dev->bar[i]); -- cgit v0.10.2 From d956fed7b68fa1713b7ad0d8e100824cc55afce8 Mon Sep 17 00:00:00 2001 From: Tadeusz Struk Date: Thu, 22 Oct 2015 06:30:36 -0700 Subject: crypto: qat - fix get instance function Fix the logic in case we have found a device on a given node. 
Signed-off-by: Tadeusz Struk Signed-off-by: Herbert Xu
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c index 9cab154..9425402 100644 --- a/drivers/crypto/qat/qat_common/qat_crypto.c +++ b/drivers/crypto/qat/qat_common/qat_crypto.c @@ -118,19 +118,19 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node) } } } - if (!accel_dev) - pr_info("QAT: Could not find a device on node %d\n", node); - - /* Get any started device */ - list_for_each(itr, adf_devmgr_get_head()) { - struct adf_accel_dev *tmp_dev; - tmp_dev = list_entry(itr, struct adf_accel_dev, list); + if (!accel_dev) { + pr_info("QAT: Could not find a device on node %d\n", node); + /* Get any started device */ + list_for_each(itr, adf_devmgr_get_head()) { + struct adf_accel_dev *tmp_dev; - if (adf_dev_started(tmp_dev) && - !list_empty(&tmp_dev->crypto_list)) { - accel_dev = tmp_dev; - break; + tmp_dev = list_entry(itr, struct adf_accel_dev, list); + if (adf_dev_started(tmp_dev) && + !list_empty(&tmp_dev->crypto_list)) { + accel_dev = tmp_dev; + break; + } } }
-- cgit v0.10.2
From c52b67338937ffee7a4d7225d9cb334ace4279dd Mon Sep 17 00:00:00 2001 From: Salvatore Benedetto Date: Thu, 22 Oct 2015 15:23:12 +0100 Subject: crypto: qat - remove superfluous check from adf_probe - ent->device is already checked at the beginning of the function against the same value. This check is a duplicate. Signed-off-by: Salvatore Benedetto Signed-off-by: Herbert Xu
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c index f8dd14f..f933f7d 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c +++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c @@ -253,13 +253,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } accel_dev->hw_device = hw_data; - switch (ent->device) { - case ADF_DH895XCC_PCI_DEVICE_ID: - adf_init_hw_data_dh895xcc(accel_dev->hw_device); - break; - default: - return -ENODEV; - } + adf_init_hw_data_dh895xcc(accel_dev->hw_device); pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid); pci_read_config_dword(pdev, ADF_DH895XCC_FUSECTL_OFFSET, &hw_data->fuses); diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c index 789426f..7bec249 100644 --- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c +++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c @@ -243,14 +243,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_err; } accel_dev->hw_device = hw_data; - switch (ent->device) { - case ADF_DH895XCCIOV_PCI_DEVICE_ID: - adf_init_hw_data_dh895xcciov(accel_dev->hw_device); - break; - default: - ret = -ENODEV; - goto out_err; - } + adf_init_hw_data_dh895xcciov(accel_dev->hw_device); /* Get Accelerators and Accelerators Engines masks */ hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses);
-- cgit v0.10.2
From 652d5b8a8da8f05f7fb301067ffeef78b6f2eb01 Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Fri, 23 Oct 2015 14:10:36 +0200 Subject: crypto: algif - Change some variables to size_t Some variables are declared as int but store only positive values. Furthermore, they are used in operations/functions that expect unsigned values. This patch changes them to size_t. Signed-off-by: LABBE Corentin Signed-off-by: Herbert Xu
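Why the type change matters: a length stored in an int is truncated before comparisons such as min_t(int, len, PAGE_SIZE), which can produce a nonsensical chunk size for large transfers. A standalone sketch (the min_t macro below is a simplified stand-in for the kernel's, and the example assumes a 64-bit size_t):

```c
#include <stdio.h>
#include <stddef.h>

/* Simplified stand-in for the kernel's min_t(). */
#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
	size_t len = 0x100000000UL;	/* 4 GiB: does not fit in a 32-bit int */

	/* Truncating to int first typically yields 0 here (the exact
	 * result of the narrowing conversion is implementation-defined)... */
	int plen_bad = min_t(int, len, 4096);

	/* ...while size_t keeps the comparison in unsigned arithmetic
	 * and clamps to the page-sized chunk as intended. */
	size_t plen_good = min_t(size_t, len, 4096);

	printf("int: %d, size_t: %zu\n", plen_bad, plen_good);
	return 0;
}
```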
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index 0aa6fdf..f70bcf8 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -213,7 +213,7 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) } while (size) { - unsigned long len = size; + size_t len = size; struct scatterlist *sg = NULL; /* use the existing memory in an allocated page */ @@ -247,7 +247,7 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) /* allocate a new page */ len = min_t(unsigned long, size, aead_sndbuf(sk)); while (len) { - int plen = 0; + size_t plen = 0; if (sgl->cur >= ALG_MAX_PAGES) { aead_put_sgl(sk); @@ -256,7 +256,7 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) } sg = sgl->sg + sgl->cur; - plen = min_t(int, len, PAGE_SIZE); + plen = min_t(size_t, len, PAGE_SIZE); sg_assign_page(sg, alloc_page(GFP_KERNEL)); err = -ENOMEM; diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index af31a0e..bbb1b66 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -40,7 +40,7 @@ struct skcipher_ctx { struct af_alg_completion completion; atomic_t inflight; - unsigned used; + size_t used; unsigned int len; bool more; @@ -153,7 +153,7 @@ static int skcipher_alloc_sgl(struct sock *sk) return 0; } -static void skcipher_pull_sgl(struct sock *sk, int used, int put) +static void skcipher_pull_sgl(struct sock *sk, size_t used, int put) { struct alg_sock *ask = alg_sk(sk); struct skcipher_ctx *ctx = ask->private; @@ -167,7 +167,7 @@ static void skcipher_pull_sgl(struct sock *sk, int used, int put) sg = sgl->sg; for (i = 0; i < sgl->cur; i++) { - int plen = min_t(int, used, sg[i].length); + size_t plen = min_t(size_t, used, sg[i].length); if (!sg_page(sg + i)) continue; @@ -348,7 +348,7 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg, while (size) { struct scatterlist *sg; unsigned long len = size; - int plen; + size_t plen; if (ctx->merge) { sgl = list_entry(ctx->tsgl.prev, @@ -390,7 +390,7 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg, sg_unmark_end(sg + sgl->cur); do { i = sgl->cur; - plen = min_t(int, len, PAGE_SIZE); + plen = min_t(size_t, len, PAGE_SIZE); sg_assign_page(sg + i, alloc_page(GFP_KERNEL)); err = -ENOMEM;
-- cgit v0.10.2
From c22dafb3b101073d83262b6c9020b6578d2a442a Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Wed, 4 Nov 2015 21:13:33 +0100 Subject: crypto: marvell - check return value of sg_nents_for_len The sg_nents_for_len() function can fail, so this patch adds a check for its return value.
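This commit and the six that follow apply one pattern: sg_nents_for_len() returns a negative errno when the scatterlist is shorter than the requested length, and that value must be propagated as an error rather than used as an entry count. A standalone model of the pattern (sg_nents_for_len_model() is a userspace stand-in for the kernel helper; the surrounding context is illustrative):

```c
#include <stdio.h>
#include <errno.h>

/* Userspace stand-in for sg_nents_for_len(): count the entries needed
 * to cover len bytes, or return -EINVAL if the list is too short. */
static int sg_nents_for_len_model(const unsigned int *lengths, int n,
				  unsigned int len)
{
	int i, nents = 0;

	for (i = 0; i < n && len > 0; i++, nents++)
		len -= lengths[i] < len ? lengths[i] : len;
	return len ? -EINVAL : nents;
}

static int setup_request(const unsigned int *sg, int n, unsigned int nbytes)
{
	int src_nents = sg_nents_for_len_model(sg, n, nbytes);

	/* The check added throughout the series: a negative return is
	 * an error to propagate, never a usable entry count. */
	if (src_nents < 0) {
		fprintf(stderr, "Invalid number of src SG.\n");
		return src_nents;
	}
	printf("mapping %d entries\n", src_nents);
	return 0;
}

int main(void)
{
	unsigned int sg[] = { 4096, 4096 };

	setup_request(sg, 2, 4096);	/* fits: one entry */
	setup_request(sg, 2, 16384);	/* too short: -EINVAL path */
	return 0;
}
```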
Signed-off-by: LABBE Corentin Signed-off-by: Herbert Xu diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c index 6edae64..dcf1fce 100644 --- a/drivers/crypto/marvell/cipher.c +++ b/drivers/crypto/marvell/cipher.c @@ -401,7 +401,15 @@ static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req, return -EINVAL; creq->src_nents = sg_nents_for_len(req->src, req->nbytes); + if (creq->src_nents < 0) { + dev_err(cesa_dev->dev, "Invalid number of src SG"); + return creq->src_nents; + } creq->dst_nents = sg_nents_for_len(req->dst, req->nbytes); + if (creq->dst_nents < 0) { + dev_err(cesa_dev->dev, "Invalid number of dst SG"); + return creq->dst_nents; + } mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_OP_CRYPT_ONLY, CESA_SA_DESC_CFG_OP_MSK); diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c index 6ec55b4..683cca9 100644 --- a/drivers/crypto/marvell/hash.c +++ b/drivers/crypto/marvell/hash.c @@ -712,6 +712,10 @@ static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached) creq->req.base.type = CESA_STD_REQ; creq->src_nents = sg_nents_for_len(req->src, req->nbytes); + if (creq->src_nents < 0) { + dev_err(cesa_dev->dev, "Invalid number of src SG"); + return creq->src_nents; + } ret = mv_cesa_ahash_cache_req(req, cached); if (ret) -- cgit v0.10.2 From 8e409fe10695cb9729a8bdfa49b0af435b5ec89f Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Wed, 4 Nov 2015 21:13:34 +0100 Subject: crypto: talitos - check return value of sg_nents_for_len The sg_nents_for_len() function could fail, this patch add a check for its return value. Signed-off-by: LABBE Corentin Signed-off-by: Herbert Xu diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index b6f9f42..ab33898 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -1216,6 +1216,7 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, struct talitos_private *priv = dev_get_drvdata(dev); bool is_sec1 = has_ftr_sec1(priv); int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN; + void *err; if (cryptlen + authsize > max_len) { dev_err(dev, "length exceeds h/w max limit\n"); @@ -1228,14 +1229,29 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, if (!dst || dst == src) { src_nents = sg_nents_for_len(src, assoclen + cryptlen + authsize); + if (src_nents < 0) { + dev_err(dev, "Invalid number of src SG.\n"); + err = ERR_PTR(-EINVAL); + goto error_sg; + } src_nents = (src_nents == 1) ? 0 : src_nents; dst_nents = dst ? src_nents : 0; } else { /* dst && dst != src*/ src_nents = sg_nents_for_len(src, assoclen + cryptlen + (encrypt ? 0 : authsize)); + if (src_nents < 0) { + dev_err(dev, "Invalid number of src SG.\n"); + err = ERR_PTR(-EINVAL); + goto error_sg; + } src_nents = (src_nents == 1) ? 0 : src_nents; dst_nents = sg_nents_for_len(dst, assoclen + cryptlen + (encrypt ? authsize : 0)); + if (dst_nents < 0) { + dev_err(dev, "Invalid number of dst SG.\n"); + err = ERR_PTR(-EINVAL); + goto error_sg; + } dst_nents = (dst_nents == 1) ? 
0 : dst_nents; } @@ -1260,11 +1276,9 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, edesc = kmalloc(alloc_len, GFP_DMA | flags); if (!edesc) { - if (iv_dma) - dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); - dev_err(dev, "could not allocate edescriptor\n"); - return ERR_PTR(-ENOMEM); + err = ERR_PTR(-ENOMEM); + goto error_sg; } edesc->src_nents = src_nents; @@ -1277,6 +1291,10 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, DMA_BIDIRECTIONAL); return edesc; +error_sg: + if (iv_dma) + dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); + return err; } static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv, @@ -1830,11 +1848,16 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes) unsigned int nbytes_to_hash; unsigned int to_hash_later; unsigned int nsg; + int nents; if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) { /* Buffer up to one whole block */ - sg_copy_to_buffer(areq->src, - sg_nents_for_len(areq->src, nbytes), + nents = sg_nents_for_len(areq->src, nbytes); + if (nents < 0) { + dev_err(ctx->dev, "Invalid number of src SG.\n"); + return nents; + } + sg_copy_to_buffer(areq->src, nents, req_ctx->buf + req_ctx->nbuf, nbytes); req_ctx->nbuf += nbytes; return 0; @@ -1867,7 +1890,11 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes) req_ctx->psrc = areq->src; if (to_hash_later) { - int nents = sg_nents_for_len(areq->src, nbytes); + nents = sg_nents_for_len(areq->src, nbytes); + if (nents < 0) { + dev_err(ctx->dev, "Invalid number of src SG.\n"); + return nents; + } sg_pcopy_to_buffer(areq->src, nents, req_ctx->bufnext, to_hash_later, -- cgit v0.10.2 From 6c2b74d4774f36ea23f9cc8bb1401c1215fcd671 Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Wed, 4 Nov 2015 21:13:35 +0100 Subject: crypto: sahara - check return value of sg_nents_for_len The sg_nents_for_len() function could fail, this patch add a check for its return value. Signed-off-by: LABBE Corentin Signed-off-by: Herbert Xu diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c index f68c24a..ea9f56a 100644 --- a/drivers/crypto/sahara.c +++ b/drivers/crypto/sahara.c @@ -477,7 +477,15 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev) } dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total); + if (dev->nb_in_sg < 0) { + dev_err(dev->device, "Invalid numbers of src SG.\n"); + return dev->nb_in_sg; + } dev->nb_out_sg = sg_nents_for_len(dev->out_sg, dev->total); + if (dev->nb_out_sg < 0) { + dev_err(dev->device, "Invalid numbers of dst SG.\n"); + return dev->nb_out_sg; + } if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) { dev_err(dev->device, "not enough hw links (%d)\n", dev->nb_in_sg + dev->nb_out_sg); @@ -793,6 +801,10 @@ static int sahara_sha_hw_links_create(struct sahara_dev *dev, dev->in_sg = rctx->in_sg; dev->nb_in_sg = sg_nents_for_len(dev->in_sg, rctx->total); + if (dev->nb_in_sg < 0) { + dev_err(dev->device, "Invalid numbers of src SG.\n"); + return dev->nb_in_sg; + } if ((dev->nb_in_sg) > SAHARA_MAX_HW_LINK) { dev_err(dev->device, "not enough hw links (%d)\n", dev->nb_in_sg + dev->nb_out_sg); -- cgit v0.10.2 From 4fa9948ca5bf308fabd5c9b584bf710f5d393859 Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Wed, 4 Nov 2015 21:13:36 +0100 Subject: crypto: qce - check return value of sg_nents_for_len The sg_nents_for_len() function could fail, this patch add a check for its return value. 
Signed-off-by: LABBE Corentin Signed-off-by: Herbert Xu diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/ablkcipher.c index 2c0d63d..dbcbbe2 100644 --- a/drivers/crypto/qce/ablkcipher.c +++ b/drivers/crypto/qce/ablkcipher.c @@ -83,6 +83,14 @@ qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req) rctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes); else rctx->dst_nents = rctx->src_nents; + if (rctx->src_nents < 0) { + dev_err(qce->dev, "Invalid numbers of src SG.\n"); + return rctx->src_nents; + } + if (rctx->dst_nents < 0) { + dev_err(qce->dev, "Invalid numbers of dst SG.\n"); + return -rctx->dst_nents; + } rctx->dst_nents += 1; diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c index 0c9973e..47e114a 100644 --- a/drivers/crypto/qce/sha.c +++ b/drivers/crypto/qce/sha.c @@ -92,6 +92,11 @@ static int qce_ahash_async_req_handle(struct crypto_async_request *async_req) } rctx->src_nents = sg_nents_for_len(req->src, req->nbytes); + if (rctx->src_nents < 0) { + dev_err(qce->dev, "Invalid numbers of src SG.\n"); + return rctx->src_nents; + } + ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE); if (ret < 0) return ret; -- cgit v0.10.2 From f051f95eb47bb216ad0a8fbe64b4be179cc5abec Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Wed, 4 Nov 2015 21:13:37 +0100 Subject: crypto: picoxcell - check return value of sg_nents_for_len The sg_nents_for_len() function could fail, this patch add a check for its return value. In the same time, we remove sg_count() as it is used as an alias of sg_nents_for_len. Signed-off-by: LABBE Corentin Acked-by: Jamie Iles Signed-off-by: Herbert Xu diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c index 615da96..a9c6367 100644 --- a/drivers/crypto/picoxcell_crypto.c +++ b/drivers/crypto/picoxcell_crypto.c @@ -272,12 +272,6 @@ static unsigned spacc_load_ctx(struct spacc_generic_ctx *ctx, return indx; } -/* Count the number of scatterlist entries in a scatterlist. */ -static inline int sg_count(struct scatterlist *sg_list, int nbytes) -{ - return sg_nents_for_len(sg_list, nbytes); -} - static inline void ddt_set(struct spacc_ddt *ddt, dma_addr_t phys, size_t len) { ddt->p = phys; @@ -300,7 +294,11 @@ static struct spacc_ddt *spacc_sg_to_ddt(struct spacc_engine *engine, struct spacc_ddt *ddt; int i; - nents = sg_count(payload, nbytes); + nents = sg_nents_for_len(payload, nbytes); + if (nents < 0) { + dev_err(engine->dev, "Invalid numbers of SG.\n"); + return NULL; + } mapped_ents = dma_map_sg(engine->dev, payload, nents, dir); if (mapped_ents + 1 > MAX_DDT_LEN) @@ -336,13 +334,21 @@ static int spacc_aead_make_ddts(struct aead_request *areq) if (req->is_encrypt) total += crypto_aead_authsize(aead); - src_nents = sg_count(areq->src, total); + src_nents = sg_nents_for_len(areq->src, total); + if (src_nents < 0) { + dev_err(engine->dev, "Invalid numbers of src SG.\n"); + return src_nents; + } if (src_nents + 1 > MAX_DDT_LEN) return -E2BIG; dst_nents = 0; if (areq->src != areq->dst) { - dst_nents = sg_count(areq->dst, total); + dst_nents = sg_nents_for_len(areq->dst, total); + if (dst_nents < 0) { + dev_err(engine->dev, "Invalid numbers of dst SG.\n"); + return dst_nents; + } if (src_nents + 1 > MAX_DDT_LEN) return -E2BIG; } @@ -422,13 +428,22 @@ static void spacc_aead_free_ddts(struct spacc_req *req) (req->is_encrypt ? 
crypto_aead_authsize(aead) : 0); struct spacc_aead_ctx *aead_ctx = crypto_aead_ctx(aead); struct spacc_engine *engine = aead_ctx->generic.engine; - unsigned nents = sg_count(areq->src, total); + int nents = sg_nents_for_len(areq->src, total); + + /* sg_nents_for_len should not fail since it works when mapping sg */ + if (unlikely(nents < 0)) { + dev_err(engine->dev, "Invalid numbers of src SG.\n"); + return; + } if (areq->src != areq->dst) { dma_unmap_sg(engine->dev, areq->src, nents, DMA_TO_DEVICE); - dma_unmap_sg(engine->dev, areq->dst, - sg_count(areq->dst, total), - DMA_FROM_DEVICE); + nents = sg_nents_for_len(areq->dst, total); + if (unlikely(nents < 0)) { + dev_err(engine->dev, "Invalid numbers of dst SG.\n"); + return; + } + dma_unmap_sg(engine->dev, areq->dst, nents, DMA_FROM_DEVICE); } else dma_unmap_sg(engine->dev, areq->src, nents, DMA_BIDIRECTIONAL); @@ -440,7 +455,12 @@ static void spacc_free_ddt(struct spacc_req *req, struct spacc_ddt *ddt, dma_addr_t ddt_addr, struct scatterlist *payload, unsigned nbytes, enum dma_data_direction dir) { - unsigned nents = sg_count(payload, nbytes); + int nents = sg_nents_for_len(payload, nbytes); + + if (nents < 0) { + dev_err(req->engine->dev, "Invalid numbers of SG.\n"); + return; + } dma_unmap_sg(req->engine->dev, payload, nents, dir); dma_pool_free(req->engine->req_pool, ddt, ddt_addr); -- cgit v0.10.2 From f9970c2865d300bd0bb4ba006a0283a8ffa51ada Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Wed, 4 Nov 2015 21:13:38 +0100 Subject: crypto: caam - check return value of sg_nents_for_len The sg_nents_for_len() function could fail, this patch add a check for its return value. We do the same for sg_count since it use sg_nents_for_len(). Signed-off-by: LABBE Corentin Signed-off-by: Herbert Xu diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 49106ea..fe9c156 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -803,6 +803,10 @@ static int ahash_update_ctx(struct ahash_request *req) if (to_hash) { src_nents = sg_nents_for_len(req->src, req->nbytes - (*next_buflen)); + if (src_nents < 0) { + dev_err(jrdev, "Invalid number of src SG.\n"); + return src_nents; + } sec4_sg_src_index = 1 + (*buflen ? 1 : 0); sec4_sg_bytes = (sec4_sg_src_index + src_nents) * sizeof(struct sec4_sg_entry); @@ -1002,6 +1006,10 @@ static int ahash_finup_ctx(struct ahash_request *req) int sh_len; src_nents = sg_nents_for_len(req->src, req->nbytes); + if (src_nents < 0) { + dev_err(jrdev, "Invalid number of src SG.\n"); + return src_nents; + } sec4_sg_src_index = 1 + (buflen ? 1 : 0); sec4_sg_bytes = (sec4_sg_src_index + src_nents) * sizeof(struct sec4_sg_entry); @@ -1086,6 +1094,10 @@ static int ahash_digest(struct ahash_request *req) int sh_len; src_nents = sg_count(req->src, req->nbytes); + if (src_nents < 0) { + dev_err(jrdev, "Invalid number of src SG.\n"); + return src_nents; + } dma_map_sg(jrdev, req->src, src_nents ? 
: 1, DMA_TO_DEVICE); sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry); @@ -1234,6 +1246,10 @@ static int ahash_update_no_ctx(struct ahash_request *req) if (to_hash) { src_nents = sg_nents_for_len(req->src, req->nbytes - (*next_buflen)); + if (src_nents < 0) { + dev_err(jrdev, "Invalid number of src SG.\n"); + return src_nents; + } sec4_sg_bytes = (1 + src_nents) * sizeof(struct sec4_sg_entry); @@ -1342,6 +1358,10 @@ static int ahash_finup_no_ctx(struct ahash_request *req) int ret = 0; src_nents = sg_nents_for_len(req->src, req->nbytes); + if (src_nents < 0) { + dev_err(jrdev, "Invalid number of src SG.\n"); + return src_nents; + } sec4_sg_src_index = 2; sec4_sg_bytes = (sec4_sg_src_index + src_nents) * sizeof(struct sec4_sg_entry); @@ -1430,6 +1450,10 @@ static int ahash_update_first(struct ahash_request *req) if (to_hash) { src_nents = sg_count(req->src, req->nbytes - (*next_buflen)); + if (src_nents < 0) { + dev_err(jrdev, "Invalid number of src SG.\n"); + return src_nents; + } dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE); sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry); -- cgit v0.10.2 From 7aff7d0abc83b0991e83af3c43f0d0df47e7c3bf Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Wed, 4 Nov 2015 21:13:39 +0100 Subject: crypto: amcc - check return value of sg_nents_for_len The sg_nents_for_len() function could fail, this patch add a check for its return value. Signed-off-by: LABBE Corentin Signed-off-by: Herbert Xu diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c index 58a630e..62134c8 100644 --- a/drivers/crypto/amcc/crypto4xx_core.c +++ b/drivers/crypto/amcc/crypto4xx_core.c @@ -781,6 +781,10 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req, /* figure how many gd is needed */ num_gd = sg_nents_for_len(src, datalen); + if ((int)num_gd < 0) { + dev_err(dev->core_dev->device, "Invalid number of src SG.\n"); + return -EINVAL; + } if (num_gd == 1) num_gd = 0; -- cgit v0.10.2 From 9cbe21d8f89dfa851e593ca12725e910ec60c10c Mon Sep 17 00:00:00 2001 From: Andrzej Zaborowski Date: Fri, 13 Nov 2015 12:01:32 +0100 Subject: lib/mpi: only require buffers as big as needed for the integer Since mpi_write_to_sgl and mpi_read_buffer explicitly left-align the integers being written it makes no sense to require a buffer big enough for the number + the leading zero bytes which are not written. The error returned also doesn't convey any information. So instead require only the size needed and return -EOVERFLOW to signal when buffer too short. Signed-off-by: Andrew Zaborowski Signed-off-by: Herbert Xu diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c index 3db76b8..ec533a6 100644 --- a/lib/mpi/mpicoder.c +++ b/lib/mpi/mpicoder.c @@ -135,7 +135,9 @@ EXPORT_SYMBOL_GPL(mpi_read_from_buffer); * @buf: bufer to which the output will be written to. Needs to be at * leaset mpi_get_size(a) long. * @buf_len: size of the buf. - * @nbytes: receives the actual length of the data written. + * @nbytes: receives the actual length of the data written on success and + * the data to-be-written on -EOVERFLOW in case buf_len was too + * small. * @sign: if not NULL, it will be set to the sign of a. 
* * Return: 0 on success or error code in case of error @@ -148,7 +150,7 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes, unsigned int n = mpi_get_size(a); int i, lzeros = 0; - if (buf_len < n || !buf || !nbytes) + if (!buf || !nbytes) return -EINVAL; if (sign) @@ -163,6 +165,11 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes, break; } + if (buf_len < n - lzeros) { + *nbytes = n - lzeros; + return -EOVERFLOW; + } + p = buf; *nbytes = n - lzeros; @@ -332,7 +339,8 @@ EXPORT_SYMBOL_GPL(mpi_set_buffer); * @nbytes: in/out param - it has the be set to the maximum number of * bytes that can be written to sgl. This has to be at least * the size of the integer a. On return it receives the actual - * length of the data written. + * length of the data written on success or the data that would + * be written if buffer was too small. * @sign: if not NULL, it will be set to the sign of a. * * Return: 0 on success or error code in case of error @@ -345,7 +353,7 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes, unsigned int n = mpi_get_size(a); int i, x, y = 0, lzeros = 0, buf_len; - if (!nbytes || *nbytes < n) + if (!nbytes) return -EINVAL; if (sign) @@ -360,6 +368,11 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes, break; } + if (*nbytes < n - lzeros) { + *nbytes = n - lzeros; + return -EOVERFLOW; + } + *nbytes = n - lzeros; buf_len = sgl->length; p2 = sg_virt(sgl);
-- cgit v0.10.2
From 457e6f73a12bb713cc3eec2c979d707cb5716a07 Mon Sep 17 00:00:00 2001 From: Andrzej Zaborowski Date: Fri, 13 Nov 2015 12:01:33 +0100 Subject: crypto: rsa - only require output buffers as big as needed. The RSA operations explicitly left-align the integers being written, skipping any leading zero bytes, but still require the output buffers to include just enough space for the integer + the leading zero bytes. Since the size of the integer + the leading zero bytes (i.e. the key modulus size) can now be obtained more easily through crypto_akcipher_maxsize, change the operations to only require as big a buffer as actually needed if the caller has that information. The semantics for request->dst_len don't change. Signed-off-by: Andrew Zaborowski Signed-off-by: Herbert Xu
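The calling convention these two patches establish can be modelled in userspace: call with whatever buffer is at hand, and on -EOVERFLOW retry with the size reported back through *nbytes. (A minimal model only; the real mpi_read_buffer() operates on an MPI, and the 3-byte payload stands in for an integer whose leading zeros have been stripped.)

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

/* Model of the new contract: left-align the payload; if buf_len is too
 * small, report the required size in *nbytes and return -EOVERFLOW. */
static int read_buffer_model(unsigned char *buf, unsigned int buf_len,
			     unsigned int *nbytes)
{
	static const unsigned char payload[] = { 0x01, 0x02, 0x03 };

	if (!buf || !nbytes)
		return -EINVAL;
	*nbytes = sizeof(payload);
	if (buf_len < sizeof(payload))
		return -EOVERFLOW;
	memcpy(buf, payload, sizeof(payload));
	return 0;
}

int main(void)
{
	unsigned char small[1], *buf;
	unsigned int nbytes;

	/* Probe with a too-small buffer, then retry with the reported size. */
	if (read_buffer_model(small, sizeof(small), &nbytes) == -EOVERFLOW) {
		buf = malloc(nbytes);
		if (buf && read_buffer_model(buf, nbytes, &nbytes) == 0)
			printf("read %u bytes\n", nbytes);
		free(buf);
	}
	return 0;
}
```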
Signed-off-by: Andrew Zaborowski Signed-off-by: Herbert Xu diff --git a/crypto/rsa.c b/crypto/rsa.c index 1093e04..58aad69 100644 --- a/crypto/rsa.c +++ b/crypto/rsa.c @@ -91,12 +91,6 @@ static int rsa_enc(struct akcipher_request *req) goto err_free_c; } - if (req->dst_len < mpi_get_size(pkey->n)) { - req->dst_len = mpi_get_size(pkey->n); - ret = -EOVERFLOW; - goto err_free_c; - } - ret = -ENOMEM; m = mpi_read_raw_from_sgl(req->src, req->src_len); if (!m) @@ -136,12 +130,6 @@ static int rsa_dec(struct akcipher_request *req) goto err_free_m; } - if (req->dst_len < mpi_get_size(pkey->n)) { - req->dst_len = mpi_get_size(pkey->n); - ret = -EOVERFLOW; - goto err_free_m; - } - ret = -ENOMEM; c = mpi_read_raw_from_sgl(req->src, req->src_len); if (!c) @@ -180,12 +168,6 @@ static int rsa_sign(struct akcipher_request *req) goto err_free_s; } - if (req->dst_len < mpi_get_size(pkey->n)) { - req->dst_len = mpi_get_size(pkey->n); - ret = -EOVERFLOW; - goto err_free_s; - } - ret = -ENOMEM; m = mpi_read_raw_from_sgl(req->src, req->src_len); if (!m) @@ -225,12 +207,6 @@ static int rsa_verify(struct akcipher_request *req) goto err_free_m; } - if (req->dst_len < mpi_get_size(pkey->n)) { - req->dst_len = mpi_get_size(pkey->n); - ret = -EOVERFLOW; - goto err_free_m; - } - ret = -ENOMEM; s = mpi_read_raw_from_sgl(req->src, req->src_len); if (!s) { -- cgit v0.10.2 From 202a32f0463ebc58833cc3e75c069996437ba526 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Sat, 14 Nov 2015 11:06:59 +0100 Subject: crypto: qat - constify pci_error_handlers structures This pci_error_handlers structure is never modified, like all the other pci_error_handlers structures, so declare it as const. Done with the help of Coccinelle. Signed-off-by: Julia Lawall Signed-off-by: Herbert Xu diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c index 0a5ca0b..d24cfd4 100644 --- a/drivers/crypto/qat/qat_common/adf_aer.c +++ b/drivers/crypto/qat/qat_common/adf_aer.c @@ -197,7 +197,7 @@ static void adf_resume(struct pci_dev *pdev) dev_info(&pdev->dev, "Device is up and runnig\n"); } -static struct pci_error_handlers adf_err_handler = { +static const struct pci_error_handlers adf_err_handler = { .error_detected = adf_error_detected, .slot_reset = adf_slot_reset, .resume = adf_resume, -- cgit v0.10.2 From f9d1293b3c635f5224f7445acbe01672880a4945 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Sun, 15 Nov 2015 16:51:21 +0100 Subject: crypto: ixp4xx - Delete unnecessary checks before the function call "dma_pool_destroy" The dma_pool_destroy() function tests whether its argument is NULL and then returns immediately. Thus the test around the calls is not needed. This issue was detected by using the Coccinelle software. Signed-off-by: Markus Elfring Signed-off-by: Herbert Xu diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index 8f27903..e52496a 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c @@ -510,10 +510,8 @@ npe_error: printk(KERN_ERR "%s not responding\n", npe_name(npe_c)); ret = -EIO; err: - if (ctx_pool) - dma_pool_destroy(ctx_pool); - if (buffer_pool) - dma_pool_destroy(buffer_pool); + dma_pool_destroy(ctx_pool); + dma_pool_destroy(buffer_pool); npe_release(npe_c); return ret; } -- cgit v0.10.2 From 4f9ea86604e3ba64edd2817795798168fbb3c1a6 Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Mon, 16 Nov 2015 09:35:54 +0100 Subject: crypto: sun4i-ss - add missing statesize The sun4i-ss implementation of md5/sha1 is via ahash algorithms. 
Commit 8996eafdcbad ("crypto: ahash - ensure statesize is non-zero") made it impossible to load them without giving a statesize. This patch specifies the statesize for sha1 and md5. Fixes: 6298e948215f ("crypto: sunxi-ss - Add Allwinner Security System crypto accelerator") Cc: # v4.3+ Tested-by: Chen-Yu Tsai Signed-off-by: LABBE Corentin Signed-off-by: Herbert Xu diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c index eab6fe2..107cd2a 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-core.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c @@ -39,6 +39,7 @@ static struct sun4i_ss_alg_template ss_algs[] = { .import = sun4i_hash_import_md5, .halg = { .digestsize = MD5_DIGEST_SIZE, + .statesize = sizeof(struct md5_state), .base = { .cra_name = "md5", .cra_driver_name = "md5-sun4i-ss", @@ -66,6 +67,7 @@ static struct sun4i_ss_alg_template ss_algs[] = { .import = sun4i_hash_import_sha1, .halg = { .digestsize = SHA1_DIGEST_SIZE, + .statesize = sizeof(struct sha1_state), .base = { .cra_name = "sha1", .cra_driver_name = "sha1-sun4i-ss", -- cgit v0.10.2 From 304e4818d4a45e83019ea30e4cfcb3ac2a8ce09a Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Mon, 16 Nov 2015 22:37:14 +0800 Subject: crypto: api - use list_first_entry_or_null and list_next_entry Simplify crypto_more_spawns() with list_first_entry_or_null() and list_next_entry(). Signed-off-by: Geliang Tang Signed-off-by: Herbert Xu diff --git a/crypto/algapi.c b/crypto/algapi.c index 59bf491..7be76aa 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -93,16 +93,15 @@ static struct list_head *crypto_more_spawns(struct crypto_alg *alg, { struct crypto_spawn *spawn, *n; - if (list_empty(stack)) + spawn = list_first_entry_or_null(stack, struct crypto_spawn, list); + if (!spawn) return NULL; - spawn = list_first_entry(stack, struct crypto_spawn, list); - n = list_entry(spawn->list.next, struct crypto_spawn, list); + n = list_next_entry(spawn, list); if (spawn->alg && &n->list != stack && !n->alg) n->alg = (n->list.next == stack) ? alg : - &list_entry(n->list.next, struct crypto_spawn, - list)->inst->alg; + &list_next_entry(n, list)->inst->alg; list_move(&spawn->list, secondary_spawns); -- cgit v0.10.2 From 08346170d4483d58b8971fe9ff2a1318fd93d121 Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Mon, 16 Nov 2015 22:37:15 +0800 Subject: crypto: mcryptd - use list_first_entry_or_null() Simplify mcryptd_opportunistic_flush() with list_first_entry_or_null(). 
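Both conversions above share one shape; a generic sketch (struct and function names illustrative, not from either patch) of why list_first_entry_or_null() reads better than a list_empty() check followed by list_first_entry():

#include <linux/list.h>

struct job {
	struct list_head node;
	int id;
};

/* Pop the first job, or return NULL if the queue is empty.  One call
 * replaces the separate emptiness test plus list_first_entry().
 */
static struct job *job_pop(struct list_head *queue)
{
	struct job *j = list_first_entry_or_null(queue, struct job, node);

	if (j)
		list_del(&j->node);
	return j;
}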
Signed-off-by: Geliang Tang Signed-off-by: Herbert Xu diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c index fe5b495a..f78d4fc 100644 --- a/crypto/mcryptd.c +++ b/crypto/mcryptd.c @@ -128,13 +128,9 @@ static void mcryptd_opportunistic_flush(void) flist = per_cpu_ptr(mcryptd_flist, smp_processor_id()); while (single_task_running()) { mutex_lock(&flist->lock); - if (list_empty(&flist->list)) { - mutex_unlock(&flist->lock); - return; - } - cstate = list_entry(flist->list.next, + cstate = list_first_entry_or_null(&flist->list, struct mcryptd_alg_cstate, flush_list); - if (!cstate->flusher_engaged) { + if (!cstate || !cstate->flusher_engaged) { mutex_unlock(&flist->lock); return; } -- cgit v0.10.2 From 20ecae79e7aa7908810094e365e4c72a877fb87d Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 17 Nov 2015 10:22:06 +0100 Subject: crypto: atmel - fix 64-bit warnings The atmel AES driver assumes that 'int' and 'size_t' are the same type in multiple locations, which the compiler warns about when building it for 64-bit systems: In file included from ../drivers/crypto/atmel-aes.c:17:0: drivers/crypto/atmel-aes.c: In function 'atmel_aes_sg_copy': include/linux/kernel.h:724:17: warning: comparison of distinct pointer types lacks a cast drivers/crypto/atmel-aes.c:448:11: note: in expansion of macro 'min' drivers/crypto/atmel-aes.c: In function 'atmel_aes_crypt_dma_stop': include/linux/kern_levels.h:4:18: warning: format '%u' expects argument of type 'unsigned int', but argument 2 has type 'size_t {aka long unsigned int}' [-Wformat=] This changes the format strings to use the %z modifier when printing a size_t, and makes sure that we use the correct size_t type where needed. In case of sg_dma_len(), the type of the result depends on CONFIG_NEED_SG_DMA_LENGTH, so we have to use min_t to get it to work in all configurations. 
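The two idioms the fix relies on, in isolation (a sketch, not the driver code): min_t() forces both operands to one named type where plain min() would warn about distinct operand types, and %zu is the printk length modifier for size_t.

#include <linux/kernel.h>
#include <linux/printk.h>

static size_t bounded_copy_len(size_t total, unsigned int dma_len)
{
	/* min(total, dma_len) would trigger the "comparison of distinct
	 * types" warning on 64-bit; min_t(size_t, ...) casts both sides.
	 */
	size_t count = min_t(size_t, total, dma_len);

	pr_debug("copying %zu of %zu bytes\n", count, total);
	return count;
}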
Signed-off-by: Arnd Bergmann Acked-by: Cyrille Pitchen Signed-off-by: Herbert Xu diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index fb16d81..bfb1f79 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -184,7 +184,7 @@ static int atmel_aes_sg_length(struct ablkcipher_request *req, static int atmel_aes_sg_copy(struct scatterlist **sg, size_t *offset, void *buf, size_t buflen, size_t total, int out) { - unsigned int count, off = 0; + size_t count, off = 0; while (buflen && total) { count = min((*sg)->length - *offset, total); @@ -444,8 +444,8 @@ static int atmel_aes_crypt_dma_start(struct atmel_aes_dev *dd) if (fast) { - count = min(dd->total, sg_dma_len(dd->in_sg)); - count = min(count, sg_dma_len(dd->out_sg)); + count = min_t(size_t, dd->total, sg_dma_len(dd->in_sg)); + count = min_t(size_t, count, sg_dma_len(dd->out_sg)); err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); if (!err) { @@ -639,7 +639,7 @@ static int atmel_aes_crypt_dma_stop(struct atmel_aes_dev *dd) dd->buf_out, dd->buflen, dd->dma_size, 1); if (count != dd->dma_size) { err = -EINVAL; - pr_err("not all data converted: %u\n", count); + pr_err("not all data converted: %zu\n", count); } } } @@ -666,7 +666,7 @@ static int atmel_aes_buff_init(struct atmel_aes_dev *dd) dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in, dd->buflen, DMA_TO_DEVICE); if (dma_mapping_error(dd->dev, dd->dma_addr_in)) { - dev_err(dd->dev, "dma %d bytes error\n", dd->buflen); + dev_err(dd->dev, "dma %zd bytes error\n", dd->buflen); err = -EINVAL; goto err_map_in; } @@ -674,7 +674,7 @@ static int atmel_aes_buff_init(struct atmel_aes_dev *dd) dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out, dd->buflen, DMA_FROM_DEVICE); if (dma_mapping_error(dd->dev, dd->dma_addr_out)) { - dev_err(dd->dev, "dma %d bytes error\n", dd->buflen); + dev_err(dd->dev, "dma %zd bytes error\n", dd->buflen); err = -EINVAL; goto err_map_out; } -- cgit v0.10.2 From f18611da8683da19267e30187a191af7fa670206 Mon Sep 17 00:00:00 2001 From: Cyrille Pitchen Date: Tue, 17 Nov 2015 13:37:10 +0100 Subject: crypto: tcrypt - fix keysize argument of test_aead_speed for gcm(aes) The key sizes used by AES in GCM mode should be 128, 192 or 256 bits (16, 24 or 32 bytes). There is no additional 4-byte nonce as there is for RFC 4106. Signed-off-by: Cyrille Pitchen Signed-off-by: Herbert Xu diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 46a4a75..270bc4b 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -1789,7 +1789,7 @@ static int do_test(const char *alg, u32 type, u32 mask, int m) test_aead_speed("rfc4106(gcm(aes))", ENCRYPT, sec, NULL, 0, 16, 16, aead_speed_template_20); test_aead_speed("gcm(aes)", ENCRYPT, sec, - NULL, 0, 16, 8, aead_speed_template_20); + NULL, 0, 16, 8, speed_template_16_24_32); break; case 212: -- cgit v0.10.2 From 56b85c9d7a45a90bf7cb8b5e8c2693f8064b2c09 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 27 Jan 2015 22:34:04 +0100 Subject: crypto: atmel: fix bogus select The Atmel at91 crypto driver unconditionally selects AT_HDMAC, which results in a Kconfig warning if that driver is not enabled: warning: (CRYPTO_DEV_ATMEL_AES) selects AT_HDMAC which has unmet direct dependencies (DMADEVICES && ARCH_AT91) The crypto driver itself does not actually have a dependency on a particular DMA engine, other than this being the one that is used in at91. Removing the 'select' gets rid of the warning, but can cause the driver to be unusable if the HDMAC is not enabled at the same time. 
To work around that, this patch clarifies the runtime dependency to be 'AT_HDMAC || AT_XDMAC', but adds an alternative for COMPILE_TEST, which lets the driver get built on all systems. The ARCH_AT91 dependency is implied by AT_XDMAC || AT_HDMAC now and no longer needs to be listed separately. Signed-off-by: Arnd Bergmann Signed-off-by: Herbert Xu diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 2569e04..5357bc1 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -378,10 +378,9 @@ config CRYPTO_DEV_BFIN_CRC config CRYPTO_DEV_ATMEL_AES tristate "Support for Atmel AES hw accelerator" - depends on ARCH_AT91 + depends on AT_XDMAC || AT_HDMAC || COMPILE_TEST select CRYPTO_AES select CRYPTO_BLKCIPHER - select AT_HDMAC help Some Atmel processors have AES hw accelerator. Select this if you want to use the Atmel module for -- cgit v0.10.2 From 4c13ac1cf0e40c1fea8993354e54822ef643425e Mon Sep 17 00:00:00 2001 From: Aaro Koskinen Date: Wed, 18 Nov 2015 21:59:01 +0200 Subject: hwrng: omap3-rom - convert timer to delayed work We cannot put the HW RNG to idle using a timer because we cannot disable clocks from atomic context. Use a delayed work instead. Fixes a warning with CONFIG_DEBUG_MUTEXES on Nokia N900 during boot. Reported-by: Sebastian Reichel Signed-off-by: Aaro Koskinen Signed-off-by: Herbert Xu diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c index a405cdc..8da14f1 100644 --- a/drivers/char/hw_random/omap3-rom-rng.c +++ b/drivers/char/hw_random/omap3-rom-rng.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #include #include @@ -29,11 +29,11 @@ /* param1: ptr, param2: count, param3: flag */ static u32 (*omap3_rom_rng_call)(u32, u32, u32); -static struct timer_list idle_timer; +static struct delayed_work idle_work; static int rng_idle; static struct clk *rng_clk; -static void omap3_rom_rng_idle(unsigned long data) +static void omap3_rom_rng_idle(struct work_struct *work) { int r; @@ -51,7 +51,7 @@ static int omap3_rom_rng_get_random(void *buf, unsigned int count) u32 r; u32 ptr; - del_timer_sync(&idle_timer); + cancel_delayed_work_sync(&idle_work); if (rng_idle) { clk_prepare_enable(rng_clk); r = omap3_rom_rng_call(0, 0, RNG_GEN_PRNG_HW_INIT); @@ -65,7 +65,7 @@ static int omap3_rom_rng_get_random(void *buf, unsigned int count) ptr = virt_to_phys(buf); r = omap3_rom_rng_call(ptr, count, RNG_GEN_HW); - mod_timer(&idle_timer, jiffies + msecs_to_jiffies(500)); + schedule_delayed_work(&idle_work, msecs_to_jiffies(500)); if (r != 0) return -EINVAL; return 0; @@ -102,7 +102,7 @@ static int omap3_rom_rng_probe(struct platform_device *pdev) return -EINVAL; } - setup_timer(&idle_timer, omap3_rom_rng_idle, 0); + INIT_DELAYED_WORK(&idle_work, omap3_rom_rng_idle); rng_clk = devm_clk_get(&pdev->dev, "ick"); if (IS_ERR(rng_clk)) { pr_err("unable to get RNG clock\n"); @@ -118,6 +118,7 @@ static int omap3_rom_rng_probe(struct platform_device *pdev) static int omap3_rom_rng_remove(struct platform_device *pdev) { + cancel_delayed_work_sync(&idle_work); hwrng_unregister(&omap3_rom_rng_ops); clk_disable_unprepare(rng_clk); return 0; -- cgit v0.10.2 From f8e28a0dec1dad2f68d754340dd80f1685941a4b Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Thu, 19 Nov 2015 13:38:17 +0100 Subject: crypto: sahara - set nb_[in|out]_sg as signed int The two unsigned int variables nb_in_sg and nb_out_sg can be assigned a signed value (-EINVAL) from sg_nents_for_len(). 
Furthermore, they are used only by dma_map_sg and dma_unmap_sg, which expect a signed int, so they must be declared as int. Fixes: 6c2b74d4774f ("crypto: sahara - check return value of sg_nents_for_len") Reported-by: Dan Carpenter Signed-off-by: LABBE Corentin Signed-off-by: Herbert Xu diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c index ea9f56a..cc738f3 100644 --- a/drivers/crypto/sahara.c +++ b/drivers/crypto/sahara.c @@ -228,9 +228,9 @@ struct sahara_dev { size_t total; struct scatterlist *in_sg; - unsigned int nb_in_sg; + int nb_in_sg; struct scatterlist *out_sg; - unsigned int nb_out_sg; + int nb_out_sg; u32 error; }; -- cgit v0.10.2 From f53e38afdc3a395722775c28ceb0e06c36a17ac3 Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Thu, 19 Nov 2015 13:38:18 +0100 Subject: crypto: picoxcell - set [src|dst]_nents and nents as signed int The unsigned int variables [src|dst]_nents and nents can be assigned a signed value (-EINVAL) from sg_nents_for_len(). Furthermore, they are used only by dma_map_sg and dma_unmap_sg, which expect a signed int, so they must be declared as int. Fixes: f051f95eb47b ("crypto: picoxcell - check return value of sg_nents_for_len") Reported-by: Dan Carpenter Signed-off-by: LABBE Corentin Signed-off-by: Herbert Xu diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c index a9c6367..15b5e39 100644 --- a/drivers/crypto/picoxcell_crypto.c +++ b/drivers/crypto/picoxcell_crypto.c @@ -289,10 +289,11 @@ static struct spacc_ddt *spacc_sg_to_ddt(struct spacc_engine *engine, enum dma_data_direction dir, dma_addr_t *ddt_phys) { - unsigned nents, mapped_ents; + unsigned mapped_ents; struct scatterlist *cur; struct spacc_ddt *ddt; int i; + int nents; nents = sg_nents_for_len(payload, nbytes); if (nents < 0) { @@ -326,7 +327,7 @@ static int spacc_aead_make_ddts(struct aead_request *areq) struct spacc_engine *engine = req->engine; struct spacc_ddt *src_ddt, *dst_ddt; unsigned total; - unsigned int src_nents, dst_nents; + int src_nents, dst_nents; struct scatterlist *cur; int i, dst_ents, src_ents; -- cgit v0.10.2 From f143fc673546477f16aba7b7bf456629d06ac4e7 Mon Sep 17 00:00:00 2001 From: Jim Davis Date: Thu, 19 Nov 2015 17:06:19 -0700 Subject: crypto: qat - fix typo in clean-files A typo in the Makefile leaves qat_rsaprivkey-asn1.h hanging around. Signed-off-by: Jim Davis Signed-off-by: Herbert Xu diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile index 9e9e196..12f40a3 100644 --- a/drivers/crypto/qat/qat_common/Makefile +++ b/drivers/crypto/qat/qat_common/Makefile @@ -4,7 +4,7 @@ $(obj)/qat_rsaprivkey-asn1.o: $(obj)/qat_rsaprivkey-asn1.c \ $(obj)/qat_rsaprivkey-asn1.h clean-files += qat_rsapubkey-asn1.c qat_rsapubkey-asn1.h -clean-files += qat_rsaprivkey-asn1.c qat_rsapvivkey-asn1.h +clean-files += qat_rsaprivkey-asn1.c qat_rsaprivkey-asn1.h obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o intel_qat-objs := adf_cfg.o \ -- cgit v0.10.2 From 1d4bbc5a6f475f48b878715de832cd2ac99226b1 Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Sat, 21 Nov 2015 22:24:11 +0800 Subject: crypto: padlock-aes - use offset_in_page macro Use the offset_in_page() macro instead of open-coding (addr & ~PAGE_MASK). 
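offset_in_page() is defined in <linux/mm.h> as ((unsigned long)(p) & ~PAGE_MASK), so the conversion is purely cosmetic. A sketch (function name illustrative) of the predicate the driver is computing:

#include <linux/mm.h>

/* True if fetching "fetch" bytes starting at p would run past the end
 * of the page containing p - the case where padlock must fall back to
 * the copying path.
 */
static bool fetch_crosses_page(const void *p, unsigned int fetch)
{
	return offset_in_page(p) + fetch > PAGE_SIZE;
}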
Signed-off-by: Geliang Tang Signed-off-by: Herbert Xu diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index da2d677..be2dd10 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c @@ -238,7 +238,7 @@ static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key, /* Padlock in ECB mode fetches at least ecb_fetch_bytes of data. * We could avoid some copying here but it's probably not worth it. */ - if (unlikely(((unsigned long)in & ~PAGE_MASK) + ecb_fetch_bytes > PAGE_SIZE)) { + if (unlikely(offset_in_page(in) + ecb_fetch_bytes > PAGE_SIZE)) { ecb_crypt_copy(in, out, key, cword, count); return; } @@ -250,7 +250,7 @@ static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key, u8 *iv, struct cword *cword, int count) { /* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */ - if (unlikely(((unsigned long)in & ~PAGE_MASK) + cbc_fetch_bytes > PAGE_SIZE)) + if (unlikely(offset_in_page(in) + cbc_fetch_bytes > PAGE_SIZE)) return cbc_crypt_copy(in, out, key, iv, cword, count); return rep_xcrypt_cbc(in, out, key, iv, cword, count); -- cgit v0.10.2 From d62112f27e4b734014ac129ad8e2d58443d2e98a Mon Sep 17 00:00:00 2001 From: saurabh Date: Mon, 23 Nov 2015 15:26:54 +0530 Subject: crypto: nx - use of_property_read_u32() Use of_property_read_u32() for reading an int value; it helps reduce the number of variables used. Signed-off-by: Saurabh Sengar Acked-by: Dan Streetman Signed-off-by: Herbert Xu diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c index 9ef51fa..87f7a0f 100644 --- a/drivers/crypto/nx/nx-842-powernv.c +++ b/drivers/crypto/nx/nx-842-powernv.c @@ -525,7 +525,6 @@ static int nx842_powernv_decompress(const unsigned char *in, unsigned int inlen, static int __init nx842_powernv_probe(struct device_node *dn) { struct nx842_coproc *coproc; - struct property *ct_prop, *ci_prop; unsigned int ct, ci; int chip_id; @@ -534,18 +533,16 @@ static int __init nx842_powernv_probe(struct device_node *dn) pr_err("ibm,chip-id missing\n"); return -EINVAL; } - ct_prop = of_find_property(dn, "ibm,842-coprocessor-type", NULL); - if (!ct_prop) { + + if (of_property_read_u32(dn, "ibm,842-coprocessor-type", &ct)) { pr_err("ibm,842-coprocessor-type missing\n"); return -EINVAL; } - ct = be32_to_cpu(*(unsigned int *)ct_prop->value); - ci_prop = of_find_property(dn, "ibm,842-coprocessor-instance", NULL); - if (!ci_prop) { + + if (of_property_read_u32(dn, "ibm,842-coprocessor-instance", &ci)) { pr_err("ibm,842-coprocessor-instance missing\n"); return -EINVAL; } - ci = be32_to_cpu(*(unsigned int *)ci_prop->value); coproc = kmalloc(sizeof(*coproc), GFP_KERNEL); if (!coproc) -- cgit v0.10.2 From e81c1b4646149c1e67610c83e8770a7217491a13 Mon Sep 17 00:00:00 2001 From: Zain Wang Date: Wed, 25 Nov 2015 13:43:30 +0800 Subject: crypto: rockchip - add DT bindings documentation Add DT bindings documentation for the rk3288 crypto drivers. 
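For reference, the pattern behind the nx-842 conversion above, reduced to a sketch with a hypothetical property name: of_property_read_u32() bundles the property lookup, the length check and the be32-to-CPU conversion in one call, so the struct property temporaries disappear.

#include <linux/of.h>
#include <linux/errno.h>
#include <linux/printk.h>

static int read_engine_index(struct device_node *dn, u32 *index)
{
	/* replaces of_find_property() plus a manual be32_to_cpu() */
	if (of_property_read_u32(dn, "vendor,engine-index", index)) {
		pr_err("vendor,engine-index missing\n");
		return -EINVAL;
	}
	return 0;
}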
Signed-off-by: Zain Wang Acked-by: Rob Herring Tested-by: Heiko Stuebner Signed-off-by: Herbert Xu diff --git a/Documentation/devicetree/bindings/crypto/rockchip-crypto.txt b/Documentation/devicetree/bindings/crypto/rockchip-crypto.txt new file mode 100644 index 0000000..096df34 --- /dev/null +++ b/Documentation/devicetree/bindings/crypto/rockchip-crypto.txt @@ -0,0 +1,29 @@ +Rockchip Electronics And Security Accelerator + +Required properties: +- compatible: Should be "rockchip,rk3288-crypto" +- reg: Base physical address of the engine and length of memory mapped + region +- interrupts: Interrupt number +- clocks: Reference to the clocks about crypto +- clock-names: "aclk" used to clock data + "hclk" used to clock data + "sclk" used to clock crypto accelerator + "apb_pclk" used to clock dma +- resets: Must contain an entry for each entry in reset-names. + See ../reset/reset.txt for details. +- reset-names: Must include the name "crypto-rst". + +Examples: + + crypto: cypto-controller@ff8a0000 { + compatible = "rockchip,rk3288-crypto"; + reg = <0xff8a0000 0x4000>; + interrupts = ; + clocks = <&cru ACLK_CRYPTO>, <&cru HCLK_CRYPTO>, + <&cru SCLK_CRYPTO>, <&cru ACLK_DMAC1>; + clock-names = "aclk", "hclk", "sclk", "apb_pclk"; + resets = <&cru SRST_CRYPTO>; + reset-names = "crypto-rst"; + status = "okay"; + }; -- cgit v0.10.2 From 433cd2c617bfbac27a02e40fbcce1713c84ce441 Mon Sep 17 00:00:00 2001 From: Zain Wang Date: Wed, 25 Nov 2015 13:43:32 +0800 Subject: crypto: rockchip - add crypto driver for rk3288 Crypto driver support: ecb(aes) cbc(aes) ecb(des) cbc(des) ecb(des3_ede) cbc(des3_ede) You can allocate the algorithms listed above in your code. Other algorithms and platforms will be added later on. Signed-off-by: Zain Wang Tested-by: Heiko Stuebner Signed-off-by: Herbert Xu diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 5357bc1..95dccde 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -497,4 +497,15 @@ config CRYPTO_DEV_SUN4I_SS To compile this driver as a module, choose M here: the module will be called sun4i-ss. +config CRYPTO_DEV_ROCKCHIP + tristate "Rockchip's Cryptographic Engine driver" + depends on OF && ARCH_ROCKCHIP + select CRYPTO_AES + select CRYPTO_DES + select CRYPTO_BLKCIPHER + + help + This driver interfaces with the hardware crypto accelerator. + Supporting cbc/ecb chainmode, and aes/des/des3_ede cipher mode. 
+ endif # CRYPTO_HW diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index c3ced6fb..713de9d 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile @@ -29,3 +29,4 @@ obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/ obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/ obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/ obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sunxi-ss/ +obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/ diff --git a/drivers/crypto/rockchip/Makefile b/drivers/crypto/rockchip/Makefile new file mode 100644 index 0000000..7051c6c --- /dev/null +++ b/drivers/crypto/rockchip/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rk_crypto.o +rk_crypto-objs := rk3288_crypto.o \ + rk3288_crypto_ablkcipher.o \ diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c new file mode 100644 index 0000000..6b72f8d --- /dev/null +++ b/drivers/crypto/rockchip/rk3288_crypto.c @@ -0,0 +1,393 @@ +/* + * Crypto acceleration support for Rockchip RK3288 + * + * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd + * + * Author: Zain Wang + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * Some ideas are from marvell-cesa.c and s5p-sss.c driver. + */ + +#include "rk3288_crypto.h" +#include +#include +#include +#include +#include +#include + +static int rk_crypto_enable_clk(struct rk_crypto_info *dev) +{ + int err; + + err = clk_prepare_enable(dev->sclk); + if (err) { + dev_err(dev->dev, "[%s:%d], Couldn't enable clock sclk\n", + __func__, __LINE__); + goto err_return; + } + err = clk_prepare_enable(dev->aclk); + if (err) { + dev_err(dev->dev, "[%s:%d], Couldn't enable clock aclk\n", + __func__, __LINE__); + goto err_aclk; + } + err = clk_prepare_enable(dev->hclk); + if (err) { + dev_err(dev->dev, "[%s:%d], Couldn't enable clock hclk\n", + __func__, __LINE__); + goto err_hclk; + } + err = clk_prepare_enable(dev->dmaclk); + if (err) { + dev_err(dev->dev, "[%s:%d], Couldn't enable clock dmaclk\n", + __func__, __LINE__); + goto err_dmaclk; + } + return err; +err_dmaclk: + clk_disable_unprepare(dev->hclk); +err_hclk: + clk_disable_unprepare(dev->aclk); +err_aclk: + clk_disable_unprepare(dev->sclk); +err_return: + return err; +} + +static void rk_crypto_disable_clk(struct rk_crypto_info *dev) +{ + clk_disable_unprepare(dev->dmaclk); + clk_disable_unprepare(dev->hclk); + clk_disable_unprepare(dev->aclk); + clk_disable_unprepare(dev->sclk); +} + +static int check_alignment(struct scatterlist *sg_src, + struct scatterlist *sg_dst, + int align_mask) +{ + int in, out, align; + + in = IS_ALIGNED((uint32_t)sg_src->offset, 4) && + IS_ALIGNED((uint32_t)sg_src->length, align_mask); + if (!sg_dst) + return in; + out = IS_ALIGNED((uint32_t)sg_dst->offset, 4) && + IS_ALIGNED((uint32_t)sg_dst->length, align_mask); + align = in && out; + + return (align && (sg_src->length == sg_dst->length)); +} + +static int rk_load_data(struct rk_crypto_info *dev, + struct scatterlist *sg_src, + struct scatterlist *sg_dst) +{ + unsigned int count; + + dev->aligned = dev->aligned ? 
+ check_alignment(sg_src, sg_dst, dev->align_size) : + dev->aligned; + if (dev->aligned) { + count = min(dev->left_bytes, sg_src->length); + dev->left_bytes -= count; + + if (!dma_map_sg(dev->dev, sg_src, 1, DMA_TO_DEVICE)) { + dev_err(dev->dev, "[%s:%d] dma_map_sg(src) error\n", + __func__, __LINE__); + return -EINVAL; + } + dev->addr_in = sg_dma_address(sg_src); + + if (sg_dst) { + if (!dma_map_sg(dev->dev, sg_dst, 1, DMA_FROM_DEVICE)) { + dev_err(dev->dev, + "[%s:%d] dma_map_sg(dst) error\n", + __func__, __LINE__); + dma_unmap_sg(dev->dev, sg_src, 1, + DMA_TO_DEVICE); + return -EINVAL; + } + dev->addr_out = sg_dma_address(sg_dst); + } + } else { + count = (dev->left_bytes > PAGE_SIZE) ? + PAGE_SIZE : dev->left_bytes; + + if (!sg_pcopy_to_buffer(dev->first, dev->nents, + dev->addr_vir, count, + dev->total - dev->left_bytes)) { + dev_err(dev->dev, "[%s:%d] pcopy err\n", + __func__, __LINE__); + return -EINVAL; + } + dev->left_bytes -= count; + sg_init_one(&dev->sg_tmp, dev->addr_vir, count); + if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1, DMA_TO_DEVICE)) { + dev_err(dev->dev, "[%s:%d] dma_map_sg(sg_tmp) error\n", + __func__, __LINE__); + return -ENOMEM; + } + dev->addr_in = sg_dma_address(&dev->sg_tmp); + + if (sg_dst) { + if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1, + DMA_FROM_DEVICE)) { + dev_err(dev->dev, + "[%s:%d] dma_map_sg(sg_tmp) error\n", + __func__, __LINE__); + dma_unmap_sg(dev->dev, &dev->sg_tmp, 1, + DMA_TO_DEVICE); + return -ENOMEM; + } + dev->addr_out = sg_dma_address(&dev->sg_tmp); + } + } + dev->count = count; + return 0; +} + +static void rk_unload_data(struct rk_crypto_info *dev) +{ + struct scatterlist *sg_in, *sg_out; + + sg_in = dev->aligned ? dev->sg_src : &dev->sg_tmp; + dma_unmap_sg(dev->dev, sg_in, 1, DMA_TO_DEVICE); + + if (dev->sg_dst) { + sg_out = dev->aligned ? 
dev->sg_dst : &dev->sg_tmp; + dma_unmap_sg(dev->dev, sg_out, 1, DMA_FROM_DEVICE); + } +} + +static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id) +{ + struct rk_crypto_info *dev = platform_get_drvdata(dev_id); + u32 interrupt_status; + int err = 0; + + spin_lock(&dev->lock); + interrupt_status = CRYPTO_READ(dev, RK_CRYPTO_INTSTS); + CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, interrupt_status); + if (interrupt_status & 0x0a) { + dev_warn(dev->dev, "DMA Error\n"); + err = -EFAULT; + } else if (interrupt_status & 0x05) { + err = dev->update(dev); + } + if (err) + dev->complete(dev, err); + spin_unlock(&dev->lock); + return IRQ_HANDLED; +} + +static void rk_crypto_tasklet_cb(unsigned long data) +{ + struct rk_crypto_info *dev = (struct rk_crypto_info *)data; + struct crypto_async_request *async_req, *backlog; + int err = 0; + + spin_lock(&dev->lock); + backlog = crypto_get_backlog(&dev->queue); + async_req = crypto_dequeue_request(&dev->queue); + spin_unlock(&dev->lock); + if (!async_req) { + dev_err(dev->dev, "async_req is NULL !!\n"); + return; + } + if (backlog) { + backlog->complete(backlog, -EINPROGRESS); + backlog = NULL; + } + + if (crypto_tfm_alg_type(async_req->tfm) == CRYPTO_ALG_TYPE_ABLKCIPHER) + dev->ablk_req = ablkcipher_request_cast(async_req); + err = dev->start(dev); + if (err) + dev->complete(dev, err); +} + +static struct rk_crypto_tmp *rk_cipher_algs[] = { + &rk_ecb_aes_alg, + &rk_cbc_aes_alg, + &rk_ecb_des_alg, + &rk_cbc_des_alg, + &rk_ecb_des3_ede_alg, + &rk_cbc_des3_ede_alg, +}; + +static int rk_crypto_register(struct rk_crypto_info *crypto_info) +{ + unsigned int i, k; + int err = 0; + + for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) { + rk_cipher_algs[i]->dev = crypto_info; + err = crypto_register_alg(&rk_cipher_algs[i]->alg); + if (err) + goto err_cipher_algs; + } + return 0; + +err_cipher_algs: + for (k = 0; k < i; k++) + crypto_unregister_alg(&rk_cipher_algs[k]->alg); + return err; +} + +static void rk_crypto_unregister(void) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) + crypto_unregister_alg(&rk_cipher_algs[i]->alg); +} + +static void rk_crypto_action(void *data) +{ + struct rk_crypto_info *crypto_info = data; + + reset_control_assert(crypto_info->rst); +} + +static const struct of_device_id crypto_of_id_table[] = { + { .compatible = "rockchip,rk3288-crypto" }, + {} +}; +MODULE_DEVICE_TABLE(of, crypto_of_id_table); + +static int rk_crypto_probe(struct platform_device *pdev) +{ + struct resource *res; + struct device *dev = &pdev->dev; + struct rk_crypto_info *crypto_info; + int err = 0; + + crypto_info = devm_kzalloc(&pdev->dev, + sizeof(*crypto_info), GFP_KERNEL); + if (!crypto_info) { + err = -ENOMEM; + goto err_crypto; + } + + crypto_info->rst = devm_reset_control_get(dev, "crypto-rst"); + if (IS_ERR(crypto_info->rst)) { + err = PTR_ERR(crypto_info->rst); + goto err_crypto; + } + + reset_control_assert(crypto_info->rst); + usleep_range(10, 20); + reset_control_deassert(crypto_info->rst); + + err = devm_add_action(dev, rk_crypto_action, crypto_info); + if (err) { + reset_control_assert(crypto_info->rst); + goto err_crypto; + } + + spin_lock_init(&crypto_info->lock); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + crypto_info->reg = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(crypto_info->reg)) { + err = PTR_ERR(crypto_info->reg); + goto err_crypto; + } + + crypto_info->aclk = devm_clk_get(&pdev->dev, "aclk"); + if (IS_ERR(crypto_info->aclk)) { + err = PTR_ERR(crypto_info->aclk); + goto err_crypto; + } + + 
crypto_info->hclk = devm_clk_get(&pdev->dev, "hclk"); + if (IS_ERR(crypto_info->hclk)) { + err = PTR_ERR(crypto_info->hclk); + goto err_crypto; + } + + crypto_info->sclk = devm_clk_get(&pdev->dev, "sclk"); + if (IS_ERR(crypto_info->sclk)) { + err = PTR_ERR(crypto_info->sclk); + goto err_crypto; + } + + crypto_info->dmaclk = devm_clk_get(&pdev->dev, "apb_pclk"); + if (IS_ERR(crypto_info->dmaclk)) { + err = PTR_ERR(crypto_info->dmaclk); + goto err_crypto; + } + + crypto_info->irq = platform_get_irq(pdev, 0); + if (crypto_info->irq < 0) { + dev_warn(crypto_info->dev, + "control Interrupt is not available.\n"); + err = crypto_info->irq; + goto err_crypto; + } + + err = devm_request_irq(&pdev->dev, crypto_info->irq, + rk_crypto_irq_handle, IRQF_SHARED, + "rk-crypto", pdev); + + if (err) { + dev_err(crypto_info->dev, "irq request failed.\n"); + goto err_crypto; + } + + crypto_info->dev = &pdev->dev; + platform_set_drvdata(pdev, crypto_info); + + tasklet_init(&crypto_info->crypto_tasklet, + rk_crypto_tasklet_cb, (unsigned long)crypto_info); + crypto_init_queue(&crypto_info->queue, 50); + + crypto_info->enable_clk = rk_crypto_enable_clk; + crypto_info->disable_clk = rk_crypto_disable_clk; + crypto_info->load_data = rk_load_data; + crypto_info->unload_data = rk_unload_data; + + err = rk_crypto_register(crypto_info); + if (err) { + dev_err(dev, "err in register alg"); + goto err_register_alg; + } + + dev_info(dev, "Crypto Accelerator successfully registered\n"); + return 0; + +err_register_alg: + tasklet_kill(&crypto_info->crypto_tasklet); +err_crypto: + return err; +} + +static int rk_crypto_remove(struct platform_device *pdev) +{ + struct rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev); + + rk_crypto_unregister(); + tasklet_kill(&crypto_tmp->crypto_tasklet); + return 0; +} + +static struct platform_driver crypto_driver = { + .probe = rk_crypto_probe, + .remove = rk_crypto_remove, + .driver = { + .name = "rk3288-crypto", + .of_match_table = crypto_of_id_table, + }, +}; + +module_platform_driver(crypto_driver); + +MODULE_AUTHOR("Zain Wang "); +MODULE_DESCRIPTION("Support for Rockchip's cryptographic engine"); +MODULE_LICENSE("GPL"); diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h new file mode 100644 index 0000000..e499c2c --- /dev/null +++ b/drivers/crypto/rockchip/rk3288_crypto.h @@ -0,0 +1,216 @@ +#ifndef __RK3288_CRYPTO_H__ +#define __RK3288_CRYPTO_H__ + +#include +#include +#include +#include +#include + +#define _SBF(v, f) ((v) << (f)) + +/* Crypto control registers*/ +#define RK_CRYPTO_INTSTS 0x0000 +#define RK_CRYPTO_PKA_DONE_INT BIT(5) +#define RK_CRYPTO_HASH_DONE_INT BIT(4) +#define RK_CRYPTO_HRDMA_ERR_INT BIT(3) +#define RK_CRYPTO_HRDMA_DONE_INT BIT(2) +#define RK_CRYPTO_BCDMA_ERR_INT BIT(1) +#define RK_CRYPTO_BCDMA_DONE_INT BIT(0) + +#define RK_CRYPTO_INTENA 0x0004 +#define RK_CRYPTO_PKA_DONE_ENA BIT(5) +#define RK_CRYPTO_HASH_DONE_ENA BIT(4) +#define RK_CRYPTO_HRDMA_ERR_ENA BIT(3) +#define RK_CRYPTO_HRDMA_DONE_ENA BIT(2) +#define RK_CRYPTO_BCDMA_ERR_ENA BIT(1) +#define RK_CRYPTO_BCDMA_DONE_ENA BIT(0) + +#define RK_CRYPTO_CTRL 0x0008 +#define RK_CRYPTO_WRITE_MASK _SBF(0xFFFF, 16) +#define RK_CRYPTO_TRNG_FLUSH BIT(9) +#define RK_CRYPTO_TRNG_START BIT(8) +#define RK_CRYPTO_PKA_FLUSH BIT(7) +#define RK_CRYPTO_HASH_FLUSH BIT(6) +#define RK_CRYPTO_BLOCK_FLUSH BIT(5) +#define RK_CRYPTO_PKA_START BIT(4) +#define RK_CRYPTO_HASH_START BIT(3) +#define RK_CRYPTO_BLOCK_START BIT(2) +#define RK_CRYPTO_TDES_START BIT(1) +#define 
RK_CRYPTO_AES_START BIT(0) + +#define RK_CRYPTO_CONF 0x000c +/* HASH Receive DMA Address Mode: fix | increment */ +#define RK_CRYPTO_HR_ADDR_MODE BIT(8) +/* Block Transmit DMA Address Mode: fix | increment */ +#define RK_CRYPTO_BT_ADDR_MODE BIT(7) +/* Block Receive DMA Address Mode: fix | increment */ +#define RK_CRYPTO_BR_ADDR_MODE BIT(6) +#define RK_CRYPTO_BYTESWAP_HRFIFO BIT(5) +#define RK_CRYPTO_BYTESWAP_BTFIFO BIT(4) +#define RK_CRYPTO_BYTESWAP_BRFIFO BIT(3) +/* AES = 0 OR DES = 1 */ +#define RK_CRYPTO_DESSEL BIT(2) +#define RK_CYYPTO_HASHINSEL_INDEPENDENT_SOURCE _SBF(0x00, 0) +#define RK_CYYPTO_HASHINSEL_BLOCK_CIPHER_INPUT _SBF(0x01, 0) +#define RK_CYYPTO_HASHINSEL_BLOCK_CIPHER_OUTPUT _SBF(0x02, 0) + +/* Block Receiving DMA Start Address Register */ +#define RK_CRYPTO_BRDMAS 0x0010 +/* Block Transmitting DMA Start Address Register */ +#define RK_CRYPTO_BTDMAS 0x0014 +/* Block Receiving DMA Length Register */ +#define RK_CRYPTO_BRDMAL 0x0018 +/* Hash Receiving DMA Start Address Register */ +#define RK_CRYPTO_HRDMAS 0x001c +/* Hash Receiving DMA Length Register */ +#define RK_CRYPTO_HRDMAL 0x0020 + +/* AES registers */ +#define RK_CRYPTO_AES_CTRL 0x0080 +#define RK_CRYPTO_AES_BYTESWAP_CNT BIT(11) +#define RK_CRYPTO_AES_BYTESWAP_KEY BIT(10) +#define RK_CRYPTO_AES_BYTESWAP_IV BIT(9) +#define RK_CRYPTO_AES_BYTESWAP_DO BIT(8) +#define RK_CRYPTO_AES_BYTESWAP_DI BIT(7) +#define RK_CRYPTO_AES_KEY_CHANGE BIT(6) +#define RK_CRYPTO_AES_ECB_MODE _SBF(0x00, 4) +#define RK_CRYPTO_AES_CBC_MODE _SBF(0x01, 4) +#define RK_CRYPTO_AES_CTR_MODE _SBF(0x02, 4) +#define RK_CRYPTO_AES_128BIT_key _SBF(0x00, 2) +#define RK_CRYPTO_AES_192BIT_key _SBF(0x01, 2) +#define RK_CRYPTO_AES_256BIT_key _SBF(0x02, 2) +/* Slave = 0 / fifo = 1 */ +#define RK_CRYPTO_AES_FIFO_MODE BIT(1) +/* Encryption = 0 , Decryption = 1 */ +#define RK_CRYPTO_AES_DEC BIT(0) + +#define RK_CRYPTO_AES_STS 0x0084 +#define RK_CRYPTO_AES_DONE BIT(0) + +/* AES Input Data 0-3 Register */ +#define RK_CRYPTO_AES_DIN_0 0x0088 +#define RK_CRYPTO_AES_DIN_1 0x008c +#define RK_CRYPTO_AES_DIN_2 0x0090 +#define RK_CRYPTO_AES_DIN_3 0x0094 + +/* AES output Data 0-3 Register */ +#define RK_CRYPTO_AES_DOUT_0 0x0098 +#define RK_CRYPTO_AES_DOUT_1 0x009c +#define RK_CRYPTO_AES_DOUT_2 0x00a0 +#define RK_CRYPTO_AES_DOUT_3 0x00a4 + +/* AES IV Data 0-3 Register */ +#define RK_CRYPTO_AES_IV_0 0x00a8 +#define RK_CRYPTO_AES_IV_1 0x00ac +#define RK_CRYPTO_AES_IV_2 0x00b0 +#define RK_CRYPTO_AES_IV_3 0x00b4 + +/* AES Key Data 0-3 Register */ +#define RK_CRYPTO_AES_KEY_0 0x00b8 +#define RK_CRYPTO_AES_KEY_1 0x00bc +#define RK_CRYPTO_AES_KEY_2 0x00c0 +#define RK_CRYPTO_AES_KEY_3 0x00c4 +#define RK_CRYPTO_AES_KEY_4 0x00c8 +#define RK_CRYPTO_AES_KEY_5 0x00cc +#define RK_CRYPTO_AES_KEY_6 0x00d0 +#define RK_CRYPTO_AES_KEY_7 0x00d4 + +/* des/tdes */ +#define RK_CRYPTO_TDES_CTRL 0x0100 +#define RK_CRYPTO_TDES_BYTESWAP_KEY BIT(8) +#define RK_CRYPTO_TDES_BYTESWAP_IV BIT(7) +#define RK_CRYPTO_TDES_BYTESWAP_DO BIT(6) +#define RK_CRYPTO_TDES_BYTESWAP_DI BIT(5) +/* 0: ECB, 1: CBC */ +#define RK_CRYPTO_TDES_CHAINMODE_CBC BIT(4) +/* TDES Key Mode, 0 : EDE, 1 : EEE */ +#define RK_CRYPTO_TDES_EEE BIT(3) +/* 0: DES, 1:TDES */ +#define RK_CRYPTO_TDES_SELECT BIT(2) +/* 0: Slave, 1:Fifo */ +#define RK_CRYPTO_TDES_FIFO_MODE BIT(1) +/* Encryption = 0 , Decryption = 1 */ +#define RK_CRYPTO_TDES_DEC BIT(0) + +#define RK_CRYPTO_TDES_STS 0x0104 +#define RK_CRYPTO_TDES_DONE BIT(0) + +#define RK_CRYPTO_TDES_DIN_0 0x0108 +#define RK_CRYPTO_TDES_DIN_1 0x010c +#define RK_CRYPTO_TDES_DOUT_0 0x0110 +#define 
RK_CRYPTO_TDES_DOUT_1 0x0114 +#define RK_CRYPTO_TDES_IV_0 0x0118 +#define RK_CRYPTO_TDES_IV_1 0x011c +#define RK_CRYPTO_TDES_KEY1_0 0x0120 +#define RK_CRYPTO_TDES_KEY1_1 0x0124 +#define RK_CRYPTO_TDES_KEY2_0 0x0128 +#define RK_CRYPTO_TDES_KEY2_1 0x012c +#define RK_CRYPTO_TDES_KEY3_0 0x0130 +#define RK_CRYPTO_TDES_KEY3_1 0x0134 + +#define CRYPTO_READ(dev, offset) \ + readl_relaxed(((dev)->reg + (offset))) +#define CRYPTO_WRITE(dev, offset, val) \ + writel_relaxed((val), ((dev)->reg + (offset))) + +struct rk_crypto_info { + struct device *dev; + struct clk *aclk; + struct clk *hclk; + struct clk *sclk; + struct clk *dmaclk; + struct reset_control *rst; + void __iomem *reg; + int irq; + struct crypto_queue queue; + struct tasklet_struct crypto_tasklet; + struct ablkcipher_request *ablk_req; + /* device lock */ + spinlock_t lock; + + /* the public variable */ + struct scatterlist *sg_src; + struct scatterlist *sg_dst; + struct scatterlist sg_tmp; + struct scatterlist *first; + unsigned int left_bytes; + void *addr_vir; + int aligned; + int align_size; + size_t nents; + unsigned int total; + unsigned int count; + u32 mode; + dma_addr_t addr_in; + dma_addr_t addr_out; + int (*start)(struct rk_crypto_info *dev); + int (*update)(struct rk_crypto_info *dev); + void (*complete)(struct rk_crypto_info *dev, int err); + int (*enable_clk)(struct rk_crypto_info *dev); + void (*disable_clk)(struct rk_crypto_info *dev); + int (*load_data)(struct rk_crypto_info *dev, + struct scatterlist *sg_src, + struct scatterlist *sg_dst); + void (*unload_data)(struct rk_crypto_info *dev); +}; + +/* the private variable of cipher */ +struct rk_cipher_ctx { + struct rk_crypto_info *dev; + unsigned int keylen; +}; + +struct rk_crypto_tmp { + struct rk_crypto_info *dev; + struct crypto_alg alg; +}; + +extern struct rk_crypto_tmp rk_ecb_aes_alg; +extern struct rk_crypto_tmp rk_cbc_aes_alg; +extern struct rk_crypto_tmp rk_ecb_des_alg; +extern struct rk_crypto_tmp rk_cbc_des_alg; +extern struct rk_crypto_tmp rk_ecb_des3_ede_alg; +extern struct rk_crypto_tmp rk_cbc_des3_ede_alg; + +#endif diff --git a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c new file mode 100644 index 0000000..4a8f9de --- /dev/null +++ b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c @@ -0,0 +1,503 @@ +/* + * Crypto acceleration support for Rockchip RK3288 + * + * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd + * + * Author: Zain Wang + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * Some ideas are from marvell-cesa.c and s5p-sss.c driver. 
+ */ +#include "rk3288_crypto.h" + +#define RK_CRYPTO_DEC BIT(0) + +static void rk_crypto_complete(struct rk_crypto_info *dev, int err) +{ + if (dev->ablk_req->base.complete) + dev->ablk_req->base.complete(&dev->ablk_req->base, err); +} + +static int rk_handle_req(struct rk_crypto_info *dev, + struct ablkcipher_request *req) +{ + int err; + + if (!IS_ALIGNED(req->nbytes, dev->align_size)) + return -EINVAL; + + dev->left_bytes = req->nbytes; + dev->total = req->nbytes; + dev->sg_src = req->src; + dev->first = req->src; + dev->nents = sg_nents(req->src); + dev->sg_dst = req->dst; + dev->aligned = 1; + dev->ablk_req = req; + + spin_lock(&dev->lock); + err = ablkcipher_enqueue_request(&dev->queue, req); + spin_unlock(&dev->lock); + tasklet_schedule(&dev->crypto_tasklet); + return err; +} + +static int rk_aes_setkey(struct crypto_ablkcipher *cipher, + const u8 *key, unsigned int keylen) +{ + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); + struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && + keylen != AES_KEYSIZE_256) { + crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + ctx->keylen = keylen; + memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, key, keylen); + return 0; +} + +static int rk_tdes_setkey(struct crypto_ablkcipher *cipher, + const u8 *key, unsigned int keylen) +{ + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); + struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + u32 tmp[DES_EXPKEY_WORDS]; + + if (keylen != DES_KEY_SIZE && keylen != DES3_EDE_KEY_SIZE) { + crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + if (keylen == DES_KEY_SIZE) { + if (!des_ekey(tmp, key) && + (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; + return -EINVAL; + } + } + + ctx->keylen = keylen; + memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen); + return 0; +} + +static int rk_aes_ecb_encrypt(struct ablkcipher_request *req) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); + struct rk_crypto_info *dev = ctx->dev; + + dev->mode = RK_CRYPTO_AES_ECB_MODE; + return rk_handle_req(dev, req); +} + +static int rk_aes_ecb_decrypt(struct ablkcipher_request *req) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); + struct rk_crypto_info *dev = ctx->dev; + + dev->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC; + return rk_handle_req(dev, req); +} + +static int rk_aes_cbc_encrypt(struct ablkcipher_request *req) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); + struct rk_crypto_info *dev = ctx->dev; + + dev->mode = RK_CRYPTO_AES_CBC_MODE; + return rk_handle_req(dev, req); +} + +static int rk_aes_cbc_decrypt(struct ablkcipher_request *req) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); + struct rk_crypto_info *dev = ctx->dev; + + dev->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC; + return rk_handle_req(dev, req); +} + +static int rk_des_ecb_encrypt(struct ablkcipher_request *req) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); + struct rk_crypto_info *dev = ctx->dev; + + dev->mode = 0; + return rk_handle_req(dev, req); +} + +static int 
rk_des_ecb_decrypt(struct ablkcipher_request *req) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); + struct rk_crypto_info *dev = ctx->dev; + + dev->mode = RK_CRYPTO_DEC; + return rk_handle_req(dev, req); +} + +static int rk_des_cbc_encrypt(struct ablkcipher_request *req) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); + struct rk_crypto_info *dev = ctx->dev; + + dev->mode = RK_CRYPTO_TDES_CHAINMODE_CBC; + return rk_handle_req(dev, req); +} + +static int rk_des_cbc_decrypt(struct ablkcipher_request *req) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); + struct rk_crypto_info *dev = ctx->dev; + + dev->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC; + return rk_handle_req(dev, req); +} + +static int rk_des3_ede_ecb_encrypt(struct ablkcipher_request *req) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); + struct rk_crypto_info *dev = ctx->dev; + + dev->mode = RK_CRYPTO_TDES_SELECT; + return rk_handle_req(dev, req); +} + +static int rk_des3_ede_ecb_decrypt(struct ablkcipher_request *req) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); + struct rk_crypto_info *dev = ctx->dev; + + dev->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC; + return rk_handle_req(dev, req); +} + +static int rk_des3_ede_cbc_encrypt(struct ablkcipher_request *req) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); + struct rk_crypto_info *dev = ctx->dev; + + dev->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC; + return rk_handle_req(dev, req); +} + +static int rk_des3_ede_cbc_decrypt(struct ablkcipher_request *req) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); + struct rk_crypto_info *dev = ctx->dev; + + dev->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC | + RK_CRYPTO_DEC; + return rk_handle_req(dev, req); +} + +static void rk_ablk_hw_init(struct rk_crypto_info *dev) +{ + struct crypto_ablkcipher *cipher = + crypto_ablkcipher_reqtfm(dev->ablk_req); + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); + struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(cipher); + u32 ivsize, block, conf_reg = 0; + + block = crypto_tfm_alg_blocksize(tfm); + ivsize = crypto_ablkcipher_ivsize(cipher); + + if (block == DES_BLOCK_SIZE) { + dev->mode |= RK_CRYPTO_TDES_FIFO_MODE | + RK_CRYPTO_TDES_BYTESWAP_KEY | + RK_CRYPTO_TDES_BYTESWAP_IV; + CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, dev->mode); + memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, + dev->ablk_req->info, ivsize); + conf_reg = RK_CRYPTO_DESSEL; + } else { + dev->mode |= RK_CRYPTO_AES_FIFO_MODE | + RK_CRYPTO_AES_KEY_CHANGE | + RK_CRYPTO_AES_BYTESWAP_KEY | + RK_CRYPTO_AES_BYTESWAP_IV; + if (ctx->keylen == AES_KEYSIZE_192) + dev->mode |= RK_CRYPTO_AES_192BIT_key; + else if (ctx->keylen == AES_KEYSIZE_256) + dev->mode |= RK_CRYPTO_AES_256BIT_key; + CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, dev->mode); + memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, + dev->ablk_req->info, ivsize); + } + conf_reg |= RK_CRYPTO_BYTESWAP_BTFIFO | + RK_CRYPTO_BYTESWAP_BRFIFO; + CRYPTO_WRITE(dev, RK_CRYPTO_CONF, conf_reg); + CRYPTO_WRITE(dev, 
RK_CRYPTO_INTENA, + RK_CRYPTO_BCDMA_ERR_ENA | RK_CRYPTO_BCDMA_DONE_ENA); +} + +static void crypto_dma_start(struct rk_crypto_info *dev) +{ + CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, dev->addr_in); + CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, dev->count / 4); + CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, dev->addr_out); + CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_BLOCK_START | + _SBF(RK_CRYPTO_BLOCK_START, 16)); +} + +static int rk_set_data_start(struct rk_crypto_info *dev) +{ + int err; + + err = dev->load_data(dev, dev->sg_src, dev->sg_dst); + if (!err) + crypto_dma_start(dev); + return err; +} + +static int rk_ablk_start(struct rk_crypto_info *dev) +{ + int err; + + spin_lock(&dev->lock); + rk_ablk_hw_init(dev); + err = rk_set_data_start(dev); + spin_unlock(&dev->lock); + return err; +} + +static void rk_iv_copyback(struct rk_crypto_info *dev) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(dev->ablk_req); + u32 ivsize = crypto_ablkcipher_ivsize(tfm); + + if (ivsize == DES_BLOCK_SIZE) + memcpy_fromio(dev->ablk_req->info, + dev->reg + RK_CRYPTO_TDES_IV_0, ivsize); + else if (ivsize == AES_BLOCK_SIZE) + memcpy_fromio(dev->ablk_req->info, + dev->reg + RK_CRYPTO_AES_IV_0, ivsize); +} + +/* return: + * true some err was occurred + * fault no err, continue + */ +static int rk_ablk_rx(struct rk_crypto_info *dev) +{ + int err = 0; + + dev->unload_data(dev); + if (!dev->aligned) { + if (!sg_pcopy_from_buffer(dev->ablk_req->dst, dev->nents, + dev->addr_vir, dev->count, + dev->total - dev->left_bytes - + dev->count)) { + err = -EINVAL; + goto out_rx; + } + } + if (dev->left_bytes) { + if (dev->aligned) { + if (sg_is_last(dev->sg_src)) { + dev_err(dev->dev, "[%s:%d] Lack of data\n", + __func__, __LINE__); + err = -ENOMEM; + goto out_rx; + } + dev->sg_src = sg_next(dev->sg_src); + dev->sg_dst = sg_next(dev->sg_dst); + } + err = rk_set_data_start(dev); + } else { + rk_iv_copyback(dev); + /* here show the calculation is over without any err */ + dev->complete(dev, 0); + } +out_rx: + return err; +} + +static int rk_ablk_cra_init(struct crypto_tfm *tfm) +{ + struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_alg *alg = tfm->__crt_alg; + struct rk_crypto_tmp *algt; + + algt = container_of(alg, struct rk_crypto_tmp, alg); + + ctx->dev = algt->dev; + ctx->dev->align_size = crypto_tfm_alg_alignmask(tfm) + 1; + ctx->dev->start = rk_ablk_start; + ctx->dev->update = rk_ablk_rx; + ctx->dev->complete = rk_crypto_complete; + ctx->dev->addr_vir = (char *)__get_free_page(GFP_KERNEL); + + return ctx->dev->addr_vir ? 
ctx->dev->enable_clk(ctx->dev) : -ENOMEM; +} + +static void rk_ablk_cra_exit(struct crypto_tfm *tfm) +{ + struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + free_page((unsigned long)ctx->dev->addr_vir); + ctx->dev->disable_clk(ctx->dev); +} + +struct rk_crypto_tmp rk_ecb_aes_alg = { + .alg = { + .cra_name = "ecb(aes)", + .cra_driver_name = "ecb-aes-rk", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct rk_cipher_ctx), + .cra_alignmask = 0x0f, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = rk_ablk_cra_init, + .cra_exit = rk_ablk_cra_exit, + .cra_u.ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = rk_aes_setkey, + .encrypt = rk_aes_ecb_encrypt, + .decrypt = rk_aes_ecb_decrypt, + } + } +}; + +struct rk_crypto_tmp rk_cbc_aes_alg = { + .alg = { + .cra_name = "cbc(aes)", + .cra_driver_name = "cbc-aes-rk", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct rk_cipher_ctx), + .cra_alignmask = 0x0f, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = rk_ablk_cra_init, + .cra_exit = rk_ablk_cra_exit, + .cra_u.ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = rk_aes_setkey, + .encrypt = rk_aes_cbc_encrypt, + .decrypt = rk_aes_cbc_decrypt, + } + } +}; + +struct rk_crypto_tmp rk_ecb_des_alg = { + .alg = { + .cra_name = "ecb(des)", + .cra_driver_name = "ecb-des-rk", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct rk_cipher_ctx), + .cra_alignmask = 0x07, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = rk_ablk_cra_init, + .cra_exit = rk_ablk_cra_exit, + .cra_u.ablkcipher = { + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .setkey = rk_tdes_setkey, + .encrypt = rk_des_ecb_encrypt, + .decrypt = rk_des_ecb_decrypt, + } + } +}; + +struct rk_crypto_tmp rk_cbc_des_alg = { + .alg = { + .cra_name = "cbc(des)", + .cra_driver_name = "cbc-des-rk", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct rk_cipher_ctx), + .cra_alignmask = 0x07, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = rk_ablk_cra_init, + .cra_exit = rk_ablk_cra_exit, + .cra_u.ablkcipher = { + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + .setkey = rk_tdes_setkey, + .encrypt = rk_des_cbc_encrypt, + .decrypt = rk_des_cbc_decrypt, + } + } +}; + +struct rk_crypto_tmp rk_ecb_des3_ede_alg = { + .alg = { + .cra_name = "ecb(des3_ede)", + .cra_driver_name = "ecb-des3-ede-rk", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct rk_cipher_ctx), + .cra_alignmask = 0x07, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = rk_ablk_cra_init, + .cra_exit = rk_ablk_cra_exit, + .cra_u.ablkcipher = { + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + .setkey = rk_tdes_setkey, + .encrypt = rk_des3_ede_ecb_encrypt, + .decrypt = rk_des3_ede_ecb_decrypt, + } + } +}; + +struct 
rk_crypto_tmp rk_cbc_des3_ede_alg = { + .alg = { + .cra_name = "cbc(des3_ede)", + .cra_driver_name = "cbc-des3-ede-rk", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct rk_cipher_ctx), + .cra_alignmask = 0x07, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = rk_ablk_cra_init, + .cra_exit = rk_ablk_cra_exit, + .cra_u.ablkcipher = { + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + .setkey = rk_tdes_setkey, + .encrypt = rk_des3_ede_cbc_encrypt, + .decrypt = rk_des3_ede_cbc_decrypt, + } + } +}; -- cgit v0.10.2 From c012a79d0ce95bd8488a5a44cd8c00c275774518 Mon Sep 17 00:00:00 2001 From: Alexander Kuleshov Date: Wed, 25 Nov 2015 23:48:28 +0600 Subject: crypto: cryptd - use crypto_skcipher_type() for getting skcipher type The <crypto/skcipher.h> header provides the inline function crypto_skcipher_type(). Let's use it in cryptd_alloc_ablkcipher() instead of direct calculation. Signed-off-by: Alexander Kuleshov Signed-off-by: Herbert Xu diff --git a/crypto/cryptd.c b/crypto/cryptd.c index c81861b..c4af8aa 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -887,8 +887,7 @@ struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name, if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME, "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME) return ERR_PTR(-EINVAL); - type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV); - type |= CRYPTO_ALG_TYPE_BLKCIPHER; + type = crypto_skcipher_type(type); mask &= ~CRYPTO_ALG_TYPE_MASK; mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK); tfm = crypto_alloc_base(cryptd_alg_name, type, mask); -- cgit v0.10.2 From 1ab87298cb59b649d8d648d25dc15b36ab865f5a Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Fri, 27 Nov 2015 16:50:43 +0100 Subject: hwrng: core - sleep interruptible in read The hwrng kthread can be waiting via hwrng_fillfn for some data from an rng like virtio-rng: hwrng D ffff880093e17798 0 382 2 0x00000000 ... Call Trace: [] wait_for_completion_killable+0x96/0x210 [] virtio_read+0x57/0xf0 [virtio_rng] [] hwrng_fillfn+0x75/0x130 [] kthread+0xf3/0x110 And when some user program tries to read the /dev node in this state, we get: rngd D ffff880093e17798 0 762 1 0x00000004 ... Call Trace: [] mutex_lock_nested+0x15c/0x3e0 [] rng_dev_read+0x6e/0x240 [] __vfs_read+0x28/0xe0 [] vfs_read+0x83/0x130 And this is indeed unkillable. So use mutex_lock_interruptible instead of mutex_lock in rng_dev_read and exit immediately when interrupted. And possibly return already read data, if any (as POSIX allows). 
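The core of the fix is the standard interruptible-sleep pattern for a lock taken on a read() path; a stripped-down sketch, where do_device_read() is a hypothetical stand-in rather than the real hwrng helper:

#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/types.h>

static DEFINE_MUTEX(reading_mutex);

static ssize_t do_device_read(char __user *buf, size_t size); /* hypothetical */

static ssize_t locked_read(char __user *buf, size_t size)
{
	ssize_t ret;

	/* Sleep for the mutex, but let a signal wake us so the read()
	 * fails with -ERESTARTSYS instead of blocking unkillably
	 * behind a stuck reader.
	 */
	if (mutex_lock_interruptible(&reading_mutex))
		return -ERESTARTSYS;

	ret = do_device_read(buf, size);
	mutex_unlock(&reading_mutex);
	return ret;
}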
v2: use ERESTARTSYS instead of EINTR Signed-off-by: Jiri Slaby Cc: Matt Mackall Cc: Herbert Xu Cc: Signed-off-by: Herbert Xu diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index 6f497aa..9203f2d 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -238,7 +238,10 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf, goto out; } - mutex_lock(&reading_mutex); + if (mutex_lock_interruptible(&reading_mutex)) { + err = -ERESTARTSYS; + goto out_put; + } if (!data_avail) { bytes_read = rng_get_data(rng, rng_buffer, rng_buffer_size(), @@ -288,6 +291,7 @@ out: out_unlock_reading: mutex_unlock(&reading_mutex); +out_put: put_rng(rng); goto out; } -- cgit v0.10.2 From ac7c8e6b6dc959d285382c7e9cdfe608205f0c68 Mon Sep 17 00:00:00 2001 From: Heiko Stuebner Date: Sat, 28 Nov 2015 13:27:48 +0100 Subject: crypto: rockchip - fix possible deadlock Lockdep warns about a possible deadlock resulting from the use of regular spin_locks: ================================= [ INFO: inconsistent lock state ] 4.4.0-rc2+ #2724 Not tainted --------------------------------- inconsistent {SOFTIRQ-ON-W} -> {IN-SOFTIRQ-W} usage. ksoftirqd/0/3 [HC0[0]:SC1[1]:HE1:SE0] takes: (&(&crypto_info->lock)->rlock){+.?...}, at: [] rk_crypto_tasklet_cb+0x24/0xb4 [rk_crypto] {SOFTIRQ-ON-W} state was registered at: [] lock_acquire+0x178/0x218 [] _raw_spin_lock+0x54/0x64 [] rk_handle_req+0x7c/0xbc [rk_crypto] [] rk_des_ecb_encrypt+0x2c/0x30 [rk_crypto] [] rk_aes_ecb_encrypt+0x18/0x1c [rk_crypto] [] skcipher_encrypt_ablkcipher+0x64/0x68 [] __test_skcipher+0x2a8/0x8dc [] test_skcipher+0x38/0xc4 [] alg_test_skcipher+0x90/0xb0 [] alg_test+0x1e8/0x280 [] cryptomgr_test+0x34/0x54 [] kthread+0xf4/0x10c [] ret_from_fork+0x14/0x24 irq event stamp: 10672 hardirqs last enabled at (10672): [] tasklet_action+0x48/0x104 hardirqs last disabled at (10671): [] tasklet_action+0x20/0x104 softirqs last enabled at (10658): [] __do_softirq+0x358/0x49c softirqs last disabled at (10669): [] run_ksoftirqd+0x40/0x80 other info that might help us debug this: Possible unsafe locking scenario: CPU0 ---- lock(&(&crypto_info->lock)->rlock); lock(&(&crypto_info->lock)->rlock); *** DEADLOCK *** Fix this by moving to irq-disabling spinlocks. 
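To make the failure mode concrete, here is a minimal sketch (hypothetical names, not the driver's code) of the pattern lockdep flags: the same lock is taken in process context and in a tasklet (softirq), so the process-context side must keep the softirq from running on its CPU while the lock is held, which the irqsave variant guarantees:

static int example_enqueue(struct example_dev *dev, void *req)
{
	unsigned long flags;
	int err;

	/* With plain spin_lock(), the tasklet below could interrupt this
	 * CPU while the lock is held and then spin on it forever: a
	 * self-deadlock. spin_lock_irqsave() keeps interrupts (and hence
	 * softirq processing) off until the unlock. */
	spin_lock_irqsave(&dev->lock, flags);
	err = example_queue_add(&dev->queue, req);
	spin_unlock_irqrestore(&dev->lock, flags);

	tasklet_schedule(&dev->tasklet);	/* completion path runs in softirq */
	return err;
}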
Signed-off-by: Heiko Stuebner Signed-off-by: Herbert Xu diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c index 6b72f8d..da9c73d 100644 --- a/drivers/crypto/rockchip/rk3288_crypto.c +++ b/drivers/crypto/rockchip/rk3288_crypto.c @@ -190,12 +190,13 @@ static void rk_crypto_tasklet_cb(unsigned long data) { struct rk_crypto_info *dev = (struct rk_crypto_info *)data; struct crypto_async_request *async_req, *backlog; + unsigned long flags; int err = 0; - spin_lock(&dev->lock); + spin_lock_irqsave(&dev->lock, flags); backlog = crypto_get_backlog(&dev->queue); async_req = crypto_dequeue_request(&dev->queue); - spin_unlock(&dev->lock); + spin_unlock_irqrestore(&dev->lock, flags); if (!async_req) { dev_err(dev->dev, "async_req is NULL !!\n"); return; } diff --git a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c index 4a8f9de..d98b681 100644 --- a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c +++ b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c @@ -24,6 +24,7 @@ static void rk_crypto_complete(struct rk_crypto_info *dev, int err) static int rk_handle_req(struct rk_crypto_info *dev, struct ablkcipher_request *req) { + unsigned long flags; int err; if (!IS_ALIGNED(req->nbytes, dev->align_size)) @@ -38,9 +39,9 @@ static int rk_handle_req(struct rk_crypto_info *dev, dev->aligned = 1; dev->ablk_req = req; - spin_lock(&dev->lock); + spin_lock_irqsave(&dev->lock, flags); err = ablkcipher_enqueue_request(&dev->queue, req); - spin_unlock(&dev->lock); + spin_unlock_irqrestore(&dev->lock, flags); tasklet_schedule(&dev->crypto_tasklet); return err; } @@ -267,12 +268,13 @@ static int rk_set_data_start(struct rk_crypto_info *dev) static int rk_ablk_start(struct rk_crypto_info *dev) { + unsigned long flags; int err; - spin_lock(&dev->lock); + spin_lock_irqsave(&dev->lock, flags); rk_ablk_hw_init(dev); err = rk_set_data_start(dev); - spin_unlock(&dev->lock); + spin_unlock_irqrestore(&dev->lock, flags); return err; } -- cgit v0.10.2 From 3a020a723c65eb8ffa7c237faca26521a024e582 Mon Sep 17 00:00:00 2001 From: "Wang, Rui Y" Date: Sun, 29 Nov 2015 22:45:33 +0800 Subject: crypto: ghash-clmulni - Fix load failure ghash_clmulni_intel fails to load on Linux 4.3+ with the following message: "modprobe: ERROR: could not insert 'ghash_clmulni_intel': Invalid argument" After 8996eafdc ("crypto: ahash - ensure statesize is non-zero") all ahash drivers are required to implement import()/export(), and must have a non-zero statesize. This patch has been tested with the algif_hash interface. The calculated digest values, after several rounds of import()s and export()s, match those calculated by tcrypt.
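Since the commit message cites algif_hash, a hedged userspace sketch of driving a hash through that interface follows (error handling omitted; the all-zero key and message are placeholders):

#include <sys/socket.h>
#include <linux/if_alg.h>
#include <unistd.h>

#ifndef SOL_ALG
#define SOL_ALG 279
#endif

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",
		.salg_name   = "ghash",
	};
	unsigned char key[16] = { 0 }, msg[16] = { 0 }, out[16];
	int tfm, op;

	tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(tfm, SOL_ALG, ALG_SET_KEY, key, sizeof(key)); /* ghash is keyed */
	op = accept(tfm, NULL, 0);

	send(op, msg, sizeof(msg), 0);	/* final chunk, so no MSG_MORE */
	read(op, out, sizeof(out));	/* 16-byte GHASH digest */

	close(op);
	close(tfm);
	return 0;
}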
Signed-off-by: Rui Wang Signed-off-by: Herbert Xu diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c index 440df0c..a69321a 100644 --- a/arch/x86/crypto/ghash-clmulni-intel_glue.c +++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c @@ -219,6 +219,29 @@ static int ghash_async_final(struct ahash_request *req) } } +static int ghash_async_import(struct ahash_request *req, const void *in) +{ + struct ahash_request *cryptd_req = ahash_request_ctx(req); + struct shash_desc *desc = cryptd_shash_desc(cryptd_req); + struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); + + ghash_async_init(req); + memcpy(dctx, in, sizeof(*dctx)); + return 0; + +} + +static int ghash_async_export(struct ahash_request *req, void *out) +{ + struct ahash_request *cryptd_req = ahash_request_ctx(req); + struct shash_desc *desc = cryptd_shash_desc(cryptd_req); + struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); + + memcpy(out, dctx, sizeof(*dctx)); + return 0; + +} + static int ghash_async_digest(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); @@ -288,8 +311,11 @@ static struct ahash_alg ghash_async_alg = { .final = ghash_async_final, .setkey = ghash_async_setkey, .digest = ghash_async_digest, + .export = ghash_async_export, + .import = ghash_async_import, .halg = { .digestsize = GHASH_DIGEST_SIZE, + .statesize = sizeof(struct ghash_desc_ctx), .base = { .cra_name = "ghash", .cra_driver_name = "ghash-clmulni", -- cgit v0.10.2 From 1a07834024dfca5c4bed5de8f8714306e0a11836 Mon Sep 17 00:00:00 2001 From: "Wang, Rui Y" Date: Sun, 29 Nov 2015 22:45:34 +0800 Subject: crypto: cryptd - Assign statesize properly cryptd_create_hash() fails by returning -EINVAL. This is because, after 8996eafdc ("crypto: ahash - ensure statesize is non-zero"), all ahash drivers must have a non-zero statesize. This patch fixes the problem by properly assigning the statesize. Signed-off-by: Rui Wang Signed-off-by: Herbert Xu diff --git a/crypto/cryptd.c b/crypto/cryptd.c index c4af8aa..7921251 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -637,6 +637,7 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, inst->alg.halg.base.cra_flags = type; inst->alg.halg.digestsize = salg->digestsize; + inst->alg.halg.statesize = salg->statesize; inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx); inst->alg.halg.base.cra_init = cryptd_hash_init_tfm; -- cgit v0.10.2 From f456cd2dc8578f989ef12964c11aa7b2fafbc319 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Mon, 30 Nov 2015 11:03:58 -0200 Subject: crypto: caam - pass the correct buffer length When buffer 0 is used we should use buflen_0 instead of buflen_1. Fix it. Signed-off-by: Fabio Estevam Signed-off-by: Herbert Xu diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index fe9c156..5845d4a 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -1596,7 +1596,7 @@ static int ahash_export(struct ahash_request *req, void *out) len = state->buflen_1; } else { buf = state->buf_0; - len = state->buflen_1; + len = state->buflen_0; } memcpy(export->buf, buf, len); -- cgit v0.10.2 From 0d3d054b43719ef33232677ba27ba6097afdafbc Mon Sep 17 00:00:00 2001 From: Leonidas Da Silva Barbosa Date: Mon, 30 Nov 2015 16:19:03 -0200 Subject: crypto: vmx - IV size failing on skcipher API The IV size was zero in the CBC and CTR modes, causing a bug triggered by the skcipher API. Fix this by adding the correct size.
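For context on why a zero ivsize is fatal here, a hedged kernel-side sketch (hypothetical, not from the driver; req/src/dst/nbytes assumed to be set up elsewhere): consumers size the IV buffer from the advertised ivsize, so a CBC/CTR algorithm that declares .ivsize = 0 hands the cipher no chaining state at all:

struct crypto_ablkcipher *tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
unsigned int ivsize = crypto_ablkcipher_ivsize(tfm); /* AES_BLOCK_SIZE once fixed, 0 before */
u8 *iv = kmalloc(ivsize, GFP_KERNEL);	/* zero-length buffer before the fix */

get_random_bytes(iv, ivsize);
ablkcipher_request_set_crypt(req, src, dst, nbytes, iv);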
Signed-off-by: Leonidas Da Silva Barbosa Signed-off-by: Paulo Smorigo Signed-off-by: Herbert Xu diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c index 0b8fe2e..78a9786 100644 --- a/drivers/crypto/vmx/aes_cbc.c +++ b/drivers/crypto/vmx/aes_cbc.c @@ -191,7 +191,7 @@ struct crypto_alg p8_aes_cbc_alg = { .cra_init = p8_aes_cbc_init, .cra_exit = p8_aes_cbc_exit, .cra_blkcipher = { - .ivsize = 0, + .ivsize = AES_BLOCK_SIZE, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .setkey = p8_aes_cbc_setkey, diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c index ee1306c..1febc4f 100644 --- a/drivers/crypto/vmx/aes_ctr.c +++ b/drivers/crypto/vmx/aes_ctr.c @@ -175,7 +175,7 @@ struct crypto_alg p8_aes_ctr_alg = { .cra_init = p8_aes_ctr_init, .cra_exit = p8_aes_ctr_exit, .cra_blkcipher = { - .ivsize = 0, + .ivsize = AES_BLOCK_SIZE, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .setkey = p8_aes_ctr_setkey, -- cgit v0.10.2 From 5e75ae1b3cef6455b131835621216cb92060da34 Mon Sep 17 00:00:00 2001 From: LEROY Christophe Date: Tue, 1 Dec 2015 12:44:15 +0100 Subject: crypto: talitos - add new crypto modes This patch adds the following algorithms to the talitos driver: * ecb(aes) * ctr(aes) * ecb(des) * cbc(des) * ecb(des3_ede) Signed-off-by: Christophe Leroy Signed-off-by: Herbert Xu diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index ab33898..a0d4a08 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -2324,6 +2324,22 @@ static struct talitos_alg_template driver_algs[] = { /* ABLKCIPHER algorithms. */ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .alg.crypto = { + .cra_name = "ecb(aes)", + .cra_driver_name = "ecb-aes-talitos", + .cra_blocksize = AES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + } + }, + .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | + DESC_HDR_SEL0_AESU, + }, + { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, + .alg.crypto = { .cra_name = "cbc(aes)", .cra_driver_name = "cbc-aes-talitos", .cra_blocksize = AES_BLOCK_SIZE, @@ -2341,6 +2357,73 @@ static struct talitos_alg_template driver_algs[] = { }, { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .alg.crypto = { + .cra_name = "ctr(aes)", + .cra_driver_name = "ctr-aes-talitos", + .cra_blocksize = AES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + } + }, + .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | + DESC_HDR_SEL0_AESU | + DESC_HDR_MODE0_AESU_CTR, + }, + { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, + .alg.crypto = { + .cra_name = "ecb(des)", + .cra_driver_name = "ecb-des-talitos", + .cra_blocksize = DES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_ablkcipher = { + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + } + }, + .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | + DESC_HDR_SEL0_DEU, + }, + { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, + .alg.crypto = { + .cra_name = "cbc(des)", + .cra_driver_name = "cbc-des-talitos", + .cra_blocksize = DES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_ablkcipher = { + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + } + }, + 
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | + DESC_HDR_SEL0_DEU | + DESC_HDR_MODE0_DEU_CBC, + }, + { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, + .alg.crypto = { + .cra_name = "ecb(des3_ede)", + .cra_driver_name = "ecb-3des-talitos", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_ablkcipher = { + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = DES3_EDE_BLOCK_SIZE, + } + }, + .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | + DESC_HDR_SEL0_DEU | + DESC_HDR_MODE0_DEU_3DES, + }, + { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, + .alg.crypto = { .cra_name = "cbc(des3_ede)", .cra_driver_name = "cbc-3des-talitos", .cra_blocksize = DES3_EDE_BLOCK_SIZE, diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h index 0090f32..8dd8f40 100644 --- a/drivers/crypto/talitos.h +++ b/drivers/crypto/talitos.h @@ -345,6 +345,7 @@ static inline bool has_ftr_sec1(struct talitos_private *priv) /* primary execution unit mode (MODE0) and derivatives */ #define DESC_HDR_MODE0_ENCRYPT cpu_to_be32(0x00100000) #define DESC_HDR_MODE0_AESU_CBC cpu_to_be32(0x00200000) +#define DESC_HDR_MODE0_AESU_CTR cpu_to_be32(0x00600000) #define DESC_HDR_MODE0_DEU_CBC cpu_to_be32(0x00400000) #define DESC_HDR_MODE0_DEU_3DES cpu_to_be32(0x00200000) #define DESC_HDR_MODE0_MDEU_CONT cpu_to_be32(0x08000000) -- cgit v0.10.2 From a103a75ad978592cf71b6d42c05849aacc76c2da Mon Sep 17 00:00:00 2001 From: Thierry Reding Date: Wed, 2 Dec 2015 17:16:36 +0100 Subject: crypto: n2 - Use platform_register/unregister_drivers() These new helpers simplify implementing multi-driver modules and properly handle failure to register one driver by unregistering all previously registered drivers. Signed-off-by: Thierry Reding Acked-by: David S. Miller Signed-off-by: Herbert Xu diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index 5450880..739a786 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c @@ -2243,22 +2243,19 @@ static struct platform_driver n2_mau_driver = { .remove = n2_mau_remove, }; +static struct platform_driver * const drivers[] = { + &n2_crypto_driver, + &n2_mau_driver, +}; + static int __init n2_init(void) { - int err = platform_driver_register(&n2_crypto_driver); - - if (!err) { - err = platform_driver_register(&n2_mau_driver); - if (err) - platform_driver_unregister(&n2_crypto_driver); - } - return err; + return platform_register_drivers(drivers, ARRAY_SIZE(drivers)); } static void __exit n2_exit(void) { - platform_driver_unregister(&n2_mau_driver); - platform_driver_unregister(&n2_crypto_driver); + platform_unregister_drivers(drivers, ARRAY_SIZE(drivers)); } module_init(n2_init); -- cgit v0.10.2 From 70e088fe8c7237eae20991f1e0b2d99ecc5f1292 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Thu, 3 Dec 2015 12:00:41 -0800 Subject: crypto: aead - fix kernel-doc warnings in crypto/aead.h Fix 21 occurrences of this kernel-doc warning in <crypto/aead.h>: ..//include/crypto/aead.h:149: warning: No description found for parameter 'base' Signed-off-by: Randy Dunlap Signed-off-by: Herbert Xu diff --git a/include/crypto/aead.h b/include/crypto/aead.h index 077cae1..84d13b1 100644 --- a/include/crypto/aead.h +++ b/include/crypto/aead.h @@ -128,6 +128,7 @@ struct aead_request { * @exit: Deinitialize the cryptographic transformation object. This is a * counterpart to @init, used to remove various changes set in * @init. + * @base: Definition of a generic crypto cipher algorithm.
* * All fields except @ivsize is mandatory and must be filled. */ -- cgit v0.10.2 From 9809ebcd0e8cacb20a938e7a9fab68ea47e80f82 Mon Sep 17 00:00:00 2001 From: Tadeusz Struk Date: Fri, 4 Dec 2015 16:56:17 -0800 Subject: crypto: qat - add new device definitions Add dev ids and names for the new device types. Signed-off-by: Tadeusz Struk Signed-off-by: Herbert Xu diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h index ca853d5..9786a2e 100644 --- a/drivers/crypto/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h @@ -55,8 +55,18 @@ #define ADF_DH895XCC_DEVICE_NAME "dh895xcc" #define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf" +#define ADF_C62X_DEVICE_NAME "c62x" +#define ADF_C62XVF_DEVICE_NAME "c62xvf" +#define ADF_C3XXX_DEVICE_NAME "c3xxx" +#define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf" #define ADF_DH895XCC_PCI_DEVICE_ID 0x435 #define ADF_DH895XCCIOV_PCI_DEVICE_ID 0x443 +#define ADF_C62X_PCI_DEVICE_ID 0x37c8 +#define ADF_C62XIOV_PCI_DEVICE_ID 0x37c9 +#define ADF_C3XXX_PCI_DEVICE_ID 0x19e2 +#define ADF_C3XXXIOV_PCI_DEVICE_ID 0x19e3 +#define ADF_ERRSOU3 (0x3A000 + 0x0C) +#define ADF_ERRSOU5 (0x3A000 + 0xD8) #define ADF_PCI_MAX_BARS 3 #define ADF_DEVICE_NAME_LENGTH 32 #define ADF_ETR_MAX_RINGS_PER_BANK 16 -- cgit v0.10.2 From b0272276d903d87160df37f0f56af56cbda59801 Mon Sep 17 00:00:00 2001 From: Pingchao Yang Date: Fri, 4 Dec 2015 16:56:23 -0800 Subject: crypto: qat - add support for new devices to FW loader FW loader updates for new qat devices Signed-off-by: Tadeusz Struk Signed-off-by: Herbert Xu diff --git a/drivers/crypto/qat/qat_common/adf_accel_engine.c b/drivers/crypto/qat/qat_common/adf_accel_engine.c index 20b08bd..a42fc42 100644 --- a/drivers/crypto/qat/qat_common/adf_accel_engine.c +++ b/drivers/crypto/qat/qat_common/adf_accel_engine.c @@ -78,9 +78,12 @@ int adf_ae_fw_load(struct adf_accel_dev *accel_dev) uof_addr = (void *)loader_data->uof_fw->data; mmp_size = loader_data->mmp_fw->size; mmp_addr = (void *)loader_data->mmp_fw->data; - qat_uclo_wr_mimage(loader_data->fw_loader, mmp_addr, mmp_size); - if (qat_uclo_map_uof_obj(loader_data->fw_loader, uof_addr, uof_size)) { - dev_err(&GET_DEV(accel_dev), "Failed to map UOF\n"); + if (qat_uclo_wr_mimage(loader_data->fw_loader, mmp_addr, mmp_size)) { + dev_err(&GET_DEV(accel_dev), "Failed to load MMP\n"); + goto out_err; + } + if (qat_uclo_map_obj(loader_data->fw_loader, uof_addr, uof_size)) { + dev_err(&GET_DEV(accel_dev), "Failed to map FW\n"); goto out_err; } if (qat_uclo_wr_all_uimage(loader_data->fw_loader)) { diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h index 3f76bd4..d482022 100644 --- a/drivers/crypto/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/qat/qat_common/adf_common_drv.h @@ -178,6 +178,8 @@ void qat_hal_reset(struct icp_qat_fw_loader_handle *handle); int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle); void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int ctx_mask); +int qat_hal_check_ae_active(struct icp_qat_fw_loader_handle *handle, + unsigned int ae); int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle, unsigned char ae, enum icp_qat_uof_regtype lm_type, unsigned char mode); @@ -216,10 +218,10 @@ int qat_hal_wr_lm(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned short lm_addr, unsigned int value); int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle); void 
qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle); -int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle, - void *addr_ptr, int mem_size); -void qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle, - void *addr_ptr, int mem_size); +int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle, void *addr_ptr, + int mem_size); +int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle, + void *addr_ptr, int mem_size); #if defined(CONFIG_PCI_IOV) int adf_sriov_configure(struct pci_dev *pdev, int numvfs); void adf_disable_sriov(struct adf_accel_dev *accel_dev); diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h b/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h index 5e1aa40..2ffef3e 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h +++ b/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h @@ -68,11 +68,21 @@ struct icp_qat_fw_loader_hal_handle { struct icp_qat_fw_loader_handle { struct icp_qat_fw_loader_hal_handle *hal_handle; + struct pci_dev *pci_dev; void *obj_handle; + void *sobj_handle; + bool fw_auth; void __iomem *hal_sram_addr_v; void __iomem *hal_cap_g_ctl_csr_addr_v; void __iomem *hal_cap_ae_xfer_csr_addr_v; void __iomem *hal_cap_ae_local_csr_addr_v; void __iomem *hal_ep_csr_addr_v; }; + +struct icp_firml_dram_desc { + void __iomem *dram_base_addr; + void *dram_base_addr_v; + dma_addr_t dram_bus_addr; + u64 dram_size; +}; #endif diff --git a/drivers/crypto/qat/qat_common/icp_qat_hal.h b/drivers/crypto/qat/qat_common/icp_qat_hal.h index 85b6d24..7187917 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_hal.h +++ b/drivers/crypto/qat/qat_common/icp_qat_hal.h @@ -81,6 +81,31 @@ enum hal_ae_csr { LOCAL_CSR_STATUS = 0x180, }; +enum fcu_csr { + FCU_CONTROL = 0x8c0, + FCU_STATUS = 0x8c4, + FCU_STATUS1 = 0x8c8, + FCU_DRAM_ADDR_LO = 0x8cc, + FCU_DRAM_ADDR_HI = 0x8d0, + FCU_RAMBASE_ADDR_HI = 0x8d4, + FCU_RAMBASE_ADDR_LO = 0x8d8 +}; + +enum fcu_cmd { + FCU_CTRL_CMD_NOOP = 0, + FCU_CTRL_CMD_AUTH = 1, + FCU_CTRL_CMD_LOAD = 2, + FCU_CTRL_CMD_START = 3 +}; + +enum fcu_sts { + FCU_STS_NO_STS = 0, + FCU_STS_VERI_DONE = 1, + FCU_STS_LOAD_DONE = 2, + FCU_STS_VERI_FAIL = 3, + FCU_STS_LOAD_FAIL = 4, + FCU_STS_BUSY = 5 +}; #define UA_ECS (0x1 << 31) #define ACS_ABO_BITPOS 31 #define ACS_ACNO 0x7 @@ -98,6 +123,13 @@ enum hal_ae_csr { #define LCS_STATUS (0x1) #define MMC_SHARE_CS_BITPOS 2 #define GLOBAL_CSR 0xA00 +#define FCU_CTRL_AE_POS 0x8 +#define FCU_AUTH_STS_MASK 0x7 +#define FCU_STS_DONE_POS 0x9 +#define FCU_STS_AUTHFWLD_POS 0X8 +#define FCU_LOADED_AE_POS 0x16 +#define FW_AUTH_WAIT_PERIOD 10 +#define FW_AUTH_MAX_RETRY 300 #define SET_CAP_CSR(handle, csr, val) \ ADF_CSR_WR(handle->hal_cap_g_ctl_csr_addr_v, csr, val) @@ -106,14 +138,14 @@ enum hal_ae_csr { #define SET_GLB_CSR(handle, csr, val) SET_CAP_CSR(handle, csr + GLOBAL_CSR, val) #define GET_GLB_CSR(handle, csr) GET_CAP_CSR(handle, GLOBAL_CSR + csr) #define AE_CSR(handle, ae) \ - (handle->hal_cap_ae_local_csr_addr_v + \ + ((char __iomem *)handle->hal_cap_ae_local_csr_addr_v + \ ((ae & handle->hal_handle->ae_mask) << 12)) #define AE_CSR_ADDR(handle, ae, csr) (AE_CSR(handle, ae) + (0x3ff & csr)) #define SET_AE_CSR(handle, ae, csr, val) \ ADF_CSR_WR(AE_CSR_ADDR(handle, ae, csr), 0, val) #define GET_AE_CSR(handle, ae, csr) ADF_CSR_RD(AE_CSR_ADDR(handle, ae, csr), 0) #define AE_XFER(handle, ae) \ - (handle->hal_cap_ae_xfer_csr_addr_v + \ + ((char __iomem *)handle->hal_cap_ae_xfer_csr_addr_v + \ ((ae & handle->hal_handle->ae_mask) << 12)) #define 
AE_XFER_ADDR(handle, ae, reg) (AE_XFER(handle, ae) + \ ((reg & 0xff) << 2)) @@ -121,5 +153,4 @@ enum hal_ae_csr { ADF_CSR_WR(AE_XFER_ADDR(handle, ae, reg), 0, val) #define SRAM_WRITE(handle, addr, val) \ ADF_CSR_WR(handle->hal_sram_addr_v, addr, val) -#define SRAM_READ(handle, addr) ADF_CSR_RD(handle->hal_sram_addr_v, addr) #endif diff --git a/drivers/crypto/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/qat/qat_common/icp_qat_uclo.h index 2132a8c..d97db99 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_uclo.h +++ b/drivers/crypto/qat/qat_common/icp_qat_uclo.h @@ -47,32 +47,55 @@ #ifndef __ICP_QAT_UCLO_H__ #define __ICP_QAT_UCLO_H__ -#define ICP_QAT_AC_C_CPU_TYPE 0x00400000 +#define ICP_QAT_AC_895XCC_DEV_TYPE 0x00400000 +#define ICP_QAT_AC_C62X_DEV_TYPE 0x01000000 +#define ICP_QAT_AC_C3XXX_DEV_TYPE 0x02000000 #define ICP_QAT_UCLO_MAX_AE 12 #define ICP_QAT_UCLO_MAX_CTX 8 #define ICP_QAT_UCLO_MAX_UIMAGE (ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX) #define ICP_QAT_UCLO_MAX_USTORE 0x4000 #define ICP_QAT_UCLO_MAX_XFER_REG 128 #define ICP_QAT_UCLO_MAX_GPR_REG 128 -#define ICP_QAT_UCLO_MAX_NN_REG 128 #define ICP_QAT_UCLO_MAX_LMEM_REG 1024 #define ICP_QAT_UCLO_AE_ALL_CTX 0xff #define ICP_QAT_UOF_OBJID_LEN 8 #define ICP_QAT_UOF_FID 0xc6c2 #define ICP_QAT_UOF_MAJVER 0x4 #define ICP_QAT_UOF_MINVER 0x11 -#define ICP_QAT_UOF_NN_MODE_NOTCARE 0xff #define ICP_QAT_UOF_OBJS "UOF_OBJS" #define ICP_QAT_UOF_STRT "UOF_STRT" -#define ICP_QAT_UOF_GTID "UOF_GTID" #define ICP_QAT_UOF_IMAG "UOF_IMAG" #define ICP_QAT_UOF_IMEM "UOF_IMEM" -#define ICP_QAT_UOF_MSEG "UOF_MSEG" #define ICP_QAT_UOF_LOCAL_SCOPE 1 #define ICP_QAT_UOF_INIT_EXPR 0 #define ICP_QAT_UOF_INIT_REG 1 #define ICP_QAT_UOF_INIT_REG_CTX 2 #define ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP 3 +#define ICP_QAT_SUOF_OBJ_ID_LEN 8 +#define ICP_QAT_SUOF_FID 0x53554f46 +#define ICP_QAT_SUOF_MAJVER 0x0 +#define ICP_QAT_SUOF_MINVER 0x1 +#define ICP_QAT_SIMG_AE_INIT_SEQ_LEN (50 * sizeof(unsigned long long)) +#define ICP_QAT_SIMG_AE_INSTS_LEN (0x4000 * sizeof(unsigned long long)) +#define ICP_QAT_CSS_FWSK_MODULUS_LEN 256 +#define ICP_QAT_CSS_FWSK_EXPONENT_LEN 4 +#define ICP_QAT_CSS_FWSK_PAD_LEN 252 +#define ICP_QAT_CSS_FWSK_PUB_LEN (ICP_QAT_CSS_FWSK_MODULUS_LEN + \ + ICP_QAT_CSS_FWSK_EXPONENT_LEN + \ + ICP_QAT_CSS_FWSK_PAD_LEN) +#define ICP_QAT_CSS_SIGNATURE_LEN 256 +#define ICP_QAT_CSS_AE_IMG_LEN (sizeof(struct icp_qat_simg_ae_mode) + \ + ICP_QAT_SIMG_AE_INIT_SEQ_LEN + \ + ICP_QAT_SIMG_AE_INSTS_LEN) +#define ICP_QAT_CSS_AE_SIMG_LEN (sizeof(struct icp_qat_css_hdr) + \ + ICP_QAT_CSS_FWSK_PUB_LEN + \ + ICP_QAT_CSS_SIGNATURE_LEN + \ + ICP_QAT_CSS_AE_IMG_LEN) +#define ICP_QAT_AE_IMG_OFFSET (sizeof(struct icp_qat_css_hdr) + \ + ICP_QAT_CSS_FWSK_MODULUS_LEN + \ + ICP_QAT_CSS_FWSK_EXPONENT_LEN + \ + ICP_QAT_CSS_SIGNATURE_LEN) +#define ICP_QAT_CSS_MAX_IMAGE_LEN 0x40000 #define ICP_QAT_CTX_MODE(ae_mode) ((ae_mode) & 0xf) #define ICP_QAT_NN_MODE(ae_mode) (((ae_mode) >> 0x4) & 0xf) @@ -112,6 +135,11 @@ enum icp_qat_uof_regtype { ICP_NEIGH_REL, }; +enum icp_qat_css_fwtype { + CSS_AE_FIRMWARE = 0, + CSS_MMP_FIRMWARE = 1 +}; + struct icp_qat_uclo_page { struct icp_qat_uclo_encap_page *encap_page; struct icp_qat_uclo_region *region; @@ -235,7 +263,7 @@ struct icp_qat_uof_filechunkhdr { }; struct icp_qat_uof_objhdr { - unsigned int cpu_type; + unsigned int ac_dev_type; unsigned short min_cpu_ver; unsigned short max_cpu_ver; short max_chunks; @@ -326,7 +354,7 @@ struct icp_qat_uof_image { unsigned int img_name; unsigned int ae_assigned; unsigned int ctx_assigned; - unsigned int 
cpu_type; + unsigned int ac_dev_type; unsigned int entry_address; unsigned int fill_pattern[2]; unsigned int reloadable_size; @@ -374,4 +402,127 @@ struct icp_qat_uof_batch_init { unsigned int size; struct icp_qat_uof_batch_init *next; }; + +struct icp_qat_suof_img_hdr { + char *simg_buf; + unsigned long simg_len; + char *css_header; + char *css_key; + char *css_signature; + char *css_simg; + unsigned long simg_size; + unsigned int ae_num; + unsigned int ae_mask; + unsigned int fw_type; + unsigned long simg_name; + unsigned long appmeta_data; +}; + +struct icp_qat_suof_img_tbl { + unsigned int num_simgs; + struct icp_qat_suof_img_hdr *simg_hdr; +}; + +struct icp_qat_suof_handle { + unsigned int file_id; + unsigned int check_sum; + char min_ver; + char maj_ver; + char fw_type; + char *suof_buf; + unsigned int suof_size; + char *sym_str; + unsigned int sym_size; + struct icp_qat_suof_img_tbl img_table; +}; + +struct icp_qat_fw_auth_desc { + unsigned int img_len; + unsigned int reserved; + unsigned int css_hdr_high; + unsigned int css_hdr_low; + unsigned int img_high; + unsigned int img_low; + unsigned int signature_high; + unsigned int signature_low; + unsigned int fwsk_pub_high; + unsigned int fwsk_pub_low; + unsigned int img_ae_mode_data_high; + unsigned int img_ae_mode_data_low; + unsigned int img_ae_init_data_high; + unsigned int img_ae_init_data_low; + unsigned int img_ae_insts_high; + unsigned int img_ae_insts_low; +}; + +struct icp_qat_auth_chunk { + struct icp_qat_fw_auth_desc fw_auth_desc; + u64 chunk_size; + u64 chunk_bus_addr; +}; + +struct icp_qat_css_hdr { + unsigned int module_type; + unsigned int header_len; + unsigned int header_ver; + unsigned int module_id; + unsigned int module_vendor; + unsigned int date; + unsigned int size; + unsigned int key_size; + unsigned int module_size; + unsigned int exponent_size; + unsigned int fw_type; + unsigned int reserved[21]; +}; + +struct icp_qat_simg_ae_mode { + unsigned int file_id; + unsigned short maj_ver; + unsigned short min_ver; + unsigned int dev_type; + unsigned short devmax_ver; + unsigned short devmin_ver; + unsigned int ae_mask; + unsigned int ctx_enables; + char fw_type; + char ctx_mode; + char nn_mode; + char lm0_mode; + char lm1_mode; + char scs_mode; + char lm2_mode; + char lm3_mode; + char tindex_mode; + unsigned char reserved[7]; + char simg_name[256]; + char appmeta_data[256]; +}; + +struct icp_qat_suof_filehdr { + unsigned int file_id; + unsigned int check_sum; + char min_ver; + char maj_ver; + char fw_type; + char reserved; + unsigned short max_chunks; + unsigned short num_chunks; +}; + +struct icp_qat_suof_chunk_hdr { + char chunk_id[ICP_QAT_SUOF_OBJ_ID_LEN]; + u64 offset; + u64 size; +}; + +struct icp_qat_suof_strtable { + unsigned int tab_length; + unsigned int strings; +}; + +struct icp_qat_suof_objhdr { + unsigned int img_length; + unsigned int reserved; +}; #endif diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c index 380e761..45c1739 100644 --- a/drivers/crypto/qat/qat_common/qat_hal.c +++ b/drivers/crypto/qat/qat_common/qat_hal.c @@ -45,21 +45,22 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include +#include #include "adf_accel_devices.h" #include "adf_common_drv.h" #include "icp_qat_hal.h" #include "icp_qat_uclo.h" -#define BAD_REGADDR 0xffff -#define MAX_RETRY_TIMES 10000 -#define INIT_CTX_ARB_VALUE 0x0 +#define BAD_REGADDR 0xffff +#define MAX_RETRY_TIMES 10000 +#define INIT_CTX_ARB_VALUE 0x0 #define INIT_CTX_ENABLE_VALUE 0x0 -#define INIT_PC_VALUE 0x0 +#define INIT_PC_VALUE 0x0 #define INIT_WAKEUP_EVENTS_VALUE 0x1 #define INIT_SIG_EVENTS_VALUE 0x1 #define INIT_CCENABLE_VALUE 0x2000 -#define RST_CSR_QAT_LSB 20 +#define RST_CSR_QAT_LSB 20 #define RST_CSR_AE_LSB 0 #define MC_TIMESTAMP_ENABLE (0x1 << 7) @@ -391,9 +392,6 @@ static int qat_hal_check_ae_alive(struct icp_qat_fw_loader_handle *handle) unsigned int times = MAX_RETRY_TIMES; for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { - if (!(handle->hal_handle->ae_mask & (1 << ae))) - continue; - qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, (unsigned int *)&base_cnt); base_cnt &= 0xffff; @@ -413,6 +411,20 @@ static int qat_hal_check_ae_alive(struct icp_qat_fw_loader_handle *handle) return 0; } +int qat_hal_check_ae_active(struct icp_qat_fw_loader_handle *handle, + unsigned int ae) +{ + unsigned int enable = 0, active = 0; + + qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &enable); + qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &active); + if ((enable & (0xff >> CE_ENABLE_BITPOS)) || + (active & (1 << ACS_ABO_BITPOS))) + return 1; + else + return 0; +} + static void qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle) { unsigned int misc_ctl; @@ -425,8 +437,6 @@ static void qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle) (~MC_TIMESTAMP_ENABLE)); for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { - if (!(handle->hal_handle->ae_mask & (1 << ae))) - continue; qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_LOW, 0); qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_HIGH, 0); } @@ -440,8 +450,9 @@ static void qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle) #define ESRAM_AUTO_INIT_CSR_OFFSET 0xC1C static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle) { - void __iomem *csr_addr = handle->hal_ep_csr_addr_v + - ESRAM_AUTO_INIT_CSR_OFFSET; + void __iomem *csr_addr = + (void __iomem *)((uintptr_t)handle->hal_ep_csr_addr_v + + ESRAM_AUTO_INIT_CSR_OFFSET); unsigned int csr_val, times = 30; csr_val = ADF_CSR_RD(csr_addr, 0); @@ -493,8 +504,6 @@ int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle) /* Set undefined power-up/reset states to reasonable default values */ for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { - if (!(handle->hal_handle->ae_mask & (1 << ae))) - continue; qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, INIT_CTX_ENABLE_VALUE); qat_hal_wr_indr_csr(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX, @@ -598,25 +607,31 @@ static void qat_hal_enable_ctx(struct icp_qat_fw_loader_handle *handle, qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx); } -static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle) +static void qat_hal_clear_xfer(struct icp_qat_fw_loader_handle *handle) { unsigned char ae; - unsigned int ctx_mask = ICP_QAT_UCLO_AE_ALL_CTX; - int times = MAX_RETRY_TIMES; - unsigned int csr_val = 0; unsigned short reg; - unsigned int savctx = 0; - int ret = 0; for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { - if (!(handle->hal_handle->ae_mask & (1 << ae))) - continue; for (reg = 0; reg < ICP_QAT_UCLO_MAX_GPR_REG; reg++) { qat_hal_init_rd_xfer(handle, ae, 0, ICP_SR_RD_ABS, reg, 0); qat_hal_init_rd_xfer(handle, ae, 0, ICP_DR_RD_ABS, reg, 0); } 
+ } +} + +static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle) +{ + unsigned char ae; + unsigned int ctx_mask = ICP_QAT_UCLO_AE_ALL_CTX; + int times = MAX_RETRY_TIMES; + unsigned int csr_val = 0; + unsigned int savctx = 0; + int ret = 0; + + for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val); csr_val &= ~(1 << MMC_SHARE_CS_BITPOS); qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val); @@ -638,8 +653,6 @@ static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle) qat_hal_enable_ctx(handle, ae, ctx_mask); } for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { - if (!(handle->hal_handle->ae_mask & (1 << ae))) - continue; /* wait for AE to finish */ do { ret = qat_hal_wait_cycles(handle, ae, 20, 1); @@ -667,10 +680,10 @@ static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle) return 0; } -#define ICP_DH895XCC_AE_OFFSET 0x20000 -#define ICP_DH895XCC_CAP_OFFSET (ICP_DH895XCC_AE_OFFSET + 0x10000) +#define ICP_QAT_AE_OFFSET 0x20000 +#define ICP_QAT_CAP_OFFSET (ICP_QAT_AE_OFFSET + 0x10000) #define LOCAL_TO_XFER_REG_OFFSET 0x800 -#define ICP_DH895XCC_EP_OFFSET 0x3a000 +#define ICP_QAT_EP_OFFSET 0x3a000 int qat_hal_init(struct adf_accel_dev *accel_dev) { unsigned char ae; @@ -687,15 +700,22 @@ int qat_hal_init(struct adf_accel_dev *accel_dev) if (!handle) return -ENOMEM; - handle->hal_cap_g_ctl_csr_addr_v = misc_bar->virt_addr + - ICP_DH895XCC_CAP_OFFSET; - handle->hal_cap_ae_xfer_csr_addr_v = misc_bar->virt_addr + - ICP_DH895XCC_AE_OFFSET; - handle->hal_ep_csr_addr_v = misc_bar->virt_addr + - ICP_DH895XCC_EP_OFFSET; - handle->hal_cap_ae_local_csr_addr_v = - handle->hal_cap_ae_xfer_csr_addr_v + LOCAL_TO_XFER_REG_OFFSET; handle->hal_sram_addr_v = sram_bar->virt_addr; + handle->hal_cap_g_ctl_csr_addr_v = + (void __iomem *)((uintptr_t)misc_bar->virt_addr + + ICP_QAT_CAP_OFFSET); + handle->hal_cap_ae_xfer_csr_addr_v = + (void __iomem *)((uintptr_t)misc_bar->virt_addr + + ICP_QAT_AE_OFFSET); + handle->hal_ep_csr_addr_v = + (void __iomem *)((uintptr_t)misc_bar->virt_addr + + ICP_QAT_EP_OFFSET); + handle->hal_cap_ae_local_csr_addr_v = + (void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v + + LOCAL_TO_XFER_REG_OFFSET); + handle->pci_dev = pci_info->pci_dev; + handle->fw_auth = (handle->pci_dev->device == + ADF_DH895XCC_PCI_DEVICE_ID) ? 
false : true; handle->hal_handle = kzalloc(sizeof(*handle->hal_handle), GFP_KERNEL); if (!handle->hal_handle) goto out_hal_handle; @@ -723,14 +743,16 @@ int qat_hal_init(struct adf_accel_dev *accel_dev) dev_err(&GET_DEV(accel_dev), "qat_hal_clr_reset error\n"); goto out_err; } - if (qat_hal_clear_gpr(handle)) - goto out_err; + qat_hal_clear_xfer(handle); + if (!handle->fw_auth) { + if (qat_hal_clear_gpr(handle)) + goto out_err; + } + /* Set SIGNATURE_ENABLE[0] to 0x1 in order to enable ALU_OUT csr */ for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { unsigned int csr_val = 0; - if (!(hw_data->ae_mask & (1 << ae))) - continue; qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE, &csr_val); csr_val |= 0x1; qat_hal_wr_ae_csr(handle, ae, SIGNATURE_ENABLE, csr_val); @@ -756,15 +778,31 @@ void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle) void qat_hal_start(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int ctx_mask) { - qat_hal_put_wakeup_event(handle, ae, (~ctx_mask) & + int retry = 0; + unsigned int fcu_sts = 0; + + if (handle->fw_auth) { + SET_CAP_CSR(handle, FCU_CONTROL, FCU_CTRL_CMD_START); + do { + msleep(FW_AUTH_WAIT_PERIOD); + fcu_sts = GET_CAP_CSR(handle, FCU_STATUS); + if (((fcu_sts >> FCU_STS_DONE_POS) & 0x1)) + return; + } while (retry++ < FW_AUTH_MAX_RETRY); + pr_err("QAT: start error (AE 0x%x FCU_STS = 0x%x)\n", ae, + fcu_sts); + } else { + qat_hal_put_wakeup_event(handle, ae, (~ctx_mask) & ICP_QAT_UCLO_AE_ALL_CTX, 0x10000); - qat_hal_enable_ctx(handle, ae, ctx_mask); + qat_hal_enable_ctx(handle, ae, ctx_mask); + } } void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae, unsigned int ctx_mask) { - qat_hal_disable_ctx(handle, ae, ctx_mask); + if (!handle->fw_auth) + qat_hal_disable_ctx(handle, ae, ctx_mask); } void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle, diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c index c48f181..25d15f1 100644 --- a/drivers/crypto/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/qat/qat_common/qat_uclo.c @@ -47,7 +47,7 @@ #include #include #include - +#include #include "adf_accel_devices.h" #include "adf_common_drv.h" #include "icp_qat_uclo.h" @@ -119,10 +119,10 @@ static char *qat_uclo_get_string(struct icp_qat_uof_strtable *str_table, { if ((!str_table->table_len) || (str_offset > str_table->table_len)) return NULL; - return (char *)(((unsigned long)(str_table->strings)) + str_offset); + return (char *)(((uintptr_t)(str_table->strings)) + str_offset); } -static int qat_uclo_check_format(struct icp_qat_uof_filehdr *hdr) +static int qat_uclo_check_uof_format(struct icp_qat_uof_filehdr *hdr) { int maj = hdr->maj_ver & 0xff; int min = hdr->min_ver & 0xff; @@ -139,6 +139,31 @@ static int qat_uclo_check_format(struct icp_qat_uof_filehdr *hdr) return 0; } +static int qat_uclo_check_suof_format(struct icp_qat_suof_filehdr *suof_hdr) +{ + int maj = suof_hdr->maj_ver & 0xff; + int min = suof_hdr->min_ver & 0xff; + + if (suof_hdr->file_id != ICP_QAT_SUOF_FID) { + pr_err("QAT: invalid header 0x%x\n", suof_hdr->file_id); + return -EINVAL; + } + if (suof_hdr->fw_type != 0) { + pr_err("QAT: unsupported firmware type\n"); + return -EINVAL; + } + if (suof_hdr->num_chunks <= 0x1) { + pr_err("QAT: SUOF chunk amount is incorrect\n"); + return -EINVAL; + } + if (maj != ICP_QAT_SUOF_MAJVER || min != ICP_QAT_SUOF_MINVER) { + pr_err("QAT: bad SUOF version, major 0x%x, minor 0x%x\n", + maj, min); + return -EINVAL; + } + return 0; +} + static void 
qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle, unsigned int addr, unsigned int *val, unsigned int num_in_bytes) @@ -275,7 +300,7 @@ static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle unsigned int i, flag = 0; mem_val_attr = - (struct icp_qat_uof_memvar_attr *)((unsigned long)init_mem + + (struct icp_qat_uof_memvar_attr *)((uintptr_t)init_mem + sizeof(struct icp_qat_uof_initmem)); init_header = *init_tab_base; @@ -425,8 +450,8 @@ static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle) if (qat_uclo_init_ae_memory(handle, initmem)) return -EINVAL; } - initmem = (struct icp_qat_uof_initmem *)((unsigned long)( - (unsigned long)initmem + + initmem = (struct icp_qat_uof_initmem *)((uintptr_t)( + (uintptr_t)initmem + sizeof(struct icp_qat_uof_initmem)) + (sizeof(struct icp_qat_uof_memvar_attr) * initmem->val_attr_num)); @@ -454,7 +479,7 @@ static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr, int i; struct icp_qat_uof_chunkhdr *chunk_hdr = (struct icp_qat_uof_chunkhdr *) - ((unsigned long)obj_hdr + sizeof(struct icp_qat_uof_objhdr)); + ((uintptr_t)obj_hdr + sizeof(struct icp_qat_uof_objhdr)); for (i = 0; i < obj_hdr->num_chunks; i++) { if ((cur < (void *)&chunk_hdr[i]) && @@ -596,7 +621,7 @@ static void qat_uclo_map_image_page(struct icp_qat_uof_encap_obj page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock; for (i = 0; i < uword_block_tab->entry_num; i++) page->uwblock[i].micro_words = - (unsigned long)encap_uof_obj->beg_uof + uwblock[i].uword_offset; + (uintptr_t)encap_uof_obj->beg_uof + uwblock[i].uword_offset; } static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle, @@ -697,7 +722,7 @@ qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr, memcpy(&str_table->table_len, obj_hdr->file_buff + chunk_hdr->offset, sizeof(str_table->table_len)); hdr_size = (char *)&str_table->strings - (char *)str_table; - str_table->strings = (unsigned long)obj_hdr->file_buff + + str_table->strings = (uintptr_t)obj_hdr->file_buff + chunk_hdr->offset + hdr_size; return str_table; } @@ -721,13 +746,31 @@ qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj, } } +static unsigned int +qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle) +{ + switch (handle->pci_dev->device) { + case ADF_DH895XCC_PCI_DEVICE_ID: + return ICP_QAT_AC_895XCC_DEV_TYPE; + case ADF_C62X_PCI_DEVICE_ID: + return ICP_QAT_AC_C62X_DEV_TYPE; + case ADF_C3XXX_PCI_DEVICE_ID: + return ICP_QAT_AC_C3XXX_DEV_TYPE; + default: + pr_err("QAT: unsupported device 0x%x\n", + handle->pci_dev->device); + return 0; + } +} + static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle) { unsigned int maj_ver, prod_type = obj_handle->prod_type; - if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->cpu_type)) { - pr_err("QAT: UOF type 0x%x not match with cur platform 0x%x\n", - obj_handle->encap_uof_obj.obj_hdr->cpu_type, prod_type); + if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->ac_dev_type)) { + pr_err("QAT: UOF type 0x%x doesn't match with platform 0x%x\n", + obj_handle->encap_uof_obj.obj_hdr->ac_dev_type, + prod_type); return -EINVAL; } maj_ver = obj_handle->prod_rev & 0xff; @@ -932,7 +975,7 @@ static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle) obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *) obj_handle->obj_hdr->file_buff; obj_handle->uword_in_bytes = 6; - obj_handle->prod_type = ICP_QAT_AC_C_CPU_TYPE; + obj_handle->prod_type = qat_uclo_get_dev_type(handle); 
obj_handle->prod_rev = PID_MAJOR_REV | (PID_MINOR_REV & handle->hal_handle->revision_id); if (qat_uclo_check_uof_compat(obj_handle)) { @@ -969,23 +1012,435 @@ out_err: return -EFAULT; } -void qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle, - void *addr_ptr, int mem_size) +static int qat_uclo_map_suof_file_hdr(struct icp_qat_fw_loader_handle *handle, + struct icp_qat_suof_filehdr *suof_ptr, + int suof_size) { - qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, ALIGN(mem_size, 4)); + unsigned int check_sum = 0; + unsigned int min_ver_offset = 0; + struct icp_qat_suof_handle *suof_handle = handle->sobj_handle; + + suof_handle->file_id = ICP_QAT_SUOF_FID; + suof_handle->suof_buf = (char *)suof_ptr; + suof_handle->suof_size = suof_size; + min_ver_offset = suof_size - offsetof(struct icp_qat_suof_filehdr, + min_ver); + check_sum = qat_uclo_calc_str_checksum((char *)&suof_ptr->min_ver, + min_ver_offset); + if (check_sum != suof_ptr->check_sum) { + pr_err("QAT: incorrect SUOF checksum\n"); + return -EINVAL; + } + suof_handle->check_sum = suof_ptr->check_sum; + suof_handle->min_ver = suof_ptr->min_ver; + suof_handle->maj_ver = suof_ptr->maj_ver; + suof_handle->fw_type = suof_ptr->fw_type; + return 0; } -int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle, - void *addr_ptr, int mem_size) +static void qat_uclo_map_simg(struct icp_qat_suof_handle *suof_handle, + struct icp_qat_suof_img_hdr *suof_img_hdr, + struct icp_qat_suof_chunk_hdr *suof_chunk_hdr) { - struct icp_qat_uof_filehdr *filehdr; - struct icp_qat_uclo_objhandle *objhdl; + struct icp_qat_simg_ae_mode *ae_mode; + struct icp_qat_suof_objhdr *suof_objhdr; + + suof_img_hdr->simg_buf = (suof_handle->suof_buf + + suof_chunk_hdr->offset + + sizeof(*suof_objhdr)); + suof_img_hdr->simg_len = ((struct icp_qat_suof_objhdr *)(uintptr_t) + (suof_handle->suof_buf + + suof_chunk_hdr->offset))->img_length; + + suof_img_hdr->css_header = suof_img_hdr->simg_buf; + suof_img_hdr->css_key = (suof_img_hdr->css_header + + sizeof(struct icp_qat_css_hdr)); + suof_img_hdr->css_signature = suof_img_hdr->css_key + + ICP_QAT_CSS_FWSK_MODULUS_LEN + + ICP_QAT_CSS_FWSK_EXPONENT_LEN; + suof_img_hdr->css_simg = suof_img_hdr->css_signature + + ICP_QAT_CSS_SIGNATURE_LEN; + + ae_mode = (struct icp_qat_simg_ae_mode *)(suof_img_hdr->css_simg); + suof_img_hdr->ae_mask = ae_mode->ae_mask; + suof_img_hdr->simg_name = (unsigned long)&ae_mode->simg_name; + suof_img_hdr->appmeta_data = (unsigned long)&ae_mode->appmeta_data; + suof_img_hdr->fw_type = ae_mode->fw_type; +} - BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >= - (sizeof(handle->hal_handle->ae_mask) * 8)); +static void +qat_uclo_map_suof_symobjs(struct icp_qat_suof_handle *suof_handle, + struct icp_qat_suof_chunk_hdr *suof_chunk_hdr) +{ + char **sym_str = (char **)&suof_handle->sym_str; + unsigned int *sym_size = &suof_handle->sym_size; + struct icp_qat_suof_strtable *str_table_obj; + + *sym_size = *(unsigned int *)(uintptr_t) + (suof_chunk_hdr->offset + suof_handle->suof_buf); + *sym_str = (char *)(uintptr_t) + (suof_handle->suof_buf + suof_chunk_hdr->offset + + sizeof(str_table_obj->tab_length)); +} - if (!handle || !addr_ptr || mem_size < 24) +static int qat_uclo_check_simg_compat(struct icp_qat_fw_loader_handle *handle, + struct icp_qat_suof_img_hdr *img_hdr) +{ + struct icp_qat_simg_ae_mode *img_ae_mode = NULL; + unsigned int prod_rev, maj_ver, prod_type; + + prod_type = qat_uclo_get_dev_type(handle); + img_ae_mode = (struct icp_qat_simg_ae_mode *)img_hdr->css_simg; + prod_rev = PID_MAJOR_REV | + (PID_MINOR_REV 
& handle->hal_handle->revision_id); + if (img_ae_mode->dev_type != prod_type) { + pr_err("QAT: incompatible product type %x\n", + img_ae_mode->dev_type); return -EINVAL; + } + maj_ver = prod_rev & 0xff; + if ((maj_ver > img_ae_mode->devmax_ver) || + (maj_ver < img_ae_mode->devmin_ver)) { + pr_err("QAT: incompatible device majver 0x%x\n", maj_ver); + return -EINVAL; + } + return 0; +} + +static void qat_uclo_del_suof(struct icp_qat_fw_loader_handle *handle) +{ + struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle; + + kfree(sobj_handle->img_table.simg_hdr); + sobj_handle->img_table.simg_hdr = NULL; + kfree(handle->sobj_handle); + handle->sobj_handle = NULL; +} + +static void qat_uclo_tail_img(struct icp_qat_suof_img_hdr *suof_img_hdr, + unsigned int img_id, unsigned int num_simgs) +{ + struct icp_qat_suof_img_hdr img_header; + + if (img_id != num_simgs - 1) { + memcpy(&img_header, &suof_img_hdr[num_simgs - 1], + sizeof(*suof_img_hdr)); + memcpy(&suof_img_hdr[num_simgs - 1], &suof_img_hdr[img_id], + sizeof(*suof_img_hdr)); + memcpy(&suof_img_hdr[img_id], &img_header, + sizeof(*suof_img_hdr)); + } +} + +static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle, + struct icp_qat_suof_filehdr *suof_ptr, + int suof_size) +{ + struct icp_qat_suof_handle *suof_handle = handle->sobj_handle; + struct icp_qat_suof_chunk_hdr *suof_chunk_hdr = NULL; + struct icp_qat_suof_img_hdr *suof_img_hdr = NULL; + int ret = 0, ae0_img = ICP_QAT_UCLO_MAX_AE; + unsigned int i = 0; + struct icp_qat_suof_img_hdr img_header; + + if (!suof_ptr || (suof_size == 0)) { + pr_err("QAT: input parameter SUOF pointer/size is NULL\n"); + return -EINVAL; + } + if (qat_uclo_check_suof_format(suof_ptr)) + return -EINVAL; + ret = qat_uclo_map_suof_file_hdr(handle, suof_ptr, suof_size); + if (ret) + return ret; + suof_chunk_hdr = (struct icp_qat_suof_chunk_hdr *) + ((uintptr_t)suof_ptr + sizeof(*suof_ptr)); + + qat_uclo_map_suof_symobjs(suof_handle, suof_chunk_hdr); + suof_handle->img_table.num_simgs = suof_ptr->num_chunks - 1; + + if (suof_handle->img_table.num_simgs != 0) { + suof_img_hdr = kzalloc(suof_handle->img_table.num_simgs * + sizeof(img_header), GFP_KERNEL); + if (!suof_img_hdr) + return -ENOMEM; + suof_handle->img_table.simg_hdr = suof_img_hdr; + } + + for (i = 0; i < suof_handle->img_table.num_simgs; i++) { + qat_uclo_map_simg(handle->sobj_handle, &suof_img_hdr[i], + &suof_chunk_hdr[1 + i]); + ret = qat_uclo_check_simg_compat(handle, + &suof_img_hdr[i]); + if (ret) + return ret; + if ((suof_img_hdr[i].ae_mask & 0x1) != 0) + ae0_img = i; + } + qat_uclo_tail_img(suof_img_hdr, ae0_img, + suof_handle->img_table.num_simgs); + return 0; +} + +#define ADD_ADDR(high, low) ((((uint64_t)high) << 32) + low) +#define BITS_IN_DWORD 32 + +static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle, + struct icp_qat_fw_auth_desc *desc) +{ + unsigned int fcu_sts, retry = 0; + u64 bus_addr; + + bus_addr = ADD_ADDR(desc->css_hdr_high, desc->css_hdr_low) + - sizeof(struct icp_qat_auth_chunk); + SET_CAP_CSR(handle, FCU_DRAM_ADDR_HI, (bus_addr >> BITS_IN_DWORD)); + SET_CAP_CSR(handle, FCU_DRAM_ADDR_LO, bus_addr); + SET_CAP_CSR(handle, FCU_CONTROL, FCU_CTRL_CMD_AUTH); + + do { + msleep(FW_AUTH_WAIT_PERIOD); + fcu_sts = GET_CAP_CSR(handle, FCU_STATUS); + if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_FAIL) + goto auth_fail; + if (((fcu_sts >> FCU_STS_AUTHFWLD_POS) & 0x1)) + if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_DONE) + return 0; + } while (retry++ < FW_AUTH_MAX_RETRY); +auth_fail: + pr_err("QAT: 
authentication error (FCU_STATUS = 0x%x),retry = %d\n", + fcu_sts & FCU_AUTH_STS_MASK, retry); + return -EINVAL; +} + +static int qat_uclo_simg_alloc(struct icp_qat_fw_loader_handle *handle, + struct icp_firml_dram_desc *dram_desc, + unsigned int size) +{ + void *vptr; + dma_addr_t ptr; + + vptr = dma_alloc_coherent(&handle->pci_dev->dev, + size, &ptr, GFP_KERNEL); + if (!vptr) + return -ENOMEM; + dram_desc->dram_base_addr_v = vptr; + dram_desc->dram_bus_addr = ptr; + dram_desc->dram_size = size; + return 0; +} + +static void qat_uclo_simg_free(struct icp_qat_fw_loader_handle *handle, + struct icp_firml_dram_desc *dram_desc) +{ + dma_free_coherent(&handle->pci_dev->dev, + (size_t)(dram_desc->dram_size), + (dram_desc->dram_base_addr_v), + dram_desc->dram_bus_addr); + memset(dram_desc, 0, sizeof(*dram_desc)); +} + +static void qat_uclo_ummap_auth_fw(struct icp_qat_fw_loader_handle *handle, + struct icp_qat_fw_auth_desc **desc) +{ + struct icp_firml_dram_desc dram_desc; + + dram_desc.dram_base_addr_v = *desc; + dram_desc.dram_bus_addr = ((struct icp_qat_auth_chunk *) + (*desc))->chunk_bus_addr; + dram_desc.dram_size = ((struct icp_qat_auth_chunk *) + (*desc))->chunk_size; + qat_uclo_simg_free(handle, &dram_desc); +} + +static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle, + char *image, unsigned int size, + struct icp_qat_fw_auth_desc **desc) +{ + struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image; + struct icp_qat_fw_auth_desc *auth_desc; + struct icp_qat_auth_chunk *auth_chunk; + u64 virt_addr, bus_addr, virt_base; + unsigned int length, simg_offset = sizeof(*auth_chunk); + struct icp_firml_dram_desc img_desc; + + if (size > (ICP_QAT_AE_IMG_OFFSET + ICP_QAT_CSS_MAX_IMAGE_LEN)) { + pr_err("QAT: error, input image size overflow %d\n", size); + return -EINVAL; + } + length = (css_hdr->fw_type == CSS_AE_FIRMWARE) ? 
+ ICP_QAT_CSS_AE_SIMG_LEN + simg_offset : + size + ICP_QAT_CSS_FWSK_PAD_LEN + simg_offset; + if (qat_uclo_simg_alloc(handle, &img_desc, length)) { + pr_err("QAT: error, allocate continuous dram fail\n"); + return -ENOMEM; + } + + auth_chunk = img_desc.dram_base_addr_v; + auth_chunk->chunk_size = img_desc.dram_size; + auth_chunk->chunk_bus_addr = img_desc.dram_bus_addr; + virt_base = (uintptr_t)img_desc.dram_base_addr_v + simg_offset; + bus_addr = img_desc.dram_bus_addr + simg_offset; + auth_desc = img_desc.dram_base_addr_v; + auth_desc->css_hdr_high = (unsigned int)(bus_addr >> BITS_IN_DWORD); + auth_desc->css_hdr_low = (unsigned int)bus_addr; + virt_addr = virt_base; + + memcpy((void *)(uintptr_t)virt_addr, image, sizeof(*css_hdr)); + /* pub key */ + bus_addr = ADD_ADDR(auth_desc->css_hdr_high, auth_desc->css_hdr_low) + + sizeof(*css_hdr); + virt_addr = virt_addr + sizeof(*css_hdr); + + auth_desc->fwsk_pub_high = (unsigned int)(bus_addr >> BITS_IN_DWORD); + auth_desc->fwsk_pub_low = (unsigned int)bus_addr; + + memcpy((void *)(uintptr_t)virt_addr, + (void *)(image + sizeof(*css_hdr)), + ICP_QAT_CSS_FWSK_MODULUS_LEN); + /* padding */ + memset((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN), + 0, ICP_QAT_CSS_FWSK_PAD_LEN); + + /* exponent */ + memcpy((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN + + ICP_QAT_CSS_FWSK_PAD_LEN), + (void *)(image + sizeof(*css_hdr) + + ICP_QAT_CSS_FWSK_MODULUS_LEN), + sizeof(unsigned int)); + + /* signature */ + bus_addr = ADD_ADDR(auth_desc->fwsk_pub_high, + auth_desc->fwsk_pub_low) + + ICP_QAT_CSS_FWSK_PUB_LEN; + virt_addr = virt_addr + ICP_QAT_CSS_FWSK_PUB_LEN; + auth_desc->signature_high = (unsigned int)(bus_addr >> BITS_IN_DWORD); + auth_desc->signature_low = (unsigned int)bus_addr; + + memcpy((void *)(uintptr_t)virt_addr, + (void *)(image + sizeof(*css_hdr) + + ICP_QAT_CSS_FWSK_MODULUS_LEN + + ICP_QAT_CSS_FWSK_EXPONENT_LEN), + ICP_QAT_CSS_SIGNATURE_LEN); + + bus_addr = ADD_ADDR(auth_desc->signature_high, + auth_desc->signature_low) + + ICP_QAT_CSS_SIGNATURE_LEN; + virt_addr += ICP_QAT_CSS_SIGNATURE_LEN; + + auth_desc->img_high = (unsigned int)(bus_addr >> BITS_IN_DWORD); + auth_desc->img_low = (unsigned int)bus_addr; + auth_desc->img_len = size - ICP_QAT_AE_IMG_OFFSET; + memcpy((void *)(uintptr_t)virt_addr, + (void *)(image + ICP_QAT_AE_IMG_OFFSET), + auth_desc->img_len); + virt_addr = virt_base; + /* AE firmware */ + if (((struct icp_qat_css_hdr *)(uintptr_t)virt_addr)->fw_type == + CSS_AE_FIRMWARE) { + auth_desc->img_ae_mode_data_high = auth_desc->img_high; + auth_desc->img_ae_mode_data_low = auth_desc->img_low; + bus_addr = ADD_ADDR(auth_desc->img_ae_mode_data_high, + auth_desc->img_ae_mode_data_low) + + sizeof(struct icp_qat_simg_ae_mode); + + auth_desc->img_ae_init_data_high = (unsigned int) + (bus_addr >> BITS_IN_DWORD); + auth_desc->img_ae_init_data_low = (unsigned int)bus_addr; + bus_addr += ICP_QAT_SIMG_AE_INIT_SEQ_LEN; + auth_desc->img_ae_insts_high = (unsigned int) + (bus_addr >> BITS_IN_DWORD); + auth_desc->img_ae_insts_low = (unsigned int)bus_addr; + } else { + auth_desc->img_ae_insts_high = auth_desc->img_high; + auth_desc->img_ae_insts_low = auth_desc->img_low; + } + *desc = auth_desc; + return 0; +} + +static int qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle, + struct icp_qat_fw_auth_desc *desc) +{ + unsigned int i; + unsigned int fcu_sts; + struct icp_qat_simg_ae_mode *virt_addr; + unsigned int fcu_loaded_ae_pos = FCU_LOADED_AE_POS; + + virt_addr = (void *)((uintptr_t)desc + + sizeof(struct 
icp_qat_auth_chunk) + + sizeof(struct icp_qat_css_hdr) + + ICP_QAT_CSS_FWSK_PUB_LEN + + ICP_QAT_CSS_SIGNATURE_LEN); + for (i = 0; i < handle->hal_handle->ae_max_num; i++) { + int retry = 0; + + if (!((virt_addr->ae_mask >> i) & 0x1)) + continue; + if (qat_hal_check_ae_active(handle, i)) { + pr_err("QAT: AE %d is active\n", i); + return -EINVAL; + } + SET_CAP_CSR(handle, FCU_CONTROL, + (FCU_CTRL_CMD_LOAD | (i << FCU_CTRL_AE_POS))); + + do { + msleep(FW_AUTH_WAIT_PERIOD); + fcu_sts = GET_CAP_CSR(handle, FCU_STATUS); + if (((fcu_sts & FCU_AUTH_STS_MASK) == + FCU_STS_LOAD_DONE) && + ((fcu_sts >> fcu_loaded_ae_pos) & (1 << i))) + break; + } while (retry++ < FW_AUTH_MAX_RETRY); + if (retry > FW_AUTH_MAX_RETRY) { + pr_err("QAT: firmware load failed timeout %x\n", retry); + return -EINVAL; + } + } + return 0; +} + +static int qat_uclo_map_suof_obj(struct icp_qat_fw_loader_handle *handle, + void *addr_ptr, int mem_size) +{ + struct icp_qat_suof_handle *suof_handle; + + suof_handle = kzalloc(sizeof(*suof_handle), GFP_KERNEL); + if (!suof_handle) + return -ENOMEM; + handle->sobj_handle = suof_handle; + if (qat_uclo_map_suof(handle, addr_ptr, mem_size)) { + qat_uclo_del_suof(handle); + pr_err("QAT: map SUOF failed\n"); + return -EINVAL; + } + return 0; +} + +int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle, + void *addr_ptr, int mem_size) +{ + struct icp_qat_fw_auth_desc *desc = NULL; + int status = 0; + + if (handle->fw_auth) { + if (!qat_uclo_map_auth_fw(handle, addr_ptr, mem_size, &desc)) + status = qat_uclo_auth_fw(handle, desc); + qat_uclo_ummap_auth_fw(handle, &desc); + } else { + if (handle->pci_dev->device == ADF_C3XXX_PCI_DEVICE_ID) { + pr_err("QAT: C3XXX doesn't support unsigned MMP\n"); + return -EINVAL; + } + qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, mem_size); + } + return status; +} + +static int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle, + void *addr_ptr, int mem_size) +{ + struct icp_qat_uof_filehdr *filehdr; + struct icp_qat_uclo_objhandle *objhdl; + objhdl = kzalloc(sizeof(*objhdl), GFP_KERNEL); if (!objhdl) return -ENOMEM; @@ -993,7 +1448,7 @@ int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle, if (!objhdl->obj_buf) goto out_objbuf_err; filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf; - if (qat_uclo_check_format(filehdr)) + if (qat_uclo_check_uof_format(filehdr)) goto out_objhdr_err; objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr, ICP_QAT_UOF_OBJS); @@ -1016,11 +1471,27 @@ out_objbuf_err: return -ENOMEM; } +int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle, + void *addr_ptr, int mem_size) +{ + BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >= + (sizeof(handle->hal_handle->ae_mask) * 8)); + + if (!handle || !addr_ptr || mem_size < 24) + return -EINVAL; + + return (handle->fw_auth) ? 
+ qat_uclo_map_suof_obj(handle, addr_ptr, mem_size) : + qat_uclo_map_uof_obj(handle, addr_ptr, mem_size); +} + void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle) { struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; unsigned int a; + if (handle->sobj_handle) + qat_uclo_del_suof(handle); if (!obj_handle) return; @@ -1055,7 +1526,7 @@ static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle, encap_page->uwblock[i].words_num - 1) { raddr -= encap_page->uwblock[i].start_addr; raddr *= obj_handle->uword_in_bytes; - memcpy(&uwrd, (void *)(((unsigned long) + memcpy(&uwrd, (void *)(((uintptr_t) encap_page->uwblock[i].micro_words) + raddr), obj_handle->uword_in_bytes); uwrd = uwrd & 0xbffffffffffull; @@ -1147,7 +1618,33 @@ static void qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle, } } -int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle) +static int qat_uclo_wr_suof_img(struct icp_qat_fw_loader_handle *handle) +{ + unsigned int i; + struct icp_qat_fw_auth_desc *desc = NULL; + struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle; + struct icp_qat_suof_img_hdr *simg_hdr = sobj_handle->img_table.simg_hdr; + + for (i = 0; i < sobj_handle->img_table.num_simgs; i++) { + if (qat_uclo_map_auth_fw(handle, + (char *)simg_hdr[i].simg_buf, + (unsigned int) + (simg_hdr[i].simg_len), + &desc)) + goto wr_err; + if (qat_uclo_auth_fw(handle, desc)) + goto wr_err; + if (qat_uclo_load_fw(handle, desc)) + goto wr_err; + qat_uclo_ummap_auth_fw(handle, &desc); + } + return 0; +wr_err: + qat_uclo_ummap_auth_fw(handle, &desc); + return -EINVAL; +} + +static int qat_uclo_wr_uof_img(struct icp_qat_fw_loader_handle *handle) { struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; unsigned int i; @@ -1164,3 +1661,9 @@ int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle) } return 0; } + +int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle) +{ + return (handle->fw_auth) ? qat_uclo_wr_suof_img(handle) : + qat_uclo_wr_uof_img(handle); +} -- cgit v0.10.2 From 1a72d3a6d1d9a08705546eba14f0390c565ccd24 Mon Sep 17 00:00:00 2001 From: Tadeusz Struk Date: Fri, 4 Dec 2015 16:56:28 -0800 Subject: crypto: qat - move isr files to qat common so that they can be reused Move adf_isr.c and adf_vf_isr.c files to the qat_common dir so that they can be reused by all devices. Remove the adf_drv.h files because they are no longer needed. Move the adf_dev_configure() function to qat_common so it can be reused. Also some minor updates to common code for multidevice. 
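For illustration only (this fragment is not part of the patch): a device-specific driver can now lean on the helpers exported from qat_common instead of carrying private copies. The two calls below match the declarations this patch adds to adf_common_drv.h; where exactly a driver invokes them (probe or the init callbacks) is device specific:

    /* hypothetical probe fragment using the shared qat_common helpers */
    ret = qat_crypto_dev_config(accel_dev);  /* replaces the per-device adf_dev_configure() */
    if (ret)
        goto out_err_free_reg;

    ret = adf_isr_resource_alloc(accel_dev); /* now lives in qat_common/adf_isr.c */
    if (ret)
        goto out_err_free_reg;
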
Signed-off-by: Tadeusz Struk Signed-off-by: Herbert Xu diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile index 12f40a3..29c7c53 100644 --- a/drivers/crypto/qat/qat_common/Makefile +++ b/drivers/crypto/qat/qat_common/Makefile @@ -8,6 +8,8 @@ clean-files += qat_rsaprivkey-asn1.c qat_rsaprivkey-asn1.h obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o intel_qat-objs := adf_cfg.o \ + adf_isr.o \ + adf_vf_isr.o \ adf_ctl_drv.o \ adf_dev_mgr.o \ adf_init.o \ diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h index 9786a2e..4d78ec0 100644 --- a/drivers/crypto/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h @@ -67,6 +67,8 @@ #define ADF_C3XXXIOV_PCI_DEVICE_ID 0x19e3 #define ADF_ERRSOU3 (0x3A000 + 0x0C) #define ADF_ERRSOU5 (0x3A000 + 0xD8) +#define ADF_DEVICE_FUSECTL_OFFSET 0x40 +#define ADF_DEVICE_LEGFUSE_OFFSET 0x4C #define ADF_PCI_MAX_BARS 3 #define ADF_DEVICE_NAME_LENGTH 32 #define ADF_ETR_MAX_RINGS_PER_BANK 16 @@ -178,11 +180,11 @@ struct adf_hw_device_data { const char *fw_mmp_name; uint32_t fuses; uint32_t accel_capabilities_mask; + uint32_t instance_id; uint16_t accel_mask; uint16_t ae_mask; uint16_t tx_rings_mask; uint8_t tx_rx_gap; - uint8_t instance_id; uint8_t num_banks; uint8_t num_accel; uint8_t num_logical_accel; diff --git a/drivers/crypto/qat/qat_common/adf_admin.c b/drivers/crypto/qat/qat_common/adf_admin.c index 147d755..eb557f6 100644 --- a/drivers/crypto/qat/qat_common/adf_admin.c +++ b/drivers/crypto/qat/qat_common/adf_admin.c @@ -51,6 +51,7 @@ #include #include #include "adf_accel_devices.h" +#include "adf_common_drv.h" #include "icp_qat_fw_init_admin.h" /* Admin Messages Registers */ @@ -234,7 +235,8 @@ int adf_init_admin_comms(struct adf_accel_dev *accel_dev) struct adf_bar *pmisc = &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)]; void __iomem *csr = pmisc->virt_addr; - void __iomem *mailbox = csr + ADF_DH895XCC_MAILBOX_BASE_OFFSET; + void __iomem *mailbox = (void __iomem *)((uintptr_t)csr + + ADF_DH895XCC_MAILBOX_BASE_OFFSET); u64 reg_val; admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL, diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c index d24cfd4..e78a1d7 100644 --- a/drivers/crypto/qat/qat_common/adf_aer.c +++ b/drivers/crypto/qat/qat_common/adf_aer.c @@ -82,7 +82,7 @@ struct adf_reset_dev_data { struct work_struct reset_work; }; -static void adf_dev_restore(struct adf_accel_dev *accel_dev) +void adf_dev_restore(struct adf_accel_dev *accel_dev) { struct pci_dev *pdev = accel_to_pci_dev(accel_dev); struct pci_dev *parent = pdev->bus->self; diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h index d482022..c03e286 100644 --- a/drivers/crypto/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/qat/qat_common/adf_common_drv.h @@ -54,7 +54,7 @@ #include "icp_qat_hal.h" #define ADF_MAJOR_VERSION 0 -#define ADF_MINOR_VERSION 2 +#define ADF_MINOR_VERSION 6 #define ADF_BUILD_VERSION 0 #define ADF_DRV_VERSION __stringify(ADF_MAJOR_VERSION) "." \ __stringify(ADF_MINOR_VERSION) "." 
\ @@ -143,6 +143,7 @@ int adf_ae_stop(struct adf_accel_dev *accel_dev); int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf); void adf_disable_aer(struct adf_accel_dev *accel_dev); +void adf_dev_restore(struct adf_accel_dev *accel_dev); int adf_init_aer(void); void adf_exit_aer(void); int adf_init_admin_comms(struct adf_accel_dev *accel_dev); @@ -159,6 +160,7 @@ int adf_init_etr_data(struct adf_accel_dev *accel_dev); void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev); int qat_crypto_register(void); int qat_crypto_unregister(void); +int qat_crypto_dev_config(struct adf_accel_dev *accel_dev); struct qat_crypto_instance *qat_crypto_get_instance_node(int node); void qat_crypto_put_instance(struct qat_crypto_instance *inst); void qat_alg_callback(void *resp); @@ -168,6 +170,11 @@ void qat_algs_unregister(void); int qat_asym_algs_register(void); void qat_asym_algs_unregister(void); +int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev); +void adf_isr_resource_free(struct adf_accel_dev *accel_dev); +int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev); +void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev); + int qat_hal_init(struct adf_accel_dev *accel_dev); void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle); void qat_hal_start(struct icp_qat_fw_loader_handle *handle, unsigned char ae, diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c index bd8dfa1..2e6d0c5 100644 --- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c +++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c @@ -198,7 +198,7 @@ static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev, goto out_err; } - params_head = section_head->params; + params_head = section.params; while (params_head) { if (copy_from_user(&key_val, (void __user *)params_head, @@ -342,12 +342,10 @@ static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd, if (ret) return ret; + ret = -ENODEV; accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id); - if (!accel_dev) { - pr_err("QAT: Device %d not found\n", ctl_data->device_id); - ret = -ENODEV; + if (!accel_dev) goto out; - } if (!adf_dev_started(accel_dev)) { dev_info(&GET_DEV(accel_dev), diff --git a/drivers/crypto/qat/qat_common/adf_hw_arbiter.c b/drivers/crypto/qat/qat_common/adf_hw_arbiter.c index 6849422..f267d9e 100644 --- a/drivers/crypto/qat/qat_common/adf_hw_arbiter.c +++ b/drivers/crypto/qat/qat_common/adf_hw_arbiter.c @@ -45,6 +45,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "adf_accel_devices.h" +#include "adf_common_drv.h" #include "adf_transport_internal.h" #define ADF_ARB_NUM 4 @@ -124,19 +125,12 @@ int adf_init_arb(struct adf_accel_dev *accel_dev) } EXPORT_SYMBOL_GPL(adf_init_arb); -/** - * adf_update_ring_arb() - update ring arbitration rgister - * @accel_dev: Pointer to ring data. - * - * Function enables or disables rings for/from arbitration. 
- */ void adf_update_ring_arb(struct adf_etr_ring_data *ring) { WRITE_CSR_ARB_RINGSRVARBEN(ring->bank->csr_addr, ring->bank->bank_number, ring->bank->ring_mask & 0xFF); } -EXPORT_SYMBOL_GPL(adf_update_ring_arb); void adf_exit_arb(struct adf_accel_dev *accel_dev) { diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c index d873eee..ef5575e 100644 --- a/drivers/crypto/qat/qat_common/adf_init.c +++ b/drivers/crypto/qat/qat_common/adf_init.c @@ -62,15 +62,6 @@ static void adf_service_add(struct service_hndl *service) mutex_unlock(&service_lock); } -/** - * adf_service_register() - Register acceleration service in the accel framework - * @service: Pointer to the service - * - * Function adds the acceleration service to the acceleration framework. - * To be used by QAT device specific drivers. - * - * Return: 0 on success, error code otherwise. - */ int adf_service_register(struct service_hndl *service) { service->init_status = 0; @@ -78,7 +69,6 @@ int adf_service_register(struct service_hndl *service) adf_service_add(service); return 0; } -EXPORT_SYMBOL_GPL(adf_service_register); static void adf_service_remove(struct service_hndl *service) { @@ -87,15 +77,6 @@ static void adf_service_remove(struct service_hndl *service) mutex_unlock(&service_lock); } -/** - * adf_service_unregister() - Unregister acceleration service from the framework - * @service: Pointer to the service - * - * Function remove the acceleration service from the acceleration framework. - * To be used by QAT device specific drivers. - * - * Return: 0 on success, error code otherwise. - */ int adf_service_unregister(struct service_hndl *service) { if (service->init_status || service->start_status) { @@ -105,7 +86,6 @@ int adf_service_unregister(struct service_hndl *service) adf_service_remove(service); return 0; } -EXPORT_SYMBOL_GPL(adf_service_unregister); /** * adf_dev_init() - Init data structures and services for the given accel device @@ -366,6 +346,7 @@ void adf_dev_shutdown(struct adf_accel_dev *accel_dev) hw_data->disable_iov(accel_dev); adf_cleanup_etr_data(accel_dev); + adf_dev_restore(accel_dev); } EXPORT_SYMBOL_GPL(adf_dev_shutdown); diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c new file mode 100644 index 0000000..b81f79a --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_isr.c @@ -0,0 +1,348 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#include +#include +#include +#include +#include +#include +#include +#include "adf_accel_devices.h" +#include "adf_common_drv.h" +#include "adf_cfg.h" +#include "adf_cfg_strings.h" +#include "adf_cfg_common.h" +#include "adf_transport_access_macros.h" +#include "adf_transport_internal.h" + +static int adf_enable_msix(struct adf_accel_dev *accel_dev) +{ + struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + u32 msix_num_entries = 1; + + /* If SR-IOV is disabled, add entries for each bank */ + if (!accel_dev->pf.vf_info) { + int i; + + msix_num_entries += hw_data->num_banks; + for (i = 0; i < msix_num_entries; i++) + pci_dev_info->msix_entries.entries[i].entry = i; + } else { + pci_dev_info->msix_entries.entries[0].entry = + hw_data->num_banks; + } + + if (pci_enable_msix_exact(pci_dev_info->pci_dev, + pci_dev_info->msix_entries.entries, + msix_num_entries)) { + dev_err(&GET_DEV(accel_dev), "Failed to enable MSI-X IRQ(s)\n"); + return -EFAULT; + } + return 0; +} + +static void adf_disable_msix(struct adf_accel_pci *pci_dev_info) +{ + pci_disable_msix(pci_dev_info->pci_dev); +} + +static irqreturn_t adf_msix_isr_bundle(int irq, void *bank_ptr) +{ + struct adf_etr_bank_data *bank = bank_ptr; + + WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number, 0); + tasklet_hi_schedule(&bank->resp_handler); + return IRQ_HANDLED; +} + +static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr) +{ + struct adf_accel_dev *accel_dev = dev_ptr; + +#ifdef CONFIG_PCI_IOV + /* If SR-IOV is enabled (vf_info is non-NULL), check for VF->PF ints */ + if (accel_dev->pf.vf_info) { + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_bar *pmisc = + &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)]; + void __iomem *pmisc_bar_addr = pmisc->virt_addr; + u32 vf_mask; + + /* Get the interrupt sources triggered by VFs */ + vf_mask = ((ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU5) & + 0x0000FFFF) << 16) | + ((ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU3) & + 0x01FFFE00) >> 9); + + if (vf_mask) { + struct adf_accel_vf_info *vf_info; + bool irq_handled = false; + int i; + + /* Disable VF2PF interrupts for VFs with pending ints */ + adf_disable_vf2pf_interrupts(accel_dev, vf_mask); + + /* + * Schedule tasklets to handle VF2PF interrupt BHs + * unless the VF is 
malicious and is attempting to + * flood the host OS with VF2PF interrupts. + */ + for_each_set_bit(i, (const unsigned long *)&vf_mask, + (sizeof(vf_mask) * BITS_PER_BYTE)) { + vf_info = accel_dev->pf.vf_info + i; + + if (!__ratelimit(&vf_info->vf2pf_ratelimit)) { + dev_info(&GET_DEV(accel_dev), + "Too many ints from VF%d\n", + vf_info->vf_nr + 1); + continue; + } + + /* Tasklet will re-enable ints from this VF */ + tasklet_hi_schedule(&vf_info->vf2pf_bh_tasklet); + irq_handled = true; + } + + if (irq_handled) + return IRQ_HANDLED; + } + } +#endif /* CONFIG_PCI_IOV */ + + dev_dbg(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n", + accel_dev->accel_id); + + return IRQ_NONE; +} + +static int adf_request_irqs(struct adf_accel_dev *accel_dev) +{ + struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct msix_entry *msixe = pci_dev_info->msix_entries.entries; + struct adf_etr_data *etr_data = accel_dev->transport; + int ret, i = 0; + char *name; + + /* Request msix irq for all banks unless SR-IOV enabled */ + if (!accel_dev->pf.vf_info) { + for (i = 0; i < hw_data->num_banks; i++) { + struct adf_etr_bank_data *bank = &etr_data->banks[i]; + unsigned int cpu, cpus = num_online_cpus(); + + name = *(pci_dev_info->msix_entries.names + i); + snprintf(name, ADF_MAX_MSIX_VECTOR_NAME, + "qat%d-bundle%d", accel_dev->accel_id, i); + ret = request_irq(msixe[i].vector, + adf_msix_isr_bundle, 0, name, bank); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "failed to enable irq %d for %s\n", + msixe[i].vector, name); + return ret; + } + + cpu = ((accel_dev->accel_id * hw_data->num_banks) + + i) % cpus; + irq_set_affinity_hint(msixe[i].vector, + get_cpu_mask(cpu)); + } + } + + /* Request msix irq for AE */ + name = *(pci_dev_info->msix_entries.names + i); + snprintf(name, ADF_MAX_MSIX_VECTOR_NAME, + "qat%d-ae-cluster", accel_dev->accel_id); + ret = request_irq(msixe[i].vector, adf_msix_isr_ae, 0, name, accel_dev); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "failed to enable irq %d, for %s\n", + msixe[i].vector, name); + return ret; + } + return ret; +} + +static void adf_free_irqs(struct adf_accel_dev *accel_dev) +{ + struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct msix_entry *msixe = pci_dev_info->msix_entries.entries; + struct adf_etr_data *etr_data = accel_dev->transport; + int i = 0; + + if (pci_dev_info->msix_entries.num_entries > 1) { + for (i = 0; i < hw_data->num_banks; i++) { + irq_set_affinity_hint(msixe[i].vector, NULL); + free_irq(msixe[i].vector, &etr_data->banks[i]); + } + } + irq_set_affinity_hint(msixe[i].vector, NULL); + free_irq(msixe[i].vector, accel_dev); +} + +static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev) +{ + int i; + char **names; + struct msix_entry *entries; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + u32 msix_num_entries = 1; + + /* If SR-IOV is disabled (vf_info is NULL), add entries for each bank */ + if (!accel_dev->pf.vf_info) + msix_num_entries += hw_data->num_banks; + + entries = kzalloc_node(msix_num_entries * sizeof(*entries), + GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev))); + if (!entries) + return -ENOMEM; + + names = kcalloc(msix_num_entries, sizeof(char *), GFP_KERNEL); + if (!names) { + kfree(entries); + return -ENOMEM; + } + for (i = 0; i < msix_num_entries; i++) { + *(names + i) = kzalloc(ADF_MAX_MSIX_VECTOR_NAME, GFP_KERNEL); + if (!(*(names + i))) + 
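/* allocation failed part-way; the err path below frees every slot (kfree(NULL) is a no-op) */ +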
goto err; + } + accel_dev->accel_pci_dev.msix_entries.num_entries = msix_num_entries; + accel_dev->accel_pci_dev.msix_entries.entries = entries; + accel_dev->accel_pci_dev.msix_entries.names = names; + return 0; +err: + for (i = 0; i < msix_num_entries; i++) + kfree(*(names + i)); + kfree(entries); + kfree(names); + return -ENOMEM; +} + +static void adf_isr_free_msix_entry_table(struct adf_accel_dev *accel_dev) +{ + char **names = accel_dev->accel_pci_dev.msix_entries.names; + int i; + + kfree(accel_dev->accel_pci_dev.msix_entries.entries); + for (i = 0; i < accel_dev->accel_pci_dev.msix_entries.num_entries; i++) + kfree(*(names + i)); + kfree(names); +} + +static int adf_setup_bh(struct adf_accel_dev *accel_dev) +{ + struct adf_etr_data *priv_data = accel_dev->transport; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + int i; + + for (i = 0; i < hw_data->num_banks; i++) + tasklet_init(&priv_data->banks[i].resp_handler, + adf_response_handler, + (unsigned long)&priv_data->banks[i]); + return 0; +} + +static void adf_cleanup_bh(struct adf_accel_dev *accel_dev) +{ + struct adf_etr_data *priv_data = accel_dev->transport; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + int i; + + for (i = 0; i < hw_data->num_banks; i++) { + tasklet_disable(&priv_data->banks[i].resp_handler); + tasklet_kill(&priv_data->banks[i].resp_handler); + } +} + +/** + * adf_isr_resource_free() - Free IRQ for acceleration device + * @accel_dev: Pointer to acceleration device. + * + * Function frees interrupts for acceleration device. + */ +void adf_isr_resource_free(struct adf_accel_dev *accel_dev) +{ + adf_free_irqs(accel_dev); + adf_cleanup_bh(accel_dev); + adf_disable_msix(&accel_dev->accel_pci_dev); + adf_isr_free_msix_entry_table(accel_dev); +} +EXPORT_SYMBOL_GPL(adf_isr_resource_free); + +/** + * adf_isr_resource_alloc() - Allocate IRQ for acceleration device + * @accel_dev: Pointer to acceleration device. + * + * Function allocates interrupts for acceleration device. + * + * Return: 0 on success, error code otherwise. + */ +int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev) +{ + int ret; + + ret = adf_isr_alloc_msix_entry_table(accel_dev); + if (ret) + return ret; + if (adf_enable_msix(accel_dev)) + goto err_out; + + if (adf_setup_bh(accel_dev)) + goto err_out; + + if (adf_request_irqs(accel_dev)) + goto err_out; + + return 0; +err_out: + adf_isr_resource_free(accel_dev); + return -EFAULT; +} +EXPORT_SYMBOL_GPL(adf_isr_resource_alloc); diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c index 5fdbad8..b3875fd 100644 --- a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c +++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c @@ -45,8 +45,6 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#include -#include #include #include "adf_accel_devices.h" #include "adf_common_drv.h" @@ -58,12 +56,6 @@ #define ADF_DH895XCC_ERRMSK5 (ADF_DH895XCC_EP_OFFSET + 0xDC) #define ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask) (vf_mask >> 16) -/** - * adf_enable_pf2vf_interrupts() - Enable PF to VF interrupts - * @accel_dev: Pointer to acceleration device. 
- * - * Function enables PF to VF interrupts - */ void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev) { struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev; @@ -73,14 +65,7 @@ void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev) ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x0); } -EXPORT_SYMBOL_GPL(adf_enable_pf2vf_interrupts); -/** - * adf_disable_pf2vf_interrupts() - Disable PF to VF interrupts - * @accel_dev: Pointer to acceleration device. - * - * Function disables PF to VF interrupts - */ void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev) { struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev; @@ -90,7 +75,6 @@ void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev) ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x2); } -EXPORT_SYMBOL_GPL(adf_disable_pf2vf_interrupts); void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask) @@ -116,12 +100,6 @@ void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, } } -/** - * adf_disable_pf2vf_interrupts() - Disable VF to PF interrupts - * @accel_dev: Pointer to acceleration device. - * - * Function disables VF to PF interrupts - */ void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; @@ -144,7 +122,6 @@ void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask) ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg); } } -EXPORT_SYMBOL_GPL(adf_disable_vf2pf_interrupts); static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr) { diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c index 3865ae8..eff00cd 100644 --- a/drivers/crypto/qat/qat_common/adf_transport.c +++ b/drivers/crypto/qat/qat_common/adf_transport.c @@ -122,7 +122,7 @@ int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg) return -EAGAIN; } spin_lock_bh(&ring->lock); - memcpy(ring->base_addr + ring->tail, msg, + memcpy((void *)((uintptr_t)ring->base_addr + ring->tail), msg, ADF_MSG_SIZE_TO_BYTES(ring->msg_size)); ring->tail = adf_modulo(ring->tail + @@ -137,7 +137,7 @@ int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg) static int adf_handle_response(struct adf_etr_ring_data *ring) { uint32_t msg_counter = 0; - uint32_t *msg = (uint32_t *)(ring->base_addr + ring->head); + uint32_t *msg = (uint32_t *)((uintptr_t)ring->base_addr + ring->head); while (*msg != ADF_RING_EMPTY_SIG) { ring->callback((uint32_t *)msg); @@ -146,7 +146,7 @@ static int adf_handle_response(struct adf_etr_ring_data *ring) ADF_MSG_SIZE_TO_BYTES(ring->msg_size), ADF_RING_SIZE_MODULO(ring->ring_size)); msg_counter++; - msg = (uint32_t *)(ring->base_addr + ring->head); + msg = (uint32_t *)((uintptr_t)ring->base_addr + ring->head); } if (msg_counter > 0) { WRITE_CSR_RING_HEAD(ring->bank->csr_addr, @@ -342,18 +342,7 @@ static void adf_ring_response_handler(struct adf_etr_bank_data *bank) } } -/** - * adf_response_handler() - Bottom half handler response handler - * @bank_addr: Address of a ring bank for with the BH was scheduled. - * - * Function is the bottom half handler for the response from acceleration - * device. There is one handler for every ring bank. Function checks all - * communication rings in the bank. - * To be used by QAT device specific drivers. 
- * - * Return: void - */ -void adf_response_handler(unsigned long bank_addr) +void adf_response_handler(uintptr_t bank_addr) { struct adf_etr_bank_data *bank = (void *)bank_addr; @@ -362,7 +351,6 @@ void adf_response_handler(unsigned long bank_addr) WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number, bank->irq_mask); } -EXPORT_SYMBOL_GPL(adf_response_handler); static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev, const char *section, const char *format, diff --git a/drivers/crypto/qat/qat_common/adf_transport_internal.h b/drivers/crypto/qat/qat_common/adf_transport_internal.h index a486962..bb88336 100644 --- a/drivers/crypto/qat/qat_common/adf_transport_internal.h +++ b/drivers/crypto/qat/qat_common/adf_transport_internal.h @@ -91,7 +91,7 @@ struct adf_etr_data { struct dentry *debug; }; -void adf_response_handler(unsigned long bank_addr); +void adf_response_handler(uintptr_t bank_addr); #ifdef CONFIG_DEBUG_FS #include int adf_bank_debugfs_add(struct adf_etr_bank_data *bank); diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c new file mode 100644 index 0000000..09427b3 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_vf_isr.c @@ -0,0 +1,280 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ +#include +#include +#include +#include +#include +#include +#include +#include "adf_accel_devices.h" +#include "adf_common_drv.h" +#include "adf_cfg.h" +#include "adf_cfg_strings.h" +#include "adf_cfg_common.h" +#include "adf_transport_access_macros.h" +#include "adf_transport_internal.h" +#include "adf_pf2vf_msg.h" + +#define ADF_VINTSOU_OFFSET 0x204 +#define ADF_VINTSOU_BUN BIT(0) +#define ADF_VINTSOU_PF2VF BIT(1) + +static int adf_enable_msi(struct adf_accel_dev *accel_dev) +{ + struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev; + int stat = pci_enable_msi(pci_dev_info->pci_dev); + + if (stat) { + dev_err(&GET_DEV(accel_dev), + "Failed to enable MSI interrupts\n"); + return stat; + } + + accel_dev->vf.irq_name = kzalloc(ADF_MAX_MSIX_VECTOR_NAME, GFP_KERNEL); + if (!accel_dev->vf.irq_name) + return -ENOMEM; + + return stat; +} + +static void adf_disable_msi(struct adf_accel_dev *accel_dev) +{ + struct pci_dev *pdev = accel_to_pci_dev(accel_dev); + + kfree(accel_dev->vf.irq_name); + pci_disable_msi(pdev); +} + +static void adf_pf2vf_bh_handler(void *data) +{ + struct adf_accel_dev *accel_dev = data; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_bar *pmisc = + &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)]; + void __iomem *pmisc_bar_addr = pmisc->virt_addr; + u32 msg; + + /* Read the message from PF */ + msg = ADF_CSR_RD(pmisc_bar_addr, hw_data->get_pf2vf_offset(0)); + + if (!(msg & ADF_PF2VF_MSGORIGIN_SYSTEM)) + /* Ignore legacy non-system (non-kernel) PF2VF messages */ + goto err; + + switch ((msg & ADF_PF2VF_MSGTYPE_MASK) >> ADF_PF2VF_MSGTYPE_SHIFT) { + case ADF_PF2VF_MSGTYPE_RESTARTING: + dev_dbg(&GET_DEV(accel_dev), + "Restarting msg received from PF 0x%x\n", msg); + adf_dev_stop(accel_dev); + break; + case ADF_PF2VF_MSGTYPE_VERSION_RESP: + dev_dbg(&GET_DEV(accel_dev), + "Version resp received from PF 0x%x\n", msg); + accel_dev->vf.pf_version = + (msg & ADF_PF2VF_VERSION_RESP_VERS_MASK) >> + ADF_PF2VF_VERSION_RESP_VERS_SHIFT; + accel_dev->vf.compatible = + (msg & ADF_PF2VF_VERSION_RESP_RESULT_MASK) >> + ADF_PF2VF_VERSION_RESP_RESULT_SHIFT; + complete(&accel_dev->vf.iov_msg_completion); + break; + default: + goto err; + } + + /* To ack, clear the PF2VFINT bit */ + msg &= ~BIT(0); + ADF_CSR_WR(pmisc_bar_addr, hw_data->get_pf2vf_offset(0), msg); + + /* Re-enable PF2VF interrupts */ + adf_enable_pf2vf_interrupts(accel_dev); + return; +err: + dev_err(&GET_DEV(accel_dev), + "Unknown message from PF (0x%x); leaving PF2VF ints disabled\n", + msg); +} + +static int adf_setup_pf2vf_bh(struct adf_accel_dev *accel_dev) +{ + tasklet_init(&accel_dev->vf.pf2vf_bh_tasklet, + (void *)adf_pf2vf_bh_handler, (unsigned long)accel_dev); + + mutex_init(&accel_dev->vf.vf2pf_lock); + return 0; +} + +static void adf_cleanup_pf2vf_bh(struct adf_accel_dev *accel_dev) +{ + tasklet_disable(&accel_dev->vf.pf2vf_bh_tasklet); + tasklet_kill(&accel_dev->vf.pf2vf_bh_tasklet); + mutex_destroy(&accel_dev->vf.vf2pf_lock); +} + +static irqreturn_t adf_isr(int irq, void *privdata) +{ + struct adf_accel_dev *accel_dev = privdata; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_bar *pmisc = + &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)]; + void __iomem *pmisc_bar_addr = pmisc->virt_addr; + u32 v_int; + + /* Read VF INT source CSR to determine the source of VF interrupt */ + v_int = ADF_CSR_RD(pmisc_bar_addr, ADF_VINTSOU_OFFSET); + + /* Check for PF2VF interrupt */ + if (v_int & ADF_VINTSOU_PF2VF) { + /* Disable PF to VF interrupt */ + 
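/* The PF2VF source stays pending until the bottom half acks the message, so keep it masked here; adf_pf2vf_bh_handler() re-enables PF2VF interrupts once the ack is done. */ +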
adf_disable_pf2vf_interrupts(accel_dev); + + /* Schedule tasklet to handle interrupt BH */ + tasklet_hi_schedule(&accel_dev->vf.pf2vf_bh_tasklet); + return IRQ_HANDLED; + } + + /* Check bundle interrupt */ + if (v_int & ADF_VINTSOU_BUN) { + struct adf_etr_data *etr_data = accel_dev->transport; + struct adf_etr_bank_data *bank = &etr_data->banks[0]; + + /* Disable Flag and Coalesce Ring Interrupts */ + WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number, + 0); + tasklet_hi_schedule(&bank->resp_handler); + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + +static int adf_request_msi_irq(struct adf_accel_dev *accel_dev) +{ + struct pci_dev *pdev = accel_to_pci_dev(accel_dev); + unsigned int cpu; + int ret; + + snprintf(accel_dev->vf.irq_name, ADF_MAX_MSIX_VECTOR_NAME, + "qat_%02x:%02d.%02d", pdev->bus->number, PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn)); + ret = request_irq(pdev->irq, adf_isr, 0, accel_dev->vf.irq_name, + (void *)accel_dev); + if (ret) { + dev_err(&GET_DEV(accel_dev), "failed to enable irq for %s\n", + accel_dev->vf.irq_name); + return ret; + } + cpu = accel_dev->accel_id % num_online_cpus(); + irq_set_affinity_hint(pdev->irq, get_cpu_mask(cpu)); + + return ret; +} + +static int adf_setup_bh(struct adf_accel_dev *accel_dev) +{ + struct adf_etr_data *priv_data = accel_dev->transport; + + tasklet_init(&priv_data->banks[0].resp_handler, adf_response_handler, + (unsigned long)priv_data->banks); + return 0; +} + +static void adf_cleanup_bh(struct adf_accel_dev *accel_dev) +{ + struct adf_etr_data *priv_data = accel_dev->transport; + + tasklet_disable(&priv_data->banks[0].resp_handler); + tasklet_kill(&priv_data->banks[0].resp_handler); +} + +/** + * adf_vf_isr_resource_free() - Free IRQ for acceleration device + * @accel_dev: Pointer to acceleration device. + * + * Function frees interrupts for acceleration device virtual function. + */ +void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev) +{ + struct pci_dev *pdev = accel_to_pci_dev(accel_dev); + + irq_set_affinity_hint(pdev->irq, NULL); + free_irq(pdev->irq, (void *)accel_dev); + adf_cleanup_bh(accel_dev); + adf_cleanup_pf2vf_bh(accel_dev); + adf_disable_msi(accel_dev); +} +EXPORT_SYMBOL_GPL(adf_vf_isr_resource_free); + +/** + * adf_vf_isr_resource_alloc() - Allocate IRQ for acceleration device + * @accel_dev: Pointer to acceleration device. + * + * Function allocates interrupts for acceleration device virtual function. + * + * Return: 0 on success, error code otherwise. + */ +int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev) +{ + if (adf_enable_msi(accel_dev)) + goto err_out; + + if (adf_setup_pf2vf_bh(accel_dev)) + goto err_out; + + if (adf_setup_bh(accel_dev)) + goto err_out; + + if (adf_request_msi_irq(accel_dev)) + goto err_out; + + return 0; +err_out: + adf_vf_isr_resource_free(accel_dev); + return -EFAULT; +} +EXPORT_SYMBOL_GPL(adf_vf_isr_resource_alloc); diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c index 9425402..4d0c65b 100644 --- a/drivers/crypto/qat/qat_common/qat_crypto.c +++ b/drivers/crypto/qat/qat_common/qat_crypto.c @@ -49,6 +49,7 @@ #include "adf_accel_devices.h" #include "adf_common_drv.h" #include "adf_transport.h" +#include "adf_transport_access_macros.h" #include "adf_cfg.h" #include "adf_cfg_strings.h" #include "qat_crypto.h" @@ -159,6 +160,97 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node) return inst; } +/** + * qat_crypto_dev_config() - create dev config required to create crypto inst. 
+ * + * @accel_dev: Pointer to acceleration device. + * + * Function creates device configuration required to create crypto instances + * + * Return: 0 on success, error code otherwise. + */ +int qat_crypto_dev_config(struct adf_accel_dev *accel_dev) +{ + int cpus = num_online_cpus(); + int banks = GET_MAX_BANKS(accel_dev); + int instances = min(cpus, banks); + char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; + int i; + unsigned long val; + + if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC)) + goto err; + if (adf_cfg_section_add(accel_dev, "Accelerator0")) + goto err; + for (i = 0; i < instances; i++) { + val = i; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i); + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, (void *)&val, ADF_DEC)) + goto err; + + snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY, + i); + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, (void *)&val, ADF_DEC)) + goto err; + + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i); + val = 128; + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, (void *)&val, ADF_DEC)) + goto err; + + val = 512; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i); + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, (void *)&val, ADF_DEC)) + goto err; + + val = 0; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i); + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, (void *)&val, ADF_DEC)) + goto err; + + val = 2; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i); + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, (void *)&val, ADF_DEC)) + goto err; + + val = 8; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i); + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, (void *)&val, ADF_DEC)) + goto err; + + val = 10; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i); + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, (void *)&val, ADF_DEC)) + goto err; + + val = ADF_COALESCING_DEF_TIME; + snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i); + if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0", + key, (void *)&val, ADF_DEC)) + goto err; + } + + val = i; + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + ADF_NUM_CY, (void *)&val, ADF_DEC)) + goto err; + + set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); + return 0; +err: + dev_err(&GET_DEV(accel_dev), "Failed to start QAT accel dev\n"); + return -EINVAL; +} +EXPORT_SYMBOL_GPL(qat_crypto_dev_config); + static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev) { int i; diff --git a/drivers/crypto/qat/qat_dh895xcc/Makefile b/drivers/crypto/qat/qat_dh895xcc/Makefile index 8c79c54..180a00e 100644 --- a/drivers/crypto/qat/qat_dh895xcc/Makefile +++ b/drivers/crypto/qat/qat_dh895xcc/Makefile @@ -1,5 +1,3 @@ ccflags-y := -I$(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o -qat_dh895xcc-objs := adf_drv.o \ - adf_isr.o \ - adf_dh895xcc_hw_data.o +qat_dh895xcc-objs := adf_drv.o adf_dh895xcc_hw_data.o diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c index ff54257..6e1d5e1 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c +++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c @@ -48,7 +48,6 @@ #include #include #include "adf_dh895xcc_hw_data.h" -#include "adf_drv.h" /* Worker thread to service arbiter mappings based on dev 
SKUs */ static const uint32_t thrd_to_arb_map_sku4[] = { @@ -143,8 +142,8 @@ static enum dev_sku_info get_sku(struct adf_hw_device_data *self) return DEV_SKU_UNKNOWN; } -void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev, - uint32_t const **arb_map_config) +static void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev, + u32 const **arb_map_config) { switch (accel_dev->accel_pci_dev.sku) { case DEV_SKU_1: diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h index 88dffb2..72eacfd 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h +++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h @@ -53,7 +53,6 @@ #define ADF_DH895XCC_ETR_BAR 2 #define ADF_DH895XCC_RX_RINGS_OFFSET 8 #define ADF_DH895XCC_TX_RINGS_MASK 0xFF -#define ADF_DH895XCC_FUSECTL_OFFSET 0x40 #define ADF_DH895XCC_FUSECTL_SKU_MASK 0x300000 #define ADF_DH895XCC_FUSECTL_SKU_SHIFT 20 #define ADF_DH895XCC_FUSECTL_SKU_1 0x0 @@ -65,7 +64,6 @@ #define ADF_DH895XCC_ACCELERATORS_REG_OFFSET 13 #define ADF_DH895XCC_ACCELERATORS_MASK 0x3F #define ADF_DH895XCC_ACCELENGINES_MASK 0xFFF -#define ADF_DH895XCC_LEGFUSE_OFFSET 0x4C #define ADF_DH895XCC_ETR_MAX_BANKS 32 #define ADF_DH895XCC_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28) #define ADF_DH895XCC_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30) @@ -80,11 +78,12 @@ #define ADF_DH895XCC_CERRSSMSH(i) (i * 0x4000 + 0x10) #define ADF_DH895XCC_ERRSSMSH_EN BIT(3) -#define ADF_DH895XCC_ERRSOU3 (0x3A000 + 0x00C) -#define ADF_DH895XCC_ERRSOU5 (0x3A000 + 0x0D8) #define ADF_DH895XCC_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04)) #define ADF_DH895XCC_VINTMSK_OFFSET(i) (0x3A000 + 0x200 + ((i) * 0x04)) /* FW names */ #define ADF_DH895XCC_FW "qat_895xcc.bin" #define ADF_DH895XCC_MMP "qat_mmp.bin" + +void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data); +void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data); #endif diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c index f933f7d..a8c4b92 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c +++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c @@ -60,11 +60,7 @@ #include #include #include -#include #include "adf_dh895xcc_hw_data.h" -#include "adf_drv.h" - -static const char adf_driver_name[] = ADF_DH895XCC_DEVICE_NAME; #define ADF_SYSTEM_DEVICE(device_id) \ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} @@ -80,7 +76,7 @@ static void adf_remove(struct pci_dev *dev); static struct pci_driver adf_driver = { .id_table = adf_pci_tbl, - .name = adf_driver_name, + .name = ADF_DH895XCC_DEVICE_NAME, .probe = adf_probe, .remove = adf_remove, .sriov_configure = adf_sriov_configure, @@ -120,87 +116,6 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) adf_devmgr_rm_dev(accel_dev, NULL); } -static int adf_dev_configure(struct adf_accel_dev *accel_dev) -{ - int cpus = num_online_cpus(); - int banks = GET_MAX_BANKS(accel_dev); - int instances = min(cpus, banks); - char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; - int i; - unsigned long val; - - if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC)) - goto err; - if (adf_cfg_section_add(accel_dev, "Accelerator0")) - goto err; - for (i = 0; i < instances; i++) { - val = i; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i); - if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, (void *)&val, ADF_DEC)) - goto err; - - snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY, - i); - if (adf_cfg_add_key_value_param(accel_dev, 
ADF_KERNEL_SEC, - key, (void *)&val, ADF_DEC)) - goto err; - - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i); - val = 128; - if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, (void *)&val, ADF_DEC)) - goto err; - - val = 512; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i); - if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, (void *)&val, ADF_DEC)) - goto err; - - val = 0; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i); - if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, (void *)&val, ADF_DEC)) - goto err; - - val = 2; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i); - if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, (void *)&val, ADF_DEC)) - goto err; - - val = 8; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i); - if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, (void *)&val, ADF_DEC)) - goto err; - - val = 10; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i); - if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, (void *)&val, ADF_DEC)) - goto err; - - val = ADF_COALESCING_DEF_TIME; - snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i); - if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0", - key, (void *)&val, ADF_DEC)) - goto err; - } - - val = i; - if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - ADF_NUM_CY, (void *)&val, ADF_DEC)) - goto err; - - set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); - return 0; -err: - dev_err(&GET_DEV(accel_dev), "Failed to start QAT accel dev\n"); - return -EINVAL; -} - static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct adf_accel_dev *accel_dev; @@ -255,7 +170,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) accel_dev->hw_device = hw_data; adf_init_hw_data_dh895xcc(accel_dev->hw_device); pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid); - pci_read_config_dword(pdev, ADF_DH895XCC_FUSECTL_OFFSET, + pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET, &hw_data->fuses); /* Get Accelerators and Accelerators Engines masks */ @@ -310,13 +225,13 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); } - if (pci_request_regions(pdev, adf_driver_name)) { + if (pci_request_regions(pdev, ADF_DH895XCC_DEVICE_NAME)) { ret = -EFAULT; goto out_err_disable; } /* Read accelerator capabilities mask */ - pci_read_config_dword(pdev, ADF_DH895XCC_LEGFUSE_OFFSET, + pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET, &hw_data->accel_capabilities_mask); /* Find and map all the device's BARS */ @@ -351,7 +266,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_err_free_reg; } - ret = adf_dev_configure(accel_dev); + ret = qat_crypto_dev_config(accel_dev); if (ret) goto out_err_free_reg; diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.h b/drivers/crypto/qat/qat_dh895xcc/adf_drv.h deleted file mode 100644 index 85ff245..0000000 --- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.h +++ /dev/null @@ -1,58 +0,0 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. 
- This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ -#ifndef ADF_DH895x_DRV_H_ -#define ADF_DH895x_DRV_H_ -#include -#include - -void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data); -void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data); -int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev); -void adf_isr_resource_free(struct adf_accel_dev *accel_dev); -void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev, - uint32_t const **arb_map_config); -#endif diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c deleted file mode 100644 index 5570f78..0000000 --- a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c +++ /dev/null @@ -1,332 +0,0 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. 
- Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "adf_drv.h" -#include "adf_dh895xcc_hw_data.h" - -static int adf_enable_msix(struct adf_accel_dev *accel_dev) -{ - struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev; - struct adf_hw_device_data *hw_data = accel_dev->hw_device; - u32 msix_num_entries = 1; - - /* If SR-IOV is disabled, add entries for each bank */ - if (!accel_dev->pf.vf_info) { - int i; - - msix_num_entries += hw_data->num_banks; - for (i = 0; i < msix_num_entries; i++) - pci_dev_info->msix_entries.entries[i].entry = i; - } else { - pci_dev_info->msix_entries.entries[0].entry = - hw_data->num_banks; - } - - if (pci_enable_msix_exact(pci_dev_info->pci_dev, - pci_dev_info->msix_entries.entries, - msix_num_entries)) { - dev_err(&GET_DEV(accel_dev), "Failed to enable MSI-X IRQ(s)\n"); - return -EFAULT; - } - return 0; -} - -static void adf_disable_msix(struct adf_accel_pci *pci_dev_info) -{ - pci_disable_msix(pci_dev_info->pci_dev); -} - -static irqreturn_t adf_msix_isr_bundle(int irq, void *bank_ptr) -{ - struct adf_etr_bank_data *bank = bank_ptr; - - WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number, 0); - tasklet_hi_schedule(&bank->resp_handler); - return IRQ_HANDLED; -} - -static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr) -{ - struct adf_accel_dev *accel_dev = dev_ptr; - -#ifdef CONFIG_PCI_IOV - /* If SR-IOV is enabled (vf_info is non-NULL), check for VF->PF ints */ - if (accel_dev->pf.vf_info) { - void __iomem *pmisc_bar_addr = - (&GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR])->virt_addr; - u32 vf_mask; - - /* Get the interrupt sources triggered by VFs */ - vf_mask = ((ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCC_ERRSOU5) & - 0x0000FFFF) << 16) | - ((ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCC_ERRSOU3) & - 0x01FFFE00) >> 9); - - if (vf_mask) { - struct adf_accel_vf_info *vf_info; - bool irq_handled = false; - int i; - - /* Disable VF2PF interrupts for VFs with pending ints */ - 
adf_disable_vf2pf_interrupts(accel_dev, vf_mask); - - /* - * Schedule tasklets to handle VF2PF interrupt BHs - * unless the VF is malicious and is attempting to - * flood the host OS with VF2PF interrupts. - */ - for_each_set_bit(i, (const unsigned long *)&vf_mask, - (sizeof(vf_mask) * BITS_PER_BYTE)) { - vf_info = accel_dev->pf.vf_info + i; - - if (!__ratelimit(&vf_info->vf2pf_ratelimit)) { - dev_info(&GET_DEV(accel_dev), - "Too many ints from VF%d\n", - vf_info->vf_nr + 1); - continue; - } - - /* Tasklet will re-enable ints from this VF */ - tasklet_hi_schedule(&vf_info->vf2pf_bh_tasklet); - irq_handled = true; - } - - if (irq_handled) - return IRQ_HANDLED; - } - } -#endif /* CONFIG_PCI_IOV */ - - dev_dbg(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n", - accel_dev->accel_id); - - return IRQ_NONE; -} - -static int adf_request_irqs(struct adf_accel_dev *accel_dev) -{ - struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev; - struct adf_hw_device_data *hw_data = accel_dev->hw_device; - struct msix_entry *msixe = pci_dev_info->msix_entries.entries; - struct adf_etr_data *etr_data = accel_dev->transport; - int ret, i = 0; - char *name; - - /* Request msix irq for all banks unless SR-IOV enabled */ - if (!accel_dev->pf.vf_info) { - for (i = 0; i < hw_data->num_banks; i++) { - struct adf_etr_bank_data *bank = &etr_data->banks[i]; - unsigned int cpu, cpus = num_online_cpus(); - - name = *(pci_dev_info->msix_entries.names + i); - snprintf(name, ADF_MAX_MSIX_VECTOR_NAME, - "qat%d-bundle%d", accel_dev->accel_id, i); - ret = request_irq(msixe[i].vector, - adf_msix_isr_bundle, 0, name, bank); - if (ret) { - dev_err(&GET_DEV(accel_dev), - "failed to enable irq %d for %s\n", - msixe[i].vector, name); - return ret; - } - - cpu = ((accel_dev->accel_id * hw_data->num_banks) + - i) % cpus; - irq_set_affinity_hint(msixe[i].vector, - get_cpu_mask(cpu)); - } - } - - /* Request msix irq for AE */ - name = *(pci_dev_info->msix_entries.names + i); - snprintf(name, ADF_MAX_MSIX_VECTOR_NAME, - "qat%d-ae-cluster", accel_dev->accel_id); - ret = request_irq(msixe[i].vector, adf_msix_isr_ae, 0, name, accel_dev); - if (ret) { - dev_err(&GET_DEV(accel_dev), - "failed to enable irq %d, for %s\n", - msixe[i].vector, name); - return ret; - } - return ret; -} - -static void adf_free_irqs(struct adf_accel_dev *accel_dev) -{ - struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev; - struct adf_hw_device_data *hw_data = accel_dev->hw_device; - struct msix_entry *msixe = pci_dev_info->msix_entries.entries; - struct adf_etr_data *etr_data = accel_dev->transport; - int i = 0; - - if (pci_dev_info->msix_entries.num_entries > 1) { - for (i = 0; i < hw_data->num_banks; i++) { - irq_set_affinity_hint(msixe[i].vector, NULL); - free_irq(msixe[i].vector, &etr_data->banks[i]); - } - } - irq_set_affinity_hint(msixe[i].vector, NULL); - free_irq(msixe[i].vector, accel_dev); -} - -static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev) -{ - int i; - char **names; - struct msix_entry *entries; - struct adf_hw_device_data *hw_data = accel_dev->hw_device; - u32 msix_num_entries = 1; - - /* If SR-IOV is disabled (vf_info is NULL), add entries for each bank */ - if (!accel_dev->pf.vf_info) - msix_num_entries += hw_data->num_banks; - - entries = kzalloc_node(msix_num_entries * sizeof(*entries), - GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev))); - if (!entries) - return -ENOMEM; - - names = kcalloc(msix_num_entries, sizeof(char *), GFP_KERNEL); - if (!names) { - kfree(entries); - return -ENOMEM; - } - 
for (i = 0; i < msix_num_entries; i++) { - *(names + i) = kzalloc(ADF_MAX_MSIX_VECTOR_NAME, GFP_KERNEL); - if (!(*(names + i))) - goto err; - } - accel_dev->accel_pci_dev.msix_entries.num_entries = msix_num_entries; - accel_dev->accel_pci_dev.msix_entries.entries = entries; - accel_dev->accel_pci_dev.msix_entries.names = names; - return 0; -err: - for (i = 0; i < msix_num_entries; i++) - kfree(*(names + i)); - kfree(entries); - kfree(names); - return -ENOMEM; -} - -static void adf_isr_free_msix_entry_table(struct adf_accel_dev *accel_dev) -{ - char **names = accel_dev->accel_pci_dev.msix_entries.names; - int i; - - kfree(accel_dev->accel_pci_dev.msix_entries.entries); - for (i = 0; i < accel_dev->accel_pci_dev.msix_entries.num_entries; i++) - kfree(*(names + i)); - kfree(names); -} - -static int adf_setup_bh(struct adf_accel_dev *accel_dev) -{ - struct adf_etr_data *priv_data = accel_dev->transport; - struct adf_hw_device_data *hw_data = accel_dev->hw_device; - int i; - - for (i = 0; i < hw_data->num_banks; i++) - tasklet_init(&priv_data->banks[i].resp_handler, - adf_response_handler, - (unsigned long)&priv_data->banks[i]); - return 0; -} - -static void adf_cleanup_bh(struct adf_accel_dev *accel_dev) -{ - struct adf_etr_data *priv_data = accel_dev->transport; - struct adf_hw_device_data *hw_data = accel_dev->hw_device; - int i; - - for (i = 0; i < hw_data->num_banks; i++) { - tasklet_disable(&priv_data->banks[i].resp_handler); - tasklet_kill(&priv_data->banks[i].resp_handler); - } -} - -void adf_isr_resource_free(struct adf_accel_dev *accel_dev) -{ - adf_free_irqs(accel_dev); - adf_cleanup_bh(accel_dev); - adf_disable_msix(&accel_dev->accel_pci_dev); - adf_isr_free_msix_entry_table(accel_dev); -} - -int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev) -{ - int ret; - - ret = adf_isr_alloc_msix_entry_table(accel_dev); - if (ret) - return ret; - if (adf_enable_msix(accel_dev)) - goto err_out; - - if (adf_setup_bh(accel_dev)) - goto err_out; - - if (adf_request_irqs(accel_dev)) - goto err_out; - - return 0; -err_out: - adf_isr_resource_free(accel_dev); - return -EFAULT; -} diff --git a/drivers/crypto/qat/qat_dh895xccvf/Makefile b/drivers/crypto/qat/qat_dh895xccvf/Makefile index 85399fc..5c3ccf8 100644 --- a/drivers/crypto/qat/qat_dh895xccvf/Makefile +++ b/drivers/crypto/qat/qat_dh895xccvf/Makefile @@ -1,5 +1,3 @@ ccflags-y := -I$(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf.o -qat_dh895xccvf-objs := adf_drv.o \ - adf_isr.o \ - adf_dh895xccvf_hw_data.o +qat_dh895xccvf-objs := adf_drv.o adf_dh895xccvf_hw_data.o diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c index a9a27ef..dc04ab6 100644 --- a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c +++ b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c @@ -48,7 +48,6 @@ #include #include #include "adf_dh895xccvf_hw_data.h" -#include "adf_drv.h" static struct adf_hw_device_class dh895xcciov_class = { .name = ADF_DH895XCCVF_DEVICE_NAME, @@ -136,7 +135,6 @@ static void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev) void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data) { hw_data->dev_class = &dh895xcciov_class; - hw_data->instance_id = dh895xcciov_class.instances++; hw_data->num_banks = ADF_DH895XCCIOV_ETR_MAX_BANKS; hw_data->num_accel = ADF_DH895XCCIOV_MAX_ACCELERATORS; hw_data->num_logical_accel = 1; @@ -164,9 +162,12 @@ void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data) 
hw_data->enable_ints = adf_vf_void_noop; hw_data->enable_vf2pf_comms = adf_enable_vf2pf_comms; hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION; + hw_data->dev_class->instances++; + adf_devmgr_update_class_index(hw_data); } void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data) { hw_data->dev_class->instances--; + adf_devmgr_update_class_index(hw_data); } diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h index 8f6babf..6ddc19b 100644 --- a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h +++ b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h @@ -56,13 +56,9 @@ #define ADF_DH895XCCIOV_TX_RINGS_MASK 0xFF #define ADF_DH895XCCIOV_ETR_BAR 0 #define ADF_DH895XCCIOV_ETR_MAX_BANKS 1 - #define ADF_DH895XCCIOV_PF2VF_OFFSET 0x200 -#define ADF_DH895XCC_PF2VF_PF2VFINT BIT(0) - -#define ADF_DH895XCCIOV_VINTSOU_OFFSET 0x204 -#define ADF_DH895XCC_VINTSOU_BUN BIT(0) -#define ADF_DH895XCC_VINTSOU_PF2VF BIT(1) - #define ADF_DH895XCCIOV_VINTMSK_OFFSET 0x208 + +void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data); +void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data); #endif diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c index 7bec249..f8cc4bf 100644 --- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c +++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c @@ -60,11 +60,7 @@ #include #include #include -#include #include "adf_dh895xccvf_hw_data.h" -#include "adf_drv.h" - -static const char adf_driver_name[] = ADF_DH895XCCVF_DEVICE_NAME; #define ADF_SYSTEM_DEVICE(device_id) \ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} @@ -80,7 +76,7 @@ static void adf_remove(struct pci_dev *dev); static struct pci_driver adf_driver = { .id_table = adf_pci_tbl, - .name = adf_driver_name, + .name = ADF_DH895XCCVF_DEVICE_NAME, .probe = adf_probe, .remove = adf_remove, }; @@ -121,83 +117,6 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) adf_devmgr_rm_dev(accel_dev, pf); } -static int adf_dev_configure(struct adf_accel_dev *accel_dev) -{ - char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; - unsigned long val, bank = 0; - - if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC)) - goto err; - if (adf_cfg_section_add(accel_dev, "Accelerator0")) - goto err; - - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, 0); - if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, key, - (void *)&bank, ADF_DEC)) - goto err; - - val = bank; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY, 0); - if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, key, - (void *)&val, ADF_DEC)) - goto err; - - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, 0); - - val = 128; - if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, key, - (void *)&val, ADF_DEC)) - goto err; - - val = 512; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, 0); - if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, (void *)&val, ADF_DEC)) - goto err; - - val = 0; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, 0); - if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, (void *)&val, ADF_DEC)) - goto err; - - val = 2; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, 0); - if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, (void *)&val, ADF_DEC)) - goto err; - - val = 8; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, 0); - if 
(adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, (void *)&val, ADF_DEC)) - goto err; - - val = 10; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, 0); - if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, (void *)&val, ADF_DEC)) - goto err; - - val = ADF_COALESCING_DEF_TIME; - snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, - (int)bank); - if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0", - key, (void *)&val, ADF_DEC)) - goto err; - - val = 1; - if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - ADF_NUM_CY, (void *)&val, ADF_DEC)) - goto err; - - set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); - return 0; -err: - dev_err(&GET_DEV(accel_dev), "Failed to configure QAT accel dev\n"); - return -EINVAL; -} - static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct adf_accel_dev *accel_dev; @@ -288,7 +207,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); } - if (pci_request_regions(pdev, adf_driver_name)) { + if (pci_request_regions(pdev, ADF_DH895XCCVF_DEVICE_NAME)) { ret = -EFAULT; goto out_err_disable; } @@ -315,7 +234,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* Completion for VF2PF request/response message exchange */ init_completion(&accel_dev->vf.iov_msg_completion); - ret = adf_dev_configure(accel_dev); + ret = qat_crypto_dev_config(accel_dev); if (ret) goto out_err_free_reg; diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.h b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.h deleted file mode 100644 index e270e4a..0000000 --- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.h +++ /dev/null @@ -1,57 +0,0 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ -#ifndef ADF_DH895xVF_DRV_H_ -#define ADF_DH895xVF_DRV_H_ -#include -#include - -void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data); -void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data); -int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev); -void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev); -void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring); -#endif diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_isr.c b/drivers/crypto/qat/qat_dh895xccvf/adf_isr.c deleted file mode 100644 index 87c5d8a..0000000 --- a/drivers/crypto/qat/qat_dh895xccvf/adf_isr.c +++ /dev/null @@ -1,258 +0,0 @@ -/* - This file is provided under a dual BSD/GPLv2 license. When using or - redistributing this file, you may do so under either license. - - GPL LICENSE SUMMARY - Copyright(c) 2014 Intel Corporation. - This program is free software; you can redistribute it and/or modify - it under the terms of version 2 of the GNU General Public License as - published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - Contact Information: - qat-linux@intel.com - - BSD LICENSE - Copyright(c) 2014 Intel Corporation. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - * Neither the name of Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
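
The adf_dev_configure() routine removed above (in favour of the shared qat_crypto_dev_config()) illustrates the key-composition idiom used throughout the QAT config layer: every ring parameter is registered under a printf-assembled key before being handed to adf_cfg_add_key_value_param(). A minimal userspace sketch of that idiom follows; the literal key fragments are placeholders, since the real ADF_CY/ADF_RING_* strings live in qat_common headers that are not part of this patch.

    #include <stdio.h>

    /* Placeholder key fragments; the real strings come from qat_common. */
    #define ADF_CY                  "Cy"
    #define ADF_RING_SYM_SIZE       "NumConcurrentSymRequests"
    #define ADF_CFG_MAX_KEY_LEN     64

    static void build_key(char *key, size_t len, int inst, const char *param)
    {
            /* Mirrors snprintf(key, sizeof(key), ADF_CY "%d" <param>, 0) */
            snprintf(key, len, "%s%d%s", ADF_CY, inst, param);
    }

    int main(void)
    {
            char key[ADF_CFG_MAX_KEY_LEN];

            build_key(key, sizeof(key), 0, ADF_RING_SYM_SIZE);
            printf("%s = %d\n", key, 512); /* value registered as ADF_DEC */
            return 0;
    }
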
-*/ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "adf_drv.h" -#include "adf_dh895xccvf_hw_data.h" - -static int adf_enable_msi(struct adf_accel_dev *accel_dev) -{ - struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev; - int stat = pci_enable_msi(pci_dev_info->pci_dev); - - if (stat) { - dev_err(&GET_DEV(accel_dev), - "Failed to enable MSI interrupts\n"); - return stat; - } - - accel_dev->vf.irq_name = kzalloc(ADF_MAX_MSIX_VECTOR_NAME, GFP_KERNEL); - if (!accel_dev->vf.irq_name) - return -ENOMEM; - - return stat; -} - -static void adf_disable_msi(struct adf_accel_dev *accel_dev) -{ - struct pci_dev *pdev = accel_to_pci_dev(accel_dev); - - kfree(accel_dev->vf.irq_name); - pci_disable_msi(pdev); -} - -static void adf_pf2vf_bh_handler(void *data) -{ - struct adf_accel_dev *accel_dev = data; - void __iomem *pmisc_bar_addr = - (&GET_BARS(accel_dev)[ADF_DH895XCCIOV_PMISC_BAR])->virt_addr; - u32 msg; - - /* Read the message from PF */ - msg = ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCCIOV_PF2VF_OFFSET); - - if (!(msg & ADF_PF2VF_MSGORIGIN_SYSTEM)) - /* Ignore legacy non-system (non-kernel) PF2VF messages */ - goto err; - - switch ((msg & ADF_PF2VF_MSGTYPE_MASK) >> ADF_PF2VF_MSGTYPE_SHIFT) { - case ADF_PF2VF_MSGTYPE_RESTARTING: - dev_dbg(&GET_DEV(accel_dev), - "Restarting msg received from PF 0x%x\n", msg); - adf_dev_stop(accel_dev); - break; - case ADF_PF2VF_MSGTYPE_VERSION_RESP: - dev_dbg(&GET_DEV(accel_dev), - "Version resp received from PF 0x%x\n", msg); - accel_dev->vf.pf_version = - (msg & ADF_PF2VF_VERSION_RESP_VERS_MASK) >> - ADF_PF2VF_VERSION_RESP_VERS_SHIFT; - accel_dev->vf.compatible = - (msg & ADF_PF2VF_VERSION_RESP_RESULT_MASK) >> - ADF_PF2VF_VERSION_RESP_RESULT_SHIFT; - complete(&accel_dev->vf.iov_msg_completion); - break; - default: - goto err; - } - - /* To ack, clear the PF2VFINT bit */ - msg &= ~ADF_DH895XCC_PF2VF_PF2VFINT; - ADF_CSR_WR(pmisc_bar_addr, ADF_DH895XCCIOV_PF2VF_OFFSET, msg); - - /* Re-enable PF2VF interrupts */ - adf_enable_pf2vf_interrupts(accel_dev); - return; -err: - dev_err(&GET_DEV(accel_dev), - "Unknown message from PF (0x%x); leaving PF2VF ints disabled\n", - msg); -} - -static int adf_setup_pf2vf_bh(struct adf_accel_dev *accel_dev) -{ - tasklet_init(&accel_dev->vf.pf2vf_bh_tasklet, - (void *)adf_pf2vf_bh_handler, (unsigned long)accel_dev); - - mutex_init(&accel_dev->vf.vf2pf_lock); - return 0; -} - -static void adf_cleanup_pf2vf_bh(struct adf_accel_dev *accel_dev) -{ - tasklet_disable(&accel_dev->vf.pf2vf_bh_tasklet); - tasklet_kill(&accel_dev->vf.pf2vf_bh_tasklet); - mutex_destroy(&accel_dev->vf.vf2pf_lock); -} - -static irqreturn_t adf_isr(int irq, void *privdata) -{ - struct adf_accel_dev *accel_dev = privdata; - void __iomem *pmisc_bar_addr = - (&GET_BARS(accel_dev)[ADF_DH895XCCIOV_PMISC_BAR])->virt_addr; - u32 v_int; - - /* Read VF INT source CSR to determine the source of VF interrupt */ - v_int = ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCCIOV_VINTSOU_OFFSET); - - /* Check for PF2VF interrupt */ - if (v_int & ADF_DH895XCC_VINTSOU_PF2VF) { - /* Disable PF to VF interrupt */ - adf_disable_pf2vf_interrupts(accel_dev); - - /* Schedule tasklet to handle interrupt BH */ - tasklet_hi_schedule(&accel_dev->vf.pf2vf_bh_tasklet); - return IRQ_HANDLED; - } - - /* Check bundle interrupt */ - if (v_int & ADF_DH895XCC_VINTSOU_BUN) { - struct adf_etr_data *etr_data = accel_dev->transport; - struct adf_etr_bank_data *bank = &etr_data->banks[0]; - - /* 
Disable Flag and Coalesce Ring Interrupts */ - WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number, - 0); - tasklet_hi_schedule(&bank->resp_handler); - return IRQ_HANDLED; - } - - return IRQ_NONE; -} - -static int adf_request_msi_irq(struct adf_accel_dev *accel_dev) -{ - struct pci_dev *pdev = accel_to_pci_dev(accel_dev); - unsigned int cpu; - int ret; - - snprintf(accel_dev->vf.irq_name, ADF_MAX_MSIX_VECTOR_NAME, - "qat_%02x:%02d.%02d", pdev->bus->number, PCI_SLOT(pdev->devfn), - PCI_FUNC(pdev->devfn)); - ret = request_irq(pdev->irq, adf_isr, 0, accel_dev->vf.irq_name, - (void *)accel_dev); - if (ret) { - dev_err(&GET_DEV(accel_dev), "failed to enable irq for %s\n", - accel_dev->vf.irq_name); - return ret; - } - cpu = accel_dev->accel_id % num_online_cpus(); - irq_set_affinity_hint(pdev->irq, get_cpu_mask(cpu)); - - return ret; -} - -static int adf_setup_bh(struct adf_accel_dev *accel_dev) -{ - struct adf_etr_data *priv_data = accel_dev->transport; - - tasklet_init(&priv_data->banks[0].resp_handler, adf_response_handler, - (unsigned long)priv_data->banks); - return 0; -} - -static void adf_cleanup_bh(struct adf_accel_dev *accel_dev) -{ - struct adf_etr_data *priv_data = accel_dev->transport; - - tasklet_disable(&priv_data->banks[0].resp_handler); - tasklet_kill(&priv_data->banks[0].resp_handler); -} - -void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev) -{ - struct pci_dev *pdev = accel_to_pci_dev(accel_dev); - - irq_set_affinity_hint(pdev->irq, NULL); - free_irq(pdev->irq, (void *)accel_dev); - adf_cleanup_bh(accel_dev); - adf_cleanup_pf2vf_bh(accel_dev); - adf_disable_msi(accel_dev); -} - -int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev) -{ - if (adf_enable_msi(accel_dev)) - goto err_out; - - if (adf_setup_pf2vf_bh(accel_dev)) - goto err_out; - - if (adf_setup_bh(accel_dev)) - goto err_out; - - if (adf_request_msi_irq(accel_dev)) - goto err_out; - - return 0; -err_out: - adf_vf_isr_resource_free(accel_dev); - return -EFAULT; -} -- cgit v0.10.2 From 890c55f4dc0e60a4ba71ab9b6877f69ff7053213 Mon Sep 17 00:00:00 2001 From: Tadeusz Struk Date: Fri, 4 Dec 2015 16:56:34 -0800 Subject: crypto: qat - add support for c3xxx accel type Add support for c3xxx accel type. Signed-off-by: Tadeusz Struk Signed-off-by: Herbert Xu diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/qat/Kconfig index eefccf7..7b3e791 100644 --- a/drivers/crypto/qat/Kconfig +++ b/drivers/crypto/qat/Kconfig @@ -22,6 +22,17 @@ config CRYPTO_DEV_QAT_DH895xCC To compile this as a module, choose M here: the module will be called qat_dh895xcc. +config CRYPTO_DEV_QAT_C3XXX + tristate "Support for Intel(R) C3XXX" + depends on X86 && PCI + select CRYPTO_DEV_QAT + help + Support for Intel(R) C3xxx with Intel(R) QuickAssist Technology + for accelerating crypto and compression workloads. + + To compile this as a module, choose M here: the module + will be called qat_c3xxx. 
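
The VF interrupt code consolidated above centres on a single 32-bit PF2VF doorbell message: an origin bit, a type field, and type-specific payload, all extracted by mask-and-shift in adf_pf2vf_bh_handler(). The sketch below models that decode in plain C; the field positions are illustrative stand-ins, since the real ADF_PF2VF_* masks and shifts are defined in qat_common headers outside this patch.

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative field layout (NOT the real ADF_PF2VF_* values). */
    #define MSG_ORIGIN_SYSTEM       (1u << 0)
    #define MSG_TYPE_SHIFT          2
    #define MSG_TYPE_MASK           (0xFu << MSG_TYPE_SHIFT)
    #define MSG_TYPE_RESTARTING     1
    #define MSG_TYPE_VERSION_RESP   2
    #define VERS_SHIFT              6
    #define VERS_MASK               (0xFFu << VERS_SHIFT)

    static void decode_pf2vf(uint32_t msg)
    {
            if (!(msg & MSG_ORIGIN_SYSTEM)) {
                    printf("ignoring legacy non-system message 0x%x\n", msg);
                    return;
            }
            switch ((msg & MSG_TYPE_MASK) >> MSG_TYPE_SHIFT) {
            case MSG_TYPE_RESTARTING:
                    printf("PF is restarting, stop the VF device\n");
                    break;
            case MSG_TYPE_VERSION_RESP:
                    printf("PF version %u\n", (msg & VERS_MASK) >> VERS_SHIFT);
                    break;
            default:
                    printf("unknown message 0x%x\n", msg);
            }
    }

    int main(void)
    {
            decode_pf2vf(MSG_ORIGIN_SYSTEM |
                         (MSG_TYPE_VERSION_RESP << MSG_TYPE_SHIFT) |
                         (3u << VERS_SHIFT));
            return 0;
    }
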
+ config CRYPTO_DEV_QAT_DH895xCCVF tristate "Support for Intel(R) DH895xCC Virtual Function" depends on X86 && PCI diff --git a/drivers/crypto/qat/Makefile b/drivers/crypto/qat/Makefile index a3ce0b7..e08d660 100644 --- a/drivers/crypto/qat/Makefile +++ b/drivers/crypto/qat/Makefile @@ -1,3 +1,4 @@ obj-$(CONFIG_CRYPTO_DEV_QAT) += qat_common/ obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/ +obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx/ obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf/ diff --git a/drivers/crypto/qat/qat_c3xxx/Makefile b/drivers/crypto/qat/qat_c3xxx/Makefile new file mode 100644 index 0000000..8f5fd48 --- /dev/null +++ b/drivers/crypto/qat/qat_c3xxx/Makefile @@ -0,0 +1,3 @@ +ccflags-y := -I$(src)/../qat_common +obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx.o +qat_c3xxx-objs := adf_drv.o adf_c3xxx_hw_data.o diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c new file mode 100644 index 0000000..bda8f9f --- /dev/null +++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c @@ -0,0 +1,248 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
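
The hw_data callbacks added below derive the available accelerator and engine sets from the fuse word read out of PCI config space; the fuse bits are active-low, so both masks are computed from the complement. A standalone model of get_accel_mask()/get_ae_mask() and the counting helpers, using the ADF_C3XXX_* constants from the header added later in this patch (the sample fuse value is invented):

    #include <stdio.h>
    #include <stdint.h>

    #define ADF_C3XXX_ACCELERATORS_REG_OFFSET   16
    #define ADF_C3XXX_ACCELERATORS_MASK         0x3
    #define ADF_C3XXX_ACCELENGINES_MASK         0x3F

    static uint32_t get_accel_mask(uint32_t fuse)
    {
            /* Fuses are active-low: a set bit disables the unit. */
            return (~fuse) >> ADF_C3XXX_ACCELERATORS_REG_OFFSET &
                    ADF_C3XXX_ACCELERATORS_MASK;
    }

    static uint32_t get_ae_mask(uint32_t fuse)
    {
            return (~fuse) & ADF_C3XXX_ACCELENGINES_MASK;
    }

    static unsigned int count_bits(uint32_t mask)
    {
            unsigned int n = 0;

            while (mask) {  /* same per-bit count as get_num_aes() */
                    n += mask & 1;
                    mask >>= 1;
            }
            return n;
    }

    int main(void)
    {
            uint32_t fuse = 0;      /* hypothetical: nothing fused off */

            printf("accel mask 0x%x (%u units), ae mask 0x%x (%u engines)\n",
                   get_accel_mask(fuse), count_bits(get_accel_mask(fuse)),
                   get_ae_mask(fuse), count_bits(get_ae_mask(fuse)));
            return 0;
    }
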
+*/ +#include +#include +#include +#include "adf_c3xxx_hw_data.h" + +/* Worker thread to service arbiter mappings based on dev SKUs */ +static const u32 thrd_to_arb_map_8_me_sku[] = { + 0x10000888, 0x11000888, 0x10000888, 0x11000888, 0x10000888, + 0x11000888, 0x10000888, 0x11000888, 0, 0 +}; + +static const u32 thrd_to_arb_map_10_me_sku[] = { + 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA, + 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA +}; + +static struct adf_hw_device_class c3xxx_class = { + .name = ADF_C3XXX_DEVICE_NAME, + .type = DEV_C3XXX, + .instances = 0 +}; + +static u32 get_accel_mask(u32 fuse) +{ + return (~fuse) >> ADF_C3XXX_ACCELERATORS_REG_OFFSET & + ADF_C3XXX_ACCELERATORS_MASK; +} + +static u32 get_ae_mask(u32 fuse) +{ + return (~fuse) & ADF_C3XXX_ACCELENGINES_MASK; +} + +static u32 get_num_accels(struct adf_hw_device_data *self) +{ + u32 i, ctr = 0; + + if (!self || !self->accel_mask) + return 0; + + for (i = 0; i < ADF_C3XXX_MAX_ACCELERATORS; i++) { + if (self->accel_mask & (1 << i)) + ctr++; + } + return ctr; +} + +static u32 get_num_aes(struct adf_hw_device_data *self) +{ + u32 i, ctr = 0; + + if (!self || !self->ae_mask) + return 0; + + for (i = 0; i < ADF_C3XXX_MAX_ACCELENGINES; i++) { + if (self->ae_mask & (1 << i)) + ctr++; + } + return ctr; +} + +static u32 get_misc_bar_id(struct adf_hw_device_data *self) +{ + return ADF_C3XXX_PMISC_BAR; +} + +static u32 get_etr_bar_id(struct adf_hw_device_data *self) +{ + return ADF_C3XXX_ETR_BAR; +} + +static u32 get_sram_bar_id(struct adf_hw_device_data *self) +{ + return ADF_C3XXX_SRAM_BAR; +} + +static enum dev_sku_info get_sku(struct adf_hw_device_data *self) +{ + int aes = get_num_aes(self); + + if (aes == 8) + return DEV_SKU_2; + else if (aes == 10) + return DEV_SKU_4; + + return DEV_SKU_UNKNOWN; +} + +static void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev, + u32 const **arb_map_config) +{ + switch (accel_dev->accel_pci_dev.sku) { + case DEV_SKU_2: + *arb_map_config = thrd_to_arb_map_8_me_sku; + break; + case DEV_SKU_4: + *arb_map_config = thrd_to_arb_map_10_me_sku; + break; + default: + dev_err(&GET_DEV(accel_dev), + "The configuration doesn't match any SKU"); + *arb_map_config = NULL; + } +} + +static u32 get_pf2vf_offset(u32 i) +{ + return ADF_C3XXX_PF2VF_OFFSET(i); +} + +static u32 get_vintmsk_offset(u32 i) +{ + return ADF_C3XXX_VINTMSK_OFFSET(i); +} + +static void adf_enable_error_correction(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_device = accel_dev->hw_device; + struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_C3XXX_PMISC_BAR]; + void __iomem *csr = misc_bar->virt_addr; + unsigned int val, i; + + /* Enable Accel Engine error detection & correction */ + for (i = 0; i < hw_device->get_num_aes(hw_device); i++) { + val = ADF_CSR_RD(csr, ADF_C3XXX_AE_CTX_ENABLES(i)); + val |= ADF_C3XXX_ENABLE_AE_ECC_ERR; + ADF_CSR_WR(csr, ADF_C3XXX_AE_CTX_ENABLES(i), val); + val = ADF_CSR_RD(csr, ADF_C3XXX_AE_MISC_CONTROL(i)); + val |= ADF_C3XXX_ENABLE_AE_ECC_PARITY_CORR; + ADF_CSR_WR(csr, ADF_C3XXX_AE_MISC_CONTROL(i), val); + } + + /* Enable shared memory error detection & correction */ + for (i = 0; i < hw_device->get_num_accels(hw_device); i++) { + val = ADF_CSR_RD(csr, ADF_C3XXX_UERRSSMSH(i)); + val |= ADF_C3XXX_ERRSSMSH_EN; + ADF_CSR_WR(csr, ADF_C3XXX_UERRSSMSH(i), val); + val = ADF_CSR_RD(csr, ADF_C3XXX_CERRSSMSH(i)); + val |= ADF_C3XXX_ERRSSMSH_EN; + ADF_CSR_WR(csr, ADF_C3XXX_CERRSSMSH(i), val); + } +} + +static void adf_enable_ints(struct adf_accel_dev 
*accel_dev) +{ + void __iomem *addr; + + addr = (&GET_BARS(accel_dev)[ADF_C3XXX_PMISC_BAR])->virt_addr; + + /* Enable bundle and misc interrupts */ + ADF_CSR_WR(addr, ADF_C3XXX_SMIAPF0_MASK_OFFSET, + ADF_C3XXX_SMIA0_MASK); + ADF_CSR_WR(addr, ADF_C3XXX_SMIAPF1_MASK_OFFSET, + ADF_C3XXX_SMIA1_MASK); +} + +static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev) +{ + return 0; +} + +void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data) +{ + hw_data->dev_class = &c3xxx_class; + hw_data->instance_id = c3xxx_class.instances++; + hw_data->num_banks = ADF_C3XXX_ETR_MAX_BANKS; + hw_data->num_accel = ADF_C3XXX_MAX_ACCELERATORS; + hw_data->num_logical_accel = 1; + hw_data->num_engines = ADF_C3XXX_MAX_ACCELENGINES; + hw_data->tx_rx_gap = ADF_C3XXX_RX_RINGS_OFFSET; + hw_data->tx_rings_mask = ADF_C3XXX_TX_RINGS_MASK; + hw_data->alloc_irq = adf_isr_resource_alloc; + hw_data->free_irq = adf_isr_resource_free; + hw_data->enable_error_correction = adf_enable_error_correction; + hw_data->get_accel_mask = get_accel_mask; + hw_data->get_ae_mask = get_ae_mask; + hw_data->get_num_accels = get_num_accels; + hw_data->get_num_aes = get_num_aes; + hw_data->get_sram_bar_id = get_sram_bar_id; + hw_data->get_etr_bar_id = get_etr_bar_id; + hw_data->get_misc_bar_id = get_misc_bar_id; + hw_data->get_pf2vf_offset = get_pf2vf_offset; + hw_data->get_vintmsk_offset = get_vintmsk_offset; + hw_data->get_sku = get_sku; + hw_data->fw_name = ADF_C3XXX_FW; + hw_data->fw_mmp_name = ADF_C3XXX_MMP; + hw_data->init_admin_comms = adf_init_admin_comms; + hw_data->exit_admin_comms = adf_exit_admin_comms; + hw_data->disable_iov = adf_disable_sriov; + hw_data->send_admin_init = adf_send_admin_init; + hw_data->init_arb = adf_init_arb; + hw_data->exit_arb = adf_exit_arb; + hw_data->get_arb_mapping = adf_get_arbiter_mapping; + hw_data->enable_ints = adf_enable_ints; + hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms; + hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION; +} + +void adf_clean_hw_data_c3xxx(struct adf_hw_device_data *hw_data) +{ + hw_data->dev_class->instances--; +} diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h new file mode 100644 index 0000000..f2fa234 --- /dev/null +++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h @@ -0,0 +1,84 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#ifndef ADF_C3XXX_HW_DATA_H_ +#define ADF_C3XXX_HW_DATA_H_ + +/* PCIe configuration space */ +#define ADF_C3XXX_SRAM_BAR 0 +#define ADF_C3XXX_PMISC_BAR 1 +#define ADF_C3XXX_ETR_BAR 2 +#define ADF_C3XXX_RX_RINGS_OFFSET 8 +#define ADF_C3XXX_TX_RINGS_MASK 0xFF +#define ADF_C3XXX_MAX_ACCELERATORS 3 +#define ADF_C3XXX_MAX_ACCELENGINES 6 +#define ADF_C3XXX_ACCELERATORS_REG_OFFSET 16 +#define ADF_C3XXX_ACCELERATORS_MASK 0x3 +#define ADF_C3XXX_ACCELENGINES_MASK 0x3F +#define ADF_C3XXX_ETR_MAX_BANKS 16 +#define ADF_C3XXX_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28) +#define ADF_C3XXX_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30) +#define ADF_C3XXX_SMIA0_MASK 0xFFFF +#define ADF_C3XXX_SMIA1_MASK 0x1 +/* Error detection and correction */ +#define ADF_C3XXX_AE_CTX_ENABLES(i) (i * 0x1000 + 0x20818) +#define ADF_C3XXX_AE_MISC_CONTROL(i) (i * 0x1000 + 0x20960) +#define ADF_C3XXX_ENABLE_AE_ECC_ERR BIT(28) +#define ADF_C3XXX_ENABLE_AE_ECC_PARITY_CORR (BIT(24) | BIT(12)) +#define ADF_C3XXX_UERRSSMSH(i) (i * 0x4000 + 0x18) +#define ADF_C3XXX_CERRSSMSH(i) (i * 0x4000 + 0x10) +#define ADF_C3XXX_ERRSSMSH_EN BIT(3) + +#define ADF_C3XXX_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04)) +#define ADF_C3XXX_VINTMSK_OFFSET(i) (0x3A000 + 0x200 + ((i) * 0x04)) + +/* Firmware Binary */ +#define ADF_C3XXX_FW "qat_c3xxx.bin" +#define ADF_C3XXX_MMP "qat_c3xxx_mmp.bin" + +void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data); +void adf_clean_hw_data_c3xxx(struct adf_hw_device_data *hw_data); +#endif diff --git a/drivers/crypto/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/qat/qat_c3xxx/adf_drv.c new file mode 100644 index 0000000..e13bd08 --- /dev/null +++ b/drivers/crypto/qat/qat_c3xxx/adf_drv.c @@ -0,0 +1,335 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. 
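
The probe routine added below negotiates DMA addressing the conventional way: try a 64-bit mask first, fall back to 32-bit, and keep the coherent mask in step. Factored out of the probe body purely for clarity, the pattern looks like this sketch (a kernel-style fragment assuming this driver's era of PCI DMA API; in-tree the calls are made inline in adf_probe()):

    /* Sketch of the DMA-mask negotiation done inline in adf_probe(). */
    static int qat_set_dma_masks(struct pci_dev *pdev)
    {
            if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
                    /* Device can address 64 bits; match the coherent mask. */
                    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
                    return 0;
            }
            if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
                    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
                    return 0;
            }
            dev_err(&pdev->dev, "No usable DMA configuration\n");
            return -EFAULT;
    }
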
+ + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "adf_c3xxx_hw_data.h" + +#define ADF_SYSTEM_DEVICE(device_id) \ + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} + +static const struct pci_device_id adf_pci_tbl[] = { + ADF_SYSTEM_DEVICE(ADF_C3XXX_PCI_DEVICE_ID), + {0,} +}; +MODULE_DEVICE_TABLE(pci, adf_pci_tbl); + +static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent); +static void adf_remove(struct pci_dev *dev); + +static struct pci_driver adf_driver = { + .id_table = adf_pci_tbl, + .name = ADF_C3XXX_DEVICE_NAME, + .probe = adf_probe, + .remove = adf_remove, + .sriov_configure = adf_sriov_configure, +}; + +static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev) +{ + pci_release_regions(accel_dev->accel_pci_dev.pci_dev); + pci_disable_device(accel_dev->accel_pci_dev.pci_dev); +} + +static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) +{ + struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev; + int i; + + for (i = 0; i < ADF_PCI_MAX_BARS; i++) { + struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; + + if (bar->virt_addr) + pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr); + } + + if (accel_dev->hw_device) { + switch (accel_pci_dev->pci_dev->device) { + case ADF_C3XXX_PCI_DEVICE_ID: + adf_clean_hw_data_c3xxx(accel_dev->hw_device); + break; + default: + break; + } + kfree(accel_dev->hw_device); + accel_dev->hw_device = NULL; + } + adf_cfg_dev_remove(accel_dev); + debugfs_remove(accel_dev->debugfs_dir); + adf_devmgr_rm_dev(accel_dev, NULL); +} + +static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct adf_accel_dev *accel_dev; + struct adf_accel_pci *accel_pci_dev; + struct adf_hw_device_data *hw_data; + char name[ADF_DEVICE_NAME_LENGTH]; + unsigned int i, bar_nr; + int ret, bar_mask; + + switch (ent->device) { + case ADF_C3XXX_PCI_DEVICE_ID: + 
break; + default: + dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device); + return -ENODEV; + } + + if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) { + /* If the accelerator is connected to a node with no memory + * there is no point in using the accelerator since the remote + * memory transaction will be very slow. */ + dev_err(&pdev->dev, "Invalid NUMA configuration.\n"); + return -EINVAL; + } + + accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL, + dev_to_node(&pdev->dev)); + if (!accel_dev) + return -ENOMEM; + + INIT_LIST_HEAD(&accel_dev->crypto_list); + accel_pci_dev = &accel_dev->accel_pci_dev; + accel_pci_dev->pci_dev = pdev; + + /* Add accel device to accel table. + * This should be called before adf_cleanup_accel is called */ + if (adf_devmgr_add_dev(accel_dev, NULL)) { + dev_err(&pdev->dev, "Failed to add new accelerator device.\n"); + kfree(accel_dev); + return -EFAULT; + } + + accel_dev->owner = THIS_MODULE; + /* Allocate and configure device configuration structure */ + hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL, + dev_to_node(&pdev->dev)); + if (!hw_data) { + ret = -ENOMEM; + goto out_err; + } + + accel_dev->hw_device = hw_data; + adf_init_hw_data_c3xxx(accel_dev->hw_device); + pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid); + pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET, + &hw_data->fuses); + + /* Get Accelerators and Accelerators Engines masks */ + hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses); + hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses); + accel_pci_dev->sku = hw_data->get_sku(hw_data); + /* If the device has no acceleration engines then ignore it. */ + if (!hw_data->accel_mask || !hw_data->ae_mask || + ((~hw_data->ae_mask) & 0x01)) { + dev_err(&pdev->dev, "No acceleration units found"); + ret = -EFAULT; + goto out_err; + } + + /* Create dev top level debugfs entry */ + snprintf(name, sizeof(name), "%s%s_%02x:%02d.%02d", + ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name, + pdev->bus->number, PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn)); + + accel_dev->debugfs_dir = debugfs_create_dir(name, NULL); + if (!accel_dev->debugfs_dir) { + dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name); + ret = -EINVAL; + goto out_err; + } + + /* Create device configuration table */ + ret = adf_cfg_dev_add(accel_dev); + if (ret) + goto out_err; + + /* enable PCI device */ + if (pci_enable_device(pdev)) { + ret = -EFAULT; + goto out_err; + } + + /* set dma identifier */ + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { + dev_err(&pdev->dev, "No usable DMA configuration\n"); + ret = -EFAULT; + goto out_err_disable; + } else { + pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + } + + } else { + pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + } + + if (pci_request_regions(pdev, ADF_C3XXX_DEVICE_NAME)) { + ret = -EFAULT; + goto out_err_disable; + } + + /* Read accelerator capabilities mask */ + pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET, + &hw_data->accel_capabilities_mask); + + /* Find and map all the device's BARS */ + i = 0; + bar_mask = pci_select_bars(pdev, IORESOURCE_MEM); + for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask, + ADF_PCI_MAX_BARS * 2) { + struct adf_bar *bar = &accel_pci_dev->pci_bars[i++]; + + bar->base_addr = pci_resource_start(pdev, bar_nr); + if (!bar->base_addr) + break; + bar->size = pci_resource_len(pdev, bar_nr); + bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0); + if 
(!bar->virt_addr) { + dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr); + ret = -EFAULT; + goto out_err_free_reg; + } + } + pci_set_master(pdev); + + if (adf_enable_aer(accel_dev, &adf_driver)) { + dev_err(&pdev->dev, "Failed to enable aer\n"); + ret = -EFAULT; + goto out_err_free_reg; + } + + if (pci_save_state(pdev)) { + dev_err(&pdev->dev, "Failed to save pci state\n"); + ret = -ENOMEM; + goto out_err_free_reg; + } + + ret = qat_crypto_dev_config(accel_dev); + if (ret) + goto out_err_free_reg; + + ret = adf_dev_init(accel_dev); + if (ret) + goto out_err_dev_shutdown; + + ret = adf_dev_start(accel_dev); + if (ret) + goto out_err_dev_stop; + + return ret; + +out_err_dev_stop: + adf_dev_stop(accel_dev); +out_err_dev_shutdown: + adf_dev_shutdown(accel_dev); +out_err_free_reg: + pci_release_regions(accel_pci_dev->pci_dev); +out_err_disable: + pci_disable_device(accel_pci_dev->pci_dev); +out_err: + adf_cleanup_accel(accel_dev); + kfree(accel_dev); + return ret; +} + +static void adf_remove(struct pci_dev *pdev) +{ + struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); + + if (!accel_dev) { + pr_err("QAT: Driver removal failed\n"); + return; + } + if (adf_dev_stop(accel_dev)) + dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n"); + + adf_dev_shutdown(accel_dev); + adf_disable_aer(accel_dev); + adf_cleanup_accel(accel_dev); + adf_cleanup_pci_dev(accel_dev); + kfree(accel_dev); +} + +static int __init adfdrv_init(void) +{ + request_module("intel_qat"); + + if (pci_register_driver(&adf_driver)) { + pr_err("QAT: Driver initialization failed\n"); + return -EFAULT; + } + return 0; +} + +static void __exit adfdrv_release(void) +{ + pci_unregister_driver(&adf_driver); +} + +module_init(adfdrv_init); +module_exit(adfdrv_release); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Intel"); +MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); +MODULE_VERSION(ADF_DRV_VERSION); diff --git a/drivers/crypto/qat/qat_common/adf_cfg_common.h b/drivers/crypto/qat/qat_common/adf_cfg_common.h index c697fb1..39884c9 100644 --- a/drivers/crypto/qat/qat_common/adf_cfg_common.h +++ b/drivers/crypto/qat/qat_common/adf_cfg_common.h @@ -72,6 +72,7 @@ enum adf_device_type { DEV_UNKNOWN = 0, DEV_DH895XCC, DEV_DH895XCCVF, + DEV_C3XXX }; struct adf_dev_status_info { diff --git a/drivers/crypto/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/qat/qat_common/adf_dev_mgr.c index 8dfdb8f..b3ebb25 100644 --- a/drivers/crypto/qat/qat_common/adf_dev_mgr.c +++ b/drivers/crypto/qat/qat_common/adf_dev_mgr.c @@ -53,6 +53,7 @@ static LIST_HEAD(accel_table); static LIST_HEAD(vfs_table); static DEFINE_MUTEX(table_lock); static uint32_t num_devices; +static u8 id_map[ADF_MAX_DEVICES]; struct vf_id_map { u32 bdf; @@ -116,8 +117,10 @@ void adf_clean_vf_map(bool vf) mutex_lock(&table_lock); list_for_each_safe(ptr, tmp, &vfs_table) { map = list_entry(ptr, struct vf_id_map, list); - if (map->bdf != -1) + if (map->bdf != -1) { + id_map[map->id] = 0; num_devices--; + } if (vf && map->bdf == -1) continue; @@ -154,6 +157,19 @@ void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data) } EXPORT_SYMBOL_GPL(adf_devmgr_update_class_index); +static unsigned int adf_find_free_id(void) +{ + unsigned int i; + + for (i = 0; i < ADF_MAX_DEVICES; i++) { + if (!id_map[i]) { + id_map[i] = 1; + return i; + } + } + return ADF_MAX_DEVICES + 1; +} + /** * adf_devmgr_add_dev() - Add accel_dev to the acceleration framework * @accel_dev: Pointer to acceleration device. 
@@ -194,8 +210,12 @@ int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev, } list_add_tail(&accel_dev->list, &accel_table); - accel_dev->accel_id = num_devices++; - + accel_dev->accel_id = adf_find_free_id(); + if (accel_dev->accel_id > ADF_MAX_DEVICES) { + ret = -EFAULT; + goto unlock; + } + num_devices++; map = kzalloc(sizeof(*map), GFP_KERNEL); if (!map) { ret = -ENOMEM; @@ -236,8 +256,13 @@ int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev, ret = -ENOMEM; goto unlock; } - - accel_dev->accel_id = num_devices++; + accel_dev->accel_id = adf_find_free_id(); + if (accel_dev->accel_id > ADF_MAX_DEVICES) { + kfree(map); + ret = -EFAULT; + goto unlock; + } + num_devices++; list_add_tail(&accel_dev->list, &accel_table); map->bdf = adf_get_vf_num(accel_dev); map->id = accel_dev->accel_id; @@ -271,6 +296,7 @@ void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev, { mutex_lock(&table_lock); if (!accel_dev->is_vf || (accel_dev->is_vf && !pf)) { + id_map[accel_dev->accel_id] = 0; num_devices--; } else if (accel_dev->is_vf && pf) { struct vf_id_map *map, *next; -- cgit v0.10.2 From a6dabee6c8ba770bab7a3ec63b6a5c1059331d5c Mon Sep 17 00:00:00 2001 From: Tadeusz Struk Date: Fri, 4 Dec 2015 16:56:40 -0800 Subject: crypto: qat - add support for c62x accel type Add support for qat c62x accel type Signed-off-by: Tadeusz Struk Signed-off-by: Herbert Xu diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/qat/Kconfig index 7b3e791..33ae482 100644 --- a/drivers/crypto/qat/Kconfig +++ b/drivers/crypto/qat/Kconfig @@ -33,6 +33,17 @@ config CRYPTO_DEV_QAT_C3XXX To compile this as a module, choose M here: the module will be called qat_c3xxx. +config CRYPTO_DEV_QAT_C62X + tristate "Support for Intel(R) C62X" + depends on X86 && PCI + select CRYPTO_DEV_QAT + help + Support for Intel(R) C62x with Intel(R) QuickAssist Technology + for accelerating crypto and compression workloads. + + To compile this as a module, choose M here: the module + will be called qat_c62x. + config CRYPTO_DEV_QAT_DH895xCCVF tristate "Support for Intel(R) DH895xCC Virtual Function" depends on X86 && PCI diff --git a/drivers/crypto/qat/Makefile b/drivers/crypto/qat/Makefile index e08d660..6bc4194 100644 --- a/drivers/crypto/qat/Makefile +++ b/drivers/crypto/qat/Makefile @@ -1,4 +1,5 @@ obj-$(CONFIG_CRYPTO_DEV_QAT) += qat_common/ obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/ obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx/ +obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x/ obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf/ diff --git a/drivers/crypto/qat/qat_c62x/Makefile b/drivers/crypto/qat/qat_c62x/Makefile new file mode 100644 index 0000000..bd75ace --- /dev/null +++ b/drivers/crypto/qat/qat_c62x/Makefile @@ -0,0 +1,3 @@ +ccflags-y := -I$(src)/../qat_common +obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x.o +qat_c62x-objs := adf_drv.o adf_c62x_hw_data.o diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c new file mode 100644 index 0000000..879e04c --- /dev/null +++ b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c @@ -0,0 +1,248 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. 
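
Before the adf_dev_mgr.c hunks above, accel_id was a monotonically increasing counter (accel_id = num_devices++), so removing a device in the middle left a hole that was never reused. The new id_map turns this into a small occupancy table so freed IDs are recycled. A self-contained model of the allocator follows; ADF_MAX_DEVICES is shrunk to 8 here only to keep the demo short.

    #include <stdio.h>

    #define ADF_MAX_DEVICES 8  /* reduced from the driver's value for the demo */

    static unsigned char id_map[ADF_MAX_DEVICES];

    /* Mirrors adf_find_free_id(): first clear slot wins,
     * out-of-range value signals exhaustion. */
    static unsigned int find_free_id(void)
    {
            unsigned int i;

            for (i = 0; i < ADF_MAX_DEVICES; i++) {
                    if (!id_map[i]) {
                            id_map[i] = 1;
                            return i;
                    }
            }
            return ADF_MAX_DEVICES + 1;
    }

    static void release_id(unsigned int id)
    {
            id_map[id] = 0; /* what adf_devmgr_rm_dev() now does */
    }

    int main(void)
    {
            unsigned int a = find_free_id();        /* 0 */
            unsigned int b = find_free_id();        /* 1 */

            release_id(a);
            /* The freed slot 0 is handed out again. */
            printf("first=%u second=%u reused=%u\n", a, b, find_free_id());
            return 0;
    }
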
+ + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
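
The c62x hw_data code that follows selects its thread-to-arbiter mapping table from the detected SKU, which in turn is derived purely from the number of enabled acceleration engines (8 engines gives DEV_SKU_2, 10 gives DEV_SKU_4). A compact standalone model of that two-step lookup; the arbiter tables are abbreviated from the values in the patch:

    #include <stdio.h>
    #include <stdint.h>

    enum dev_sku_info { DEV_SKU_2, DEV_SKU_4, DEV_SKU_UNKNOWN };

    /* Per-SKU thread-to-arbiter tables; entries abbreviated from the patch. */
    static const uint32_t arb_map_8_me[]  = { 0x12222AAA, 0x11222AAA /* ... */ };
    static const uint32_t arb_map_10_me[] = { 0x12222AAA, 0x11222AAA /* ... */ };

    static enum dev_sku_info get_sku(unsigned int num_aes)
    {
            if (num_aes == 8)
                    return DEV_SKU_2;
            if (num_aes == 10)
                    return DEV_SKU_4;
            return DEV_SKU_UNKNOWN;
    }

    static const uint32_t *get_arb_mapping(enum dev_sku_info sku)
    {
            switch (sku) {
            case DEV_SKU_2:
                    return arb_map_8_me;
            case DEV_SKU_4:
                    return arb_map_10_me;
            default:
                    return NULL;    /* configuration matches no known SKU */
            }
    }

    int main(void)
    {
            const uint32_t *map = get_arb_mapping(get_sku(10));

            printf("first arbiter word: 0x%08X\n", map ? map[0] : 0);
            return 0;
    }
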
+*/ +#include +#include +#include +#include "adf_c62x_hw_data.h" + +/* Worker thread to service arbiter mappings based on dev SKUs */ +static const u32 thrd_to_arb_map_8_me_sku[] = { + 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA, + 0x11222AAA, 0x12222AAA, 0x11222AAA, 0, 0 +}; + +static const u32 thrd_to_arb_map_10_me_sku[] = { + 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA, + 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA +}; + +static struct adf_hw_device_class c62x_class = { + .name = ADF_C62X_DEVICE_NAME, + .type = DEV_C62X, + .instances = 0 +}; + +static u32 get_accel_mask(u32 fuse) +{ + return (~fuse) >> ADF_C62X_ACCELERATORS_REG_OFFSET & + ADF_C62X_ACCELERATORS_MASK; +} + +static u32 get_ae_mask(u32 fuse) +{ + return (~fuse) & ADF_C62X_ACCELENGINES_MASK; +} + +static u32 get_num_accels(struct adf_hw_device_data *self) +{ + u32 i, ctr = 0; + + if (!self || !self->accel_mask) + return 0; + + for (i = 0; i < ADF_C62X_MAX_ACCELERATORS; i++) { + if (self->accel_mask & (1 << i)) + ctr++; + } + return ctr; +} + +static u32 get_num_aes(struct adf_hw_device_data *self) +{ + u32 i, ctr = 0; + + if (!self || !self->ae_mask) + return 0; + + for (i = 0; i < ADF_C62X_MAX_ACCELENGINES; i++) { + if (self->ae_mask & (1 << i)) + ctr++; + } + return ctr; +} + +static u32 get_misc_bar_id(struct adf_hw_device_data *self) +{ + return ADF_C62X_PMISC_BAR; +} + +static u32 get_etr_bar_id(struct adf_hw_device_data *self) +{ + return ADF_C62X_ETR_BAR; +} + +static u32 get_sram_bar_id(struct adf_hw_device_data *self) +{ + return ADF_C62X_SRAM_BAR; +} + +static enum dev_sku_info get_sku(struct adf_hw_device_data *self) +{ + int aes = get_num_aes(self); + + if (aes == 8) + return DEV_SKU_2; + else if (aes == 10) + return DEV_SKU_4; + + return DEV_SKU_UNKNOWN; +} + +static void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev, + u32 const **arb_map_config) +{ + switch (accel_dev->accel_pci_dev.sku) { + case DEV_SKU_2: + *arb_map_config = thrd_to_arb_map_8_me_sku; + break; + case DEV_SKU_4: + *arb_map_config = thrd_to_arb_map_10_me_sku; + break; + default: + dev_err(&GET_DEV(accel_dev), + "The configuration doesn't match any SKU"); + *arb_map_config = NULL; + } +} + +static u32 get_pf2vf_offset(u32 i) +{ + return ADF_C62X_PF2VF_OFFSET(i); +} + +static u32 get_vintmsk_offset(u32 i) +{ + return ADF_C62X_VINTMSK_OFFSET(i); +} + +static void adf_enable_error_correction(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_device = accel_dev->hw_device; + struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_C62X_PMISC_BAR]; + void __iomem *csr = misc_bar->virt_addr; + unsigned int val, i; + + /* Enable Accel Engine error detection & correction */ + for (i = 0; i < hw_device->get_num_aes(hw_device); i++) { + val = ADF_CSR_RD(csr, ADF_C62X_AE_CTX_ENABLES(i)); + val |= ADF_C62X_ENABLE_AE_ECC_ERR; + ADF_CSR_WR(csr, ADF_C62X_AE_CTX_ENABLES(i), val); + val = ADF_CSR_RD(csr, ADF_C62X_AE_MISC_CONTROL(i)); + val |= ADF_C62X_ENABLE_AE_ECC_PARITY_CORR; + ADF_CSR_WR(csr, ADF_C62X_AE_MISC_CONTROL(i), val); + } + + /* Enable shared memory error detection & correction */ + for (i = 0; i < hw_device->get_num_accels(hw_device); i++) { + val = ADF_CSR_RD(csr, ADF_C62X_UERRSSMSH(i)); + val |= ADF_C62X_ERRSSMSH_EN; + ADF_CSR_WR(csr, ADF_C62X_UERRSSMSH(i), val); + val = ADF_CSR_RD(csr, ADF_C62X_CERRSSMSH(i)); + val |= ADF_C62X_ERRSSMSH_EN; + ADF_CSR_WR(csr, ADF_C62X_CERRSSMSH(i), val); + } +} + +static void adf_enable_ints(struct adf_accel_dev *accel_dev) +{ + void __iomem *addr; 
+ + addr = (&GET_BARS(accel_dev)[ADF_C62X_PMISC_BAR])->virt_addr; + + /* Enable bundle and misc interrupts */ + ADF_CSR_WR(addr, ADF_C62X_SMIAPF0_MASK_OFFSET, + ADF_C62X_SMIA0_MASK); + ADF_CSR_WR(addr, ADF_C62X_SMIAPF1_MASK_OFFSET, + ADF_C62X_SMIA1_MASK); +} + +static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev) +{ + return 0; +} + +void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data) +{ + hw_data->dev_class = &c62x_class; + hw_data->instance_id = c62x_class.instances++; + hw_data->num_banks = ADF_C62X_ETR_MAX_BANKS; + hw_data->num_accel = ADF_C62X_MAX_ACCELERATORS; + hw_data->num_logical_accel = 1; + hw_data->num_engines = ADF_C62X_MAX_ACCELENGINES; + hw_data->tx_rx_gap = ADF_C62X_RX_RINGS_OFFSET; + hw_data->tx_rings_mask = ADF_C62X_TX_RINGS_MASK; + hw_data->alloc_irq = adf_isr_resource_alloc; + hw_data->free_irq = adf_isr_resource_free; + hw_data->enable_error_correction = adf_enable_error_correction; + hw_data->get_accel_mask = get_accel_mask; + hw_data->get_ae_mask = get_ae_mask; + hw_data->get_num_accels = get_num_accels; + hw_data->get_num_aes = get_num_aes; + hw_data->get_sram_bar_id = get_sram_bar_id; + hw_data->get_etr_bar_id = get_etr_bar_id; + hw_data->get_misc_bar_id = get_misc_bar_id; + hw_data->get_pf2vf_offset = get_pf2vf_offset; + hw_data->get_vintmsk_offset = get_vintmsk_offset; + hw_data->get_sku = get_sku; + hw_data->fw_name = ADF_C62X_FW; + hw_data->fw_mmp_name = ADF_C62X_MMP; + hw_data->init_admin_comms = adf_init_admin_comms; + hw_data->exit_admin_comms = adf_exit_admin_comms; + hw_data->disable_iov = adf_disable_sriov; + hw_data->send_admin_init = adf_send_admin_init; + hw_data->init_arb = adf_init_arb; + hw_data->exit_arb = adf_exit_arb; + hw_data->get_arb_mapping = adf_get_arbiter_mapping; + hw_data->enable_ints = adf_enable_ints; + hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms; + hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION; +} + +void adf_clean_hw_data_c62x(struct adf_hw_device_data *hw_data) +{ + hw_data->dev_class->instances--; +} diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h new file mode 100644 index 0000000..17a8a32 --- /dev/null +++ b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h @@ -0,0 +1,84 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. 
+ * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#ifndef ADF_C62X_HW_DATA_H_ +#define ADF_C62X_HW_DATA_H_ + +/* PCIe configuration space */ +#define ADF_C62X_SRAM_BAR 0 +#define ADF_C62X_PMISC_BAR 1 +#define ADF_C62X_ETR_BAR 2 +#define ADF_C62X_RX_RINGS_OFFSET 8 +#define ADF_C62X_TX_RINGS_MASK 0xFF +#define ADF_C62X_MAX_ACCELERATORS 5 +#define ADF_C62X_MAX_ACCELENGINES 10 +#define ADF_C62X_ACCELERATORS_REG_OFFSET 16 +#define ADF_C62X_ACCELERATORS_MASK 0x1F +#define ADF_C62X_ACCELENGINES_MASK 0x3FF +#define ADF_C62X_ETR_MAX_BANKS 16 +#define ADF_C62X_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28) +#define ADF_C62X_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30) +#define ADF_C62X_SMIA0_MASK 0xFFFF +#define ADF_C62X_SMIA1_MASK 0x1 +/* Error detection and correction */ +#define ADF_C62X_AE_CTX_ENABLES(i) (i * 0x1000 + 0x20818) +#define ADF_C62X_AE_MISC_CONTROL(i) (i * 0x1000 + 0x20960) +#define ADF_C62X_ENABLE_AE_ECC_ERR BIT(28) +#define ADF_C62X_ENABLE_AE_ECC_PARITY_CORR (BIT(24) | BIT(12)) +#define ADF_C62X_UERRSSMSH(i) (i * 0x4000 + 0x18) +#define ADF_C62X_CERRSSMSH(i) (i * 0x4000 + 0x10) +#define ADF_C62X_ERRSSMSH_EN BIT(3) + +#define ADF_C62X_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04)) +#define ADF_C62X_VINTMSK_OFFSET(i) (0x3A000 + 0x200 + ((i) * 0x04)) + +/* Firmware Binary */ +#define ADF_C62X_FW "qat_c62x.bin" +#define ADF_C62X_MMP "qat_c62x_mmp.bin" + +void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data); +void adf_clean_hw_data_c62x(struct adf_hw_device_data *hw_data); +#endif diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/qat/qat_c62x/adf_drv.c new file mode 100644 index 0000000..512c565 --- /dev/null +++ b/drivers/crypto/qat/qat_c62x/adf_drv.c @@ -0,0 +1,335 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. 
+ Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "adf_c62x_hw_data.h" + +#define ADF_SYSTEM_DEVICE(device_id) \ + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} + +static const struct pci_device_id adf_pci_tbl[] = { + ADF_SYSTEM_DEVICE(ADF_C62X_PCI_DEVICE_ID), + {0,} +}; +MODULE_DEVICE_TABLE(pci, adf_pci_tbl); + +static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent); +static void adf_remove(struct pci_dev *dev); + +static struct pci_driver adf_driver = { + .id_table = adf_pci_tbl, + .name = ADF_C62X_DEVICE_NAME, + .probe = adf_probe, + .remove = adf_remove, + .sriov_configure = adf_sriov_configure, +}; + +static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev) +{ + pci_release_regions(accel_dev->accel_pci_dev.pci_dev); + pci_disable_device(accel_dev->accel_pci_dev.pci_dev); +} + +static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) +{ + struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev; + int i; + + for (i = 0; i < ADF_PCI_MAX_BARS; i++) { + struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; + + if (bar->virt_addr) + pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr); + } + + if (accel_dev->hw_device) { + switch (accel_pci_dev->pci_dev->device) { + case ADF_C62X_PCI_DEVICE_ID: + adf_clean_hw_data_c62x(accel_dev->hw_device); + break; + default: + break; + } + kfree(accel_dev->hw_device); + accel_dev->hw_device = NULL; + } + adf_cfg_dev_remove(accel_dev); + debugfs_remove(accel_dev->debugfs_dir); + adf_devmgr_rm_dev(accel_dev, NULL); +} + +static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct adf_accel_dev *accel_dev; + struct adf_accel_pci *accel_pci_dev; + struct adf_hw_device_data *hw_data; + char name[ADF_DEVICE_NAME_LENGTH]; + unsigned int i, bar_nr; + int ret, bar_mask; + + switch (ent->device) { + case ADF_C62X_PCI_DEVICE_ID: + break; + default: + dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device); + return -ENODEV; + } + + if 
(num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) { + /* If the accelerator is connected to a node with no memory + * there is no point in using the accelerator since the remote + * memory transaction will be very slow. */ + dev_err(&pdev->dev, "Invalid NUMA configuration.\n"); + return -EINVAL; + } + + accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL, + dev_to_node(&pdev->dev)); + if (!accel_dev) + return -ENOMEM; + + INIT_LIST_HEAD(&accel_dev->crypto_list); + accel_pci_dev = &accel_dev->accel_pci_dev; + accel_pci_dev->pci_dev = pdev; + + /* Add accel device to accel table. + * This should be called before adf_cleanup_accel is called */ + if (adf_devmgr_add_dev(accel_dev, NULL)) { + dev_err(&pdev->dev, "Failed to add new accelerator device.\n"); + kfree(accel_dev); + return -EFAULT; + } + + accel_dev->owner = THIS_MODULE; + /* Allocate and configure device configuration structure */ + hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL, + dev_to_node(&pdev->dev)); + if (!hw_data) { + ret = -ENOMEM; + goto out_err; + } + + accel_dev->hw_device = hw_data; + adf_init_hw_data_c62x(accel_dev->hw_device); + pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid); + pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET, + &hw_data->fuses); + + /* Get Accelerators and Accelerators Engines masks */ + hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses); + hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses); + accel_pci_dev->sku = hw_data->get_sku(hw_data); + /* If the device has no acceleration engines then ignore it. */ + if (!hw_data->accel_mask || !hw_data->ae_mask || + ((~hw_data->ae_mask) & 0x01)) { + dev_err(&pdev->dev, "No acceleration units found"); + ret = -EFAULT; + goto out_err; + } + + /* Create dev top level debugfs entry */ + snprintf(name, sizeof(name), "%s%s_%02x:%02d.%02d", + ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name, + pdev->bus->number, PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn)); + + accel_dev->debugfs_dir = debugfs_create_dir(name, NULL); + if (!accel_dev->debugfs_dir) { + dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name); + ret = -EINVAL; + goto out_err; + } + + /* Create device configuration table */ + ret = adf_cfg_dev_add(accel_dev); + if (ret) + goto out_err; + + /* enable PCI device */ + if (pci_enable_device(pdev)) { + ret = -EFAULT; + goto out_err; + } + + /* set dma identifier */ + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { + dev_err(&pdev->dev, "No usable DMA configuration\n"); + ret = -EFAULT; + goto out_err_disable; + } else { + pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + } + + } else { + pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + } + + if (pci_request_regions(pdev, ADF_C62X_DEVICE_NAME)) { + ret = -EFAULT; + goto out_err_disable; + } + + /* Read accelerator capabilities mask */ + pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET, + &hw_data->accel_capabilities_mask); + + /* Find and map all the device's BARS */ + i = 0; + bar_mask = pci_select_bars(pdev, IORESOURCE_MEM); + for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask, + ADF_PCI_MAX_BARS * 2) { + struct adf_bar *bar = &accel_pci_dev->pci_bars[i++]; + + bar->base_addr = pci_resource_start(pdev, bar_nr); + if (!bar->base_addr) + break; + bar->size = pci_resource_len(pdev, bar_nr); + bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0); + if (!bar->virt_addr) { + dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr); + ret = -EFAULT; + goto 
out_err_free_reg; + } + } + pci_set_master(pdev); + + if (adf_enable_aer(accel_dev, &adf_driver)) { + dev_err(&pdev->dev, "Failed to enable aer\n"); + ret = -EFAULT; + goto out_err_free_reg; + } + + if (pci_save_state(pdev)) { + dev_err(&pdev->dev, "Failed to save pci state\n"); + ret = -ENOMEM; + goto out_err_free_reg; + } + + ret = qat_crypto_dev_config(accel_dev); + if (ret) + goto out_err_free_reg; + + ret = adf_dev_init(accel_dev); + if (ret) + goto out_err_dev_shutdown; + + ret = adf_dev_start(accel_dev); + if (ret) + goto out_err_dev_stop; + + return ret; + +out_err_dev_stop: + adf_dev_stop(accel_dev); +out_err_dev_shutdown: + adf_dev_shutdown(accel_dev); +out_err_free_reg: + pci_release_regions(accel_pci_dev->pci_dev); +out_err_disable: + pci_disable_device(accel_pci_dev->pci_dev); +out_err: + adf_cleanup_accel(accel_dev); + kfree(accel_dev); + return ret; +} + +static void adf_remove(struct pci_dev *pdev) +{ + struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); + + if (!accel_dev) { + pr_err("QAT: Driver removal failed\n"); + return; + } + if (adf_dev_stop(accel_dev)) + dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n"); + + adf_dev_shutdown(accel_dev); + adf_disable_aer(accel_dev); + adf_cleanup_accel(accel_dev); + adf_cleanup_pci_dev(accel_dev); + kfree(accel_dev); +} + +static int __init adfdrv_init(void) +{ + request_module("intel_qat"); + + if (pci_register_driver(&adf_driver)) { + pr_err("QAT: Driver initialization failed\n"); + return -EFAULT; + } + return 0; +} + +static void __exit adfdrv_release(void) +{ + pci_unregister_driver(&adf_driver); +} + +module_init(adfdrv_init); +module_exit(adfdrv_release); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Intel"); +MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); +MODULE_VERSION(ADF_DRV_VERSION); diff --git a/drivers/crypto/qat/qat_common/adf_cfg_common.h b/drivers/crypto/qat/qat_common/adf_cfg_common.h index 39884c9..5aae6b9 100644 --- a/drivers/crypto/qat/qat_common/adf_cfg_common.h +++ b/drivers/crypto/qat/qat_common/adf_cfg_common.h @@ -72,6 +72,7 @@ enum adf_device_type { DEV_UNKNOWN = 0, DEV_DH895XCC, DEV_DH895XCCVF, + DEV_C62X, DEV_C3XXX }; -- cgit v0.10.2 From 8b206f2d666f41f0aa83dec83504801ee945d3dc Mon Sep 17 00:00:00 2001 From: Tadeusz Struk Date: Fri, 4 Dec 2015 16:56:45 -0800 Subject: crypto: qat - add support for c3xxxvf accel type Add support for c3xxx accelerator Virtual Function Signed-off-by: Tadeusz Struk Signed-off-by: Herbert Xu diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/qat/Kconfig index 33ae482..c2d1886 100644 --- a/drivers/crypto/qat/Kconfig +++ b/drivers/crypto/qat/Kconfig @@ -56,3 +56,14 @@ config CRYPTO_DEV_QAT_DH895xCCVF To compile this as a module, choose M here: the module will be called qat_dh895xccvf. + +config CRYPTO_DEV_QAT_C3XXXVF + tristate "Support for Intel(R) C3XXX Virtual Function" + depends on X86 && PCI + select CRYPTO_DEV_QAT + help + Support for Intel(R) C3xxx with Intel(R) QuickAssist Technology + Virtual Function for accelerating crypto and compression workloads. + + To compile this as a module, choose M here: the module + will be called qat_c3xxxvf. 
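A minimal configuration sketch for the new option (illustrative; CRYPTO_DEV_QAT is pulled in automatically through the select above):

    CONFIG_CRYPTO_DEV_QAT_C3XXXVF=m

With this set the build produces qat_c3xxxvf.ko, loadable with modprobe qat_c3xxxvf. The VF driver only has devices to bind to once the corresponding PF driver has created virtual functions via SR-IOV.
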
diff --git a/drivers/crypto/qat/Makefile b/drivers/crypto/qat/Makefile index 6bc4194..ac86281 100644 --- a/drivers/crypto/qat/Makefile +++ b/drivers/crypto/qat/Makefile @@ -3,3 +3,4 @@ obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/ obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx/ obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x/ obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf/ +obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf/ diff --git a/drivers/crypto/qat/qat_c3xxxvf/Makefile b/drivers/crypto/qat/qat_c3xxxvf/Makefile new file mode 100644 index 0000000..16d178e --- /dev/null +++ b/drivers/crypto/qat/qat_c3xxxvf/Makefile @@ -0,0 +1,3 @@ +ccflags-y := -I$(src)/../qat_common +obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf.o +qat_c3xxxvf-objs := adf_drv.o adf_c3xxxvf_hw_data.o diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c new file mode 100644 index 0000000..1af321c --- /dev/null +++ b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c @@ -0,0 +1,173 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2015 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2015 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ +#include +#include +#include +#include "adf_c3xxxvf_hw_data.h" + +static struct adf_hw_device_class c3xxxiov_class = { + .name = ADF_C3XXXVF_DEVICE_NAME, + .type = DEV_C3XXXVF, + .instances = 0 +}; + +static u32 get_accel_mask(u32 fuse) +{ + return ADF_C3XXXIOV_ACCELERATORS_MASK; +} + +static u32 get_ae_mask(u32 fuse) +{ + return ADF_C3XXXIOV_ACCELENGINES_MASK; +} + +static u32 get_num_accels(struct adf_hw_device_data *self) +{ + return ADF_C3XXXIOV_MAX_ACCELERATORS; +} + +static u32 get_num_aes(struct adf_hw_device_data *self) +{ + return ADF_C3XXXIOV_MAX_ACCELENGINES; +} + +static u32 get_misc_bar_id(struct adf_hw_device_data *self) +{ + return ADF_C3XXXIOV_PMISC_BAR; +} + +static u32 get_etr_bar_id(struct adf_hw_device_data *self) +{ + return ADF_C3XXXIOV_ETR_BAR; +} + +static enum dev_sku_info get_sku(struct adf_hw_device_data *self) +{ + return DEV_SKU_VF; +} + +static u32 get_pf2vf_offset(u32 i) +{ + return ADF_C3XXXIOV_PF2VF_OFFSET; +} + +static u32 get_vintmsk_offset(u32 i) +{ + return ADF_C3XXXIOV_VINTMSK_OFFSET; +} + +static int adf_vf_int_noop(struct adf_accel_dev *accel_dev) +{ + return 0; +} + +static void adf_vf_void_noop(struct adf_accel_dev *accel_dev) +{ +} + +static int adf_vf2pf_init(struct adf_accel_dev *accel_dev) +{ + u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM | + (ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT)); + + if (adf_iov_putmsg(accel_dev, msg, 0)) { + dev_err(&GET_DEV(accel_dev), + "Failed to send Init event to PF\n"); + return -EFAULT; + } + return 0; +} + +static void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev) +{ + u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM | + (ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT)); + + if (adf_iov_putmsg(accel_dev, msg, 0)) + dev_err(&GET_DEV(accel_dev), + "Failed to send Shutdown event to PF\n"); +} + +void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data) +{ + hw_data->dev_class = &c3xxxiov_class; + hw_data->num_banks = ADF_C3XXXIOV_ETR_MAX_BANKS; + hw_data->num_accel = ADF_C3XXXIOV_MAX_ACCELERATORS; + hw_data->num_logical_accel = 1; + hw_data->num_engines = ADF_C3XXXIOV_MAX_ACCELENGINES; + hw_data->tx_rx_gap = ADF_C3XXXIOV_RX_RINGS_OFFSET; + hw_data->tx_rings_mask = ADF_C3XXXIOV_TX_RINGS_MASK; + hw_data->alloc_irq = adf_vf_isr_resource_alloc; + hw_data->free_irq = adf_vf_isr_resource_free; + hw_data->enable_error_correction = adf_vf_void_noop; + hw_data->init_admin_comms = adf_vf_int_noop; + hw_data->exit_admin_comms = adf_vf_void_noop; + hw_data->send_admin_init = adf_vf2pf_init; + hw_data->init_arb = adf_vf_int_noop; + hw_data->exit_arb = adf_vf_void_noop; + hw_data->disable_iov = adf_vf2pf_shutdown; + hw_data->get_accel_mask = get_accel_mask; + hw_data->get_ae_mask = get_ae_mask; + hw_data->get_num_accels = get_num_accels; + hw_data->get_num_aes = get_num_aes; + hw_data->get_etr_bar_id = get_etr_bar_id; + hw_data->get_misc_bar_id = get_misc_bar_id; + hw_data->get_pf2vf_offset = get_pf2vf_offset; + hw_data->get_vintmsk_offset = get_vintmsk_offset; + hw_data->get_sku = get_sku; + hw_data->enable_ints = adf_vf_void_noop; + hw_data->enable_vf2pf_comms = adf_enable_vf2pf_comms; + hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION; + hw_data->dev_class->instances++; + adf_devmgr_update_class_index(hw_data); +} + +void adf_clean_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data) +{ + hw_data->dev_class->instances--; + adf_devmgr_update_class_index(hw_data); +} diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h new file 
mode 100644 index 0000000..934f216 --- /dev/null +++ b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h @@ -0,0 +1,64 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2015 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2015 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#ifndef ADF_C3XXXVF_HW_DATA_H_ +#define ADF_C3XXXVF_HW_DATA_H_ + +#define ADF_C3XXXIOV_PMISC_BAR 1 +#define ADF_C3XXXIOV_ACCELERATORS_MASK 0x1 +#define ADF_C3XXXIOV_ACCELENGINES_MASK 0x1 +#define ADF_C3XXXIOV_MAX_ACCELERATORS 1 +#define ADF_C3XXXIOV_MAX_ACCELENGINES 1 +#define ADF_C3XXXIOV_RX_RINGS_OFFSET 8 +#define ADF_C3XXXIOV_TX_RINGS_MASK 0xFF +#define ADF_C3XXXIOV_ETR_BAR 0 +#define ADF_C3XXXIOV_ETR_MAX_BANKS 1 +#define ADF_C3XXXIOV_PF2VF_OFFSET 0x200 +#define ADF_C3XXXIOV_VINTMSK_OFFSET 0x208 + +void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data); +void adf_clean_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data); +#endif diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c new file mode 100644 index 0000000..1ac4ae9 --- /dev/null +++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c @@ -0,0 +1,305 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. 
+ This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "adf_c3xxxvf_hw_data.h" + +#define ADF_SYSTEM_DEVICE(device_id) \ + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} + +static const struct pci_device_id adf_pci_tbl[] = { + ADF_SYSTEM_DEVICE(ADF_C3XXXIOV_PCI_DEVICE_ID), + {0,} +}; +MODULE_DEVICE_TABLE(pci, adf_pci_tbl); + +static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent); +static void adf_remove(struct pci_dev *dev); + +static struct pci_driver adf_driver = { + .id_table = adf_pci_tbl, + .name = ADF_C3XXXVF_DEVICE_NAME, + .probe = adf_probe, + .remove = adf_remove, +}; + +static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev) +{ + pci_release_regions(accel_dev->accel_pci_dev.pci_dev); + pci_disable_device(accel_dev->accel_pci_dev.pci_dev); +} + +static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) +{ + struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev; + struct adf_accel_dev *pf; + int i; + + for (i = 0; i < ADF_PCI_MAX_BARS; i++) { + struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; + + if (bar->virt_addr) + pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr); + } + + if (accel_dev->hw_device) { + switch (accel_pci_dev->pci_dev->device) { + case ADF_C3XXXIOV_PCI_DEVICE_ID: + adf_clean_hw_data_c3xxxiov(accel_dev->hw_device); + break; + default: + break; + } + kfree(accel_dev->hw_device); + accel_dev->hw_device = NULL; + } + adf_cfg_dev_remove(accel_dev); + debugfs_remove(accel_dev->debugfs_dir); + pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn); + adf_devmgr_rm_dev(accel_dev, pf); +} + +static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct adf_accel_dev *accel_dev; + struct adf_accel_dev *pf; + struct adf_accel_pci *accel_pci_dev; + struct adf_hw_device_data *hw_data; + char name[ADF_DEVICE_NAME_LENGTH]; + unsigned int i, bar_nr; + int ret, bar_mask; + + switch (ent->device) { + case ADF_C3XXXIOV_PCI_DEVICE_ID: + break; + default: + dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device); + return -ENODEV; + } + + accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL, + dev_to_node(&pdev->dev)); + if (!accel_dev) + return -ENOMEM; + + accel_dev->is_vf = true; + pf = adf_devmgr_pci_to_accel_dev(pdev->physfn); + accel_pci_dev = &accel_dev->accel_pci_dev; + accel_pci_dev->pci_dev = pdev; + + /* Add accel device to accel table */ + if (adf_devmgr_add_dev(accel_dev, pf)) { + dev_err(&pdev->dev, "Failed to add new accelerator device.\n"); + kfree(accel_dev); + return -EFAULT; + } + INIT_LIST_HEAD(&accel_dev->crypto_list); + + accel_dev->owner = THIS_MODULE; + /* Allocate and configure device configuration structure */ + hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL, + dev_to_node(&pdev->dev)); + if (!hw_data) { + ret = -ENOMEM; + goto out_err; + } + accel_dev->hw_device = hw_data; + adf_init_hw_data_c3xxxiov(accel_dev->hw_device); + + /* Get Accelerators and Accelerators Engines masks */ + hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses); + hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses); + accel_pci_dev->sku = hw_data->get_sku(hw_data); + + /* Create dev top level debugfs entry */ + snprintf(name, sizeof(name), "%s%s_%02x:%02d.%02d", + ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name, + pdev->bus->number, PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn)); + + accel_dev->debugfs_dir = debugfs_create_dir(name, NULL); + if 
(!accel_dev->debugfs_dir) { + dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name); + ret = -EINVAL; + goto out_err; + } + + /* Create device configuration table */ + ret = adf_cfg_dev_add(accel_dev); + if (ret) + goto out_err; + + /* enable PCI device */ + if (pci_enable_device(pdev)) { + ret = -EFAULT; + goto out_err; + } + + /* set dma identifier */ + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { + dev_err(&pdev->dev, "No usable DMA configuration\n"); + ret = -EFAULT; + goto out_err_disable; + } else { + pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + } + + } else { + pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + } + + if (pci_request_regions(pdev, ADF_C3XXXVF_DEVICE_NAME)) { + ret = -EFAULT; + goto out_err_disable; + } + + /* Find and map all the device's BARS */ + i = 0; + bar_mask = pci_select_bars(pdev, IORESOURCE_MEM); + for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask, + ADF_PCI_MAX_BARS * 2) { + struct adf_bar *bar = &accel_pci_dev->pci_bars[i++]; + + bar->base_addr = pci_resource_start(pdev, bar_nr); + if (!bar->base_addr) + break; + bar->size = pci_resource_len(pdev, bar_nr); + bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0); + if (!bar->virt_addr) { + dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr); + ret = -EFAULT; + goto out_err_free_reg; + } + } + pci_set_master(pdev); + /* Completion for VF2PF request/response message exchange */ + init_completion(&accel_dev->vf.iov_msg_completion); + + ret = qat_crypto_dev_config(accel_dev); + if (ret) + goto out_err_free_reg; + + ret = adf_dev_init(accel_dev); + if (ret) + goto out_err_dev_shutdown; + + ret = adf_dev_start(accel_dev); + if (ret) + goto out_err_dev_stop; + + return ret; + +out_err_dev_stop: + adf_dev_stop(accel_dev); +out_err_dev_shutdown: + adf_dev_shutdown(accel_dev); +out_err_free_reg: + pci_release_regions(accel_pci_dev->pci_dev); +out_err_disable: + pci_disable_device(accel_pci_dev->pci_dev); +out_err: + adf_cleanup_accel(accel_dev); + kfree(accel_dev); + return ret; +} + +static void adf_remove(struct pci_dev *pdev) +{ + struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); + + if (!accel_dev) { + pr_err("QAT: Driver removal failed\n"); + return; + } + if (adf_dev_stop(accel_dev)) + dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n"); + + adf_dev_shutdown(accel_dev); + adf_cleanup_accel(accel_dev); + adf_cleanup_pci_dev(accel_dev); + kfree(accel_dev); +} + +static int __init adfdrv_init(void) +{ + request_module("intel_qat"); + + if (pci_register_driver(&adf_driver)) { + pr_err("QAT: Driver initialization failed\n"); + return -EFAULT; + } + return 0; +} + +static void __exit adfdrv_release(void) +{ + pci_unregister_driver(&adf_driver); + adf_clean_vf_map(true); +} + +module_init(adfdrv_init); +module_exit(adfdrv_release); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Intel"); +MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); +MODULE_VERSION(ADF_DRV_VERSION); diff --git a/drivers/crypto/qat/qat_common/adf_cfg_common.h b/drivers/crypto/qat/qat_common/adf_cfg_common.h index 5aae6b9..59a5d2a 100644 --- a/drivers/crypto/qat/qat_common/adf_cfg_common.h +++ b/drivers/crypto/qat/qat_common/adf_cfg_common.h @@ -73,7 +73,8 @@ enum adf_device_type { DEV_DH895XCC, DEV_DH895XCCVF, DEV_C62X, - DEV_C3XXX + DEV_C3XXX, + DEV_C3XXXVF }; struct adf_dev_status_info { -- cgit v0.10.2 From 3771df3cff7536da19cba2b4755ad628dc4bf371 Mon Sep 17 00:00:00 2001 From: Tadeusz Struk Date: Fri, 4 Dec 
2015 16:56:51 -0800 Subject: crypto: qat - add support for c62xvf accel type Add support for c62x accelerator Virtual Function Signed-off-by: Tadeusz Struk Signed-off-by: Herbert Xu diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/qat/Kconfig index c2d1886..d275d48 100644 --- a/drivers/crypto/qat/Kconfig +++ b/drivers/crypto/qat/Kconfig @@ -67,3 +67,14 @@ config CRYPTO_DEV_QAT_C3XXXVF To compile this as a module, choose M here: the module will be called qat_c3xxxvf. + +config CRYPTO_DEV_QAT_C62XVF + tristate "Support for Intel(R) C62X Virtual Function" + depends on X86 && PCI + select CRYPTO_DEV_QAT + help + Support for Intel(R) C62x with Intel(R) QuickAssist Technology + Virtual Function for accelerating crypto and compression workloads. + + To compile this as a module, choose M here: the module + will be called qat_c62xvf. diff --git a/drivers/crypto/qat/Makefile b/drivers/crypto/qat/Makefile index ac86281..8265106 100644 --- a/drivers/crypto/qat/Makefile +++ b/drivers/crypto/qat/Makefile @@ -4,3 +4,4 @@ obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx/ obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x/ obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf/ obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf/ +obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf/ diff --git a/drivers/crypto/qat/qat_c62xvf/Makefile b/drivers/crypto/qat/qat_c62xvf/Makefile new file mode 100644 index 0000000..ecd708c --- /dev/null +++ b/drivers/crypto/qat/qat_c62xvf/Makefile @@ -0,0 +1,3 @@ +ccflags-y := -I$(src)/../qat_common +obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf.o +qat_c62xvf-objs := adf_drv.o adf_c62xvf_hw_data.o diff --git a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c new file mode 100644 index 0000000..baf4b509 --- /dev/null +++ b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c @@ -0,0 +1,173 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2015 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2015 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#include +#include +#include +#include "adf_c62xvf_hw_data.h" + +static struct adf_hw_device_class c62xiov_class = { + .name = ADF_C62XVF_DEVICE_NAME, + .type = DEV_C62XVF, + .instances = 0 +}; + +static u32 get_accel_mask(u32 fuse) +{ + return ADF_C62XIOV_ACCELERATORS_MASK; +} + +static u32 get_ae_mask(u32 fuse) +{ + return ADF_C62XIOV_ACCELENGINES_MASK; +} + +static u32 get_num_accels(struct adf_hw_device_data *self) +{ + return ADF_C62XIOV_MAX_ACCELERATORS; +} + +static u32 get_num_aes(struct adf_hw_device_data *self) +{ + return ADF_C62XIOV_MAX_ACCELENGINES; +} + +static u32 get_misc_bar_id(struct adf_hw_device_data *self) +{ + return ADF_C62XIOV_PMISC_BAR; +} + +static u32 get_etr_bar_id(struct adf_hw_device_data *self) +{ + return ADF_C62XIOV_ETR_BAR; +} + +static enum dev_sku_info get_sku(struct adf_hw_device_data *self) +{ + return DEV_SKU_VF; +} + +static u32 get_pf2vf_offset(u32 i) +{ + return ADF_C62XIOV_PF2VF_OFFSET; +} + +static u32 get_vintmsk_offset(u32 i) +{ + return ADF_C62XIOV_VINTMSK_OFFSET; +} + +static int adf_vf_int_noop(struct adf_accel_dev *accel_dev) +{ + return 0; +} + +static void adf_vf_void_noop(struct adf_accel_dev *accel_dev) +{ +} + +static int adf_vf2pf_init(struct adf_accel_dev *accel_dev) +{ + u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM | + (ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT)); + + if (adf_iov_putmsg(accel_dev, msg, 0)) { + dev_err(&GET_DEV(accel_dev), + "Failed to send Init event to PF\n"); + return -EFAULT; + } + return 0; +} + +static void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev) +{ + u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM | + (ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT)); + + if (adf_iov_putmsg(accel_dev, msg, 0)) + dev_err(&GET_DEV(accel_dev), + "Failed to send Shutdown event to PF\n"); +} + +void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data) +{ + hw_data->dev_class = &c62xiov_class; + hw_data->num_banks = ADF_C62XIOV_ETR_MAX_BANKS; + hw_data->num_accel = ADF_C62XIOV_MAX_ACCELERATORS; + hw_data->num_logical_accel = 1; + hw_data->num_engines = ADF_C62XIOV_MAX_ACCELENGINES; + hw_data->tx_rx_gap = ADF_C62XIOV_RX_RINGS_OFFSET; + hw_data->tx_rings_mask = ADF_C62XIOV_TX_RINGS_MASK; + hw_data->alloc_irq = adf_vf_isr_resource_alloc; + hw_data->free_irq = adf_vf_isr_resource_free; + hw_data->enable_error_correction = adf_vf_void_noop; + hw_data->init_admin_comms = adf_vf_int_noop; + hw_data->exit_admin_comms = adf_vf_void_noop; + hw_data->send_admin_init = adf_vf2pf_init; + hw_data->init_arb = adf_vf_int_noop; + hw_data->exit_arb = adf_vf_void_noop; + hw_data->disable_iov = adf_vf2pf_shutdown; + hw_data->get_accel_mask = get_accel_mask; + hw_data->get_ae_mask = get_ae_mask; + hw_data->get_num_accels = get_num_accels; + hw_data->get_num_aes = 
get_num_aes; + hw_data->get_etr_bar_id = get_etr_bar_id; + hw_data->get_misc_bar_id = get_misc_bar_id; + hw_data->get_pf2vf_offset = get_pf2vf_offset; + hw_data->get_vintmsk_offset = get_vintmsk_offset; + hw_data->get_sku = get_sku; + hw_data->enable_ints = adf_vf_void_noop; + hw_data->enable_vf2pf_comms = adf_enable_vf2pf_comms; + hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION; + hw_data->dev_class->instances++; + adf_devmgr_update_class_index(hw_data); +} + +void adf_clean_hw_data_c62xiov(struct adf_hw_device_data *hw_data) +{ + hw_data->dev_class->instances--; + adf_devmgr_update_class_index(hw_data); +} diff --git a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h new file mode 100644 index 0000000..a28d83e --- /dev/null +++ b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h @@ -0,0 +1,64 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2015 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2015 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ +#ifndef ADF_C62XVF_HW_DATA_H_ +#define ADF_C62XVF_HW_DATA_H_ + +#define ADF_C62XIOV_PMISC_BAR 1 +#define ADF_C62XIOV_ACCELERATORS_MASK 0x1 +#define ADF_C62XIOV_ACCELENGINES_MASK 0x1 +#define ADF_C62XIOV_MAX_ACCELERATORS 1 +#define ADF_C62XIOV_MAX_ACCELENGINES 1 +#define ADF_C62XIOV_RX_RINGS_OFFSET 8 +#define ADF_C62XIOV_TX_RINGS_MASK 0xFF +#define ADF_C62XIOV_ETR_BAR 0 +#define ADF_C62XIOV_ETR_MAX_BANKS 1 +#define ADF_C62XIOV_PF2VF_OFFSET 0x200 +#define ADF_C62XIOV_VINTMSK_OFFSET 0x208 + +void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data); +void adf_clean_hw_data_c62xiov(struct adf_hw_device_data *hw_data); +#endif diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c new file mode 100644 index 0000000..d2e4b92 --- /dev/null +++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c @@ -0,0 +1,305 @@ +/* + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + Copyright(c) 2014 Intel Corporation. + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + Contact Information: + qat-linux@intel.com + + BSD LICENSE + Copyright(c) 2014 Intel Corporation. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "adf_c62xvf_hw_data.h" + +#define ADF_SYSTEM_DEVICE(device_id) \ + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} + +static const struct pci_device_id adf_pci_tbl[] = { + ADF_SYSTEM_DEVICE(ADF_C62XIOV_PCI_DEVICE_ID), + {0,} +}; +MODULE_DEVICE_TABLE(pci, adf_pci_tbl); + +static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent); +static void adf_remove(struct pci_dev *dev); + +static struct pci_driver adf_driver = { + .id_table = adf_pci_tbl, + .name = ADF_C62XVF_DEVICE_NAME, + .probe = adf_probe, + .remove = adf_remove, +}; + +static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev) +{ + pci_release_regions(accel_dev->accel_pci_dev.pci_dev); + pci_disable_device(accel_dev->accel_pci_dev.pci_dev); +} + +static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) +{ + struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev; + struct adf_accel_dev *pf; + int i; + + for (i = 0; i < ADF_PCI_MAX_BARS; i++) { + struct adf_bar *bar = &accel_pci_dev->pci_bars[i]; + + if (bar->virt_addr) + pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr); + } + + if (accel_dev->hw_device) { + switch (accel_pci_dev->pci_dev->device) { + case ADF_C62XIOV_PCI_DEVICE_ID: + adf_clean_hw_data_c62xiov(accel_dev->hw_device); + break; + default: + break; + } + kfree(accel_dev->hw_device); + accel_dev->hw_device = NULL; + } + adf_cfg_dev_remove(accel_dev); + debugfs_remove(accel_dev->debugfs_dir); + pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn); + adf_devmgr_rm_dev(accel_dev, pf); +} + +static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct adf_accel_dev *accel_dev; + struct adf_accel_dev *pf; + struct adf_accel_pci *accel_pci_dev; + struct adf_hw_device_data *hw_data; + char name[ADF_DEVICE_NAME_LENGTH]; + unsigned int i, bar_nr; + int ret, bar_mask; + + switch (ent->device) { + case ADF_C62XIOV_PCI_DEVICE_ID: + break; + default: + dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device); + return -ENODEV; + } + + accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL, + dev_to_node(&pdev->dev)); + if (!accel_dev) + return -ENOMEM; + + accel_dev->is_vf = true; + pf = adf_devmgr_pci_to_accel_dev(pdev->physfn); + accel_pci_dev = &accel_dev->accel_pci_dev; + accel_pci_dev->pci_dev = pdev; + + /* Add accel device to accel table */ + if (adf_devmgr_add_dev(accel_dev, pf)) { + dev_err(&pdev->dev, "Failed to add new accelerator device.\n"); + kfree(accel_dev); + return -EFAULT; + } + INIT_LIST_HEAD(&accel_dev->crypto_list); + + accel_dev->owner = THIS_MODULE; + /* Allocate and configure device configuration structure */ + hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL, + dev_to_node(&pdev->dev)); + if (!hw_data) { + ret = -ENOMEM; + goto out_err; + } + accel_dev->hw_device = hw_data; + adf_init_hw_data_c62xiov(accel_dev->hw_device); + + /* Get Accelerators and Accelerators Engines masks */ + hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses); + hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses); + accel_pci_dev->sku = hw_data->get_sku(hw_data); + + /* Create dev top level debugfs entry */ + snprintf(name, sizeof(name), "%s%s_%02x:%02d.%02d", + ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name, + pdev->bus->number, PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn)); + + accel_dev->debugfs_dir = debugfs_create_dir(name, NULL); + if 
(!accel_dev->debugfs_dir) { + dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name); + ret = -EINVAL; + goto out_err; + } + + /* Create device configuration table */ + ret = adf_cfg_dev_add(accel_dev); + if (ret) + goto out_err; + + /* enable PCI device */ + if (pci_enable_device(pdev)) { + ret = -EFAULT; + goto out_err; + } + + /* set dma identifier */ + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { + dev_err(&pdev->dev, "No usable DMA configuration\n"); + ret = -EFAULT; + goto out_err_disable; + } else { + pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + } + + } else { + pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + } + + if (pci_request_regions(pdev, ADF_C62XVF_DEVICE_NAME)) { + ret = -EFAULT; + goto out_err_disable; + } + + /* Find and map all the device's BARS */ + i = 0; + bar_mask = pci_select_bars(pdev, IORESOURCE_MEM); + for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask, + ADF_PCI_MAX_BARS * 2) { + struct adf_bar *bar = &accel_pci_dev->pci_bars[i++]; + + bar->base_addr = pci_resource_start(pdev, bar_nr); + if (!bar->base_addr) + break; + bar->size = pci_resource_len(pdev, bar_nr); + bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0); + if (!bar->virt_addr) { + dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr); + ret = -EFAULT; + goto out_err_free_reg; + } + } + pci_set_master(pdev); + /* Completion for VF2PF request/response message exchange */ + init_completion(&accel_dev->vf.iov_msg_completion); + + ret = qat_crypto_dev_config(accel_dev); + if (ret) + goto out_err_free_reg; + + ret = adf_dev_init(accel_dev); + if (ret) + goto out_err_dev_shutdown; + + ret = adf_dev_start(accel_dev); + if (ret) + goto out_err_dev_stop; + + return ret; + +out_err_dev_stop: + adf_dev_stop(accel_dev); +out_err_dev_shutdown: + adf_dev_shutdown(accel_dev); +out_err_free_reg: + pci_release_regions(accel_pci_dev->pci_dev); +out_err_disable: + pci_disable_device(accel_pci_dev->pci_dev); +out_err: + adf_cleanup_accel(accel_dev); + kfree(accel_dev); + return ret; +} + +static void adf_remove(struct pci_dev *pdev) +{ + struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); + + if (!accel_dev) { + pr_err("QAT: Driver removal failed\n"); + return; + } + if (adf_dev_stop(accel_dev)) + dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n"); + + adf_dev_shutdown(accel_dev); + adf_cleanup_accel(accel_dev); + adf_cleanup_pci_dev(accel_dev); + kfree(accel_dev); +} + +static int __init adfdrv_init(void) +{ + request_module("intel_qat"); + + if (pci_register_driver(&adf_driver)) { + pr_err("QAT: Driver initialization failed\n"); + return -EFAULT; + } + return 0; +} + +static void __exit adfdrv_release(void) +{ + pci_unregister_driver(&adf_driver); + adf_clean_vf_map(true); +} + +module_init(adfdrv_init); +module_exit(adfdrv_release); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Intel"); +MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); +MODULE_VERSION(ADF_DRV_VERSION); diff --git a/drivers/crypto/qat/qat_common/adf_cfg_common.h b/drivers/crypto/qat/qat_common/adf_cfg_common.h index 59a5d2a..673dbf7 100644 --- a/drivers/crypto/qat/qat_common/adf_cfg_common.h +++ b/drivers/crypto/qat/qat_common/adf_cfg_common.h @@ -73,6 +73,7 @@ enum adf_device_type { DEV_DH895XCC, DEV_DH895XCCVF, DEV_C62X, + DEV_C62XVF, DEV_C3XXX, DEV_C3XXXVF }; -- cgit v0.10.2 From 28a4618ad14cf17009a87d8b5718132a5d4ef852 Mon Sep 17 00:00:00 2001 From: Andrzej Zaborowski Date: Sat, 5 Dec 2015 17:09:33 +0100 Subject: 
crypto: akcipher - add akcipher declarations needed by templates. Add a struct akcipher_instance and struct akcipher_spawn similar to how AEAD declares them and the macros for converting to/from crypto_instance/crypto_spawn. Also add register functions to avoid exposing crypto_akcipher_type. Signed-off-by: Andrew Zaborowski Signed-off-by: Herbert Xu diff --git a/crypto/akcipher.c b/crypto/akcipher.c index 120ec04..def301e 100644 --- a/crypto/akcipher.c +++ b/crypto/akcipher.c @@ -21,6 +21,7 @@ #include #include #include +#include #include "internal.h" #ifdef CONFIG_NET @@ -75,9 +76,17 @@ static int crypto_akcipher_init_tfm(struct crypto_tfm *tfm) return 0; } +static void crypto_akcipher_free_instance(struct crypto_instance *inst) +{ + struct akcipher_instance *akcipher = akcipher_instance(inst); + + akcipher->free(akcipher); +} + static const struct crypto_type crypto_akcipher_type = { .extsize = crypto_alg_extsize, .init_tfm = crypto_akcipher_init_tfm, + .free = crypto_akcipher_free_instance, #ifdef CONFIG_PROC_FS .show = crypto_akcipher_show, #endif @@ -88,6 +97,14 @@ static const struct crypto_type crypto_akcipher_type = { .tfmsize = offsetof(struct crypto_akcipher, base), }; +int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn, const char *name, + u32 type, u32 mask) +{ + spawn->base.frontend = &crypto_akcipher_type; + return crypto_grab_spawn(&spawn->base, name, type, mask); +} +EXPORT_SYMBOL_GPL(crypto_grab_akcipher); + struct crypto_akcipher *crypto_alloc_akcipher(const char *alg_name, u32 type, u32 mask) { @@ -95,13 +112,20 @@ struct crypto_akcipher *crypto_alloc_akcipher(const char *alg_name, u32 type, } EXPORT_SYMBOL_GPL(crypto_alloc_akcipher); -int crypto_register_akcipher(struct akcipher_alg *alg) +static void akcipher_prepare_alg(struct akcipher_alg *alg) { struct crypto_alg *base = &alg->base; base->cra_type = &crypto_akcipher_type; base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; base->cra_flags |= CRYPTO_ALG_TYPE_AKCIPHER; +} + +int crypto_register_akcipher(struct akcipher_alg *alg) +{ + struct crypto_alg *base = &alg->base; + + akcipher_prepare_alg(alg); return crypto_register_alg(base); } EXPORT_SYMBOL_GPL(crypto_register_akcipher); @@ -112,5 +136,13 @@ void crypto_unregister_akcipher(struct akcipher_alg *alg) } EXPORT_SYMBOL_GPL(crypto_unregister_akcipher); +int akcipher_register_instance(struct crypto_template *tmpl, + struct akcipher_instance *inst) +{ + akcipher_prepare_alg(&inst->alg); + return crypto_register_instance(tmpl, akcipher_crypto_instance(inst)); +} +EXPORT_SYMBOL_GPL(akcipher_register_instance); + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Generic public key cipher type"); diff --git a/include/crypto/internal/akcipher.h b/include/crypto/internal/akcipher.h index 9a2bda1..479a007 100644 --- a/include/crypto/internal/akcipher.h +++ b/include/crypto/internal/akcipher.h @@ -13,6 +13,22 @@ #ifndef _CRYPTO_AKCIPHER_INT_H #define _CRYPTO_AKCIPHER_INT_H #include +#include + +struct akcipher_instance { + void (*free)(struct akcipher_instance *inst); + union { + struct { + char head[offsetof(struct akcipher_alg, base)]; + struct crypto_instance base; + } s; + struct akcipher_alg alg; + }; +}; + +struct crypto_akcipher_spawn { + struct crypto_spawn base; +}; /* * Transform internal helpers. 
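Taken together, these declarations give a template everything it needs at instantiation time. Below is a minimal sketch of how a hypothetical template's ->create() would combine the akcipher_instance/spawn structures above with the helpers added in the next hunk. The names (example_create, the hardcoded "rsa" target) are illustrative only, and the inst->alg setup is elided:

    static int example_create(struct crypto_template *tmpl, struct rtattr **tb)
    {
    	struct akcipher_instance *inst;
    	struct crypto_akcipher_spawn *spawn;
    	int err;

    	/* Instance and spawn are allocated together; the spawn lives in
    	 * the instance context area. */
    	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
    	if (!inst)
    		return -ENOMEM;

    	spawn = akcipher_instance_ctx(inst);
    	crypto_set_akcipher_spawn(spawn, akcipher_crypto_instance(inst));

    	/* Bind the wrapped algorithm; a real template would take the
    	 * name from tb[] rather than hardcoding it. */
    	err = crypto_grab_akcipher(spawn, "rsa", 0, 0);
    	if (err)
    		goto err_free_inst;

    	/* ... fill in inst->alg (cra_name, ops, ctxsize) and inst->free
    	 * before registering ... */

    	err = akcipher_register_instance(tmpl, inst);
    	if (err)
    		goto err_drop_spawn;
    	return 0;

    err_drop_spawn:
    	crypto_drop_akcipher(spawn);
    err_free_inst:
    	kfree(inst);
    	return err;
    }

The pkcs1pad template introduced later in this series follows exactly this allocate/grab/register shape.
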
@@ -38,6 +54,56 @@ static inline const char *akcipher_alg_name(struct crypto_akcipher *tfm) return crypto_akcipher_tfm(tfm)->__crt_alg->cra_name; } +static inline struct crypto_instance *akcipher_crypto_instance( + struct akcipher_instance *inst) +{ + return container_of(&inst->alg.base, struct crypto_instance, alg); +} + +static inline struct akcipher_instance *akcipher_instance( + struct crypto_instance *inst) +{ + return container_of(&inst->alg, struct akcipher_instance, alg.base); +} + +static inline struct akcipher_instance *akcipher_alg_instance( + struct crypto_akcipher *akcipher) +{ + return akcipher_instance(crypto_tfm_alg_instance(&akcipher->base)); +} + +static inline void *akcipher_instance_ctx(struct akcipher_instance *inst) +{ + return crypto_instance_ctx(akcipher_crypto_instance(inst)); +} + +static inline void crypto_set_akcipher_spawn( + struct crypto_akcipher_spawn *spawn, + struct crypto_instance *inst) +{ + crypto_set_spawn(&spawn->base, inst); +} + +int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn, const char *name, + u32 type, u32 mask); + +static inline struct crypto_akcipher *crypto_spawn_akcipher( + struct crypto_akcipher_spawn *spawn) +{ + return crypto_spawn_tfm2(&spawn->base); +} + +static inline void crypto_drop_akcipher(struct crypto_akcipher_spawn *spawn) +{ + crypto_drop_spawn(&spawn->base); +} + +static inline struct akcipher_alg *crypto_spawn_akcipher_alg( + struct crypto_akcipher_spawn *spawn) +{ + return container_of(spawn->base.alg, struct akcipher_alg, base); +} + /** * crypto_register_akcipher() -- Register public key algorithm * @@ -57,4 +123,16 @@ int crypto_register_akcipher(struct akcipher_alg *alg); * @alg: algorithm definition */ void crypto_unregister_akcipher(struct akcipher_alg *alg); + +/** + * akcipher_register_instance() -- Register public key template instance + * + * Function registers an implementation of an asymmetric key algorithm + * created from a template. + * + * @tmpl: the template from which the algorithm was created + * @inst: the template instance + */ +int akcipher_register_instance(struct crypto_template *tmpl, + struct akcipher_instance *inst); #endif -- cgit v0.10.2 From 3d5b1ecdea6fb94f8c61554fcb2ba776a2d3d0e6 Mon Sep 17 00:00:00 2001 From: Andrzej Zaborowski Date: Sat, 5 Dec 2015 17:09:34 +0100 Subject: crypto: rsa - RSA padding algorithm This patch adds PKCS#1 v1.5 standard RSA padding as a separate template. This way an RSA cipher with padding can be obtained by instantiating "pkcs1pad(rsa)". The reason for adding this is that RSA is almost never used without this padding (or OAEP), so it will be needed for certificate work in either the kernel or userspace. I also hear that it is likely implemented by hardware RSA, in which case hardware implementations of the whole of pkcs1pad(rsa) can be provided. Signed-off-by: Andrew Zaborowski Signed-off-by: Herbert Xu diff --git a/crypto/Makefile b/crypto/Makefile index f7aba92..2acdbbd 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -40,6 +40,7 @@ rsa_generic-y := rsapubkey-asn1.o rsa_generic-y += rsaprivkey-asn1.o rsa_generic-y += rsa.o rsa_generic-y += rsa_helper.o +rsa_generic-y += rsa-pkcs1pad.o obj-$(CONFIG_CRYPTO_RSA) += rsa_generic.o cryptomgr-y := algboss.o testmgr.o diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c new file mode 100644 index 0000000..accc67d --- /dev/null +++ b/crypto/rsa-pkcs1pad.c @@ -0,0 +1,617 @@ +/* + * RSA padding templates.
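+ * + * Usage sketch (illustrative): with the akcipher template support added + * in the previous patch, callers obtain this cipher by instantiating the + * template, e.g. crypto_alloc_akcipher("pkcs1pad(rsa)", 0, 0).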
+ * + * Copyright (c) 2015 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +struct pkcs1pad_ctx { + struct crypto_akcipher *child; + + unsigned int key_size; +}; + +struct pkcs1pad_request { + struct akcipher_request child_req; + + struct scatterlist in_sg[3], out_sg[2]; + uint8_t *in_buf, *out_buf; +}; + +static int pkcs1pad_set_pub_key(struct crypto_akcipher *tfm, const void *key, + unsigned int keylen) +{ + struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); + int err, size; + + err = crypto_akcipher_set_pub_key(ctx->child, key, keylen); + + if (!err) { + /* Find out new modulus size from rsa implementation */ + size = crypto_akcipher_maxsize(ctx->child); + + ctx->key_size = size > 0 ? size : 0; + if (size <= 0) + err = size; + } + + return err; +} + +static int pkcs1pad_set_priv_key(struct crypto_akcipher *tfm, const void *key, + unsigned int keylen) +{ + struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); + int err, size; + + err = crypto_akcipher_set_priv_key(ctx->child, key, keylen); + + if (!err) { + /* Find out new modulus size from rsa implementation */ + size = crypto_akcipher_maxsize(ctx->child); + + ctx->key_size = size > 0 ? size : 0; + if (size <= 0) + err = size; + } + + return err; +} + +static int pkcs1pad_get_max_size(struct crypto_akcipher *tfm) +{ + struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); + + /* + * The maximum destination buffer size for the encrypt/sign operations + * will be the same as for RSA, even though it's smaller for + * decrypt/verify. + */ + + return ctx->key_size ?: -EINVAL; +} + +static void pkcs1pad_sg_set_buf(struct scatterlist *sg, void *buf, size_t len, + struct scatterlist *next) +{ + int nsegs = next ? 
1 : 0; + + if (offset_in_page(buf) + len <= PAGE_SIZE) { + nsegs += 1; + sg_init_table(sg, nsegs); + sg_set_buf(sg, buf, len); + } else { + nsegs += 2; + sg_init_table(sg, nsegs); + sg_set_buf(sg + 0, buf, PAGE_SIZE - offset_in_page(buf)); + sg_set_buf(sg + 1, buf + PAGE_SIZE - offset_in_page(buf), + offset_in_page(buf) + len - PAGE_SIZE); + } + + if (next) + sg_chain(sg, nsegs, next); +} + +static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); + struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); + uint8_t zeros[ctx->key_size - req_ctx->child_req.dst_len]; + + if (!err) { + if (req_ctx->child_req.dst_len < ctx->key_size) { + memset(zeros, 0, sizeof(zeros)); + sg_copy_from_buffer(req->dst, + sg_nents_for_len(req->dst, + sizeof(zeros)), + zeros, sizeof(zeros)); + } + + sg_pcopy_from_buffer(req->dst, + sg_nents_for_len(req->dst, ctx->key_size), + req_ctx->out_buf, req_ctx->child_req.dst_len, + sizeof(zeros)); + } + req->dst_len = ctx->key_size; + + kfree(req_ctx->in_buf); + kzfree(req_ctx->out_buf); + + return err; +} + +static void pkcs1pad_encrypt_sign_complete_cb( + struct crypto_async_request *child_async_req, int err) +{ + struct akcipher_request *req = child_async_req->data; + struct crypto_async_request async_req; + + if (err == -EINPROGRESS) + return; + + async_req.data = req->base.data; + async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req)); + async_req.flags = child_async_req->flags; + req->base.complete(&async_req, + pkcs1pad_encrypt_sign_complete(req, err)); +} + +static int pkcs1pad_encrypt(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); + struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); + int err; + unsigned int i, ps_end; + + if (!ctx->key_size) + return -EINVAL; + + if (req->src_len > ctx->key_size - 11) + return -EOVERFLOW; + + if (req->dst_len < ctx->key_size) { + req->dst_len = ctx->key_size; + return -EOVERFLOW; + } + + if (ctx->key_size > PAGE_SIZE) + return -ENOTSUPP; + + /* + * Replace both input and output to add the padding in the input and + * the potential missing leading zeros in the output. + */ + req_ctx->child_req.src = req_ctx->in_sg; + req_ctx->child_req.src_len = ctx->key_size - 1; + req_ctx->child_req.dst = req_ctx->out_sg; + req_ctx->child_req.dst_len = ctx->key_size; + + req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len, + (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? + GFP_KERNEL : GFP_ATOMIC); + if (!req_ctx->in_buf) + return -ENOMEM; + + ps_end = ctx->key_size - req->src_len - 2; + req_ctx->in_buf[0] = 0x02; + for (i = 1; i < ps_end; i++) + req_ctx->in_buf[i] = 1 + prandom_u32_max(255); + req_ctx->in_buf[ps_end] = 0x00; + + pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf, + ctx->key_size - 1 - req->src_len, req->src); + + req_ctx->out_buf = kmalloc(ctx->key_size, + (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
+ GFP_KERNEL : GFP_ATOMIC); + if (!req_ctx->out_buf) { + kfree(req_ctx->in_buf); + return -ENOMEM; + } + + pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf, + ctx->key_size, NULL); + + akcipher_request_set_tfm(&req_ctx->child_req, ctx->child); + akcipher_request_set_callback(&req_ctx->child_req, req->base.flags, + pkcs1pad_encrypt_sign_complete_cb, req); + + err = crypto_akcipher_encrypt(&req_ctx->child_req); + if (err != -EINPROGRESS && + (err != -EBUSY || + !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) + return pkcs1pad_encrypt_sign_complete(req, err); + + return err; +} + +static int pkcs1pad_decrypt_complete(struct akcipher_request *req, int err) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); + struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); + unsigned int pos; + + if (err == -EOVERFLOW) + /* Decrypted value had no leading 0 byte */ + err = -EINVAL; + + if (err) + goto done; + + if (req_ctx->child_req.dst_len != ctx->key_size - 1) { + err = -EINVAL; + goto done; + } + + if (req_ctx->out_buf[0] != 0x02) { + err = -EINVAL; + goto done; + } + for (pos = 1; pos < req_ctx->child_req.dst_len; pos++) + if (req_ctx->out_buf[pos] == 0x00) + break; + if (pos < 9 || pos == req_ctx->child_req.dst_len) { + err = -EINVAL; + goto done; + } + pos++; + + if (req->dst_len < req_ctx->child_req.dst_len - pos) + err = -EOVERFLOW; + req->dst_len = req_ctx->child_req.dst_len - pos; + + if (!err) + sg_copy_from_buffer(req->dst, + sg_nents_for_len(req->dst, req->dst_len), + req_ctx->out_buf + pos, req->dst_len); + +done: + kzfree(req_ctx->out_buf); + + return err; +} + +static void pkcs1pad_decrypt_complete_cb( + struct crypto_async_request *child_async_req, int err) +{ + struct akcipher_request *req = child_async_req->data; + struct crypto_async_request async_req; + + if (err == -EINPROGRESS) + return; + + async_req.data = req->base.data; + async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req)); + async_req.flags = child_async_req->flags; + req->base.complete(&async_req, pkcs1pad_decrypt_complete(req, err)); +} + +static int pkcs1pad_decrypt(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); + struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); + int err; + + if (!ctx->key_size || req->src_len != ctx->key_size) + return -EINVAL; + + if (ctx->key_size > PAGE_SIZE) + return -ENOTSUPP; + + /* Reuse input buffer, output to a new buffer */ + req_ctx->child_req.src = req->src; + req_ctx->child_req.src_len = req->src_len; + req_ctx->child_req.dst = req_ctx->out_sg; + req_ctx->child_req.dst_len = ctx->key_size - 1; + + req_ctx->out_buf = kmalloc(ctx->key_size - 1, + (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
+ GFP_KERNEL : GFP_ATOMIC); + if (!req_ctx->out_buf) + return -ENOMEM; + + pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf, + ctx->key_size - 1, NULL); + + akcipher_request_set_tfm(&req_ctx->child_req, ctx->child); + akcipher_request_set_callback(&req_ctx->child_req, req->base.flags, + pkcs1pad_decrypt_complete_cb, req); + + err = crypto_akcipher_decrypt(&req_ctx->child_req); + if (err != -EINPROGRESS && + (err != -EBUSY || + !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) + return pkcs1pad_decrypt_complete(req, err); + + return err; +} + +static int pkcs1pad_sign(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); + struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); + int err; + unsigned int ps_end; + + if (!ctx->key_size) + return -EINVAL; + + if (req->src_len > ctx->key_size - 11) + return -EOVERFLOW; + + if (req->dst_len < ctx->key_size) { + req->dst_len = ctx->key_size; + return -EOVERFLOW; + } + + if (ctx->key_size > PAGE_SIZE) + return -ENOTSUPP; + + /* + * Replace both input and output to add the padding in the input and + * the potential missing leading zeros in the output. + */ + req_ctx->child_req.src = req_ctx->in_sg; + req_ctx->child_req.src_len = ctx->key_size - 1; + req_ctx->child_req.dst = req_ctx->out_sg; + req_ctx->child_req.dst_len = ctx->key_size; + + req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len, + (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? + GFP_KERNEL : GFP_ATOMIC); + if (!req_ctx->in_buf) + return -ENOMEM; + + ps_end = ctx->key_size - req->src_len - 2; + req_ctx->in_buf[0] = 0x01; + memset(req_ctx->in_buf + 1, 0xff, ps_end - 1); + req_ctx->in_buf[ps_end] = 0x00; + + pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf, + ctx->key_size - 1 - req->src_len, req->src); + + req_ctx->out_buf = kmalloc(ctx->key_size, + (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
+ GFP_KERNEL : GFP_ATOMIC); + if (!req_ctx->out_buf) { + kfree(req_ctx->in_buf); + return -ENOMEM; + } + + pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf, + ctx->key_size, NULL); + + akcipher_request_set_tfm(&req_ctx->child_req, ctx->child); + akcipher_request_set_callback(&req_ctx->child_req, req->base.flags, + pkcs1pad_encrypt_sign_complete_cb, req); + + err = crypto_akcipher_sign(&req_ctx->child_req); + if (err != -EINPROGRESS && + (err != -EBUSY || + !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) + return pkcs1pad_encrypt_sign_complete(req, err); + + return err; +} + +static int pkcs1pad_verify_complete(struct akcipher_request *req, int err) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); + struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); + unsigned int pos; + + if (err == -EOVERFLOW) + /* Decrypted value had no leading 0 byte */ + err = -EINVAL; + + if (err) + goto done; + + if (req_ctx->child_req.dst_len != ctx->key_size - 1) { + err = -EINVAL; + goto done; + } + + if (req_ctx->out_buf[0] != 0x01) { + err = -EINVAL; + goto done; + } + for (pos = 1; pos < req_ctx->child_req.dst_len; pos++) + if (req_ctx->out_buf[pos] != 0xff) + break; + if (pos < 9 || pos == req_ctx->child_req.dst_len || + req_ctx->out_buf[pos] != 0x00) { + err = -EINVAL; + goto done; + } + pos++; + + if (req->dst_len < req_ctx->child_req.dst_len - pos) + err = -EOVERFLOW; + req->dst_len = req_ctx->child_req.dst_len - pos; + + if (!err) + sg_copy_from_buffer(req->dst, + sg_nents_for_len(req->dst, req->dst_len), + req_ctx->out_buf + pos, req->dst_len); + +done: + kzfree(req_ctx->out_buf); + + return err; +} + +static void pkcs1pad_verify_complete_cb( + struct crypto_async_request *child_async_req, int err) +{ + struct akcipher_request *req = child_async_req->data; + struct crypto_async_request async_req; + + if (err == -EINPROGRESS) + return; + + async_req.data = req->base.data; + async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req)); + async_req.flags = child_async_req->flags; + req->base.complete(&async_req, pkcs1pad_verify_complete(req, err)); +} + +/* + * The verify operation is here for completeness similar to the verification + * defined in RFC2313 section 10.2 except that block type 0 is not accepted, + * as in RFC2437. RFC2437 section 9.2 doesn't define any operation to + * retrieve the DigestInfo from a signature, instead the user is expected + * to call the sign operation to generate the expected signature and compare + * signatures instead of the message-digests. + */ +static int pkcs1pad_verify(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); + struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); + int err; + + if (!ctx->key_size || req->src_len != ctx->key_size) + return -EINVAL; + + if (ctx->key_size > PAGE_SIZE) + return -ENOTSUPP; + + /* Reuse input buffer, output to a new buffer */ + req_ctx->child_req.src = req->src; + req_ctx->child_req.src_len = req->src_len; + req_ctx->child_req.dst = req_ctx->out_sg; + req_ctx->child_req.dst_len = ctx->key_size - 1; + + req_ctx->out_buf = kmalloc(ctx->key_size - 1, + (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
+ GFP_KERNEL : GFP_ATOMIC); + if (!req_ctx->out_buf) + return -ENOMEM; + + pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf, + ctx->key_size - 1, NULL); + + akcipher_request_set_tfm(&req_ctx->child_req, ctx->child); + akcipher_request_set_callback(&req_ctx->child_req, req->base.flags, + pkcs1pad_verify_complete_cb, req); + + err = crypto_akcipher_verify(&req_ctx->child_req); + if (err != -EINPROGRESS && + (err != -EBUSY || + !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) + return pkcs1pad_verify_complete(req, err); + + return err; +} + +static int pkcs1pad_init_tfm(struct crypto_akcipher *tfm) +{ + struct akcipher_instance *inst = akcipher_alg_instance(tfm); + struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); + struct crypto_akcipher *child_tfm; + + child_tfm = crypto_spawn_akcipher(akcipher_instance_ctx(inst)); + if (IS_ERR(child_tfm)) + return PTR_ERR(child_tfm); + + ctx->child = child_tfm; + + return 0; +} + +static void pkcs1pad_exit_tfm(struct crypto_akcipher *tfm) +{ + struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); + + crypto_free_akcipher(ctx->child); +} + +static void pkcs1pad_free(struct akcipher_instance *inst) +{ + struct crypto_akcipher_spawn *spawn = akcipher_instance_ctx(inst); + + crypto_drop_akcipher(spawn); + + kfree(inst); +} + +static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb) +{ + struct crypto_attr_type *algt; + struct akcipher_instance *inst; + struct crypto_akcipher_spawn *spawn; + struct akcipher_alg *rsa_alg; + const char *rsa_alg_name; + int err; + + algt = crypto_get_attr_type(tb); + if (IS_ERR(algt)) + return PTR_ERR(algt); + + if ((algt->type ^ CRYPTO_ALG_TYPE_AKCIPHER) & algt->mask) + return -EINVAL; + + rsa_alg_name = crypto_attr_alg_name(tb[1]); + if (IS_ERR(rsa_alg_name)) + return PTR_ERR(rsa_alg_name); + + inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); + if (!inst) + return -ENOMEM; + + spawn = akcipher_instance_ctx(inst); + crypto_set_spawn(&spawn->base, akcipher_crypto_instance(inst)); + err = crypto_grab_akcipher(spawn, rsa_alg_name, 0, + crypto_requires_sync(algt->type, algt->mask)); + if (err) + goto out_free_inst; + + rsa_alg = crypto_spawn_akcipher_alg(spawn); + + err = -ENAMETOOLONG; + if (snprintf(inst->alg.base.cra_name, + CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)", + rsa_alg->base.cra_name) >= + CRYPTO_MAX_ALG_NAME || + snprintf(inst->alg.base.cra_driver_name, + CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)", + rsa_alg->base.cra_driver_name) >= + CRYPTO_MAX_ALG_NAME) + goto out_drop_alg; + + inst->alg.base.cra_flags = rsa_alg->base.cra_flags & CRYPTO_ALG_ASYNC; + inst->alg.base.cra_priority = rsa_alg->base.cra_priority; + inst->alg.base.cra_ctxsize = sizeof(struct pkcs1pad_ctx); + + inst->alg.init = pkcs1pad_init_tfm; + inst->alg.exit = pkcs1pad_exit_tfm; + + inst->alg.encrypt = pkcs1pad_encrypt; + inst->alg.decrypt = pkcs1pad_decrypt; + inst->alg.sign = pkcs1pad_sign; + inst->alg.verify = pkcs1pad_verify; + inst->alg.set_pub_key = pkcs1pad_set_pub_key; + inst->alg.set_priv_key = pkcs1pad_set_priv_key; + inst->alg.max_size = pkcs1pad_get_max_size; + inst->alg.reqsize = sizeof(struct pkcs1pad_request) + rsa_alg->reqsize; + + inst->free = pkcs1pad_free; + + err = akcipher_register_instance(tmpl, inst); + if (err) + goto out_drop_alg; + + return 0; + +out_drop_alg: + crypto_drop_akcipher(spawn); +out_free_inst: + kfree(inst); + return err; +} + +struct crypto_template rsa_pkcs1pad_tmpl = { + .name = "pkcs1pad", + .create = pkcs1pad_create, + .module = THIS_MODULE, +}; diff --git a/crypto/rsa.c b/crypto/rsa.c index 
58aad69..77d737f 100644 --- a/crypto/rsa.c +++ b/crypto/rsa.c @@ -13,6 +13,7 @@ #include #include #include +#include /* * RSAEP function [RFC3447 sec 5.1.1] @@ -315,11 +316,24 @@ static struct akcipher_alg rsa = { static int rsa_init(void) { - return crypto_register_akcipher(&rsa); + int err; + + err = crypto_register_akcipher(&rsa); + if (err) + return err; + + err = crypto_register_template(&rsa_pkcs1pad_tmpl); + if (err) { + crypto_unregister_akcipher(&rsa); + return err; + } + + return 0; } static void rsa_exit(void) { + crypto_unregister_template(&rsa_pkcs1pad_tmpl); crypto_unregister_akcipher(&rsa); } diff --git a/include/crypto/internal/rsa.h b/include/crypto/internal/rsa.h index f997e2d..c7585bd 100644 --- a/include/crypto/internal/rsa.h +++ b/include/crypto/internal/rsa.h @@ -27,4 +27,6 @@ int rsa_parse_priv_key(struct rsa_key *rsa_key, const void *key, unsigned int key_len); void rsa_free_key(struct rsa_key *rsa_key); + +extern struct crypto_template rsa_pkcs1pad_tmpl; #endif -- cgit v0.10.2 From 161151d79ff4f7ed35d4ebb0eb7727a517c34ef2 Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Sun, 6 Dec 2015 02:51:38 +0100 Subject: crypto: chacha20poly1305 - Skip encryption/decryption for 0-len If the length of the plaintext is zero, there's no need to waste cycles on encryption and decryption. Using the chacha20poly1305 construction for zero-length plaintexts is a common way of using a shared encryption key for AAD authentication. Signed-off-by: Jason A. Donenfeld Signed-off-by: Herbert Xu diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c index 99c3cce..7b6b935 100644 --- a/crypto/chacha20poly1305.c +++ b/crypto/chacha20poly1305.c @@ -130,6 +130,9 @@ static int chacha_decrypt(struct aead_request *req) struct scatterlist *src, *dst; int err; + if (rctx->cryptlen == 0) + goto skip; + chacha_iv(creq->iv, req, 1); sg_init_table(rctx->src, 2); @@ -150,6 +153,7 @@ static int chacha_decrypt(struct aead_request *req) if (err) return err; +skip: return poly_verify_tag(req); } @@ -415,6 +419,9 @@ static int chacha_encrypt(struct aead_request *req) struct scatterlist *src, *dst; int err; + if (req->cryptlen == 0) + goto skip; + chacha_iv(creq->iv, req, 1); sg_init_table(rctx->src, 2); @@ -435,6 +442,7 @@ static int chacha_encrypt(struct aead_request *req) if (err) return err; +skip: return poly_genkey(req); } -- cgit v0.10.2 From e4bc02aced3731776c8828d34e13c02ebdec3088 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Mon, 7 Dec 2015 21:36:57 +0100 Subject: crypto: drbg - constify drbg_state_ops structures The drbg_state_ops structures are never modified, so declare them as const. Done with the help of Coccinelle. 
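A note on why the constification matters: a const ops table of function pointers is emitted into read-only memory, so a stray write to it (a classic exploitation target) is caught at compile time. A minimal, self-contained userspace sketch, with made-up names that only mirror the shape of drbg_state_ops:

	#include <stdio.h>

	/* A table of function pointers, analogous to drbg_state_ops. */
	struct demo_ops {
		int (*generate)(void);
	};

	static int demo_generate(void)
	{
		return 42;
	}

	/* const: the table lands in .rodata and cannot be reassigned. */
	static const struct demo_ops demo = {
		.generate = demo_generate,
	};

	int main(void)
	{
		/* demo.generate = NULL; would fail: assignment of read-only member */
		printf("%d\n", demo.generate());
		return 0;
	}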
Signed-off-by: Julia Lawall Signed-off-by: Herbert Xu diff --git a/crypto/drbg.c b/crypto/drbg.c index a7c2314..ab6ef1d 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -626,7 +626,7 @@ out: return len; } -static struct drbg_state_ops drbg_ctr_ops = { +static const struct drbg_state_ops drbg_ctr_ops = { .update = drbg_ctr_update, .generate = drbg_ctr_generate, .crypto_init = drbg_init_sym_kernel, @@ -752,7 +752,7 @@ static int drbg_hmac_generate(struct drbg_state *drbg, return len; } -static struct drbg_state_ops drbg_hmac_ops = { +static const struct drbg_state_ops drbg_hmac_ops = { .update = drbg_hmac_update, .generate = drbg_hmac_generate, .crypto_init = drbg_init_hash_kernel, @@ -1032,7 +1032,7 @@ out: * scratchpad usage: as update and generate are used isolated, both * can use the scratchpad */ -static struct drbg_state_ops drbg_hash_ops = { +static const struct drbg_state_ops drbg_hash_ops = { .update = drbg_hash_update, .generate = drbg_hash_generate, .crypto_init = drbg_init_hash_kernel, -- cgit v0.10.2 From 75d3f811f33ac994264c2cfb4bbc5220734300a5 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 8 Dec 2015 16:23:51 +0100 Subject: crypto: sahara - fix 64-bit dma_addr_t compilation The sahara hardware uses DMA descriptors with 32-bit addresses, but dma_addr_t is variable size depending on whether we want to support any devices that use 64-bit DMA addresses in hardware. This means that the definition of the DMA descriptor structure is wrong, and we helpfully get a compiler warning about them too: drivers/crypto/sahara.c:423:372: warning: format '%x' expects argument of type 'unsigned int', but argument 4 has type 'dma_addr_t {aka long long unsigned int}' [-Wformat=] This changes the definition of the sahara_hw_desc and sahara_hw_link structures to only contain fixed-length members, which is required to make the driver work on ARM LPAE mode, and avoids most of the gcc warnings we get. Signed-off-by: Arnd Bergmann Signed-off-by: Herbert Xu diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c index cc738f3..38bf12a 100644 --- a/drivers/crypto/sahara.c +++ b/drivers/crypto/sahara.c @@ -130,18 +130,18 @@ #define SAHARA_REG_IDAR 0x20 struct sahara_hw_desc { - u32 hdr; - u32 len1; - dma_addr_t p1; - u32 len2; - dma_addr_t p2; - dma_addr_t next; + u32 hdr; + u32 len1; + u32 p1; + u32 len2; + u32 p2; + u32 next; }; struct sahara_hw_link { - u32 len; - dma_addr_t p; - dma_addr_t next; + u32 len; + u32 p; + u32 next; }; struct sahara_ctx { -- cgit v0.10.2 From d4b98f20f46454c153201d55c8336f769b7eb195 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 8 Dec 2015 16:24:22 +0100 Subject: crypto: sahara - fix debug output for 64-bit dma_addr_t The sahara_dump_descriptors and sahara_dump_links functions attempt to print a dma_addr_t value with a 0x%08x format string, which produces a warning when dma_addr_t is 64-bit wide: drivers/crypto/sahara.c:419:120: warning: format '%x' expects argument of type 'unsigned int', but argument 5 has type 'dma_addr_t {aka long long unsigned int}' [-Wformat=] This changes the code to use the %pad format string that is meant for dma_addr_t, which avoids the warning and gives us the correct output in all configurations. 
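Worth spelling out the %pad idiom used above: it is the printk extension for dma_addr_t and, unlike %x, it takes a pointer to the value, so the same format string is correct whether dma_addr_t is 32 or 64 bits wide. A short kernel-style sketch (the function name and message are hypothetical, not from the driver):

	#include <linux/device.h>
	#include <linux/dma-mapping.h>

	static void demo_print_dma(struct device *dev, dma_addr_t addr)
	{
		/* dev_dbg(dev, "0x%08x\n", addr); warns when dma_addr_t is 64-bit */
		dev_dbg(dev, "buffer at %pad\n", &addr);	/* note: &addr, not addr */
	}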
Signed-off-by: Arnd Bergmann Signed-off-by: Herbert Xu diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c index 38bf12a..6c4f91c 100644 --- a/drivers/crypto/sahara.c +++ b/drivers/crypto/sahara.c @@ -416,8 +416,8 @@ static void sahara_dump_descriptors(struct sahara_dev *dev) return; for (i = 0; i < SAHARA_MAX_HW_DESC; i++) { - dev_dbg(dev->device, "Descriptor (%d) (0x%08x):\n", - i, dev->hw_phys_desc[i]); + dev_dbg(dev->device, "Descriptor (%d) (%pad):\n", + i, &dev->hw_phys_desc[i]); dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr); dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1); dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1); @@ -437,8 +437,8 @@ static void sahara_dump_links(struct sahara_dev *dev) return; for (i = 0; i < SAHARA_MAX_HW_LINK; i++) { - dev_dbg(dev->device, "Link (%d) (0x%08x):\n", - i, dev->hw_phys_link[i]); + dev_dbg(dev->device, "Link (%d) (%pad):\n", + i, &dev->hw_phys_link[i]); dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len); dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p); dev_dbg(dev->device, "\tnext = 0x%08x\n", -- cgit v0.10.2 From e14a1f1e36e76580c1094694f2f666ac582b34df Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Tue, 8 Dec 2015 09:00:23 +0100 Subject: crypto: akcipher - fix typos in include/crypto/akcipher.h Fix numerous spelling errors in include/crypto/akcipher.h Signed-off-by: LABBE Corentin Signed-off-by: Herbert Xu diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h index 45cd5b3..354de15 100644 --- a/include/crypto/akcipher.h +++ b/include/crypto/akcipher.h @@ -21,9 +21,9 @@ * @src: Source data * @dst: Destination data * @src_len: Size of the input buffer - * @dst_len: Size of the output buffer. It needs to be at leaset + * @dst_len: Size of the output buffer. It needs to be at least * as big as the expected result depending on the operation - * After operation it will be updated with the acctual size of the + * After operation it will be updated with the actual size of the * result. * In case of error where the dst sgl size was insufficient, * it will be updated to the size required for the operation. @@ -59,7 +59,7 @@ struct crypto_akcipher { * algorithm. In case of error, where the dst_len was insufficient, * the req->dst_len will be updated to the size required for the * operation - * @encrypt: Function performs an encrytp operation as defined by public key + * @encrypt: Function performs an encrypt operation as defined by public key * algorithm. In case of error, where the dst_len was insufficient, * the req->dst_len will be updated to the size required for the * operation @@ -73,7 +73,7 @@ struct crypto_akcipher { * @set_priv_key: Function invokes the algorithm specific set private key * function, which knows how to decode and interpret * the BER encoded private key - * @max_size: Function returns dest buffer size reqired for a given key. + * @max_size: Function returns dest buffer size required for a given key. * @init: Initialize the cryptographic transformation object. * This function is used to initialize the cryptographic * transformation object.
This function is called only once at @@ -232,7 +232,7 @@ static inline void akcipher_request_set_callback(struct akcipher_request *req, } /** - * akcipher_request_set_crypt() -- Sets reqest parameters + * akcipher_request_set_crypt() -- Sets request parameters * * Sets parameters required by crypto operation * -- cgit v0.10.2 From 06cabd755a97325cd5f53e73153fd10f4ea51cb7 Mon Sep 17 00:00:00 2001 From: Harvijay Saini Date: Wed, 9 Dec 2015 11:59:45 -0800 Subject: crypto: qat - ring returning retry even though ring has BW When many threads submit multiple requests they get blocked until all responses are processed, which prevents them from submitting more requests even though there is space on the rings. To fix this we need to decrement the inflight counter earlier, in the callback. Signed-off-by: Harvijay Saini Signed-off-by: Tadeusz Struk Signed-off-by: Herbert Xu diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c index eff00cd..a6f3766 100644 --- a/drivers/crypto/qat/qat_common/adf_transport.c +++ b/drivers/crypto/qat/qat_common/adf_transport.c @@ -141,6 +141,7 @@ static int adf_handle_response(struct adf_etr_ring_data *ring) while (*msg != ADF_RING_EMPTY_SIG) { ring->callback((uint32_t *)msg); + atomic_dec(ring->inflights); *msg = ADF_RING_EMPTY_SIG; ring->head = adf_modulo(ring->head + ADF_MSG_SIZE_TO_BYTES(ring->msg_size), @@ -148,12 +149,10 @@ static int adf_handle_response(struct adf_etr_ring_data *ring) msg_counter++; msg = (uint32_t *)((uintptr_t)ring->base_addr + ring->head); } - if (msg_counter > 0) { + if (msg_counter > 0) WRITE_CSR_RING_HEAD(ring->bank->csr_addr, ring->bank->bank_number, ring->ring_number, ring->head); - atomic_sub(msg_counter, ring->inflights); - } return 0; } -- cgit v0.10.2 From 75910d375ecb2079b3418f8b304fd775916025e2 Mon Sep 17 00:00:00 2001 From: Tadeusz Struk Date: Wed, 9 Dec 2015 11:59:47 -0800 Subject: crypto: qat - select PCI_IOV when VFs are enabled Fix i386-randconfig-x004-12092241. PCI_IOV needs to be selected when VFs are enabled. Reported-by: Signed-off-by: Tadeusz Struk Signed-off-by: Herbert Xu diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/qat/Kconfig index d275d48..85b44e5 100644 --- a/drivers/crypto/qat/Kconfig +++ b/drivers/crypto/qat/Kconfig @@ -60,6 +60,7 @@ config CRYPTO_DEV_QAT_DH895xCCVF config CRYPTO_DEV_QAT_C3XXXVF tristate "Support for Intel(R) C3XXX Virtual Function" depends on X86 && PCI + select PCI_IOV select CRYPTO_DEV_QAT help Support for Intel(R) C3xxx with Intel(R) QuickAssist Technology @@ -71,6 +72,7 @@ config CRYPTO_DEV_QAT_C62XVF tristate "Support for Intel(R) C62X Virtual Function" depends on X86 && PCI + select PCI_IOV select CRYPTO_DEV_QAT help Support for Intel(R) C62x with Intel(R) QuickAssist Technology -- cgit v0.10.2 From 1f6a9ab05ab500a033c1d5490c3a6bd993bfd602 Mon Sep 17 00:00:00 2001 From: Paul Gortmaker Date: Wed, 9 Dec 2015 15:05:28 -0500 Subject: crypto: asymmetric_keys - signature.c does not need This file does not contain any modular related function calls. So get rid of module.h since it drags in a lot of other headers and adds to the preprocessing load. It does export some symbols though, so we'll need to ensure it has export.h present instead. Cc: Herbert Xu Cc: "David S.
Miller" Cc: linux-crypto@vger.kernel.org Signed-off-by: Paul Gortmaker Signed-off-by: Herbert Xu diff --git a/crypto/asymmetric_keys/signature.c b/crypto/asymmetric_keys/signature.c index 9441240..004d5fc 100644 --- a/crypto/asymmetric_keys/signature.c +++ b/crypto/asymmetric_keys/signature.c @@ -13,7 +13,7 @@ #define pr_fmt(fmt) "SIG: "fmt #include -#include +#include #include #include #include "asymmetric_keys.h" -- cgit v0.10.2 From 40c18a59d226c94901a2789027c8678fcfcac098 Mon Sep 17 00:00:00 2001 From: Zeng Xin Date: Wed, 9 Dec 2015 21:38:30 -0800 Subject: crypto: qat - enable VF irq after guest exits ungracefully The VF bundle interrupt is not triggered any more in the case when guest is shut down with sample app running. Need to clear the flag interrupt bit when restarting to fix this irrecoverable state. Signed-off-by: Zeng Xin Signed-off-by: Tadeusz Struk Signed-off-by: Herbert Xu diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c index a6f3766..57d2622 100644 --- a/drivers/crypto/qat/qat_common/adf_transport.c +++ b/drivers/crypto/qat/qat_common/adf_transport.c @@ -345,7 +345,7 @@ void adf_response_handler(uintptr_t bank_addr) { struct adf_etr_bank_data *bank = (void *)bank_addr; - /* Handle all the responses nad reenable IRQs */ + /* Handle all the responses and reenable IRQs */ adf_ring_response_handler(bank); WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number, bank->irq_mask); @@ -434,6 +434,7 @@ static int adf_init_bank(struct adf_accel_dev *accel_dev, goto err; } + WRITE_CSR_INT_FLAG(csr_addr, bank_num, ADF_BANK_INT_FLAG_CLEAR_MASK); WRITE_CSR_INT_SRCSEL(csr_addr, bank_num); return 0; err: diff --git a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h index 6ad7e4e..80e02a2 100644 --- a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h +++ b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h @@ -50,12 +50,14 @@ #include "adf_accel_devices.h" #define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL #define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL +#define ADF_BANK_INT_FLAG_CLEAR_MASK 0xFFFF #define ADF_RING_CSR_RING_CONFIG 0x000 #define ADF_RING_CSR_RING_LBASE 0x040 #define ADF_RING_CSR_RING_UBASE 0x080 #define ADF_RING_CSR_RING_HEAD 0x0C0 #define ADF_RING_CSR_RING_TAIL 0x100 #define ADF_RING_CSR_E_STAT 0x14C +#define ADF_RING_CSR_INT_FLAG 0x170 #define ADF_RING_CSR_INT_SRCSEL 0x174 #define ADF_RING_CSR_INT_SRCSEL_2 0x178 #define ADF_RING_CSR_INT_COL_EN 0x17C @@ -144,6 +146,9 @@ do { \ #define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ ADF_RING_CSR_RING_TAIL + (ring << 2), value) +#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_INT_FLAG, value) #define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \ do { \ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ -- cgit v0.10.2 From 50eca2561beb0b7d3d43287b9e7cf8a39301c346 Mon Sep 17 00:00:00 2001 From: Sam Protsenko Date: Thu, 10 Dec 2015 18:06:59 +0200 Subject: crypto: omap-des - Fix "schedule while atomic" bug When using DES module the next bug appears: BUG: scheduling while atomic: kworker/0:1/63/0x00000102 With backtrace as follows: <<<<<<<<<<<<<<<<<<<<<<<<<<<<<< cut here >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> [] (dump_backtrace) from [] (show_stack+0x18/0x1c) [] (show_stack) from [] (dump_stack+0x84/0xc4) [] (dump_stack) 
from [] (__schedule_bug+0x54/0x64) [] (__schedule_bug) from [] (__schedule+0x4ac/0x53c) [] (__schedule) from [] (schedule+0x38/0x88) [] (schedule) from [] (rpm_resume+0x158/0x59c) [] (rpm_resume) from [] (__pm_runtime_resume+0x54/0x6c) [] (__pm_runtime_resume) from [] (omap_des_handle_queue+0x154/0x7bc) [] (omap_des_handle_queue) from [] (omap_des_crypt+0x58/0xbc) [] (omap_des_crypt) from [] (omap_des_cbc_decrypt+0x14/0x18) [] (omap_des_cbc_decrypt) from [] (authenc_verify_ahash_done+0xe0/0xe8) [] (authenc_verify_ahash_done) from [] (omap_sham_finish_req+0x58/0xa8) [] (omap_sham_finish_req) from [] (omap_sham_done_task+0x1c0/0x1e0) [] (omap_sham_done_task) from [] (tasklet_action+0x80/0x118) [] (tasklet_action) from [] (__do_softirq+0x11c/0x260) [] (__do_softirq) from [] (irq_exit+0xc0/0xfc) [] (irq_exit) from [] (handle_IRQ+0x4c/0x98) [] (handle_IRQ) from [] (gic_handle_irq+0x34/0x64) [] (gic_handle_irq) from [] (__irq_svc+0x40/0x70) <<<<<<<<<<<<<<<<<<<<<<<<<<<<<< cut here >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> The insight for the fix was taken from the drivers/crypto/omap-sham.c driver. All credits for this patch go to Grygorii Strashko. Signed-off-by: Sam Protsenko Signed-off-by: Herbert Xu diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c index 0a70e46..db1ef28 100644 --- a/drivers/crypto/omap-des.c +++ b/drivers/crypto/omap-des.c @@ -1086,6 +1086,7 @@ static int omap_des_probe(struct platform_device *pdev) dd->phys_base = res->start; pm_runtime_enable(dev); + pm_runtime_irq_safe(dev); err = pm_runtime_get_sync(dev); if (err < 0) { pm_runtime_put_noidle(dev); -- cgit v0.10.2 From 81b312f11dfd7466462d94667f0a8df14a412d2a Mon Sep 17 00:00:00 2001 From: Tadeusz Struk Date: Thu, 10 Dec 2015 14:23:03 -0800 Subject: crypto: qat - uint8_t is not large enough for accel_id accel_id has to be large enough to hold ADF_MAX_DEVICES + 1 (which is > 1025) so uint8_t is too small. Reported-by: Dan Carpenter Signed-off-by: Tadeusz Struk Signed-off-by: Herbert Xu diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h index 4d78ec0..f96d427 100644 --- a/drivers/crypto/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h @@ -251,6 +251,6 @@ struct adf_accel_dev { } vf; }; bool is_vf; - uint8_t accel_id; + u32 accel_id; } __packed; #endif diff --git a/drivers/crypto/qat/qat_common/adf_cfg_common.h b/drivers/crypto/qat/qat_common/adf_cfg_common.h index 673dbf7..8c4f657 100644 --- a/drivers/crypto/qat/qat_common/adf_cfg_common.h +++ b/drivers/crypto/qat/qat_common/adf_cfg_common.h @@ -80,8 +80,8 @@ enum adf_device_type { struct adf_dev_status_info { enum adf_device_type type; - uint8_t accel_id; - uint8_t instance_id; + u32 accel_id; + u32 instance_id; uint8_t num_ae; uint8_t num_accel; uint8_t num_logical_accel; -- cgit v0.10.2 From 6333ed8f26cf77311088d2e2b7cf16d8480bcbb2 Mon Sep 17 00:00:00 2001 From: Haren Myneni Date: Sun, 13 Dec 2015 03:30:41 -0800 Subject: crypto: nx-842 - Mask XERS0 bit in return value The NX842 coprocessor sets the 3rd bit in the CR register with XER[S0], which has nothing to do with the NX request. Since this bit can be set alongside other valid return status bits, mask it out. One of the other bits (INITIATED, BUSY or REJECTED) will be returned for any given NX request.
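The failure mode is easy to reproduce: when an unrelated bit can be ORed into a status word, an exact-match switch misclassifies otherwise valid codes unless that bit is masked first. A toy userspace sketch, with made-up DEMO_* constants that only mirror the ICSWX_* values:

	#include <stdio.h>

	#define DEMO_INITIATED	0x8
	#define DEMO_BUSY	0x4
	#define DEMO_REJECTED	0x2
	#define DEMO_XERS0	0x1	/* unrelated bit that may ride along */

	static void handle(int ret)
	{
		ret &= ~DEMO_XERS0;	/* mask before matching exact codes */

		switch (ret) {
		case DEMO_INITIATED:
			puts("initiated");
			break;
		case DEMO_BUSY:
			puts("busy");
			break;
		case DEMO_REJECTED:
			puts("rejected");
			break;
		}
	}

	int main(void)
	{
		handle(DEMO_BUSY | DEMO_XERS0);	/* prints "busy", not an error */
		return 0;
	}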
Signed-off-by: Haren Myneni Signed-off-by: Herbert Xu diff --git a/arch/powerpc/include/asm/icswx.h b/arch/powerpc/include/asm/icswx.h index 9f8402b..27e588f 100644 --- a/arch/powerpc/include/asm/icswx.h +++ b/arch/powerpc/include/asm/icswx.h @@ -164,6 +164,7 @@ struct coprocessor_request_block { #define ICSWX_INITIATED (0x8) #define ICSWX_BUSY (0x4) #define ICSWX_REJECTED (0x2) +#define ICSWX_XERS0 (0x1) /* undefined or set from XERSO. */ static inline int icswx(__be32 ccw, struct coprocessor_request_block *crb) { diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c index 87f7a0f..1710f80 100644 --- a/drivers/crypto/nx/nx-842-powernv.c +++ b/drivers/crypto/nx/nx-842-powernv.c @@ -442,6 +442,14 @@ static int nx842_powernv_function(const unsigned char *in, unsigned int inlen, (unsigned int)ccw, (unsigned int)be32_to_cpu(crb->ccw)); + /* + * NX842 coprocessor sets 3rd bit in CR register with XER[S0]. + * XER[S0] is the integer summary overflow bit which is nothing + * to do NX. Since this bit can be set with other return values, + * mask this bit. + */ + ret &= ~ICSWX_XERS0; + switch (ret) { case ICSWX_INITIATED: ret = wait_for_csb(wmem, csb); @@ -454,10 +462,6 @@ static int nx842_powernv_function(const unsigned char *in, unsigned int inlen, pr_err_ratelimited("ICSWX rejected\n"); ret = -EPROTO; break; - default: - pr_err_ratelimited("Invalid ICSWX return code %x\n", ret); - ret = -EPROTO; - break; } if (!ret) -- cgit v0.10.2 From 871b88a8419c8606ad2ebf752cf414c7627cad1a Mon Sep 17 00:00:00 2001 From: Rahul Pathak Date: Mon, 14 Dec 2015 08:44:19 +0000 Subject: crypto: atmel-sha - Removed unused variable "err" Removed unused variable "err" and directly return "0" Reported by coccicheck - ./drivers/crypto/atmel-sha.c:758:5-8: Unneeded variable: "err". Return "0" on line 766 Signed-off-by: Rahul Pathak Signed-off-by: Herbert Xu diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c index 660d8c0..20de861 100644 --- a/drivers/crypto/atmel-sha.c +++ b/drivers/crypto/atmel-sha.c @@ -755,7 +755,6 @@ static int atmel_sha_finish(struct ahash_request *req) { struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); struct atmel_sha_dev *dd = ctx->dd; - int err = 0; if (ctx->digcnt[0] || ctx->digcnt[1]) atmel_sha_copy_ready_hash(req); @@ -763,7 +762,7 @@ static int atmel_sha_finish(struct ahash_request *req) dev_dbg(dd->dev, "digcnt: 0x%llx 0x%llx, bufcnt: %d\n", ctx->digcnt[1], ctx->digcnt[0], ctx->bufcnt); - return err; + return 0; } static void atmel_sha_finish_req(struct ahash_request *req, int err) -- cgit v0.10.2 From 16f080aaadcb912c9a47c8603a38ccad87da38ea Mon Sep 17 00:00:00 2001 From: Rahul Pathak Date: Mon, 14 Dec 2015 08:45:23 +0000 Subject: crypto: omap - Removed unused variable "err" Removed unused variable "err" and directly return "0" Reported by coccicheck - ./drivers/crypto/omap-aes.c:542:5-8: Unneeded variable: "err". Return "0" on line 551 ./drivers/crypto/omap-des.c:530:5-8: Unneeded variable: "err". 
Return "0" on line 539 Signed-off-by: Rahul Pathak Signed-off-by: Herbert Xu diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index eba2314..dd355bd 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -539,8 +539,6 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err) static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd) { - int err = 0; - pr_debug("total: %d\n", dd->total); omap_aes_dma_stop(dd); @@ -548,7 +546,7 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd) dmaengine_terminate_all(dd->dma_lch_in); dmaengine_terminate_all(dd->dma_lch_out); - return err; + return 0; } static int omap_aes_check_aligned(struct scatterlist *sg, int total) diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c index db1ef28..dd7b93f 100644 --- a/drivers/crypto/omap-des.c +++ b/drivers/crypto/omap-des.c @@ -527,8 +527,6 @@ static void omap_des_finish_req(struct omap_des_dev *dd, int err) static int omap_des_crypt_dma_stop(struct omap_des_dev *dd) { - int err = 0; - pr_debug("total: %d\n", dd->total); omap_des_dma_stop(dd); @@ -536,7 +534,7 @@ static int omap_des_crypt_dma_stop(struct omap_des_dev *dd) dmaengine_terminate_all(dd->dma_lch_in); dmaengine_terminate_all(dd->dma_lch_out); - return err; + return 0; } static int omap_des_copy_needed(struct scatterlist *sg) -- cgit v0.10.2 From 5319216dcfee14886abb2b7090e8fcf2e2d8a611 Mon Sep 17 00:00:00 2001 From: Andrzej Zaborowski Date: Sat, 12 Dec 2015 00:03:51 -0500 Subject: crypto: rsa-pkcs1pad - don't allocate buffer on stack Avoid the s390 compile "warning: 'pkcs1pad_encrypt_sign_complete' uses dynamic stack allocation" reported by kbuild test robot. Don't use a flat zero-filled buffer, instead zero the contents of the SGL. Signed-off-by: Andrew Zaborowski Signed-off-by: Herbert Xu diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c index accc67d..50f5c97 100644 --- a/crypto/rsa-pkcs1pad.c +++ b/crypto/rsa-pkcs1pad.c @@ -110,21 +110,32 @@ static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err) struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); - uint8_t zeros[ctx->key_size - req_ctx->child_req.dst_len]; + size_t pad_len = ctx->key_size - req_ctx->child_req.dst_len; + size_t chunk_len, pad_left; + struct sg_mapping_iter miter; if (!err) { - if (req_ctx->child_req.dst_len < ctx->key_size) { - memset(zeros, 0, sizeof(zeros)); - sg_copy_from_buffer(req->dst, - sg_nents_for_len(req->dst, - sizeof(zeros)), - zeros, sizeof(zeros)); + if (pad_len) { + sg_miter_start(&miter, req->dst, + sg_nents_for_len(req->dst, pad_len), + SG_MITER_ATOMIC | SG_MITER_TO_SG); + + pad_left = pad_len; + while (pad_left) { + sg_miter_next(&miter); + + chunk_len = min(miter.length, pad_left); + memset(miter.addr, 0, chunk_len); + pad_left -= chunk_len; + } + + sg_miter_stop(&miter); } sg_pcopy_from_buffer(req->dst, sg_nents_for_len(req->dst, ctx->key_size), req_ctx->out_buf, req_ctx->child_req.dst_len, - sizeof(zeros)); + pad_len); } req->dst_len = ctx->key_size; -- cgit v0.10.2 From 46621e6f84908bb9df494d5fc2bcd145041810d5 Mon Sep 17 00:00:00 2001 From: Pingchao Yang Date: Wed, 16 Dec 2015 10:39:40 +0800 Subject: crypto: qat - fix CTX_ENABLES bits shift direction issue AE CTX bits should be 8-15 in CTX_ENABLES, so the mask value 0xff should be left shifted 0x8. 
Reported-by: Dan Carpenter Signed-off-by: Yang Pingchao Signed-off-by: Herbert Xu diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c index 45c1739..81bd1fe 100644 --- a/drivers/crypto/qat/qat_common/qat_hal.c +++ b/drivers/crypto/qat/qat_common/qat_hal.c @@ -418,7 +418,7 @@ int qat_hal_check_ae_active(struct icp_qat_fw_loader_handle *handle, qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &enable); qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &active); - if ((enable & (0xff >> CE_ENABLE_BITPOS)) || + if ((enable & (0xff << CE_ENABLE_BITPOS)) || (active & (1 << ACS_ABO_BITPOS))) return 1; else -- cgit v0.10.2 From 51d77dddfff4a554744bfa0e67cf571319635645 Mon Sep 17 00:00:00 2001 From: Pingchao Yang Date: Wed, 16 Dec 2015 14:09:50 +0800 Subject: crypto: qat - fix some timeout tests Change the timeout condition since the times value would be -1 after running MAX_RETRY_TIMES. Reported-by: Dan Carpenter Signed-off-by: Yang Pingchao Signed-off-by: Herbert Xu diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c index 81bd1fe..0ac0ba8 100644 --- a/drivers/crypto/qat/qat_common/qat_hal.c +++ b/drivers/crypto/qat/qat_common/qat_hal.c @@ -186,7 +186,7 @@ static int qat_hal_wait_cycles(struct icp_qat_fw_loader_handle *handle, if (elapsed_cycles >= 8 && !(csr & (1 << ACS_ABO_BITPOS))) return 0; } - if (!times) { + if (times < 0) { pr_err("QAT: wait_num_cycles time out\n"); return -EFAULT; } -- cgit v0.10.2 From 0c4c78de0417ced1da92351a3013e631860ea576 Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Thu, 17 Dec 2015 13:45:39 +0100 Subject: crypto: hash - add zero length message hash for shax and md5 Some crypto drivers cannot process an empty data message and must return a precalculated hash for md5/sha1/sha224/sha256. This patch adds those precalculated hashes in include/crypto.
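A sketch of how a driver is expected to consume these constants, loosely modeled on the conversions later in this series (demo_sha256_digest is a hypothetical name, not from any of the patches):

	#include <crypto/sha.h>
	#include <linux/errno.h>
	#include <linux/string.h>

	/* Engines that cannot hash zero bytes return the known digest instead. */
	static int demo_sha256_digest(const u8 *data, unsigned int len, u8 *out)
	{
		if (len == 0) {
			memcpy(out, sha256_zero_message_hash, SHA256_DIGEST_SIZE);
			return 0;
		}

		return -EOPNOTSUPP;	/* defer to the real hardware path */
	}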
Signed-off-by: LABBE Corentin Signed-off-by: Herbert Xu diff --git a/crypto/md5.c b/crypto/md5.c index 33d17e9..2355a7c 100644 --- a/crypto/md5.c +++ b/crypto/md5.c @@ -24,6 +24,12 @@ #include #include +const u8 md5_zero_message_hash[MD5_DIGEST_SIZE] = { + 0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04, + 0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e, +}; +EXPORT_SYMBOL_GPL(md5_zero_message_hash); + /* XXX: this stuff can be optimized */ static inline void le32_to_cpu_array(u32 *buf, unsigned int words) { diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c index 39e3acc..6877cbb 100644 --- a/crypto/sha1_generic.c +++ b/crypto/sha1_generic.c @@ -26,6 +26,13 @@ #include #include +const u8 sha1_zero_message_hash[SHA1_DIGEST_SIZE] = { + 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, + 0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, + 0xaf, 0xd8, 0x07, 0x09 +}; +EXPORT_SYMBOL_GPL(sha1_zero_message_hash); + static void sha1_generic_block_fn(struct sha1_state *sst, u8 const *src, int blocks) { diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c index 7843116..8f9c47e 100644 --- a/crypto/sha256_generic.c +++ b/crypto/sha256_generic.c @@ -27,6 +27,22 @@ #include #include +const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE] = { + 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47, + 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2, + 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4, + 0x2f +}; +EXPORT_SYMBOL_GPL(sha224_zero_message_hash); + +const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE] = { + 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, + 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, + 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, + 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55 +}; +EXPORT_SYMBOL_GPL(sha256_zero_message_hash); + static inline u32 Ch(u32 x, u32 y, u32 z) { return z ^ (x & (y ^ z)); diff --git a/include/crypto/md5.h b/include/crypto/md5.h index 146af82..327deac 100644 --- a/include/crypto/md5.h +++ b/include/crypto/md5.h @@ -13,6 +13,8 @@ #define MD5_H2 0x98badcfeUL #define MD5_H3 0x10325476UL +extern const u8 md5_zero_message_hash[MD5_DIGEST_SIZE]; + struct md5_state { u32 hash[MD5_HASH_WORDS]; u32 block[MD5_BLOCK_WORDS]; diff --git a/include/crypto/sha.h b/include/crypto/sha.h index dd7905a..c94d3eb 100644 --- a/include/crypto/sha.h +++ b/include/crypto/sha.h @@ -64,6 +64,12 @@ #define SHA512_H6 0x1f83d9abfb41bd6bULL #define SHA512_H7 0x5be0cd19137e2179ULL +extern const u8 sha1_zero_message_hash[SHA1_DIGEST_SIZE]; + +extern const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE]; + +extern const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE]; + struct sha1_state { u32 state[SHA1_DIGEST_SIZE / 4]; u64 count; -- cgit v0.10.2 From 8054b8005b9f5ae172dac79a49370bfcc027a0f5 Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Thu, 17 Dec 2015 13:45:40 +0100 Subject: crypto: n2 - Use precalculated hash from headers Precalculated hashes for the empty message are now present in the hash headers. This patch just uses them.
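The shared constants are easy to sanity-check from userspace; for instance, this small program (assuming OpenSSL's libcrypto is available) prints the same e3b0c442... digest that sha256_zero_message_hash carries:

	#include <stdio.h>
	#include <openssl/sha.h>

	int main(void)
	{
		unsigned char md[SHA256_DIGEST_LENGTH];
		int i;

		SHA256((const unsigned char *)"", 0, md);	/* empty message */
		for (i = 0; i < SHA256_DIGEST_LENGTH; i++)
			printf("%02x", md[i]);
		putchar('\n');
		return 0;
	}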
Signed-off-by: LABBE Corentin Signed-off-by: Herbert Xu diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 95dccde..3a3a1e7 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -194,6 +194,9 @@ config CRYPTO_DEV_NIAGARA2 select CRYPTO_DES select CRYPTO_BLKCIPHER select CRYPTO_HASH + select CRYPTO_MD5 + select CRYPTO_SHA1 + select CRYPTO_SHA256 depends on SPARC64 help Each core of a Niagara2 processor contains a Stream diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index 739a786..b85a7a7 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c @@ -241,7 +241,7 @@ static inline bool n2_should_run_async(struct spu_queue *qp, int this_len) struct n2_ahash_alg { struct list_head entry; - const char *hash_zero; + const u8 *hash_zero; const u32 *hash_init; u8 hw_op_hashsz; u8 digest_size; @@ -1267,7 +1267,7 @@ static LIST_HEAD(cipher_algs); struct n2_hash_tmpl { const char *name; - const char *hash_zero; + const u8 *hash_zero; const u32 *hash_init; u8 hw_op_hashsz; u8 digest_size; @@ -1276,40 +1276,19 @@ struct n2_hash_tmpl { u8 hmac_type; }; -static const char md5_zero[MD5_DIGEST_SIZE] = { - 0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04, - 0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e, -}; static const u32 md5_init[MD5_HASH_WORDS] = { cpu_to_le32(MD5_H0), cpu_to_le32(MD5_H1), cpu_to_le32(MD5_H2), cpu_to_le32(MD5_H3), }; -static const char sha1_zero[SHA1_DIGEST_SIZE] = { - 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32, - 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8, - 0x07, 0x09 -}; static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, }; -static const char sha256_zero[SHA256_DIGEST_SIZE] = { - 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, - 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, - 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, - 0x1b, 0x78, 0x52, 0xb8, 0x55 -}; static const u32 sha256_init[SHA256_DIGEST_SIZE / 4] = { SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7, }; -static const char sha224_zero[SHA224_DIGEST_SIZE] = { - 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47, - 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2, - 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4, - 0x2f -}; static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = { SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3, SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7, @@ -1317,7 +1296,7 @@ static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = { static const struct n2_hash_tmpl hash_tmpls[] = { { .name = "md5", - .hash_zero = md5_zero, + .hash_zero = md5_zero_message_hash, .hash_init = md5_init, .auth_type = AUTH_TYPE_MD5, .hmac_type = AUTH_TYPE_HMAC_MD5, @@ -1325,7 +1304,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = { .digest_size = MD5_DIGEST_SIZE, .block_size = MD5_HMAC_BLOCK_SIZE }, { .name = "sha1", - .hash_zero = sha1_zero, + .hash_zero = sha1_zero_message_hash, .hash_init = sha1_init, .auth_type = AUTH_TYPE_SHA1, .hmac_type = AUTH_TYPE_HMAC_SHA1, @@ -1333,7 +1312,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = { .digest_size = SHA1_DIGEST_SIZE, .block_size = SHA1_BLOCK_SIZE }, { .name = "sha256", - .hash_zero = sha256_zero, + .hash_zero = sha256_zero_message_hash, .hash_init = sha256_init, .auth_type = AUTH_TYPE_SHA256, .hmac_type = AUTH_TYPE_HMAC_SHA256, @@ -1341,7 +1320,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = { .digest_size = SHA256_DIGEST_SIZE, .block_size = SHA256_BLOCK_SIZE }, { .name = "sha224", - 
.hash_zero = sha224_zero, + .hash_zero = sha224_zero_message_hash, .hash_init = sha224_init, .auth_type = AUTH_TYPE_SHA256, .hmac_type = AUTH_TYPE_RESERVED, -- cgit v0.10.2 From bdd75064d2b2068007f4fc5e26ac726e8617a090 Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Thu, 17 Dec 2015 13:45:41 +0100 Subject: crypto: ccp - Use precalculated hash from headers Precalculated hashes for the empty message are now present in the hash headers. This patch just uses them. Signed-off-by: LABBE Corentin Tested-by: Tom Lendacky Acked-by: Tom Lendacky Signed-off-by: Herbert Xu diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig index 3cd8481..6e37845 100644 --- a/drivers/crypto/ccp/Kconfig +++ b/drivers/crypto/ccp/Kconfig @@ -3,6 +3,8 @@ config CRYPTO_DEV_CCP_DD depends on CRYPTO_DEV_CCP default m select HW_RANDOM + select CRYPTO_SHA1 + select CRYPTO_SHA256 help Provides the interface to use the AMD Cryptographic Coprocessor which can be used to offload encryption operations such as SHA, diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index c6e883b..6613aee 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -152,32 +152,6 @@ static const __be32 ccp_sha256_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = { cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7), }; -/* The CCP cannot perform zero-length sha operations so the caller - * is required to buffer data for the final operation. However, a - * sha operation for a message with a total length of zero is valid - * so known values are required to supply the result. - */ -static const u8 ccp_sha1_zero[CCP_SHA_CTXSIZE] = { - 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, - 0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, - 0xaf, 0xd8, 0x07, 0x09, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -}; - -static const u8 ccp_sha224_zero[CCP_SHA_CTXSIZE] = { - 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, - 0x47, 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, - 0x15, 0xa2, 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, - 0xc5, 0xb3, 0xe4, 0x2f, 0x00, 0x00, 0x00, 0x00, -}; - -static const u8 ccp_sha256_zero[CCP_SHA_CTXSIZE] = { - 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, - 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, - 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, - 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55, -}; - static u32 ccp_addr_lo(struct ccp_dma_info *info) { return lower_32_bits(info->address + info->offset); @@ -1391,18 +1365,21 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) if (sha->msg_bits) return -EINVAL; - /* A sha operation for a message with a total length of zero, - * return known result. + /* The CCP cannot perform zero-length sha operations so the + * caller is required to buffer data for the final operation. + * However, a sha operation for a message with a total length + * of zero is valid so known values are required to supply + * the result.
*/ switch (sha->type) { case CCP_SHA_TYPE_1: - sha_zero = ccp_sha1_zero; + sha_zero = sha1_zero_message_hash; break; case CCP_SHA_TYPE_224: - sha_zero = ccp_sha224_zero; + sha_zero = sha224_zero_message_hash; break; case CCP_SHA_TYPE_256: - sha_zero = ccp_sha256_zero; + sha_zero = sha256_zero_message_hash; break; default: return -EINVAL; -- cgit v0.10.2 From e4ae86e22edc1647f0ee0c2ab12ec05fce17d782 Mon Sep 17 00:00:00 2001 From: LABBE Corentin Date: Thu, 17 Dec 2015 13:45:42 +0100 Subject: crypto: ux500 - Use precalculated hash from headers Precalculated hashes for the empty message are now present in the hash headers. This patch just uses them. Signed-off-by: LABBE Corentin Signed-off-by: Herbert Xu diff --git a/drivers/crypto/ux500/Kconfig b/drivers/crypto/ux500/Kconfig index 3079644..0e338bf 100644 --- a/drivers/crypto/ux500/Kconfig +++ b/drivers/crypto/ux500/Kconfig @@ -18,6 +18,8 @@ config CRYPTO_DEV_UX500_HASH tristate "UX500 crypto driver for HASH block" depends on CRYPTO_DEV_UX500 select CRYPTO_HASH + select CRYPTO_SHA1 + select CRYPTO_SHA256 help This selects the hash driver for the UX500_HASH hardware. Depends on UX500/STM DMA if running in DMA mode. diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c index f47d112..d6fdc58 100644 --- a/drivers/crypto/ux500/hash/hash_core.c +++ b/drivers/crypto/ux500/hash/hash_core.c @@ -41,22 +41,6 @@ static int hash_mode; module_param(hash_mode, int, 0); MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1"); -/** - * Pre-calculated empty message digests. - */ -static const u8 zero_message_hash_sha1[SHA1_DIGEST_SIZE] = { - 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, - 0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, - 0xaf, 0xd8, 0x07, 0x09 -}; - -static const u8 zero_message_hash_sha256[SHA256_DIGEST_SIZE] = { - 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, - 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, - 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, - 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55 -}; - /* HMAC-SHA1, no key */ static const u8 zero_message_hmac_sha1[SHA1_DIGEST_SIZE] = { 0xfb, 0xdb, 0x1d, 0x1b, 0x18, 0xaa, 0x6c, 0x08, @@ -242,13 +226,13 @@ static int get_empty_message_digest( if (HASH_OPER_MODE_HASH == ctx->config.oper_mode) { if (HASH_ALGO_SHA1 == ctx->config.algorithm) { - memcpy(zero_hash, &zero_message_hash_sha1[0], + memcpy(zero_hash, &sha1_zero_message_hash[0], SHA1_DIGEST_SIZE); *zero_hash_size = SHA1_DIGEST_SIZE; *zero_digest = true; } else if (HASH_ALGO_SHA256 == ctx->config.algorithm) { - memcpy(zero_hash, &zero_message_hash_sha256[0], + memcpy(zero_hash, &sha256_zero_message_hash[0], SHA256_DIGEST_SIZE); *zero_hash_size = SHA256_DIGEST_SIZE; *zero_digest = true; -- cgit v0.10.2 From dc2c632272d5614b77359b24f77c0a80ddc3a962 Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Sat, 19 Dec 2015 16:22:51 +0800 Subject: crypto: qat - use list_for_each_entry* Use list_for_each_entry*() instead of list_for_each*() to simplify the code.
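For readers less used to the idiom, the conversion trades an explicit struct list_head iterator plus list_entry() for a typed cursor. A kernel-style before/after sketch with a made-up struct:

	#include <linux/list.h>

	struct demo_item {
		int id;
		struct list_head list;
	};

	static struct demo_item *demo_find(struct list_head *head, int id)
	{
		struct demo_item *item;

		/*
		 * Before:
		 *	struct list_head *itr;
		 *	list_for_each(itr, head) {
		 *		struct demo_item *item =
		 *			list_entry(itr, struct demo_item, list);
		 *		...
		 *	}
		 */

		/* After: list_for_each_entry() hides the container_of() step. */
		list_for_each_entry(item, head, list)
			if (item->id == id)
				return item;

		return NULL;
	}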
Signed-off-by: Geliang Tang Signed-off-by: Herbert Xu diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c index 2e6d0c5..5c897e6 100644 --- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c +++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c @@ -255,12 +255,9 @@ out: static int adf_ctl_is_device_in_use(int id) { - struct list_head *itr, *head = adf_devmgr_get_head(); - - list_for_each(itr, head) { - struct adf_accel_dev *dev = - list_entry(itr, struct adf_accel_dev, list); + struct adf_accel_dev *dev; + list_for_each_entry(dev, adf_devmgr_get_head(), list) { if (id == dev->accel_id || id == ADF_CFG_ALL_DEVICES) { if (adf_devmgr_in_reset(dev) || adf_dev_in_use(dev)) { dev_info(&GET_DEV(dev), @@ -275,12 +272,10 @@ static int adf_ctl_is_device_in_use(int id) static int adf_ctl_stop_devices(uint32_t id) { - struct list_head *itr, *head = adf_devmgr_get_head(); + struct adf_accel_dev *accel_dev; int ret = 0; - list_for_each_prev(itr, head) { - struct adf_accel_dev *accel_dev = - list_entry(itr, struct adf_accel_dev, list); + list_for_each_entry_reverse(accel_dev, adf_devmgr_get_head(), list) { if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) { if (!adf_dev_started(accel_dev)) continue; diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c index 4d0c65b..3852d31 100644 --- a/drivers/crypto/qat/qat_common/qat_crypto.c +++ b/drivers/crypto/qat/qat_common/qat_crypto.c @@ -67,13 +67,10 @@ void qat_crypto_put_instance(struct qat_crypto_instance *inst) static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev) { - struct qat_crypto_instance *inst; - struct list_head *list_ptr, *tmp; + struct qat_crypto_instance *inst, *tmp; int i; - list_for_each_safe(list_ptr, tmp, &accel_dev->crypto_list) { - inst = list_entry(list_ptr, struct qat_crypto_instance, list); - + list_for_each_entry_safe(inst, tmp, &accel_dev->crypto_list, list) { for (i = 0; i < atomic_read(&inst->refctr); i++) qat_crypto_put_instance(inst); @@ -89,7 +86,7 @@ static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev) if (inst->pke_rx) adf_remove_ring(inst->pke_rx); - list_del(list_ptr); + list_del(&inst->list); kfree(inst); } return 0; @@ -97,17 +94,13 @@ static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev) struct qat_crypto_instance *qat_crypto_get_instance_node(int node) { - struct adf_accel_dev *accel_dev = NULL; - struct qat_crypto_instance *inst = NULL; - struct list_head *itr; + struct adf_accel_dev *accel_dev = NULL, *tmp_dev; + struct qat_crypto_instance *inst = NULL, *tmp_inst; unsigned long best = ~0; - list_for_each(itr, adf_devmgr_get_head()) { - struct adf_accel_dev *tmp_dev; + list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) { unsigned long ctr; - tmp_dev = list_entry(itr, struct adf_accel_dev, list); - if ((node == dev_to_node(&GET_DEV(tmp_dev)) || dev_to_node(&GET_DEV(tmp_dev)) < 0) && adf_dev_started(tmp_dev) && @@ -123,10 +116,7 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node) if (!accel_dev) { pr_info("QAT: Could not find a device on node %d\n", node); /* Get any started device */ - list_for_each(itr, adf_devmgr_get_head()) { - struct adf_accel_dev *tmp_dev; - - tmp_dev = list_entry(itr, struct adf_accel_dev, list); + list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) { if (adf_dev_started(tmp_dev) && !list_empty(&tmp_dev->crypto_list)) { accel_dev = tmp_dev; @@ -139,11 +129,9 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int 
node) return NULL; best = ~0; - list_for_each(itr, &accel_dev->crypto_list) { - struct qat_crypto_instance *tmp_inst; + list_for_each_entry(tmp_inst, &accel_dev->crypto_list, list) { unsigned long ctr; - tmp_inst = list_entry(itr, struct qat_crypto_instance, list); ctr = atomic_read(&tmp_inst->refctr); if (best > ctr) { inst = tmp_inst; -- cgit v0.10.2 From 973e209d743e22e9d514cd3378281608845456f6 Mon Sep 17 00:00:00 2001 From: Leilei Zhao Date: Thu, 17 Dec 2015 17:48:32 +0100 Subject: crypto: atmel-aes - add new version Add the new version of atmel-aes available on SAMA5D2 devices. Signed-off-by: Leilei Zhao Signed-off-by: Herbert Xu diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index bfb1f79..854e281 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -1258,6 +1258,11 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd) /* keep only major version number */ switch (dd->hw_version & 0xff0) { + case 0x500: + dd->caps.has_dualbuff = 1; + dd->caps.has_cfb64 = 1; + dd->caps.max_burst_size = 4; + break; case 0x200: dd->caps.has_dualbuff = 1; dd->caps.has_cfb64 = 1; -- cgit v0.10.2 From c0b28d8c32a42c6a54b5ddee06ce078d99f93e51 Mon Sep 17 00:00:00 2001 From: Cyrille Pitchen Date: Thu, 17 Dec 2015 17:48:33 +0100 Subject: crypto: atmel-aes - constify value argument of atmel_aes_write_n() atmel_aes_write_n() should not modify its value argument. Signed-off-by: Cyrille Pitchen Signed-off-by: Herbert Xu diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index 854e281..1d3997a 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -231,7 +231,7 @@ static void atmel_aes_read_n(struct atmel_aes_dev *dd, u32 offset, } static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset, - u32 *value, int count) + const u32 *value, int count) { for (; count--; value++, offset += 4) atmel_aes_write(dd, offset, *value); -- cgit v0.10.2 From 88efd9a999e032d68bccfb80c4b7446a217686de Mon Sep 17 00:00:00 2001 From: Cyrille Pitchen Date: Thu, 17 Dec 2015 17:48:34 +0100 Subject: crypto: atmel-aes - change algorithm priorities Increase the algorithm priorities so that hardware acceleration is now preferred over software computation: the "aes-generic" driver uses 100 as its priority.
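To make the mechanism concrete (an editor's sketch, not part of the patch; "example-cbc-aes" is a made-up driver name): when several implementations register the same .cra_name, the crypto core resolves a lookup by generic name to the registered algorithm with the highest .cra_priority, so any value above 100 beats the aes-generic fallback.

#include <linux/crypto.h>

static struct crypto_alg example_alg = {
	.cra_name	 = "cbc(aes)",		/* generic name used for lookups */
	.cra_driver_name = "example-cbc-aes",	/* unique name of this implementation */
	.cra_priority	 = 300,			/* > 100, so preferred over aes-generic */
	/* remaining fields omitted from this sketch */
};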
Signed-off-by: Cyrille Pitchen Signed-off-by: Herbert Xu diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index 1d3997a..e545636 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -43,6 +43,8 @@ #include #include "atmel-aes-regs.h" +#define ATMEL_AES_PRIORITY 300 + #define CFB8_BLOCK_SIZE 1 #define CFB16_BLOCK_SIZE 2 #define CFB32_BLOCK_SIZE 4 @@ -960,7 +962,7 @@ static struct crypto_alg aes_algs[] = { { .cra_name = "ecb(aes)", .cra_driver_name = "atmel-ecb-aes", - .cra_priority = 100, + .cra_priority = ATMEL_AES_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct atmel_aes_ctx), @@ -980,7 +982,7 @@ static struct crypto_alg aes_algs[] = { { .cra_name = "cbc(aes)", .cra_driver_name = "atmel-cbc-aes", - .cra_priority = 100, + .cra_priority = ATMEL_AES_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct atmel_aes_ctx), @@ -1001,7 +1003,7 @@ static struct crypto_alg aes_algs[] = { { .cra_name = "ofb(aes)", .cra_driver_name = "atmel-ofb-aes", - .cra_priority = 100, + .cra_priority = ATMEL_AES_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct atmel_aes_ctx), @@ -1022,7 +1024,7 @@ static struct crypto_alg aes_algs[] = { { .cra_name = "cfb(aes)", .cra_driver_name = "atmel-cfb-aes", - .cra_priority = 100, + .cra_priority = ATMEL_AES_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct atmel_aes_ctx), @@ -1043,7 +1045,7 @@ static struct crypto_alg aes_algs[] = { { .cra_name = "cfb32(aes)", .cra_driver_name = "atmel-cfb32-aes", - .cra_priority = 100, + .cra_priority = ATMEL_AES_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = CFB32_BLOCK_SIZE, .cra_ctxsize = sizeof(struct atmel_aes_ctx), @@ -1064,7 +1066,7 @@ static struct crypto_alg aes_algs[] = { { .cra_name = "cfb16(aes)", .cra_driver_name = "atmel-cfb16-aes", - .cra_priority = 100, + .cra_priority = ATMEL_AES_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = CFB16_BLOCK_SIZE, .cra_ctxsize = sizeof(struct atmel_aes_ctx), @@ -1085,7 +1087,7 @@ static struct crypto_alg aes_algs[] = { { .cra_name = "cfb8(aes)", .cra_driver_name = "atmel-cfb8-aes", - .cra_priority = 100, + .cra_priority = ATMEL_AES_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = CFB8_BLOCK_SIZE, .cra_ctxsize = sizeof(struct atmel_aes_ctx), @@ -1106,7 +1108,7 @@ static struct crypto_alg aes_algs[] = { { .cra_name = "ctr(aes)", .cra_driver_name = "atmel-ctr-aes", - .cra_priority = 100, + .cra_priority = ATMEL_AES_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct atmel_aes_ctx), @@ -1129,7 +1131,7 @@ static struct crypto_alg aes_algs[] = { static struct crypto_alg aes_cfb64_alg = { .cra_name = "cfb64(aes)", .cra_driver_name = "atmel-cfb64-aes", - .cra_priority = 100, + .cra_priority = ATMEL_AES_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = CFB64_BLOCK_SIZE, .cra_ctxsize = sizeof(struct atmel_aes_ctx), -- cgit v0.10.2 From 924a8bc79636692d43179ff2b08f25f11e82ab7f Mon Sep 17 00:00:00 2001 From: Cyrille Pitchen Date: Thu, 17 Dec 2015 17:48:35 +0100 Subject: crypto: atmel-aes - fix unregistration 
order of crypto algorithms This patch fixes atmel_aes_unregister_algs() so that crypto algorithms are unregistered in the reverse order from which they were registered by atmel_aes_register_algs(). Signed-off-by: Cyrille Pitchen Signed-off-by: Herbert Xu diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index e545636..e948bf2 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -1219,10 +1219,11 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd) { int i; - for (i = 0; i < ARRAY_SIZE(aes_algs); i++) - crypto_unregister_alg(&aes_algs[i]); if (dd->caps.has_cfb64) crypto_unregister_alg(&aes_cfb64_alg); + + for (i = 0; i < ARRAY_SIZE(aes_algs); i++) + crypto_unregister_alg(&aes_algs[i]); } static int atmel_aes_register_algs(struct atmel_aes_dev *dd) -- cgit v0.10.2 From 7f1cbbc5145fcd6f357cf8932e1d5410777f4d65 Mon Sep 17 00:00:00 2001 From: Cyrille Pitchen Date: Thu, 17 Dec 2015 17:48:36 +0100 Subject: crypto: atmel-aes - remove unused header includes Hash headers have nothing to do with AES block ciphers. Signed-off-by: Cyrille Pitchen Signed-off-by: Herbert Xu diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index e948bf2..7d1b055 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -33,12 +33,9 @@ #include #include #include -#include #include #include #include -#include -#include #include #include #include "atmel-aes-regs.h" -- cgit v0.10.2 From aab0a39b281e9a26ae991f0737f2fa6f710d0dab Mon Sep 17 00:00:00 2001 From: Cyrille Pitchen Date: Thu, 17 Dec 2015 17:48:37 +0100 Subject: crypto: atmel-aes - propagate error from atmel_aes_hw_version_init() Before this patch, atmel_aes_hw_version_init() had no return value. However it calls atmel_aes_hw_init(), which may fail. So check the return code of atmel_aes_hw_init() and propagate the error if needed. Signed-off-by: Cyrille Pitchen Signed-off-by: Herbert Xu diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index 7d1b055..9a9e1b8 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -280,16 +280,20 @@ static inline unsigned int atmel_aes_get_version(struct atmel_aes_dev *dd) return atmel_aes_read(dd, AES_HW_VERSION) & 0x00000fff; } -static void atmel_aes_hw_version_init(struct atmel_aes_dev *dd) +static int atmel_aes_hw_version_init(struct atmel_aes_dev *dd) { - atmel_aes_hw_init(dd); + int err; + + err = atmel_aes_hw_init(dd); + if (err) + return err; dd->hw_version = atmel_aes_get_version(dd); - dev_info(dd->dev, - "version: 0x%x\n", dd->hw_version); + dev_info(dd->dev, "version: 0x%x\n", dd->hw_version); clk_disable_unprepare(dd->iclk); + return 0; } static void atmel_aes_finish_req(struct atmel_aes_dev *dd, int err) @@ -1407,7 +1411,9 @@ static int atmel_aes_probe(struct platform_device *pdev) goto res_err; } - atmel_aes_hw_version_init(aes_dd); + err = atmel_aes_hw_version_init(aes_dd); + if (err) + goto res_err; atmel_aes_get_cap(aes_dd); -- cgit v0.10.2 From cdfab4a7e3f16224e3a52dfe990a9bd870363690 Mon Sep 17 00:00:00 2001 From: Cyrille Pitchen Date: Thu, 17 Dec 2015 17:48:38 +0100 Subject: crypto: atmel-aes - change atmel_aes_write_ctrl() signature This patch changes the signature of atmel_aes_write_ctrl() to make it more generic. This will be used by future patches when implementing new block cipher modes such as GCM. In particular, atmel_aes_hw_init() is now called outside atmel_aes_write_ctrl(): this makes it possible to call atmel_aes_write_ctrl() many times while still initializing the hardware only once.
Indeed, GCM support will require updating the Mode Register and the IV while processing a single request. Signed-off-by: Cyrille Pitchen Signed-off-by: Herbert Xu diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index 9a9e1b8..ccb8a83 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -496,16 +496,11 @@ static int atmel_aes_crypt_dma_start(struct atmel_aes_dev *dd) return err; } -static int atmel_aes_write_ctrl(struct atmel_aes_dev *dd) +static void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma, + const u32 *iv) { - int err; u32 valcr = 0, valmr = 0; - err = atmel_aes_hw_init(dd); - - if (err) - return err; - /* MR register must be set before IV registers */ if (dd->ctx->keylen == AES_KEYSIZE_128) valmr |= AES_MR_KEYSIZE_128; @@ -539,7 +534,7 @@ static int atmel_aes_write_ctrl(struct atmel_aes_dev *dd) if (dd->flags & AES_FLAGS_ENCRYPT) valmr |= AES_MR_CYPHER_ENC; - if (dd->total > ATMEL_AES_DMA_THRESHOLD) { + if (use_dma) { valmr |= AES_MR_SMOD_IDATAR0; if (dd->caps.has_dualbuff) valmr |= AES_MR_DUALBUFF; @@ -555,11 +550,9 @@ static int atmel_aes_write_ctrl(struct atmel_aes_dev *dd) if (((dd->flags & AES_FLAGS_CBC) || (dd->flags & AES_FLAGS_CFB) || (dd->flags & AES_FLAGS_OFB) || (dd->flags & AES_FLAGS_CTR)) && - dd->req->info) { - atmel_aes_write_n(dd, AES_IVR(0), dd->req->info, 4); + iv) { + atmel_aes_write_n(dd, AES_IVR(0), iv, 4); } - - return 0; } static int atmel_aes_handle_queue(struct atmel_aes_dev *dd, @@ -570,6 +563,7 @@ static int atmel_aes_handle_queue(struct atmel_aes_dev *dd, struct atmel_aes_reqctx *rctx; unsigned long flags; int err, ret = 0; + bool use_dma; spin_lock_irqsave(&dd->lock, flags); if (req) @@ -607,9 +601,11 @@ static int atmel_aes_handle_queue(struct atmel_aes_dev *dd, dd->ctx = ctx; ctx->dd = dd; - err = atmel_aes_write_ctrl(dd); + err = atmel_aes_hw_init(dd); if (!err) { - if (dd->total > ATMEL_AES_DMA_THRESHOLD) + use_dma = (dd->total > ATMEL_AES_DMA_THRESHOLD); + atmel_aes_write_ctrl(dd, use_dma, req->info); + if (use_dma) err = atmel_aes_crypt_dma_start(dd); else err = atmel_aes_crypt_cpu_start(dd); -- cgit v0.10.2 From ccbf72980b22b04f0b7afb2b82eb699ee7da635c Mon Sep 17 00:00:00 2001 From: Cyrille Pitchen Date: Thu, 17 Dec 2015 17:48:39 +0100 Subject: crypto: atmel-aes - make crypto request queue management more generic This patch changes atmel_aes_handle_queue() to make it more generic. The function argument is now a pointer to struct crypto_async_request, which is the common base of struct ablkcipher_request and struct aead_request. This patch also introduces struct atmel_aes_base_ctx, which will be the common base of all transformation contexts. Hence the very same queue will be used to manage both block cipher and AEAD requests (such as gcm and authenc implemented in further patches).
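The layout that makes this possible can be sketched as follows (a simplified, editor-provided view; the real kernel structures carry more members): every request type embeds a struct crypto_async_request as its first member, so a pointer to the base can be queued generically and converted back to the concrete type with container_of() once dequeued.

#include <linux/kernel.h>
#include <linux/list.h>

/* Simplified stand-ins for the kernel structures discussed above. */
struct crypto_async_request {
	struct list_head list;
	/* completion callback, tfm pointer, flags, ... */
};

struct ablkcipher_request {
	struct crypto_async_request base;	/* common base, first member */
	/* block cipher specific fields ... */
};

struct aead_request {
	struct crypto_async_request base;	/* same embedding trick */
	/* AEAD specific fields ... */
};

/* Recover the concrete request after dequeueing the generic base. */
static inline struct ablkcipher_request *
to_ablkcipher_request(struct crypto_async_request *areq)
{
	return container_of(areq, struct ablkcipher_request, base);
}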
Signed-off-by: Cyrille Pitchen Signed-off-by: Herbert Xu diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index ccb8a83..48407a7 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -78,8 +78,13 @@ struct atmel_aes_caps { struct atmel_aes_dev; -struct atmel_aes_ctx { + +typedef int (*atmel_aes_fn_t)(struct atmel_aes_dev *); + + +struct atmel_aes_base_ctx { struct atmel_aes_dev *dd; + atmel_aes_fn_t start; int keylen; u32 key[AES_KEYSIZE_256 / sizeof(u32)]; @@ -87,6 +92,10 @@ struct atmel_aes_ctx { u16 block_size; }; +struct atmel_aes_ctx { + struct atmel_aes_base_ctx base; +}; + struct atmel_aes_reqctx { unsigned long mode; }; @@ -101,7 +110,9 @@ struct atmel_aes_dev { unsigned long phys_base; void __iomem *io_base; - struct atmel_aes_ctx *ctx; + struct crypto_async_request *areq; + struct atmel_aes_base_ctx *ctx; + struct device *dev; struct clk *iclk; int irq; @@ -115,7 +126,6 @@ struct atmel_aes_dev { struct tasklet_struct done_task; struct tasklet_struct queue_task; - struct ablkcipher_request *req; size_t total; struct scatterlist *in_sg; @@ -236,7 +246,7 @@ static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset, atmel_aes_write(dd, offset, *value); } -static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_ctx *ctx) +static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_base_ctx *ctx) { struct atmel_aes_dev *aes_dd = NULL; struct atmel_aes_dev *tmp; @@ -298,7 +308,7 @@ static int atmel_aes_hw_version_init(struct atmel_aes_dev *dd) static void atmel_aes_finish_req(struct atmel_aes_dev *dd, int err) { - struct ablkcipher_request *req = dd->req; + struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq); clk_disable_unprepare(dd->iclk); dd->flags &= ~AES_FLAGS_BUSY; @@ -396,6 +406,8 @@ static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd, static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd) { + struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq); + dd->flags &= ~AES_FLAGS_DMA; dma_sync_single_for_cpu(dd->dev, dd->dma_addr_in, @@ -404,11 +416,11 @@ static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd) dd->dma_size, DMA_FROM_DEVICE); /* use cache buffers */ - dd->nb_in_sg = atmel_aes_sg_length(dd->req, dd->in_sg); + dd->nb_in_sg = atmel_aes_sg_length(req, dd->in_sg); if (!dd->nb_in_sg) return -EINVAL; - dd->nb_out_sg = atmel_aes_sg_length(dd->req, dd->out_sg); + dd->nb_out_sg = atmel_aes_sg_length(req, dd->out_sg); if (!dd->nb_out_sg) return -EINVAL; @@ -556,38 +568,49 @@ static void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma, } static int atmel_aes_handle_queue(struct atmel_aes_dev *dd, - struct ablkcipher_request *req) + struct crypto_async_request *new_areq) { - struct crypto_async_request *async_req, *backlog; - struct atmel_aes_ctx *ctx; - struct atmel_aes_reqctx *rctx; + struct crypto_async_request *areq, *backlog; + struct atmel_aes_base_ctx *ctx; unsigned long flags; int err, ret = 0; - bool use_dma; spin_lock_irqsave(&dd->lock, flags); - if (req) - ret = ablkcipher_enqueue_request(&dd->queue, req); + if (new_areq) + ret = crypto_enqueue_request(&dd->queue, new_areq); if (dd->flags & AES_FLAGS_BUSY) { spin_unlock_irqrestore(&dd->lock, flags); return ret; } backlog = crypto_get_backlog(&dd->queue); - async_req = crypto_dequeue_request(&dd->queue); - if (async_req) + areq = crypto_dequeue_request(&dd->queue); + if (areq) dd->flags |= AES_FLAGS_BUSY; spin_unlock_irqrestore(&dd->lock, flags); - if (!async_req) + if (!areq) return ret; if (backlog) 
backlog->complete(backlog, -EINPROGRESS); - req = ablkcipher_request_cast(async_req); + ctx = crypto_tfm_ctx(areq->tfm); + + dd->areq = areq; + dd->ctx = ctx; + + err = ctx->start(dd); + return (areq != new_areq) ? ret : err; +} + +static int atmel_aes_start(struct atmel_aes_dev *dd) +{ + struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq); + struct atmel_aes_reqctx *rctx; + bool use_dma; + int err; /* assign new request to device */ - dd->req = req; dd->total = req->nbytes; dd->in_offset = 0; dd->in_sg = req->src; @@ -595,11 +618,8 @@ static int atmel_aes_handle_queue(struct atmel_aes_dev *dd, dd->out_sg = req->dst; rctx = ablkcipher_request_ctx(req); - ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); rctx->mode &= AES_FLAGS_MODE_MASK; dd->flags = (dd->flags & ~AES_FLAGS_MODE_MASK) | rctx->mode; - dd->ctx = ctx; - ctx->dd = dd; err = atmel_aes_hw_init(dd); if (!err) { @@ -616,7 +636,7 @@ static int atmel_aes_handle_queue(struct atmel_aes_dev *dd, tasklet_schedule(&dd->queue_task); } - return ret; + return -EINPROGRESS; } static int atmel_aes_crypt_dma_stop(struct atmel_aes_dev *dd) @@ -704,7 +724,7 @@ static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd) static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode) { - struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx( + struct atmel_aes_base_ctx *ctx = crypto_ablkcipher_ctx( crypto_ablkcipher_reqtfm(req)); struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req); struct atmel_aes_dev *dd; @@ -747,7 +767,7 @@ static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode) rctx->mode = mode; - return atmel_aes_handle_queue(dd, req); + return atmel_aes_handle_queue(dd, &req->base); } static bool atmel_aes_filter(struct dma_chan *chan, void *slave) @@ -822,7 +842,7 @@ static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd) static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, unsigned int keylen) { - struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); + struct atmel_aes_base_ctx *ctx = crypto_ablkcipher_ctx(tfm); if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256) { @@ -946,7 +966,10 @@ static int atmel_aes_ctr_decrypt(struct ablkcipher_request *req) static int atmel_aes_cra_init(struct crypto_tfm *tfm) { + struct atmel_aes_ctx *ctx = crypto_tfm_ctx(tfm); + tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx); + ctx->base.start = atmel_aes_start; return 0; } -- cgit v0.10.2 From 794595d2047a31702905b3666145c6a59bfee472 Mon Sep 17 00:00:00 2001 From: Cyrille Pitchen Date: Thu, 17 Dec 2015 17:48:40 +0100 Subject: crypto: atmel-aes - remove useless write in the Control Register As claimed by the datasheet, writing 0 into the Control Register has no effect. So we remove this useless register access.
Signed-off-by: Cyrille Pitchen Signed-off-by: Herbert Xu diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index 48407a7..f1ea9c8 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -511,7 +511,7 @@ static int atmel_aes_crypt_dma_start(struct atmel_aes_dev *dd) static void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma, const u32 *iv) { - u32 valcr = 0, valmr = 0; + u32 valmr = 0; /* MR register must be set before IV registers */ if (dd->ctx->keylen == AES_KEYSIZE_128) @@ -554,7 +554,6 @@ static void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma, valmr |= AES_MR_SMOD_AUTO; } - atmel_aes_write(dd, AES_CR, valcr); atmel_aes_write(dd, AES_MR, valmr); atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key, -- cgit v0.10.2 From 77dacf5fc511484eab47f802d7369c03175c2b9e Mon Sep 17 00:00:00 2001 From: Cyrille Pitchen Date: Thu, 17 Dec 2015 17:48:41 +0100 Subject: crypto: atmel-aes - simplify the configuration of the AES IP This patch reworks the AES_FLAGS_* to simplify the configuration of the AES IP. Signed-off-by: Cyrille Pitchen Signed-off-by: Herbert Xu diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index f1ea9c8..c10c54c 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -48,22 +48,28 @@ #define CFB64_BLOCK_SIZE 8 /* AES flags */ -#define AES_FLAGS_MODE_MASK 0x03ff -#define AES_FLAGS_ENCRYPT BIT(0) -#define AES_FLAGS_CBC BIT(1) -#define AES_FLAGS_CFB BIT(2) -#define AES_FLAGS_CFB8 BIT(3) -#define AES_FLAGS_CFB16 BIT(4) -#define AES_FLAGS_CFB32 BIT(5) -#define AES_FLAGS_CFB64 BIT(6) -#define AES_FLAGS_CFB128 BIT(7) -#define AES_FLAGS_OFB BIT(8) -#define AES_FLAGS_CTR BIT(9) - -#define AES_FLAGS_INIT BIT(16) -#define AES_FLAGS_DMA BIT(17) -#define AES_FLAGS_BUSY BIT(18) -#define AES_FLAGS_FAST BIT(19) +/* Reserve bits [18:16] [14:12] [0] for mode (same as for AES_MR) */ +#define AES_FLAGS_ENCRYPT AES_MR_CYPHER_ENC +#define AES_FLAGS_OPMODE_MASK (AES_MR_OPMOD_MASK | AES_MR_CFBS_MASK) +#define AES_FLAGS_ECB AES_MR_OPMOD_ECB +#define AES_FLAGS_CBC AES_MR_OPMOD_CBC +#define AES_FLAGS_OFB AES_MR_OPMOD_OFB +#define AES_FLAGS_CFB128 (AES_MR_OPMOD_CFB | AES_MR_CFBS_128b) +#define AES_FLAGS_CFB64 (AES_MR_OPMOD_CFB | AES_MR_CFBS_64b) +#define AES_FLAGS_CFB32 (AES_MR_OPMOD_CFB | AES_MR_CFBS_32b) +#define AES_FLAGS_CFB16 (AES_MR_OPMOD_CFB | AES_MR_CFBS_16b) +#define AES_FLAGS_CFB8 (AES_MR_OPMOD_CFB | AES_MR_CFBS_8b) +#define AES_FLAGS_CTR AES_MR_OPMOD_CTR + +#define AES_FLAGS_MODE_MASK (AES_FLAGS_OPMODE_MASK | \ + AES_FLAGS_ENCRYPT) + +#define AES_FLAGS_INIT BIT(2) +#define AES_FLAGS_BUSY BIT(3) +#define AES_FLAGS_DMA BIT(4) +#define AES_FLAGS_FAST BIT(5) + +#define AES_FLAGS_PERSISTENT (AES_FLAGS_INIT | AES_FLAGS_BUSY) #define ATMEL_AES_QUEUE_LENGTH 50 @@ -306,6 +312,13 @@ static int atmel_aes_hw_version_init(struct atmel_aes_dev *dd) return 0; } +static inline void atmel_aes_set_mode(struct atmel_aes_dev *dd, + const struct atmel_aes_reqctx *rctx) +{ + /* Clear all but persistent flags and set request flags. 
*/ + dd->flags = (dd->flags & AES_FLAGS_PERSISTENT) | rctx->mode; +} + static void atmel_aes_finish_req(struct atmel_aes_dev *dd, int err) { struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq); @@ -329,6 +342,34 @@ static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd, { struct scatterlist sg[2]; struct dma_async_tx_descriptor *in_desc, *out_desc; + enum dma_slave_buswidth addr_width; + u32 maxburst; + + switch (dd->ctx->block_size) { + case CFB8_BLOCK_SIZE: + addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + maxburst = 1; + break; + + case CFB16_BLOCK_SIZE: + addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; + maxburst = 1; + break; + + case CFB32_BLOCK_SIZE: + case CFB64_BLOCK_SIZE: + addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + maxburst = 1; + break; + + case AES_BLOCK_SIZE: + addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + maxburst = dd->caps.max_burst_size; + break; + + default: + return -EINVAL; + } dd->dma_size = length; @@ -337,35 +378,13 @@ static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd, dma_sync_single_for_device(dd->dev, dma_addr_out, length, DMA_FROM_DEVICE); - if (dd->flags & AES_FLAGS_CFB8) { - dd->dma_lch_in.dma_conf.dst_addr_width = - DMA_SLAVE_BUSWIDTH_1_BYTE; - dd->dma_lch_out.dma_conf.src_addr_width = - DMA_SLAVE_BUSWIDTH_1_BYTE; - } else if (dd->flags & AES_FLAGS_CFB16) { - dd->dma_lch_in.dma_conf.dst_addr_width = - DMA_SLAVE_BUSWIDTH_2_BYTES; - dd->dma_lch_out.dma_conf.src_addr_width = - DMA_SLAVE_BUSWIDTH_2_BYTES; - } else { - dd->dma_lch_in.dma_conf.dst_addr_width = - DMA_SLAVE_BUSWIDTH_4_BYTES; - dd->dma_lch_out.dma_conf.src_addr_width = - DMA_SLAVE_BUSWIDTH_4_BYTES; - } + dd->dma_lch_in.dma_conf.dst_addr_width = addr_width; + dd->dma_lch_in.dma_conf.src_maxburst = maxburst; + dd->dma_lch_in.dma_conf.dst_maxburst = maxburst; - if (dd->flags & (AES_FLAGS_CFB8 | AES_FLAGS_CFB16 | - AES_FLAGS_CFB32 | AES_FLAGS_CFB64)) { - dd->dma_lch_in.dma_conf.src_maxburst = 1; - dd->dma_lch_in.dma_conf.dst_maxburst = 1; - dd->dma_lch_out.dma_conf.src_maxburst = 1; - dd->dma_lch_out.dma_conf.dst_maxburst = 1; - } else { - dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size; - dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size; - dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size; - dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size; - } + dd->dma_lch_out.dma_conf.src_addr_width = addr_width; + dd->dma_lch_out.dma_conf.src_maxburst = maxburst; + dd->dma_lch_out.dma_conf.dst_maxburst = maxburst; dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf); dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf); @@ -521,30 +540,7 @@ static void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma, else valmr |= AES_MR_KEYSIZE_256; - if (dd->flags & AES_FLAGS_CBC) { - valmr |= AES_MR_OPMOD_CBC; - } else if (dd->flags & AES_FLAGS_CFB) { - valmr |= AES_MR_OPMOD_CFB; - if (dd->flags & AES_FLAGS_CFB8) - valmr |= AES_MR_CFBS_8b; - else if (dd->flags & AES_FLAGS_CFB16) - valmr |= AES_MR_CFBS_16b; - else if (dd->flags & AES_FLAGS_CFB32) - valmr |= AES_MR_CFBS_32b; - else if (dd->flags & AES_FLAGS_CFB64) - valmr |= AES_MR_CFBS_64b; - else if (dd->flags & AES_FLAGS_CFB128) - valmr |= AES_MR_CFBS_128b; - } else if (dd->flags & AES_FLAGS_OFB) { - valmr |= AES_MR_OPMOD_OFB; - } else if (dd->flags & AES_FLAGS_CTR) { - valmr |= AES_MR_OPMOD_CTR; - } else { - valmr |= AES_MR_OPMOD_ECB; - } - - if (dd->flags & AES_FLAGS_ENCRYPT) - valmr |= AES_MR_CYPHER_ENC; + valmr |= dd->flags & AES_FLAGS_MODE_MASK; if (use_dma) { 
valmr |= AES_MR_SMOD_IDATAR0; @@ -559,11 +555,8 @@ static void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma, atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key, dd->ctx->keylen >> 2); - if (((dd->flags & AES_FLAGS_CBC) || (dd->flags & AES_FLAGS_CFB) || - (dd->flags & AES_FLAGS_OFB) || (dd->flags & AES_FLAGS_CTR)) && - iv) { + if (iv && (valmr & AES_MR_OPMOD_MASK) != AES_MR_OPMOD_ECB) atmel_aes_write_n(dd, AES_IVR(0), iv, 4); - } } static int atmel_aes_handle_queue(struct atmel_aes_dev *dd, @@ -617,8 +610,7 @@ static int atmel_aes_start(struct atmel_aes_dev *dd) dd->out_sg = req->dst; rctx = ablkcipher_request_ctx(req); - rctx->mode &= AES_FLAGS_MODE_MASK; - dd->flags = (dd->flags & ~AES_FLAGS_MODE_MASK) | rctx->mode; + atmel_aes_set_mode(dd, rctx); err = atmel_aes_hw_init(dd); if (!err) { @@ -728,36 +720,26 @@ static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode) struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req); struct atmel_aes_dev *dd; - if (mode & AES_FLAGS_CFB8) { - if (!IS_ALIGNED(req->nbytes, CFB8_BLOCK_SIZE)) { - pr_err("request size is not exact amount of CFB8 blocks\n"); - return -EINVAL; - } + switch (mode & AES_FLAGS_OPMODE_MASK) { + case AES_FLAGS_CFB8: ctx->block_size = CFB8_BLOCK_SIZE; - } else if (mode & AES_FLAGS_CFB16) { - if (!IS_ALIGNED(req->nbytes, CFB16_BLOCK_SIZE)) { - pr_err("request size is not exact amount of CFB16 blocks\n"); - return -EINVAL; - } + break; + + case AES_FLAGS_CFB16: ctx->block_size = CFB16_BLOCK_SIZE; - } else if (mode & AES_FLAGS_CFB32) { - if (!IS_ALIGNED(req->nbytes, CFB32_BLOCK_SIZE)) { - pr_err("request size is not exact amount of CFB32 blocks\n"); - return -EINVAL; - } + break; + + case AES_FLAGS_CFB32: ctx->block_size = CFB32_BLOCK_SIZE; - } else if (mode & AES_FLAGS_CFB64) { - if (!IS_ALIGNED(req->nbytes, CFB64_BLOCK_SIZE)) { - pr_err("request size is not exact amount of CFB64 blocks\n"); - return -EINVAL; - } + break; + + case AES_FLAGS_CFB64: ctx->block_size = CFB64_BLOCK_SIZE; - } else { - if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) { - pr_err("request size is not exact amount of AES blocks\n"); - return -EINVAL; - } + break; + + default: ctx->block_size = AES_BLOCK_SIZE; + break; } dd = atmel_aes_find_dev(ctx); @@ -857,14 +839,12 @@ static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, static int atmel_aes_ecb_encrypt(struct ablkcipher_request *req) { - return atmel_aes_crypt(req, - AES_FLAGS_ENCRYPT); + return atmel_aes_crypt(req, AES_FLAGS_ECB | AES_FLAGS_ENCRYPT); } static int atmel_aes_ecb_decrypt(struct ablkcipher_request *req) { - return atmel_aes_crypt(req, - 0); + return atmel_aes_crypt(req, AES_FLAGS_ECB); } static int atmel_aes_cbc_encrypt(struct ablkcipher_request *req) @@ -893,62 +873,52 @@ static int atmel_aes_ofb_decrypt(struct ablkcipher_request *req) static int atmel_aes_cfb_encrypt(struct ablkcipher_request *req) { - return atmel_aes_crypt(req, - AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB128); + return atmel_aes_crypt(req, AES_FLAGS_CFB128 | AES_FLAGS_ENCRYPT); } static int atmel_aes_cfb_decrypt(struct ablkcipher_request *req) { - return atmel_aes_crypt(req, - AES_FLAGS_CFB | AES_FLAGS_CFB128); + return atmel_aes_crypt(req, AES_FLAGS_CFB128); } static int atmel_aes_cfb64_encrypt(struct ablkcipher_request *req) { - return atmel_aes_crypt(req, - AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB64); + return atmel_aes_crypt(req, AES_FLAGS_CFB64 | AES_FLAGS_ENCRYPT); } static int atmel_aes_cfb64_decrypt(struct ablkcipher_request *req) { - return 
atmel_aes_crypt(req, - AES_FLAGS_CFB | AES_FLAGS_CFB64); + return atmel_aes_crypt(req, AES_FLAGS_CFB64); } static int atmel_aes_cfb32_encrypt(struct ablkcipher_request *req) { - return atmel_aes_crypt(req, - AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB32); + return atmel_aes_crypt(req, AES_FLAGS_CFB32 | AES_FLAGS_ENCRYPT); } static int atmel_aes_cfb32_decrypt(struct ablkcipher_request *req) { - return atmel_aes_crypt(req, - AES_FLAGS_CFB | AES_FLAGS_CFB32); + return atmel_aes_crypt(req, AES_FLAGS_CFB32); } static int atmel_aes_cfb16_encrypt(struct ablkcipher_request *req) { - return atmel_aes_crypt(req, - AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB16); + return atmel_aes_crypt(req, AES_FLAGS_CFB16 | AES_FLAGS_ENCRYPT); } static int atmel_aes_cfb16_decrypt(struct ablkcipher_request *req) { - return atmel_aes_crypt(req, - AES_FLAGS_CFB | AES_FLAGS_CFB16); + return atmel_aes_crypt(req, AES_FLAGS_CFB16); } static int atmel_aes_cfb8_encrypt(struct ablkcipher_request *req) { - return atmel_aes_crypt(req, - AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB8); + return atmel_aes_crypt(req, AES_FLAGS_CFB8 | AES_FLAGS_ENCRYPT); } static int atmel_aes_cfb8_decrypt(struct ablkcipher_request *req) { - return atmel_aes_crypt(req, - AES_FLAGS_CFB | AES_FLAGS_CFB8); + return atmel_aes_crypt(req, AES_FLAGS_CFB8); } static int atmel_aes_ctr_encrypt(struct ablkcipher_request *req) -- cgit v0.10.2 From 10f12c1b86d8decf95e110cd2f7ec0c0e19801ec Mon Sep 17 00:00:00 2001 From: Cyrille Pitchen Date: Thu, 17 Dec 2015 17:48:42 +0100 Subject: crypto: atmel-aes - rework crypto request completion This patch introduces a new 'resume' callback in struct atmel_aes_dev. This callback is run to resume/complete the processing of the crypto request when woken up by I/O events such as AES interrupts or DMA completion. This callback will help implement GCM mode support in further patches.
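The resulting control flow can be sketched like this (an editor's sketch with made-up names, not the driver code): each processing step either finishes synchronously or records the next step in ->resume and returns -EINPROGRESS, and the I/O event handler simply re-enters the state machine through that pointer.

#include <linux/errno.h>
#include <linux/types.h>

struct example_dev;

typedef int (*example_fn_t)(struct example_dev *);

struct example_dev {
	example_fn_t resume;	/* next step, run on the next I/O event */
	bool is_async;		/* set when we resumed from an event */
};

static int example_finish(struct example_dev *dd)
{
	/* complete the crypto request here */
	return 0;
}

static int example_step(struct example_dev *dd, bool hw_ready)
{
	if (hw_ready)			/* fast path: no interrupt needed */
		return example_finish(dd);

	dd->resume = example_finish;	/* slow path: resume from the event */
	/* enable the relevant interrupt or start the DMA transfer here */
	return -EINPROGRESS;
}

/* Interrupt handler or DMA completion side: */
static void example_event(struct example_dev *dd)
{
	dd->is_async = true;
	(void)dd->resume(dd);
}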
Signed-off-by: Cyrille Pitchen Signed-off-by: Herbert Xu diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index c10c54c..ac551ee 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -119,6 +119,9 @@ struct atmel_aes_dev { struct crypto_async_request *areq; struct atmel_aes_base_ctx *ctx; + bool is_async; + atmel_aes_fn_t resume; + struct device *dev; struct clk *iclk; int irq; @@ -319,14 +322,17 @@ static inline void atmel_aes_set_mode(struct atmel_aes_dev *dd, dd->flags = (dd->flags & AES_FLAGS_PERSISTENT) | rctx->mode; } -static void atmel_aes_finish_req(struct atmel_aes_dev *dd, int err) +static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err) { - struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq); - clk_disable_unprepare(dd->iclk); dd->flags &= ~AES_FLAGS_BUSY; - req->base.complete(&req->base, err); + if (dd->is_async) + dd->areq->complete(dd->areq, err); + + tasklet_schedule(&dd->queue_task); + + return err; } static void atmel_aes_dma_callback(void *data) @@ -423,6 +429,8 @@ static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd, return 0; } +static int atmel_aes_cpu_complete(struct atmel_aes_dev *dd); + static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd) { struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq); @@ -455,9 +463,12 @@ static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd) atmel_aes_write_n(dd, AES_IDATAR(0), (u32 *) dd->buf_in, dd->bufcnt >> 2); - return 0; + dd->resume = atmel_aes_cpu_complete; + return -EINPROGRESS; } +static int atmel_aes_dma_complete(struct atmel_aes_dev *dd); + static int atmel_aes_crypt_dma_start(struct atmel_aes_dev *dd) { int err, fast = 0, in, out; @@ -524,7 +535,8 @@ static int atmel_aes_crypt_dma_start(struct atmel_aes_dev *dd) dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE); } - return err; + dd->resume = atmel_aes_dma_complete; + return err ? : -EINPROGRESS; } static void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma, @@ -590,9 +602,10 @@ static int atmel_aes_handle_queue(struct atmel_aes_dev *dd, dd->areq = areq; dd->ctx = ctx; + dd->is_async = (areq != new_areq); err = ctx->start(dd); - return (areq != new_areq) ? ret : err; + return (dd->is_async) ? ret : err; } static int atmel_aes_start(struct atmel_aes_dev *dd) @@ -621,10 +634,9 @@ static int atmel_aes_start(struct atmel_aes_dev *dd) else err = atmel_aes_crypt_cpu_start(dd); } - if (err) { + if (err && err != -EINPROGRESS) { /* aes_task will not finish it, so do it here */ - atmel_aes_finish_req(dd, err); - tasklet_schedule(&dd->queue_task); + return atmel_aes_complete(dd, err); } return -EINPROGRESS; @@ -1149,20 +1161,14 @@ static void atmel_aes_queue_task(unsigned long data) static void atmel_aes_done_task(unsigned long data) { struct atmel_aes_dev *dd = (struct atmel_aes_dev *) data; - int err; - - if (!(dd->flags & AES_FLAGS_DMA)) { - atmel_aes_read_n(dd, AES_ODATAR(0), (u32 *) dd->buf_out, - dd->bufcnt >> 2); - if (sg_copy_from_buffer(dd->out_sg, dd->nb_out_sg, - dd->buf_out, dd->bufcnt)) - err = 0; - else - err = -EINVAL; + dd->is_async = true; + (void)dd->resume(dd); +} - goto cpu_end; - } +static int atmel_aes_dma_complete(struct atmel_aes_dev *dd) +{ + int err; err = atmel_aes_crypt_dma_stop(dd); @@ -1177,13 +1183,27 @@ static void atmel_aes_done_task(unsigned long data) } if (!err) err = atmel_aes_crypt_dma_start(dd); - if (!err) - return; /* DMA started. Not fininishing. */ + if (!err || err == -EINPROGRESS) + return -EINPROGRESS; /* DMA started. 
Not fininishing. */ } -cpu_end: - atmel_aes_finish_req(dd, err); - atmel_aes_handle_queue(dd, NULL); + return atmel_aes_complete(dd, err); +} + +static int atmel_aes_cpu_complete(struct atmel_aes_dev *dd) +{ + int err; + + atmel_aes_read_n(dd, AES_ODATAR(0), (u32 *) dd->buf_out, + dd->bufcnt >> 2); + + if (sg_copy_from_buffer(dd->out_sg, dd->nb_out_sg, + dd->buf_out, dd->bufcnt)) + err = 0; + else + err = -EINVAL; + + return atmel_aes_complete(dd, err); } static irqreturn_t atmel_aes_irq(int irq, void *dev_id) -- cgit v0.10.2 From bd5f43decd61b52210910b14f132f083923fa6fa Mon Sep 17 00:00:00 2001 From: Cyrille Pitchen Date: Thu, 17 Dec 2015 17:48:43 +0100 Subject: crypto: atmel-aes - remove unused 'err' member of struct atmel_aes_dev This 'err' member was initialized to 0 but its value never changed. Signed-off-by: Cyrille Pitchen Signed-off-by: Herbert Xu diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index ac551ee..25dc7bd 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -127,7 +127,6 @@ struct atmel_aes_dev { int irq; unsigned long flags; - int err; spinlock_t lock; struct crypto_queue queue; @@ -288,7 +287,6 @@ static int atmel_aes_hw_init(struct atmel_aes_dev *dd) atmel_aes_write(dd, AES_CR, AES_CR_SWRST); atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET); dd->flags |= AES_FLAGS_INIT; - dd->err = 0; } return 0; @@ -1171,9 +1169,6 @@ static int atmel_aes_dma_complete(struct atmel_aes_dev *dd) int err; err = atmel_aes_crypt_dma_stop(dd); - - err = dd->err ? : err; - if (dd->total && !err) { if (dd->flags & AES_FLAGS_FAST) { dd->in_sg = sg_next(dd->in_sg); -- cgit v0.10.2 From 13c7f876c2aac59863f6d28bd72e98bc45be3c9a Mon Sep 17 00:00:00 2001 From: Cyrille Pitchen Date: Thu, 17 Dec 2015 17:48:44 +0100 Subject: crypto: atmel-aes - reduce latency of DMA completion atmel_aes_dma_callback() now directly calls the 'resume' callback instead of scheduling the done task, which in turn only calls the very same 'resume' callback. Signed-off-by: Cyrille Pitchen Signed-off-by: Herbert Xu diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index 25dc7bd..8a2b3e8 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -337,8 +337,8 @@ static void atmel_aes_dma_callback(void *data) { struct atmel_aes_dev *dd = data; - /* dma_lch_out - completed */ - tasklet_schedule(&dd->done_task); + dd->is_async = true; + (void)dd->resume(dd); } static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd, -- cgit v0.10.2 From 820599a0ea5f99112322011021e637c40f0048db Mon Sep 17 00:00:00 2001 From: Cyrille Pitchen Date: Thu, 17 Dec 2015 17:48:45 +0100 Subject: crypto: atmel-aes - remove useless AES_FLAGS_DMA flag Since the 'done' task code was split into atmel_aes_cpu_complete() and atmel_aes_dma_complete(), the AES_FLAGS_DMA flag has become useless. 
Signed-off-by: Cyrille Pitchen Signed-off-by: Herbert Xu diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index 8a2b3e8..68e4177 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -66,7 +66,6 @@ #define AES_FLAGS_INIT BIT(2) #define AES_FLAGS_BUSY BIT(3) -#define AES_FLAGS_DMA BIT(4) #define AES_FLAGS_FAST BIT(5) @@ -393,8 +392,6 @@ static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd, dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf); dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf); - dd->flags |= AES_FLAGS_DMA; - sg_init_table(&sg[0], 1); sg_dma_address(&sg[0]) = dma_addr_in; sg_dma_len(&sg[0]) = length; @@ -433,8 +430,6 @@ static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd) { struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq); - dd->flags &= ~AES_FLAGS_DMA; - dma_sync_single_for_cpu(dd->dev, dd->dma_addr_in, dd->dma_size, DMA_TO_DEVICE); dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out, @@ -642,25 +637,23 @@ static int atmel_aes_start(struct atmel_aes_dev *dd) static int atmel_aes_crypt_dma_stop(struct atmel_aes_dev *dd) { - int err = -EINVAL; + int err = 0; size_t count; - if (dd->flags & AES_FLAGS_DMA) { - err = 0; - if (dd->flags & AES_FLAGS_FAST) { - dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE); - dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); - } else { - dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out, - dd->dma_size, DMA_FROM_DEVICE); - - /* copy data */ - count = atmel_aes_sg_copy(&dd->out_sg, &dd->out_offset, - dd->buf_out, dd->buflen, dd->dma_size, 1); - if (count != dd->dma_size) { - err = -EINVAL; - pr_err("not all data converted: %zu\n", count); - } + if (dd->flags & AES_FLAGS_FAST) { + dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE); + dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); + } else { + dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out, + dd->dma_size, DMA_FROM_DEVICE); + + /* copy data */ + count = atmel_aes_sg_copy(&dd->out_sg, &dd->out_offset, + dd->buf_out, dd->buflen, + dd->dma_size, 1); + if (count != dd->dma_size) { + err = -EINVAL; + pr_err("not all data converted: %zu\n", count); } } -- cgit v0.10.2 From 2a377828914f98aabcfeb0cb620f9b7ab808d3af Mon Sep 17 00:00:00 2001 From: Cyrille Pitchen Date: Thu, 17 Dec 2015 17:48:46 +0100 Subject: crypto: atmel-aes - fix atmel_aes_remove() Add missing call to atmel_aes_buff_cleanup(). Signed-off-by: Cyrille Pitchen Signed-off-by: Herbert Xu diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index 68e4177..343199c 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -1469,6 +1469,7 @@ static int atmel_aes_remove(struct platform_device *pdev) tasklet_kill(&aes_dd->queue_task); atmel_aes_dma_cleanup(aes_dd); + atmel_aes_buff_cleanup(aes_dd); return 0; } -- cgit v0.10.2 From bbe628ed897d728d38c4035381d12b2f308fac6f Mon Sep 17 00:00:00 2001 From: Cyrille Pitchen Date: Thu, 17 Dec 2015 18:13:00 +0100 Subject: crypto: atmel-aes - improve performances of data transfer This patch totally reworks data transfer. 1 - DMA: The new code now fully supports scatter-gather lists, hence reducing the number of interrupts in some cases. Also, buffer alignments are better managed to avoid useless copies. 2 - CPU: The new code makes it possible to use PIO accesses even when transferring more than one AES block, so further patches could tune the DMA threshold (ATMEL_AES_DMA_THRESHOLD).
Moreover, CPU transfers now have a chance to be processed synchronously, hence reducing latency by avoiding context switches when possible (fewer interrupts to process, less frequent scheduling of the 'done' task). Indeed, the 'DATA READY' bit is polled only once in the Interrupt Status Register before the associated interrupt is enabled and then waited for. In some conditions, this single poll is enough, as the data has already been processed by the AES hardware and so is ready. Signed-off-by: Cyrille Pitchen Signed-off-by: Herbert Xu diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index 343199c..9ef38ec 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -42,11 +42,16 @@ #define ATMEL_AES_PRIORITY 300 +#define ATMEL_AES_BUFFER_ORDER 2 +#define ATMEL_AES_BUFFER_SIZE (PAGE_SIZE << ATMEL_AES_BUFFER_ORDER) + #define CFB8_BLOCK_SIZE 1 #define CFB16_BLOCK_SIZE 2 #define CFB32_BLOCK_SIZE 4 #define CFB64_BLOCK_SIZE 8 +#define SIZE_IN_WORDS(x) ((x) >> 2) + /* AES flags */ /* Reserve bits [18:16] [14:12] [0] for mode (same as for AES_MR) */ #define AES_FLAGS_ENCRYPT AES_MR_CYPHER_ENC @@ -66,7 +71,6 @@ #define AES_FLAGS_INIT BIT(2) #define AES_FLAGS_BUSY BIT(3) -#define AES_FLAGS_FAST BIT(5) #define AES_FLAGS_PERSISTENT (AES_FLAGS_INIT | AES_FLAGS_BUSY) @@ -106,8 +110,11 @@ struct atmel_aes_reqctx { }; struct atmel_aes_dma { - struct dma_chan *chan; - struct dma_slave_config dma_conf; + struct dma_chan *chan; + struct scatterlist *sg; + int nents; + unsigned int remainder; + unsigned int sg_len; }; struct atmel_aes_dev { @@ -120,6 +127,7 @@ struct atmel_aes_dev { bool is_async; atmel_aes_fn_t resume; + atmel_aes_fn_t cpu_transfer_complete; struct device *dev; struct clk *iclk; @@ -133,28 +141,17 @@ struct atmel_aes_dev { struct tasklet_struct done_task; struct tasklet_struct queue_task; - size_t total; - - struct scatterlist *in_sg; - unsigned int nb_in_sg; - size_t in_offset; - struct scatterlist *out_sg; - unsigned int nb_out_sg; - size_t out_offset; - - size_t bufcnt; - size_t buflen; - size_t dma_size; + size_t total; + size_t datalen; + u32 *data; - void *buf_in; - int dma_in; - dma_addr_t dma_addr_in; - struct atmel_aes_dma dma_lch_in; + struct atmel_aes_dma src; + struct atmel_aes_dma dst; - void *buf_out; - int dma_out; - dma_addr_t dma_addr_out; - struct atmel_aes_dma dma_lch_out; + size_t buflen; + void *buf; + struct scatterlist aligned_sg; + struct scatterlist *real_dst; struct atmel_aes_caps caps; @@ -171,62 +168,6 @@ static struct atmel_aes_drv atmel_aes = { .lock = __SPIN_LOCK_UNLOCKED(atmel_aes.lock), }; -static int atmel_aes_sg_length(struct ablkcipher_request *req, - struct scatterlist *sg) -{ - unsigned int total = req->nbytes; - int sg_nb; - unsigned int len; - struct scatterlist *sg_list; - - sg_nb = 0; - sg_list = sg; - total = req->nbytes; - - while (total) { - len = min(sg_list->length, total); - - sg_nb++; - total -= len; - - sg_list = sg_next(sg_list); - if (!sg_list) - total = 0; - } - - return sg_nb; -} - -static int atmel_aes_sg_copy(struct scatterlist **sg, size_t *offset, - void *buf, size_t buflen, size_t total, int out) -{ - size_t count, off = 0; - - while (buflen && total) { - count = min((*sg)->length - *offset, total); - count = min(count, buflen); - - if (!count) - return off; - - scatterwalk_map_and_copy(buf + off, *sg, *offset, count, out); - - off += count; - buflen -= count; - *offset += count; - total -= count; - - if (*offset == (*sg)->length) { - *sg = sg_next(*sg); - if (*sg) - *offset = 0; - else - total = 0; - }
} - - return off; -} static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset) { @@ -253,6 +194,37 @@ static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset, atmel_aes_write(dd, offset, *value); } +static inline void atmel_aes_read_block(struct atmel_aes_dev *dd, u32 offset, + u32 *value) +{ + atmel_aes_read_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE)); +} + +static inline void atmel_aes_write_block(struct atmel_aes_dev *dd, u32 offset, + const u32 *value) +{ + atmel_aes_write_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE)); +} + +static inline int atmel_aes_wait_for_data_ready(struct atmel_aes_dev *dd, + atmel_aes_fn_t resume) +{ + u32 isr = atmel_aes_read(dd, AES_ISR); + + if (unlikely(isr & AES_INT_DATARDY)) + return resume(dd); + + dd->resume = resume; + atmel_aes_write(dd, AES_IER, AES_INT_DATARDY); + return -EINPROGRESS; +} + +static inline size_t atmel_aes_padlen(size_t len, size_t block_size) +{ + len &= block_size - 1; + return len ? block_size - len : 0; +} + static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_base_ctx *ctx) { struct atmel_aes_dev *aes_dd = NULL; @@ -332,204 +304,363 @@ static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err) return err; } -static void atmel_aes_dma_callback(void *data) + +/* CPU transfer */ + +static int atmel_aes_cpu_transfer(struct atmel_aes_dev *dd) { - struct atmel_aes_dev *dd = data; + int err = 0; + u32 isr; - dd->is_async = true; - (void)dd->resume(dd); + for (;;) { + atmel_aes_read_block(dd, AES_ODATAR(0), dd->data); + dd->data += 4; + dd->datalen -= AES_BLOCK_SIZE; + + if (dd->datalen < AES_BLOCK_SIZE) + break; + + atmel_aes_write_block(dd, AES_IDATAR(0), dd->data); + + isr = atmel_aes_read(dd, AES_ISR); + if (!(isr & AES_INT_DATARDY)) { + dd->resume = atmel_aes_cpu_transfer; + atmel_aes_write(dd, AES_IER, AES_INT_DATARDY); + return -EINPROGRESS; + } + } + + if (!sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst), + dd->buf, dd->total)) + err = -EINVAL; + + if (err) + return atmel_aes_complete(dd, err); + + return dd->cpu_transfer_complete(dd); } -static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd, - dma_addr_t dma_addr_in, dma_addr_t dma_addr_out, int length) +static int atmel_aes_cpu_start(struct atmel_aes_dev *dd, + struct scatterlist *src, + struct scatterlist *dst, + size_t len, + atmel_aes_fn_t resume) { - struct scatterlist sg[2]; - struct dma_async_tx_descriptor *in_desc, *out_desc; - enum dma_slave_buswidth addr_width; - u32 maxburst; + size_t padlen = atmel_aes_padlen(len, AES_BLOCK_SIZE); - switch (dd->ctx->block_size) { - case CFB8_BLOCK_SIZE: - addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; - maxburst = 1; - break; + if (unlikely(len == 0)) + return -EINVAL; - case CFB16_BLOCK_SIZE: - addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; - maxburst = 1; - break; + sg_copy_to_buffer(src, sg_nents(src), dd->buf, len); - case CFB32_BLOCK_SIZE: - case CFB64_BLOCK_SIZE: - addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; - maxburst = 1; - break; + dd->total = len; + dd->real_dst = dst; + dd->cpu_transfer_complete = resume; + dd->datalen = len + padlen; + dd->data = (u32 *)dd->buf; + atmel_aes_write_block(dd, AES_IDATAR(0), dd->data); + return atmel_aes_wait_for_data_ready(dd, atmel_aes_cpu_transfer); +} - case AES_BLOCK_SIZE: - addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; - maxburst = dd->caps.max_burst_size; - break; - default: - return -EINVAL; +/* DMA transfer */ + +static void atmel_aes_dma_callback(void *data); + +static bool atmel_aes_check_aligned(struct atmel_aes_dev *dd, + 
struct scatterlist *sg, + size_t len, + struct atmel_aes_dma *dma) +{ + int nents; + + if (!IS_ALIGNED(len, dd->ctx->block_size)) + return false; + + for (nents = 0; sg; sg = sg_next(sg), ++nents) { + if (!IS_ALIGNED(sg->offset, sizeof(u32))) + return false; + + if (len <= sg->length) { + if (!IS_ALIGNED(len, dd->ctx->block_size)) + return false; + + dma->nents = nents+1; + dma->remainder = sg->length - len; + sg->length = len; + return true; + } + + if (!IS_ALIGNED(sg->length, dd->ctx->block_size)) + return false; + + len -= sg->length; } - dd->dma_size = length; + return false; +} - dma_sync_single_for_device(dd->dev, dma_addr_in, length, - DMA_TO_DEVICE); - dma_sync_single_for_device(dd->dev, dma_addr_out, length, - DMA_FROM_DEVICE); +static inline void atmel_aes_restore_sg(const struct atmel_aes_dma *dma) +{ + struct scatterlist *sg = dma->sg; + int nents = dma->nents; - dd->dma_lch_in.dma_conf.dst_addr_width = addr_width; - dd->dma_lch_in.dma_conf.src_maxburst = maxburst; - dd->dma_lch_in.dma_conf.dst_maxburst = maxburst; + if (!dma->remainder) + return; - dd->dma_lch_out.dma_conf.src_addr_width = addr_width; - dd->dma_lch_out.dma_conf.src_maxburst = maxburst; - dd->dma_lch_out.dma_conf.dst_maxburst = maxburst; + while (--nents > 0 && sg) + sg = sg_next(sg); - dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf); - dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf); + if (!sg) + return; - sg_init_table(&sg[0], 1); - sg_dma_address(&sg[0]) = dma_addr_in; - sg_dma_len(&sg[0]) = length; + sg->length += dma->remainder; +} - sg_init_table(&sg[1], 1); - sg_dma_address(&sg[1]) = dma_addr_out; - sg_dma_len(&sg[1]) = length; +static int atmel_aes_map(struct atmel_aes_dev *dd, + struct scatterlist *src, + struct scatterlist *dst, + size_t len) +{ + bool src_aligned, dst_aligned; + size_t padlen; - in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, &sg[0], - 1, DMA_MEM_TO_DEV, - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); - if (!in_desc) - return -EINVAL; + dd->total = len; + dd->src.sg = src; + dd->dst.sg = dst; + dd->real_dst = dst; - out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, &sg[1], - 1, DMA_DEV_TO_MEM, - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); - if (!out_desc) - return -EINVAL; + src_aligned = atmel_aes_check_aligned(dd, src, len, &dd->src); + if (src == dst) + dst_aligned = src_aligned; + else + dst_aligned = atmel_aes_check_aligned(dd, dst, len, &dd->dst); + if (!src_aligned || !dst_aligned) { + padlen = atmel_aes_padlen(len, dd->ctx->block_size); + + if (dd->buflen < len + padlen) + return -ENOMEM; + + if (!src_aligned) { + sg_copy_to_buffer(src, sg_nents(src), dd->buf, len); + dd->src.sg = &dd->aligned_sg; + dd->src.nents = 1; + dd->src.remainder = 0; + } - out_desc->callback = atmel_aes_dma_callback; - out_desc->callback_param = dd; + if (!dst_aligned) { + dd->dst.sg = &dd->aligned_sg; + dd->dst.nents = 1; + dd->dst.remainder = 0; + } - dmaengine_submit(out_desc); - dma_async_issue_pending(dd->dma_lch_out.chan); + sg_init_table(&dd->aligned_sg, 1); + sg_set_buf(&dd->aligned_sg, dd->buf, len + padlen); + } - dmaengine_submit(in_desc); - dma_async_issue_pending(dd->dma_lch_in.chan); + if (dd->src.sg == dd->dst.sg) { + dd->src.sg_len = dma_map_sg(dd->dev, dd->src.sg, dd->src.nents, + DMA_BIDIRECTIONAL); + dd->dst.sg_len = dd->src.sg_len; + if (!dd->src.sg_len) + return -EFAULT; + } else { + dd->src.sg_len = dma_map_sg(dd->dev, dd->src.sg, dd->src.nents, + DMA_TO_DEVICE); + if (!dd->src.sg_len) + return -EFAULT; + + dd->dst.sg_len = 
dma_map_sg(dd->dev, dd->dst.sg, dd->dst.nents, + DMA_FROM_DEVICE); + if (!dd->dst.sg_len) { + dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents, + DMA_TO_DEVICE); + return -EFAULT; + } + } return 0; } -static int atmel_aes_cpu_complete(struct atmel_aes_dev *dd); - -static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd) +static void atmel_aes_unmap(struct atmel_aes_dev *dd) { - struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq); + if (dd->src.sg == dd->dst.sg) { + dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents, + DMA_BIDIRECTIONAL); - dma_sync_single_for_cpu(dd->dev, dd->dma_addr_in, - dd->dma_size, DMA_TO_DEVICE); - dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out, - dd->dma_size, DMA_FROM_DEVICE); + if (dd->src.sg != &dd->aligned_sg) + atmel_aes_restore_sg(&dd->src); + } else { + dma_unmap_sg(dd->dev, dd->dst.sg, dd->dst.nents, + DMA_FROM_DEVICE); - /* use cache buffers */ - dd->nb_in_sg = atmel_aes_sg_length(req, dd->in_sg); - if (!dd->nb_in_sg) - return -EINVAL; + if (dd->dst.sg != &dd->aligned_sg) + atmel_aes_restore_sg(&dd->dst); - dd->nb_out_sg = atmel_aes_sg_length(req, dd->out_sg); - if (!dd->nb_out_sg) - return -EINVAL; + dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents, + DMA_TO_DEVICE); + + if (dd->src.sg != &dd->aligned_sg) + atmel_aes_restore_sg(&dd->src); + } + + if (dd->dst.sg == &dd->aligned_sg) + sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst), + dd->buf, dd->total); +} - dd->bufcnt = sg_copy_to_buffer(dd->in_sg, dd->nb_in_sg, - dd->buf_in, dd->total); +static int atmel_aes_dma_transfer_start(struct atmel_aes_dev *dd, + enum dma_slave_buswidth addr_width, + enum dma_transfer_direction dir, + u32 maxburst) +{ + struct dma_async_tx_descriptor *desc; + struct dma_slave_config config; + dma_async_tx_callback callback; + struct atmel_aes_dma *dma; + int err; + + memset(&config, 0, sizeof(config)); + config.direction = dir; + config.src_addr_width = addr_width; + config.dst_addr_width = addr_width; + config.src_maxburst = maxburst; + config.dst_maxburst = maxburst; + + switch (dir) { + case DMA_MEM_TO_DEV: + dma = &dd->src; + callback = NULL; + config.dst_addr = dd->phys_base + AES_IDATAR(0); + break; - if (!dd->bufcnt) + case DMA_DEV_TO_MEM: + dma = &dd->dst; + callback = atmel_aes_dma_callback; + config.src_addr = dd->phys_base + AES_ODATAR(0); + break; + + default: return -EINVAL; + } - dd->total -= dd->bufcnt; + err = dmaengine_slave_config(dma->chan, &config); + if (err) + return err; - atmel_aes_write(dd, AES_IER, AES_INT_DATARDY); - atmel_aes_write_n(dd, AES_IDATAR(0), (u32 *) dd->buf_in, - dd->bufcnt >> 2); + desc = dmaengine_prep_slave_sg(dma->chan, dma->sg, dma->sg_len, dir, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc) + return -ENOMEM; - dd->resume = atmel_aes_cpu_complete; - return -EINPROGRESS; -} + desc->callback = callback; + desc->callback_param = dd; + dmaengine_submit(desc); + dma_async_issue_pending(dma->chan); -static int atmel_aes_dma_complete(struct atmel_aes_dev *dd); + return 0; +} -static int atmel_aes_crypt_dma_start(struct atmel_aes_dev *dd) +static void atmel_aes_dma_transfer_stop(struct atmel_aes_dev *dd, + enum dma_transfer_direction dir) { - int err, fast = 0, in, out; - size_t count; - dma_addr_t addr_in, addr_out; + struct atmel_aes_dma *dma; - if ((!dd->in_offset) && (!dd->out_offset)) { - /* check for alignment */ - in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)) && - IS_ALIGNED(dd->in_sg->length, dd->ctx->block_size); - out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)) && - 
IS_ALIGNED(dd->out_sg->length, dd->ctx->block_size); - fast = in && out; + switch (dir) { + case DMA_MEM_TO_DEV: + dma = &dd->src; + break; + + case DMA_DEV_TO_MEM: + dma = &dd->dst; + break; - if (sg_dma_len(dd->in_sg) != sg_dma_len(dd->out_sg)) - fast = 0; + default: + return; } + dmaengine_terminate_all(dma->chan); +} - if (fast) { - count = min_t(size_t, dd->total, sg_dma_len(dd->in_sg)); - count = min_t(size_t, count, sg_dma_len(dd->out_sg)); +static int atmel_aes_dma_start(struct atmel_aes_dev *dd, + struct scatterlist *src, + struct scatterlist *dst, + size_t len, + atmel_aes_fn_t resume) +{ + enum dma_slave_buswidth addr_width; + u32 maxburst; + int err; - err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); - if (!err) { - dev_err(dd->dev, "dma_map_sg() error\n"); - return -EINVAL; - } + switch (dd->ctx->block_size) { + case CFB8_BLOCK_SIZE: + addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + maxburst = 1; + break; - err = dma_map_sg(dd->dev, dd->out_sg, 1, - DMA_FROM_DEVICE); - if (!err) { - dev_err(dd->dev, "dma_map_sg() error\n"); - dma_unmap_sg(dd->dev, dd->in_sg, 1, - DMA_TO_DEVICE); - return -EINVAL; - } + case CFB16_BLOCK_SIZE: + addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; + maxburst = 1; + break; - addr_in = sg_dma_address(dd->in_sg); - addr_out = sg_dma_address(dd->out_sg); + case CFB32_BLOCK_SIZE: + case CFB64_BLOCK_SIZE: + addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + maxburst = 1; + break; - dd->flags |= AES_FLAGS_FAST; + case AES_BLOCK_SIZE: + addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + maxburst = dd->caps.max_burst_size; + break; - } else { - dma_sync_single_for_cpu(dd->dev, dd->dma_addr_in, - dd->dma_size, DMA_TO_DEVICE); + default: + err = -EINVAL; + goto exit; + } - /* use cache buffers */ - count = atmel_aes_sg_copy(&dd->in_sg, &dd->in_offset, - dd->buf_in, dd->buflen, dd->total, 0); + err = atmel_aes_map(dd, src, dst, len); + if (err) + goto exit; - addr_in = dd->dma_addr_in; - addr_out = dd->dma_addr_out; + dd->resume = resume; - dd->flags &= ~AES_FLAGS_FAST; - } + /* Set output DMA transfer first */ + err = atmel_aes_dma_transfer_start(dd, addr_width, DMA_DEV_TO_MEM, + maxburst); + if (err) + goto unmap; - dd->total -= count; + /* Then set input DMA transfer */ + err = atmel_aes_dma_transfer_start(dd, addr_width, DMA_MEM_TO_DEV, + maxburst); + if (err) + goto output_transfer_stop; - err = atmel_aes_crypt_dma(dd, addr_in, addr_out, count); + return -EINPROGRESS; - if (err && (dd->flags & AES_FLAGS_FAST)) { - dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); - dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE); - } +output_transfer_stop: + atmel_aes_dma_transfer_stop(dd, DMA_DEV_TO_MEM); +unmap: + atmel_aes_unmap(dd); +exit: + return atmel_aes_complete(dd, err); +} - dd->resume = atmel_aes_dma_complete; - return err ? : -EINPROGRESS; +static void atmel_aes_dma_stop(struct atmel_aes_dev *dd) +{ + atmel_aes_dma_transfer_stop(dd, DMA_MEM_TO_DEV); + atmel_aes_dma_transfer_stop(dd, DMA_DEV_TO_MEM); + atmel_aes_unmap(dd); +} + +static void atmel_aes_dma_callback(void *data) +{ + struct atmel_aes_dev *dd = data; + + atmel_aes_dma_stop(dd); + dd->is_async = true; + (void)dd->resume(dd); } static void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma, @@ -601,119 +732,52 @@ static int atmel_aes_handle_queue(struct atmel_aes_dev *dd, return (dd->is_async) ? 
ret : err; } +static int atmel_aes_transfer_complete(struct atmel_aes_dev *dd) +{ + return atmel_aes_complete(dd, 0); +} + static int atmel_aes_start(struct atmel_aes_dev *dd) { struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq); - struct atmel_aes_reqctx *rctx; - bool use_dma; + struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req); + bool use_dma = (req->nbytes >= ATMEL_AES_DMA_THRESHOLD || + dd->ctx->block_size != AES_BLOCK_SIZE); int err; - /* assign new request to device */ - dd->total = req->nbytes; - dd->in_offset = 0; - dd->in_sg = req->src; - dd->out_offset = 0; - dd->out_sg = req->dst; - - rctx = ablkcipher_request_ctx(req); atmel_aes_set_mode(dd, rctx); err = atmel_aes_hw_init(dd); - if (!err) { - use_dma = (dd->total > ATMEL_AES_DMA_THRESHOLD); - atmel_aes_write_ctrl(dd, use_dma, req->info); - if (use_dma) - err = atmel_aes_crypt_dma_start(dd); - else - err = atmel_aes_crypt_cpu_start(dd); - } - if (err && err != -EINPROGRESS) { - /* aes_task will not finish it, so do it here */ + if (err) return atmel_aes_complete(dd, err); - } - - return -EINPROGRESS; -} - -static int atmel_aes_crypt_dma_stop(struct atmel_aes_dev *dd) -{ - int err = 0; - size_t count; - if (dd->flags & AES_FLAGS_FAST) { - dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE); - dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); - } else { - dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out, - dd->dma_size, DMA_FROM_DEVICE); - - /* copy data */ - count = atmel_aes_sg_copy(&dd->out_sg, &dd->out_offset, - dd->buf_out, dd->buflen, - dd->dma_size, 1); - if (count != dd->dma_size) { - err = -EINVAL; - pr_err("not all data converted: %zu\n", count); - } - } + atmel_aes_write_ctrl(dd, use_dma, req->info); + if (use_dma) + return atmel_aes_dma_start(dd, req->src, req->dst, req->nbytes, + atmel_aes_transfer_complete); - return err; + return atmel_aes_cpu_start(dd, req->src, req->dst, req->nbytes, + atmel_aes_transfer_complete); } static int atmel_aes_buff_init(struct atmel_aes_dev *dd) { - int err = -ENOMEM; - - dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0); - dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, 0); - dd->buflen = PAGE_SIZE; + dd->buf = (void *)__get_free_pages(GFP_KERNEL, ATMEL_AES_BUFFER_ORDER); + dd->buflen = ATMEL_AES_BUFFER_SIZE; dd->buflen &= ~(AES_BLOCK_SIZE - 1); - if (!dd->buf_in || !dd->buf_out) { + if (!dd->buf) { dev_err(dd->dev, "unable to alloc pages.\n"); - goto err_alloc; - } - - /* MAP here */ - dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in, - dd->buflen, DMA_TO_DEVICE); - if (dma_mapping_error(dd->dev, dd->dma_addr_in)) { - dev_err(dd->dev, "dma %zd bytes error\n", dd->buflen); - err = -EINVAL; - goto err_map_in; - } - - dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out, - dd->buflen, DMA_FROM_DEVICE); - if (dma_mapping_error(dd->dev, dd->dma_addr_out)) { - dev_err(dd->dev, "dma %zd bytes error\n", dd->buflen); - err = -EINVAL; - goto err_map_out; + return -ENOMEM; } return 0; - -err_map_out: - dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, - DMA_TO_DEVICE); -err_map_in: -err_alloc: - free_page((unsigned long)dd->buf_out); - free_page((unsigned long)dd->buf_in); - if (err) - pr_err("error: %d\n", err); - return err; } static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd) { - dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen, - DMA_FROM_DEVICE); - dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, - DMA_TO_DEVICE); - free_page((unsigned long)dd->buf_out); - free_page((unsigned long)dd->buf_in); + free_page((unsigned 
long)dd->buf); } static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode) @@ -767,8 +831,9 @@ static bool atmel_aes_filter(struct dma_chan *chan, void *slave) } static int atmel_aes_dma_init(struct atmel_aes_dev *dd, - struct crypto_platform_data *pdata) + struct crypto_platform_data *pdata) { + struct at_dma_slave *slave; int err = -ENOMEM; dma_cap_mask_t mask; @@ -776,42 +841,22 @@ static int atmel_aes_dma_init(struct atmel_aes_dev *dd, dma_cap_set(DMA_SLAVE, mask); /* Try to grab 2 DMA channels */ - dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask, - atmel_aes_filter, &pdata->dma_slave->rxdata, dd->dev, "tx"); - if (!dd->dma_lch_in.chan) + slave = &pdata->dma_slave->rxdata; + dd->src.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter, + slave, dd->dev, "tx"); + if (!dd->src.chan) goto err_dma_in; - dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV; - dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base + - AES_IDATAR(0); - dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size; - dd->dma_lch_in.dma_conf.src_addr_width = - DMA_SLAVE_BUSWIDTH_4_BYTES; - dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size; - dd->dma_lch_in.dma_conf.dst_addr_width = - DMA_SLAVE_BUSWIDTH_4_BYTES; - dd->dma_lch_in.dma_conf.device_fc = false; - - dd->dma_lch_out.chan = dma_request_slave_channel_compat(mask, - atmel_aes_filter, &pdata->dma_slave->txdata, dd->dev, "rx"); - if (!dd->dma_lch_out.chan) + slave = &pdata->dma_slave->txdata; + dd->dst.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter, + slave, dd->dev, "rx"); + if (!dd->dst.chan) goto err_dma_out; - dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM; - dd->dma_lch_out.dma_conf.src_addr = dd->phys_base + - AES_ODATAR(0); - dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size; - dd->dma_lch_out.dma_conf.src_addr_width = - DMA_SLAVE_BUSWIDTH_4_BYTES; - dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size; - dd->dma_lch_out.dma_conf.dst_addr_width = - DMA_SLAVE_BUSWIDTH_4_BYTES; - dd->dma_lch_out.dma_conf.device_fc = false; - return 0; err_dma_out: - dma_release_channel(dd->dma_lch_in.chan); + dma_release_channel(dd->src.chan); err_dma_in: dev_warn(dd->dev, "no DMA channel available\n"); return err; @@ -819,8 +864,8 @@ err_dma_in: static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd) { - dma_release_channel(dd->dma_lch_in.chan); - dma_release_channel(dd->dma_lch_out.chan); + dma_release_channel(dd->dst.chan); + dma_release_channel(dd->src.chan); } static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, @@ -1157,43 +1202,6 @@ static void atmel_aes_done_task(unsigned long data) (void)dd->resume(dd); } -static int atmel_aes_dma_complete(struct atmel_aes_dev *dd) -{ - int err; - - err = atmel_aes_crypt_dma_stop(dd); - if (dd->total && !err) { - if (dd->flags & AES_FLAGS_FAST) { - dd->in_sg = sg_next(dd->in_sg); - dd->out_sg = sg_next(dd->out_sg); - if (!dd->in_sg || !dd->out_sg) - err = -EINVAL; - } - if (!err) - err = atmel_aes_crypt_dma_start(dd); - if (!err || err == -EINPROGRESS) - return -EINPROGRESS; /* DMA started. Not fininishing. 
*/ - } - - return atmel_aes_complete(dd, err); -} - -static int atmel_aes_cpu_complete(struct atmel_aes_dev *dd) -{ - int err; - - atmel_aes_read_n(dd, AES_ODATAR(0), (u32 *) dd->buf_out, - dd->bufcnt >> 2); - - if (sg_copy_from_buffer(dd->out_sg, dd->nb_out_sg, - dd->buf_out, dd->bufcnt)) - err = 0; - else - err = -EINVAL; - - return atmel_aes_complete(dd, err); -} - static irqreturn_t atmel_aes_irq(int irq, void *dev_id) { struct atmel_aes_dev *aes_dd = dev_id; @@ -1430,8 +1438,8 @@ static int atmel_aes_probe(struct platform_device *pdev) goto err_algs; dev_info(dev, "Atmel AES - Using %s, %s for DMA transfers\n", - dma_chan_name(aes_dd->dma_lch_in.chan), - dma_chan_name(aes_dd->dma_lch_out.chan)); + dma_chan_name(aes_dd->src.chan), + dma_chan_name(aes_dd->dst.chan)); return 0; -- cgit v0.10.2 From 2bfd04cde2b34e65d0954ff8372a7edf116afe41 Mon Sep 17 00:00:00 2001 From: Cyrille Pitchen Date: Thu, 17 Dec 2015 18:13:01 +0100 Subject: crypto: atmel-aes - use SIZE_IN_WORDS() helper macro This is a purely cosmetic patch. Signed-off-by: Cyrille Pitchen Signed-off-by: Herbert Xu diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index 9ef38ec..176ab38 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -689,10 +689,10 @@ static void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma, atmel_aes_write(dd, AES_MR, valmr); atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key, - dd->ctx->keylen >> 2); + SIZE_IN_WORDS(dd->ctx->keylen)); if (iv && (valmr & AES_MR_OPMOD_MASK) != AES_MR_OPMOD_ECB) - atmel_aes_write_n(dd, AES_IVR(0), iv, 4); + atmel_aes_write_block(dd, AES_IVR(0), iv); } static int atmel_aes_handle_queue(struct atmel_aes_dev *dd, -- cgit v0.10.2 From afbac17e676fb3a8b8821e67e642f3bc1691f50a Mon Sep 17 00:00:00 2001 From: Cyrille Pitchen Date: Thu, 17 Dec 2015 18:13:02 +0100 Subject: crypto: atmel-aes - fix typo and indentation A cosmetic patch to fix a typo and indentation. 
Signed-off-by: Cyrille Pitchen Signed-off-by: Herbert Xu diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index 176ab38..208fa8d 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -80,9 +80,9 @@ struct atmel_aes_caps { - bool has_dualbuff; - bool has_cfb64; - u32 max_burst_size; + bool has_dualbuff; + bool has_cfb64; + u32 max_burst_size; }; struct atmel_aes_dev; @@ -92,13 +92,11 @@ typedef int (*atmel_aes_fn_t)(struct atmel_aes_dev *); struct atmel_aes_base_ctx { - struct atmel_aes_dev *dd; - atmel_aes_fn_t start; - - int keylen; - u32 key[AES_KEYSIZE_256 / sizeof(u32)]; - - u16 block_size; + struct atmel_aes_dev *dd; + atmel_aes_fn_t start; + int keylen; + u32 key[AES_KEYSIZE_256 / sizeof(u32)]; + u16 block_size; }; struct atmel_aes_ctx { @@ -106,7 +104,7 @@ struct atmel_aes_ctx { }; struct atmel_aes_reqctx { - unsigned long mode; + unsigned long mode; }; struct atmel_aes_dma { @@ -131,7 +129,7 @@ struct atmel_aes_dev { struct device *dev; struct clk *iclk; - int irq; + int irq; unsigned long flags; @@ -155,7 +153,7 @@ struct atmel_aes_dev { struct atmel_aes_caps caps; - u32 hw_version; + u32 hw_version; }; struct atmel_aes_drv { @@ -782,11 +780,11 @@ static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd) static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode) { - struct atmel_aes_base_ctx *ctx = crypto_ablkcipher_ctx( - crypto_ablkcipher_reqtfm(req)); - struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req); + struct atmel_aes_base_ctx *ctx; + struct atmel_aes_reqctx *rctx; struct atmel_aes_dev *dd; + ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); switch (mode & AES_FLAGS_OPMODE_MASK) { case AES_FLAGS_CFB8: ctx->block_size = CFB8_BLOCK_SIZE; @@ -813,6 +811,7 @@ static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode) if (!dd) return -ENODEV; + rctx = ablkcipher_request_ctx(req); rctx->mode = mode; return atmel_aes_handle_queue(dd, &req->base); @@ -873,8 +872,9 @@ static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, { struct atmel_aes_base_ctx *ctx = crypto_ablkcipher_ctx(tfm); - if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && - keylen != AES_KEYSIZE_256) { + if (keylen != AES_KEYSIZE_128 && + keylen != AES_KEYSIZE_192 && + keylen != AES_KEYSIZE_256) { crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } @@ -897,26 +897,22 @@ static int atmel_aes_ecb_decrypt(struct ablkcipher_request *req) static int atmel_aes_cbc_encrypt(struct ablkcipher_request *req) { - return atmel_aes_crypt(req, - AES_FLAGS_ENCRYPT | AES_FLAGS_CBC); + return atmel_aes_crypt(req, AES_FLAGS_CBC | AES_FLAGS_ENCRYPT); } static int atmel_aes_cbc_decrypt(struct ablkcipher_request *req) { - return atmel_aes_crypt(req, - AES_FLAGS_CBC); + return atmel_aes_crypt(req, AES_FLAGS_CBC); } static int atmel_aes_ofb_encrypt(struct ablkcipher_request *req) { - return atmel_aes_crypt(req, - AES_FLAGS_ENCRYPT | AES_FLAGS_OFB); + return atmel_aes_crypt(req, AES_FLAGS_OFB | AES_FLAGS_ENCRYPT); } static int atmel_aes_ofb_decrypt(struct ablkcipher_request *req) { - return atmel_aes_crypt(req, - AES_FLAGS_OFB); + return atmel_aes_crypt(req, AES_FLAGS_OFB); } static int atmel_aes_cfb_encrypt(struct ablkcipher_request *req) @@ -971,14 +967,12 @@ static int atmel_aes_cfb8_decrypt(struct ablkcipher_request *req) static int atmel_aes_ctr_encrypt(struct ablkcipher_request *req) { - return atmel_aes_crypt(req, - AES_FLAGS_ENCRYPT | AES_FLAGS_CTR); + return 
atmel_aes_crypt(req, AES_FLAGS_CTR | AES_FLAGS_ENCRYPT); } static int atmel_aes_ctr_decrypt(struct ablkcipher_request *req) { - return atmel_aes_crypt(req, - AES_FLAGS_CTR); + return atmel_aes_crypt(req, AES_FLAGS_CTR); } static int atmel_aes_cra_init(struct crypto_tfm *tfm) @@ -1196,7 +1190,7 @@ static void atmel_aes_queue_task(unsigned long data) static void atmel_aes_done_task(unsigned long data) { - struct atmel_aes_dev *dd = (struct atmel_aes_dev *) data; + struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data; dd->is_async = true; (void)dd->resume(dd); -- cgit v0.10.2 From e37a7e55505176ae83d376585c2ebd37d4258910 Mon Sep 17 00:00:00 2001 From: Cyrille Pitchen Date: Thu, 17 Dec 2015 18:13:03 +0100 Subject: crypto: atmel-aes - create sections to regroup functions by usage This patch only creates sections to regroup functions by usage. This will help to integrate the GCM support patch later by distinguishing between shared/common and specific code. Hence current sections are: - Shared functions: common code which will be reused by the GCM support. - CPU transfer: handles transfers monitored by the CPU (PIO accesses). - DMA transfer: handles transfers monitored by the DMA controller. - AES async block ciphers: dedicated to the already supported block ciphers. - Probe functions: used to register all crypto algorithms. Signed-off-by: Cyrille Pitchen Signed-off-by: Herbert Xu diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index 208fa8d..e964cb0 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -166,6 +166,7 @@ static struct atmel_aes_drv atmel_aes = { .lock = __SPIN_LOCK_UNLOCKED(atmel_aes.lock), }; +/* Shared functions */ static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset) { @@ -302,6 +303,38 @@ static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err) return err; } +static void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma, + const u32 *iv) +{ + u32 valmr = 0; + + /* MR register must be set before IV registers */ + if (dd->ctx->keylen == AES_KEYSIZE_128) + valmr |= AES_MR_KEYSIZE_128; + else if (dd->ctx->keylen == AES_KEYSIZE_192) + valmr |= AES_MR_KEYSIZE_192; + else + valmr |= AES_MR_KEYSIZE_256; + + valmr |= dd->flags & AES_FLAGS_MODE_MASK; + + if (use_dma) { + valmr |= AES_MR_SMOD_IDATAR0; + if (dd->caps.has_dualbuff) + valmr |= AES_MR_DUALBUFF; + } else { + valmr |= AES_MR_SMOD_AUTO; + } + + atmel_aes_write(dd, AES_MR, valmr); + + atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key, + SIZE_IN_WORDS(dd->ctx->keylen)); + + if (iv && (valmr & AES_MR_OPMOD_MASK) != AES_MR_OPMOD_ECB) + atmel_aes_write_block(dd, AES_IVR(0), iv); +} + /* CPU transfer */ @@ -661,38 +694,6 @@ static void atmel_aes_dma_callback(void *data) (void)dd->resume(dd); } -static void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma, - const u32 *iv) -{ - u32 valmr = 0; - - /* MR register must be set before IV registers */ - if (dd->ctx->keylen == AES_KEYSIZE_128) - valmr |= AES_MR_KEYSIZE_128; - else if (dd->ctx->keylen == AES_KEYSIZE_192) - valmr |= AES_MR_KEYSIZE_192; - else - valmr |= AES_MR_KEYSIZE_256; - - valmr |= dd->flags & AES_FLAGS_MODE_MASK; - - if (use_dma) { - valmr |= AES_MR_SMOD_IDATAR0; - if (dd->caps.has_dualbuff) - valmr |= AES_MR_DUALBUFF; - } else { - valmr |= AES_MR_SMOD_AUTO; - } - - atmel_aes_write(dd, AES_MR, valmr); - - atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key, - SIZE_IN_WORDS(dd->ctx->keylen)); - - if (iv && (valmr & AES_MR_OPMOD_MASK) != AES_MR_OPMOD_ECB) - 
atmel_aes_write_block(dd, AES_IVR(0), iv); -} - static int atmel_aes_handle_queue(struct atmel_aes_dev *dd, struct crypto_async_request *new_areq) { @@ -730,6 +731,9 @@ static int atmel_aes_handle_queue(struct atmel_aes_dev *dd, return (dd->is_async) ? ret : err; } + +/* AES async block ciphers */ + static int atmel_aes_transfer_complete(struct atmel_aes_dev *dd) { return atmel_aes_complete(dd, 0); @@ -758,26 +762,6 @@ static int atmel_aes_start(struct atmel_aes_dev *dd) atmel_aes_transfer_complete); } - -static int atmel_aes_buff_init(struct atmel_aes_dev *dd) -{ - dd->buf = (void *)__get_free_pages(GFP_KERNEL, ATMEL_AES_BUFFER_ORDER); - dd->buflen = ATMEL_AES_BUFFER_SIZE; - dd->buflen &= ~(AES_BLOCK_SIZE - 1); - - if (!dd->buf) { - dev_err(dd->dev, "unable to alloc pages.\n"); - return -ENOMEM; - } - - return 0; -} - -static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd) -{ - free_page((unsigned long)dd->buf); -} - static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode) { struct atmel_aes_base_ctx *ctx; @@ -817,56 +801,6 @@ static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode) return atmel_aes_handle_queue(dd, &req->base); } -static bool atmel_aes_filter(struct dma_chan *chan, void *slave) -{ - struct at_dma_slave *sl = slave; - - if (sl && sl->dma_dev == chan->device->dev) { - chan->private = sl; - return true; - } else { - return false; - } -} - -static int atmel_aes_dma_init(struct atmel_aes_dev *dd, - struct crypto_platform_data *pdata) -{ - struct at_dma_slave *slave; - int err = -ENOMEM; - dma_cap_mask_t mask; - - dma_cap_zero(mask); - dma_cap_set(DMA_SLAVE, mask); - - /* Try to grab 2 DMA channels */ - slave = &pdata->dma_slave->rxdata; - dd->src.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter, - slave, dd->dev, "tx"); - if (!dd->src.chan) - goto err_dma_in; - - slave = &pdata->dma_slave->txdata; - dd->dst.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter, - slave, dd->dev, "rx"); - if (!dd->dst.chan) - goto err_dma_out; - - return 0; - -err_dma_out: - dma_release_channel(dd->src.chan); -err_dma_in: - dev_warn(dd->dev, "no DMA channel available\n"); - return err; -} - -static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd) -{ - dma_release_channel(dd->dst.chan); - dma_release_channel(dd->src.chan); -} - static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, unsigned int keylen) { @@ -1181,6 +1115,78 @@ static struct crypto_alg aes_cfb64_alg = { } }; + +/* Probe functions */ + +static int atmel_aes_buff_init(struct atmel_aes_dev *dd) +{ + dd->buf = (void *)__get_free_pages(GFP_KERNEL, ATMEL_AES_BUFFER_ORDER); + dd->buflen = ATMEL_AES_BUFFER_SIZE; + dd->buflen &= ~(AES_BLOCK_SIZE - 1); + + if (!dd->buf) { + dev_err(dd->dev, "unable to alloc pages.\n"); + return -ENOMEM; + } + + return 0; +} + +static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd) +{ + free_page((unsigned long)dd->buf); +} + +static bool atmel_aes_filter(struct dma_chan *chan, void *slave) +{ + struct at_dma_slave *sl = slave; + + if (sl && sl->dma_dev == chan->device->dev) { + chan->private = sl; + return true; + } else { + return false; + } +} + +static int atmel_aes_dma_init(struct atmel_aes_dev *dd, + struct crypto_platform_data *pdata) +{ + struct at_dma_slave *slave; + int err = -ENOMEM; + dma_cap_mask_t mask; + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + /* Try to grab 2 DMA channels */ + slave = &pdata->dma_slave->rxdata; + dd->src.chan = dma_request_slave_channel_compat(mask, 
atmel_aes_filter, + slave, dd->dev, "tx"); + if (!dd->src.chan) + goto err_dma_in; + + slave = &pdata->dma_slave->txdata; + dd->dst.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter, + slave, dd->dev, "rx"); + if (!dd->dst.chan) + goto err_dma_out; + + return 0; + +err_dma_out: + dma_release_channel(dd->src.chan); +err_dma_in: + dev_warn(dd->dev, "no DMA channel available\n"); + return err; +} + +static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd) +{ + dma_release_channel(dd->dst.chan); + dma_release_channel(dd->src.chan); +} + static void atmel_aes_queue_task(unsigned long data) { struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data; -- cgit v0.10.2 From da7b850edb68ab6e3e3cc8593c6ce2c577b6b61c Mon Sep 17 00:00:00 2001 From: Cyrille Pitchen Date: Thu, 17 Dec 2015 18:13:04 +0100 Subject: crypto: atmel-aes - fix atmel-ctr-aes driver for RFC 3686 crypto_rfc3686_alloc() in crypto/ctr.c expects to be used with a stream cipher (alg->cra_blocksize == 1). Signed-off-by: Cyrille Pitchen Signed-off-by: Herbert Xu diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index e964cb0..5f6dc48 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -1075,7 +1075,7 @@ static struct crypto_alg aes_algs[] = { .cra_driver_name = "atmel-ctr-aes", .cra_priority = ATMEL_AES_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, - .cra_blocksize = AES_BLOCK_SIZE, + .cra_blocksize = 1, .cra_ctxsize = sizeof(struct atmel_aes_ctx), .cra_alignmask = 0xf, .cra_type = &crypto_ablkcipher_type, -- cgit v0.10.2 From fcac83656a3e3b15e7a16b4a64ee5067eecec446 Mon Sep 17 00:00:00 2001 From: Cyrille Pitchen Date: Thu, 17 Dec 2015 18:13:05 +0100 Subject: crypto: atmel-aes - fix the counter overflow in CTR mode Depending on its hardware version, the AES IP provides either a 16 or a 32 bit counter. However the CTR mode expects the size of the counter to be the same as the size of the cipher block, ie 128 bits for AES. This patch detects and handles counter overflows. Signed-off-by: Cyrille Pitchen Signed-off-by: Herbert Xu diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index 5f6dc48..a34919f 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -82,6 +82,7 @@ struct atmel_aes_caps { bool has_dualbuff; bool has_cfb64; + bool has_ctr32; u32 max_burst_size; }; @@ -103,6 +104,15 @@ struct atmel_aes_ctx { struct atmel_aes_base_ctx base; }; +struct atmel_aes_ctr_ctx { + struct atmel_aes_base_ctx base; + + u32 iv[AES_BLOCK_SIZE / sizeof(u32)]; + size_t offset; + struct scatterlist src[2]; + struct scatterlist dst[2]; +}; + struct atmel_aes_reqctx { unsigned long mode; }; @@ -762,6 +772,96 @@ static int atmel_aes_start(struct atmel_aes_dev *dd) atmel_aes_transfer_complete); } +static inline struct atmel_aes_ctr_ctx * +atmel_aes_ctr_ctx_cast(struct atmel_aes_base_ctx *ctx) +{ + return container_of(ctx, struct atmel_aes_ctr_ctx, base); +} + +static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd) +{ + struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx); + struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq); + struct scatterlist *src, *dst; + u32 ctr, blocks; + size_t datalen; + bool use_dma, fragmented = false; + + /* Check for transfer completion. */ + ctx->offset += dd->total; + if (ctx->offset >= req->nbytes) + return atmel_aes_transfer_complete(dd); + + /* Compute data length. 
*/ + datalen = req->nbytes - ctx->offset; + blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE); + ctr = be32_to_cpu(ctx->iv[3]); + if (dd->caps.has_ctr32) { + /* Check 32bit counter overflow. */ + u32 start = ctr; + u32 end = start + blocks - 1; + + if (end < start) { + ctr |= 0xffffffff; + datalen = AES_BLOCK_SIZE * -start; + fragmented = true; + } + } else { + /* Check 16bit counter overflow. */ + u16 start = ctr & 0xffff; + u16 end = start + (u16)blocks - 1; + + if (blocks >> 16 || end < start) { + ctr |= 0xffff; + datalen = AES_BLOCK_SIZE * (0x10000-start); + fragmented = true; + } + } + use_dma = (datalen >= ATMEL_AES_DMA_THRESHOLD); + + /* Jump to offset. */ + src = scatterwalk_ffwd(ctx->src, req->src, ctx->offset); + dst = ((req->src == req->dst) ? src : + scatterwalk_ffwd(ctx->dst, req->dst, ctx->offset)); + + /* Configure hardware. */ + atmel_aes_write_ctrl(dd, use_dma, ctx->iv); + if (unlikely(fragmented)) { + /* + * Increment the counter manually to cope with the hardware + * counter overflow. + */ + ctx->iv[3] = cpu_to_be32(ctr); + crypto_inc((u8 *)ctx->iv, AES_BLOCK_SIZE); + } + + if (use_dma) + return atmel_aes_dma_start(dd, src, dst, datalen, + atmel_aes_ctr_transfer); + + return atmel_aes_cpu_start(dd, src, dst, datalen, + atmel_aes_ctr_transfer); +} + +static int atmel_aes_ctr_start(struct atmel_aes_dev *dd) +{ + struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx); + struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq); + struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req); + int err; + + atmel_aes_set_mode(dd, rctx); + + err = atmel_aes_hw_init(dd); + if (err) + return atmel_aes_complete(dd, err); + + memcpy(ctx->iv, req->info, AES_BLOCK_SIZE); + ctx->offset = 0; + dd->total = 0; + return atmel_aes_ctr_transfer(dd); +} + static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode) { struct atmel_aes_base_ctx *ctx; @@ -919,6 +1019,16 @@ static int atmel_aes_cra_init(struct crypto_tfm *tfm) return 0; } +static int atmel_aes_ctr_cra_init(struct crypto_tfm *tfm) +{ + struct atmel_aes_ctx *ctx = crypto_tfm_ctx(tfm); + + tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx); + ctx->base.start = atmel_aes_ctr_start; + + return 0; +} + static void atmel_aes_cra_exit(struct crypto_tfm *tfm) { } @@ -1076,11 +1186,11 @@ static struct crypto_alg aes_algs[] = { .cra_priority = ATMEL_AES_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct atmel_aes_ctx), + .cra_ctxsize = sizeof(struct atmel_aes_ctr_ctx), .cra_alignmask = 0xf, .cra_type = &crypto_ablkcipher_type, .cra_module = THIS_MODULE, - .cra_init = atmel_aes_cra_init, + .cra_init = atmel_aes_ctr_cra_init, .cra_exit = atmel_aes_cra_exit, .cra_u.ablkcipher = { .min_keysize = AES_MIN_KEY_SIZE, @@ -1262,6 +1372,7 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd) { dd->caps.has_dualbuff = 0; dd->caps.has_cfb64 = 0; + dd->caps.has_ctr32 = 0; dd->caps.max_burst_size = 1; /* keep only major version number */ @@ -1269,11 +1380,13 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd) case 0x500: dd->caps.has_dualbuff = 1; dd->caps.has_cfb64 = 1; + dd->caps.has_ctr32 = 1; dd->caps.max_burst_size = 4; break; case 0x200: dd->caps.has_dualbuff = 1; dd->caps.has_cfb64 = 1; + dd->caps.has_ctr32 = 1; dd->caps.max_burst_size = 4; break; case 0x130: -- cgit v0.10.2 From 129f8bb6bb882d6af49f83a1369215aeb0ccb95d Mon Sep 17 00:00:00 2001 From: Cyrille Pitchen Date: Thu, 17 Dec 2015 18:13:06 +0100 Subject: crypto: 
atmel-aes - change the DMA threshold Increase the DMA threshold to 256: PIO accesses offer better performance than DMA when processing small amounts of data. Signed-off-by: Cyrille Pitchen Signed-off-by: Herbert Xu diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index a34919f..ea645b4 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -76,7 +76,7 @@ #define ATMEL_AES_QUEUE_LENGTH 50 -#define ATMEL_AES_DMA_THRESHOLD 16 +#define ATMEL_AES_DMA_THRESHOLD 256 struct atmel_aes_caps { -- cgit v0.10.2 From d4419548dba9575934fee6d9fa20a480257889b2 Mon Sep 17 00:00:00 2001 From: Cyrille Pitchen Date: Thu, 17 Dec 2015 18:13:07 +0100 Subject: crypto: atmel-aes - add support to GCM mode This patch adds support for GCM mode. Signed-off-by: Cyrille Pitchen Signed-off-by: Herbert Xu diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 3a3a1e7..3dd69df 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -383,6 +383,7 @@ config CRYPTO_DEV_ATMEL_AES tristate "Support for Atmel AES hw accelerator" depends on AT_XDMAC || AT_HDMAC || COMPILE_TEST select CRYPTO_AES + select CRYPTO_AEAD select CRYPTO_BLKCIPHER help Some Atmel processors have AES hw accelerator. diff --git a/drivers/crypto/atmel-aes-regs.h b/drivers/crypto/atmel-aes-regs.h index 2786bb1..6c2951b 100644 --- a/drivers/crypto/atmel-aes-regs.h +++ b/drivers/crypto/atmel-aes-regs.h @@ -9,6 +9,7 @@ #define AES_MR 0x04 #define AES_MR_CYPHER_DEC (0 << 0) #define AES_MR_CYPHER_ENC (1 << 0) +#define AES_MR_GTAGEN (1 << 1) #define AES_MR_DUALBUFF (1 << 3) #define AES_MR_PROCDLY_MASK (0xF << 4) #define AES_MR_PROCDLY_OFFSET 4 @@ -26,6 +27,7 @@ #define AES_MR_OPMOD_OFB (0x2 << 12) #define AES_MR_OPMOD_CFB (0x3 << 12) #define AES_MR_OPMOD_CTR (0x4 << 12) +#define AES_MR_OPMOD_GCM (0x5 << 12) #define AES_MR_LOD (0x1 << 15) #define AES_MR_CFBS_MASK (0x7 << 16) #define AES_MR_CFBS_128b (0x0 << 16) @@ -44,6 +46,7 @@ #define AES_ISR 0x1C #define AES_INT_DATARDY (1 << 0) #define AES_INT_URAD (1 << 8) +#define AES_INT_TAGRDY (1 << 16) #define AES_ISR_URAT_MASK (0xF << 12) #define AES_ISR_URAT_IDR_WR_PROC (0x0 << 12) #define AES_ISR_URAT_ODR_RD_PROC (0x1 << 12) @@ -57,6 +60,13 @@ #define AES_ODATAR(x) (0x50 + ((x) * 0x04)) #define AES_IVR(x) (0x60 + ((x) * 0x04)) +#define AES_AADLENR 0x70 +#define AES_CLENR 0x74 +#define AES_GHASHR(x) (0x78 + ((x) * 0x04)) +#define AES_TAGR(x) (0x88 + ((x) * 0x04)) +#define AES_CTRR 0x98 +#define AES_GCMHR(x) (0x9c + ((x) * 0x04)) + #define AES_HW_VERSION 0xFC #endif /* __ATMEL_AES_REGS_H__ */ diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index ea645b4..0a37e56 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -36,6 +36,7 @@ #include #include #include +#include #include #include #include "atmel-aes-regs.h" @@ -53,8 +54,9 @@ #define SIZE_IN_WORDS(x) ((x) >> 2) /* AES flags */ -/* Reserve bits [18:16] [14:12] [0] for mode (same as for AES_MR) */ +/* Reserve bits [18:16] [14:12] [1:0] for mode (same as for AES_MR) */ #define AES_FLAGS_ENCRYPT AES_MR_CYPHER_ENC +#define AES_FLAGS_GTAGEN AES_MR_GTAGEN #define AES_FLAGS_OPMODE_MASK (AES_MR_OPMOD_MASK | AES_MR_CFBS_MASK) #define AES_FLAGS_ECB AES_MR_OPMOD_ECB #define AES_FLAGS_CBC AES_MR_OPMOD_CBC @@ -65,9 +67,11 @@ #define AES_FLAGS_CFB16 (AES_MR_OPMOD_CFB | AES_MR_CFBS_16b) #define AES_FLAGS_CFB8 (AES_MR_OPMOD_CFB | AES_MR_CFBS_8b) #define AES_FLAGS_CTR AES_MR_OPMOD_CTR +#define AES_FLAGS_GCM AES_MR_OPMOD_GCM #define AES_FLAGS_MODE_MASK (AES_FLAGS_OPMODE_MASK 
| \ - AES_FLAGS_ENCRYPT) + AES_FLAGS_ENCRYPT | \ + AES_FLAGS_GTAGEN) #define AES_FLAGS_INIT BIT(2) #define AES_FLAGS_BUSY BIT(3) @@ -83,6 +87,7 @@ struct atmel_aes_caps { bool has_dualbuff; bool has_cfb64; bool has_ctr32; + bool has_gcm; u32 max_burst_size; }; @@ -113,6 +118,22 @@ struct atmel_aes_ctr_ctx { struct scatterlist dst[2]; }; +struct atmel_aes_gcm_ctx { + struct atmel_aes_base_ctx base; + + struct scatterlist src[2]; + struct scatterlist dst[2]; + + u32 j0[AES_BLOCK_SIZE / sizeof(u32)]; + u32 tag[AES_BLOCK_SIZE / sizeof(u32)]; + u32 ghash[AES_BLOCK_SIZE / sizeof(u32)]; + size_t textlen; + + const u32 *ghash_in; + u32 *ghash_out; + atmel_aes_fn_t ghash_resume; +}; + struct atmel_aes_reqctx { unsigned long mode; }; @@ -234,6 +255,12 @@ static inline size_t atmel_aes_padlen(size_t len, size_t block_size) return len ? block_size - len : 0; } +static inline struct aead_request * +aead_request_cast(struct crypto_async_request *req) +{ + return container_of(req, struct aead_request, base); +} + static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_base_ctx *ctx) { struct atmel_aes_dev *aes_dd = NULL; @@ -300,6 +327,11 @@ static inline void atmel_aes_set_mode(struct atmel_aes_dev *dd, dd->flags = (dd->flags & AES_FLAGS_PERSISTENT) | rctx->mode; } +static inline bool atmel_aes_is_encrypt(const struct atmel_aes_dev *dd) +{ + return (dd->flags & AES_FLAGS_ENCRYPT); +} + static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err) { clk_disable_unprepare(dd->iclk); @@ -1226,6 +1258,409 @@ static struct crypto_alg aes_cfb64_alg = { }; +/* gcm aead functions */ + +static int atmel_aes_gcm_ghash(struct atmel_aes_dev *dd, + const u32 *data, size_t datalen, + const u32 *ghash_in, u32 *ghash_out, + atmel_aes_fn_t resume); +static int atmel_aes_gcm_ghash_init(struct atmel_aes_dev *dd); +static int atmel_aes_gcm_ghash_finalize(struct atmel_aes_dev *dd); + +static int atmel_aes_gcm_start(struct atmel_aes_dev *dd); +static int atmel_aes_gcm_process(struct atmel_aes_dev *dd); +static int atmel_aes_gcm_length(struct atmel_aes_dev *dd); +static int atmel_aes_gcm_data(struct atmel_aes_dev *dd); +static int atmel_aes_gcm_tag_init(struct atmel_aes_dev *dd); +static int atmel_aes_gcm_tag(struct atmel_aes_dev *dd); +static int atmel_aes_gcm_finalize(struct atmel_aes_dev *dd); + +static inline struct atmel_aes_gcm_ctx * +atmel_aes_gcm_ctx_cast(struct atmel_aes_base_ctx *ctx) +{ + return container_of(ctx, struct atmel_aes_gcm_ctx, base); +} + +static int atmel_aes_gcm_ghash(struct atmel_aes_dev *dd, + const u32 *data, size_t datalen, + const u32 *ghash_in, u32 *ghash_out, + atmel_aes_fn_t resume) +{ + struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx); + + dd->data = (u32 *)data; + dd->datalen = datalen; + ctx->ghash_in = ghash_in; + ctx->ghash_out = ghash_out; + ctx->ghash_resume = resume; + + atmel_aes_write_ctrl(dd, false, NULL); + return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_ghash_init); +} + +static int atmel_aes_gcm_ghash_init(struct atmel_aes_dev *dd) +{ + struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx); + + /* Set the data length. 
*/ + atmel_aes_write(dd, AES_AADLENR, dd->total); + atmel_aes_write(dd, AES_CLENR, 0); + + /* If needed, overwrite the GCM Intermediate Hash Word Registers */ + if (ctx->ghash_in) + atmel_aes_write_block(dd, AES_GHASHR(0), ctx->ghash_in); + + return atmel_aes_gcm_ghash_finalize(dd); +} + +static int atmel_aes_gcm_ghash_finalize(struct atmel_aes_dev *dd) +{ + struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx); + u32 isr; + + /* Write data into the Input Data Registers. */ + while (dd->datalen > 0) { + atmel_aes_write_block(dd, AES_IDATAR(0), dd->data); + dd->data += 4; + dd->datalen -= AES_BLOCK_SIZE; + + isr = atmel_aes_read(dd, AES_ISR); + if (!(isr & AES_INT_DATARDY)) { + dd->resume = atmel_aes_gcm_ghash_finalize; + atmel_aes_write(dd, AES_IER, AES_INT_DATARDY); + return -EINPROGRESS; + } + } + + /* Read the computed hash from GHASHRx. */ + atmel_aes_read_block(dd, AES_GHASHR(0), ctx->ghash_out); + + return ctx->ghash_resume(dd); +} + + +static int atmel_aes_gcm_start(struct atmel_aes_dev *dd) +{ + struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx); + struct aead_request *req = aead_request_cast(dd->areq); + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct atmel_aes_reqctx *rctx = aead_request_ctx(req); + size_t ivsize = crypto_aead_ivsize(tfm); + size_t datalen, padlen; + const void *iv = req->iv; + u8 *data = dd->buf; + int err; + + atmel_aes_set_mode(dd, rctx); + + err = atmel_aes_hw_init(dd); + if (err) + return atmel_aes_complete(dd, err); + + if (likely(ivsize == 12)) { + memcpy(ctx->j0, iv, ivsize); + ctx->j0[3] = cpu_to_be32(1); + return atmel_aes_gcm_process(dd); + } + + padlen = atmel_aes_padlen(ivsize, AES_BLOCK_SIZE); + datalen = ivsize + padlen + AES_BLOCK_SIZE; + if (datalen > dd->buflen) + return atmel_aes_complete(dd, -EINVAL); + + memcpy(data, iv, ivsize); + memset(data + ivsize, 0, padlen + sizeof(u64)); + ((u64 *)(data + datalen))[-1] = cpu_to_be64(ivsize * 8); + + return atmel_aes_gcm_ghash(dd, (const u32 *)data, datalen, + NULL, ctx->j0, atmel_aes_gcm_process); +} + +static int atmel_aes_gcm_process(struct atmel_aes_dev *dd) +{ + struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx); + struct aead_request *req = aead_request_cast(dd->areq); + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + bool enc = atmel_aes_is_encrypt(dd); + u32 authsize; + + /* Compute text length. */ + authsize = crypto_aead_authsize(tfm); + ctx->textlen = req->cryptlen - (enc ? 0 : authsize); + + /* + * According to tcrypt test suite, the GCM Automatic Tag Generation + * fails when both the message and its associated data are empty. + */ + if (likely(req->assoclen != 0 || ctx->textlen != 0)) + dd->flags |= AES_FLAGS_GTAGEN; + + atmel_aes_write_ctrl(dd, false, NULL); + return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_length); +} + +static int atmel_aes_gcm_length(struct atmel_aes_dev *dd) +{ + struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx); + struct aead_request *req = aead_request_cast(dd->areq); + u32 j0_lsw, *j0 = ctx->j0; + size_t padlen; + + /* Write incr32(J0) into IV. */ + j0_lsw = j0[3]; + j0[3] = cpu_to_be32(be32_to_cpu(j0[3]) + 1); + atmel_aes_write_block(dd, AES_IVR(0), j0); + j0[3] = j0_lsw; + + /* Set aad and text lengths. */ + atmel_aes_write(dd, AES_AADLENR, req->assoclen); + atmel_aes_write(dd, AES_CLENR, ctx->textlen); + + /* Check whether AAD are present. */ + if (unlikely(req->assoclen == 0)) { + dd->datalen = 0; + return atmel_aes_gcm_data(dd); + } + + /* Copy assoc data and add padding. 
*/ + padlen = atmel_aes_padlen(req->assoclen, AES_BLOCK_SIZE); + if (unlikely(req->assoclen + padlen > dd->buflen)) + return atmel_aes_complete(dd, -EINVAL); + sg_copy_to_buffer(req->src, sg_nents(req->src), dd->buf, req->assoclen); + + /* Write assoc data into the Input Data register. */ + dd->data = (u32 *)dd->buf; + dd->datalen = req->assoclen + padlen; + return atmel_aes_gcm_data(dd); +} + +static int atmel_aes_gcm_data(struct atmel_aes_dev *dd) +{ + struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx); + struct aead_request *req = aead_request_cast(dd->areq); + bool use_dma = (ctx->textlen >= ATMEL_AES_DMA_THRESHOLD); + struct scatterlist *src, *dst; + u32 isr, mr; + + /* Write AAD first. */ + while (dd->datalen > 0) { + atmel_aes_write_block(dd, AES_IDATAR(0), dd->data); + dd->data += 4; + dd->datalen -= AES_BLOCK_SIZE; + + isr = atmel_aes_read(dd, AES_ISR); + if (!(isr & AES_INT_DATARDY)) { + dd->resume = atmel_aes_gcm_data; + atmel_aes_write(dd, AES_IER, AES_INT_DATARDY); + return -EINPROGRESS; + } + } + + /* GMAC only. */ + if (unlikely(ctx->textlen == 0)) + return atmel_aes_gcm_tag_init(dd); + + /* Prepare src and dst scatter lists to transfer cipher/plain texts */ + src = scatterwalk_ffwd(ctx->src, req->src, req->assoclen); + dst = ((req->src == req->dst) ? src : + scatterwalk_ffwd(ctx->dst, req->dst, req->assoclen)); + + if (use_dma) { + /* Update the Mode Register for DMA transfers. */ + mr = atmel_aes_read(dd, AES_MR); + mr &= ~(AES_MR_SMOD_MASK | AES_MR_DUALBUFF); + mr |= AES_MR_SMOD_IDATAR0; + if (dd->caps.has_dualbuff) + mr |= AES_MR_DUALBUFF; + atmel_aes_write(dd, AES_MR, mr); + + return atmel_aes_dma_start(dd, src, dst, ctx->textlen, + atmel_aes_gcm_tag_init); + } + + return atmel_aes_cpu_start(dd, src, dst, ctx->textlen, + atmel_aes_gcm_tag_init); +} + +static int atmel_aes_gcm_tag_init(struct atmel_aes_dev *dd) +{ + struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx); + struct aead_request *req = aead_request_cast(dd->areq); + u64 *data = dd->buf; + + if (likely(dd->flags & AES_FLAGS_GTAGEN)) { + if (!(atmel_aes_read(dd, AES_ISR) & AES_INT_TAGRDY)) { + dd->resume = atmel_aes_gcm_tag_init; + atmel_aes_write(dd, AES_IER, AES_INT_TAGRDY); + return -EINPROGRESS; + } + + return atmel_aes_gcm_finalize(dd); + } + + /* Read the GCM Intermediate Hash Word Registers. */ + atmel_aes_read_block(dd, AES_GHASHR(0), ctx->ghash); + + data[0] = cpu_to_be64(req->assoclen * 8); + data[1] = cpu_to_be64(ctx->textlen * 8); + + return atmel_aes_gcm_ghash(dd, (const u32 *)data, AES_BLOCK_SIZE, + ctx->ghash, ctx->ghash, atmel_aes_gcm_tag); +} + +static int atmel_aes_gcm_tag(struct atmel_aes_dev *dd) +{ + struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx); + unsigned long flags; + + /* + * Change mode to CTR to complete the tag generation. + * Use J0 as Initialization Vector. + */ + flags = dd->flags; + dd->flags &= ~(AES_FLAGS_OPMODE_MASK | AES_FLAGS_GTAGEN); + dd->flags |= AES_FLAGS_CTR; + atmel_aes_write_ctrl(dd, false, ctx->j0); + dd->flags = flags; + + atmel_aes_write_block(dd, AES_IDATAR(0), ctx->ghash); + return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_finalize); +} + +static int atmel_aes_gcm_finalize(struct atmel_aes_dev *dd) +{ + struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx); + struct aead_request *req = aead_request_cast(dd->areq); + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + bool enc = atmel_aes_is_encrypt(dd); + u32 offset, authsize, itag[4], *otag = ctx->tag; + int err; + + /* Read the computed tag. 
*/ + if (likely(dd->flags & AES_FLAGS_GTAGEN)) + atmel_aes_read_block(dd, AES_TAGR(0), ctx->tag); + else + atmel_aes_read_block(dd, AES_ODATAR(0), ctx->tag); + + offset = req->assoclen + ctx->textlen; + authsize = crypto_aead_authsize(tfm); + if (enc) { + scatterwalk_map_and_copy(otag, req->dst, offset, authsize, 1); + err = 0; + } else { + scatterwalk_map_and_copy(itag, req->src, offset, authsize, 0); + err = crypto_memneq(itag, otag, authsize) ? -EBADMSG : 0; + } + + return atmel_aes_complete(dd, err); +} + +static int atmel_aes_gcm_crypt(struct aead_request *req, + unsigned long mode) +{ + struct atmel_aes_base_ctx *ctx; + struct atmel_aes_reqctx *rctx; + struct atmel_aes_dev *dd; + + ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); + ctx->block_size = AES_BLOCK_SIZE; + + dd = atmel_aes_find_dev(ctx); + if (!dd) + return -ENODEV; + + rctx = aead_request_ctx(req); + rctx->mode = AES_FLAGS_GCM | mode; + + return atmel_aes_handle_queue(dd, &req->base); +} + +static int atmel_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key, + unsigned int keylen) +{ + struct atmel_aes_base_ctx *ctx = crypto_aead_ctx(tfm); + + if (keylen != AES_KEYSIZE_256 && + keylen != AES_KEYSIZE_192 && + keylen != AES_KEYSIZE_128) { + crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + memcpy(ctx->key, key, keylen); + ctx->keylen = keylen; + + return 0; +} + +static int atmel_aes_gcm_setauthsize(struct crypto_aead *tfm, + unsigned int authsize) +{ + /* Same as crypto_gcm_authsize() from crypto/gcm.c */ + switch (authsize) { + case 4: + case 8: + case 12: + case 13: + case 14: + case 15: + case 16: + break; + default: + return -EINVAL; + } + + return 0; +} + +static int atmel_aes_gcm_encrypt(struct aead_request *req) +{ + return atmel_aes_gcm_crypt(req, AES_FLAGS_ENCRYPT); +} + +static int atmel_aes_gcm_decrypt(struct aead_request *req) +{ + return atmel_aes_gcm_crypt(req, 0); +} + +static int atmel_aes_gcm_init(struct crypto_aead *tfm) +{ + struct atmel_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm); + + crypto_aead_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx)); + ctx->base.start = atmel_aes_gcm_start; + + return 0; +} + +static void atmel_aes_gcm_exit(struct crypto_aead *tfm) +{ + +} + +static struct aead_alg aes_gcm_alg = { + .setkey = atmel_aes_gcm_setkey, + .setauthsize = atmel_aes_gcm_setauthsize, + .encrypt = atmel_aes_gcm_encrypt, + .decrypt = atmel_aes_gcm_decrypt, + .init = atmel_aes_gcm_init, + .exit = atmel_aes_gcm_exit, + .ivsize = 12, + .maxauthsize = AES_BLOCK_SIZE, + + .base = { + .cra_name = "gcm(aes)", + .cra_driver_name = "atmel-gcm-aes", + .cra_priority = ATMEL_AES_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct atmel_aes_gcm_ctx), + .cra_alignmask = 0xf, + .cra_module = THIS_MODULE, + }, +}; + + /* Probe functions */ static int atmel_aes_buff_init(struct atmel_aes_dev *dd) @@ -1334,6 +1769,9 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd) { int i; + if (dd->caps.has_gcm) + crypto_unregister_aead(&aes_gcm_alg); + if (dd->caps.has_cfb64) crypto_unregister_alg(&aes_cfb64_alg); @@ -1357,8 +1795,16 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd) goto err_aes_cfb64_alg; } + if (dd->caps.has_gcm) { + err = crypto_register_aead(&aes_gcm_alg); + if (err) + goto err_aes_gcm_alg; + } + return 0; +err_aes_gcm_alg: + crypto_unregister_alg(&aes_cfb64_alg); err_aes_cfb64_alg: i = ARRAY_SIZE(aes_algs); err_aes_algs: @@ -1373,6 +1819,7 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd) 
dd->caps.has_dualbuff = 0; dd->caps.has_cfb64 = 0; dd->caps.has_ctr32 = 0; + dd->caps.has_gcm = 0; dd->caps.max_burst_size = 1; /* keep only major version number */ @@ -1381,12 +1828,14 @@ dd->caps.has_dualbuff = 1; dd->caps.has_cfb64 = 1; dd->caps.has_ctr32 = 1; + dd->caps.has_gcm = 1; dd->caps.max_burst_size = 4; break; case 0x200: dd->caps.has_dualbuff = 1; dd->caps.has_cfb64 = 1; dd->caps.has_ctr32 = 1; + dd->caps.has_gcm = 1; dd->caps.max_burst_size = 4; break; case 0x130: -- cgit v0.10.2 From 4537992be7cb9954d19647bec4008ee39ad77217 Mon Sep 17 00:00:00 2001 From: Cyrille Pitchen Date: Thu, 17 Dec 2015 18:13:08 +0100 Subject: crypto: atmel-aes - add debug facilities to monitor register accesses. This feature should not be enabled in release builds but can be useful for developers who need to monitor register accesses at specific places. Set the AES_FLAGS_DUMP_REG flag inside dd->flags to start monitoring the I/O accesses, clear it to stop monitoring. Signed-off-by: Cyrille Pitchen Signed-off-by: Herbert Xu diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index 0a37e56..5621612 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -75,6 +75,7 @@ #define AES_FLAGS_INIT BIT(2) #define AES_FLAGS_BUSY BIT(3) +#define AES_FLAGS_DUMP_REG BIT(4) #define AES_FLAGS_PERSISTENT (AES_FLAGS_INIT | AES_FLAGS_BUSY) @@ -197,16 +198,128 @@ static struct atmel_aes_drv atmel_aes = { .lock = __SPIN_LOCK_UNLOCKED(atmel_aes.lock), }; +#ifdef VERBOSE_DEBUG +static const char *atmel_aes_reg_name(u32 offset, char *tmp, size_t sz) +{ + switch (offset) { + case AES_CR: + return "CR"; + + case AES_MR: + return "MR"; + + case AES_ISR: + return "ISR"; + + case AES_IMR: + return "IMR"; + + case AES_IER: + return "IER"; + + case AES_IDR: + return "IDR"; + + case AES_KEYWR(0): + case AES_KEYWR(1): + case AES_KEYWR(2): + case AES_KEYWR(3): + case AES_KEYWR(4): + case AES_KEYWR(5): + case AES_KEYWR(6): + case AES_KEYWR(7): + snprintf(tmp, sz, "KEYWR[%u]", (offset - AES_KEYWR(0)) >> 2); + break; + + case AES_IDATAR(0): + case AES_IDATAR(1): + case AES_IDATAR(2): + case AES_IDATAR(3): + snprintf(tmp, sz, "IDATAR[%u]", (offset - AES_IDATAR(0)) >> 2); + break; + + case AES_ODATAR(0): + case AES_ODATAR(1): + case AES_ODATAR(2): + case AES_ODATAR(3): + snprintf(tmp, sz, "ODATAR[%u]", (offset - AES_ODATAR(0)) >> 2); + break; + + case AES_IVR(0): + case AES_IVR(1): + case AES_IVR(2): + case AES_IVR(3): + snprintf(tmp, sz, "IVR[%u]", (offset - AES_IVR(0)) >> 2); + break; + + case AES_AADLENR: + return "AADLENR"; + + case AES_CLENR: + return "CLENR"; + + case AES_GHASHR(0): + case AES_GHASHR(1): + case AES_GHASHR(2): + case AES_GHASHR(3): + snprintf(tmp, sz, "GHASHR[%u]", (offset - AES_GHASHR(0)) >> 2); + break; + + case AES_TAGR(0): + case AES_TAGR(1): + case AES_TAGR(2): + case AES_TAGR(3): + snprintf(tmp, sz, "TAGR[%u]", (offset - AES_TAGR(0)) >> 2); + break; + + case AES_CTRR: + return "CTRR"; + + case AES_GCMHR(0): + case AES_GCMHR(1): + case AES_GCMHR(2): + case AES_GCMHR(3): + snprintf(tmp, sz, "GCMHR[%u]", (offset - AES_GCMHR(0)) >> 2); + + default: + snprintf(tmp, sz, "0x%02x", offset); + break; + } + + return tmp; +} +#endif /* VERBOSE_DEBUG */ + /* Shared functions */ static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset) { - return readl_relaxed(dd->io_base + offset); + u32 value = readl_relaxed(dd->io_base + offset); + +#ifdef VERBOSE_DEBUG + if (dd->flags & AES_FLAGS_DUMP_REG) { + char tmp[16]; + 
dev_vdbg(dd->dev, "read 0x%08x from %s\n", value, + atmel_aes_reg_name(offset, tmp, sizeof(tmp))); + } +#endif /* VERBOSE_DEBUG */ + + return value; } static inline void atmel_aes_write(struct atmel_aes_dev *dd, u32 offset, u32 value) { +#ifdef VERBOSE_DEBUG + if (dd->flags & AES_FLAGS_DUMP_REG) { + char tmp[16]; + + dev_vdbg(dd->dev, "write 0x%08x into %s\n", value, + atmel_aes_reg_name(offset, tmp)); + } +#endif /* VERBOSE_DEBUG */ + writel_relaxed(value, dd->io_base + offset); } -- cgit v0.10.2 From 5ca636b986eecce09d4935d490f8d16248b6ce08 Mon Sep 17 00:00:00 2001 From: Dan Streetman Date: Mon, 21 Dec 2015 12:52:10 -0500 Subject: crypto: 842 - remove WARN inside printk Remove the WARN() from the beN_to_cpu macro, which is used as a param to a pr_debug() call. With a certain kernel config, this printk-in-printk results in the no_printk() macro trying to recursively call the no_printk() macro, and since macros can't recursively call themselves a build error results. Reported-by: Randy Dunlap Signed-off-by: Dan Streetman Signed-off-by: Herbert Xu diff --git a/lib/842/842_decompress.c b/lib/842/842_decompress.c index 8881dad..a7f278d 100644 --- a/lib/842/842_decompress.c +++ b/lib/842/842_decompress.c @@ -69,7 +69,7 @@ struct sw842_param { ((s) == 2 ? be16_to_cpu(get_unaligned((__be16 *)d)) : \ (s) == 4 ? be32_to_cpu(get_unaligned((__be32 *)d)) : \ (s) == 8 ? be64_to_cpu(get_unaligned((__be64 *)d)) : \ - WARN(1, "pr_debug param err invalid size %x\n", s)) + 0) static int next_bits(struct sw842_param *p, u64 *d, u8 n); @@ -202,10 +202,14 @@ static int __do_index(struct sw842_param *p, u8 size, u8 bits, u64 fsize) return -EINVAL; } - pr_debug("index%x to %lx off %lx adjoff %lx tot %lx data %lx\n", - size, (unsigned long)index, (unsigned long)(index * size), - (unsigned long)offset, (unsigned long)total, - (unsigned long)beN_to_cpu(&p->ostart[offset], size)); + if (size != 2 && size != 4 && size != 8) + WARN(1, "__do_index invalid size %x\n", size); + else + pr_debug("index%x to %lx off %lx adjoff %lx tot %lx data %lx\n", + size, (unsigned long)index, + (unsigned long)(index * size), (unsigned long)offset, + (unsigned long)total, + (unsigned long)beN_to_cpu(&p->ostart[offset], size)); memcpy(p->out, &p->ostart[offset], size); p->out += size; -- cgit v0.10.2 From a239c36e527e003ff15ad4335c2596de692aa2f4 Mon Sep 17 00:00:00 2001 From: Ahsan Atta Date: Tue, 22 Dec 2015 11:20:34 -0800 Subject: crypto: qat - Rename dh895xcc mmp firmware Resending again. The fw name suppoed to be 895xcc instead of 895xxcc. Sorry for the noise. Rename dh895xcc mmp fw to make it consistent with other mmp images. 
Signed-off-by: Ahsan Atta Signed-off-by: Tadeusz Struk Signed-off-by: Herbert Xu diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h index 72eacfd..092f735 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h +++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h @@ -82,7 +82,7 @@ #define ADF_DH895XCC_VINTMSK_OFFSET(i) (0x3A000 + 0x200 + ((i) * 0x04)) /* FW names */ #define ADF_DH895XCC_FW "qat_895xcc.bin" -#define ADF_DH895XCC_MMP "qat_mmp.bin" +#define ADF_DH895XCC_MMP "qat_895xcc_mmp.bin" void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data); void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data); -- cgit v0.10.2 From c6c59bf2c0d60e67449190a8a95628ecd04b3969 Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Wed, 23 Dec 2015 20:49:01 +0800 Subject: crypto: ccp - use to_pci_dev and to_platform_device Use to_pci_dev() and to_platform_device() instead of open-coding. Signed-off-by: Geliang Tang Signed-off-by: Herbert Xu diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c index 6ade02f..7690467 100644 --- a/drivers/crypto/ccp/ccp-pci.c +++ b/drivers/crypto/ccp/ccp-pci.c @@ -44,7 +44,7 @@ static int ccp_get_msix_irqs(struct ccp_device *ccp) { struct ccp_pci *ccp_pci = ccp->dev_specific; struct device *dev = ccp->dev; - struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); + struct pci_dev *pdev = to_pci_dev(dev); struct msix_entry msix_entry[MSIX_VECTORS]; unsigned int name_len = sizeof(ccp_pci->msix[0].name) - 1; int v, ret; @@ -86,7 +86,7 @@ e_irq: static int ccp_get_msi_irq(struct ccp_device *ccp) { struct device *dev = ccp->dev; - struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); + struct pci_dev *pdev = to_pci_dev(dev); int ret; ret = pci_enable_msi(pdev); @@ -133,7 +133,7 @@ static void ccp_free_irqs(struct ccp_device *ccp) { struct ccp_pci *ccp_pci = ccp->dev_specific; struct device *dev = ccp->dev; - struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); + struct pci_dev *pdev = to_pci_dev(dev); if (ccp_pci->msix_count) { while (ccp_pci->msix_count--) @@ -149,7 +149,7 @@ static void ccp_free_irqs(struct ccp_device *ccp) static int ccp_find_mmio_area(struct ccp_device *ccp) { struct device *dev = ccp->dev; - struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); + struct pci_dev *pdev = to_pci_dev(dev); resource_size_t io_len; unsigned long io_flags; diff --git a/drivers/crypto/ccp/ccp-platform.c b/drivers/crypto/ccp/ccp-platform.c index 01b50cb..66dd7c9 100644 --- a/drivers/crypto/ccp/ccp-platform.c +++ b/drivers/crypto/ccp/ccp-platform.c @@ -35,8 +35,7 @@ struct ccp_platform { static int ccp_get_irq(struct ccp_device *ccp) { struct device *dev = ccp->dev; - struct platform_device *pdev = container_of(dev, - struct platform_device, dev); + struct platform_device *pdev = to_platform_device(dev); int ret; ret = platform_get_irq(pdev, 0); @@ -78,8 +77,7 @@ static void ccp_free_irqs(struct ccp_device *ccp) static struct resource *ccp_find_mmio_area(struct ccp_device *ccp) { struct device *dev = ccp->dev; - struct platform_device *pdev = container_of(dev, - struct platform_device, dev); + struct platform_device *pdev = to_platform_device(dev); struct resource *ior; ior = platform_get_resource(pdev, IORESOURCE_MEM, 0); -- cgit v0.10.2 From 1fa844e2ff914370a1c7f14bb854f220bfe87c73 Mon Sep 17 00:00:00 2001 From: Tadeusz Struk Date: Wed, 23 Dec 2015 06:49:58 -0800 Subject: crypto: qat - Fix random config build issue 
Reported-by: kbuild test robot Signed-off-by: Tadeusz Struk Signed-off-by: Herbert Xu diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h index c03e286..0e82ce3 100644 --- a/drivers/crypto/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/qat/qat_common/adf_common_drv.h @@ -106,8 +106,6 @@ int adf_dev_start(struct adf_accel_dev *accel_dev); int adf_dev_stop(struct adf_accel_dev *accel_dev); void adf_dev_shutdown(struct adf_accel_dev *accel_dev); -void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev); -void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev); int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr); void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev); int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev); @@ -236,6 +234,8 @@ void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, uint32_t vf_mask); void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, uint32_t vf_mask); +void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev); +void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev); #else static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs) { @@ -245,5 +245,13 @@ static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev) { } + +static inline void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev) +{ +} + +static inline void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev) +{ +} #endif #endif -- cgit v0.10.2 From 2a5de720dcecbc7ba998bc1ae8f7b9cd7cb81654 Mon Sep 17 00:00:00 2001 From: Tadeusz Struk Date: Wed, 23 Dec 2015 07:36:28 -0800 Subject: crypto: qat - fix SKU definition for c3xxx dev c3xxx doesn't have the esram BAR and only has 6 ue. 
Signed-off-by: Tadeusz Struk Signed-off-by: Herbert Xu diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c index bda8f9f..c5bd5a9 100644 --- a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c +++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c @@ -50,14 +50,9 @@ #include "adf_c3xxx_hw_data.h" /* Worker thread to service arbiter mappings based on dev SKUs */ -static const u32 thrd_to_arb_map_8_me_sku[] = { - 0x10000888, 0x11000888, 0x10000888, 0x11000888, 0x10000888, - 0x11000888, 0x10000888, 0x11000888, 0, 0 -}; - -static const u32 thrd_to_arb_map_10_me_sku[] = { - 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA, - 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA +static const u32 thrd_to_arb_map_6_me_sku[] = { + 0x12222AAA, 0x11222AAA, 0x12222AAA, + 0x11222AAA, 0x12222AAA, 0x11222AAA }; static struct adf_hw_device_class c3xxx_class = { @@ -117,16 +112,14 @@ static u32 get_etr_bar_id(struct adf_hw_device_data *self) static u32 get_sram_bar_id(struct adf_hw_device_data *self) { - return ADF_C3XXX_SRAM_BAR; + return 0; } static enum dev_sku_info get_sku(struct adf_hw_device_data *self) { int aes = get_num_aes(self); - if (aes == 8) - return DEV_SKU_2; - else if (aes == 10) + if (aes == 6) return DEV_SKU_4; return DEV_SKU_UNKNOWN; @@ -136,11 +129,8 @@ static void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev, u32 const **arb_map_config) { switch (accel_dev->accel_pci_dev.sku) { - case DEV_SKU_2: - *arb_map_config = thrd_to_arb_map_8_me_sku; - break; case DEV_SKU_4: - *arb_map_config = thrd_to_arb_map_10_me_sku; + *arb_map_config = thrd_to_arb_map_6_me_sku; break; default: dev_err(&GET_DEV(accel_dev), diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h index f2fa234..2f2681d 100644 --- a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h +++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h @@ -48,9 +48,8 @@ #define ADF_C3XXX_HW_DATA_H_ /* PCIe configuration space */ -#define ADF_C3XXX_SRAM_BAR 0 -#define ADF_C3XXX_PMISC_BAR 1 -#define ADF_C3XXX_ETR_BAR 2 +#define ADF_C3XXX_PMISC_BAR 0 +#define ADF_C3XXX_ETR_BAR 1 #define ADF_C3XXX_RX_RINGS_OFFSET 8 #define ADF_C3XXX_TX_RINGS_MASK 0xFF #define ADF_C3XXX_MAX_ACCELERATORS 3 -- cgit v0.10.2 From 48d627648141479c8be8acd110191072e24eba25 Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Mon, 28 Dec 2015 21:53:07 +0800 Subject: crypto: hifn_795x, picoxcell - use ablkcipher_request_cast Use ablkcipher_request_cast() instead of open-coding it. 
Signed-off-by: Geliang Tang Signed-off-by: Herbert Xu diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c index 201e57d..eee2c7e 100644 --- a/drivers/crypto/hifn_795x.c +++ b/drivers/crypto/hifn_795x.c @@ -1943,7 +1943,7 @@ static void hifn_flush(struct hifn_device *dev) spin_lock_irqsave(&dev->lock, flags); while ((async_req = crypto_dequeue_request(&dev->queue))) { - req = container_of(async_req, struct ablkcipher_request, base); + req = ablkcipher_request_cast(async_req); spin_unlock_irqrestore(&dev->lock, flags); hifn_process_ready(req, -ENODEV); @@ -2062,7 +2062,7 @@ static int hifn_process_queue(struct hifn_device *dev) if (backlog) backlog->complete(backlog, -EINPROGRESS); - req = container_of(async_req, struct ablkcipher_request, base); + req = ablkcipher_request_cast(async_req); err = hifn_handle_req(req); if (err) diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c index 15b5e39..3b1c7ec 100644 --- a/drivers/crypto/picoxcell_crypto.c +++ b/drivers/crypto/picoxcell_crypto.c @@ -856,8 +856,7 @@ static int spacc_ablk_need_fallback(struct spacc_req *req) static void spacc_ablk_complete(struct spacc_req *req) { - struct ablkcipher_request *ablk_req = - container_of(req->req, struct ablkcipher_request, base); + struct ablkcipher_request *ablk_req = ablkcipher_request_cast(req->req); if (ablk_req->src != ablk_req->dst) { spacc_free_ddt(req, req->src_ddt, req->src_addr, ablk_req->src, -- cgit v0.10.2
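For readers following the atmel-aes CTR counter-overflow fix above: the standalone C sketch below models the 16-bit branch of the wrap check from atmel_aes_ctr_transfer() outside the kernel. It is a minimal sketch; the function and harness names are local to this example and not part of the driver.

#include <stdint.h>
#include <stdio.h>

#define AES_BLOCK_SIZE 16

/*
 * Given the current 32-bit counter word and the remaining length, return
 * how many bytes can be processed before a 16-bit hardware counter would
 * wrap. Sets *fragmented when the request must be split and the counter
 * incremented by software, mirroring the driver's check.
 */
static size_t ctr16_safe_len(uint32_t ctr, size_t datalen, int *fragmented)
{
        uint32_t blocks = (datalen + AES_BLOCK_SIZE - 1) / AES_BLOCK_SIZE;
        uint16_t start = ctr & 0xffff;
        uint16_t end = start + (uint16_t)blocks - 1;

        *fragmented = 0;
        if (blocks >> 16 || end < start) {
                *fragmented = 1;
                return AES_BLOCK_SIZE * (size_t)(0x10000 - start);
        }
        return datalen;
}

int main(void)
{
        int frag;
        /* Counter two steps from wrapping, ten blocks requested. */
        size_t len = ctr16_safe_len(0xfffe, 10 * AES_BLOCK_SIZE, &frag);

        printf("usable: %zu bytes, fragmented: %d\n", len, frag);
        return 0;
}

When the fragmented case is hit, the driver increments the counter in software (crypto_inc() on the IV) and re-issues a transfer with atmel_aes_ctr_transfer() as its own resume callback, as shown in the patch above.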
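The GCM patch above special-cases 96-bit IVs when building J0 (memcpy of the IV followed by cpu_to_be32(1) in the last word). A byte-level sketch of that construction follows; names are local to this example, and IVs of any other length go through the GHASH path instead, which this sketch does not cover.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define AES_BLOCK_SIZE 16

/* J0 = IV || 0^31 || 1 for a 96-bit IV, per NIST SP 800-38D. */
static void gcm_build_j0_96(const uint8_t iv[12], uint8_t j0[AES_BLOCK_SIZE])
{
        memcpy(j0, iv, 12);
        j0[12] = 0;
        j0[13] = 0;
        j0[14] = 0;
        j0[15] = 1;     /* big-endian 32-bit counter starts at 1 */
}

int main(void)
{
        const uint8_t iv[12] = { 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce,
                                 0xdb, 0xad, 0xde, 0xca, 0xf8, 0x88 };
        uint8_t j0[AES_BLOCK_SIZE];
        int i;

        gcm_build_j0_96(iv, j0);
        for (i = 0; i < AES_BLOCK_SIZE; i++)
                printf("%02x", j0[i]);
        printf("\n");
        return 0;
}

The driver then writes incr32(J0) into the IV registers before processing the plaintext, which is what atmel_aes_gcm_length() in the patch above does.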
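The ccp patch above swaps open-coded container_of() calls for the to_pci_dev()/to_platform_device() helpers, which wrap the same pointer arithmetic. A minimal user-space sketch of the pattern, with simplified stand-in structures rather than the kernel definitions:

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins: only the embedding relationship matters here. */
struct device {
        const char *name;
};

struct pci_dev {
        int devfn;
        struct device dev;      /* embedded generic device */
};

/* Recover the enclosing structure from a pointer to one of its members. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static struct pci_dev *to_pci_dev_sketch(struct device *d)
{
        return container_of(d, struct pci_dev, dev);
}

int main(void)
{
        struct pci_dev pdev = { .devfn = 3, .dev = { .name = "0000:00:01.0" } };
        struct device *d = &pdev.dev;   /* what generic code hands back */

        printf("devfn = %d\n", to_pci_dev_sketch(d)->devfn);
        return 0;
}

Using the named helper instead of a raw container_of() keeps the member name ("dev") in one place, which is the point of the cleanup patch.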