From 0cff9ad872c8aede1d76f6c87e1d6fa217a718e3 Mon Sep 17 00:00:00 2001 From: Varun Sethi Date: Sun, 6 Apr 2014 23:58:32 +0530 Subject: iommu/fsl: PAMU power management support. PAMU driver suspend and resume support. Signed-off-by: Varun Sethi Change-Id: I00eb16aa0fe8a591399349396aaced3df1dbb33b Reviewed-on: http://git.am.freescale.net:8181/10716 Tested-by: Review Code-CDREVIEW Reviewed-by: Jeffrey Ladouceur Reviewed-by: Jose Rivera diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c index e7b3d45..e9843f0 100644 --- a/drivers/iommu/fsl_pamu.c +++ b/drivers/iommu/fsl_pamu.c @@ -33,6 +33,7 @@ #include #include #include +#include #include "fsl_pamu.h" @@ -44,10 +45,13 @@ #define make64(high, low) (((u64)(high) << 32) | (low)) -struct pamu_isr_data { +struct pamu_info { void __iomem *pamu_reg_base; /* Base address of PAMU regs*/ unsigned int count; /* The number of PAMUs */ -}; +} pamu_info_data; + +/* Pointer to the device configuration space */ +static struct ccsr_guts __iomem *guts_regs; static struct paace *ppaact; static struct paace *spaact; @@ -123,6 +127,34 @@ static struct paace *pamu_get_ppaace(int liodn) } /** + * set_dcfg_liodn() - set the device LIODN in DCFG + * @np: device tree node pointer + * @liodn: liodn value to program + * + * Returns 0 upon success else error code < 0 returned + */ +static int set_dcfg_liodn(struct device_node *np, int liodn) +{ + const __be32 *prop; + u32 liodn_reg_offset; + int len; + void __iomem *dcfg_region = (void *)guts_regs; + + if (!dcfg_region) + return -ENODEV; + + prop = of_get_property(np, "fsl,liodn-reg", &len); + if (!prop || len != 8) + return -EINVAL; + + liodn_reg_offset = be32_to_cpup(&prop[1]); + + out_be32((u32 *)(dcfg_region + liodn_reg_offset), liodn); + + return 0; +} + +/** * pamu_enable_liodn() - Set valid bit of PACCE * @liodn: liodn PAACT index for desired PAACE * @@ -823,7 +855,7 @@ static void __init setup_omt(struct ome *omt) * Get the maximum number of PAACT table entries * and 
subwindows supported by PAMU */ -static void get_pamu_cap_values(unsigned long pamu_reg_base) +static void get_pamu_cap_values(void *pamu_reg_base) { u32 pc_val; @@ -833,9 +865,8 @@ static void get_pamu_cap_values(unsigned long pamu_reg_base) } /* Setup PAMU registers pointing to PAACT, SPAACT and OMT */ -int setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size, - phys_addr_t ppaact_phys, phys_addr_t spaact_phys, - phys_addr_t omt_phys) +int setup_one_pamu(void *pamu_reg_base, phys_addr_t ppaact_phys, + phys_addr_t spaact_phys, phys_addr_t omt_phys) { u32 *pc; struct pamu_mmap_regs *pamu_regs; @@ -959,7 +990,7 @@ static void __init setup_liodns(void) irqreturn_t pamu_av_isr(int irq, void *arg) { - struct pamu_isr_data *data = arg; + struct pamu_info *data = arg; phys_addr_t phys; unsigned int i, j, ret; @@ -1204,11 +1235,9 @@ static const struct { static int __init fsl_pamu_probe(struct platform_device *pdev) { void __iomem *pamu_regs = NULL; - struct ccsr_guts __iomem *guts_regs = NULL; u32 pamubypenr, pamu_counter; + void __iomem *pamu_reg_base; unsigned long pamu_reg_off; - unsigned long pamu_reg_base; - struct pamu_isr_data *data = NULL; struct device_node *guts_node; u64 size; struct page *p; @@ -1234,23 +1263,17 @@ static int __init fsl_pamu_probe(struct platform_device *pdev) } of_get_address(pdev->dev.of_node, 0, &size, NULL); + pamu_info_data.pamu_reg_base = pamu_regs; + pamu_info_data.count = size / PAMU_OFFSET; + irq = irq_of_parse_and_map(pdev->dev.of_node, 0); if (irq == NO_IRQ) { dev_warn(&pdev->dev, "no interrupts listed in PAMU node\n"); goto error; } - data = kzalloc(sizeof(struct pamu_isr_data), GFP_KERNEL); - if (!data) { - dev_err(&pdev->dev, "PAMU isr data memory allocation failed\n"); - ret = -ENOMEM; - goto error; - } - data->pamu_reg_base = pamu_regs; - data->count = size / PAMU_OFFSET; - /* The ISR needs access to the regs, so we won't iounmap them */ - ret = request_irq(irq, pamu_av_isr, 0, "pamu", data); + ret = 
request_irq(irq, pamu_av_isr, 0, "pamu", &pamu_info_data); if (ret < 0) { dev_err(&pdev->dev, "error %i installing ISR for irq %i\n", ret, irq); @@ -1274,7 +1297,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev) } /* read in the PAMU capability registers */ - get_pamu_cap_values((unsigned long)pamu_regs); + get_pamu_cap_values(pamu_regs); /* * To simplify the allocation of a coherency domain, we allocate the * PAACT and the OMT in the same memory buffer. Unfortunately, this @@ -1353,9 +1376,9 @@ static int __init fsl_pamu_probe(struct platform_device *pdev) for (pamu_reg_off = 0, pamu_counter = 0x80000000; pamu_reg_off < size; pamu_reg_off += PAMU_OFFSET, pamu_counter >>= 1) { - pamu_reg_base = (unsigned long) pamu_regs + pamu_reg_off; - setup_one_pamu(pamu_reg_base, pamu_reg_off, ppaact_phys, - spaact_phys, omt_phys); + pamu_reg_base = pamu_regs + pamu_reg_off; + setup_one_pamu(pamu_reg_base, ppaact_phys, spaact_phys, + omt_phys); /* Disable PAMU bypass for this PAMU */ pamubypenr &= ~pamu_counter; } @@ -1365,8 +1388,6 @@ static int __init fsl_pamu_probe(struct platform_device *pdev) /* Enable all relevant PAMU(s) */ out_be32(&guts_regs->pamubypenr, pamubypenr); - iounmap(guts_regs); - /* Enable DMA for the LIODNs in the device tree*/ setup_liodns(); @@ -1378,12 +1399,7 @@ error_genpool: error: if (irq != NO_IRQ) - free_irq(irq, data); - - if (data) { - memset(data, 0, sizeof(struct pamu_isr_data)); - kfree(data); - } + free_irq(irq, &pamu_info_data); if (pamu_regs) iounmap(pamu_regs); @@ -1417,6 +1433,77 @@ static struct platform_driver fsl_of_pamu_driver = { .probe = fsl_pamu_probe, }; +#ifdef CONFIG_SUSPEND +static int iommu_suspend(void) +{ + int i; + + for (i = 0; i < pamu_info_data.count; i++) { + u32 val; + void __iomem *p; + + p = pamu_info_data.pamu_reg_base + i * PAMU_OFFSET; + val = in_be32((u32 *)(p + PAMU_PICS)); + /* Disable access violation interrupts */ + out_be32((u32 *)(p + PAMU_PICS), + val & ~PAMU_ACCESS_VIOLATION_ENABLE); + } 
+ + return 0; +} + +static void restore_dcfg_liodns(void) +{ + struct device_node *node; + const __be32 *prop; + int ret, liodn; + + for_each_node_with_property(node, "fsl,liodn-reg") { + prop = of_get_property(node, "fsl,liodn", 0); + if (!prop) + continue; + liodn = be32_to_cpup(prop); + ret = set_dcfg_liodn(node, liodn); + if (ret) + pr_debug("LIODN restore failed for %s\n", + node->full_name); + } +} + +static void iommu_resume(void) +{ + int i; + u32 pamubypenr, pamu_counter; + + restore_dcfg_liodns(); + pamubypenr = in_be32(&guts_regs->pamubypenr); + for (i = 0, pamu_counter = 0x80000000; i < pamu_info_data.count; + i++, pamu_counter >>= 1) { + void __iomem *p; + + p = pamu_info_data.pamu_reg_base + i * PAMU_OFFSET; + setup_one_pamu(p, virt_to_phys(ppaact), virt_to_phys(spaact), + virt_to_phys(omt)); + pamubypenr &= ~pamu_counter; + } + /* Enable all PAMUs */ + out_be32(&guts_regs->pamubypenr, pamubypenr); +} + +static struct syscore_ops iommu_syscore_ops = { + .resume = iommu_resume, + .suspend = iommu_suspend, +}; + +static void __init init_iommu_pm_ops(void) +{ + register_syscore_ops(&iommu_syscore_ops); +} + +#else +static inline void init_iommu_pm_ops(void) {} +#endif /* CONFIG_SUSPEND */ + static __init int fsl_pamu_init(void) { struct platform_device *pdev = NULL; @@ -1474,6 +1561,8 @@ static __init int fsl_pamu_init(void) goto error_device_add; } + init_iommu_pm_ops(); + return 0; error_device_add: -- cgit v0.10.2 From 552a7fc91047e8e3cfe21b3e8268765c29f0ea03 Mon Sep 17 00:00:00 2001 From: Alok Makhariya Date: Fri, 4 Apr 2014 16:34:43 +0530 Subject: gianfar: adding EXTRA_HEADROOM in skb_size calc of asf_gianfar recycle fn. Recycling of skb was not working. This was due to checks in gfar_recycle_skb is getting false which results in freeing skb every time. There is mismatch b/w skb_size calculated and actual skb size (skb->end - skb->head). So this patch adds EXTRA_HEADROOM when skb_size is calculated. 
CR:ENGR00306216 Signed-off-by: Alok Makhariya Change-Id: I524254ceb2a7ec757645d11922e7066d0b22b756 Reviewed-on: http://git.am.freescale.net:8181/10689 Tested-by: Review Code-CDREVIEW Reviewed-by: Rajan Gupta Reviewed-by: Jose Rivera diff --git a/drivers/net/ethernet/freescale/asf_gianfar.c b/drivers/net/ethernet/freescale/asf_gianfar.c index af7f205..5280ed9 100644 --- a/drivers/net/ethernet/freescale/asf_gianfar.c +++ b/drivers/net/ethernet/freescale/asf_gianfar.c @@ -55,7 +55,8 @@ static inline void gfar_asf_reclaim_skb(struct sk_buff *skb) static inline void gfar_recycle_skb(struct sk_buff *skb) { struct sk_buff_head *h = &__get_cpu_var(skb_recycle_list); - int skb_size = SKB_DATA_ALIGN(GFAR_RXB_REC_SZ + NET_SKB_PAD); + int skb_size = SKB_DATA_ALIGN(GFAR_RXB_REC_SZ + NET_SKB_PAD + + EXTRA_HEADROOM); if (skb_queue_len(h) < DEFAULT_RX_RING_SIZE && !skb_cloned(skb) && !skb_is_nonlinear(skb) && -- cgit v0.10.2 From eb3e90e3505621c442be275000e1dc509c9e8b82 Mon Sep 17 00:00:00 2001 From: Prabhakar Kushwaha Date: Wed, 2 Apr 2014 12:10:12 +0530 Subject: mtd/nand:Update mtd.name assignment type to u64 for IFC, eLBC mtd.name is assigned to IFC NAND physical address. Assignment type is u32. It is not providing correct physical address of IFC NAND. Update assignment type to u64. 
This patch is alread present on sdk-kernel-3.8 branch http://git.am.freescale.net:8181/#/c/7535/ Signed-off-by: Prabhakar Kushwaha Change-Id: Iaea31773bb59d8d40a0399c1d0cb6c42164a089c Reviewed-on: http://git.am.freescale.net:8181/10575 Tested-by: Review Code-CDREVIEW Reviewed-by: Shengzhou Liu Reviewed-by: Jose Rivera diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c index c966fc7..defd72e 100644 --- a/drivers/mtd/nand/fsl_elbc_nand.c +++ b/drivers/mtd/nand/fsl_elbc_nand.c @@ -875,7 +875,7 @@ static int fsl_elbc_nand_probe(struct platform_device *pdev) goto err; } - priv->mtd.name = kasprintf(GFP_KERNEL, "%x.flash", (unsigned)res.start); + priv->mtd.name = kasprintf(GFP_KERNEL, "%llx.flash", (u64)res.start); if (!priv->mtd.name) { ret = -ENOMEM; goto err; diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c index 4335577..f56e574 100644 --- a/drivers/mtd/nand/fsl_ifc_nand.c +++ b/drivers/mtd/nand/fsl_ifc_nand.c @@ -1101,7 +1101,7 @@ static int fsl_ifc_nand_probe(struct platform_device *dev) IFC_NAND_EVTER_INTR_FTOERIR_EN | IFC_NAND_EVTER_INTR_WPERIR_EN, &ifc->ifc_nand.nand_evter_intr_en); - priv->mtd.name = kasprintf(GFP_KERNEL, "%x.flash", (unsigned)res.start); + priv->mtd.name = kasprintf(GFP_KERNEL, "%llx.flash", (u64)res.start); if (!priv->mtd.name) { ret = -ENOMEM; goto err; -- cgit v0.10.2 From ea5bf3a492e7cd1b8371d288a6c37c4037396ce0 Mon Sep 17 00:00:00 2001 From: Jeffrey Ladouceur Date: Fri, 22 Nov 2013 12:26:55 -0500 Subject: fsl_pme: Add support for suspend and resume power management callbacks When power is removed from the pme device, all its internal state is lost. The suspend() function saves all necessary state in order to later resume the device. The resume() function "writes" back the previously saved state. The functionality is implemented as part of the pme ctrl plane device. The state the requires saving is CCSR registers and internal SRAM memory. 
Signed-off-by: Jeffrey Ladouceur Change-Id: I27ada84b338d7905b07dcabdb2ece2a9700a764c Reviewed-on: http://git.am.freescale.net:8181/6831 Tested-by: Review Code-CDREVIEW Reviewed-by: Geoff Thorpe Reviewed-by: Jose Rivera (cherry picked from commit d9a11a097905fbb0aa1add75c17aae7724099564) Reviewed-on: http://git.am.freescale.net:8181/10772 diff --git a/drivers/staging/fsl_pme2/Makefile b/drivers/staging/fsl_pme2/Makefile index 694513b..e91c514 100644 --- a/drivers/staging/fsl_pme2/Makefile +++ b/drivers/staging/fsl_pme2/Makefile @@ -1,5 +1,5 @@ # PME -obj-$(CONFIG_FSL_PME2_CTRL) += pme2_ctrl.o pme2_sysfs.o +obj-$(CONFIG_FSL_PME2_CTRL) += pme2_ctrl.o pme2_sysfs.o pme2_suspend.o obj-$(CONFIG_FSL_PME2_PORTAL) += pme2.o pme2-y := pme2_low.o pme2_high.o obj-$(CONFIG_FSL_PME2_TEST_HIGH) += pme2_test_high.o diff --git a/drivers/staging/fsl_pme2/pme2_ctrl.c b/drivers/staging/fsl_pme2/pme2_ctrl.c index 30956c2..9901fe6 100644 --- a/drivers/staging/fsl_pme2/pme2_ctrl.c +++ b/drivers/staging/fsl_pme2/pme2_ctrl.c @@ -295,6 +295,7 @@ static int of_fsl_pme_probe(struct platform_device *ofdev) int srec_aim = 0, srec_esr = 0; u32 srecontextsize_code; u32 dec1; + struct pme2_private_data *priv_data; /* * TODO: This standby handling won't work properly after failover, it's @@ -415,6 +416,11 @@ static int of_fsl_pme_probe(struct platform_device *ofdev) (CONFIG_FSL_PME2_SRE_MAX_INSTRUCTION_LIMIT << 16) | CONFIG_FSL_PME2_SRE_MAX_BLOCK_NUMBER); +#ifdef CONFIG_PM + /* can't flush pme device easily. 
Disable caching for FC and RES */ + pme_out(global_pme, CDCR, 0x00000009); +#endif + /* Setup Accumulator */ if (pme_stat_interval) schedule_delayed_work(&accumulator_work, @@ -424,6 +430,19 @@ static int of_fsl_pme_probe(struct platform_device *ofdev) if (err) goto out_stop_accumulator; + priv_data = kzalloc(sizeof(*priv_data), GFP_KERNEL); + if (!priv_data) + goto out_stop_accumulator; + + priv_data->regs = (uint32_t __iomem *)regs; + priv_data->pme_rev1 = pme_in(global_pme, PM_IP_REV1); + dev_set_drvdata(dev, priv_data); + +#ifdef CONFIG_PM + /* setup resources required for power management */ + init_pme_suspend(priv_data); +#endif + /* Enable interrupts */ pme_out(global_pme, IER, PME_ALL_ERR); dev_info(dev, "ver: 0x%08x\n", pme_in(global_pme, PM_IP_REV1)); @@ -448,11 +467,79 @@ out: return err; } +#ifdef CONFIG_PM +void restore_all_ccsr(struct ccsr_backup_info *save_ccsr, + uint32_t __iomem *regs) +{ + int i; + int num_regs = sizeof(save_ccsr->regdb)/sizeof(uint32_t); + + uint32_t *pme_reg = &save_ccsr->regdb.pmfa.isr; + + for (i = 0; i < num_regs; i++) { + /* skip enable register */ + if ((pme_reg + i) != (&save_ccsr->regdb.pmfa.faconf)) + out_be32(regs + i, *(pme_reg + i)); + } +} + +void save_all_ccsr(struct ccsr_backup_info *save_ccsr, uint32_t __iomem *regs) +{ + int i; + int num_regs = sizeof(save_ccsr->regdb)/sizeof(uint32_t); + uint32_t *pme_reg; + + /* setup ddr space to save ccsr */ + pme_reg = &save_ccsr->regdb.pmfa.isr; + + for (i = 0; i < num_regs; i++) + *(pme_reg+i) = in_be32(regs + i); +} + +static int pme2_pm_suspend(struct device *dev) +{ + struct pme2_private_data *priv_data; + + priv_data = dev_get_drvdata(dev); + if (!priv_data) { + pr_err("No device data\n"); + return -ENOMEM; + } + dev_dbg(dev, "fsl-pme PM suspend\n"); + + return pme_suspend(priv_data); +} + +static int pme2_pm_resume(struct device *dev) +{ + struct pme2_private_data *priv_data; + + priv_data = dev_get_drvdata(dev); + if (!priv_data) { + pr_err("No device data\n"); + 
return -ENOMEM; + } + dev_dbg(dev, "fsl-pme PM resume\n"); + + return pme_resume(priv_data); + +} +#else +#define pme2_pm_suspend NULL +#define pme2_pm_resume NULL +#endif + +static const struct dev_pm_ops pme2_pm_ops = { + .suspend = pme2_pm_suspend, + .resume = pme2_pm_resume, +}; + static struct platform_driver of_fsl_pme_driver = { .driver = { .owner = THIS_MODULE, .name = DRV_NAME, .of_match_table = of_fsl_pme_ids, + .pm = &pme2_pm_ops, }, .probe = of_fsl_pme_probe, .remove = of_fsl_pme_remove, @@ -768,7 +855,25 @@ int pme_attr_set(enum pme_attr attr, u32 val) case pme_attr_pmtr: pme_out(global_pme, PMTR, val); break; - + case pme_attr_faconf_rst: + attr_val = pme_in(global_pme, FACONF); + if (val) + attr_val |= PME_FACONF_RESET; + else + attr_val &= ~(PME_FACONF_RESET); + pme_out(global_pme, FACONF, attr_val); + break; + case pme_attr_faconf_en: + attr_val = pme_in(global_pme, FACONF); + if (val) + attr_val |= PME_FACONF_ENABLE; + else + attr_val &= ~(PME_FACONF_ENABLE); + pme_out(global_pme, FACONF, attr_val); + break; + case pme_attr_efqc: + pme_out(global_pme, EFQC, val); + break; default: pr_err("pme: Unknown attr %u\n", attr); return -EINVAL; @@ -1246,6 +1351,22 @@ int pme_attr_get(enum pme_attr attr, u32 *val) attr_val = pme_in(global_pme, SRRWC); break; + case pme_attr_faconf_rst: + attr_val = pme_in(global_pme, FACONF); + attr_val &= PME_FACONF_RESET; + break; + + case pme_attr_faconf_en: + attr_val = pme_in(global_pme, FACONF); + attr_val &= PME_FACONF_ENABLE; + attr_val >>= 1; + break; + + case pme_attr_efqc: + attr_val = pme_in(global_pme, EFQC); + break; + + default: pr_err("pme: Unknown attr %u\n", attr); return -EINVAL; diff --git a/drivers/staging/fsl_pme2/pme2_private.h b/drivers/staging/fsl_pme2/pme2_private.h index 3968f02..e1ea3e8 100644 --- a/drivers/staging/fsl_pme2/pme2_private.h +++ b/drivers/staging/fsl_pme2/pme2_private.h @@ -29,6 +29,9 @@ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ +#ifndef PME2_PRIVATE_H +#define PME2_PRIVATE_H + #include "pme2_sys.h" #include @@ -50,6 +53,218 @@ void pme2_remove_sysfs_dev_files(struct platform_device *ofdev); void accumulator_update_interval(u32 interval); #endif +#ifdef CONFIG_PM + +struct pme_save_regs_pmfa { + uint32_t isr; + uint32_t ier; + uint32_t isdr; + uint32_t iir; + uint32_t ifr; + uint32_t rll; + uint32_t cdcr; + uint32_t reserved1[2]; + uint32_t trunci; + uint32_t rbc; + uint32_t esr; + uint32_t ecr0; + uint32_t ecr1; + uint32_t reserved2[6]; + uint32_t efqc; + uint32_t sram_addr; + uint32_t sram_rdat; + uint32_t sram_wdat; + uint32_t faconf; + uint32_t pmstat; + uint32_t famcr; + uint32_t pmtr; + uint32_t reserved3; + uint32_t pehd; + uint32_t reserved4[2]; + uint32_t bsc0; + uint32_t bsc1; + uint32_t bsc2; + uint32_t bsc3; + uint32_t bsc4; + uint32_t bsc5; + uint32_t bsc6; + uint32_t bsc7; + uint32_t reserved5[16]; + uint32_t qmbfd0; + uint32_t qmbfd1; + uint32_t qmbfd2; + uint32_t qmbfd3; + uint32_t qmbctxtah; + uint32_t qmbctxtal; + uint32_t qmbctxtb; + uint32_t qmbctl; + uint32_t ecc1bes; + uint32_t ecc2bes; + uint32_t reserved6[2]; + uint32_t eccaddr; + uint32_t reserved7[27]; + uint32_t tbt0ecc1th; + uint32_t tbt0ecc1ec; + uint32_t tbt1ecc1th; + uint32_t tbt1ecc1ec; + uint32_t vlt0ecc1th; + uint32_t vlt0ecc1ec; + uint32_t vlt1ecc1th; + uint32_t vlt1ecc1ec; + uint32_t cmecc1th; + uint32_t cmecc1ec; + uint32_t reserved8[2]; + uint32_t dxcmecc1th; + uint32_t dxcmecc1ec; + uint32_t reserved9[2]; + uint32_t dxemecc1th; + uint32_t dxemecc1ec; + uint32_t reserved10[14]; +}; + +struct pme_save_regs_kes { + uint32_t stnib; + uint32_t stnis; + uint32_t stnth1; + uint32_t stnth2; + uint32_t stnthv; + uint32_t stnths; + uint32_t stnch; + uint32_t swdb; + uint32_t kvlts; + uint32_t kec; + uint32_t reserved1[22]; +}; + +struct pme_save_regs_dxe { + uint32_t stnpm; + uint32_t stns1m; + uint32_t drcic; + uint32_t drcmc; + uint32_t stnpmr; + uint32_t reserved1[3]; + uint32_t pdsrbah; + uint32_t 
pdsrbal; + uint32_t dmcr; + uint32_t dec0; + uint32_t dec1; + uint32_t reserved2[3]; + uint32_t dlc; + uint32_t reserved3[15]; +}; + +struct pme_save_regs_sre { + uint32_t stndrs; + uint32_t stnesr; + uint32_t stns1r; + uint32_t stnob; + uint32_t scbarh; + uint32_t scbarl; + uint32_t smcr; + uint32_t reserved1; + uint32_t srec; + uint32_t reserved2; + uint32_t esrp; + uint32_t reserved3[3]; + uint32_t srrv0; + uint32_t srrv1; + uint32_t srrv2; + uint32_t srrv3; + uint32_t srrv4; + uint32_t srrv5; + uint32_t srrv6; + uint32_t srrv7; + uint32_t srrfi; + uint32_t reserved4; + uint32_t srri; + uint32_t srrr; + uint32_t srrwc; + uint32_t sfrcc; + uint32_t sec1; + uint32_t sec2; + uint32_t sec3; + uint32_t reserved5; +}; + +struct pme_save_regs_mia { + uint32_t mia_byc; + uint32_t mia_blc; + uint32_t mia_ce; + uint32_t reserved1; + uint32_t mia_cr; + uint32_t reserved2[284]; +}; + +struct pme_save_regs_gen { + uint32_t liodnbr; + uint32_t reserved1[126]; + uint32_t srcidr; + uint32_t reserved2[2]; + uint32_t liodnr; + uint32_t reserved3[122]; + uint32_t pm_ip_rev_1; + uint32_t pm_ip_rev_2; +}; + +struct pme_save_reg_all { + struct pme_save_regs_pmfa pmfa; + struct pme_save_regs_kes kes; + struct pme_save_regs_dxe dxe; + struct pme_save_regs_sre sre; + struct pme_save_regs_mia mia; + struct pme_save_regs_gen gen; +}; + +struct pme_pwrmgmt_ctx { + struct qman_fq tx_fq; + struct qman_fq rx_fq; + struct qm_fd result_fd; + struct completion done; +}; + +struct pmtcc_raw_db { + /* vmalloc's memory. Save PME's sram data */ + uint8_t *alldb; +}; + +struct ccsr_backup_info { + uint32_t save_faconf_en; + uint32_t save_cdcr; + struct pme_save_reg_all regdb; +}; + +struct portal_backup_info { + /* vmalloc's memory. 
Save PME's sram data */ + struct pmtcc_raw_db db; + struct pme_pwrmgmt_ctx *ctx; + struct platform_device *pdev; +}; + +#endif /* CONFIG_PM */ + +struct pme2_private_data { + uint32_t pme_rev1; + uint32_t __iomem *regs; +#ifdef CONFIG_PM + struct ccsr_backup_info save_ccsr; + struct portal_backup_info save_db; +#endif +}; + +#ifdef CONFIG_PM +/* Hooks from pme_ctrl to pme_suspend */ +int init_pme_suspend(struct pme2_private_data *priv_data); +void exit_pme_suspend(struct pme2_private_data *priv_data); +int pme_suspend(struct pme2_private_data *priv_data); +int pme_resume(struct pme2_private_data *priv_data); + +/* Hooks from pme_suspend into pme_ctrl */ +void restore_all_ccsr(struct ccsr_backup_info *save_ccsr, + uint32_t __iomem *regs); +void save_all_ccsr(struct ccsr_backup_info *save_ccsr, + uint32_t __iomem *regs); +#endif + static inline void set_fd_addr(struct qm_fd *fd, dma_addr_t addr) { qm_fd_addr_set64(fd, addr); @@ -215,3 +430,10 @@ static inline int is_version_2_1_4(u32 pme_rev1, u32 pme_rev2) (get_errata_rev(pme_rev2) == 4); } +static inline int is_version(u32 pme_rev1, int major, int minor) +{ + return (get_major_rev(pme_rev1) == major) && + (get_minor_rev(pme_rev1) == minor); +} + +#endif diff --git a/drivers/staging/fsl_pme2/pme2_suspend.c b/drivers/staging/fsl_pme2/pme2_suspend.c new file mode 100644 index 0000000..69951bf --- /dev/null +++ b/drivers/staging/fsl_pme2/pme2_suspend.c @@ -0,0 +1,1187 @@ +/* Copyright 2013 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of Freescale Semiconductor nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * This software is provided by Freescale Semiconductor "as is" and any + * express or implied warranties, including, but not limited to, the implied + * warranties of merchantability and fitness for a particular purpose are + * disclaimed. In no event shall Freescale Semiconductor be liable for any + * direct, indirect, incidental, special, exemplary, or consequential damages + * (including, but not limited to, procurement of substitute goods or services; + * loss of use, data, or profits; or business interruption) however caused and + * on any theory of liability, whether in contract, strict liability, or tort + * (including negligence or otherwise) arising in any way out of the use of + * this software, even if advised of the possibility of such damage. 
+ */ + +#ifdef CONFIG_PM + +#include "pme2_private.h" +#include "pme2_regs.h" +#include + +static dma_addr_t pme_suspend_map(struct platform_device *pdev, void *ptr) +{ + return dma_map_single(&pdev->dev, ptr, 1, DMA_BIDIRECTIONAL); +} + +#ifdef PME_SUSPEND_DEBUG + +static inline void __hexdump(unsigned long start, unsigned long end, + unsigned long p, size_t sz, const unsigned char *c) +{ + while (start < end) { + unsigned int pos = 0; + char buf[64]; + int nl = 0; + pos += sprintf(buf + pos, "%08lx: ", start); + do { + if ((start < p) || (start >= (p + sz))) + pos += sprintf(buf + pos, ".."); + else + pos += sprintf(buf + pos, "%02x", *(c++)); + if (!(++start & 15)) { + buf[pos++] = '\n'; + nl = 1; + } else { + nl = 0; + if (!(start & 1)) + buf[pos++] = ' '; + if (!(start & 3)) + buf[pos++] = ' '; + } + } while (start & 15); + if (!nl) + buf[pos++] = '\n'; + buf[pos] = '\0'; + pr_info("%s", buf); + } +} + +static inline void hexdump(const void *ptr, size_t sz) +{ + unsigned long p = (unsigned long)ptr; + unsigned long start = p & ~(unsigned long)15; + unsigned long end = (p + sz + 15) & ~(unsigned long)15; + const unsigned char *c = ptr; + __hexdump(start, end, p, sz, c); +} +#endif + +/* + * The following SRAM tables need to be saved + * 1-byte trigger table + * 2-byte trigger table + * variable length trigger table + * confidence table + * User-Defined Group Mapping tablle + * Equivalent Byte Mapping table + * Special Trigger table + */ +enum pme_pmtcc_table_id { + PME_ONE_BYTE_TRIGGER_TBL = 0x00, + PME_TWO_BYTE_TRIGGER_TBL = 0x01, + PME_VARIABLE_TRIGGER_TBL = 0x02, + PME_CONFIDENCE_TBL = 0x03, + PME_UDG_TBL = 0x05, + PME_EQUIVALENT_BYTE_TBL = 0x06, + PME_SPECIAL_TRIGGER_TBL = 0x08, + PME_LAST_TABLE = PME_SPECIAL_TRIGGER_TBL +}; + +static enum pme_pmtcc_table_id table_list[] = {PME_ONE_BYTE_TRIGGER_TBL, + PME_TWO_BYTE_TRIGGER_TBL, PME_VARIABLE_TRIGGER_TBL, PME_CONFIDENCE_TBL, + PME_UDG_TBL, PME_EQUIVALENT_BYTE_TBL, PME_SPECIAL_TRIGGER_TBL}; + +struct 
pme_pmtcc_header_t { + uint8_t protocol_version; + uint8_t msg_type; + uint16_t reserved; + /* total message length, including the header */ + uint32_t msg_length; + uint64_t msg_id; + uint8_t data[0]; +} __packed; + +/* + * The next few macros define the sizes (in bytes) of the entries in + * the different PM H/W tables. + */ +#define PME_ONE_BYTE_TRIGGER_ENTRY_SIZE 32 +#define PME_TWO_BYTE_TRIGGER_ENTRY_SIZE 8 +#define PME_VARIABLE_TRIGGER_ENTRY_SIZE 8 +#define PME_CONFIDENCE_ENTRY_SIZE 4 +#define PME_CONFIRMATION_ENTRY_SIZE 128 +#define PME_USER_DEFINED_GROUP_READ_ENTRY_SIZE 4 +#define PME_USER_DEFINED_GROUP_WRITE_ENTRY_SIZE 256 +#define PME_EQUIVALENCE_READ_ENTRY_SIZE 4 +#define PME_EQUIVALENCE_WRITE_ENTRY_SIZE 256 +#define PME_SESSION_CONTEXT_ENTRY_SIZE 32 +#define PME_SPECIAL_TRIGGER_ENTRY_SIZE 32 + +union pme_table_entry_t { + /* The next few types define the entries for the different PM tables. */ + uint8_t one_byte_trigger_entry[PME_ONE_BYTE_TRIGGER_ENTRY_SIZE]; + uint8_t two_byte_trigger_entry[PME_TWO_BYTE_TRIGGER_ENTRY_SIZE]; + uint8_t variable_trigger_entry[PME_VARIABLE_TRIGGER_ENTRY_SIZE]; + uint8_t confidence_entry[PME_CONFIDENCE_ENTRY_SIZE]; + uint8_t udg_read_entry[PME_USER_DEFINED_GROUP_READ_ENTRY_SIZE]; + uint8_t udg_write_entry[PME_USER_DEFINED_GROUP_WRITE_ENTRY_SIZE]; + uint8_t equivalence_read_entry[PME_EQUIVALENCE_READ_ENTRY_SIZE]; + uint8_t equivalence_write_entry[PME_EQUIVALENCE_WRITE_ENTRY_SIZE]; + uint8_t special_trigger_entry[PME_SPECIAL_TRIGGER_ENTRY_SIZE]; +} __packed; + +/* This type defines an indexed table entry. */ +struct pme_indexed_table_entry_t { + uint32_t index; + union pme_table_entry_t entry; +} __packed; + +/* table read request */ +struct pme_pmtcc_read_request_msg_t { + struct pme_pmtcc_header_t header; + uint32_t table_id; + uint32_t index; +} __packed; + +/* table read reply message. 
*/ +struct pme_pmtcc_read_reply_msg_t { + struct pme_pmtcc_header_t header; + uint32_t table_id; + struct pme_indexed_table_entry_t indexed_entry; +} __packed; + +/* table write request message */ +struct pme_pmtcc_write_request_msg_t { + struct pme_pmtcc_header_t header; + uint32_t table_id; + struct pme_indexed_table_entry_t indexed_entry; +} __packed; + +/* + * The next few macros define the number of entries in the different PM + * H/W tables. + */ +#define PME_CONFIDENCE_ENTRY_NUM_PER_TRIGGER_ENTRY 4 + +#define PME_ONE_BYTE_TRIGGER_ENTRY_NUM 1 + +#define PME_TWO_BYTE_TRIGGER_ENTRY_NUM_V1 512 +#define PME_VARIABLE_TRIGGER_ENTRY_NUM_V1 4096 + +#define PME_TWO_BYTE_TRIGGER_ENTRY_NUM_V2_0 2048 +#define PME_VARIABLE_TRIGGER_ENTRY_NUM_V2_0 16384 + +#define PME_TWO_BYTE_TRIGGER_ENTRY_NUM_V2_1 1024 +#define PME_VARIABLE_TRIGGER_ENTRY_NUM_V2_1 8192 + +#define PME_TWO_BYTE_TRIGGER_ENTRY_NUM_V2_2 512 +#define PME_VARIABLE_TRIGGER_ENTRY_NUM_V2_2 4096 + +#define PME_SPECIAL_CONFIDENCE_ENTRY_NUM 64 +#define PME_ONE_BYTE_CONFIDENCE_ENTRY_NUM 64 + +#define PME_CONFIDENCE_ENTRY_NUM_V1 \ + ((PME_TWO_BYTE_TRIGGER_ENTRY_NUM_V1 + \ + PME_VARIABLE_TRIGGER_ENTRY_NUM_V1 + \ + PME_ONE_BYTE_CONFIDENCE_ENTRY_NUM + \ + PME_SPECIAL_CONFIDENCE_ENTRY_NUM) * \ + PME_CONFIDENCE_ENTRY_NUM_PER_TRIGGER_ENTRY) + +#define PME_CONFIDENCE_ENTRY_NUM_V2_0 \ + ((PME_TWO_BYTE_TRIGGER_ENTRY_NUM_V2_0 + \ + PME_VARIABLE_TRIGGER_ENTRY_NUM_V2_0 + \ + PME_ONE_BYTE_CONFIDENCE_ENTRY_NUM + \ + PME_SPECIAL_CONFIDENCE_ENTRY_NUM) * \ + PME_CONFIDENCE_ENTRY_NUM_PER_TRIGGER_ENTRY) + +#define PME_CONFIDENCE_ENTRY_NUM_V2_1 \ + ((PME_TWO_BYTE_TRIGGER_ENTRY_NUM_V2_1 + \ + PME_VARIABLE_TRIGGER_ENTRY_NUM_V2_1 + \ + PME_ONE_BYTE_CONFIDENCE_ENTRY_NUM + \ + PME_SPECIAL_CONFIDENCE_ENTRY_NUM) * \ + PME_CONFIDENCE_ENTRY_NUM_PER_TRIGGER_ENTRY) + +#define PME_CONFIDENCE_ENTRY_NUM_V2_2 \ + ((PME_TWO_BYTE_TRIGGER_ENTRY_NUM_V2_2 + \ + PME_VARIABLE_TRIGGER_ENTRY_NUM_V2_2 + \ + PME_ONE_BYTE_CONFIDENCE_ENTRY_NUM + \ + 
PME_SPECIAL_CONFIDENCE_ENTRY_NUM) * \ + PME_CONFIDENCE_ENTRY_NUM_PER_TRIGGER_ENTRY) + +#define PME_EQUIVALENCE_ENTRY_NUM 1 +#define PME_USER_DEFINED_GROUP_ENTRY_NUM 1 +#define PME_SPECIAL_TRIGGER_ENTRY_NUM 1 + +/* + * The next few macros below define the sizes of the different + * messages. Note the the macros related to the table read and write + * messages assume that there is only one entry in the read/write + * message. + */ +#define PME_TABLE_READ_REQUEST_MSG_SIZE \ + sizeof(struct pme_pmtcc_read_request_msg_t) + +#define PME_ONE_BYTE_TABLE_READ_REPLY_MSG_SIZE \ + (sizeof(struct pme_pmtcc_read_reply_msg_t) - \ + sizeof(union pme_table_entry_t) + \ + PME_ONE_BYTE_TRIGGER_ENTRY_SIZE) + +#define PME_TWO_BYTE_TABLE_READ_REPLY_MSG_SIZE \ + (sizeof(struct pme_pmtcc_read_reply_msg_t) - \ + sizeof(union pme_table_entry_t) + \ + PME_TWO_BYTE_TRIGGER_ENTRY_SIZE) + +#define PME_VARIABLE_TABLE_READ_REPLY_MSG_SIZE \ + (sizeof(struct pme_pmtcc_read_reply_msg_t) - \ + sizeof(union pme_table_entry_t) + \ + PME_VARIABLE_TRIGGER_ENTRY_SIZE) + +#define PME_CONFIDENCE_TABLE_READ_REPLY_MSG_SIZE \ + (sizeof(struct pme_pmtcc_read_reply_msg_t) - \ + sizeof(union pme_table_entry_t) + \ + PME_CONFIDENCE_ENTRY_SIZE) + +#define PME_UDG_TABLE_READ_REPLY_MSG_SIZE \ + (sizeof(struct pme_pmtcc_read_reply_msg_t) - \ + sizeof(union pme_table_entry_t) + \ + PME_USER_DEFINED_GROUP_READ_ENTRY_SIZE) + +#define PME_EQUIVALENCE_TABLE_READ_REPLY_MSG_SIZE \ + (sizeof(struct pme_pmtcc_read_reply_msg_t) - \ + sizeof(union pme_table_entry_t) + \ + PME_EQUIVALENCE_READ_ENTRY_SIZE) + +#define PME_SPECIAL_TABLE_READ_REPLY_MSG_SIZE \ + (sizeof(struct pme_pmtcc_read_reply_msg_t) - \ + sizeof(union pme_table_entry_t) + \ + PME_SPECIAL_TRIGGER_ENTRY_SIZE) + +#define PME_ONE_BYTE_TABLE_WRITE_REQUEST_MSG_SIZE \ + (sizeof(struct pme_pmtcc_write_request_msg_t) - \ + sizeof(union pme_table_entry_t) + \ + PME_ONE_BYTE_TRIGGER_ENTRY_SIZE) + +#define PME_TWO_BYTE_TABLE_WRITE_REQUEST_MSG_SIZE \ + (sizeof(struct 
pme_pmtcc_write_request_msg_t) - \ + sizeof(union pme_table_entry_t) + \ + PME_TWO_BYTE_TRIGGER_ENTRY_SIZE) + +#define PME_VARIABLE_TABLE_WRITE_REQUEST_MSG_SIZE \ + (sizeof(struct pme_pmtcc_write_request_msg_t) - \ + sizeof(union pme_table_entry_t) + \ + PME_VARIABLE_TRIGGER_ENTRY_SIZE) + +#define PME_CONFIDENCE_TABLE_WRITE_REQUEST_MSG_SIZE \ + (sizeof(struct pme_pmtcc_write_request_msg_t) - \ + sizeof(union pme_table_entry_t) + \ + PME_CONFIDENCE_ENTRY_SIZE) + +#define PME_UDG_TABLE_WRITE_REQUEST_MSG_SIZE \ + (sizeof(struct pme_pmtcc_write_request_msg_t) - \ + sizeof(union pme_table_entry_t) + \ + PME_USER_DEFINED_GROUP_WRITE_ENTRY_SIZE) + +#define PME_EQUIVALENCE_TABLE_WRITE_REQUEST_MSG_SIZE \ + (sizeof(struct pme_pmtcc_write_request_msg_t) - \ + sizeof(union pme_table_entry_t) + \ + PME_EQUIVALENCE_WRITE_ENTRY_SIZE) + +#define PME_SPECIAL_TABLE_WRITE_REQUEST_MSG_SIZE \ + (sizeof(struct pme_pmtcc_write_request_msg_t) - \ + sizeof(union pme_table_entry_t) + \ + PME_SPECIAL_TRIGGER_ENTRY_SIZE) + +/* + * Index 0..255, bools do indicated which errors are serious + * 0x40, 0x41, 0x48, 0x49, 0x4c, 0x4e, 0x4f, 0x50, 0x51, 0x59, 0x5a, 0x5b, + * 0x5c, 0x5d, 0x5f, 0x60, 0x80, 0xc0, 0xc1, 0xc2, 0xc4, 0xd2, + * 0xd4, 0xd5, 0xd7, 0xd9, 0xda, 0xe0, 0xe7 + */ +static u8 serious_error_vec[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x00, 0x01, 0x01, + 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0x01, 0x01, 0x00, 0x01, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x01, 0x01, 0x00, 0x01, 0x00, 0x01, 0x01, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; + +static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *portal, + struct qman_fq *fq, const struct qm_dqrr_entry *dq) +{ + u8 status = (u8)pme_fd_res_status(&dq->fd); + u8 flags = pme_fd_res_flags(&dq->fd); + struct pme_pwrmgmt_ctx *ctx = (struct pme_pwrmgmt_ctx *)fq; + + if (unlikely(flags & PME_STATUS_UNRELIABLE)) + pr_err("pme error %d\n", __LINE__); + else if (unlikely((serious_error_vec[status]))) + pr_err("pme error %d\n", __LINE__); + else { + memcpy(&ctx->result_fd, &dq->fd, sizeof(*&dq->fd)); + complete(&ctx->done); + } + return qman_cb_dqrr_consume; +} + +static void cb_fqs(__always_unused struct qman_portal *portal, + __always_unused struct qman_fq *fq, + const struct qm_mr_entry *mr) +{ + u8 verb = mr->verb & QM_MR_VERB_TYPE_MASK; + if (verb == QM_MR_VERB_FQRNI) + return; + /* nothing else is supposed to occur */ + BUG(); +} + +static const struct qman_fq_cb pme_fq_base_out = { + .dqrr = cb_dqrr, + .fqs = cb_fqs +}; + +static const struct qman_fq_cb pme_fq_base_in = { + .fqs = cb_fqs, + .ern = NULL +}; + +static void pme_pwrmgmt_initfq(struct qm_mcc_initfq *initfq, u32 
rfqid) +{ + struct pme_context_a *pme_a = + (struct pme_context_a *)&initfq->fqd.context_a; + struct pme_context_b *pme_b = + (struct pme_context_b *)&initfq->fqd.context_b; + + initfq->we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA | + QM_INITFQ_WE_CONTEXTB; + initfq->fqd.dest.channel = qm_channel_pme; + initfq->fqd.dest.wq = 4; + pme_a->mode = pme_mode_direct; + pme_context_a_set64(pme_a, 0); + pme_b->rfqid = rfqid; +} + +static int pme_pwrmgmt_ctx_reconfigure_tx(struct pme_pwrmgmt_ctx *ctx) +{ + struct qm_mcc_initfq initfq; + u32 flags = QMAN_INITFQ_FLAG_SCHED | QMAN_INITFQ_FLAG_LOCAL; + int ret; + + memset(&initfq, 0, sizeof(initfq)); + initfq.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL; + initfq.fqd.dest.wq = 4; + initfq.fqd.fq_ctrl = 0; /* disable stashing */ + ret = qman_init_fq(&ctx->tx_fq, flags, &initfq); + return ret; +} + +static int pme_pwrmgmt_ctx_reconfigure_rx(struct pme_pwrmgmt_ctx *ctx) +{ + struct qm_mcc_initfq initfq; + int ret; + + memset(&initfq, 0, sizeof(initfq)); + pme_pwrmgmt_initfq(&initfq, qman_fq_fqid(&ctx->tx_fq)); + ret = qman_init_fq(&ctx->rx_fq, 0, &initfq); + return ret; +} + +int pme_pwrmgmt_ctx_init(struct pme_pwrmgmt_ctx *ctx) +{ + int ret; + + ctx->tx_fq.cb = pme_fq_base_out; + ctx->rx_fq.cb = pme_fq_base_in; + + /* Create tx (from pme point of view) frame queue */ + ret = qman_create_fq(0, QMAN_FQ_FLAG_TO_DCPORTAL | + QMAN_FQ_FLAG_DYNAMIC_FQID | QMAN_FQ_FLAG_LOCKED, + &ctx->rx_fq); + if (ret) + return ret; + + ret = qman_create_fq(0, QMAN_FQ_FLAG_NO_ENQUEUE | + QMAN_FQ_FLAG_DYNAMIC_FQID | QMAN_FQ_FLAG_LOCKED, + &ctx->tx_fq); + if (ret) + goto create_rx_failed; + + ret = pme_pwrmgmt_ctx_reconfigure_rx(ctx); + if (ret) + goto config_rx_failed; + + ret = pme_pwrmgmt_ctx_reconfigure_tx(ctx); + if (ret) + goto config_tx_failed; + + return 0; +config_tx_failed: +config_rx_failed: + qman_destroy_fq(&ctx->rx_fq, 0); +create_rx_failed: + qman_destroy_fq(&ctx->tx_fq, 0); + return ret; +} + +static void 
pme_pwrmgmt_ctx_finish(struct pme_pwrmgmt_ctx *ctx) +{ + u32 flags; + int ret; + + ret = qman_retire_fq(&ctx->tx_fq, &flags); + BUG_ON(ret); + BUG_ON(flags & QMAN_FQ_STATE_BLOCKOOS); + ret = qman_retire_fq(&ctx->rx_fq, &flags); + BUG_ON(ret); + BUG_ON(flags & QMAN_FQ_STATE_BLOCKOOS); + ret = qman_oos_fq(&ctx->tx_fq); + BUG_ON(ret); + ret = qman_oos_fq(&ctx->rx_fq); + BUG_ON(ret); + qman_destroy_fq(&ctx->tx_fq, 0); + qman_destroy_fq(&ctx->rx_fq, 0); +} + +static int create_pwrmgmt_ctx(struct portal_backup_info *save_db) +{ + int ret; + + /* check to see if context already exists */ + if (save_db->ctx) + return 0; + + save_db->ctx = kzalloc(sizeof(*save_db->ctx), GFP_KERNEL); + if (!save_db->ctx) + return -ENOMEM; + + init_completion(&save_db->ctx->done); + ret = pme_pwrmgmt_ctx_init(save_db->ctx); + if (ret) { + pr_err("Error pme_pwrmgmt_ctx_init\n"); + goto error_free_mem; + } + return 0; + +error_free_mem: + kfree(save_db->ctx); + save_db->ctx = NULL; + return ret; +} + +static int delete_pwrmgmt_ctx(struct portal_backup_info *save_db) +{ + if (!save_db->ctx) + return 0; + + pme_pwrmgmt_ctx_finish(save_db->ctx); + kfree(save_db->ctx); + save_db->ctx = NULL; + + return 0; +} + +/* Send a pmtcc pme frame */ +static int pme_pwrmgmt_ctx_pmtcc(struct pme_pwrmgmt_ctx *ctx, u32 flags, + struct qm_fd *fd) +{ + int ret; + + struct pme_cmd_pmtcc *pmtcc = (struct pme_cmd_pmtcc *)&fd->cmd; + pmtcc->cmd = pme_cmd_pmtcc; + + ret = qman_enqueue(&ctx->rx_fq, fd, flags & + (QMAN_ENQUEUE_FLAG_WAIT | QMAN_ENQUEUE_FLAG_WAIT_INT)); + + return ret; +} + +static int get_table_attributes(enum pme_pmtcc_table_id tbl_id, + uint32_t pme_rev1, int *num_read_entries, int *num_write_entries, + int *read_size, int *read_reply_size, int *write_size, + int *read_entry_size, int *write_entry_size) +{ + *read_size = PME_TABLE_READ_REQUEST_MSG_SIZE; + + switch (tbl_id) { + case PME_ONE_BYTE_TRIGGER_TBL: + *num_read_entries = PME_ONE_BYTE_TRIGGER_ENTRY_NUM; + *num_write_entries = 
PME_ONE_BYTE_TRIGGER_ENTRY_NUM; + *read_reply_size = PME_ONE_BYTE_TABLE_READ_REPLY_MSG_SIZE; + *write_size = PME_ONE_BYTE_TABLE_WRITE_REQUEST_MSG_SIZE; + *read_entry_size = PME_ONE_BYTE_TRIGGER_ENTRY_SIZE; + *write_entry_size = PME_ONE_BYTE_TRIGGER_ENTRY_SIZE; + break; + + case PME_TWO_BYTE_TRIGGER_TBL: + if (is_version(pme_rev1, 2, 0)) + *num_read_entries = PME_TWO_BYTE_TRIGGER_ENTRY_NUM_V2_0; + else if (is_version(pme_rev1, 2, 1)) + *num_read_entries = PME_TWO_BYTE_TRIGGER_ENTRY_NUM_V2_1; + else if (is_version(pme_rev1, 2, 2)) + *num_read_entries = PME_TWO_BYTE_TRIGGER_ENTRY_NUM_V2_2; + else { + pr_err("pme suspend: unsupported pme version %u\n", + pme_rev1); + return -EINVAL; + } + *num_write_entries = *num_read_entries; + *read_reply_size = PME_TWO_BYTE_TABLE_READ_REPLY_MSG_SIZE; + *write_size = PME_TWO_BYTE_TABLE_WRITE_REQUEST_MSG_SIZE; + *read_entry_size = PME_TWO_BYTE_TRIGGER_ENTRY_SIZE; + *write_entry_size = PME_TWO_BYTE_TRIGGER_ENTRY_SIZE; + break; + + case PME_VARIABLE_TRIGGER_TBL: + if (is_version(pme_rev1, 2, 0)) + *num_read_entries = PME_VARIABLE_TRIGGER_ENTRY_NUM_V2_0; + else if (is_version(pme_rev1, 2, 1)) + *num_read_entries = PME_VARIABLE_TRIGGER_ENTRY_NUM_V2_1; + else if (is_version(pme_rev1, 2, 2)) + *num_read_entries = PME_VARIABLE_TRIGGER_ENTRY_NUM_V2_2; + else { + pr_err("pme suspend: unsupported pme version %u\n", + pme_rev1); + return -EINVAL; + } + *num_write_entries = *num_read_entries; + *read_reply_size = PME_VARIABLE_TABLE_READ_REPLY_MSG_SIZE; + *write_size = PME_VARIABLE_TABLE_WRITE_REQUEST_MSG_SIZE; + *read_entry_size = PME_VARIABLE_TRIGGER_ENTRY_SIZE; + *write_entry_size = PME_VARIABLE_TRIGGER_ENTRY_SIZE; + break; + + case PME_CONFIDENCE_TBL: + if (is_version(pme_rev1, 2, 0)) + *num_read_entries = PME_CONFIDENCE_ENTRY_NUM_V2_0; + else if (is_version(pme_rev1, 2, 1)) + *num_read_entries = PME_CONFIDENCE_ENTRY_NUM_V2_1; + else if (is_version(pme_rev1, 2, 2)) + *num_read_entries = PME_CONFIDENCE_ENTRY_NUM_V2_2; + else { + pr_err("pme 
suspend: unsupported pme version %u\n", + pme_rev1); + return -EINVAL; + } + *num_write_entries = *num_read_entries; + *read_reply_size = PME_CONFIDENCE_TABLE_READ_REPLY_MSG_SIZE; + *write_size = PME_CONFIDENCE_TABLE_WRITE_REQUEST_MSG_SIZE; + *read_entry_size = PME_CONFIDENCE_ENTRY_SIZE; + *write_entry_size = PME_CONFIDENCE_ENTRY_SIZE; + break; + case PME_UDG_TBL: + *num_read_entries = 256; + *num_write_entries = PME_USER_DEFINED_GROUP_ENTRY_NUM; + *read_reply_size = PME_UDG_TABLE_READ_REPLY_MSG_SIZE; + *write_size = PME_UDG_TABLE_WRITE_REQUEST_MSG_SIZE; + *read_entry_size = (PME_UDG_TABLE_READ_REPLY_MSG_SIZE - + PME_TABLE_READ_REQUEST_MSG_SIZE); + *write_entry_size = PME_USER_DEFINED_GROUP_WRITE_ENTRY_SIZE; + break; + + case PME_EQUIVALENT_BYTE_TBL: + *num_read_entries = 256; + *num_write_entries = PME_EQUIVALENCE_ENTRY_NUM; + *read_reply_size = PME_EQUIVALENCE_TABLE_READ_REPLY_MSG_SIZE; + *write_size = PME_EQUIVALENCE_TABLE_WRITE_REQUEST_MSG_SIZE; + *read_entry_size = (PME_EQUIVALENCE_TABLE_READ_REPLY_MSG_SIZE - + PME_TABLE_READ_REQUEST_MSG_SIZE); + *write_entry_size = PME_EQUIVALENCE_WRITE_ENTRY_SIZE; + break; + + case PME_SPECIAL_TRIGGER_TBL: + *num_read_entries = PME_SPECIAL_TRIGGER_ENTRY_NUM; + *num_write_entries = PME_SPECIAL_TRIGGER_ENTRY_NUM; + *read_reply_size = PME_SPECIAL_TABLE_READ_REPLY_MSG_SIZE; + *write_size = PME_SPECIAL_TABLE_WRITE_REQUEST_MSG_SIZE; + *read_entry_size = PME_SPECIAL_TRIGGER_ENTRY_SIZE; + *write_entry_size = PME_SPECIAL_TRIGGER_ENTRY_SIZE; + break; + } + return 0; +} + +#ifdef PME_SUSPEND_DEBUG +static int total_size_read_request(enum pme_pmtcc_table_id tbl_id, + uint32_t pme_rev1) +{ + int ret, num_read_entries, read_size, read_reply_size, write_size, + read_entry_size, write_entry_size, num_write_entries; + + ret = get_table_attributes(tbl_id, pme_rev1, &num_read_entries, + &num_write_entries, &read_size, &read_reply_size, &write_size, + &read_entry_size, &write_entry_size); + + if (ret) + return ret; + + return num_read_entries * 
read_size; +} + +static int total_size_read_response_request(enum pme_pmtcc_table_id tbl_id, + uint32_t pme_rev1) +{ + int ret, num_read_entries, read_size, read_reply_size, write_size, + read_entry_size, write_entry_size, num_write_entries; + + ret = get_table_attributes(tbl_id, pme_rev1, &num_read_entries, + &num_write_entries, &read_size, &read_reply_size, &write_size, + &read_entry_size, &write_entry_size); + + if (ret) + return ret; + + return num_read_entries * read_reply_size; +} + +static int total_size_write_request(enum pme_pmtcc_table_id tbl_id, + uint32_t pme_rev1) +{ + int ret, num_read_entries, read_size, read_reply_size, write_size, + read_entry_size, write_entry_size, num_write_entries; + + ret = get_table_attributes(tbl_id, pme_rev1, &num_read_entries, + &num_write_entries, &read_size, &read_reply_size, &write_size, + &read_entry_size, &write_entry_size); + + if (ret) + return ret; + + return num_write_entries * write_entry_size; +} +#endif + +static int sizeof_all_db_tables(uint32_t pme_rev1) +{ + enum pme_pmtcc_table_id tbl_id; + int i, ret, size = 0; + + for (i = 0; i < ARRAY_SIZE(table_list); i++) { + int num_read_entries, read_size, read_reply_size, write_size, + read_entry_size, write_entry_size, num_write_entries; + tbl_id = table_list[i]; + + ret = get_table_attributes(tbl_id, pme_rev1, &num_read_entries, + &num_write_entries, &read_size, &read_reply_size, + &write_size, &read_entry_size, &write_entry_size); + + if (ret) + return ret; + size += (write_entry_size * num_write_entries); + } + return size; +} + +#ifdef PME_SUSPEND_DEBUG +static void print_debug(uint32_t pme_rev1) +{ + int i = 0; + + pr_info("size of db is %d\n", sizeof_all_db_tables(pme_rev1)); + + do { + int num_read_entries, read_size, read_reply_size, write_size, + num_write_entries, read_entry_size, write_entry_size; + + get_table_attributes(table_list[i], pme_rev1, &num_read_entries, + &num_write_entries, &read_size, &read_reply_size, + &write_size, &read_entry_size, 
&write_entry_size); + + pr_info("Table Id %d\n", table_list[i]); + pr_info(" num_read_entries %d, r_sz %d, rr_sz %d, w_sz %d\n", + num_read_entries, read_size, read_reply_size, + write_size); + pr_info(" num_wr_entries %d, r_entry_size %d w_entry_size %d\n", + num_write_entries, read_entry_size, write_entry_size); + pr_info(" total read request size %d\n", + total_size_read_request(table_list[i], pme_rev1)); + pr_info(" total read reply request size %d\n", + total_size_read_response_request(table_list[i], + pme_rev1)); + pr_info(" total write request size %d\n", + total_size_write_request(table_list[i], pme_rev1)); + + if (table_list[i] == PME_LAST_TABLE) + break; + i++; + } while (1); +} +#endif + +static void free_databases(struct portal_backup_info *save_db) +{ + vfree(save_db->db.alldb); + save_db->db.alldb = NULL; +} + +static int alloc_databases(struct pme2_private_data *priv_data) +{ + int sizedb; + + sizedb = sizeof_all_db_tables(priv_data->pme_rev1); + if (sizedb < 0) { + pr_err("Error getting db size\n"); + return -EINVAL; + } + + priv_data->save_db.db.alldb = vzalloc(sizedb); + if (!priv_data->save_db.db.alldb) + return -ENOMEM; + + return 0; +} + +static int save_all_tables(struct portal_backup_info *save_db, + uint32_t pme_rev1) +{ + struct pmtcc_raw_db *db = &save_db->db; + enum pme_pmtcc_table_id tbl_id; + int i, ret; + uint8_t *current_tbl = db->alldb; + + for (i = 0; i < ARRAY_SIZE(table_list); i++) { + int num_read_entries, read_size, read_reply_size, write_size, + read_entry_size, write_entry_size, num_write_entries; + int idx; + struct pme_pmtcc_read_request_msg_t *entry; + struct qm_fd fd; + struct qm_sg_entry *sg_table = NULL; + uint8_t *input_data, *output_data; + enum pme_status status; + + tbl_id = table_list[i]; + ret = get_table_attributes(tbl_id, pme_rev1, &num_read_entries, + &num_write_entries, &read_size, &read_reply_size, + &write_size, &read_entry_size, &write_entry_size); + + /* Allocate input and output frame data */ + output_data 
= kzalloc(read_reply_size, GFP_KERNEL); + input_data = kzalloc(read_size, GFP_KERNEL); + sg_table = kzalloc(2 * sizeof(*sg_table), GFP_KERNEL); + + entry = (struct pme_pmtcc_read_request_msg_t *) + input_data; + entry->header.protocol_version = pme_rev1; + entry->header.msg_length = read_size; + entry->table_id = tbl_id; + + /* build fd */ + memset(&fd, 0, sizeof(fd)); + qm_sg_entry_set64(&sg_table[0], pme_suspend_map(save_db->pdev, + output_data)); + sg_table[0].length = read_reply_size; + qm_sg_entry_set64(&sg_table[1], pme_suspend_map(save_db->pdev, + input_data)); + sg_table[1].length = read_size; + sg_table[1].final = 1; + fd.format = qm_fd_compound; + qm_fd_addr_set64(&fd, pme_suspend_map(save_db->pdev, sg_table)); +#ifdef PME_SUSPEND_DEBUG + pr_info("Doing table %d\n", tbl_id); +#endif + for (idx = 0; idx < num_read_entries; idx++) { + entry->index = idx; + memset(output_data, 0, read_reply_size); + ret = pme_pwrmgmt_ctx_pmtcc(save_db->ctx, + QMAN_ENQUEUE_FLAG_WAIT, &fd); + + if (ret) + pr_err("error with pme_pwrmgmt_ctx_pmtcc\n"); + + wait_for_completion(&save_db->ctx->done); + + status = pme_fd_res_status(&save_db->ctx->result_fd); + if (status) { + ret = -EINVAL; + pr_err("PMTCC read status failed %d\n", status); + } + if (pme_fd_res_flags(&save_db->ctx->result_fd) & + PME_STATUS_UNRELIABLE) { + pr_err("pme %x\n", pme_fd_res_flags( + &save_db->ctx->result_fd)); + ret = -EINVAL; + } + /* copy the response */ + if (tbl_id == PME_UDG_TBL || + tbl_id == PME_EQUIVALENT_BYTE_TBL) { + /* Only copy over 8 lower bits to first byte */ + uint32_t tmp32; + uint8_t tmp8; + memcpy(&tmp32, output_data + read_size, + read_entry_size); + tmp8 = (uint8_t)tmp32; + memcpy(current_tbl + (idx * 1), &tmp8, 1); + } else { + memcpy(current_tbl + (idx * write_entry_size), + output_data + read_size, + write_entry_size); + } + } + current_tbl += num_write_entries * write_entry_size; + + /* Free input and output frame data */ + kfree(output_data); + kfree(input_data); + 
kfree(sg_table); + } + return 0; +} + +/* don't need to write zero to PME sram since POR is all zero */ +static int is_all_zero(uint8_t *buf, int size) +{ + int i; + for (i = 0; i < size; i++) { + if (buf[i] != 0) + return 0; + } + return 1; +} + +static int restore_all_tables(struct portal_backup_info *save_db, + uint32_t pme_rev1) +{ + struct pmtcc_raw_db *db = &save_db->db; + enum pme_pmtcc_table_id tbl_id; + int i, ret; + uint8_t *current_tbl = db->alldb; + + for (i = 0; i < ARRAY_SIZE(table_list); i++) { + int num_read_entries, read_size, read_reply_size, write_size, + read_entry_size, write_entry_size, num_write_entries; + int idx; + struct pme_pmtcc_write_request_msg_t *entry; + struct qm_fd fd; + uint8_t *input_data; + enum pme_status status; + + tbl_id = table_list[i]; + ret = get_table_attributes(tbl_id, pme_rev1, &num_read_entries, + &num_write_entries, &read_size, &read_reply_size, + &write_size, &read_entry_size, &write_entry_size); + + /* Allocate input frame data */ + input_data = kzalloc(write_size, GFP_KERNEL); + + entry = (struct pme_pmtcc_write_request_msg_t *) + input_data; + entry->header.protocol_version = pme_rev1; + entry->header.msg_type = 0x01; /* write */ + entry->header.msg_length = write_size; + entry->table_id = tbl_id; + + /* build fd */ + memset(&fd, 0, sizeof(fd)); + qm_fd_addr_set64(&fd, pme_suspend_map(save_db->pdev, + input_data)); + fd.format = qm_fd_contig_big; + fd.length29 = write_size; +#ifdef PME_SUSPEND_DEBUG + pr_info("Doing table %d\n", tbl_id); +#endif + for (idx = 0; idx < num_write_entries; idx++) { + if (is_all_zero(current_tbl + (idx * write_entry_size), + write_entry_size)) { + continue; + } + entry->indexed_entry.index = idx; + + memcpy(input_data + (write_size - write_entry_size), + current_tbl + (idx * write_entry_size), + write_entry_size); + + ret = pme_pwrmgmt_ctx_pmtcc(save_db->ctx, + QMAN_ENQUEUE_FLAG_WAIT, &fd); + + if (ret) + pr_err("error with pmtcc\n"); + + wait_for_completion(&save_db->ctx->done); + + 
status = pme_fd_res_status(&save_db->ctx->result_fd); + if (status) { + ret = -EINVAL; + pr_err("PMTCC write status fail %d\n", status); + } + if (pme_fd_res_flags(&save_db->ctx->result_fd) & + PME_STATUS_UNRELIABLE) { + pr_err("pme %x\n", pme_fd_res_flags( + &save_db->ctx->result_fd)); + ret = -EINVAL; + } + } + current_tbl += num_write_entries * write_entry_size; + + /* Free input and output frame data */ + kfree(input_data); + } + return 0; +} + +int fsl_pme_save_db(struct pme2_private_data *priv_data) +{ + int ret; + struct portal_backup_info *save_db = &priv_data->save_db; + +#ifdef PME_SUSPEND_DEBUG + print_debug(priv_data->pme_rev1); +#endif + ret = save_all_tables(save_db, priv_data->pme_rev1); + + return ret; +} + +static int is_pme_active(void) +{ + uint32_t val; + int ret; + + ret = pme_attr_get(pme_attr_pmstat, &val); + if (ret) { + pr_err("Error reading activity bit\n"); + return ret; + } + return val; +} + +/** + * pme_suspend - power management suspend function + * + * @priv_data: pme2 device private data + * + * Saves the pme device volatile state prior to suspension. + * CCSR space and SRAM state is saved to DDR + */ +int pme_suspend(struct pme2_private_data *priv_data) +{ + int ret; + struct ccsr_backup_info *ccsr_info; + struct portal_backup_info *db_info; + + ccsr_info = &priv_data->save_ccsr; + db_info = &priv_data->save_db; + + pme_attr_get(pme_attr_faconf_en, &ccsr_info->save_faconf_en); + pme_attr_get(pme_attr_cdcr, &ccsr_info->save_cdcr); + + /* disable pme */ + pme_attr_set(pme_attr_faconf_en, 0); + /* disable caching, only SRE will be flushed. 
FC caching already off */ + pme_attr_set(pme_attr_cdcr, 0xffffffff); + + /* wait until device is not active */ + while (is_pme_active()) { + cpu_relax(); + /* TODO: sanity check */ + } +#ifdef PME_SUSPEND_DEBUG + pr_info("PME is quiescent\n"); +#endif + + /* save CCSR space */ + save_all_ccsr(ccsr_info, priv_data->regs); + +#ifdef PME_SUSPEND_DEBUG + pr_info("First reg read is %u\n", + ccsr_info->regdb.pmfa.faconf); + pr_info("Last reg read is %u\n", + ccsr_info->regdb.gen.pm_ip_rev_2); +#endif + + /* save sram, must first configure the new exclusive fq before + * enabling pme */ + ret = pme2_exclusive_set(&db_info->ctx->rx_fq); + if (ret) + pr_err("Error getting exclusive mode\n"); + + /* save sram database, hook into pme_suspend. enable pme first */ + pme_attr_set(pme_attr_faconf_en, 1); + ret = fsl_pme_save_db(priv_data); + /* disable pme */ + pme_attr_set(pme_attr_faconf_en, 0); + + /* wait until device is not active */ + while (is_pme_active()) { + cpu_relax(); + /* TODO: sanity check */ + } +#ifdef PME_SUSPEND_DEBUG + pr_info("PME is quiescent\n"); +#endif + +#ifdef PME_SUSPEND_DEBUG + /* set the PME reset bit */ + pme_attr_set(pme_attr_faconf_rst, 1); + /* clear the PME reset bit */ + pme_attr_set(pme_attr_faconf_rst, 0); +#endif + + return 0; +} + +/** + * pme_resume - power management resume function + * + * @priv_data: pme2 device private data + * + * Restores the pme device to its original state prior to suspension. + * CCSR space and SRAM state is restored + */ +int pme_resume(struct pme2_private_data *priv_data) +{ + int ret; + struct ccsr_backup_info *ccsr_info; + struct portal_backup_info *db_info; + + ccsr_info = &priv_data->save_ccsr; + db_info = &priv_data->save_db; + +#ifdef PME_SUSPEND_DEBUG + pr_info("fsl_pme_restore_db\n"); + print_debug(priv_data->pme_rev1); +#endif + + /* when PME was saved, it was disabled. 
Therefore it will remain */ + restore_all_ccsr(ccsr_info, priv_data->regs); + /* restore caching state */ + pme_attr_set(pme_attr_cdcr, ccsr_info->save_cdcr); + + /* set private exclusive mode before enabling pme */ + /* save sram, must first configure the new exclusive fq before + * enabling pme */ + ret = pme2_exclusive_set(&db_info->ctx->rx_fq); + if (ret) + pr_err("Error getting exclusive mode\n"); + + /* save sram database, hook into pme_suspend. enable pme first */ + pme_attr_set(pme_attr_faconf_en, 1); + + ret = restore_all_tables(db_info, priv_data->pme_rev1); + + /* disable pme */ + pme_attr_set(pme_attr_faconf_en, 0); + /* wait until device is not active */ + while (is_pme_active()) { + cpu_relax(); + /* TODO: sanity check */ + } + + /* restore EFQC register */ + pme_attr_set(pme_attr_efqc, ccsr_info->regdb.pmfa.efqc); + + /* restore pme enable state */ + pme_attr_set(pme_attr_faconf_en, ccsr_info->save_faconf_en); + + free_databases(db_info); + + return 0; +} + + +/** + * init_pme_suspend - initialize pme resources for power management + * + * @priv_data: pme2 device private data + * + * All resources required to suspend the PME device are allocated. 
+ * They include memory,frame queues, platform device + */ +int init_pme_suspend(struct pme2_private_data *priv_data) +{ + int ret; + struct ccsr_backup_info *ccsr_info; + struct portal_backup_info *db_info; + + ccsr_info = &priv_data->save_ccsr; + db_info = &priv_data->save_db; + + db_info->pdev = platform_device_alloc("fsl_pme_suspend", -1); + if (!db_info->pdev) + goto failed_alloc_device; + if (dma_set_mask(&db_info->pdev->dev, DMA_BIT_MASK(40))) + goto failed_dma_mask; + if (platform_device_add(db_info->pdev)) + goto failed_device_add; + + /* allocate frame queues */ + ret = create_pwrmgmt_ctx(db_info); + if (ret) + goto failed_create_pwrmgmt_ctx; + + ret = alloc_databases(priv_data); + if (ret) + goto failed_alloc_databases; + + return 0; + +failed_alloc_databases: + delete_pwrmgmt_ctx(db_info); +failed_create_pwrmgmt_ctx: + platform_device_del(db_info->pdev); +failed_device_add: +failed_dma_mask: + platform_device_put(db_info->pdev); + db_info->pdev = NULL; +failed_alloc_device: + return -ENOMEM; +} + +/** + * exit_pme_suspend - release pme resources for power management + * + * @priv_data: pme2 device private data + * + * All resources required to suspend the PME device are released. 
+ * They include memory,frame queues, platform device + */ +void exit_pme_suspend(struct pme2_private_data *priv_data) +{ + struct portal_backup_info *db_info; + + db_info = &priv_data->save_db; + + free_databases(db_info); + delete_pwrmgmt_ctx(db_info); + platform_device_del(db_info->pdev); + platform_device_put(db_info->pdev); + db_info->pdev = NULL; +} + +#endif /* CONFIG_PM */ + diff --git a/include/linux/fsl_pme.h b/include/linux/fsl_pme.h index 330b10b..fbeec4f 100644 --- a/include/linux/fsl_pme.h +++ b/include/linux/fsl_pme.h @@ -770,6 +770,9 @@ enum pme_attr { pme_attr_scbarl, pme_attr_bsc_first, /* create 64-wide space for bsc */ pme_attr_bsc_last = pme_attr_bsc_first + 63, + pme_attr_faconf_rst, + pme_attr_faconf_en, + pme_attr_efqc }; #define pme_attr_bsc(n) (pme_attr_bsc_first + (n)) -- cgit v0.10.2 From a1a36c47104a2c104368bcb90f808579809e6720 Mon Sep 17 00:00:00 2001 From: Jeffrey Ladouceur Date: Mon, 25 Nov 2013 15:55:06 -0500 Subject: fsl_pme: Remove duplicate memory release During a refactoring exercise memory allocated is only released upon device exit. Should not be freed during power management resume. 
Signed-off-by: Jeffrey Ladouceur Change-Id: Ie2f7690de0497d6adb29e5c25cf379c3ff3e46b6 Reviewed-on: http://git.am.freescale.net:8181/6847 Reviewed-by: Haiying Wang Reviewed-by: Roy Pledge Reviewed-by: Geoff Thorpe Tested-by: Review Code-CDREVIEW Reviewed-by: Jose Rivera (cherry picked from commit e15e0b2f451cdcf3579a0c42b5583c437fa74408) Reviewed-on: http://git.am.freescale.net:8181/10776 diff --git a/drivers/staging/fsl_pme2/pme2_suspend.c b/drivers/staging/fsl_pme2/pme2_suspend.c index 69951bf..4de830b 100644 --- a/drivers/staging/fsl_pme2/pme2_suspend.c +++ b/drivers/staging/fsl_pme2/pme2_suspend.c @@ -1108,8 +1108,6 @@ int pme_resume(struct pme2_private_data *priv_data) /* restore pme enable state */ pme_attr_set(pme_attr_faconf_en, ccsr_info->save_faconf_en); - free_databases(db_info); - return 0; } -- cgit v0.10.2 From 563e96070a46ad41e172992dd408d2279facac6f Mon Sep 17 00:00:00 2001 From: Jeffrey Ladouceur Date: Thu, 27 Feb 2014 19:15:48 -0500 Subject: fsl_pme: Support for power management Change the pme error interrupt handler to be per-CPU to avoid a warning ("Cannot set affinity for irq") when entering suspend-to-ram state. Added IRQF_PERCPU flag. While sending frames to the PME device, this device can return a serious error. Added logic to properly handle this scenario. If this happens while reading the pme database, upon resume the resulting database will not be restored. The size of the confidence table was incorrect for a specific version of pme. 
Signed-off-by: Jeffrey Ladouceur Change-Id: Ic8cbc048c7a0e4cd8389298267ec9b4876055568 Reviewed-on: http://git.am.freescale.net:8181/9267 Tested-by: Review Code-CDREVIEW Reviewed-by: Geoff Thorpe Reviewed-by: Jose Rivera (cherry picked from commit f13d881f9e464079b62dba71f96dd73be4ad83a0) Reviewed-on: http://git.am.freescale.net:8181/10777 diff --git a/drivers/staging/fsl_pme2/pme2_ctrl.c b/drivers/staging/fsl_pme2/pme2_ctrl.c index 9901fe6..016648c 100644 --- a/drivers/staging/fsl_pme2/pme2_ctrl.c +++ b/drivers/staging/fsl_pme2/pme2_ctrl.c @@ -340,8 +340,8 @@ static int of_fsl_pme_probe(struct platform_device *ofdev) if (likely(pme_err_irq != NO_IRQ)) { /* Register the pme ISR handler */ - err = request_irq(pme_err_irq, pme_isr, IRQF_SHARED, "pme-err", - dev); + err = request_irq(pme_err_irq, pme_isr, + IRQF_SHARED | IRQF_PERCPU, "pme-err", dev); if (err) { dev_err(dev, "request_irq() failed\n"); goto out_unmap_ctrl_region; @@ -473,14 +473,31 @@ void restore_all_ccsr(struct ccsr_backup_info *save_ccsr, { int i; int num_regs = sizeof(save_ccsr->regdb)/sizeof(uint32_t); - uint32_t *pme_reg = &save_ccsr->regdb.pmfa.isr; +#ifdef CONFIG_PM_DEBUG + int diff_count = 0; +#endif for (i = 0; i < num_regs; i++) { +#ifdef CONFIG_PM_DEBUG + /* skip enable register */ + if ((pme_reg + i) != (&save_ccsr->regdb.pmfa.faconf)) { + uint32_t pme_reg_val; + pme_reg_val = in_be32(regs + i); + if (pme_reg_val != *(pme_reg + i)) + diff_count++; + out_be32(regs + i, *(pme_reg + i)); + } +#else /* skip enable register */ if ((pme_reg + i) != (&save_ccsr->regdb.pmfa.faconf)) out_be32(regs + i, *(pme_reg + i)); +#endif } + +#ifdef CONFIG_PM_DEBUG + pr_info("pme ccsr restore: %d registers were different\n", diff_count); +#endif } void save_all_ccsr(struct ccsr_backup_info *save_ccsr, uint32_t __iomem *regs) diff --git a/drivers/staging/fsl_pme2/pme2_private.h b/drivers/staging/fsl_pme2/pme2_private.h index e1ea3e8..4bd92c2 100644 --- a/drivers/staging/fsl_pme2/pme2_private.h +++ 
b/drivers/staging/fsl_pme2/pme2_private.h @@ -238,6 +238,7 @@ struct portal_backup_info { struct pmtcc_raw_db db; struct pme_pwrmgmt_ctx *ctx; struct platform_device *pdev; + int backup_failed; }; #endif /* CONFIG_PM */ diff --git a/drivers/staging/fsl_pme2/pme2_suspend.c b/drivers/staging/fsl_pme2/pme2_suspend.c index 4de830b..e2ef2af 100644 --- a/drivers/staging/fsl_pme2/pme2_suspend.c +++ b/drivers/staging/fsl_pme2/pme2_suspend.c @@ -40,49 +40,6 @@ static dma_addr_t pme_suspend_map(struct platform_device *pdev, void *ptr) return dma_map_single(&pdev->dev, ptr, 1, DMA_BIDIRECTIONAL); } -#ifdef PME_SUSPEND_DEBUG - -static inline void __hexdump(unsigned long start, unsigned long end, - unsigned long p, size_t sz, const unsigned char *c) -{ - while (start < end) { - unsigned int pos = 0; - char buf[64]; - int nl = 0; - pos += sprintf(buf + pos, "%08lx: ", start); - do { - if ((start < p) || (start >= (p + sz))) - pos += sprintf(buf + pos, ".."); - else - pos += sprintf(buf + pos, "%02x", *(c++)); - if (!(++start & 15)) { - buf[pos++] = '\n'; - nl = 1; - } else { - nl = 0; - if (!(start & 1)) - buf[pos++] = ' '; - if (!(start & 3)) - buf[pos++] = ' '; - } - } while (start & 15); - if (!nl) - buf[pos++] = '\n'; - buf[pos] = '\0'; - pr_info("%s", buf); - } -} - -static inline void hexdump(const void *ptr, size_t sz) -{ - unsigned long p = (unsigned long)ptr; - unsigned long start = p & ~(unsigned long)15; - unsigned long end = (p + sz + 15) & ~(unsigned long)15; - const unsigned char *c = ptr; - __hexdump(start, end, p, sz, c); -} -#endif - /* * The following SRAM tables need to be saved * 1-byte trigger table @@ -197,6 +154,8 @@ struct pme_pmtcc_write_request_msg_t { #define PME_SPECIAL_CONFIDENCE_ENTRY_NUM 64 #define PME_ONE_BYTE_CONFIDENCE_ENTRY_NUM 64 +#define PME_SPECIAL_CONFIDENCE_ENTRY_NUM_V2_2 32 + #define PME_CONFIDENCE_ENTRY_NUM_V1 \ ((PME_TWO_BYTE_TRIGGER_ENTRY_NUM_V1 + \ PME_VARIABLE_TRIGGER_ENTRY_NUM_V1 + \ @@ -222,7 +181,7 @@ struct 
pme_pmtcc_write_request_msg_t { ((PME_TWO_BYTE_TRIGGER_ENTRY_NUM_V2_2 + \ PME_VARIABLE_TRIGGER_ENTRY_NUM_V2_2 + \ PME_ONE_BYTE_CONFIDENCE_ENTRY_NUM + \ - PME_SPECIAL_CONFIDENCE_ENTRY_NUM) * \ + PME_SPECIAL_CONFIDENCE_ENTRY_NUM_V2_2) * \ PME_CONFIDENCE_ENTRY_NUM_PER_TRIGGER_ENTRY) #define PME_EQUIVALENCE_ENTRY_NUM 1 @@ -351,13 +310,11 @@ static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *portal, struct pme_pwrmgmt_ctx *ctx = (struct pme_pwrmgmt_ctx *)fq; if (unlikely(flags & PME_STATUS_UNRELIABLE)) - pr_err("pme error %d\n", __LINE__); + pr_err("pme status error 0x%x\n", (u32)flags); else if (unlikely((serious_error_vec[status]))) - pr_err("pme error %d\n", __LINE__); - else { - memcpy(&ctx->result_fd, &dq->fd, sizeof(*&dq->fd)); - complete(&ctx->done); - } + pr_err("pme error status 0x%x\n", (u32)status); + memcpy(&ctx->result_fd, &dq->fd, sizeof(*&dq->fd)); + complete(&ctx->done); return qman_cb_dqrr_consume; } @@ -830,12 +787,16 @@ static int save_all_tables(struct portal_backup_info *save_db, if (status) { ret = -EINVAL; pr_err("PMTCC read status failed %d\n", status); + save_db->backup_failed = 1; + break; } if (pme_fd_res_flags(&save_db->ctx->result_fd) & PME_STATUS_UNRELIABLE) { pr_err("pme %x\n", pme_fd_res_flags( &save_db->ctx->result_fd)); ret = -EINVAL; + save_db->backup_failed = 1; + break; } /* copy the response */ if (tbl_id == PME_UDG_TBL || @@ -859,8 +820,11 @@ static int save_all_tables(struct portal_backup_info *save_db, kfree(output_data); kfree(input_data); kfree(sg_table); + /* if failed, stop saving database */ + if (ret) + break; } - return 0; + return ret; } /* don't need to write zero to PME sram since POR is all zero */ @@ -938,20 +902,25 @@ static int restore_all_tables(struct portal_backup_info *save_db, if (status) { ret = -EINVAL; pr_err("PMTCC write status fail %d\n", status); + break; } if (pme_fd_res_flags(&save_db->ctx->result_fd) & PME_STATUS_UNRELIABLE) { pr_err("pme %x\n", pme_fd_res_flags( &save_db->ctx->result_fd)); 
ret = -EINVAL; + break; } } current_tbl += num_write_entries * write_entry_size; /* Free input and output frame data */ kfree(input_data); + /* if failed, stop restoring database */ + if (ret) + break; } - return 0; + return ret; } int fsl_pme_save_db(struct pme2_private_data *priv_data) @@ -980,6 +949,11 @@ static int is_pme_active(void) return val; } +static void reset_db_saved_state(struct portal_backup_info *db_info) +{ + db_info->backup_failed = 0; +} + /** * pme_suspend - power management suspend function * @@ -997,6 +971,8 @@ int pme_suspend(struct pme2_private_data *priv_data) ccsr_info = &priv_data->save_ccsr; db_info = &priv_data->save_db; + reset_db_saved_state(db_info); + pme_attr_get(pme_attr_faconf_en, &ccsr_info->save_faconf_en); pme_attr_get(pme_attr_cdcr, &ccsr_info->save_cdcr); @@ -1045,13 +1021,18 @@ int pme_suspend(struct pme2_private_data *priv_data) pr_info("PME is quiescent\n"); #endif -#ifdef PME_SUSPEND_DEBUG - /* set the PME reset bit */ - pme_attr_set(pme_attr_faconf_rst, 1); - /* clear the PME reset bit */ - pme_attr_set(pme_attr_faconf_rst, 0); -#endif - + /* if saving db failed, reset internal state explicitly */ + if (db_info->backup_failed) { + /* set the PME reset bit */ + pme_attr_set(pme_attr_faconf_rst, 1); + /* clear the PME reset bit */ + pme_attr_set(pme_attr_faconf_rst, 0); + /* wait until device is not active */ + while (is_pme_active()) { + cpu_relax(); + /* TODO: sanity check */ + } + } return 0; } @@ -1068,6 +1049,7 @@ int pme_resume(struct pme2_private_data *priv_data) int ret; struct ccsr_backup_info *ccsr_info; struct portal_backup_info *db_info; + int db_restore_failed = 0; ccsr_info = &priv_data->save_ccsr; db_info = &priv_data->save_db; @@ -1082,6 +1064,9 @@ int pme_resume(struct pme2_private_data *priv_data) /* restore caching state */ pme_attr_set(pme_attr_cdcr, ccsr_info->save_cdcr); + /* Don't restore database if it wasn't saved properly */ + if (db_info->backup_failed) + goto skip_db_restore; /* set private 
exclusive mode before enabling pme */ /* save sram, must first configure the new exclusive fq before * enabling pme */ @@ -1093,6 +1078,8 @@ int pme_resume(struct pme2_private_data *priv_data) pme_attr_set(pme_attr_faconf_en, 1); ret = restore_all_tables(db_info, priv_data->pme_rev1); + if (ret) + db_restore_failed = 1; /* disable pme */ pme_attr_set(pme_attr_faconf_en, 0); @@ -1101,10 +1088,22 @@ int pme_resume(struct pme2_private_data *priv_data) cpu_relax(); /* TODO: sanity check */ } + if (db_restore_failed) { + /* set the PME reset bit */ + pme_attr_set(pme_attr_faconf_rst, 1); + /* clear the PME reset bit */ + pme_attr_set(pme_attr_faconf_rst, 0); + /* when PME was saved, it was disabled. Therefore it will + * remain disabled */ + restore_all_ccsr(ccsr_info, priv_data->regs); + /* restore caching state */ + pme_attr_set(pme_attr_cdcr, ccsr_info->save_cdcr); + } /* restore EFQC register */ pme_attr_set(pme_attr_efqc, ccsr_info->regdb.pmfa.efqc); +skip_db_restore: /* restore pme enable state */ pme_attr_set(pme_attr_faconf_en, ccsr_info->save_faconf_en); -- cgit v0.10.2 From 43ac6e9948db456b5ec1bb2274b077be65652799 Mon Sep 17 00:00:00 2001 From: Hongbo Zhang Date: Tue, 3 Dec 2013 17:04:39 +0800 Subject: DMA: Freescale: Add suspend resume functions for DMA driver This patch adds suspend resume functions for Freescale DMA driver. .prepare callback is used to stop further descriptors from being added into the pending queue, and also issue pending queues into execution if there is any. .suspend callback makes sure all the pending jobs are cleaned up and all the channels are idle, and save the mode registers. .resume callback re-initializes the channels by restore the mode registers. 
Signed-off-by: Hongbo Zhang Change-Id: I89826288b8d0589e42063e24c4eeeafb82b6100b Reviewed-on: http://git.am.freescale.net:8181/10795 Tested-by: Review Code-CDREVIEW Reviewed-by: Yang Li Reviewed-by: Jose Rivera diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index b150668..d56d692 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -411,6 +411,14 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) spin_lock_bh(&chan->desc_lock); +#ifdef CONFIG_PM + if (unlikely(chan->pm_state != RUNNING)) { + chan_dbg(chan, "cannot submit due to suspend\n"); + spin_unlock_bh(&chan->desc_lock); + return -1; + } +#endif + /* * assign cookies to all of the software descriptors * that make up this transaction @@ -1334,6 +1342,9 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev, INIT_LIST_HEAD(&chan->ld_running); INIT_LIST_HEAD(&chan->ld_completed); chan->idle = true; +#ifdef CONFIG_PM + chan->pm_state = RUNNING; +#endif chan->common.device = &fdev->common; dma_cookie_init(&chan->common); @@ -1473,6 +1484,91 @@ static int fsldma_of_remove(struct platform_device *op) return 0; } +#ifdef CONFIG_PM +static int fsldma_prepare(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct fsldma_device *fdev = platform_get_drvdata(pdev); + struct fsldma_chan *chan; + int i; + + for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { + chan = fdev->chan[i]; + if (!chan) + continue; + + spin_lock_bh(&chan->desc_lock); + chan->pm_state = SUSPENDING; + if (!list_empty(&chan->ld_pending)) + fsl_chan_xfer_ld_queue(chan); + spin_unlock_bh(&chan->desc_lock); + } + + return 0; +} + +static int fsldma_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct fsldma_device *fdev = platform_get_drvdata(pdev); + struct fsldma_chan *chan; + int i; + + for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { + chan = fdev->chan[i]; + if (!chan) + continue; + + spin_lock_bh(&chan->desc_lock); 
+ if (!chan->idle) + goto out; + chan->regs_save.mr = DMA_IN(chan, &chan->regs->mr, 32); + chan->pm_state = SUSPENDED; + spin_unlock_bh(&chan->desc_lock); + } + return 0; + +out: + for (; i >= 0; i--) { + chan = fdev->chan[i]; + if (!chan) + continue; + spin_unlock_bh(&chan->desc_lock); + } + return -EBUSY; +} + +static int fsldma_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct fsldma_device *fdev = platform_get_drvdata(pdev); + struct fsldma_chan *chan; + u32 mode; + int i; + + for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { + chan = fdev->chan[i]; + if (!chan) + continue; + + spin_lock_bh(&chan->desc_lock); + mode = chan->regs_save.mr + & ~FSL_DMA_MR_CS & ~FSL_DMA_MR_CC & ~FSL_DMA_MR_CA; + DMA_OUT(chan, &chan->regs->mr, mode, 32); + chan->pm_state = RUNNING; + spin_unlock_bh(&chan->desc_lock); + } + + return 0; +} + +static const struct dev_pm_ops fsldma_pm_ops = { + .prepare = fsldma_prepare, + .suspend = fsldma_suspend, + .resume = fsldma_resume, +}; +#endif + static const struct of_device_id fsldma_of_ids[] = { { .compatible = "fsl,elo3-dma", }, { .compatible = "fsl,eloplus-dma", }, @@ -1485,6 +1581,9 @@ static struct platform_driver fsldma_of_driver = { .name = "fsl-elo-dma", .owner = THIS_MODULE, .of_match_table = fsldma_of_ids, +#ifdef CONFIG_PM + .pm = &fsldma_pm_ops, +#endif }, .probe = fsldma_of_probe, .remove = fsldma_of_remove, diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h index ec19517..eecaf9e 100644 --- a/drivers/dma/fsldma.h +++ b/drivers/dma/fsldma.h @@ -134,6 +134,18 @@ struct fsldma_device { #define FSL_DMA_CHAN_PAUSE_EXT 0x00001000 #define FSL_DMA_CHAN_START_EXT 0x00002000 +#ifdef CONFIG_PM +struct fsldma_chan_regs_save { + u32 mr; +}; + +enum fsldma_pm_state { + RUNNING = 0, + SUSPENDING, + SUSPENDED, +}; +#endif + struct fsldma_chan { char name[8]; /* Channel name */ struct fsldma_chan_regs __iomem *regs; @@ -161,6 +173,10 @@ struct fsldma_chan { struct tasklet_struct tasklet; 
u32 feature; bool idle; /* DMA controller is idle */ +#ifdef CONFIG_PM + struct fsldma_chan_regs_save regs_save; + enum fsldma_pm_state pm_state; +#endif void (*toggle_ext_pause)(struct fsldma_chan *fsl_chan, int enable); void (*toggle_ext_start)(struct fsldma_chan *fsl_chan, int enable); -- cgit v0.10.2 From 36c3075e63c92a1d817ee45ee3fa2a1dc38a4846 Mon Sep 17 00:00:00 2001 From: Claudiu Manoil Date: Thu, 11 Apr 2013 14:21:37 +0300 Subject: gianfar: Enable changing mac addr when if up Use device flag IFF_LIVE_ADDR_CHANGE to signal that the device supports changing the hardware address when the device is running. This allows eth_mac_addr() to change the mac address also when the network device's interface is open. This capability is required by certain applications, like bonding mode 6 (Adaptive Load Balancing). Change-Id: Ifec3730ad8a23f656dcbeafacff786d7c1b038a4 Signed-off-by: Claudiu Manoil Reviewed-on: http://git.am.freescale.net:8181/1238 Reviewed-by: Xie Jianhua-B29408 Reviewed-by: Fleming Andrew-AFLEMING Reviewed-on: http://git.am.freescale.net:8181/10808 Tested-by: Review Code-CDREVIEW Reviewed-by: Jianhua Xie Reviewed-by: Jose Rivera diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 471c3ce..d1eb25a 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -483,6 +483,15 @@ static struct net_device_stats *gfar_get_stats(struct net_device *dev) return &dev->stats; } +int gfar_set_mac_addr(struct net_device *dev, void *p) +{ + eth_mac_addr(dev, p); + + gfar_set_mac_for_addr(dev, 0, dev->dev_addr); + + return 0; +} + static const struct net_device_ops gfar_netdev_ops = { .ndo_open = gfar_enet_open, .ndo_start_xmit = gfar_start_xmit, @@ -493,7 +502,7 @@ static const struct net_device_ops gfar_netdev_ops = { .ndo_tx_timeout = gfar_timeout, .ndo_do_ioctl = gfar_ioctl, .ndo_get_stats = gfar_get_stats, - .ndo_set_mac_address = eth_mac_addr, + .ndo_set_mac_address = 
gfar_set_mac_addr, .ndo_validate_addr = eth_validate_addr, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = gfar_netpoll, @@ -1333,6 +1342,8 @@ static int gfar_probe(struct platform_device *ofdev) dev->features |= NETIF_F_HW_VLAN_CTAG_RX; } + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; + gfar_init_addr_hash_table(priv); /* Insert receive time stamps into padding alignment bytes */ -- cgit v0.10.2 From e27776e829b54051a67b41952f71c0d1130d5a2b Mon Sep 17 00:00:00 2001 From: Liu Gang Date: Mon, 11 Nov 2013 18:34:26 +0800 Subject: uio/dma: Update the UIO DMA module based on the new DMA dts The updated DMA dts files added a new compatible "fsl,elo3-dma" for some new platforms, and removed the property "cell-index". So the code should be updated based on those changes. Signed-off-by: Liu Gang Signed-off-by: Minghuan Lian Change-Id: I66603f76ca0feeb689c49b9eeb9c0621eb5c3897 Reviewed-on: http://git.am.freescale.net:8181/6555 Tested-by: Review Code-CDREVIEW Reviewed-by: Tiefei Zang Reviewed-by: Thomas Trefny (cherry picked from commit 18af7f2d2fe1f182b7c2716eaabb613d5523d2d8) Reviewed-on: http://git.am.freescale.net:8181/10811 Reviewed-by: Jose Rivera diff --git a/drivers/uio/fsl_dma_uio.c b/drivers/uio/fsl_dma_uio.c index fca99bf..f16a009 100644 --- a/drivers/uio/fsl_dma_uio.c +++ b/drivers/uio/fsl_dma_uio.c @@ -82,7 +82,7 @@ static irqreturn_t dma_uio_irq_handler(int irq, struct uio_info *dev_info) return IRQ_HANDLED; } -static int __init dma_chan_uio_setup(struct dma_chan *dma_ch) +static int dma_chan_uio_setup(struct dma_chan *dma_ch) { int ret; struct dma_uio_info *info; @@ -113,7 +113,6 @@ static int __init dma_chan_uio_setup(struct dma_chan *dma_ch) info->uio.open = dma_uio_open; info->uio.release = dma_uio_release; info->uio.priv = dma_ch; - ret = uio_register_device(dma_ch->dev, &info->uio); if (ret) { dev_err(dma_ch->dev, "dma_uio: UIO registration failed\n"); @@ -124,13 +123,13 @@ static int __init dma_chan_uio_setup(struct dma_chan *dma_ch) } static int 
fsl_dma_chan_probe(struct fsldma_device *fdev, - struct device_node *node) + struct device_node *node, + u32 chanid) { struct resource regs; struct dma_chan *dma_ch; struct device_node *dma_node; int err; - u32 *cell; struct platform_device *dev = fdev->dev; dma_node = node; @@ -142,14 +141,9 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev, return -ENOMEM; } - cell = (u32 *)of_get_property(dma_node, "cell-index", NULL); - if (!cell) { - dev_err(&dev->dev, "Can't get property 'cell-index'\n"); - return -EFAULT; - } dma_ch->dma_id = fdev->dma_id; - dma_ch->ch_id = *cell; + dma_ch->ch_id = chanid; dma_ch->dev = &dev->dev; err = of_address_to_resource(dma_node, 0, ®s); @@ -201,13 +195,8 @@ static int fsl_dma_uio_probe(struct platform_device *dev) { struct device_node *child; struct fsldma_device *fdev; - u32 *cell; - - cell = (u32 *)of_get_property(dev->dev.of_node, "cell-index", NULL); - if (!cell) { - dev_err(&dev->dev, "Can't get property 'cell-index'\n"); - return -ENODEV; - } + static u32 dmaid; + u32 chanid = 0; fdev = devm_kzalloc(&dev->dev, sizeof(struct fsldma_device), GFP_KERNEL); @@ -216,15 +205,14 @@ static int fsl_dma_uio_probe(struct platform_device *dev) return -ENOMEM; } - fdev->dma_id = *cell; + fdev->dma_id = dmaid++; fdev->dev = dev; INIT_LIST_HEAD(&fdev->ch_list); dev_set_drvdata(&dev->dev, fdev); for_each_child_of_node(dev->dev.of_node, child) if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) - fsl_dma_chan_probe(fdev, child); - + fsl_dma_chan_probe(fdev, child, chanid++); return 0; } @@ -244,9 +232,8 @@ static int fsl_dma_uio_remove(struct platform_device *dev) static const struct of_device_id fsl_of_dma_match[] = { - { - .compatible = "fsl,eloplus-dma", - }, + { .compatible = "fsl,elo3-dma", }, + { .compatible = "fsl,eloplus-dma", }, {} }; -- cgit v0.10.2 From 146a574441e18973aae99325111f268f11335944 Mon Sep 17 00:00:00 2001 From: Bogdan Purcareata Date: Wed, 26 Mar 2014 12:12:49 -0400 Subject: Enable magic SysRq key for 
P1010 family boards This enables certain investigation facilities when the system is stuck. The option is present for P4 boards (corenet32_smp_defconfig) and T4 boards (85xx/e6500rev2_defconfig). In particular, LXC containers use it by default. Signed-off-by: Bogdan Purcareata Change-Id: I7aadd49f1966a2ef8055baad7c874798323de4ca Reviewed-on: http://git.am.freescale.net:8181/10455 Tested-by: Review Code-CDREVIEW Reviewed-by: Mihai Claudiu Caraman Reviewed-by: Jose Rivera diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig index 7c8904d..7e4a6dd 100644 --- a/arch/powerpc/configs/mpc85xx_defconfig +++ b/arch/powerpc/configs/mpc85xx_defconfig @@ -264,6 +264,7 @@ CONFIG_LZO_DECOMPRESS=y CONFIG_CRYPTO_DEFLATE=y CONFIG_CRYPTO_LZO=y CONFIG_DEBUG_FS=y +CONFIG_MAGIC_SYSRQ=y CONFIG_DETECT_HUNG_TASK=y CONFIG_DEBUG_INFO=y CONFIG_CRYPTO_PCBC=m -- cgit v0.10.2 From c20dabae52b3fc3ffc04d4ae3fd5895b86356d0f Mon Sep 17 00:00:00 2001 From: Bogdan Purcareata Date: Fri, 28 Mar 2014 07:36:35 -0400 Subject: Enable magic SysRq key for P2020 family boards This enables certain investigation facilities when the system is stuck. The option is present for P4 boards (corenet32_smp_defconfig) and T4 boards (85xx/e6500rev2_defconfig). In particular, LXC containers use it by default. 
Signed-off-by: Bogdan Purcareata Change-Id: Ia761fb981ba040ed8e9b00c68ad94e95b2790189 Reviewed-on: http://git.am.freescale.net:8181/10456 Tested-by: Review Code-CDREVIEW Reviewed-by: Mihai Claudiu Caraman Reviewed-by: Jose Rivera diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig index 0396c23..7eb8788 100644 --- a/arch/powerpc/configs/mpc85xx_smp_defconfig +++ b/arch/powerpc/configs/mpc85xx_smp_defconfig @@ -276,6 +276,7 @@ CONFIG_LZO_DECOMPRESS=y CONFIG_CRYPTO_DEFLATE=y CONFIG_CRYPTO_LZO=y CONFIG_DEBUG_FS=y +CONFIG_MAGIC_SYSRQ=y CONFIG_DETECT_HUNG_TASK=y CONFIG_DEBUG_INFO=y CONFIG_CRYPTO_PCBC=m -- cgit v0.10.2 From f710dc6e819473025e635d5887a035070effb175 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Thu, 14 Nov 2013 21:10:16 -0800 Subject: fork: Allow CLONE_PARENT after setns(CLONE_NEWPID) Serge Hallyn writes: > Hi Oleg, > > commit 40a0d32d1eaffe6aac7324ca92604b6b3977eb0e : > "fork: unify and tighten up CLONE_NEWUSER/CLONE_NEWPID checks" > breaks lxc-attach in 3.12. That code forks a child which does > setns() and then does a clone(CLONE_PARENT). That way the > grandchild can be in the right namespaces (which the child was > not) and be a child of the original task, which is the monitor. > > lxc-attach in 3.11 was working fine with no side effects that I > could see. Is there a real danger in allowing CLONE_PARENT > when current->nsproxy->pidns_for_children is not our pidns, > or was this done out of an "over-abundance of caution"? Can we > safely revert that new extra check? The two fundamental things I know we can not allow are: - A shared signal queue aka CLONE_THREAD. Because we compute the pid and uid of the signal when we place it in the queue. - Changing the pid and by extention pid_namespace of an existing process. From a parents perspective there is nothing special about the pid namespace, to deny CLONE_PARENT, because the parent simply won't know or care. 
From the childs perspective all that is special really are shared signal queues. User mode threading with CLONE_PARENT|CLONE_VM|CLONE_SIGHAND and tasks in different pid namespaces is almost certainly going to break because it is complicated. But shared signal handlers can look at per thread information to know which pid namespace a process is in, so I don't know of any reason not to support CLONE_PARENT|CLONE_VM|CLONE_SIGHAND threads at the kernel level. It would be absolutely stupid to implement but that is a different thing. So hmm. Because it can do no harm, and because it is a regression let's remove the CLONE_PARENT check and send it stable. Change-Id: Idf491465e6d2755cd18086f49c475d36dae6570a Cc: stable@vger.kernel.org Acked-by: Oleg Nesterov Acked-by: Andy Lutomirski Acked-by: Serge E. Hallyn Signed-off-by: "Eric W. Biederman" (cherry picked from commit 1f7f4dde5c945f41a7abc2285be43d918029ecc5) Acked-by: Bogdan Purcareata Change-Id: Idf491465e6d2755cd18086f49c475d36dae6570a Reviewed-on: http://git.am.freescale.net:8181/10797 Tested-by: Review Code-CDREVIEW Reviewed-by: Mihai Claudiu Caraman Reviewed-by: Jose Rivera diff --git a/kernel/fork.c b/kernel/fork.c index 728d5be..f82fa2e 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1171,7 +1171,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, * do not allow it to share a thread group or signal handlers or * parent with the forking task. 
*/ - if (clone_flags & (CLONE_SIGHAND | CLONE_PARENT)) { + if (clone_flags & CLONE_SIGHAND) { if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) || (task_active_pid_ns(current) != current->nsproxy->pid_ns_for_children)) -- cgit v0.10.2 From f584822a2ed39f757ca290b464bef7cdd7bb9ab3 Mon Sep 17 00:00:00 2001 From: Cristian Sovaiala Date: Thu, 10 Apr 2014 15:47:10 +0300 Subject: fmd: Unmask normal Fman interrupt when entering sleep mode Magic Packet raises normal Fman interrupt so unmask it when the system enters sleep/deep sleep mode such that the system can be woken up. Signed-off-by: Cristian Sovaiala Change-Id: I6325493dad3d0ea54a34575d6a24229fd351fbde Reviewed-on: http://git.am.freescale.net:8181/10888 Reviewed-by: Mandy Lavi Reviewed-by: Madalin-Cristian Bucur Tested-by: Madalin-Cristian Bucur (cherry picked from commit a55ee50a7e6907561f21015ba8dc6a022b46722e) Reviewed-on: http://git.am.freescale.net:8181/10924 diff --git a/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_fm.c b/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_fm.c index 7c09d8b..0da272b 100755 --- a/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_fm.c +++ b/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_fm.c @@ -790,7 +790,7 @@ static t_Error ConfigureFmDev(t_LnxWrpFmDev *p_LnxWrpFmDev) if (unlikely(_errno < 0)) RETURN_ERROR(MAJOR, E_INVALID_STATE, ("can_request_irq() = %d", _errno)); #endif - _errno = devm_request_irq(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->irq, fm_irq, 0, "fman", p_LnxWrpFmDev); + _errno = devm_request_irq(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->irq, fm_irq, IRQF_NO_SUSPEND, "fman", p_LnxWrpFmDev); if (unlikely(_errno < 0)) RETURN_ERROR(MAJOR, E_INVALID_STATE, ("request_irq(%d) = %d", p_LnxWrpFmDev->irq, _errno)); -- cgit v0.10.2 From 6f834514194a11e2e1b17a7a4e6e17a81969378d Mon Sep 17 00:00:00 2001 From: Claudiu Manoil Date: Wed, 2 Apr 2014 17:15:56 +0300 Subject: gianfar: Fix suspend/resume for wol magic packet Don't detach the interfaces that are already 
down. Use correct sequence to stop Tx traffic and to prevent Tx timeout, including napi disabling. Use netif_tx_lock() to prevent races while stopping Tx, replacing the driver specific lock_tx_qs() which is not correct for this purpose. Use gfar_halt() to correctly stop the traffic at controller level (i.e. graceful stop the DMA). Fix the invalid device references for the wakeup routines, from the invalid &dev->dev references to the correct &ofdev->dev (or priv->dev) references. Remove buggy device_set_wakeup_enable() from the open() routine. Only the ethtool is allowed to enable/disable the wol capabilities. Remove superfluous priv->bflock lock, as it's not justified. Use IRQF_NO_SUSPEND to be able to wake up the system by magic packet generated interrupts. Change-Id: If9b4a878aa3ee6df8867bab9ca19d96b731e9fa9 Signed-off-by: Claudiu Manoil Reviewed-on: http://git.am.freescale.net:8181/10728 Tested-by: Review Code-CDREVIEW Reviewed-by: Jeffrey Ladouceur Reviewed-by: Jose Rivera diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index d1eb25a..02f9170 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -1298,7 +1298,6 @@ static int gfar_probe(struct platform_device *ofdev) priv->dev = &ofdev->dev; SET_NETDEV_DEV(dev, &ofdev->dev); - spin_lock_init(&priv->bflock); INIT_WORK(&priv->reset_task, gfar_reset_task); platform_set_drvdata(ofdev, priv); @@ -1392,9 +1391,8 @@ static int gfar_probe(struct platform_device *ofdev) /* Carrier starts down, phylib will bring it up */ netif_carrier_off(dev); - device_init_wakeup(&dev->dev, - priv->device_flags & - FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); + device_set_wakeup_capable(&ofdev->dev, priv->device_flags & + FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); /* fill out IRQ number and name fields */ for (i = 0; i < priv->num_grps; i++) { @@ -1469,48 +1467,37 @@ static int gfar_suspend(struct device *dev) struct gfar_private *priv = 
dev_get_drvdata(dev); struct net_device *ndev = priv->ndev; struct gfar __iomem *regs = priv->gfargrp[0].regs; - unsigned long flags; u32 tempval; - int magic_packet = priv->wol_en && (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); + if (!netif_running(ndev)) + return 0; + + disable_napi(priv); + netif_tx_lock(ndev); netif_device_detach(ndev); + netif_tx_unlock(ndev); - if (netif_running(ndev)) { + gfar_halt(priv); - local_irq_save(flags); - lock_tx_qs(priv); + if (magic_packet) { + /* Enable interrupt on Magic Packet */ + gfar_write(®s->imask, IMASK_MAG); - gfar_halt_nodisable(priv); + /* Enable Magic Packet mode */ + tempval = gfar_read(®s->maccfg2); + tempval |= MACCFG2_MPEN; + gfar_write(®s->maccfg2, tempval); - /* Disable Tx, and Rx if wake-on-LAN is disabled. */ + /* re-enable the Rx block */ tempval = gfar_read(®s->maccfg1); - - tempval &= ~MACCFG1_TX_EN; - - if (!magic_packet) - tempval &= ~MACCFG1_RX_EN; - + tempval |= MACCFG1_RX_EN; gfar_write(®s->maccfg1, tempval); - unlock_tx_qs(priv); - local_irq_restore(flags); - - disable_napi(priv); - - if (magic_packet) { - /* Enable interrupt on Magic Packet */ - gfar_write(®s->imask, IMASK_MAG); - - /* Enable Magic Packet mode */ - tempval = gfar_read(®s->maccfg2); - tempval |= MACCFG2_MPEN; - gfar_write(®s->maccfg2, tempval); - } else { - phy_stop(priv->phydev); - } + } else { + phy_stop(priv->phydev); } return 0; @@ -1521,37 +1508,26 @@ static int gfar_resume(struct device *dev) struct gfar_private *priv = dev_get_drvdata(dev); struct net_device *ndev = priv->ndev; struct gfar __iomem *regs = priv->gfargrp[0].regs; - unsigned long flags; u32 tempval; int magic_packet = priv->wol_en && (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); - if (!netif_running(ndev)) { - netif_device_attach(ndev); + if (!netif_running(ndev)) return 0; - } - if (!magic_packet && priv->phydev) + if (magic_packet) { + /* Disable Magic Packet mode */ + tempval = gfar_read(®s->maccfg2); + tempval &= ~MACCFG2_MPEN; + 
gfar_write(®s->maccfg2, tempval); + } else { phy_start(priv->phydev); - - /* Disable Magic Packet mode, in case something - * else woke us up. - */ - local_irq_save(flags); - lock_tx_qs(priv); - - tempval = gfar_read(®s->maccfg2); - tempval &= ~MACCFG2_MPEN; - gfar_write(®s->maccfg2, tempval); + } gfar_start(priv); - unlock_tx_qs(priv); - local_irq_restore(flags); - netif_device_attach(ndev); - enable_napi(priv); return 0; @@ -1967,7 +1943,8 @@ static int register_grp_irqs(struct gfar_priv_grp *grp) /* Install our interrupt handlers for Error, * Transmit, and Receive */ - err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0, + err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, + IRQF_NO_SUSPEND, gfar_irq(grp, ER)->name, grp); if (err < 0) { netif_err(priv, intr, dev, "Can't get IRQ %d\n", @@ -1990,7 +1967,8 @@ static int register_grp_irqs(struct gfar_priv_grp *grp) goto rx_irq_fail; } } else { - err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0, + err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, + IRQF_NO_SUSPEND, gfar_irq(grp, TX)->name, grp); if (err < 0) { netif_err(priv, intr, dev, "Can't get IRQ %d\n", @@ -2091,8 +2069,6 @@ static int gfar_enet_open(struct net_device *dev) if (err) return err; - device_set_wakeup_enable(&dev->dev, priv->wol_en); - return err; } void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb) diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h index 44a32ac..0afd4a8 100644 --- a/drivers/net/ethernet/freescale/gianfar.h +++ b/drivers/net/ethernet/freescale/gianfar.h @@ -1302,9 +1302,6 @@ struct gfar_private { int oldduplex; int oldlink; - /* Bitfield update lock */ - spinlock_t bflock; - uint32_t msg_enable; struct work_struct reset_task; diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index f2dfa10..2b980d5 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ 
b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -646,7 +646,6 @@ static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct gfar_private *priv = netdev_priv(dev); - unsigned long flags; if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && wol->wolopts != 0) @@ -655,11 +654,9 @@ static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) if (wol->wolopts & ~WAKE_MAGIC) return -EINVAL; - device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC); + device_set_wakeup_enable(priv->dev, wol->wolopts & WAKE_MAGIC); - spin_lock_irqsave(&priv->bflock, flags); - priv->wol_en = !!device_may_wakeup(&dev->dev); - spin_unlock_irqrestore(&priv->bflock, flags); + priv->wol_en = !!device_may_wakeup(priv->dev); return 0; } -- cgit v0.10.2 From 4c21b72aceee0a6a182247442a3b4c307b2c6170 Mon Sep 17 00:00:00 2001 From: Claudiu Manoil Date: Fri, 4 Apr 2014 13:55:26 +0300 Subject: gianfar: Enable all wol ports as wakeup event sources Not all the eTSECs have an active clock by default when the devices enter suspend state (i.e. sleep power state). This property is however configurable, and all the eTSECs with wake-on-lan capabilities can be enabled as wakeup event sources. Use the PMC API to enable all the eTSEC ports, which are capable of wake-on-lan, as wakeup event sources. 
Cc: Li Yang Cc: Zhao Chenhui Change-Id: I7bd3db14173ddbe3cdd64bcffeb6c72adfcbf2c7 Signed-off-by: Claudiu Manoil Reviewed-on: http://git.am.freescale.net:8181/10729 Tested-by: Review Code-CDREVIEW Reviewed-by: Yang Li Reviewed-by: Jose Rivera diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 2b980d5..2160ec5 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -42,6 +42,9 @@ #include #include #include +#ifdef CONFIG_PM +#include +#endif #include "gianfar.h" @@ -646,6 +649,7 @@ static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct gfar_private *priv = netdev_priv(dev); + int err; if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && wol->wolopts != 0) @@ -656,6 +660,12 @@ static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) device_set_wakeup_enable(priv->dev, wol->wolopts & WAKE_MAGIC); + err = mpc85xx_pmc_set_wake(priv->dev, wol->wolopts & WAKE_MAGIC); + if (err) { + device_set_wakeup_enable(priv->dev, false); + return err; + } + priv->wol_en = !!device_may_wakeup(priv->dev); return 0; -- cgit v0.10.2 From 990b140a53f828b3c0638636a469879f0b58e094 Mon Sep 17 00:00:00 2001 From: Claudiu Manoil Date: Thu, 3 Apr 2014 18:11:22 +0300 Subject: gianfar: Add framework for "wake-on-filer" This framework enables eTSEC's filer and the FGPI Rx interrupt (Filer General Purpose Interrupt) as a wakeup source event. Upon entering suspend state, the eTSEC filer can be programmed with various match rules for the Rx packets. For example, the rules could be matching incoming unicast or arp packets. If a packet matches one of the rules, it will be enqueued in the Rx ring and a FGPI interrupt will be generated by the filer to wakeup the system. The packet types not matching the rules will be dropped. 
The rules need to be added as filer scripts inside gfar_filer_config_wol() to implement different WOL capabilities. The "fsl,wake-on-filer" DT binding limits this capability to certain platforms only. Cc: Li Yang Cc: Zhao Chenhui Change-Id: Ida38d2210975a8523e4da4fc4667de4380c2b9d4 Signed-off-by: Claudiu Manoil Reviewed-on: http://git.am.freescale.net:8181/10730 Reviewed-by: Yang Li Tested-by: Review Code-CDREVIEW Reviewed-by: Jose Rivera diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 02f9170..7f9a2ed 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -148,6 +148,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit); static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue); static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int amount_pull, struct napi_struct *napi); +static int __gfar_is_rx_idle(struct gfar_private *priv); static void gfar_halt_nodisable(struct gfar_private *priv); static void gfar_clear_exact_match(struct net_device *dev); static void gfar_set_mac_for_addr(struct net_device *dev, int num, @@ -358,7 +359,7 @@ static void gfar_mac_rx_config(struct gfar_private *priv) u32 rctrl = 0; if (priv->rx_filer_enable) { - rctrl |= RCTRL_FILREN; + rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT; /* Program the RIR0 reg with the required distribution */ if (priv->poll_mode == GFAR_SQ_POLLING) gfar_write(®s->rir0, DEFAULT_2RXQ_RIR0); @@ -383,10 +384,10 @@ static void gfar_mac_rx_config(struct gfar_private *priv) /* Enable HW time stamping if requested from user space */ if (priv->hwts_rx_en) - rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE; + rctrl |= RCTRL_TS_ENABLE; if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) - rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT; + rctrl |= RCTRL_VLEX; /* Init rctrl based on our settings */ gfar_write(®s->rctrl, rctrl); @@ -897,6 +898,9 @@ static int gfar_of_init(struct 
platform_device *ofdev, struct net_device **pdev) if (of_get_property(np, "fsl,magic-packet", NULL)) priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET; + if (of_get_property(np, "fsl,wake-on-filer", NULL)) + priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER; + priv->phy_node = of_parse_phandle(np, "phy-handle", 0); /* Find the TBI PHY. If it's not there, we don't support SGMII */ @@ -1391,8 +1395,14 @@ static int gfar_probe(struct platform_device *ofdev) /* Carrier starts down, phylib will bring it up */ netif_carrier_off(dev); - device_set_wakeup_capable(&ofdev->dev, priv->device_flags & - FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) + priv->wol_supported |= GFAR_WOL_MAGIC; + + if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) && + priv->rx_filer_enable) + priv->wol_supported |= 0; + + device_set_wakeup_capable(&ofdev->dev, priv->wol_supported); /* fill out IRQ number and name fields */ for (i = 0; i < priv->num_grps; i++) { @@ -1462,15 +1472,130 @@ static int gfar_remove(struct platform_device *ofdev) #ifdef CONFIG_PM +static void __gfar_filer_disable(struct gfar_private *priv) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 temp; + + temp = gfar_read(®s->rctrl); + temp &= ~(RCTRL_FILREN | RCTRL_PRSDEP_INIT); + gfar_write(®s->rctrl, temp); +} + +static void __gfar_filer_enable(struct gfar_private *priv) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 temp; + + temp = gfar_read(®s->rctrl); + temp |= RCTRL_FILREN | RCTRL_PRSDEP_INIT; + gfar_write(®s->rctrl, temp); +} + +static void gfar_filer_config_wol(struct gfar_private *priv) +{ + u32 rqfcr, rqfpr; + unsigned int i; + + __gfar_filer_disable(priv); + + /* init filer table */ + rqfcr = RQFCR_RJE | RQFCR_CMP_MATCH; + rqfpr = 0x0; + for (i = 0; i <= MAX_FILER_IDX; i++) + gfar_write_filer(priv, i, rqfcr, rqfpr); + + __gfar_filer_enable(priv); +} + +static void gfar_filer_restore_table(struct gfar_private *priv) +{ + 
u32 rqfcr, rqfpr; + unsigned int i; + + __gfar_filer_disable(priv); + + for (i = 0; i <= MAX_FILER_IDX; i++) { + rqfcr = priv->ftp_rqfcr[i]; + rqfpr = priv->ftp_rqfpr[i]; + gfar_write_filer(priv, i, rqfcr, rqfpr); + } + + __gfar_filer_enable(priv); +} + +void gfar_start_wol_filer(struct gfar_private *priv) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 tempval; + int i = 0; + + /* Enable Rx hw queues */ + gfar_write(®s->rqueue, priv->rqueue); + + /* Initialize DMACTRL to have WWR and WOP */ + tempval = gfar_read(®s->dmactrl); + tempval |= DMACTRL_INIT_SETTINGS; + gfar_write(®s->dmactrl, tempval); + + /* Make sure we aren't stopped */ + tempval = gfar_read(®s->dmactrl); + tempval &= ~DMACTRL_GRS; + gfar_write(®s->dmactrl, tempval); + + for (i = 0; i < priv->num_grps; i++) { + regs = priv->gfargrp[i].regs; + /* Clear RHLT, so that the DMA starts polling now */ + gfar_write(®s->rstat, priv->gfargrp[i].rstat); + /* enable the filer general purpose interrupts */ + gfar_write(®s->imask, IMASK_FGPI); + } + + /* Enable Rx/Tx DMA */ + tempval = gfar_read(®s->maccfg1); + tempval |= MACCFG1_RX_EN; + gfar_write(®s->maccfg1, tempval); +} + +void gfar_halt_wol_filer(struct gfar_private *priv) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 tempval; + + /* Dissable the Rx hw queues */ + gfar_write(®s->rqueue, 0); + + gfar_ints_disable(priv); + + /* Stop the DMA, and wait for it to stop */ + tempval = gfar_read(®s->dmactrl); + if (!(tempval & DMACTRL_GRS)) { + int ret; + + tempval |= DMACTRL_GRS; + gfar_write(®s->dmactrl, tempval); + + do { + ret = spin_event_timeout((gfar_read(®s->ievent) & + IEVENT_GRSC), 1000000, 0); + if (!ret && !(gfar_read(®s->ievent) & IEVENT_GRSC)) + ret = __gfar_is_rx_idle(priv); + } while (!ret); + } + + /* Disable Rx DMA */ + tempval = gfar_read(®s->maccfg1); + tempval &= ~MACCFG1_RX_EN; + gfar_write(®s->maccfg1, tempval); +} + static int gfar_suspend(struct device *dev) { struct gfar_private *priv = dev_get_drvdata(dev); 
struct net_device *ndev = priv->ndev; struct gfar __iomem *regs = priv->gfargrp[0].regs; u32 tempval; - int magic_packet = priv->wol_en && - (priv->device_flags & - FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); + u16 wol = priv->wol_opts; if (!netif_running(ndev)) return 0; @@ -1482,7 +1607,7 @@ static int gfar_suspend(struct device *dev) gfar_halt(priv); - if (magic_packet) { + if (wol & GFAR_WOL_MAGIC) { /* Enable interrupt on Magic Packet */ gfar_write(®s->imask, IMASK_MAG); @@ -1496,6 +1621,10 @@ static int gfar_suspend(struct device *dev) tempval |= MACCFG1_RX_EN; gfar_write(®s->maccfg1, tempval); + } else if (wol & (GFAR_WOL_FILER_UCAST | GFAR_WOL_FILER_ARP)) { + gfar_filer_config_wol(priv); + gfar_start_wol_filer(priv); + } else { phy_stop(priv->phydev); } @@ -1509,18 +1638,21 @@ static int gfar_resume(struct device *dev) struct net_device *ndev = priv->ndev; struct gfar __iomem *regs = priv->gfargrp[0].regs; u32 tempval; - int magic_packet = priv->wol_en && - (priv->device_flags & - FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); + u16 wol = priv->wol_opts; if (!netif_running(ndev)) return 0; - if (magic_packet) { + if (wol & GFAR_WOL_MAGIC) { /* Disable Magic Packet mode */ tempval = gfar_read(®s->maccfg2); tempval &= ~MACCFG2_MPEN; gfar_write(®s->maccfg2, tempval); + + } else if (wol & (GFAR_WOL_FILER_UCAST | GFAR_WOL_FILER_ARP)) { + gfar_halt_wol_filer(priv); + gfar_filer_restore_table(priv); + } else { phy_start(priv->phydev); } @@ -1959,7 +2091,8 @@ static int register_grp_irqs(struct gfar_priv_grp *grp) gfar_irq(grp, TX)->irq); goto tx_irq_fail; } - err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0, + err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, + IRQF_NO_SUSPEND, gfar_irq(grp, RX)->name, grp); if (err < 0) { netif_err(priv, intr, dev, "Can't get IRQ %d\n", @@ -2610,7 +2743,14 @@ irqreturn_t gfar_receive(int irq, void *grp_id) { struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id; unsigned long flags; - u32 imask; + u32 imask, ievent; + + ievent = 
gfar_read(&grp->regs->ievent); + + if (unlikely(ievent & IEVENT_FGPI)) { + gfar_write(&grp->regs->ievent, IEVENT_FGPI); + return IRQ_HANDLED; + } if (likely(napi_schedule_prep(&grp->napi_rx))) { spin_lock_irqsave(&grp->grplock, flags); diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h index 0afd4a8..1d1b9b1 100644 --- a/drivers/net/ethernet/freescale/gianfar.h +++ b/drivers/net/ethernet/freescale/gianfar.h @@ -406,6 +406,7 @@ extern const char gfar_driver_version[]; #define IEVENT_MAG 0x00000800 #define IEVENT_GRSC 0x00000100 #define IEVENT_RXF0 0x00000080 +#define IEVENT_FGPI 0x00000010 #define IEVENT_FIR 0x00000008 #define IEVENT_FIQ 0x00000004 #define IEVENT_DPE 0x00000002 @@ -438,6 +439,7 @@ extern const char gfar_driver_version[]; #define IMASK_MAG 0x00000800 #define IMASK_GRSC 0x00000100 #define IMASK_RXFEN0 0x00000080 +#define IMASK_FGPI 0x00000010 #define IMASK_FIR 0x00000008 #define IMASK_FIQ 0x00000004 #define IMASK_DPE 0x00000002 @@ -606,6 +608,10 @@ extern const char gfar_driver_version[]; #define GFAR_INT_NAME_MAX (IFNAMSIZ + 6) /* '_g#_xx' */ +#define GFAR_WOL_MAGIC 0x00000001 +#define GFAR_WOL_FILER_UCAST 0x00000002 +#define GFAR_WOL_FILER_ARP 0x00000004 + struct txbd8 { union { @@ -1056,6 +1062,7 @@ struct gfar { #define FSL_GIANFAR_DEV_HAS_BD_STASHING 0x00000200 #define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400 #define FSL_GIANFAR_DEV_HAS_TIMER 0x00000800 +#define FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER 0x00001000 #if defined CONFIG_FSL_GIANFAR_1588 #define FSL_GIANFAR_DEV_HAS_TS_TO_BUFFER 0x00001000 @@ -1311,8 +1318,6 @@ struct gfar_private { extended_hash:1, bd_stash_en:1, rx_filer_enable:1, - /* Wake-on-LAN enabled */ - wol_en:1, /* Enable priorty based Tx scheduling in Hw */ prio_sched_en:1, /* Flow control flags */ @@ -1341,6 +1346,10 @@ struct gfar_private { u32 __iomem *hash_regs[16]; int hash_width; + /* wake-on-lan settings */ + u16 wol_opts; + u16 wol_supported; + /*Filer table*/ unsigned int 
ftp_rqfpr[MAX_FILER_IDX + 1]; unsigned int ftp_rqfcr[MAX_FILER_IDX + 1]; diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 2160ec5..e2bf5d1 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -638,36 +638,58 @@ static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct gfar_private *priv = netdev_priv(dev); - if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) { - wol->supported = WAKE_MAGIC; - wol->wolopts = priv->wol_en ? WAKE_MAGIC : 0; - } else { - wol->supported = wol->wolopts = 0; - } + wol->supported = wol->wolopts = 0; + + if (priv->wol_supported & GFAR_WOL_MAGIC) + wol->supported |= WAKE_MAGIC; + + if (priv->wol_supported & GFAR_WOL_FILER_UCAST) + wol->supported |= WAKE_UCAST; + + if (priv->wol_supported & GFAR_WOL_FILER_ARP) + wol->supported |= WAKE_ARP; + + if (priv->wol_opts & GFAR_WOL_MAGIC) + wol->wolopts |= WAKE_MAGIC; + + if (priv->wol_opts & GFAR_WOL_FILER_UCAST) + wol->wolopts |= WAKE_UCAST; + + if (priv->wol_opts & GFAR_WOL_FILER_ARP) + wol->wolopts |= WAKE_ARP; } static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct gfar_private *priv = netdev_priv(dev); int err; + u16 wol_opts = 0; - if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && - wol->wolopts != 0) + if (!priv->wol_supported && wol->wolopts) return -EINVAL; - if (wol->wolopts & ~WAKE_MAGIC) + if (wol->wolopts & ~(WAKE_MAGIC | WAKE_UCAST | WAKE_ARP)) return -EINVAL; - device_set_wakeup_enable(priv->dev, wol->wolopts & WAKE_MAGIC); + if (wol->wolopts & WAKE_MAGIC) { + wol_opts |= GFAR_WOL_MAGIC; + } else { + if (wol->wolopts & WAKE_UCAST) + wol_opts |= GFAR_WOL_FILER_UCAST; + if (wol->wolopts & WAKE_ARP) + wol_opts |= GFAR_WOL_FILER_ARP; + } + + priv->wol_opts = wol_opts & priv->wol_supported; + + device_set_wakeup_enable(priv->dev, priv->wol_opts); - err = 
mpc85xx_pmc_set_wake(priv->dev, wol->wolopts & WAKE_MAGIC); + err = mpc85xx_pmc_set_wake(priv->dev, priv->wol_opts); if (err) { device_set_wakeup_enable(priv->dev, false); return err; } - priv->wol_en = !!device_may_wakeup(priv->dev); - return 0; } #endif -- cgit v0.10.2 From 44b712b2ef30281db929e6a427fae23bddacef0d Mon Sep 17 00:00:00 2001 From: Claudiu Manoil Date: Thu, 3 Apr 2014 18:51:27 +0300 Subject: gianfar: Add WAKE_UCAST wol support Add wake-on-lan by L2 unicast packets. This feature is based on the "wake-on-filer" support. A filer rule was added to match the incoming UCAST packets during system suspend state and to trigger the FGPI interupt to wakeup the system. Cc: Li Yang Cc: Zhao Chenhui Change-Id: I04d4b4a9501ea2d8f75b3e0794c220ac95b0d679 Signed-off-by: Claudiu Manoil Reviewed-on: http://git.am.freescale.net:8181/10731 Tested-by: Review Code-CDREVIEW Reviewed-by: Yang Li Reviewed-by: Jose Rivera diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 7f9a2ed..a93f275 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -1400,7 +1400,7 @@ static int gfar_probe(struct platform_device *ofdev) if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) && priv->rx_filer_enable) - priv->wol_supported |= 0; + priv->wol_supported |= GFAR_WOL_FILER_UCAST; device_set_wakeup_capable(&ofdev->dev, priv->wol_supported); @@ -1496,6 +1496,7 @@ static void gfar_filer_config_wol(struct gfar_private *priv) { u32 rqfcr, rqfpr; unsigned int i; + u8 rqfcr_queue; __gfar_filer_disable(priv); @@ -1505,6 +1506,32 @@ static void gfar_filer_config_wol(struct gfar_private *priv) for (i = 0; i <= MAX_FILER_IDX; i++) gfar_write_filer(priv, i, rqfcr, rqfpr); + i = 0; + /* select a rx queue in group 0 */ + rqfcr_queue = (u8)find_first_bit(&priv->gfargrp[0].rx_bit_map, + priv->num_rx_queues); + + if (priv->wol_opts & GFAR_WOL_FILER_UCAST) { + /* Unicast packet, accept it */ + struct 
net_device *ndev = priv->ndev; + u32 dest_mac_addr = (ndev->dev_addr[0] << 16) | + (ndev->dev_addr[1] << 8) | + ndev->dev_addr[2]; + + rqfcr = (rqfcr_queue << 10) | RQFCR_AND | + RQFCR_CMP_EXACT | RQFCR_PID_DAH; + rqfpr = dest_mac_addr; + gfar_write_filer(priv, i++, rqfcr, rqfpr); + + dest_mac_addr = (ndev->dev_addr[3] << 16) | + (ndev->dev_addr[4] << 8) | + ndev->dev_addr[5]; + rqfcr = (rqfcr_queue << 10) | RQFCR_GPI | + RQFCR_CMP_EXACT | RQFCR_PID_DAL; + rqfpr = dest_mac_addr; + gfar_write_filer(priv, i++, rqfcr, rqfpr); + } + __gfar_filer_enable(priv); } -- cgit v0.10.2 From 52f28f035d5ab0c912299b96220ae81112d94c11 Mon Sep 17 00:00:00 2001 From: Claudiu Manoil Date: Fri, 4 Apr 2014 17:22:33 +0300 Subject: gianfar: Add WAKE_ARP wol support Add wake-on-lan by ARP request packets. This feature is based on the "wake-on-filer" support. A filer rule was added to match the incoming ARP request packets directed to the first primary IP address of the receiving interface. Upon successful match during system suspend state a FGPI interupt is triggered to wakeup the system. 
Cc: Li Yang Cc: Zhao Chenhui Change-Id: Ic00630a19a15d011ef0ee1eada660391f96018f4 Signed-off-by: Claudiu Manoil Reviewed-on: http://git.am.freescale.net:8181/10732 Tested-by: Review Code-CDREVIEW Reviewed-by: Yang Li Reviewed-by: Jose Rivera diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index a93f275..2cefc49 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -87,6 +87,9 @@ #include #include #include +#ifdef CONFIG_PM +#include +#endif #include #include @@ -1400,7 +1403,8 @@ static int gfar_probe(struct platform_device *ofdev) if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) && priv->rx_filer_enable) - priv->wol_supported |= GFAR_WOL_FILER_UCAST; + priv->wol_supported |= GFAR_WOL_FILER_UCAST | + GFAR_WOL_FILER_ARP; device_set_wakeup_capable(&ofdev->dev, priv->wol_supported); @@ -1492,11 +1496,34 @@ static void __gfar_filer_enable(struct gfar_private *priv) gfar_write(®s->rctrl, temp); } -static void gfar_filer_config_wol(struct gfar_private *priv) +/* Get the first IP address on this chain for this interface + * so that we can configure wakeup with WOL for ARP. 
+ */ +static int gfar_get_ip(struct gfar_private *priv, __be32 *ip_addr) { + struct in_device *in_dev; + int err = -ENOENT; + + rcu_read_lock(); + in_dev = __in_dev_get_rcu(priv->ndev); + if (in_dev != NULL) { + for_primary_ifa(in_dev) { + *ip_addr = ifa->ifa_address; + err = 0; + break; + } endfor_ifa(in_dev); + } + rcu_read_unlock(); + return err; +} + +static int gfar_filer_config_wol(struct gfar_private *priv) +{ + struct net_device *ndev = priv->ndev; u32 rqfcr, rqfpr; unsigned int i; u8 rqfcr_queue; + int err = 0; __gfar_filer_disable(priv); @@ -1513,7 +1540,6 @@ static void gfar_filer_config_wol(struct gfar_private *priv) if (priv->wol_opts & GFAR_WOL_FILER_UCAST) { /* Unicast packet, accept it */ - struct net_device *ndev = priv->ndev; u32 dest_mac_addr = (ndev->dev_addr[0] << 16) | (ndev->dev_addr[1] << 8) | ndev->dev_addr[2]; @@ -1532,7 +1558,40 @@ static void gfar_filer_config_wol(struct gfar_private *priv) gfar_write_filer(priv, i++, rqfcr, rqfpr); } + if (priv->wol_opts & GFAR_WOL_FILER_ARP) { + /* ARP request packet, accept it */ + __be32 ip_addr; + + err = gfar_get_ip(priv, &ip_addr); + if (err) { + netif_err(priv, wol, ndev, "Failed to get ip addr\n"); + goto out; + } + + rqfcr = (rqfcr_queue << 10) | RQFCR_AND | + RQFCR_CMP_EXACT | RQFCR_PID_MASK; + rqfpr = RQFPR_ARQ; + gfar_write_filer(priv, i++, rqfcr, rqfpr); + + rqfcr = (rqfcr_queue << 10) | RQFCR_AND | + RQFCR_CMP_EXACT | RQFCR_PID_PARSE; + rqfpr = RQFPR_ARQ; + gfar_write_filer(priv, i++, rqfcr, rqfpr); + + /* match DEST_IP address in ARP req packet */ + rqfcr = (rqfcr_queue << 10) | RQFCR_AND | + RQFCR_CMP_EXACT | RQFCR_PID_MASK; + rqfpr = FPR_FILER_MASK; + gfar_write_filer(priv, i++, rqfcr, rqfpr); + + rqfcr = (rqfcr_queue << 10) | RQFCR_GPI | + RQFCR_CMP_EXACT | RQFCR_PID_DIA; + rqfpr = ip_addr; + gfar_write_filer(priv, i++, rqfcr, rqfpr); + } +out: __gfar_filer_enable(priv); + return err; } static void gfar_filer_restore_table(struct gfar_private *priv) @@ -1623,6 +1682,7 @@ static int 
gfar_suspend(struct device *dev) struct gfar __iomem *regs = priv->gfargrp[0].regs; u32 tempval; u16 wol = priv->wol_opts; + int err = 0; if (!netif_running(ndev)) return 0; @@ -1649,14 +1709,14 @@ gfar_write(&regs->maccfg1, tempval); } else if (wol & (GFAR_WOL_FILER_UCAST | GFAR_WOL_FILER_ARP)) { - gfar_filer_config_wol(priv); + err = gfar_filer_config_wol(priv); gfar_start_wol_filer(priv); } else { phy_stop(priv->phydev); } - return 0; + return err; } static int gfar_resume(struct device *dev) -- cgit v0.10.2 From 2bff233af530a406f3ff0a0e09b6cade657f3354 Mon Sep 17 00:00:00 2001 From: Hongbo Zhang Date: Thu, 10 Apr 2014 14:18:22 +0800 Subject: fsldma: bug fix for dma suspend During the suspend process, DMA channels are suspended one by one, if one channel cannot be suspended, all the former channels which have been suspended should be restored, i.e. mark the pm_state as running, and release the spin locks acquired. This patch adds the "pm_state = RUNNING" which was omitted in the original suspend patch. Signed-off-by: Hongbo Zhang Change-Id: I924af3384d58d03f6321cb02c28a702367ea6bc1 Reviewed-on: http://git.am.freescale.net:8181/10871 Tested-by: Review Code-CDREVIEW Reviewed-by: Yang Li Reviewed-by: Jeffrey Ladouceur Reviewed-by: Jose Rivera diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index d56d692..7ef96f1 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -1533,6 +1533,7 @@ out: chan = fdev->chan[i]; if (!chan) continue; + chan->pm_state = RUNNING; spin_unlock_bh(&chan->desc_lock); } return -EBUSY; -- cgit v0.10.2 From 1382c06a4e156903682cf2847505c10b01ea5906 Mon Sep 17 00:00:00 2001 From: Jianhua Xie Date: Sun, 24 Nov 2013 17:59:07 +0800 Subject: bonding: LAG with outgoing traffic distribution based on h/w Linux bonding driver provides a method for aggregating multiple network interface controllers (NICs) into a single logical bonded interface of two or more so called (NIC) slaves. 
Slave selection for outgoing traffic is done according to the transmit hash policy, which may be changed from the default simple XOR policy via the xmit_hash_policy option. This selection algorithm in Linux bonding driver is based on software. The QorIQ Data Path Acceleration Architecture (DPAA) is a comprehensive architecture which integrates all aspects of packet processing in the SoC, addressing issues and requirements resulting from the multicore nature of QorIQ SoCs. The DPAA includes Cores, Network and packet I/O, Hardware offload accelerators. Hardware offload accelerators include FMan/BMan/QMan and etc. Offline port is one of FMan elements, which supports (Parse, Classify, Distribute) PCD function on frames extracted frame descriptor (FD). Offline port also can inspect traffic, split it into FQs on ingress, and send traffic from the FQs to the interface on egress by the PCD function. These patches are enhancing Linux kernel LAG (Link Aggregation) with Freescale DPAA value added. The main idea is to utilize offline ports with PCD function to help to distribute outgoing traffics, including outgoing slaves device searching and selection. In other words, patches are using CRC-64 based hashing of Keygen/scheme and the parser result of outgoing frames header information to distribute outgoing frames. Besides the above, after integration this HW based LAG with Freescale CEETMQos, these two features can support hardware based Qos for bundles links rather than individual links. These patches mainly include 2 parts: 'glue logic' and 'kernel space PCD'. The glue logic first probes all available offline ports information via reading dts, including tx fqid/default fqid/errors fqid, pcd fqs, other private data pointer of offline ports for future reusing. 
The glue logic also creates frames from skb and then sends these frames to offline port directly, this offline port will continue to distribute frames from the PCD FQs to the slave interface on egress by the PCD function, rather than select slave device by CPU, neither make slave device driver create frame from skb, nor make slave devices driver send frames. These patches are supporting the mapping among offline ports and available bundles at run-time. PCD based outgoing traffic distribution can be enabled or disabled at run-time by sysfs interface in patches. To do: 1. PCD policy L23/L34 have not been verified. 2. offline port buffer pool/buffer layout will be enhanced. 3. software based L4 csum for now, offline port based L4 csum needs to be fixed. To test this HW based LAG after booting up Linux: cd /sys/class/net/bond0/bonding/ echo 4 >mode cat offline_ports echo fman0-oh@1 > oh_needed_for_hw_distribution cat oh_needed_for_hw_distribution cat oh_en echo 1 >oh_en cat oh_en echo +fm1-gb0 >slaves echo +fm1-gb1 >slaves ifconfig bond0 192.168.10.2/24 up ping 192.168.10.1 -c 5 Signed-off-by: Jianhua Xie Change-Id: I3a6664bfcc9ec9ca3f86a5e36381220c5fcb07cf Reviewed-on: http://git.am.freescale.net:8181/10413 Tested-by: Review Code-CDREVIEW Reviewed-by: Jiafei Pan Reviewed-by: Jose Rivera diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index dfe8a62..eedf3aa 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -197,6 +197,31 @@ config BONDING To compile this driver as a module, choose M here: the module will be called bonding. 
+config HW_DISTRIBUTION_WITH_OH + default n + bool "Bonding driver with h/w based Tx traffic distribuition support" + depends on BONDING + depends on FSL_DPAA_OFFLINE_PORTS + ---help--- + Say 'Y' if you wish to distribute Tx data traffics based on + the hardware of Freescale DPAA - Offline Port PCD in your + mode Link Aggregation (802.3ad mode) + + In theory, this method can offload CPU when selecting the + transmit hash policy to use for slave device selection. + if this feature can integrate with CEETM Qos, they two can + provide a bundle Qos rather than individual links Qos. + +config HW_LAG_DEBUG + default n + bool "Show detailed debug information" + depends on HW_DISTRIBUTION_WITH_OH + ---help--- + This provide a wrapper of pr_info, which can show useful debug + information such as fuction name and line number. + Say 'Y' if you wish to get more details debugging informations + at run-time, otherwise please disable it. + config DUMMY tristate "Dummy net driver support" ---help--- diff --git a/drivers/net/bonding/Makefile b/drivers/net/bonding/Makefile index 5a5d720..0024f07 100644 --- a/drivers/net/bonding/Makefile +++ b/drivers/net/bonding/Makefile @@ -2,6 +2,13 @@ # Makefile for the Ethernet Bonding driver # +include $(srctree)/drivers/net/ethernet/freescale/fman/ncsw_config.mk +ccflags-y += \ + -I$(srctree)/drivers/net/ethernet/freescale/dpa \ + -I$(srctree)/drivers/net/ethernet/freescale/fman/src/wrapper \ + -I$(srctree)/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd \ + -I$(srctree)/drivers/net/ethernet/freescale/fman/Peripherals/FM/inc + obj-$(CONFIG_BONDING) += bonding.o bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o bond_debugfs.o bond_netlink.o bond_options.o @@ -9,3 +16,7 @@ bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o bond_debugfs.o bo proc-$(CONFIG_PROC_FS) += bond_procfs.o bonding-objs += $(proc-y) +hash_pcd_based_xmit_frames_distribution-$(CONFIG_HW_DISTRIBUTION_WITH_OH) += \ + hw_distribution.o + 
+bonding-objs += $(hash_pcd_based_xmit_frames_distribution-y) diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 187b1b7..698ac84 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -2402,6 +2402,15 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev) goto out; } +#ifdef CONFIG_HW_DISTRIBUTION_WITH_OH + hw_lag_dbg("skb->protocol:0x%0x\n", skb->protocol); + /* exclude ARP/LACP */ + if ((bond->slave_cnt == SLAVES_PER_BOND) && + are_all_slaves_linkup(bond) && + (bond->params.ohp) && (bond->params.ohp->oh_en == 1)) + return enqueue_pkt_to_oh(bond, skb, NULL); +#endif + slave_agg_no = bond_xmit_hash(bond, skb, slaves_in_agg); first_ok_slave = NULL; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 36eab0c..77619a6 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1607,6 +1607,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) new_slave->link != BOND_LINK_DOWN ? 
"n up" : " down"); /* enslave is successful */ +#ifdef CONFIG_HW_DISTRIBUTION_WITH_OH + fill_oh_pcd_fqs_with_slave_info(bond, new_slave); + apply_pcd(bond, NO_POLICY); +#endif return 0; /* Undo stages on error */ @@ -1848,6 +1852,9 @@ static int __bond_release_one(struct net_device *bond_dev, slave_dev->priv_flags &= ~IFF_BONDING; +#ifdef CONFIG_HW_DISTRIBUTION_WITH_OH + del_oh_pcd_fqs_with_slave_info(bond, slave); +#endif kfree(slave); return 0; /* deletion OK */ @@ -3164,6 +3171,10 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev, stats->tx_heartbeat_errors += sstats->tx_heartbeat_errors; stats->tx_window_errors += sstats->tx_window_errors; } + +#ifdef CONFIG_HW_DISTRIBUTION_WITH_OH + add_statistics(bond, stats); +#endif read_unlock_bh(&bond->lock); return stats; @@ -3890,6 +3901,9 @@ static const struct device_type bond_type = { static void bond_destructor(struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); +#ifdef CONFIG_HW_DISTRIBUTION_WITH_OH + release_pcd_mem(bond); +#endif if (bond->wq) destroy_workqueue(bond->wq); free_netdev(bond_dev); @@ -4436,7 +4450,10 @@ int bond_create(struct net *net, const char *name) res = register_netdevice(bond_dev); netif_carrier_off(bond_dev); - +#ifdef CONFIG_HW_DISTRIBUTION_WITH_OH + if (res == 0) + init_status(bond_dev); +#endif rtnl_unlock(); if (res < 0) bond_destructor(bond_dev); @@ -4508,6 +4525,13 @@ static int __init bonding_init(void) } register_netdevice_notifier(&bond_netdev_notifier); +#ifdef CONFIG_HW_DISTRIBUTION_WITH_OH + if (get_oh_info()) + pr_err("oh ports probe error, use software distribution\n"); + else + pr_info("get offline ports information ok.\n"); +#endif + out: return res; err: @@ -4526,6 +4550,10 @@ static void __exit bonding_exit(void) bond_netlink_fini(); unregister_pernet_subsys(&bond_net_ops); +#ifdef CONFIG_HW_DISTRIBUTION_WITH_OH + kfree(poh); + hw_lag_dbg("released offline port resources\n"); +#endif #ifdef CONFIG_NET_POLL_CONTROLLER /* 
@@ -4534,8 +4562,13 @@ static void __exit bonding_exit(void) WARN_ON(atomic_read(&netpoll_block_tx)); #endif } - -module_init(bonding_init); +/** + * late init to wait till oh port initilization ready, + * oh port can help distribute outgoing traffics based + * on hardware (FSL DPAA Offline port and PCD). + * module_init(bonding_init); + */ +late_initcall(bonding_init); module_exit(bonding_exit); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index abf5e10..1e591c4 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -329,6 +329,9 @@ static ssize_t bonding_store_xmit_hash(struct device *d, (int)strlen(buf) - 1, buf); ret = -EINVAL; } else { +#ifdef CONFIG_HW_DISTRIBUTION_WITH_OH + apply_pcd(bond, new_value); +#endif bond->params.xmit_policy = new_value; pr_info("%s: setting xmit hash policy to %s (%d).\n", bond->dev->name, @@ -883,6 +886,21 @@ static ssize_t bonding_store_min_links(struct device *d, } static DEVICE_ATTR(min_links, S_IRUGO | S_IWUSR, bonding_show_min_links, bonding_store_min_links); +#ifdef CONFIG_HW_DISTRIBUTION_WITH_OH +static DEVICE_ATTR(offline_port_xmit_statistics, S_IRUGO, + bonding_show_offline_port_xmit_statistics, NULL); + +static DEVICE_ATTR(offline_ports, S_IRUGO, + bonding_show_offline_ports, NULL); + +static DEVICE_ATTR(oh_needed_for_hw_distribution, S_IRUGO | S_IWUSR, + bonding_show_oh_needed_for_hw_distribution, + bonding_store_oh_needed_for_hw_distribution); + +static DEVICE_ATTR(oh_en, S_IRUGO | S_IWUSR, + bonding_show_oh_enable, + bonding_store_oh_enable); +#endif static ssize_t bonding_show_ad_select(struct device *d, struct device_attribute *attr, @@ -1709,6 +1727,12 @@ static struct attribute *per_bond_attrs[] = { &dev_attr_min_links.attr, &dev_attr_lp_interval.attr, &dev_attr_packets_per_slave.attr, +#ifdef CONFIG_HW_DISTRIBUTION_WITH_OH + &dev_attr_offline_ports.attr, + 
&dev_attr_oh_needed_for_hw_distribution.attr, + &dev_attr_oh_en.attr, + &dev_attr_offline_port_xmit_statistics.attr, +#endif NULL, }; diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h index a9f4f9f..a606ea0 100644 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h @@ -25,6 +25,9 @@ #include #include "bond_3ad.h" #include "bond_alb.h" +#ifdef CONFIG_HW_DISTRIBUTION_WITH_OH +#include "hw_distribution.h" +#endif #define DRV_VERSION "3.7.1" #define DRV_RELDATE "April 27, 2011" @@ -167,6 +170,10 @@ struct bond_params { int resend_igmp; int lp_interval; int packets_per_slave; +#ifdef CONFIG_HW_DISTRIBUTION_WITH_OH + struct oh_port_priv *ohp; + struct rtnl_link_stats64 oh_stats; +#endif }; struct bond_parm_tbl { diff --git a/drivers/net/bonding/hw_distribution.c b/drivers/net/bonding/hw_distribution.c new file mode 100644 index 0000000..dce0ea1 --- /dev/null +++ b/drivers/net/bonding/hw_distribution.c @@ -0,0 +1,2276 @@ +/* Copyright 2014 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. 
+ * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#include +#include +#include +#include +#include +#include +#include +#include "hw_distribution.h" +#include "mac.h" +#define to_dev(obj) container_of(obj, struct device, kobj) +#define to_bond(cd) ((struct bonding *)(netdev_priv(to_net_dev(cd)))) +#define master_to_bond(net_dev) ((struct bonding *)(netdev_priv(net_dev))) +/** + * This includes L4 checksum errors, but also other errors that + * the Hard Parser can detect, such as invalid combinations of + * TCP control flags, or bad UDP lengths. + */ +#define FM_L4_PARSE_ERROR 0x10 +/* Check if the hardware parser has run */ +#define FM_L4_HXS_RUN 0xE0 +/** + * Check if the FMan Hardware Parser has run for L4 protocols. + * @parse_result_ptr must be of type (fm_prs_result_t *). + */ +#define fm_l4_hxs_has_run(parse_result_ptr) \ + ((parse_result_ptr)->l4r & FM_L4_HXS_RUN) +/** + * If the FMan Hardware Parser has run for L4 protocols, check + * error status. + * @parse_result_ptr must be of type fm_prs_result_t *). 
+ */ +#define fm_l4_hxs_error(parse_result_ptr) \ + ((parse_result_ptr)->l4r & FM_L4_PARSE_ERROR) + +#define DPA_WRITE_SKB_PTR(skb, skbh, addr, off) \ + { \ + skbh = (struct sk_buff **)addr; \ + *(skbh + (off)) = skb; \ + } + +#define DPA_READ_SKB_PTR(skb, skbh, addr, off) \ + { \ + skbh = (struct sk_buff **)addr; \ + skb = *(skbh + (off)); \ + } + +static const struct of_device_id oh_port_match_table[] = { + { + .compatible = "fsl,dpa-oh" + }, + {} +}; + +struct oh_port_priv *poh; /* Offline port information pointer */ +int available_num_of_oh_ports; +/** + * Sysfs interfaces: + * Show the statistics information by offline port xmit. + */ +ssize_t bonding_show_offline_port_xmit_statistics(struct device *d, + struct device_attribute *attr, char *buf) +{ + int res = 0; + struct bonding *bond = to_bond(d); + + if (bond->params.mode != BOND_MODE_8023AD) { + pr_err("%s: This command only support 802.3ad mode.\n", + bond->dev->name); + return -EPERM; + } + + if (!bond->params.ohp) { + pr_err("error, have not bind an offline port\n"); + + return -EPERM; + } + + if (!bond->params.ohp->oh_en) { + pr_err("error, binded offline port is not enabled.\n"); + + return -EPERM; + } + + res += sprintf(buf + res, "offline port TX packets: %llu\n", + bond->params.oh_stats.tx_packets); + res += sprintf(buf + res, "offline port TX bytes: %llu\n", + bond->params.oh_stats.tx_bytes); + res += sprintf(buf + res, "offline port TX errors: %llu\n", + bond->params.oh_stats.tx_errors); + res += sprintf(buf + res, "offline port TX dropped: %llu\n", + bond->params.oh_stats.tx_dropped); + + if (res) + buf[res-1] = '\n'; /* eat the leftover space */ + return res; +} +/** + * Sysfs interfaces: + * Show all available offline ports can be binded to a bond. 
+ */ +ssize_t bonding_show_offline_ports(struct device *d, + struct device_attribute *attr, char *buf) +{ + int i, res = 0; + struct bonding *bond = to_bond(d); + + if (bond->params.mode != BOND_MODE_8023AD) { + pr_err("%s: This command only support 802.3ad mode.\n", + bond->dev->name); + return -EPERM; + } + + for (i = 0; i < available_num_of_oh_ports; i++) { + if (poh[i].oh_dev) + res += sprintf(buf + res, "%s\n", poh[i].friendname); + } + if (res) + buf[res-1] = '\n'; /* eat the leftover space */ + return res; +} +/** + * Sysfs interfaces: + * Show the offline_port has already attached to the current bond, + * which can help bond to do hardware based slave selection. + */ +ssize_t bonding_show_oh_needed_for_hw_distribution(struct device *d, + struct device_attribute *attr, char *buf) +{ + int res = 0; + struct bonding *bond = to_bond(d); + + if (bond->params.mode != BOND_MODE_8023AD) { + pr_err("%s: This command only support 802.3ad mode.\n", + bond->dev->name); + return -EPERM; + } + + if (!bond->params.ohp) { + pr_err("error, have not bind an offline port\n"); + + return -EPERM; + } + + res += sprintf(buf + res, "%s\n", bond->params.ohp->friendname); + if (res) + buf[res-1] = '\n'; /* eat the leftover space */ + + return res; +} +/** + * System interface: + * Add one Offline port into the current bond for utilizing PCD to + * do TX traffic distribution based on hard ware. + * This codes firt verify the input Offline port name validation, + * then store the Offline port to the current bond->params. 
+ */ +ssize_t bonding_store_oh_needed_for_hw_distribution(struct device *d, + struct device_attribute *attr, const char *buffer, size_t count) +{ + char command[OHFRIENDNAMSIZ + 1] = { 0, }; + int ret = count, i, errno; + struct bonding *bond = to_bond(d); + struct oh_port_priv *tmp = poh; + bool find = false; + + if (bond->params.mode != BOND_MODE_8023AD) { + pr_err("%s: This command only support 802.3ad mode.\n", + bond->dev->name); + return -EPERM; + } + if (bond->slave_cnt > 0) { + pr_err("%s: Detach slaves before change oh binding.\n", + bond->dev->name); + return -EPERM; + } + + if (!rtnl_trylock()) + return restart_syscall(); + + /* OHFRIENDNAMSIZ = 10, there is 10 chars in a command. */ + errno = sscanf(buffer, "%10s", command); + if ((strlen(command) <= 1) || (errno != 1)) + goto err_no_cmd; + + if ((bond->params.ohp) && (bond->params.ohp->friendname[0]) && + strncasecmp(command, bond->params.ohp->friendname, + OHFRIENDNAMSIZ) == 0) { + pr_err("%s: has already used %s.\n", + bond->dev->name, command); + ret = -EPERM; + goto out; + } else + for (i = 0; i < available_num_of_oh_ports; i++) { + if (tmp->oh_dev) { + if (strncasecmp(command, tmp->friendname, + OHFRIENDNAMSIZ) == 0) { + find = true; + bond->params.ohp = tmp; + break; + } else + tmp++; + } + } + + if (!find) + goto err_no_cmd; + + pr_info("bind OH port oh_needed_for_hw_distribution: %s to %s\n", + bond->params.ohp->friendname, bond->dev->name); + + goto out; + +err_no_cmd: + pr_err("%s:bad command or no such OH port,\n" + "please try other OH ports.\n" + "Eg: echo OH0 > oh_needed_for_hw_distribution.\n", + bond->dev->name); + ret = -EPERM; + +out: + rtnl_unlock(); + return ret; +} +/** + * Sysfs interfaces: + * Show whether current offline port binding to the bond is active or not. 
+ */ +ssize_t bonding_show_oh_enable(struct device *d, + struct device_attribute *attr, char *buf) +{ + + int res = 0; + struct bonding *bond = to_bond(d); + uint16_t channel; + unsigned long fman_dcpid, oh_offset, cell_index; + + if (bond->params.mode != BOND_MODE_8023AD) { + pr_err("%s: This command only support 802.3ad mode.\n", + bond->dev->name); + return -EPERM; + } + + if (!bond->params.ohp) { + pr_err("error, have not bind a offline port\n"); + + return -EPERM; + } + + res += sprintf(buf + res, "%d\n", bond->params.ohp->oh_en); + if (res) + buf[res-1] = '\n'; /* eat the leftover space */ + + if ((bond->params.ohp->oh_en) && + (!export_oh_port_info_to_ceetm(bond, &channel, + &fman_dcpid, &oh_offset, &cell_index))) + hw_lag_dbg("offline port channel:%d\n", channel); + + return res; +} +/** + * Sysfs interfaces: + * Set current offline port which is binding to the bond active or not, + * this interface can disable or enable the offline port which is binding + * to a bond at run-time. + */ +ssize_t bonding_store_oh_enable(struct device *d, + struct device_attribute *attr, const char *buffer, + size_t count) +{ + int new_value, ret; + struct bonding *bond = to_bond(d); + + if (bond->params.mode != BOND_MODE_8023AD) { + pr_err("%s: This command only support 802.3ad mode.\n", + bond->dev->name); + return -EPERM; + } + + ret = sscanf(buffer, "%d", &new_value); + pr_info("new_value:%d, ret: %d\n", new_value, ret); + if (ret != 1) { + pr_err("%s: Bad command, use echo [1|0] > oh_en.\n", + bond->dev->name); + return -EINVAL; + } + + if (!bond->params.ohp) { + pr_err("error, have not bind a offline port\n"); + return -EPERM; + } + + if ((new_value == 0) || (new_value == 1)) { + bond->params.ohp->oh_en = new_value; + return count; + } else { + pr_err("%s: Bad value, only is 1 or 0.\n", + bond->dev->name); + return -EINVAL; + } +} + +/** + * Judge a slave net device is a dpa-eth NIC, + * return true if it is a dpa-eth NIC, + * otherwise return false. 
+ */ +static bool is_dpa_eth_port(struct net_device *netdev) +{ + struct device *dev = (struct device *) &(netdev->dev); + + if ((strlen(dev_driver_string(dev->parent)) >= 7) && + strncmp(dev_driver_string(dev->parent), "fsl_dpa", 7) == 0) + return true; + else + return false; +} + +bool are_all_slaves_linkup(struct bonding *bond) +{ + struct slave *slave; + struct list_head *iter; + + read_lock(&bond->lock); + bond_for_each_slave(bond, slave, iter) + if (!(SLAVE_IS_OK(slave))) { + read_unlock(&bond->lock); + return false; + } + + read_unlock(&bond->lock); + return true; +} + +unsigned int to_which_oh_i_attached(struct oh_port_priv *current_poh) +{ + struct oh_port_priv *org = poh; + int i = 0; + while (current_poh - org) { + i++; + org++; + } + + return i; +} +/* Borrowed from dpa_fd_release, removed netdev params. */ +static void __attribute__((nonnull)) +dpa_oh_fd_release(const struct qm_fd *fd) +{ + struct qm_sg_entry *sgt; + struct dpa_bp *dpa_bp; + struct bm_buffer bmb; + + bmb.hi = fd->addr_hi; + bmb.lo = fd->addr_lo; + + dpa_bp = dpa_bpid2pool(fd->bpid); + DPA_BUG_ON(!dpa_bp); + + if (fd->format == qm_fd_sg) { + sgt = (phys_to_virt(bm_buf_addr(&bmb)) + dpa_fd_offset(fd)); + dpa_release_sgt(sgt); + } + + while (bman_release(dpa_bp->pool, &bmb, 1, 0)) + cpu_relax(); +} + +static void dpa_oh_drain_bp(struct dpa_bp *bp) +{ + int i, num; + struct bm_buffer bmb[8]; + dma_addr_t addr; + int *countptr = __this_cpu_ptr(bp->percpu_count); + int count = *countptr; + struct sk_buff **skbh; + + while (count >= 8) { + num = bman_acquire(bp->pool, bmb, 8, 0); + /* There may still be up to 7 buffers in the pool; + * just leave them there until more arrive + */ + if (num < 0) + break; + for (i = 0; i < num; i++) { + addr = bm_buf_addr(&bmb[i]); + /* bp->free_buf_cb(phys_to_virt(addr)); */ + skbh = (struct sk_buff **)phys_to_virt(addr); + dma_unmap_single(bp->dev, addr, bp->size, + DMA_TO_DEVICE); + dev_kfree_skb(*skbh); + } + count -= num; + } + *countptr = count; +} 
+static int dpa_oh_tx_bp_probe(struct device *dev, + struct fm_port *tx_port, + struct dpa_bp **draining_tx_bp, + struct dpa_buffer_layout_s **tx_buf_layout) +{ + struct fm_port_params params; + struct dpa_bp *bp = NULL; + struct dpa_buffer_layout_s *buf_layout = NULL; + + buf_layout = devm_kzalloc(dev, sizeof(*buf_layout), GFP_KERNEL); + if (!buf_layout) { + dev_err(dev, "devm_kzalloc() failed\n"); + return -ENOMEM; + } + + buf_layout->priv_data_size = DPA_TX_PRIV_DATA_SIZE; + buf_layout->parse_results = true; + buf_layout->hash_results = true; + buf_layout->time_stamp = false; + + fm_port_get_buff_layout_ext_params(tx_port, ¶ms); + buf_layout->manip_extra_space = params.manip_extra_space; + buf_layout->data_align = params.data_align ? : DPA_FD_DATA_ALIGNMENT; + + bp = devm_kzalloc(dev, sizeof(*bp), GFP_KERNEL); + if (unlikely(bp == NULL)) { + dev_err(dev, "devm_kzalloc() failed\n"); + return -ENOMEM; + } + + bp->size = dpa_bp_size(buf_layout); + bp->percpu_count = alloc_percpu(*bp->percpu_count); + bp->target_count = CONFIG_FSL_DPAA_ETH_MAX_BUF_COUNT; + + *draining_tx_bp = bp; + *tx_buf_layout = buf_layout; + + return 0; +} +static int dpa_oh_bp_create(struct oh_port_priv *ohp) +{ + int err = 0; + struct dpa_bp *draining_tx_bp; + struct dpa_buffer_layout_s *tx_buf_layout; + + err = dpa_oh_tx_bp_probe(ohp->dpa_oh_dev, ohp->oh_config->oh_port, + &draining_tx_bp, &tx_buf_layout); + if (err) { + pr_err("errors on dpa_oh_tx_bp_probe()\n"); + return err; + } + + ohp->tx_bp = draining_tx_bp; + ohp->tx_buf_layout = tx_buf_layout; + + err = dpa_bp_alloc(ohp->tx_bp); + if (err < 0) { + /* _dpa_bp_free(ohp->tx_bp); */ + pr_err("error on dpa_bp_alloc()\n"); + ohp->tx_bp = NULL; + return err; + } + hw_lag_dbg("created bp, bpid(ohp->tx_bp):%d\n", ohp->tx_bp->bpid); + + return 0; +} +/** + * Copied from DPA-Eth driver (since they have different params type): + * Cleanup function for outgoing frame descriptors that were built on Tx path, + * either contiguous frames or 
/*
 * Cleanup for outgoing frame descriptors that were built on the Tx
 * path, either contiguous frames or scatter/gather ones (copied from
 * the DPA-Eth driver, which uses a different params type).
 * Skb freeing is not handled here.
 *
 * This function may be called on error paths in the Tx function, so guard
 * against cases when not all fd relevant fields were filled in.
 *
 * Return the skb backpointer, since for S/G frames the buffer containing it
 * gets freed here.
 */
struct sk_buff *oh_cleanup_tx_fd(const struct qm_fd *fd)
{
	int i, nr_frags;
	const struct qm_sg_entry *sgt;
	struct sk_buff **skbh;
	struct sk_buff *skb = NULL;
	dma_addr_t addr = qm_fd_addr(fd);
	struct dpa_bp *dpa_bp = dpa_bpid2pool(fd->bpid);
	const enum dma_data_direction dma_dir = DMA_TO_DEVICE;

	/* frame-copy-offload frames must never reach this path */
	DPA_BUG_ON(fd->cmd & FM_FD_CMD_FCO);
	dma_unmap_single(dpa_bp->dev, addr, dpa_bp->size, dma_dir);

	/* retrieve skb back pointer */
	DPA_READ_SKB_PTR(skb, skbh, phys_to_virt(addr), 0);
	nr_frags = skb_shinfo(skb)->nr_frags;

	if (fd->format == qm_fd_sg) {
		/* The sgt buffer has been allocated with netdev_alloc_frag(),
		 * it's from lowmem.
		 */
		sgt = phys_to_virt(addr + dpa_fd_offset(fd));

		/* sgt[0] is from lowmem, was dma_map_single()-ed */
		dma_unmap_single(dpa_bp->dev, sgt[0].addr,
				 sgt[0].length, dma_dir);

		/* remaining pages were mapped with dma_map_page() */
		for (i = 1; i < nr_frags; i++) {
			DPA_BUG_ON(sgt[i].extension);

			dma_unmap_page(dpa_bp->dev, sgt[i].addr,
				       sgt[i].length, dma_dir);
		}

		/* Free the page frag that we allocated on Tx */
		put_page(virt_to_head_page(sgt));
	}

	return skb;
}

/*
 * Dump the FMan parser results carried in front of the frame data.
 * Compiled out unless CONFIG_HW_LAG_DEBUG is set.
 */
static void dump_parser_result(const struct qm_fd *fd)
{
#ifdef CONFIG_HW_LAG_DEBUG
	dma_addr_t addr = qm_fd_addr(fd);
	void *vaddr;
	const fm_prs_result_t *parse_results;

	vaddr = phys_to_virt(addr);
	DPA_BUG_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));

	/* parser results live right after the private data area */
	parse_results = (const fm_prs_result_t *)(vaddr +
				DPA_TX_PRIV_DATA_SIZE);

	hw_lag_dbg("parse_results->l2r:0x%08x\n", parse_results->l2r);

	hw_lag_dbg("FM_L3_PARSE_RESULT_IPV4:0x%0x\n"
		   "FM_L3_PARSE_RESULT_IPV6:0x%0x\n"
		   "parse_results->l3r:0x%08x\n",
		   parse_results->l3r & FM_L3_PARSE_RESULT_IPV4,
		   parse_results->l3r & FM_L3_PARSE_RESULT_IPV6,
		   parse_results->l3r);

	hw_lag_dbg("fm_l4_hxs_has_run(parse_results):0x%0x\n"
		   "fm_l4_hxs_error(parse_results):0x%0x\n",
		   fm_l4_hxs_has_run(parse_results),
		   fm_l4_hxs_error(parse_results));

	hw_lag_dbg("fd->status & FM_FD_STAT_L4CV:0x%x\n"
		   "parse_results->l4r:0x%08x\n"
		   "fm_l4_frame_is_tcp(parse_results):0x%0x\n",
		   fd->status & FM_FD_STAT_L4CV,
		   parse_results->l4r,
		   fm_l4_frame_is_tcp(parse_results));
#endif
}

/*
 * Decode and log an FD's status bits and the L2/L3/L4 headers of the
 * frame it points to.  NOTE: this mutates @skb (skb_pull & header
 * resets) — debug-only, compiled out unless CONFIG_HW_LAG_DEBUG.
 */
static void show_dbg_info(const struct qm_fd *fd, const char *func_name,
		struct sk_buff *skb)
{
#ifdef CONFIG_HW_LAG_DEBUG
	u32 pad, fd_status;
	dma_addr_t addr;
	struct ethhdr *eth;
	struct iphdr *iph;
	struct tcphdr *tcph;
	struct udphdr *udph;
	unsigned int data_start;
	unsigned long skb_addr;

	fd_status = fd->status;
	addr = qm_fd_addr(fd);

	/* find out the pad */
	skb_addr = virt_to_phys(skb->head);
	pad = addr - skb_addr;

	/* The skb is currently pointed at head + headroom. The packet
	 * starts at skb->head + pad + fd offset.
	 */
	data_start = pad + dpa_fd_offset(fd) - skb_headroom(skb);

	skb_pull(skb, data_start);

	pr_info("[%s]:fd->status:0x%08x\n", func_name, fd_status);
	pr_info("[%s]:fd tx status:0x%08x. fd rx status:0x%08x\n",
		func_name,
		fd_status & FM_FD_STAT_TX_ERRORS,
		fd_status & FM_FD_STAT_RX_ERRORS);

	/* NOTE(review): likely() on an *error* bit looks inverted —
	 * presumably unlikely() was intended; confirm before changing.
	 */
	if (likely(fd_status & FM_FD_STAT_ERR_PHYSICAL))
		pr_err("FM_FD_STAT_ERR_PHYSICAL\n");
	if (fd_status & FM_PORT_FRM_ERR_DMA)
		pr_err("FM_PORT_FRM_ERR_DMA\n");
	if (fd_status & FM_PORT_FRM_ERR_PHYSICAL)
		pr_err("FM_PORT_FRM_ERR_PHYSICAL\n");
	if (fd_status & FM_PORT_FRM_ERR_SIZE)
		pr_err("FM_PORT_FRM_ERR_SIZE\n");
	if (fd_status & FM_PORT_FRM_ERR_PRS_HDR_ERR)
		pr_err("oh_pcd_confq FM_PORT_FRM_ERR_PRS_HDR_ERR\n");

	pr_info("[%s]:fd->format - qm_fd_contig:%d\n", func_name,
		fd->format - qm_fd_contig);
	pr_info("[%s]:fd->bpid:%d\n", func_name, fd->bpid);

	/* get L2 info */
	skb->protocol = htons(ETH_P_802_3);
	skb_reset_mac_header(skb);
	skb_pull_inline(skb, ETH_HLEN);

	eth = eth_hdr(skb);

	pr_info("\n[%s]:dmac:%02x:%02x:%02x:%02x:%02x:%02x\n"
		"smac:%02x:%02x:%02x:%02x:%02x:%02x\n"
		"h_proto:0x%04x\n", func_name,
		eth->h_dest[0], eth->h_dest[1], eth->h_dest[2],
		eth->h_dest[3], eth->h_dest[4], eth->h_dest[5],
		eth->h_source[0], eth->h_source[1], eth->h_source[2],
		eth->h_source[3], eth->h_source[4], eth->h_source[5],
		eth->h_proto);

	if (fd_status & FM_FD_STAT_L4CV) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		pr_info("[%s]:skb->ip_summed = CHECKSUM_UNNECESSARY\n",
			func_name);
	} else {
		skb->ip_summed = CHECKSUM_NONE;
		pr_info("[%s]:skb->ip_summed = CHECKSUM_NONE\n", func_name);
	}

	/* get L3 and part of L4 info */
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);

	/* NOTE(review): eth->h_proto is big-endian on the wire; these
	 * comparisons against host-order ETH_P_* constants only work on
	 * big-endian PPC — htons(ETH_P_*) would be portable; confirm.
	 */
	if (eth->h_proto == ETH_P_IP) {
		iph = ip_hdr(skb);
		pr_info("[%s]:L3_pro:0x%0x, dip:0x%0x, sip:0x%0x\n", func_name,
			iph->protocol, iph->daddr, iph->saddr);

		skb_pull_inline(skb, ip_hdrlen(skb));
		skb_reset_transport_header(skb);

		if (iph->protocol == IPPROTO_TCP) {
			tcph = tcp_hdr(skb);
			pr_info("[%s]:tcp csum:0x%0x\n",
				func_name, tcph->check);
		} else if (iph->protocol == IPPROTO_UDP) {
			udph = udp_hdr(skb);
			pr_info("[%s]:udp csum:0x%0x\n",
				func_name, udph->check);
		}

	} else if (eth->h_proto == ETH_P_ARP) {
		pr_info("[%s]:ARP.\n", func_name);
	} else if (eth->h_proto == ETH_P_IPV6) {
		pr_info("[%s]:IPv6.\n", func_name);
	} else if (eth->h_proto == ETH_P_SLOW) {
		pr_info("[%s]:802.3ad pkt.\n", func_name);
	} else {
		pr_info("[%s]:other pkt.\n", func_name);
	}

	return;
#endif
}

/**
 * When enqueue an frame from kernel module to offline port,
 * once errors happeds, this callback will be entered.
 */
static enum qman_cb_dqrr_result
oh_ingress_tx_error_dqrr(struct qman_portal *portal,
		struct qman_fq *fq,
		const struct qm_dqrr_entry *dq)
{
	struct sk_buff *skb;
	const struct qm_fd *fd = &dq->fd;

	/* unmap/release the FD, log it, then drop the skb */
	skb = oh_cleanup_tx_fd(fd);
	dump_parser_result(fd);
	show_dbg_info(fd, __func__, skb);
	dev_kfree_skb_any(skb);

	return qman_cb_dqrr_consume;
}

/**
 * This subroutine is copied from oNIC, it should not be call
 * in normal case, only for debugging outgoing traffics to oh
 * tx port while no PCD applied for oh port. such as debugging
 * oh port tx L4 csum.
 */
static enum qman_cb_dqrr_result __hot
oh_ingress_tx_default_dqrr(struct qman_portal *portal,
		struct qman_fq *fq,
		const struct qm_dqrr_entry *dq)
{
	struct net_device *netdev;
	struct dpa_priv_s *priv;
	struct dpa_bp *bp;
	struct dpa_percpu_priv_s *percpu_priv;
	struct sk_buff **skbh;
	struct sk_buff *skb;
	struct iphdr *iph;
	const struct qm_fd *fd = &dq->fd;
	u32 fd_status = fd->status;
	u32 pad;
	dma_addr_t addr = qm_fd_addr(fd);
	unsigned int data_start;
	unsigned long skb_addr;
	int *countptr;
	struct ethhdr *eth;

	hw_lag_dbg("fd->status:0x%08x\n", fd_status);

	hw_lag_dbg("fd tx status:0x%08x. fd rx status:0x%08x\n",
		   fd_status & FM_FD_STAT_TX_ERRORS,
		   fd_status & FM_FD_STAT_RX_ERRORS);

	/* NOTE(review): likely() on an error bit — see show_dbg_info() */
	if (likely(fd_status & FM_FD_STAT_ERR_PHYSICAL))
		pr_err("FM_FD_STAT_ERR_PHYSICAL\n");

	if (fd_status & FM_PORT_FRM_ERR_DMA)
		pr_err("FM_PORT_FRM_ERR_DMA\n");
	if (fd_status & FM_PORT_FRM_ERR_PHYSICAL)
		pr_err("FM_PORT_FRM_ERR_PHYSICAL\n");
	if (fd_status & FM_PORT_FRM_ERR_SIZE)
		pr_err("FM_PORT_FRM_ERR_SIZE\n");
	if (fd_status & FM_PORT_FRM_ERR_PRS_HDR_ERR)
		pr_err("oh_tx_defq FM_PORT_FRM_ERR_PRS_HDR_ERR\n");

	netdev = ((struct dpa_fq *)fq)->net_dev;
	if (!netdev) {
		/* no owning netdev: free via the back-pointer and bail */
		pr_err("error netdev == NULL.\n");
		skbh = (struct sk_buff **)phys_to_virt(addr);
		dev_kfree_skb(*skbh);
		return qman_cb_dqrr_consume;
	}
	priv = netdev_priv(netdev);
	dump_parser_result(fd);

	percpu_priv = __this_cpu_ptr(priv->percpu_priv);
	countptr = __this_cpu_ptr(priv->dpa_bp->percpu_count);

	skbh = (struct sk_buff **)phys_to_virt(addr);
	/* according to the last common code (bp refill) the skb pointer is set
	 * to another address shifted by sizeof(struct sk_buff) to the left
	 */
	skb = *(skbh - 1);

	if (unlikely(fd_status & FM_FD_STAT_RX_ERRORS) != 0) {
		hw_lag_dbg("FD status = 0x%08x\n",
			   fd_status & FM_FD_STAT_RX_ERRORS);

		percpu_priv->stats.rx_errors++;
		oh_cleanup_tx_fd(fd);
		goto qman_consume;
	}
	if (unlikely(fd->format != qm_fd_contig)) {
		percpu_priv->stats.rx_dropped++;
		hw_lag_dbg("Dropping a SG frame\n");
		oh_cleanup_tx_fd(fd);
		goto qman_consume;
	}

	hw_lag_dbg("fd->bpid:%d\n", fd->bpid);
	bp = dpa_bpid2pool(fd->bpid);
	dma_unmap_single(bp->dev, addr, bp->size, DMA_TO_DEVICE);

	/* find out the pad */
	skb_addr = virt_to_phys(skb->head);
	pad = addr - skb_addr;

	/* one buffer consumed from this cpu's pool count */
	countptr = __this_cpu_ptr(bp->percpu_count);
	(*countptr)--;

	/* The skb is currently pointed at head + headroom. The packet
	 * starts at skb->head + pad + fd offset.
	 */
	data_start = pad + dpa_fd_offset(fd) - skb_headroom(skb);
	skb_pull(skb, data_start);

	/* get L2 info */
	skb->protocol = eth_type_trans(skb, netdev);
	eth = eth_hdr(skb);

	hw_lag_dbg("dmac:%02x:%02x:%02x:%02x:%02x:%02x\n"
		   "smac:%02x:%02x:%02x:%02x:%02x:%02x\n"
		   "h_proto:0x%04x\n",
		   eth->h_dest[0], eth->h_dest[1], eth->h_dest[2],
		   eth->h_dest[3], eth->h_dest[4], eth->h_dest[5],
		   eth->h_source[0], eth->h_source[1], eth->h_source[2],
		   eth->h_source[3], eth->h_source[4], eth->h_source[5],
		   eth->h_proto);

	if (unlikely(dpa_check_rx_mtu(skb, netdev->mtu))) {
		percpu_priv->stats.rx_dropped++;
		goto qman_consume;
	}

	if (fd_status & FM_FD_STAT_L4CV) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		hw_lag_dbg("skb->ip_summed = CHECKSUM_UNNECESSARY\n");
	} else {
		skb->ip_summed = CHECKSUM_NONE;
		hw_lag_dbg("skb->ip_summed = CHECKSUM_NONE\n");
	}

	/* get L3 and part of L4 info */
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);

	if (eth->h_proto == ETH_P_IP) {
		iph = ip_hdr(skb);
		hw_lag_dbg("L3_pro:0x%0x, dip:0x%0x, sip:0x%0x\n",
			   iph->protocol, iph->daddr, iph->saddr);
	} else if (eth->h_proto == ETH_P_ARP) {
		hw_lag_dbg("ARP.\n");
	} else if (eth->h_proto == ETH_P_IPV6) {
		hw_lag_dbg("IPv6.\n");
	} else if (eth->h_proto == ETH_P_SLOW) {
		hw_lag_dbg("802.3ad pkt.\n");
	} else {
		hw_lag_dbg("other pkt.\n");
	}

qman_consume:
	dev_kfree_skb_any(skb);

	return qman_cb_dqrr_consume;
}
+ */ +static enum qman_cb_dqrr_result +oh_pcd_err_dqrr(struct qman_portal *portal, struct qman_fq *fq, + const struct qm_dqrr_entry *dq) +{ + struct sk_buff *skb; + const struct qm_fd *fd = &dq->fd; + + skb = oh_cleanup_tx_fd(fd); + dump_parser_result(fd); + show_dbg_info(fd, __func__, skb); + dev_kfree_skb_any(skb); + + return qman_cb_dqrr_consume; + +} +/** + * When frame leave from offline port tx fqs then goes into offline tx + * ports(MAC ports), it will be into confirm fq, this callback will be + * entered. + * dump debugging information when HW_LAG_DEBUG enabled. + * don't free skb, since offline port is not the final consumer. + */ +static enum qman_cb_dqrr_result __hot +oh_tx_conf_dqrr(struct qman_portal *portal, struct qman_fq *fq, + const struct qm_dqrr_entry *dq) +{ + struct sk_buff *skb; + const struct qm_fd *fd = &dq->fd; + + skb = oh_cleanup_tx_fd(fd); + dump_parser_result(fd); + show_dbg_info(fd, __func__, skb); + + return qman_cb_dqrr_consume; +} + +static void lag_public_egress_ern(struct qman_portal *portal, + struct qman_fq *fq, const struct qm_mr_entry *msg) +{ + /* will add ERN statistics in the future version. */ + const struct qm_fd *fd = &msg->ern.fd; + struct sk_buff *skb; + + if (msg->ern.fd.cmd & FM_FD_CMD_FCO) { + dpa_oh_fd_release(fd); + return; + } + + skb = oh_cleanup_tx_fd(fd); + dump_parser_result(fd); + show_dbg_info(fd, __func__, skb); + dev_kfree_skb_any(skb); +} + +/** + * This subroutine will be called when frame out of oh pcd fqs and + * consumed by (MAC ports). + * Display debugging information if HW_LAG_DEBUG on. 
+ */ +static enum qman_cb_dqrr_result __hot +oh_pcd_conf_dqrr(struct qman_portal *portal, struct qman_fq *fq, + const struct qm_dqrr_entry *dq) +{ + struct sk_buff *skb; + const struct qm_fd *fd = &dq->fd; + + skb = oh_cleanup_tx_fd(fd); + show_dbg_info(fd, __func__, skb); + dev_kfree_skb_any(skb); + + return qman_cb_dqrr_consume; +} + +static const struct qman_fq oh_tx_defq = { + .cb = { .dqrr = oh_ingress_tx_default_dqrr} +}; +/* for OH ports Rx Error queues = Tx Error queues */ +static const struct qman_fq oh_tx_errq = { + .cb = { .dqrr = oh_ingress_tx_error_dqrr} +}; + +static const struct qman_fq oh_pcd_confq = { + .cb = { .dqrr = oh_pcd_conf_dqrr} +}; +static const struct qman_fq oh_pcd_errq = { + .cb = { .dqrr = oh_pcd_err_dqrr} +}; +static const struct qman_fq oh_tx_confq = { + .cb = { .dqrr = oh_tx_conf_dqrr} +}; +static const struct qman_fq oh_pcd_egress_ernq = { + .cb = { .ern = lag_public_egress_ern} +}; +static const struct qman_fq oh_egress_ernq = { + .cb = { .ern = lag_public_egress_ern} +}; + +static int oh_add_channel(void *__arg) +{ + int cpu; + struct qman_portal *portal; + const cpumask_t *cpus = qman_affine_cpus(); + u32 pool = QM_SDQCR_CHANNELS_POOL_CONV((u32)(unsigned long)__arg); + + for_each_cpu(cpu, cpus) { + portal = (struct qman_portal *)qman_get_affine_portal(cpu); + qman_p_static_dequeue_add(portal, pool); + } + + return 0; +} + +static int init_oh_errq_defq(struct device *dev, + uint32_t fqid_err, uint32_t fqid_def, + struct dpa_fq **oh_errq, struct dpa_fq **oh_defq, + uint16_t *priv_channel) +{ + int errno; + struct dpa_fq *errq, *defq; + /* These two vaules come from DPA-Eth driver */ + uint8_t wq_errq = 2, wq_defq = 1; + u32 channel; + struct qm_mcc_initfq initfq; + struct qm_fqd fqd; + struct task_struct *kth; + + /* Get a channel */ + errno = qman_alloc_pool(&channel); + if (errno) { + pr_err("error on getting pool channel.\n"); + return errno; + } + + if (channel < 0) { + errno = channel; + pr_err("error on 
dpa_get_channel().\n"); + return errno; + } + + /* Start a thread that will walk the cpus with affine portals + * and add this pool channel to each's dequeue mask. + */ + + kth = kthread_run(oh_add_channel, (void *)(unsigned long)channel, + "oh_add_channel"); + if (!kth) { + pr_warn("run kthread faild...\n"); + return -ENOMEM; + } + + /* Allocate memories for Tx ErrQ and Tx DefQ of oh port */ + errq = devm_kzalloc(dev, sizeof(struct dpa_fq), GFP_KERNEL); + if (!errq) { + pr_err("devm_kzalloc() for OH errq failed\n"); + return -ENOMEM; + } + defq = devm_kzalloc(dev, sizeof(struct dpa_fq), GFP_KERNEL); + if (!defq) { + pr_err("devm_kzalloc() for OH defq failed.\n"); + return -ENOMEM; + } + + /* Set Tx ErrQ callbacks of oh port */ + errq->fq_base = oh_tx_errq; + + /* Set the flags of the oh port Tx ErrQ/Tx DefQ and create the FQs */ + errq->fq_base.flags = QMAN_FQ_FLAG_NO_ENQUEUE; + errno = qman_create_fq(fqid_err, errq->fq_base.flags, &errq->fq_base); + if (errno) { + pr_err("error on create OH errq.\n"); + return errno; + } + + defq->fq_base = oh_tx_defq; + defq->fq_base.flags = QMAN_FQ_FLAG_NO_ENQUEUE; + errno = qman_create_fq(fqid_def, defq->fq_base.flags, &defq->fq_base); + if (errno) { + pr_err("error on create OH defq.\n"); + return errno; + } + + *priv_channel = (uint16_t)channel; + /* Set the FQs init options then init the FQs */ + initfq.we_mask = QM_INITFQ_WE_DESTWQ; + initfq.fqd.dest.channel = (uint16_t)channel; + initfq.fqd.dest.wq = wq_errq; + initfq.we_mask |= QM_INITFQ_WE_FQCTRL; + initfq.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE; + initfq.we_mask |= QM_INITFQ_WE_CONTEXTA; + initfq.fqd.fq_ctrl |= QM_FQCTRL_CTXASTASHING | QM_FQCTRL_AVOIDBLOCK; + initfq.fqd.context_a.stashing.exclusive = QM_STASHING_EXCL_DATA | + QM_STASHING_EXCL_CTX | QM_STASHING_EXCL_ANNOTATION; + initfq.fqd.context_a.stashing.data_cl = 2; + initfq.fqd.context_a.stashing.annotation_cl = 1; + initfq.fqd.context_a.stashing.context_cl = + DIV_ROUND_UP(sizeof(struct qman_fq), 64); + + /* init 
oh ports errors fq */ + errno = qman_init_fq(&errq->fq_base, QMAN_INITFQ_FLAG_SCHED, &initfq); + if (errno < 0) { + pr_err("error on qman_init_fq %u = %d\n", fqid_err, errno); + qman_destroy_fq(&errq->fq_base, 0); + devm_kfree(dev, errq); + return errno; + } + + errno = qman_query_fq(&errq->fq_base, &fqd); + hw_lag_dbg("errno of qman_query_fq:0x%08x\n", errno); + if (fqd.fq_ctrl != initfq.fqd.fq_ctrl) { + pr_err("queried fq_ctrl=%x, should be=%x\n", fqd.fq_ctrl, + initfq.fqd.fq_ctrl); + panic("fail"); + } + if (memcmp(&fqd.td, &initfq.fqd.td, sizeof(fqd.td))) { + pr_err("queried td_thresh=%x:%x, should be=%x:%x\n", + fqd.td.exp, fqd.td.mant, + initfq.fqd.td.exp, initfq.fqd.td.mant); + panic("fail"); + } + + /* init oh ports default fq */ + initfq.fqd.dest.wq = wq_defq; + errno = qman_init_fq(&defq->fq_base, QMAN_INITFQ_FLAG_SCHED, &initfq); + if (errno < 0) { + pr_err("error on qman_init_fq %u = %d\n", fqid_def, errno); + qman_destroy_fq(&defq->fq_base, 0); + devm_kfree(dev, defq); + return errno; + } + + *oh_errq = errq; + *oh_defq = defq; + + hw_lag_dbg("oh port defq and oh port errq initialize OK\n"); + + return BOND_OH_SUCCESS; +} +/** + * Initialize pcd err fqs and pcd confirmation fqs. + * HW LAG uses this method rather than reuse DPA-Eth private rx err/ + * rx def/tx err/tx confirm FQs and callbacks, since HW LAG uses + * different data structure from DPA-Eth private driver. 
+ */ +static int init_oh_pcderrq_pcdconfq(struct device *dev, + uint32_t *fqid_pcderr, uint32_t *fqid_pcdconf, + struct dpa_fq **oh_pcderrq, struct dpa_fq **oh_pcdconfq, + uint16_t priv_channel) +{ + int errno; + struct dpa_fq *pcderrq, *pcdconfq; + /* These two vaules come from DPA-Eth driver */ + uint8_t wq_errq = 2, wq_confq = 1; + struct qm_mcc_initfq initfq; + + /* Allocate memories for PCD ErrQ and PCD confirm Q of oh port */ + pcderrq = devm_kzalloc(dev, sizeof(struct dpa_fq), GFP_KERNEL); + if (!pcderrq) { + pr_err("devm_kzalloc() for OH pcderrq failed\n"); + return -ENOMEM; + } + + pcdconfq = devm_kzalloc(dev, sizeof(struct dpa_fq), GFP_KERNEL); + if (!pcdconfq) { + pr_err("devm_kzalloc() for OH pcdconfq failed.\n"); + return -ENOMEM; + } + + /* Set PCD ErrQ callbacks of oh port */ + pcderrq->fq_base = oh_pcd_errq; + + /* Set the flags of the oh port PCD ErrQ, create the FQs */ + pcderrq->fq_base.flags = QMAN_FQ_FLAG_NO_ENQUEUE | + QMAN_FQ_FLAG_DYNAMIC_FQID; + errno = qman_create_fq(0, pcderrq->fq_base.flags, &pcderrq->fq_base); + if (errno) { + pr_err("error on create OH pcderrq.\n"); + return errno; + } + *fqid_pcderr = pcderrq->fq_base.fqid; + hw_lag_dbg("*fqid_pcderr:%d\n", *fqid_pcderr); + + /* Set PCD confirm Q callbacks of oh port */ + pcdconfq->fq_base = oh_pcd_confq; + /* Set the flags of the oh port PCD confQ, create the FQs */ + pcdconfq->fq_base.flags = QMAN_FQ_FLAG_NO_ENQUEUE| + QMAN_FQ_FLAG_DYNAMIC_FQID; + errno = qman_create_fq(0, pcdconfq->fq_base.flags, &pcdconfq->fq_base); + if (errno) { + pr_err("error on create OH pcdconfq.\n"); + return errno; + } + *fqid_pcdconf = pcdconfq->fq_base.fqid; + hw_lag_dbg("*fqid_pcdconf:%d\n", *fqid_pcdconf); + + /* Set the FQs init options then init the FQs */ + initfq.we_mask = QM_INITFQ_WE_DESTWQ; + initfq.fqd.dest.channel = priv_channel; + initfq.fqd.dest.wq = wq_errq; + initfq.we_mask |= QM_INITFQ_WE_FQCTRL; + initfq.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE; + initfq.we_mask |= QM_INITFQ_WE_CONTEXTA; + 
initfq.fqd.fq_ctrl |= QM_FQCTRL_CTXASTASHING | QM_FQCTRL_AVOIDBLOCK; + initfq.fqd.context_a.stashing.exclusive = QM_STASHING_EXCL_DATA | + QM_STASHING_EXCL_CTX | QM_STASHING_EXCL_ANNOTATION; + initfq.fqd.context_a.stashing.data_cl = 2; + initfq.fqd.context_a.stashing.annotation_cl = 1; + initfq.fqd.context_a.stashing.context_cl = + DIV_ROUND_UP(sizeof(struct qman_fq), 64); + + /* init pcd errors fq */ + errno = qman_init_fq(&pcderrq->fq_base, + QMAN_INITFQ_FLAG_SCHED, &initfq); + if (errno < 0) { + pr_err("error on qman_init_fq pcderrq:%u = %d\n", + *fqid_pcderr, errno); + qman_destroy_fq(&pcderrq->fq_base, 0); + devm_kfree(dev, pcderrq); + + return errno; + } + + /* init pcd confirm fq */ + initfq.fqd.dest.wq = wq_confq; + errno = qman_init_fq(&pcdconfq->fq_base, + QMAN_INITFQ_FLAG_SCHED, &initfq); + if (errno < 0) { + pr_err("error on qman_init_fq pcdconfq:%u = %d\n", + *fqid_pcdconf, errno); + qman_destroy_fq(&pcdconfq->fq_base, 0); + devm_kfree(dev, pcdconfq); + + return errno; + } + + *oh_pcderrq = pcderrq; + *oh_pcdconfq = pcdconfq; + + hw_lag_dbg("oh pcd confq and pcd errq initialize OK\n"); + + return BOND_OH_SUCCESS; +} +/** + * Initialize confirmation fq for offline port tx fqs. + * This confirmation call back is enabled in case of buffer is released + * by BM after frame entered into tx port of offline port. 
+ */ +static int init_oh_txconfq(struct device *dev, uint32_t *fqid_ohtxconf, + struct dpa_fq **oh_txconfq, uint16_t priv_channel) +{ + int errno; + struct dpa_fq *txconfq; + /* This vaule comes from DPA-Eth driver */ + uint8_t wq_confq = 1; + struct qm_mcc_initfq initfq; + + /* Allocate memories for PCD ErrQ and PCD confirm Q of oh port */ + txconfq = devm_kzalloc(dev, sizeof(struct dpa_fq), GFP_KERNEL); + if (!txconfq) { + pr_err("devm_kzalloc() for OH tx confq failed.\n"); + return -ENOMEM; + } + + /* Set tx confirm Q callbacks of oh port */ + txconfq->fq_base = oh_tx_confq; + /* Set the flags of the oh port PCD confQ, create the FQs */ + txconfq->fq_base.flags = QMAN_FQ_FLAG_NO_ENQUEUE | + QMAN_FQ_FLAG_DYNAMIC_FQID; + errno = qman_create_fq(0, txconfq->fq_base.flags, &txconfq->fq_base); + if (errno) { + pr_err("error on create OH tx confq.\n"); + return errno; + } + *fqid_ohtxconf = txconfq->fq_base.fqid; + hw_lag_dbg("dynamic *fqid_ohtxconf:%d\n", *fqid_ohtxconf); + + /* Set the FQs init options then init the FQs */ + initfq.we_mask = QM_INITFQ_WE_DESTWQ; + initfq.fqd.dest.channel = priv_channel; + initfq.fqd.dest.wq = wq_confq; + initfq.we_mask |= QM_INITFQ_WE_FQCTRL; + initfq.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE; + initfq.we_mask |= QM_INITFQ_WE_CONTEXTA; + initfq.fqd.fq_ctrl |= QM_FQCTRL_CTXASTASHING | QM_FQCTRL_AVOIDBLOCK; + initfq.fqd.context_a.stashing.exclusive = QM_STASHING_EXCL_DATA | + QM_STASHING_EXCL_CTX | QM_STASHING_EXCL_ANNOTATION; + initfq.fqd.context_a.stashing.data_cl = 2; + initfq.fqd.context_a.stashing.annotation_cl = 1; + initfq.fqd.context_a.stashing.context_cl = + DIV_ROUND_UP(sizeof(struct qman_fq), 64); + + /* init oh tx confirm fq */ + initfq.fqd.dest.wq = wq_confq; + errno = qman_init_fq(&txconfq->fq_base, + QMAN_INITFQ_FLAG_SCHED, &initfq); + if (errno < 0) { + pr_err("error on qman_init_fq oh tx confq:%u = %d\n", + *fqid_ohtxconf, errno); + qman_destroy_fq(&txconfq->fq_base, 0); + devm_kfree(dev, txconfq); + + return errno; + } + 
+ *oh_txconfq = txconfq; + + hw_lag_dbg("oh tx confq initialize OK\n"); + + return BOND_OH_SUCCESS; +} +/** + * Initialize dynamic particular tx fqs of offline port for LAG xmit, + * does not reuse tx fqs initialized by offline port driver. This method + * can avoid to modify offline port driver even if the confirmation fq + * need to be enabled. + */ +static int init_oh_tx_lag_fqs(struct device *dev, + struct dpa_fq **oh_tx_lag_fqs, uint32_t fqid_ohtxconf, + uint16_t oh_tx_channel) +{ + int errno = BOND_OH_SUCCESS, i, tx_fqs_count; + uint16_t wq_id; + struct dpa_fq *lag_fqs; + struct qm_mcc_initfq fq_opts; + uint32_t create_flags, init_flags; + + tx_fqs_count = num_possible_cpus(); + /* Allocate particular tx queues of offline port for LAG xmit. */ + lag_fqs = devm_kzalloc(dev, sizeof(struct dpa_fq) * tx_fqs_count, + GFP_KERNEL); + if (!lag_fqs) { + pr_err("Can't allocate tx fqs for LAG xmit.\n"); + errno = -ENOMEM; + goto return_kfree; + } + + /* Set flags for particular tx fqs, especially for dynamic fqid. */ + create_flags = QMAN_FQ_FLAG_TO_DCPORTAL | QMAN_FQ_FLAG_DYNAMIC_FQID; + + /* Create particular tx fqs of offline port for LAG xmit */ + for (i = 0; i < tx_fqs_count; i++) { + /* set egress_ern callback for offline port tx fq */ + lag_fqs[i].fq_base = oh_egress_ernq; + errno = qman_create_fq(0, create_flags, &lag_fqs[i].fq_base); + if (errno) { + pr_err("Error on creating tx fqs for LAG xmit.\n"); + goto return_kfree; + } + } + + /* Set init flags for tx fqs of oh port */ + init_flags = QMAN_INITFQ_FLAG_SCHED; + + /* Set fq init options. 
Specify destination wq id and channel */ + memset(&fq_opts, 0, sizeof(fq_opts)); + fq_opts.we_mask = QM_INITFQ_WE_DESTWQ; + /* wq info from DPA-Eth driver */ + wq_id = 3; + fq_opts.fqd.dest.wq = wq_id; + fq_opts.fqd.dest.channel = oh_tx_channel; + + fq_opts.we_mask |= QM_INITFQ_WE_FQCTRL; + fq_opts.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE; + fq_opts.fqd.fq_ctrl |= QM_FQCTRL_CTXASTASHING | QM_FQCTRL_AVOIDBLOCK; + fq_opts.fqd.context_a.stashing.exclusive = QM_STASHING_EXCL_DATA | + QM_STASHING_EXCL_CTX | QM_STASHING_EXCL_ANNOTATION; + fq_opts.fqd.context_a.stashing.data_cl = 2; + fq_opts.fqd.context_a.stashing.annotation_cl = 1; + fq_opts.fqd.context_a.stashing.context_cl = + DIV_ROUND_UP(sizeof(struct qman_fq), 64); + +#ifdef CONFIG_HW_LAG_DEBUG + fq_opts.we_mask |= QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB; + /** + * CTXA[OVFQ] = 1 + * we set particular tx own confirmation fq and their own callback + * in case of interrupt DPA-Eth private conf callback/err callback + * /def callback. + */ + + fq_opts.fqd.context_a.hi = 0x80000000; + fq_opts.fqd.context_a.lo = 0x0; + fq_opts.fqd.context_b = fqid_ohtxconf; +#endif + /* Initialize particular tx frame queue of offline port for LAG xmit */ + for (i = 0; i < tx_fqs_count; i++) { + errno = qman_init_fq(&lag_fqs[i].fq_base, init_flags, &fq_opts); + if (errno) + goto init_error; + } + + for (i = 0; i < tx_fqs_count; i++) { + hw_lag_dbg("ok, created lag_fqs: fqid:%d\n", + lag_fqs[i].fq_base.fqid); + } + + *oh_tx_lag_fqs = lag_fqs; + + return BOND_OH_SUCCESS; +init_error: + while (i-- < 0) { + hw_lag_dbg("errors on initializing tx fqs, No.:%d tx fq.\n", i); + qman_destroy_fq(&lag_fqs[i].fq_base, 0); + } + +return_kfree: + if (lag_fqs) + devm_kfree(dev, lag_fqs); + + return errno; +} +/** + * This subroutine has been copied from offline_port driver + * to get all information of all offline ports by parse DTS + * return BOND_OH_SUCCESS when get information successfully. 
+ */ +int get_oh_info(void) +{ + struct platform_device *oh_of_dev, *of_dev; + struct device *dpa_oh_dev, *oh_dev; + struct device_node *dpa_oh_node = NULL, *oh_node; + int lenp, errno = BOND_OH_SUCCESS, i = 0; + const phandle *p_oh_port_handle; + const unsigned int *p_port_id; + const unsigned int *p_channel_id; + struct fm_port *oh_port; + unsigned long port_handle_cnt; + struct fm_port_params params; + + available_num_of_oh_ports = 0; + + /* probe offline ports and alloc memory, these codes need refining + * to save memory and need to get rid of the global variable. + */ + poh = kzalloc(sizeof(struct oh_port_priv) * FM_MAX_NUM_OF_OH_PORTS, + GFP_KERNEL); + if (!poh) + return -ENOMEM; + + for_each_matching_node(dpa_oh_node, oh_port_match_table) { + if (dpa_oh_node) { + p_oh_port_handle = of_get_property(dpa_oh_node, + "fsl,fman-oh-port", &lenp); + if (!p_oh_port_handle) { + pr_err("No OH port handle in node %s\n", + dpa_oh_node->full_name); + return -EINVAL; + } + hw_lag_dbg("dpa_oh_node->name:%s\n", + dpa_oh_node->full_name); + BUG_ON(lenp % sizeof(*p_oh_port_handle)); + if (lenp != sizeof(*p_oh_port_handle)) { + port_handle_cnt = + lenp / sizeof(*p_oh_port_handle); + + pr_err("Found %lu oh port in node %s\n" + "only 1 phandle is allowed.\n", + port_handle_cnt, + dpa_oh_node->full_name); + return -EINVAL; + } + + oh_node = of_find_node_by_phandle(*p_oh_port_handle); + if (!oh_node) { + pr_err("no oh node referenced from %s\n", + dpa_oh_node->full_name); + return -EINVAL; + } + hw_lag_dbg("Found oh_node->full_name %s.\n", + oh_node->full_name); + p_port_id = of_get_property(oh_node, + "cell-index", &lenp); + + if (!p_port_id) { + pr_err("No port id found in node %s\n", + dpa_oh_node->full_name); + return -EINVAL; + } + + hw_lag_dbg("Found port id %ud, in node %s\n", + *p_port_id, dpa_oh_node->full_name); + BUG_ON(lenp % sizeof(*p_port_id)); + + /* Read channel id for the queues */ + p_channel_id = + of_get_property(oh_node, + "fsl,qman-channel-id", &lenp); + if 
(!p_channel_id) { + pr_err("No channel id found in node %s\n", + dpa_oh_node->full_name); + return -EINVAL; + } + + BUG_ON(lenp % sizeof(*p_channel_id)); + + oh_of_dev = of_find_device_by_node(oh_node); + BUG_ON(!oh_of_dev); + oh_dev = &oh_of_dev->dev; + of_dev = of_find_device_by_node(dpa_oh_node); + BUG_ON(!oh_of_dev); + dpa_oh_dev = &of_dev->dev; + poh[i].of_dev = of_dev; + poh[i].oh_of_dev = oh_of_dev; + poh[i].dpa_oh_dev = dpa_oh_dev; + poh[i].oh_dev = oh_dev; + poh[i].dpa_oh_node = dpa_oh_node; + poh[i].oh_node = oh_node; + poh[i].cell_index = *p_port_id; + poh[i].oh_config = dev_get_drvdata(dpa_oh_dev); + poh[i].p_oh_port_handle = p_oh_port_handle; + poh[i].oh_channel_id = *p_channel_id; + oh_port = poh[i].oh_config->oh_port; + fm_port_get_buff_layout_ext_params(oh_port, ¶ms); + poh[i].bpid = params.pool_param[0].id; + poh[i].bp_size = params.pool_param[0].size; + /* give a friend name like "fman0-oh@1" + * rather than "/fsl,dpaa/dpa-fman0-oh@1". + * fill friendname array with dpa_oh_node->full_name, + * please don't use oh0 since documentatin says oh0 + * has bad performance. 
+ */ + memcpy(poh[i].friendname, + dpa_oh_node->full_name + 14, 10); + + if (qman_alloc_fqid_range(&poh[i].pcd_fqids_base, + FM_MAX_NUM_OF_MACS, true, 0) + != FM_MAX_NUM_OF_MACS) { + pr_err("error on alloc continuous pcd fqid\n"); + return -EINVAL; + } + + errno = init_oh_errq_defq(poh[i].dpa_oh_dev, + poh[i].oh_config->error_fqid, + poh[i].oh_config->default_fqid, + &poh[i].oh_errq, + &poh[i].oh_defq, + &poh[i].p_oh_rcv_channel); + if (errno != BOND_OH_SUCCESS) { + pr_err("error when probe errq or defq.\n"); + return errno; + } + + errno = init_oh_pcderrq_pcdconfq(poh[i].dpa_oh_dev, + &poh[i].fqid_pcderr, + &poh[i].fqid_pcdconf, + &poh[i].oh_pcderrq, + &poh[i].oh_pcdconfq, + poh[i].p_oh_rcv_channel); + if (errno != BOND_OH_SUCCESS) { + pr_err("error on probe pcderrq or pcdconfq\n"); + return errno; + } + + errno = init_oh_txconfq(poh[i].dpa_oh_dev, + &poh[i].fqid_ohtxconf, + &poh[i].oh_txconfq, + poh[i].oh_channel_id); + if (errno != BOND_OH_SUCCESS) { + pr_err("error on init offline port tx confq\n"); + return errno; + } + + errno = init_oh_tx_lag_fqs(poh[i].dpa_oh_dev, + &poh[i].oh_tx_lag_fqs, + poh[i].fqid_ohtxconf, + poh[i].oh_channel_id); + if (errno != BOND_OH_SUCCESS) { + pr_err("error on init offline port tx confq\n"); + return errno; + } + + errno = dpa_oh_bp_create(&poh[i]); + if (errno != BOND_OH_SUCCESS) { + pr_err("error on init offline tx bp.\n"); + return errno; + } + + available_num_of_oh_ports = ++i; + } + } + + return errno; +} +/** + * Get the FM_MAC_RES_ID from a dpa-eth NIC, return 0 if it is not a dpa-eth, + * otherwise return FM_MAC_RES_ID + * this function does not process macless, LAG does not need a macless IF. 
+ */ +static unsigned long long get_fm_mac_res_id(struct net_device *netdev) +{ + struct dpa_priv_s *priv = netdev_priv(netdev); + if (!is_dpa_eth_port(netdev)) + return 0; + + return (unsigned long long)priv->mac_dev->res->start; +} +/** + * Get the DCP_ID from a dpa-eth NIC, return 0 if it is not a dpa-eth, + * return 1 if it's fm0, return 2 if it's fm1, since there are only 2 + * FMAN in current DPA SOC. + * this function does not process macless, LAG does not need a macless IF. + */ +int get_dcp_id_from_dpa_eth_port(struct net_device *netdev) +{ + unsigned long long mac_res_start = get_fm_mac_res_id(netdev); + + if ((mac_res_start >= FM1_GB0) && (mac_res_start <= FM1_10G)) + return 1; + else if ((mac_res_start >= FM2_GB0) && (mac_res_start <= FM2_10G)) + return 2; + else + return 0; +} +/** + * Get all information of the offline port which is being used + * by a bundle, such as fman_dcpid, offline port offset, cell index, + * offline port channel. This API is required by CEETM Qos. + * Regarding fman dpcid, till sdk1.6, there is one fman in p1023, the + * offset is 0x1000000, for other dpaa socs, the offset of fman0 is + * 0x400000, the offset of fman1 is 0x500000, hence for current socs, + * the offset of fman0 <=0x4000000, 0x400000 < fman1 <=0x500000. + * return BOND_OH_SUCCESS when got all information, otherwise return + * Non-Zero. + */ +#define FMAN0_MAX_OFFSET 0x400000 +#define FMAN1_MAX_OFFSET 0x500000 +int export_oh_port_info_to_ceetm(struct bonding *bond, uint16_t *channel, + unsigned long *fman_dcpid, unsigned long *oh_offset, + unsigned long *cell_index) +{ + /** + * split str: "/soc@ffe000000/fman@400000/port@84000", then get + * the fman@ part and port@ part from them. regex is good enough + * as below: + * ret = sscanf((char *) p, "%*[^@]@%*[^@]@%[^/]/port@%s", s1, s2); + * but the kernel version does not support the method. 
+ */ + int errno; + char s1[16] = {0}, s2[16] = {0}; + char *p, *p1; + + if (!bond->params.ohp) { + pr_err("The bundle has not binded an offline port.\n"); + return 1; + } + + if (!bond->params.ohp->oh_en) { + pr_err("The offline is disabled, to enable it, use sysfs.\n"); + return 2; + } + + if (!bond->params.ohp->oh_node) { + pr_err("The offline node error.\n"); + return 3; + } + + p = strstr(bond->params.ohp->oh_node->full_name, "fman@"); + p += strlen("fman@"); + p1 = strstr(p, "/port@"); + + memcpy(s1, p, p1 - p); + + p = strstr(p, "/port@"); + p += strlen("/port@"); + + errno = sscanf((const char *) p, "%s", s2); + if (errno != 1) { + pr_err("parser error while process offline port node\n"); + return 4; + } + + errno = kstrtoul(s1, 16, fman_dcpid) | kstrtoul(s2, 16, oh_offset); + if (errno) { + pr_err("error on kstrtoul fman_dcpid, of_offset\n"); + return 5; + } + if (*fman_dcpid <= FMAN0_MAX_OFFSET) { + *fman_dcpid = 0; + } else if ((*fman_dcpid > FMAN0_MAX_OFFSET) && + (*fman_dcpid <= FMAN1_MAX_OFFSET)) { + *fman_dcpid = 1; + } else { + pr_err("error on calculating fman dcpid, new soc appears.\n"); + return 6; + } + + *channel = bond->params.ohp->oh_channel_id; + *cell_index = bond->params.ohp->cell_index; + + hw_lag_dbg("This oh port mapped to bond has channel:0x%0x\n", *channel); + hw_lag_dbg("fman_dcpid:0x%0lx, oh_offset:0x%0lx, cell-index:%0lx\n", + *fman_dcpid, *oh_offset, *cell_index); + + return BOND_OH_SUCCESS; +} +EXPORT_SYMBOL(export_oh_port_info_to_ceetm); +/** + * Public APIs which can use for Link Aggregation and CEETM Qos + * show bond info and slave device info when they are available + */ +int show_dpa_slave_info(struct bonding *bond, struct slave *slave) +{ + struct dpa_priv_s *priv = netdev_priv(slave->dev); + if (bond) + pr_info("bond->dev->name:%s, slave_cnt:%d\n", + bond->dev->name, bond->slave_cnt); + if (slave) + pr_info("new_slave:%s\n", slave->dev->name); + + if (is_dpa_eth_port(slave->dev)) { + 
pr_info("priv->mac_dev->res->start:%llx\n", + (unsigned long long)priv->mac_dev->res->start); + pr_info("get_dcp_id_from_dpa_eth_port(netdev):0x%0x\n", + get_dcp_id_from_dpa_eth_port(slave->dev)); + } else + pr_info("the slave device %s is not a DPAA-Eth NIC\n", + slave->dev->name); + + return 0; +} + +int init_status(struct net_device *netdev) +{ + struct bonding *bond = master_to_bond(netdev); + memset(&bond->params.oh_stats, 0, sizeof(struct rtnl_link_stats64)); + + return BOND_OH_SUCCESS; +} + +void add_statistics(struct bonding *bond, struct rtnl_link_stats64 *stats) +{ + stats->tx_packets += bond->params.oh_stats.tx_packets; + stats->tx_bytes += bond->params.oh_stats.tx_bytes; + stats->tx_errors += bond->params.oh_stats.tx_errors; + stats->tx_dropped += bond->params.oh_stats.tx_dropped; +} +/** + * Copied from oNIC (removed priv) + * Turn on HW checksum computation for this outgoing frame. + * If the current protocol is not something we support in this regard + * (or if the stack has already computed the SW checksum), we do nothing. + * + * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value + * otherwise. + * + * Note that this function may modify the fd->cmd field and the skb data buffer + * (the Parse Results area). + */ +int oh_tx_csum_enable(struct sk_buff *skb, + struct qm_fd *fd, + char *parse_results) +{ + fm_prs_result_t *parse_result; + struct iphdr *iph; + struct ipv6hdr *ipv6h = NULL; + struct tcphdr *tcph; + struct udphdr *udph; + int l4_proto; + int ethertype = ntohs(skb->protocol); + int retval = 0, i; + unsigned char *p; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + /* Fill in some fields of the Parse Results array, so the FMan + * can find them as if they came from the FMan Parser. 
+ */ + parse_result = (fm_prs_result_t *)parse_results; + /* If we're dealing with VLAN, get the real Ethernet type */ + if (ethertype == ETH_P_8021Q) { + /* We can't always assume the MAC header is set correctly + * by the stack, so reset to beginning of skb->data + */ + skb_reset_mac_header(skb); + ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto); + /* below l2r need look up FMAN RM to verify */ + parse_result->l2r = FM_PR_L2_VLAN | FM_PR_L2_VLAN_STACK; + } else { + parse_result->l2r = FM_PR_L2_ETHERNET; + } + + /* Fill in the relevant L3 parse result fields + * and read the L4 protocol type + */ + switch (ethertype) { + case ETH_P_IP: + parse_result->l3r = FM_L3_PARSE_RESULT_IPV4; + iph = ip_hdr(skb); + BUG_ON(iph == NULL); + l4_proto = ntohs(iph->protocol); + break; + case ETH_P_IPV6: + parse_result->l3r = FM_L3_PARSE_RESULT_IPV6; + ipv6h = ipv6_hdr(skb); + BUG_ON(ipv6h == NULL); + l4_proto = ntohs(ipv6h->nexthdr); + break; + default: + /* We shouldn't even be here */ + hw_lag_dbg("Can't compute HW csum for L3 proto 0x%x\n", + ntohs(skb->protocol)); + retval = -EIO; + goto return_error; + } + + hw_lag_dbg("skb->protocol(L3):0x%04x, ethertype:%x\n", + ntohs(skb->protocol), ethertype); + + /* Fill in the relevant L4 parse result fields */ + switch (l4_proto) { + case IPPROTO_UDP: + parse_result->l4r = FM_L4_PARSE_RESULT_UDP; + udph = (struct udphdr *)(skb->data + skb_transport_offset(skb)); + hw_lag_dbg("udp org csum:0x%0x\n", udph->check); + skb_set_transport_header(skb, skb_checksum_start_offset(skb)); + skb_checksum_help(skb); + hw_lag_dbg("udp software csum:0x%0x\n", udph->check); + break; + case IPPROTO_TCP: + parse_result->l4r = FM_L4_PARSE_RESULT_TCP; + tcph = (struct tcphdr *)(skb->data + skb_transport_offset(skb)); + p = skb->data; + hw_lag_dbg("\ndmac:%02x:%02x:%02x:%02x:%02x:%02x\n" + "smac:%02x:%02x:%02x:%02x:%02x:%02x\n" + "h_proto:0x%04x\n", p[0], p[1], p[2], p[3], p[4], p[5], + p[6], p[7], p[8], p[9], p[10], p[11], + *((short *)(p + 
12))); + + /* dump skb data info for manual calculate L4CSUM, + * jump over net header first + */ + p += skb_network_offset(skb); + for (i = 0; i < skb->len - skb_network_offset(skb) - 4; i += 4) + hw_lag_dbg("%08x\n", *((unsigned int *) (p + i))); + + for (; i < skb->len - skb_network_offset(skb); i++) + hw_lag_dbg("%02x\n", *(p + i)); + + hw_lag_dbg("tcp org csum:0x%0x.\n", tcph->check); + skb_set_transport_header(skb, skb_checksum_start_offset(skb)); + skb_checksum_help(skb); + hw_lag_dbg("tcp software csum:0x%0x,\n", tcph->check); + + break; + default: + /* This can as well be a BUG() */ + pr_err("Can't compute HW csum for L4 proto 0x%x\n", + l4_proto); + retval = -EIO; + goto return_error; + } + + hw_lag_dbg("l4_proto:0x%04x, result->l2r:0x%04x\n", + l4_proto, parse_result->l2r); + hw_lag_dbg("result->l3r:0x%04x, result->l4r:0x%02x.\n", + parse_result->l3r, parse_result->l4r); + + /* At index 0 is IPOffset_1 as defined in the Parse Results */ + parse_result->ip_off[0] = skb_network_offset(skb); + parse_result->l4_off = skb_transport_offset(skb); + + /* Enable L3 (and L4, if TCP or UDP) HW checksum. */ + fd->cmd |= FM_FD_CMD_RPD | FM_FD_CMD_DTC; + + /* On P1023 and similar platforms fd->cmd interpretation could + * be disabled by setting CONTEXT_A bit ICMD; currently this bit + * is not set so we do not need to check; in the future, if/when + * using context_a we need to check this bit + */ + +return_error: + return retval; +} + +static int __hot dpa_oh_xmit(struct qm_fd *fd, struct qman_fq *tx_fq) +{ + int err, i; + + for (i = 0; i < 100000; i++) { + err = qman_enqueue(tx_fq, fd, 0); + if (err != -EBUSY) + break; + } + + if (unlikely(err < 0)) { + /* TODO differentiate b/w -EBUSY (EQCR full) and other codes? 
*/ + pr_err("qman_enqueue() error.\n"); + return err; + } + + return 0; +} + + +int __hot dpa_oh_tx(struct sk_buff *skb, struct bonding *bond, + struct net_device *net_dev, struct dpa_fq *tx_fq) +{ + struct dpa_priv_s *priv; + struct dpa_bp *bp = bond->params.ohp->tx_bp; + + struct sk_buff **skbh = NULL; + dma_addr_t addr; + struct qm_fd fd; + int err = 0; + int *countptr; + struct rtnl_link_stats64 *percpu_stats; + + tx_fq->net_dev = bond->params.ohp->slave[0]->dev; + priv = netdev_priv(bond->params.ohp->slave[0]->dev); + percpu_stats = &bond->params.oh_stats; + countptr = __this_cpu_ptr(bond->params.ohp->tx_bp->percpu_count); + + if (unlikely(skb_headroom(skb) < priv->tx_headroom)) { + struct sk_buff *skb_new; + + skb_new = skb_realloc_headroom(skb, priv->tx_headroom); + if (unlikely(!skb_new)) { + percpu_stats->tx_errors++; + kfree_skb(skb); + goto done; + } + kfree_skb(skb); + skb = skb_new; + } + + clear_fd(&fd); + + /* store skb backpointer to release the skb later */ + skbh = (struct sk_buff **)(skb->data - priv->tx_headroom); + *skbh = skb; + + /* TODO check if skb->len + priv->tx_headroom < bp->size */ + + /* Enable L3/L4 hardware checksum computation. + * + * We must do this before dma_map_single(), because we may + * need to write into the skb. 
+ */ + err = oh_tx_csum_enable(skb, &fd, + ((char *)skbh) + DPA_TX_PRIV_DATA_SIZE); + + if (unlikely(err < 0)) { + pr_err("HW csum error: %d\n", err); + + return err; + } + + addr = dma_map_single(bp->dev, skbh, + skb->len + priv->tx_headroom, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(bp->dev, addr))) { + pr_err("dma_map_single() failed\n"); + goto dma_mapping_failed; + } + + fd.format = qm_fd_contig; + fd.length20 = skb->len; + fd.offset = priv->tx_headroom; + fd.addr_hi = upper_32_bits(addr); + fd.addr_lo = lower_32_bits(addr); + /* fd.cmd |= FM_FD_CMD_FCO; */ + fd.bpid = bp->bpid; + + /* (Partially) drain the Draining Buffer Pool pool; each core + * acquires at most the number of buffers it put there; since + * BMan allows for up to 8 buffers to be acquired at one time, + * work in batches of 8 for efficiency reasons + */ + dpa_oh_drain_bp(bp); + + if (unlikely(dpa_oh_xmit(&fd, &tx_fq->fq_base) < 0)) { + /* oh tx error, add statistics */ + bond->params.oh_stats.tx_packets++; + bond->params.oh_stats.tx_errors++; + hw_lag_dbg("3ad enqueue_pkt error...txerr_pkt:%llu\n", + bond->params.oh_stats.tx_packets); + goto xmit_failed; + } else { + /* oh tx OK, add statistics */ + bond->params.oh_stats.tx_packets++; + bond->params.oh_stats.tx_bytes += skb->len; + hw_lag_dbg("3ad enqueue_pkt OK...tx_pkt:%llu\n", + bond->params.oh_stats.tx_packets); + return NETDEV_TX_OK; + } + + countptr = __this_cpu_ptr(bp->percpu_count); + (*countptr)++; + + goto done; + +xmit_failed: + dma_unmap_single(bp->dev, addr, fd.offset + fd.length20, DMA_TO_DEVICE); +dma_mapping_failed: + percpu_stats->tx_errors++; + dev_kfree_skb(skb); +done: + return NETDEV_TX_OK; +} +/** + * Enqueue one skb pkt to offline port which attached to a bond. + * bond: current bond's pointer + * skb: pkt which will be enqueued to the offline port + * ceetm_fq: pkt will use this fq for xmit. if this ceetm_fq is + * pointing to NULL, will use default tx_fq for xmit. 
+ * return BOND_OH_SUCCESS if enqueued, otherwise return errors. + */ +int enqueue_pkt_to_oh(struct bonding *bond, struct sk_buff *skb, + struct dpa_fq *ceetm_fq) +{ + struct oh_port_priv *p_oh = bond->params.ohp; + struct net_device *slave_netdev = NULL; + struct dpa_fq *tx_fq = p_oh->oh_tx_lag_fqs; + + slave_netdev = p_oh->slave[0]->dev; + + p_oh->oh_errq->net_dev = slave_netdev; + p_oh->oh_defq->net_dev = slave_netdev; + + if (!is_dpa_eth_port(slave_netdev)) { + pr_err("is not dpaa NIC or NULL pointer.\n"); + return -EINVAL; + } + + if (ceetm_fq) + return dpa_oh_tx(skb, bond, slave_netdev, ceetm_fq); + else + return dpa_oh_tx(skb, bond, slave_netdev, tx_fq); +} +EXPORT_SYMBOL(enqueue_pkt_to_oh); + +static int get_dpa_slave_info(struct slave *slave, uint16_t *tx_channel) +{ + struct dpa_priv_s *priv = netdev_priv(slave->dev); + + if (!is_dpa_eth_port(slave->dev) || !(priv->mac_dev)) + return BOND_OH_ERROR; + + *tx_channel = fm_get_tx_port_channel(priv->mac_dev->port_dev[TX]); + + return BOND_OH_SUCCESS; +} + +int get_dpa_slave_info_ex(struct slave *slave, uint16_t *tx_channel, + struct qman_fq **egress_fq, u32 *first_fqid) +{ + struct dpa_priv_s *priv = netdev_priv(slave->dev); + + if (!is_dpa_eth_port(slave->dev) || !(priv->mac_dev)) + return BOND_OH_ERROR; + + *tx_channel = fm_get_tx_port_channel(priv->mac_dev->port_dev[TX]); + *egress_fq = priv->egress_fqs[0]; + *first_fqid = priv->egress_fqs[0]->fqid; + + return BOND_OH_SUCCESS; +} + +/* Creates Frame Queues, these 2 good subroutines are completely copied from + * Bogdan Purcareata's good patch "Offline port queues initialization", HW_LAG + * need to initialize FQs for an offline port PCD usage with tx_channel/wq of + * slave devices which have already attached to a bond, HW_LAG OH port dequeue, + * then enqueue PCD FQs to DPA-Eth via these PCD FQs. 
+ */ +static int create_oh_pcd_fq(struct qman_fq *fq, u32 fqid_pcdconf, + uint32_t fq_id, uint16_t tx_channel, uint16_t wq_id) +{ + struct qm_mcc_initfq fq_opts; + uint32_t create_flags, init_flags; + uint32_t ret = 0; + + if (!fq) + return BOND_OH_ERROR; + + /* Set flags for FQ create */ + create_flags = QMAN_FQ_FLAG_TO_DCPORTAL; + + /* set egress_ern callback for pcd fqs */ + *fq = oh_pcd_egress_ernq; + + /* Create frame queue */ + ret = qman_create_fq(fq_id, create_flags, fq); + if (ret != 0) + return BOND_OH_ERROR; + + /* Set flags for FQ init */ + init_flags = QMAN_INITFQ_FLAG_SCHED; + + /* Set FQ init options. Specify destination WQ ID and channel */ + memset(&fq_opts, 0, sizeof(fq_opts)); + fq_opts.we_mask = QM_INITFQ_WE_DESTWQ; + fq_opts.fqd.dest.wq = wq_id; + fq_opts.fqd.dest.channel = tx_channel; + + fq_opts.we_mask |= QM_INITFQ_WE_FQCTRL; + fq_opts.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE; + fq_opts.fqd.fq_ctrl |= QM_FQCTRL_CTXASTASHING | QM_FQCTRL_AVOIDBLOCK; + fq_opts.fqd.context_a.stashing.exclusive = QM_STASHING_EXCL_DATA | + QM_STASHING_EXCL_CTX | QM_STASHING_EXCL_ANNOTATION; + fq_opts.fqd.context_a.stashing.data_cl = 2; + fq_opts.fqd.context_a.stashing.annotation_cl = 1; + fq_opts.fqd.context_a.stashing.context_cl = + DIV_ROUND_UP(sizeof(struct qman_fq), 64); + + fq_opts.we_mask |= QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB; + /** + * CTXA[OVFQ] = 1 + * we set PCD own confirmation Q and their own callback in case of + * interrupt DPA-Eth private conf callback/err callback/def callback. 
+ */ + fq_opts.fqd.context_a.hi = 0x80000000; + fq_opts.fqd.context_a.lo = 0x0; + fq_opts.fqd.context_b = fqid_pcdconf; + + /* Initialize frame queue */ + ret = qman_init_fq(fq, init_flags, &fq_opts); + if (ret != 0) { + qman_destroy_fq(fq, 0); + return BOND_OH_ERROR; + } + hw_lag_dbg("FQ create_flags:0X%0x, init_flags:0X%0x\n", + create_flags, init_flags); + + return BOND_OH_SUCCESS; +} + +static int hw_lag_allocate_pcd_queues(struct device *dev, + struct dpa_fq **p_pcd_fq, u32 fqid_pcdconf, u32 fqid, + uint16_t tx_channel, uint16_t wq) +{ + /* Allocate pcd queues */ + int errno = BOND_OH_SUCCESS; + struct dpa_fq *pcd_fq; + hw_lag_dbg("Allocating PCD queues...p_pcd_fq:%p, fqid:%d\n", + *p_pcd_fq, fqid); + pcd_fq = devm_kzalloc(dev, sizeof(struct dpa_fq), GFP_KERNEL); + if (!pcd_fq) { + pr_err("can't allocate slave PCD FQ!\n"); + errno = -ENOMEM; + goto return_kfree; + } + + hw_lag_dbg("Allocated pcd_fq:%p, fqid:%d\n", pcd_fq, fqid); + /* Create pcd queues */ + errno = create_oh_pcd_fq(&pcd_fq->fq_base, fqid_pcdconf, + fqid, tx_channel, wq); + if (errno != BOND_OH_SUCCESS) { + pr_err("can't create lag PCD FQ:%u\n", fqid); + errno = -EINVAL; + goto return_kfree; + } + + *p_pcd_fq = pcd_fq; + hw_lag_dbg("created pcd_fq:%p, fqid:%d, *p_pcd_fq:%p\n", + pcd_fq, fqid, *p_pcd_fq); + return BOND_OH_SUCCESS; + +return_kfree: + if (pcd_fq) + devm_kfree(dev, pcd_fq); + return errno; +} + +/* Destroys Frame Queues */ +static void hw_lag_fq_destroy(struct qman_fq *fq) +{ + int errno = BOND_OH_SUCCESS; + + errno = qman_retire_fq(fq, NULL); + if (unlikely(errno < 0)) + pr_err("qman_retire_fq(%u)=%d\n", qman_fq_fqid(fq), errno); + + errno = qman_oos_fq(fq); + if (unlikely(errno < 0)) + pr_err("qman_oos_fq(%u)=%d\n", qman_fq_fqid(fq), errno); + + qman_destroy_fq(fq, 0); +} +/* release fq memory */ +static int hw_lag_release_fq(struct device *dev, struct dpa_fq *fq) +{ + + if (!fq) + return BOND_OH_ERROR; + + hw_lag_fq_destroy(&fq->fq_base); + if (!dev) + return BOND_OH_ERROR; + + 
devm_kfree(dev, fq); + + return BOND_OH_SUCCESS; +} +/** + * Get DPA slave device information: wq/channel_id, allocate FQID/FQ memory, + * then set FQ flags, record the slave pointer in case of remove these + * information when detaching slave in the future. + */ +int fill_oh_pcd_fqs_with_slave_info(struct bonding *bond, struct slave *slave) +{ + uint16_t tx_channel; + struct dpa_fq *pcd_fq = NULL; + struct oh_port_priv *cur; + u32 fqid; + uint16_t wq_id = 3; /* the default value in DPA-Eth private driver */ + + if (bond->params.mode != BOND_MODE_8023AD) { + pr_err("error, only support 802.3ad when fill OH FQs.\n"); + return BOND_OH_ERROR; + } + + if (is_dpa_eth_port(slave->dev) == false) { + pr_err("error, only support dpa eth nic.\n"); + return BOND_OH_ERROR; + } + + if (bond->slave_cnt > SLAVES_PER_BOND) { + pr_err("error, only support 2 dpa nic per bond.\n"); + return BOND_OH_ERROR; + } + + if (get_dpa_slave_info(slave, &tx_channel) == BOND_OH_ERROR) { + pr_err("error on getting dpa info when fill OH FQs.\n"); + return BOND_OH_ERROR; + } + + cur = bond->params.ohp; + if (!cur) { + pr_err("have not bind an OH port,\n"); + pr_err("will use software tx traffic distribution.\n"); + return BOND_OH_ERROR; + } + + hw_lag_dbg("cur->pcd_fqs[0]:%p, cur->pcd_fqs[1]:%p\n", + cur->pcd_fqs[0], cur->pcd_fqs[1]); + if (!cur->pcd_fqs[0]) + fqid = cur->pcd_fqids_base; + else + fqid = cur->pcd_fqids_base + 1; + + hw_lag_dbg("pcd_fq:%p, fqid:%d Before alloc.\n", pcd_fq, fqid); + + if (hw_lag_allocate_pcd_queues(cur->dpa_oh_dev, &pcd_fq, + cur->fqid_pcdconf, fqid, tx_channel, + wq_id) == BOND_OH_ERROR) { + pr_err("error on create pcd fqs\n"); + return BOND_OH_ERROR; + } + + hw_lag_dbg("pcd_fq:%p, fqid:%d, tx_channel:%d, wq_id:%d After alloc.\n", + pcd_fq, fqid, tx_channel, wq_id); + hw_lag_dbg("fqid:0x%0x, tx_channel:0x%0x, wq_id:0x%0x After alloc.\n", + fqid, tx_channel, wq_id); + + if (!cur->pcd_fqs[0]) { + cur->pcd_fqs[0] = pcd_fq; + cur->slave[0] = slave; + } else if 
(!cur->pcd_fqs[1]) { + cur->pcd_fqs[1] = pcd_fq; + cur->slave[1] = slave; + } + + return BOND_OH_SUCCESS; +} + +/* forget offline port pcd information according to slave pointer, + * then destroy fq and release the fq memory. + */ +int del_oh_pcd_fqs_with_slave_info(struct bonding *bond, struct slave *slave) +{ + struct oh_port_priv *cur; + struct dpa_fq *pcd_fq; + + if (is_dpa_eth_port(slave->dev) == false) { + pr_err("error, only support dpa eth nic.\n"); + return BOND_OH_ERROR; + } + cur = bond->params.ohp; + if (!cur) { + pr_err("have not bind an OH port,\n"); + pr_err("will use software tx traffic distribution.\n"); + return BOND_OH_ERROR; + } + if (slave == cur->slave[0]) { + pcd_fq = cur->pcd_fqs[0]; + cur->pcd_fqs[0] = NULL; + cur->slave[0] = NULL; + } else if (slave == cur->slave[1]) { + pcd_fq = cur->pcd_fqs[1]; + cur->pcd_fqs[1] = NULL; + cur->slave[1] = NULL; + } else + pcd_fq = NULL; + + return hw_lag_release_fq(cur->dpa_oh_dev, pcd_fq); +} + +int release_pcd_mem(struct bonding *bond) +{ + return BOND_OH_SUCCESS; +} + +/* get all offline port information from bond, including + * dev,oh handler, PCD FQid base and PCD FQ count, then + * get the new xmit policy, copy schemes needed from the + * cached_scheme pointer, config PCD params, init PCD dev, + * set PCD Net Env Characteristics, then set Keygen Scheme + * params to the PCD dev, disable offline port, set PCD + * params to the offline port dev, at last enable the offline + * port. + * this subroutine return true when it can apply PCD to + * the offline port, otherwise return false. + */ +bool apply_pcd(struct bonding *bond, int new_xmit_policy) +{ + return true; +} diff --git a/drivers/net/bonding/hw_distribution.h b/drivers/net/bonding/hw_distribution.h new file mode 100644 index 0000000..55e0420 --- /dev/null +++ b/drivers/net/bonding/hw_distribution.h @@ -0,0 +1,138 @@ +/** + * Copyright 2014 Freescale Semiconductor Inc. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef __HARDWARE_DISTRIBUTION_H +#define __HARDWARE_DISTRIBUTION_H + +#include + +#include "bonding.h" +#include "bond_3ad.h" +#include "bond_alb.h" +#include "offline_port.h" +#include "dpaa_eth.h" +#include "dpaa_eth_common.h" + +#define OHFRIENDNAMSIZ 10 /* fman0-oh@1, ... fman1-oh@6 */ +#define OHNODENAMSIZ 24 /* /fsl,dpaa/dpa-fman0-oh@1 */ +#define BOND_OH_SUCCESS 0 +#define BOND_OH_ERROR -1 +#define NO_POLICY 0xFF /* this is a magic number */ + +#define FM1_GB0 0xffe4e0000 +#define FM1_10G 0xffe4f0000 +#define FM2_GB0 0xffe5e0000 +#define FM2_10G 0xffe5f0000 + +#define DPA_FQ_TD 0x200000 + +/* There are 4 FMAN Ethernet Ports per T1040, 2 of them are for the + * Link Aggregation for the L2Swith trunk link, thus there are at + * most 2 ports left for the other Link Aggregation, this implies + * 2 MAX_BOND_CNT * SLAVES_PER_BOND = 4 FMAN Ethernet Ports. + * In fact,we only need numbers of offline port in a DTS: + * offline port count = min(FM_MAX_NUM_OF_OH_PORTS, MAX_BOND_CNT) + */ +#define MAX_BOND_CNT 2 +#define SLAVES_PER_BOND 2 + +#ifdef CONFIG_HW_LAG_DEBUG +#define hw_lag_dbg(fmt, arg...) \ + pr_info("LAG:[CPU %d ln %d fn %s] - " fmt, smp_processor_id(), \ + __LINE__, __func__, ##arg) +#else +#define hw_lag_dbg(fmt, arg...) +#endif + +struct oh_port_priv { + unsigned int oh_channel_id; + struct dpa_oh_config_s *oh_config; + struct dpa_fq *pcd_fqs[SLAVES_PER_BOND]; + struct dpa_fq *oh_defq, *oh_errq; + uint16_t p_oh_rcv_channel; + struct slave *slave[SLAVES_PER_BOND]; + u32 pcd_fqids_base; + uint32_t fqid_pcderr, fqid_pcdconf, fqid_ohtxconf; + struct dpa_fq *oh_pcderrq, *oh_pcdconfq, *oh_txconfq; + /* init dynamic particular tx fqs of offline port for LAG xmit, + * does not reuse tx fqs initialized by offline port driver. 
+ */ + struct dpa_fq *oh_tx_lag_fqs; + const phandle *p_oh_port_handle; + struct platform_device *oh_of_dev, *of_dev; + struct device *dpa_oh_dev, *oh_dev; + struct device_node *dpa_oh_node, *oh_node; + struct dpa_bp *tx_bp; + struct dpa_buffer_layout_s *tx_buf_layout; + uint8_t bpid; /**< External buffer pool id */ + uint16_t bp_size; /**< External buffer pool buffer size */ + int oh_en; /* enable or disable offline port's help at run-time */ + unsigned char friendname[OHFRIENDNAMSIZ]; + unsigned long cell_index; +}; + +extern struct oh_port_priv *poh; +extern int available_num_of_oh_ports; + +int get_oh_info(void); +unsigned int to_which_oh_i_attached(struct oh_port_priv *current_poh); +bool are_all_slaves_linkup(struct bonding *bond); +int get_dcp_id_from_dpa_eth_port(struct net_device *netdev); +int export_oh_port_info_to_ceetm(struct bonding *bond, uint16_t *channel, + unsigned long *fman_dcpid, unsigned long *oh_offset, + unsigned long *cell_index); +int show_dpa_slave_info(struct bonding *bond, struct slave *slave); +int get_dpa_slave_info_ex(struct slave *slave, uint16_t *tx_channel, + struct qman_fq **egress_fq, u32 *first_fqid); +int enqueue_pkt_to_oh(struct bonding *bond, struct sk_buff *skb, + struct dpa_fq *ceetm_fq); +ssize_t bonding_show_offline_port_xmit_statistics(struct device *d, + struct device_attribute *attr, char *buf); +ssize_t bonding_show_offline_ports(struct device *d, + struct device_attribute *attr, char *buf); +ssize_t bonding_show_oh_needed_for_hw_distribution(struct device *d, + struct device_attribute *attr, char *buf); +ssize_t bonding_store_oh_needed_for_hw_distribution(struct device *d, + struct device_attribute *attr, const char *buffer, + size_t count); +ssize_t bonding_show_oh_enable(struct device *d, + struct device_attribute *attr, char *buf); +ssize_t bonding_store_oh_enable(struct device *d, + struct device_attribute *attr, const char *buffer, + size_t count); +int fill_oh_pcd_fqs_with_slave_info(struct bonding *bond, 
struct slave *slave); +int del_oh_pcd_fqs_with_slave_info(struct bonding *bond, struct slave *slave); +bool apply_pcd(struct bonding *bond, int new_xmit_policy); +int release_pcd_mem(struct bonding *bond); +int init_status(struct net_device *netdev); +void add_statistics(struct bonding *bond, struct rtnl_link_stats64 *stats); +#endif /* __HARDWARE_DISTRIBUTION_H */ -- cgit v0.10.2 From 9f3974232b53f89b97aef7c151a35fa6bbca860f Mon Sep 17 00:00:00 2001 From: Jianhua Xie Date: Fri, 3 Jan 2014 17:26:47 +0800 Subject: bonding: kernel space PCD for xmit distribution The kernel space PCD part provides hash based outgoing traffic distribution. The sources can be L2 MAC/L3 SRC and DST IP addr/ L4 SRC and DST port information. Current version only support L2 information hash which is the default transmit policy in the Linux bonding driver. Signed-off-by: Jianhua Xie Change-Id: Ifd85630ab0eebd77713574f6cf51fb92203a1c06 Reviewed-on: http://git.am.freescale.net:8181/10414 Tested-by: Review Code-CDREVIEW Reviewed-by: Jiafei Pan Reviewed-by: Florinel Iordache Reviewed-by: Jose Rivera diff --git a/drivers/net/bonding/Makefile b/drivers/net/bonding/Makefile index 0024f07..57bce86 100644 --- a/drivers/net/bonding/Makefile +++ b/drivers/net/bonding/Makefile @@ -17,6 +17,6 @@ proc-$(CONFIG_PROC_FS) += bond_procfs.o bonding-objs += $(proc-y) hash_pcd_based_xmit_frames_distribution-$(CONFIG_HW_DISTRIBUTION_WITH_OH) += \ - hw_distribution.o + hw_distribution.o hw_oh_pcd.o bonding-objs += $(hash_pcd_based_xmit_frames_distribution-y) diff --git a/drivers/net/bonding/hw_distribution.c b/drivers/net/bonding/hw_distribution.c index dce0ea1..6eeb58a 100644 --- a/drivers/net/bonding/hw_distribution.c +++ b/drivers/net/bonding/hw_distribution.c @@ -2253,24 +2253,3 @@ int del_oh_pcd_fqs_with_slave_info(struct bonding *bond, struct slave *slave) return hw_lag_release_fq(cur->dpa_oh_dev, pcd_fq); } - -int release_pcd_mem(struct bonding *bond) -{ - return BOND_OH_SUCCESS; -} - -/* get all offline 
port information from bond, including - * dev,oh handler, PCD FQid base and PCD FQ count, then - * get the new xmit policy, copy schemes needed from the - * cached_scheme pointer, config PCD params, init PCD dev, - * set PCD Net Env Characteristics, then set Keygen Scheme - * params to the PCD dev, disable offline port, set PCD - * params to the offline port dev, at last enable the offline - * port. - * this subroutine return true when it can apply PCD to - * the offline port, otherwise return false. - */ -bool apply_pcd(struct bonding *bond, int new_xmit_policy) -{ - return true; -} diff --git a/drivers/net/bonding/hw_distribution.h b/drivers/net/bonding/hw_distribution.h index 55e0420..32f6e2f 100644 --- a/drivers/net/bonding/hw_distribution.h +++ b/drivers/net/bonding/hw_distribution.h @@ -38,9 +38,15 @@ #include "bonding.h" #include "bond_3ad.h" #include "bond_alb.h" +#include "lnxwrp_fm.h" #include "offline_port.h" #include "dpaa_eth.h" #include "dpaa_eth_common.h" +/* FMD includes */ +#include "error_ext.h" +#include "fm_pcd_ext.h" +#include "fm_cc.h" +#include "crc64.h" #define OHFRIENDNAMSIZ 10 /* fman0-oh@1, ... 
fman1-oh@6 */ #define OHNODENAMSIZ 24 /* /fsl,dpaa/dpa-fman0-oh@1 */ @@ -98,6 +104,27 @@ struct oh_port_priv { int oh_en; /* enable or disable offline port's help at run-time */ unsigned char friendname[OHFRIENDNAMSIZ]; unsigned long cell_index; + t_Handle h_FmPcd; + t_Handle h_FmPort; + t_Handle h_NetEnv; + + t_FmPcdNetEnvParams *netEnvParams; + t_FmPcdKgSchemeParams *scheme; + t_FmPortPcdParams *pcdParam; + t_FmPortPcdPrsParams *prsParam; + t_FmPortPcdKgParams *kgParam; + int numberof_pre_schemes; +}; + +enum e_dist_hdr { + L2_MAC = 0, + MAC_L3_IPV6, + MAC_L3_IPV4, + MAC_IPV6_TCP, + MAC_IPV6_UDP, + MAC_IPV4_TCP, + MAC_IPV4_UDP, + MAX_SCHEMES }; extern struct oh_port_priv *poh; diff --git a/drivers/net/bonding/hw_oh_pcd.c b/drivers/net/bonding/hw_oh_pcd.c new file mode 100644 index 0000000..424281b --- /dev/null +++ b/drivers/net/bonding/hw_oh_pcd.c @@ -0,0 +1,744 @@ +/** + * Copyright 2014 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. 
+ * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "hw_distribution.h" + +static t_LnxWrpFmDev *get_FmDev_from_FmPortDev(t_LnxWrpFmPortDev *p_FmPortDev) +{ + t_LnxWrpFmDev *p_LnxWrpFmDev; + p_LnxWrpFmDev = (t_LnxWrpFmDev *) p_FmPortDev->h_LnxWrpFmDev; + return p_LnxWrpFmDev; +} + +static t_LnxWrpFmPortDev *get_FmPortDev_from_fm_port(struct fm_port *fm_port) +{ + t_LnxWrpFmPortDev *p_LnxWrpFmPortDev; + p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev *) fm_port; + return p_LnxWrpFmPortDev; +} + +/* for: FM_PORT_Disable/FM_PORT_Enable/FM_PORT_SetPCD + * (t_Handle h_FmPort,...) 
+ */ +static t_Handle get_h_FmPort_from_fm_port(struct fm_port *fm_port) +{ + t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = + get_FmPortDev_from_fm_port(fm_port); + return p_LnxWrpFmPortDev->h_Dev; +} + +/* for: FM_PCD_Enable/FM_PCD_NetEnvCharacteristicsSet/ + * FM_PCD_KgSchemeSet(t_Handle h_FmPcd) + */ +static t_Handle get_h_FmPcd_from_fm_port(struct fm_port *fm_port) +{ + t_LnxWrpFmPortDev *p_LnxWrpFmPortDev; + t_LnxWrpFmDev *p_LnxWrpFmDev; + t_Handle h_FmPcd; + p_LnxWrpFmPortDev = get_FmPortDev_from_fm_port(fm_port); + p_LnxWrpFmDev = get_FmDev_from_FmPortDev(p_LnxWrpFmPortDev); + h_FmPcd = p_LnxWrpFmDev->h_PcdDev; + return h_FmPcd; +} + +static int alloc_pcd_mem(struct fm_port *fm_port, uint8_t numOfSchemes, + u32 pcd_fqids_base, uint8_t distNumOfQueues, + struct bonding *bond) +{ + t_Handle h_FmPcd; + t_Handle h_FmPort; + t_Handle h_NetEnv; + + t_FmPcdNetEnvParams *netEnvParams; + t_FmPcdKgSchemeParams *scheme; + t_FmPortPcdParams *pcdParam; + t_FmPortPcdPrsParams *prsParam; + t_FmPortPcdKgParams *kgParam; + /* reuse below "ea_xxx_yyy" variables, can reduce 120 lines of codes */ + t_FmPcdExtractEntry ea_eth_sa, ea_eth_da, ea_ipv4_sa, ea_ipv4_da, + ea_ipv6_sa, ea_ipv6_da, ea_tcp_sp, ea_tcp_dp, + ea_udp_sp, ea_udp_dp, ea_nexthdr, ea_nextp; + + if (bond->params.ohp->h_FmPcd) + return BOND_OH_SUCCESS; + + /* get handle of fm_port/fm_pcd from kernel struct */ + h_FmPort = get_h_FmPort_from_fm_port(fm_port); + if (!h_FmPort) { + pr_err("error on get_h_FmPort_from_fm_port.\n"); + return E_INVALID_VALUE; + } + h_FmPcd = get_h_FmPcd_from_fm_port(fm_port); + if (!h_FmPcd) { + pr_err("error on get_h_FmPcd_from_fm_port.\n"); + return E_INVALID_VALUE; + } + /* set net env, get handle of net env */ + netEnvParams = kzalloc(sizeof(t_FmPcdNetEnvParams), GFP_KERNEL); + if (!netEnvParams) { + pr_err("Failed to allocate netEnvParams.\n"); + return -ENOMEM; + } + hw_lag_dbg("netEnvParams:%p\n", netEnvParams); + netEnvParams->numOfDistinctionUnits = 5; + 
netEnvParams->units[0].hdrs[0].hdr = HEADER_TYPE_ETH; + netEnvParams->units[1].hdrs[0].hdr = HEADER_TYPE_IPv4; + netEnvParams->units[2].hdrs[0].hdr = HEADER_TYPE_IPv6; + netEnvParams->units[3].hdrs[0].hdr = HEADER_TYPE_TCP; + netEnvParams->units[4].hdrs[0].hdr = HEADER_TYPE_UDP; + + FM_PCD_Enable(h_FmPcd); + h_NetEnv = FM_PCD_NetEnvCharacteristicsSet(h_FmPcd, netEnvParams); + if (!h_NetEnv) { + pr_err("error on FM_PCD_NetEnvCharacteristicsSet.\n"); + return E_INVALID_VALUE; + } + hw_lag_dbg("FM_PCD_NetEnvCharacteristicsSet() ok.\n"); + /* bind port to PCD properties */ + /* initialize PCD parameters */ + pcdParam = kzalloc(sizeof(t_FmPortPcdParams), GFP_KERNEL); + if (!pcdParam) { + pr_err("Failed to allocate pcdParam.\n"); + return -ENOMEM; + } + hw_lag_dbg("pcdParam:%p\n", pcdParam); + /* initialize parser port parameters */ + prsParam = kzalloc(sizeof(t_FmPortPcdPrsParams), GFP_KERNEL); + if (!prsParam) { + pr_err("Failed to allocate prsParam.\n"); + return -ENOMEM; + } + + hw_lag_dbg("prsParam:%p\n", prsParam); + prsParam->parsingOffset = 0; + prsParam->firstPrsHdr = HEADER_TYPE_ETH; + pcdParam->h_NetEnv = h_NetEnv; + pcdParam->pcdSupport = e_FM_PORT_PCD_SUPPORT_PRS_AND_KG; + pcdParam->p_PrsParams = prsParam; + + /* initialize Keygen port parameters */ + kgParam = kzalloc(sizeof(t_FmPortPcdKgParams), GFP_KERNEL); + if (!kgParam) { + pr_err("Failed to allocate kgParam.\n"); + return -ENOMEM; + } + + hw_lag_dbg("kgParam:%p\n", kgParam); + kgParam->numOfSchemes = numOfSchemes; + kgParam->directScheme = FALSE; + + pcdParam->p_KgParams = kgParam; + + /* initialize schemes according to numOfSchemes */ + scheme = kzalloc(sizeof(t_FmPcdKgSchemeParams) * MAX_SCHEMES, + GFP_KERNEL); + if (!scheme) { + pr_err("Failed to allocate scheme.\n"); + return -ENOMEM; + } + + hw_lag_dbg("scheme:%p\n", scheme); + /* Distribution: according to Layer2 info MAC */ + scheme[L2_MAC].alwaysDirect = 0; + scheme[L2_MAC].netEnvParams.numOfDistinctionUnits = 1; + 
scheme[L2_MAC].netEnvParams.unitIds[0] = 0; + scheme[L2_MAC].useHash = 1; + scheme[L2_MAC].baseFqid = pcd_fqids_base; + scheme[L2_MAC].nextEngine = e_FM_PCD_DONE; + scheme[L2_MAC].schemeCounter.update = 1; + scheme[L2_MAC].schemeCounter.value = 0; + scheme[L2_MAC].keyExtractAndHashParams.numOfUsedMasks = 0; + scheme[L2_MAC].keyExtractAndHashParams.hashShift = 0; + scheme[L2_MAC].keyExtractAndHashParams.symmetricHash = 0; + scheme[L2_MAC].keyExtractAndHashParams.hashDistributionNumOfFqids = + distNumOfQueues; + scheme[L2_MAC].keyExtractAndHashParams.numOfUsedExtracts = 2; + scheme[L2_MAC].numOfUsedExtractedOrs = 0; + scheme[L2_MAC].netEnvParams.h_NetEnv = h_NetEnv; + scheme[L2_MAC].id.relativeSchemeId = L2_MAC; + + /* Extract field:ethernet.src */ + memset(&ea_eth_sa, 0, sizeof(t_FmPcdExtractEntry)); + ea_eth_sa.type = e_FM_PCD_EXTRACT_BY_HDR; + ea_eth_sa.extractByHdr.hdr = HEADER_TYPE_ETH; + ea_eth_sa.extractByHdr.hdrIndex = e_FM_PCD_HDR_INDEX_NONE; + ea_eth_sa.extractByHdr.ignoreProtocolValidation = 0; + ea_eth_sa.extractByHdr.type = e_FM_PCD_EXTRACT_FULL_FIELD; + ea_eth_sa.extractByHdr.extractByHdrType.fullField.eth = + NET_HEADER_FIELD_ETH_SA; + scheme[L2_MAC].keyExtractAndHashParams.extractArray[0] = + ea_eth_sa; + + /* Extract field:ethernet.dst */ + memset(&ea_eth_sa, 0, sizeof(t_FmPcdExtractEntry)); + ea_eth_da.type = e_FM_PCD_EXTRACT_BY_HDR; + ea_eth_da.extractByHdr.hdr = HEADER_TYPE_ETH; + ea_eth_da.extractByHdr.hdrIndex = e_FM_PCD_HDR_INDEX_NONE; + ea_eth_da.extractByHdr.ignoreProtocolValidation = 0; + ea_eth_da.extractByHdr.type = e_FM_PCD_EXTRACT_FULL_FIELD; + ea_eth_da.extractByHdr.extractByHdrType.fullField.eth = + NET_HEADER_FIELD_ETH_DA; + scheme[L2_MAC].keyExtractAndHashParams.extractArray[1] = + ea_eth_da; + + /* Distribution: Layer2 and Layer3 info, MAC and ipv6 */ + scheme[MAC_L3_IPV6].alwaysDirect = 0; + scheme[MAC_L3_IPV6].netEnvParams.numOfDistinctionUnits = 2; + scheme[MAC_L3_IPV6].netEnvParams.unitIds[0] = 0; + 
scheme[MAC_L3_IPV6].netEnvParams.unitIds[1] = 2; + scheme[MAC_L3_IPV6].useHash = 1; + scheme[MAC_L3_IPV6].baseFqid = pcd_fqids_base; + scheme[MAC_L3_IPV6].nextEngine = e_FM_PCD_DONE; + scheme[MAC_L3_IPV6].schemeCounter.update = 1; + scheme[MAC_L3_IPV6].schemeCounter.value = 0; + scheme[MAC_L3_IPV6].keyExtractAndHashParams.numOfUsedMasks = 0; + scheme[MAC_L3_IPV6].keyExtractAndHashParams.hashShift = 0; + scheme[MAC_L3_IPV6].keyExtractAndHashParams.symmetricHash = 0; + scheme[MAC_L3_IPV6].keyExtractAndHashParams.hashDistributionNumOfFqids = + distNumOfQueues; + scheme[MAC_L3_IPV6].keyExtractAndHashParams.numOfUsedExtracts = 4; + scheme[MAC_L3_IPV6].numOfUsedExtractedOrs = 0; + scheme[MAC_L3_IPV6].netEnvParams.h_NetEnv = h_NetEnv; + scheme[MAC_L3_IPV6].id.relativeSchemeId = MAC_L3_IPV6; + /* Extract field:ethernet.src */ + scheme[MAC_L3_IPV6].keyExtractAndHashParams.extractArray[0] = + ea_eth_sa; + /* Extract field:ethernet.dst */ + scheme[MAC_L3_IPV6].keyExtractAndHashParams.extractArray[1] = + ea_eth_da; + + /* Extract field:ipv6.src */ + memset(&ea_ipv6_sa, 0, sizeof(t_FmPcdExtractEntry)); + ea_ipv6_sa.type = e_FM_PCD_EXTRACT_BY_HDR; + ea_ipv6_sa.extractByHdr.hdr = HEADER_TYPE_IPv6; + ea_ipv6_sa.extractByHdr.hdrIndex = e_FM_PCD_HDR_INDEX_NONE; + ea_ipv6_sa.extractByHdr.ignoreProtocolValidation = 0; + ea_ipv6_sa.extractByHdr.type = e_FM_PCD_EXTRACT_FULL_FIELD; + ea_ipv6_sa.extractByHdr.extractByHdrType.fullField.ipv6 = + NET_HEADER_FIELD_IPv6_SRC_IP; + scheme[MAC_L3_IPV6].keyExtractAndHashParams.extractArray[2] = + ea_ipv6_sa; + + /* Extract field:ipv6.dst */ + memset(&ea_ipv6_da, 0, sizeof(t_FmPcdExtractEntry)); + ea_ipv6_da.type = e_FM_PCD_EXTRACT_BY_HDR; + ea_ipv6_da.extractByHdr.hdr = HEADER_TYPE_IPv6; + ea_ipv6_da.extractByHdr.hdrIndex = e_FM_PCD_HDR_INDEX_NONE; + ea_ipv6_da.extractByHdr.ignoreProtocolValidation = 0; + ea_ipv6_da.extractByHdr.type = e_FM_PCD_EXTRACT_FULL_FIELD; + ea_ipv6_da.extractByHdr.extractByHdrType.fullField.ipv6 = + 
NET_HEADER_FIELD_IPv6_DST_IP; + scheme[MAC_L3_IPV6].keyExtractAndHashParams.extractArray[3] = + ea_ipv6_da; + + /* Distribution: Layer2 and Layer3 info, MAC ipv4 */ + scheme[MAC_L3_IPV4].alwaysDirect = 0; + scheme[MAC_L3_IPV4].netEnvParams.numOfDistinctionUnits = 2; + scheme[MAC_L3_IPV4].netEnvParams.unitIds[0] = 0; + scheme[MAC_L3_IPV4].netEnvParams.unitIds[1] = 1; + scheme[MAC_L3_IPV4].useHash = 1; + scheme[MAC_L3_IPV4].baseFqid = pcd_fqids_base; + scheme[MAC_L3_IPV4].nextEngine = e_FM_PCD_DONE; + scheme[MAC_L3_IPV4].schemeCounter.update = 1; + scheme[MAC_L3_IPV4].schemeCounter.value = 0; + scheme[MAC_L3_IPV4].keyExtractAndHashParams.numOfUsedMasks = 0; + scheme[MAC_L3_IPV4].keyExtractAndHashParams.hashShift = 0; + scheme[MAC_L3_IPV4].keyExtractAndHashParams.symmetricHash = 0; + scheme[MAC_L3_IPV4].keyExtractAndHashParams.hashDistributionNumOfFqids = + distNumOfQueues; + scheme[MAC_L3_IPV4].keyExtractAndHashParams.numOfUsedExtracts = 4; + scheme[MAC_L3_IPV4].numOfUsedExtractedOrs = 0; + scheme[MAC_L3_IPV4].netEnvParams.h_NetEnv = h_NetEnv; + scheme[MAC_L3_IPV4].id.relativeSchemeId = MAC_L3_IPV4; + /* Extract field:ethernet.src */ + scheme[MAC_L3_IPV4].keyExtractAndHashParams.extractArray[0] = + ea_eth_sa; + /* Extract field:ethernet.dst */ + scheme[MAC_L3_IPV4].keyExtractAndHashParams.extractArray[1] = + ea_eth_da; + /* Extract field:ipv4.src */ + memset(&ea_ipv4_sa, 0, sizeof(t_FmPcdExtractEntry)); + ea_ipv4_sa.type = e_FM_PCD_EXTRACT_BY_HDR; + ea_ipv4_sa.extractByHdr.hdr = HEADER_TYPE_IPv4; + ea_ipv4_sa.extractByHdr.hdrIndex = e_FM_PCD_HDR_INDEX_NONE; + ea_ipv4_sa.extractByHdr.ignoreProtocolValidation = 0; + ea_ipv4_sa.extractByHdr.type = e_FM_PCD_EXTRACT_FULL_FIELD; + ea_ipv4_sa.extractByHdr.extractByHdrType.fullField.ipv4 = + NET_HEADER_FIELD_IPv4_SRC_IP; + scheme[MAC_L3_IPV4].keyExtractAndHashParams.extractArray[2] = + ea_ipv4_sa; + /* Extract field:ipv4.dst */ + memset(&ea_ipv4_da, 0, sizeof(t_FmPcdExtractEntry)); + ea_ipv4_da.type = 
e_FM_PCD_EXTRACT_BY_HDR; + ea_ipv4_da.extractByHdr.hdr = HEADER_TYPE_IPv4; + ea_ipv4_da.extractByHdr.hdrIndex = e_FM_PCD_HDR_INDEX_NONE; + ea_ipv4_da.extractByHdr.ignoreProtocolValidation = 0; + ea_ipv4_da.extractByHdr.type = e_FM_PCD_EXTRACT_FULL_FIELD; + ea_ipv4_da.extractByHdr.extractByHdrType.fullField.ipv4 = + NET_HEADER_FIELD_IPv4_DST_IP; + scheme[MAC_L3_IPV4].keyExtractAndHashParams.extractArray[3] = + ea_ipv4_da; + + /* Distribution: Layer234 info MAC ipv6 tcp */ + scheme[MAC_IPV6_TCP].alwaysDirect = 0; + scheme[MAC_IPV6_TCP].netEnvParams.numOfDistinctionUnits = 3; + scheme[MAC_IPV6_TCP].netEnvParams.unitIds[0] = 0; + scheme[MAC_IPV6_TCP].netEnvParams.unitIds[1] = 2; + scheme[MAC_IPV6_TCP].netEnvParams.unitIds[2] = 3; + scheme[MAC_IPV6_TCP].useHash = 1; + scheme[MAC_IPV6_TCP].baseFqid = pcd_fqids_base; + scheme[MAC_IPV6_TCP].nextEngine = e_FM_PCD_DONE; + scheme[MAC_IPV6_TCP].schemeCounter.update = 1; + scheme[MAC_IPV6_TCP].schemeCounter.value = 0; + scheme[MAC_IPV6_TCP].keyExtractAndHashParams.numOfUsedMasks = 0; + scheme[MAC_IPV6_TCP].keyExtractAndHashParams.hashShift = 0; + scheme[MAC_IPV6_TCP].keyExtractAndHashParams.symmetricHash = 0; + scheme[MAC_IPV6_TCP].keyExtractAndHashParams.hashDistributionNumOfFqids + = distNumOfQueues; + scheme[MAC_IPV6_TCP].keyExtractAndHashParams.numOfUsedExtracts = 7; + scheme[MAC_IPV6_TCP].numOfUsedExtractedOrs = 0; + scheme[MAC_IPV6_TCP].netEnvParams.h_NetEnv = h_NetEnv; + scheme[MAC_IPV6_TCP].id.relativeSchemeId = MAC_IPV6_TCP; + /* Extract field:ethernet.src */ + scheme[MAC_IPV6_TCP].keyExtractAndHashParams.extractArray[0] = + ea_eth_sa; + /* Extract field:ethernet.dst */ + scheme[MAC_IPV6_TCP].keyExtractAndHashParams.extractArray[1] = + ea_eth_da; + /* Extract field:ipv6.src */ + scheme[MAC_IPV6_TCP].keyExtractAndHashParams.extractArray[2] = + ea_ipv6_sa; + /* Extract field:ipv6.dst */ + scheme[MAC_IPV6_TCP].keyExtractAndHashParams.extractArray[3] = + ea_ipv6_da; + + /* Extract field:ipv6.nexthdr */ + 
memset(&ea_nexthdr, 0, sizeof(t_FmPcdExtractEntry)); + ea_nexthdr.type = e_FM_PCD_EXTRACT_BY_HDR; + ea_nexthdr.extractByHdr.hdr = HEADER_TYPE_IPv6; + ea_nexthdr.extractByHdr.hdrIndex = e_FM_PCD_HDR_INDEX_NONE; + ea_nexthdr.extractByHdr.ignoreProtocolValidation = 0; + ea_nexthdr.extractByHdr.type = e_FM_PCD_EXTRACT_FULL_FIELD; + ea_nexthdr.extractByHdr.extractByHdrType.fullField.ipv6 = + NET_HEADER_FIELD_IPv6_NEXT_HDR; + scheme[MAC_IPV6_TCP].keyExtractAndHashParams.extractArray[4] = + ea_nexthdr; + /* Extract field:tcp.sport */ + memset(&ea_tcp_sp, 0, sizeof(t_FmPcdExtractEntry)); + ea_tcp_sp.type = e_FM_PCD_EXTRACT_BY_HDR; + ea_tcp_sp.extractByHdr.hdr = HEADER_TYPE_TCP; + ea_tcp_sp.extractByHdr.hdrIndex = e_FM_PCD_HDR_INDEX_NONE; + ea_tcp_sp.extractByHdr.ignoreProtocolValidation = 0; + ea_tcp_sp.extractByHdr.type = e_FM_PCD_EXTRACT_FULL_FIELD; + ea_tcp_sp.extractByHdr.extractByHdrType.fullField.tcp = + NET_HEADER_FIELD_TCP_PORT_SRC; + scheme[MAC_IPV6_TCP].keyExtractAndHashParams.extractArray[5] = + ea_tcp_sp; + /* Extract field:tcp.dport */ + memset(&ea_tcp_dp, 0, sizeof(t_FmPcdExtractEntry)); + ea_tcp_dp.type = e_FM_PCD_EXTRACT_BY_HDR; + ea_tcp_dp.extractByHdr.hdr = HEADER_TYPE_TCP; + ea_tcp_dp.extractByHdr.hdrIndex = e_FM_PCD_HDR_INDEX_NONE; + ea_tcp_dp.extractByHdr.ignoreProtocolValidation = 0; + ea_tcp_dp.extractByHdr.type = e_FM_PCD_EXTRACT_FULL_FIELD; + ea_tcp_dp.extractByHdr.extractByHdrType.fullField.tcp = + NET_HEADER_FIELD_TCP_PORT_DST; + scheme[MAC_IPV6_TCP].keyExtractAndHashParams.extractArray[6] = + ea_tcp_dp; + + /* Distribution: Layer234 info MAC ipv6 udp */ + scheme[MAC_IPV6_UDP].alwaysDirect = 0; + scheme[MAC_IPV6_UDP].netEnvParams.numOfDistinctionUnits = 3; + scheme[MAC_IPV6_UDP].netEnvParams.unitIds[0] = 0; + scheme[MAC_IPV6_UDP].netEnvParams.unitIds[1] = 2; + scheme[MAC_IPV6_UDP].netEnvParams.unitIds[2] = 4; + scheme[MAC_IPV6_UDP].useHash = 1; + scheme[MAC_IPV6_UDP].baseFqid = pcd_fqids_base; + scheme[MAC_IPV6_UDP].nextEngine = e_FM_PCD_DONE; + 
scheme[MAC_IPV6_UDP].schemeCounter.update = 1; + scheme[MAC_IPV6_UDP].schemeCounter.value = 0; + scheme[MAC_IPV6_UDP].keyExtractAndHashParams.numOfUsedMasks = 0; + scheme[MAC_IPV6_UDP].keyExtractAndHashParams.hashShift = 0; + scheme[MAC_IPV6_UDP].keyExtractAndHashParams.symmetricHash = 0; + scheme[MAC_IPV6_UDP].keyExtractAndHashParams.hashDistributionNumOfFqids + = distNumOfQueues; + scheme[MAC_IPV6_UDP].keyExtractAndHashParams.numOfUsedExtracts = 7; + scheme[MAC_IPV6_UDP].numOfUsedExtractedOrs = 0; + scheme[MAC_IPV6_UDP].netEnvParams.h_NetEnv = h_NetEnv; + scheme[MAC_IPV6_UDP].id.relativeSchemeId = MAC_IPV6_UDP; + /* Extract field:ethernet.src */ + scheme[MAC_IPV6_UDP].keyExtractAndHashParams.extractArray[0] = + ea_eth_sa; + /* Extract field:ethernet.dst */ + scheme[MAC_IPV6_UDP].keyExtractAndHashParams.extractArray[1] = + ea_eth_da; + /* Extract field:ipv6.src */ + scheme[MAC_IPV6_UDP].keyExtractAndHashParams.extractArray[2] = + ea_ipv6_sa; + /* Extract field:ipv6.dst */ + scheme[MAC_IPV6_UDP].keyExtractAndHashParams.extractArray[3] = + ea_ipv6_da; + /* Extract field:ipv6.nexthdr */ + scheme[MAC_IPV6_UDP].keyExtractAndHashParams.extractArray[4] = + ea_nexthdr; + /* Extract field:udp.sport */ + memset(&ea_udp_sp, 0, sizeof(t_FmPcdExtractEntry)); + ea_udp_sp.type = e_FM_PCD_EXTRACT_BY_HDR; + ea_udp_sp.extractByHdr.hdr = HEADER_TYPE_UDP; + ea_udp_sp.extractByHdr.hdrIndex = e_FM_PCD_HDR_INDEX_NONE; + ea_udp_sp.extractByHdr.ignoreProtocolValidation = 0; + ea_udp_sp.extractByHdr.type = e_FM_PCD_EXTRACT_FULL_FIELD; + ea_udp_sp.extractByHdr.extractByHdrType.fullField.udp + = NET_HEADER_FIELD_UDP_PORT_SRC; + scheme[MAC_IPV6_UDP].keyExtractAndHashParams.extractArray[5] = + ea_udp_sp; + /* Extract field:udp.dport */ + memset(&ea_udp_dp, 0, sizeof(t_FmPcdExtractEntry)); + ea_udp_dp.type = e_FM_PCD_EXTRACT_BY_HDR; + ea_udp_dp.extractByHdr.hdr = HEADER_TYPE_UDP; + ea_udp_dp.extractByHdr.hdrIndex = e_FM_PCD_HDR_INDEX_NONE; + ea_udp_dp.extractByHdr.ignoreProtocolValidation = 0; 
+ ea_udp_dp.extractByHdr.type = e_FM_PCD_EXTRACT_FULL_FIELD; + ea_udp_dp.extractByHdr.extractByHdrType.fullField.udp + = NET_HEADER_FIELD_UDP_PORT_DST; + scheme[MAC_IPV6_UDP].keyExtractAndHashParams.extractArray[6] = + ea_udp_dp; + + /* Distribution: Layer234 info MAC ipv4 tcp */ + scheme[MAC_IPV4_TCP].alwaysDirect = 0; + scheme[MAC_IPV4_TCP].netEnvParams.numOfDistinctionUnits = 3; + scheme[MAC_IPV4_TCP].netEnvParams.unitIds[0] = 0; + scheme[MAC_IPV4_TCP].netEnvParams.unitIds[1] = 1; + scheme[MAC_IPV4_TCP].netEnvParams.unitIds[2] = 3; + scheme[MAC_IPV4_TCP].useHash = 1; + scheme[MAC_IPV4_TCP].baseFqid = pcd_fqids_base; + scheme[MAC_IPV4_TCP].nextEngine = e_FM_PCD_DONE; + scheme[MAC_IPV4_TCP].schemeCounter.update = 1; + scheme[MAC_IPV4_TCP].schemeCounter.value = 0; + scheme[MAC_IPV4_TCP].keyExtractAndHashParams.numOfUsedMasks = 0; + scheme[MAC_IPV4_TCP].keyExtractAndHashParams.hashShift = 0; + scheme[MAC_IPV4_TCP].keyExtractAndHashParams.symmetricHash = 0; + scheme[MAC_IPV4_TCP].keyExtractAndHashParams.hashDistributionNumOfFqids + = distNumOfQueues; + scheme[MAC_IPV4_TCP].keyExtractAndHashParams.numOfUsedExtracts = 7; + scheme[MAC_IPV4_TCP].numOfUsedExtractedOrs = 0; + scheme[MAC_IPV4_TCP].netEnvParams.h_NetEnv = h_NetEnv; + scheme[MAC_IPV4_TCP].id.relativeSchemeId = MAC_IPV4_TCP; + /* Extract field:ethernet.src */ + scheme[MAC_IPV4_TCP].keyExtractAndHashParams.extractArray[0] = + ea_eth_sa; + /* Extract field:ethernet.dst */ + scheme[MAC_IPV4_TCP].keyExtractAndHashParams.extractArray[1] = + ea_eth_da; + /* Extract field:ipv4.src */ + scheme[MAC_IPV4_TCP].keyExtractAndHashParams.extractArray[2] = + ea_ipv4_sa; + /* Extract field:ipv4.dst */ + scheme[MAC_IPV4_TCP].keyExtractAndHashParams.extractArray[3] = + ea_ipv4_da; + /* Extract field:ipv4.nextp */ + memset(&ea_nextp, 0, sizeof(t_FmPcdExtractEntry)); + ea_nextp.type = e_FM_PCD_EXTRACT_BY_HDR; + ea_nextp.extractByHdr.hdr = HEADER_TYPE_IPv4; + ea_nextp.extractByHdr.hdrIndex = e_FM_PCD_HDR_INDEX_NONE; + 
ea_nextp.extractByHdr.ignoreProtocolValidation = 0; + ea_nextp.extractByHdr.type = e_FM_PCD_EXTRACT_FULL_FIELD; + ea_nextp.extractByHdr.extractByHdrType.fullField.ipv4 = + NET_HEADER_FIELD_IPv4_PROTO; + scheme[MAC_IPV4_TCP].keyExtractAndHashParams.extractArray[4] = + ea_nextp; + /* Extract field:tcp.sport */ + scheme[MAC_IPV4_TCP].keyExtractAndHashParams.extractArray[5] = + ea_tcp_sp; + /* Extract field:tcp.dport */ + scheme[MAC_IPV4_TCP].keyExtractAndHashParams.extractArray[6] = + ea_tcp_dp; + + /* Distribution: Layer234 info MAC ipv4 udp */ + scheme[MAC_IPV4_UDP].alwaysDirect = 0; + scheme[MAC_IPV4_UDP].netEnvParams.numOfDistinctionUnits = 3; + scheme[MAC_IPV4_UDP].netEnvParams.unitIds[0] = 0; + scheme[MAC_IPV4_UDP].netEnvParams.unitIds[1] = 1; + scheme[MAC_IPV4_UDP].netEnvParams.unitIds[2] = 4; + scheme[MAC_IPV4_UDP].useHash = 1; + scheme[MAC_IPV4_UDP].baseFqid = pcd_fqids_base; + scheme[MAC_IPV4_UDP].nextEngine = e_FM_PCD_DONE; + scheme[MAC_IPV4_UDP].schemeCounter.update = 1; + scheme[MAC_IPV4_UDP].schemeCounter.value = 0; + scheme[MAC_IPV4_UDP].keyExtractAndHashParams.numOfUsedMasks = 0; + scheme[MAC_IPV4_UDP].keyExtractAndHashParams.hashShift = 0; + scheme[MAC_IPV4_UDP].keyExtractAndHashParams.symmetricHash = 0; + scheme[MAC_IPV4_UDP].keyExtractAndHashParams.hashDistributionNumOfFqids + = distNumOfQueues; + scheme[MAC_IPV4_UDP].keyExtractAndHashParams.numOfUsedExtracts = 7; + scheme[MAC_IPV4_UDP].numOfUsedExtractedOrs = 0; + scheme[MAC_IPV4_UDP].netEnvParams.h_NetEnv = h_NetEnv; + scheme[MAC_IPV4_UDP].id.relativeSchemeId = MAC_IPV4_UDP; + /* Extract field:ethernet.src */ + scheme[MAC_IPV4_UDP].keyExtractAndHashParams.extractArray[0] = + ea_eth_sa; + /* Extract field:ethernet.dst */ + scheme[MAC_IPV4_UDP].keyExtractAndHashParams.extractArray[1] = + ea_eth_da; + /* Extract field:ipv4.src */ + scheme[MAC_IPV4_UDP].keyExtractAndHashParams.extractArray[2] = + ea_ipv4_sa; + /* Extract field:ipv4.dst */ + scheme[MAC_IPV4_UDP].keyExtractAndHashParams.extractArray[3] 
= + ea_ipv4_da; + /* Extract field:ipv4.nextp */ + scheme[MAC_IPV4_UDP].keyExtractAndHashParams.extractArray[4] = + ea_nextp; + /* Extract field:udp.sport */ + scheme[MAC_IPV4_UDP].keyExtractAndHashParams.extractArray[5] = + ea_udp_sp; + /* Extract field:udp.dport */ + scheme[MAC_IPV4_UDP].keyExtractAndHashParams.extractArray[6] = + ea_udp_dp; + + bond->params.ohp->h_FmPcd = h_FmPcd; + bond->params.ohp->h_FmPort = h_FmPort; + bond->params.ohp->h_NetEnv = h_NetEnv; + bond->params.ohp->prsParam = prsParam; + bond->params.ohp->kgParam = kgParam; + bond->params.ohp->pcdParam = pcdParam; + bond->params.ohp->scheme = scheme; + bond->params.ohp->netEnvParams = netEnvParams; + hw_lag_dbg("alloc_pcd_mem() ok.\n"); + + return BOND_OH_SUCCESS; +} + +int release_pcd_mem(struct bonding *bond) +{ + + kfree(bond->params.ohp->prsParam); + kfree(bond->params.ohp->kgParam); + kfree(bond->params.ohp->pcdParam); + kfree(bond->params.ohp->scheme); + kfree(bond->params.ohp->netEnvParams); + + bond->params.ohp->h_FmPcd = NULL; + bond->params.ohp->h_FmPort = NULL; + bond->params.ohp->h_NetEnv = NULL; + bond->params.ohp->prsParam = NULL; + bond->params.ohp->kgParam = NULL; + bond->params.ohp->pcdParam = NULL; + bond->params.ohp->scheme = NULL; + bond->params.ohp->netEnvParams = NULL; + bond->params.ohp->numberof_pre_schemes = 0; + + return BOND_OH_SUCCESS; +} + +static int replace_pcd(struct fm_port *fm_port, uint8_t numOfSchemes, + u32 pcd_fqids_base, uint8_t distNumOfQueues, + struct bonding *bond) +{ + + t_Handle h_FmPcd, h_FmPort, h_NetEnv; + + t_FmPcdNetEnvParams *netEnvParams; + t_FmPcdKgSchemeParams *scheme; + t_FmPortPcdParams *pcdParam; + t_FmPortPcdPrsParams *prsParam; + t_FmPortPcdKgParams *kgParam; + int i, err, numberof_pre_schemes; + + numberof_pre_schemes = bond->params.ohp->numberof_pre_schemes; + + if (numberof_pre_schemes == numOfSchemes) { + hw_lag_dbg("numberof_pre_schemes == numOfSchemes.\n"); + return BOND_OH_SUCCESS; + } + + h_FmPcd = bond->params.ohp->h_FmPcd; + 
h_FmPort = bond->params.ohp->h_FmPort; + h_NetEnv = bond->params.ohp->h_NetEnv; + + netEnvParams = bond->params.ohp->netEnvParams; + scheme = bond->params.ohp->scheme; + pcdParam = bond->params.ohp->pcdParam; + prsParam = bond->params.ohp->prsParam; + kgParam = bond->params.ohp->kgParam; + kgParam->numOfSchemes = numOfSchemes; + hw_lag_dbg("h_FmPcd:%p, h_FmPort:%p, h_NetEnv:%p\n", + h_FmPcd, h_FmPort, h_NetEnv); + hw_lag_dbg("netEnvParams:%p, scheme:%p, pcdParam:%p\n", + netEnvParams, scheme, pcdParam); + hw_lag_dbg("prsParam:%p, kgParam:%p, numberof_pre_schemes:%d\n", + prsParam, kgParam, numberof_pre_schemes); + + for (i = 0; i < numberof_pre_schemes; i++) { + if (kgParam->h_Schemes[i]) + err = FM_PCD_KgSchemeDelete(kgParam->h_Schemes[i]); + if (err == E_OK) + hw_lag_dbg("KgSchemeDelete(h_Schemes[%d]) = %p OK.\n", + i, kgParam->h_Schemes[i]); + else { + pr_err("KgSchemeDelete(h_Schemes[%d]) = %p Err.\n", + i, kgParam->h_Schemes[i]); + return BOND_OH_ERROR; + } + + } + for (i = 0; i < numOfSchemes; i++) { + scheme[i].baseFqid = pcd_fqids_base; + scheme[i].keyExtractAndHashParams.hashDistributionNumOfFqids = + distNumOfQueues; + hw_lag_dbg("scheme[%d]->pcd_fqids_base:%d\n", i, + pcd_fqids_base); + hw_lag_dbg("scheme[%d]->distNumOfQueues:%d\n", i, + distNumOfQueues); + if (!kgParam->h_Schemes[i]) + kgParam->h_Schemes[i] = + FM_PCD_KgSchemeSet(h_FmPcd, &scheme[i]); + hw_lag_dbg("kgParam->h_Schemes[%d]:%p.\n", + i, kgParam->h_Schemes[i]); + } + hw_lag_dbg("FM_PCD_KgSchemeSet() OK.\n"); + + if (bond->params.ohp->oh_en == 1) { + bond->params.ohp->oh_en = 0; + err = FM_PORT_Disable(h_FmPort); + if (err == E_OK) { + hw_lag_dbg("FM_PORT_Disable() OK with oh_en\n"); + err = FM_PORT_SetPCD(h_FmPort, pcdParam); + if (err == E_OK) { + hw_lag_dbg("FM_PORT_SetPCD() OK with oh_en\n"); + err = FM_PORT_Enable(h_FmPort); + if (err == E_OK) + hw_lag_dbg("FM_PORT_Enable() OK.\n"); + else + pr_err("FM_PORT_Enable() err.\n"); + } else { + pr_err("FM_PORT_SetPCD() err in oh_en\n"); + 
FM_PORT_Enable(h_FmPort); + } + } else + pr_err("FM_PORT_Disable() errors with oh_en\n"); + bond->params.ohp->oh_en = 1; + } else { + FM_PORT_Disable(h_FmPort); + err = FM_PORT_SetPCD(h_FmPort, pcdParam); + FM_PORT_Enable(h_FmPort); + } + if (GET_ERROR_TYPE(ERROR_CODE(err)) != E_OK) + return BOND_OH_ERROR; + + bond->params.ohp->numberof_pre_schemes = numOfSchemes; + return BOND_OH_SUCCESS; + +} + +/* get all offline port information from bond, including + * dev,oh handler, PCD FQid base and PCD FQ count, then + * get the new xmit policy, copy schemes needed from the + * cached_scheme pointer, config PCD params, init PCD dev, + * set PCD Net Env Characteristics, then set Keygen Scheme + * params to the PCD dev, disable offline port, set PCD + * params to the offline port dev, at last enable the offline + * port. + * this subroutine return true when it can apply PCD to + * the offline port, otherwise return false. + */ +bool apply_pcd(struct bonding *bond, int new_xmit_policy) +{ + int true_policy; + struct fm_port *fm_port; + uint8_t numOfSchemes; + u32 pcd_fqids_base; + uint8_t distNumOfQueues; + int err; + + if (bond->params.mode != BOND_MODE_8023AD) { + hw_lag_dbg("not 802.3ad mode, can't apply PCD\n"); + return false; + } + if (!bond->params.ohp) { + pr_err("have not bind an OH port,\n"); + pr_err("will use software tx traffic distribution.\n"); + return false; + } + if (bond->slave_cnt != SLAVES_PER_BOND) { + hw_lag_dbg("can't apply PCD, slave_cnt:%d\n", SLAVES_PER_BOND); + return false; + } + if (new_xmit_policy == NO_POLICY) + true_policy = bond->params.xmit_policy; + else + true_policy = new_xmit_policy; + fm_port = bond->params.ohp->oh_config->oh_port; + + /* chang the XML PCD from user space to kernel PCD, + * please refer to the output of fmc host command mode + */ + switch (true_policy) { + case BOND_XMIT_POLICY_LAYER23: + /* will be support in the future version + * numOfSchemes = 3; + */ + break; + case BOND_XMIT_POLICY_LAYER34: + /* will be support in 
the future version + * numOfSchemes = 7; + */ + break; + case BOND_XMIT_POLICY_LAYER2: + numOfSchemes = 1; + break; + default: + numOfSchemes = 1; + break; + } + pcd_fqids_base = bond->params.ohp->pcd_fqids_base; + distNumOfQueues = SLAVES_PER_BOND; + hw_lag_dbg("fm_port:%p, numOfSchemes:%d, pcd_fqids_base:%d", + fm_port, numOfSchemes, pcd_fqids_base); + hw_lag_dbg("distNumOfQueues:%d, bond:%p\n", distNumOfQueues, bond); + err = alloc_pcd_mem(fm_port, numOfSchemes, pcd_fqids_base, + distNumOfQueues, bond); + if (err == BOND_OH_SUCCESS) { + err = replace_pcd(fm_port, numOfSchemes, pcd_fqids_base, + distNumOfQueues, bond); + if (err == BOND_OH_SUCCESS) { + hw_lag_dbg("applied PCD.\n"); + return true; + } else { + pr_err("error on replace_pcd()\n"); + return false; + } + } else { + pr_err("error on replace_pcd()\n"); + return false; + } +} -- cgit v0.10.2 From 5e484120a4bdf35092dbbb934d3f9466ab615b19 Mon Sep 17 00:00:00 2001 From: Jianhua Xie Date: Fri, 7 Mar 2014 16:22:18 +0800 Subject: powerpc/t1040/dts: Enable oh port5 to support LAG Enable one offline port5 with a buffer pool to support HW based LAG for T1040QDS/RDB. The dts codes are borrowed from other USDPAA dts file. 
Change-Id: Ifc67f8a9fc8e5fadfe59d76bc54532c9f3f99586 Signed-off-by: Jianhua Xie Reviewed-by: Mandy Lavi Reviewed-on: http://git.am.freescale.net:8181/10416 Tested-by: Review Code-CDREVIEW Reviewed-by: Jiafei Pan Reviewed-by: Jose Rivera diff --git a/arch/powerpc/boot/dts/t1040rdb.dts b/arch/powerpc/boot/dts/t1040rdb.dts index dcb5a93..d1caf30 100644 --- a/arch/powerpc/boot/dts/t1040rdb.dts +++ b/arch/powerpc/boot/dts/t1040rdb.dts @@ -304,6 +304,14 @@ }; }; + /* bp dts definition is borrowed from other USDPAA dts */ + bp6: buffer-pool@6 { + compatible = "fsl,t1040-bpool", "fsl,bpool"; + fsl,bpid = <6>; + fsl,bpool-ethernet-cfg = <0 0 0 1728 0 0xfeedabba>; + fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>; + }; + fsl,dpaa { compatible = "fsl,t1040-dpaa", "fsl,dpaa"; ethernet@0 { @@ -331,6 +339,16 @@ fsl,fman-mac = <&enet4>; sleep = <&rcpm 0x08000008>; }; + /* enable one offline port */ + dpa-fman0-oh@5 { + compatible = "fsl,dpa-oh"; + /* Define frame queues for the OH port*/ + /* */ + fsl,qman-frame-queues-oh = <0x68 1 0x69 1>; + fsl,bman-buffer-pools = <&bp6>; + fsl,qman-frame-queues-tx = <0x90 8>; + fsl,fman-oh-port = <&fman0_oh5>; + }; }; qe: qe@ffe139999 { diff --git a/arch/powerpc/boot/dts/t104xqds.dtsi b/arch/powerpc/boot/dts/t104xqds.dtsi index 079a4cd..ba4de7f 100644 --- a/arch/powerpc/boot/dts/t104xqds.dtsi +++ b/arch/powerpc/boot/dts/t104xqds.dtsi @@ -329,6 +329,13 @@ }; }; }; + /* bp dts definition is borrowed from other USDPAA dts */ + bp6: buffer-pool@6 { + compatible = "fsl,t1040-bpool", "fsl,bpool"; + fsl,bpid = <6>; + fsl,bpool-ethernet-cfg = <0 0 0 1728 0 0xfeedabba>; + fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>; + }; fsl,dpaa { compatible = "fsl,t1040-dpaa", "fsl,dpaa"; @@ -357,6 +364,16 @@ fsl,fman-mac = <&enet4>; sleep = <&rcpm 0x08000008>; }; + /* Enable one offline port as default to support HW based LAG */ + dpa-fman0-oh@5 { + compatible = "fsl,dpa-oh"; + /* Define frame queues for the OH port*/ + /* */ + fsl,qman-frame-queues-oh = <0x68 1 0x69 
1>; + fsl,bman-buffer-pools = <&bp6>; + fsl,qman-frame-queues-tx = <0x90 8>; + fsl,fman-oh-port = <&fman0_oh5>; + }; }; pci0: pcie@ffe240000 { -- cgit v0.10.2 From f93d9be87a1cbde38e28a82e43c223f72d069fff Mon Sep 17 00:00:00 2001 From: Alex Porosanu Date: Wed, 9 Apr 2014 20:14:35 +0300 Subject: crypto: caam - fix UIO JR dependency Exporting JR via the UIO mechanism in user-space should depend on the JR back-end mechanism being available. Otherwise the user could build the JR UIO exporting part of the CAAM driver without having the necessary JR export code in place. This patch adds the correct dependencies to the CAAM driver kernel configuration file. Change-Id: Idab845bcb73ceff4196685446ea5c3d266df1e6d Signed-off-by: Alex Porosanu Reviewed-on: http://git.am.freescale.net:8181/10820 Tested-by: Review Code-CDREVIEW Reviewed-by: Horia Ioan Geanta Neag Reviewed-by: Ruchika Gupta Reviewed-by: Jose Rivera diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig index d7863d9..1d2db39 100644 --- a/drivers/crypto/caam/Kconfig +++ b/drivers/crypto/caam/Kconfig @@ -153,6 +153,5 @@ config CRYPTO_DEV_FSL_CAAM_DEBUG config CRYPTO_DEV_FSL_CAAM_JR_UIO tristate "Freescale Job Ring UIO support" - depends on CRYPTO_DEV_FSL_CAAM - depends on UIO + depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR && UIO default y -- cgit v0.10.2 From d4807e0bde6a5ba8bfc9f9c8c6abe048612a937d Mon Sep 17 00:00:00 2001 From: Marian Chereji Date: Fri, 11 Apr 2014 13:26:14 +0300 Subject: dpa_offload: Add application DTS files for T2080RDB platform Added DTS files to support the dpa_offloading applications on the T2080RDB platform. 
Signed-off-by: Marian Chereji Change-Id: I02a59c557b354b710d24e9c8b97147fb55482307 Reviewed-on: http://git.am.freescale.net:8181/10919 Tested-by: Review Code-CDREVIEW Reviewed-by: Aurelian Zanoschi Reviewed-by: Jose Rivera diff --git a/drivers/staging/fsl_dpa_offload/dts/t2080rdb-usdpaa-shared-interfaces.dts b/drivers/staging/fsl_dpa_offload/dts/t2080rdb-usdpaa-shared-interfaces.dts new file mode 100644 index 0000000..b07fbd9 --- /dev/null +++ b/drivers/staging/fsl_dpa_offload/dts/t2080rdb-usdpaa-shared-interfaces.dts @@ -0,0 +1,182 @@ +/* + * T2080PCIe-RDB USDPAA Device Tree Source + * + * Copyright 2014 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/include/ "t2080rdb.dts" + +/ { + /* NB: "bpool-ethernet-seeds" is not set to avoid buffer seeding, + * because apps seed these pools with buffers allocated at + * run-time. + * HOWEVER, the kernel driver requires the buffer-size so + * "fsl,bpool-ethernet-cfg" is set. It also mis-interprets + * things if the base-address is zero (hence the 0xdeadbeef + * values). + */ + bp7: buffer-pool@7 { + compatible = "fsl,t2080-bpool", "fsl,bpool"; + fsl,bpid = <7>; + fsl,bpool-ethernet-cfg = <0 0 0 192 0 0xdeadbeef>; + fsl,bpool-thresholds = <0x400 0xc00 0x0 0x0>; + }; + bp8: buffer-pool@8 { + compatible = "fsl,t2080-bpool", "fsl,bpool"; + fsl,bpid = <8>; + fsl,bpool-ethernet-cfg = <0 0 0 576 0 0xabbaf00d>; + fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>; + }; + bp9: buffer-pool@9 { + compatible = "fsl,t2080-bpool", "fsl,bpool"; + fsl,bpid = <9>; + fsl,bpool-ethernet-cfg = <0 0 0 1728 0 0xfeedabba>; + fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>; + }; + + bp10: buffer-pool@10 { + compatible = "fsl,t2080-bpool", "fsl,bpool"; + fsl,bpid = <10>; + fsl,bpool-thresholds = <0x10 0x30 0x0 0x0>; + }; + bp11: buffer-pool@11 { + compatible = "fsl,t2080-bpool", "fsl,bpool"; + fsl,bpid = <11>; + fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>; + }; + bp12: buffer-pool@12 { + compatible = "fsl,t2080-bpool", "fsl,bpool"; + fsl,bpid = <12>; + fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>; + }; + bp16: buffer-pool@16 { + compatible = "fsl,t2080-bpool", "fsl,bpool"; + 
fsl,bpid = <16>; + fsl,bpool-ethernet-cfg = <0 2048 0 1728 0 0>; + fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>; + }; + bp17: buffer-pool@17 { + compatible = "fsl,t2080-bpool", "fsl,bpool"; + fsl,bpid = <17>; + fsl,bpool-ethernet-cfg = <0 2048 0 1728 0 0>; + fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>; + }; + + fsl,dpaa { + ethernet@0 {/* 10G */ + compatible = "fsl,t2080-dpa-ethernet-init", "fsl,dpa-ethernet-init"; + fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; + fsl,qman-frame-queues-rx = <0x90 1 0x91 1>; + fsl,qman-frame-queues-tx = <0x98 1 0x99 1>; + }; + /* ethernet@1 declared as shared MAC. USDPAA will seed buffers to + * this buffer pool. The ethernet driver will initialize the RX default, + * RX error, TX error, TX confirm and 8 TX Frame queues. On receiving frame + * at this interface, the ethernet driver will do kmap_atomic/kunmap_atomic + * for that frame. */ + ethernet@1 {/* 10G */ + compatible = "fsl,t2080-dpa-ethernet-shared", "fsl,dpa-ethernet-shared"; + fsl,bman-buffer-pools = <&bp17>; + fsl,qman-frame-queues-rx = <0x92 1 0x93 1 0x2000 3>; + fsl,qman-frame-queues-tx = <0 1 0 1 0x3000 8>; + }; + ethernet@2 {/* 1G */ + compatible = "fsl,t2080-dpa-ethernet-init", "fsl,dpa-ethernet-init"; + fsl,bman-buffer-pools = <&bp16>; + fsl,qman-frame-queues-rx = <0x54 1 0x55 1>; + fsl,qman-frame-queues-tx = <0x74 1 0x75 1>; + }; + ethernet@3 {/* 1G */ + compatible = "fsl,t2080-dpa-ethernet-init", "fsl,dpa-ethernet-init"; + fsl,bman-buffer-pools = <&bp16>; + fsl,qman-frame-queues-rx = <0x56 1 0x57 1>; + fsl,qman-frame-queues-tx = <0x76 1 0x77 1>; + }; + ethernet@8 {/* 10G */ + compatible = "fsl,t2080-dpa-ethernet-init", "fsl,dpa-ethernet-init"; + fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; + fsl,qman-frame-queues-rx = <0x5c 1 0x5d 1>; + fsl,qman-frame-queues-tx = <0x7c 1 0x7d 1>; + }; + ethernet@9 {/* 10G */ + compatible = "fsl,t2080-dpa-ethernet-init", "fsl,dpa-ethernet-init"; + fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; + fsl,qman-frame-queues-rx = <0x5e 1 0x5f 1>; + 
fsl,qman-frame-queues-tx = <0x7e 1 0x7f 1>; + }; + /* ethernet@10 declared as MAC-less interface with no "fsl,fman-mac" property. + * USDPAA will seed buffers to this buffer pool and initialize 8 TX Frame + * queues. The ethernet driver will initialize 8 RX default Frame queues. + * On receiving frame at this interface, the ethernet driver will do + * kmap_atomic/kunmap_atomic for that frame. */ + ethernet@10 { + compatible = "fsl,t2080-dpa-ethernet-macless", "fsl,dpa-ethernet-macless"; + fsl,bman-buffer-pools = <&bp16>; + fsl,qman-frame-queues-rx = <4000 8>; + fsl,qman-frame-queues-tx = <4008 8>; + local-mac-address = [00 11 22 33 44 55]; + }; + ethernet@11 { + compatible = "fsl,t2080-dpa-ethernet-macless", "fsl,dpa-ethernet-macless"; + fsl,bman-buffer-pools = <&bp16>; + fsl,qman-frame-queues-rx = <5000 8>; + fsl,qman-frame-queues-tx = <5008 8>; + local-mac-address = [00 11 22 33 44 66]; + }; + ethernet@12 { + compatible = "fsl,t2080-dpa-ethernet-macless", "fsl,dpa-ethernet-macless"; + fsl,bman-buffer-pools = <&bp16>; + fsl,qman-frame-queues-rx = <6000 8>; + fsl,qman-frame-queues-tx = <6008 8>; + local-mac-address = [00 11 22 33 44 77]; + }; + dpa-fman0-oh@2 { + compatible = "fsl,dpa-oh"; + /* Define frame queues for the OH port*/ + /* */ + fsl,qman-frame-queues-oh = <0x6e 1 0x6f 1>; + fsl,bman-buffer-pools = <&bp9>; + fsl,fman-oh-port = <&fman0_oh2>; + }; + dpa_fman0_oh3: dpa-fman0-oh@3 { + compatible = "fsl,dpa-oh"; + fsl,qman-frame-queues-oh = <0x68 1 0x69 1>; + fsl,bman-buffer-pools = <&bp9>; + fsl,fman-oh-port = <&fman0_oh3>; + }; + dpa_fman0_oh4: dpa-fman0-oh@4 { + compatible = "fsl,dpa-oh"; + fsl,qman-frame-queues-oh = <0x70 1 0x71 1>; + fsl,bman-buffer-pools = <&bp9>; + fsl,fman-oh-port = <&fman0_oh4>; + }; + }; +}; diff --git a/drivers/staging/fsl_dpa_offload/dts/t2080rdb-usdpaa.dts b/drivers/staging/fsl_dpa_offload/dts/t2080rdb-usdpaa.dts new file mode 100644 index 0000000..ff231ca --- /dev/null +++ 
b/drivers/staging/fsl_dpa_offload/dts/t2080rdb-usdpaa.dts @@ -0,0 +1,163 @@ +/* + * T2080PCIe-RDB USDPAA Device Tree Source + * + * Copyright 2014 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +/include/ "t2080rdb.dts" + +/ { + /* NB: "bpool-ethernet-seeds" is not set to avoid buffer seeding, + * because apps seed these pools with buffers allocated at + * run-time. + * HOWEVER, the kernel driver requires the buffer-size so + * "fsl,bpool-ethernet-cfg" is set. It also mis-interprets + * things if the base-address is zero (hence the 0xdeadbeef + * values). + */ + bp7: buffer-pool@7 { + compatible = "fsl,t2080-bpool", "fsl,bpool"; + fsl,bpid = <7>; + fsl,bpool-ethernet-cfg = <0 0 0 192 0 0xdeadbeef>; + fsl,bpool-thresholds = <0x400 0xc00 0x0 0x0>; + }; + bp8: buffer-pool@8 { + compatible = "fsl,t2080-bpool", "fsl,bpool"; + fsl,bpid = <8>; + fsl,bpool-ethernet-cfg = <0 0 0 576 0 0xabbaf00d>; + fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>; + }; + bp9: buffer-pool@9 { + compatible = "fsl,t2080-bpool", "fsl,bpool"; + fsl,bpid = <9>; + fsl,bpool-ethernet-cfg = <0 0 0 1728 0 0xfeedabba>; + fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>; + }; + + bp10: buffer-pool@10 { + compatible = "fsl,t2080-bpool", "fsl,bpool"; + fsl,bpid = <10>; + fsl,bpool-thresholds = <0x10 0x30 0x0 0x0>; + }; + bp11: buffer-pool@11 { + compatible = "fsl,t2080-bpool", "fsl,bpool"; + fsl,bpid = <11>; + fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>; + }; + bp12: buffer-pool@12 { + compatible = "fsl,t2080-bpool", "fsl,bpool"; + fsl,bpid = <12>; + fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>; + }; + bp16: buffer-pool@16 { + compatible = "fsl,t2080-bpool", "fsl,bpool"; + fsl,bpid = <16>; + fsl,bpool-ethernet-cfg = <0 2048 0 1728 0 0>; + fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>; + }; + + fsl,dpaa { + ethernet@0 {/* 10G */ + compatible = "fsl,t2080-dpa-ethernet-init", "fsl,dpa-ethernet-init"; + fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; + fsl,qman-frame-queues-rx = <0x90 1 0x91 1>; + fsl,qman-frame-queues-tx = <0x98 1 0x99 1>; + }; + ethernet@1 {/* 10G */ + compatible = "fsl,t2080-dpa-ethernet-init", "fsl,dpa-ethernet-init"; + fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; + 
fsl,qman-frame-queues-rx = <0x92 1 0x93 1>; + fsl,qman-frame-queues-tx = <0x9a 1 0x9b 1>; + }; + ethernet@2 {/* 1G */ + compatible = "fsl,t2080-dpa-ethernet-init", "fsl,dpa-ethernet-init"; + fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; + fsl,qman-frame-queues-rx = <0x54 1 0x55 1>; + fsl,qman-frame-queues-tx = <0x74 1 0x75 1>; + }; + ethernet@3 {/* 1G */ + compatible = "fsl,t2080-dpa-ethernet-init", "fsl,dpa-ethernet-init"; + fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; + fsl,qman-frame-queues-rx = <0x56 1 0x57 1>; + fsl,qman-frame-queues-tx = <0x76 1 0x77 1>; + }; + ethernet@8 {/* 10G */ + compatible = "fsl,t2080-dpa-ethernet-init", "fsl,dpa-ethernet-init"; + fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; + fsl,qman-frame-queues-rx = <0x5c 1 0x5d 1>; + fsl,qman-frame-queues-tx = <0x7c 1 0x7d 1>; + }; + ethernet@9 {/* 10G */ + compatible = "fsl,t2080-dpa-ethernet-init", "fsl,dpa-ethernet-init"; + fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; + fsl,qman-frame-queues-rx = <0x5e 1 0x5f 1>; + fsl,qman-frame-queues-tx = <0x7e 1 0x7f 1>; + }; + /* ethernet@10 declared as MAC-less interface with no "fsl,fman-mac" property. + * USDPAA will seed buffers to this buffer pool and initialize 8 TX Frame + * queues. The ethernet driver will initialize 8 RX default Frame queues. + * On receiving frame at this interface, the ethernet driver will do + * kmap_atomic/kunmap_atomic for that frame. 
*/ + ethernet@10 { + compatible = "fsl,t2080-dpa-ethernet-macless", "fsl,dpa-ethernet-macless"; + fsl,bman-buffer-pools = <&bp16>; + fsl,qman-frame-queues-rx = <4000 8>; + fsl,qman-frame-queues-tx = <4008 8>; + local-mac-address = [00 11 22 33 44 55]; + }; + ethernet@11 { + compatible = "fsl,t2080-dpa-ethernet-macless", "fsl,dpa-ethernet-macless"; + fsl,bman-buffer-pools = <&bp16>; + fsl,qman-frame-queues-rx = <5000 8>; + fsl,qman-frame-queues-tx = <5008 8>; + local-mac-address = [00 11 22 33 44 66]; + dpa-fman0-oh@2 { + compatible = "fsl,dpa-oh"; + /* Define frame queues for the OH port*/ + /* */ + fsl,qman-frame-queues-oh = <0x6e 1 0x6f 1>; + fsl,bman-buffer-pools = <&bp9>; + fsl,fman-oh-port = <&fman0_oh2>; + }; + dpa_fman0_oh3: dpa-fman0-oh@3 { + compatible = "fsl,dpa-oh"; + fsl,qman-frame-queues-oh = <0x68 1 0x69 1>; + fsl,bman-buffer-pools = <&bp9>; + fsl,fman-oh-port = <&fman0_oh3>; + }; + dpa_fman0_oh4: dpa-fman0-oh@4 { + compatible = "fsl,dpa-oh"; + fsl,qman-frame-queues-oh = <0x70 1 0x71 1>; + fsl,bman-buffer-pools = <&bp9>; + fsl,fman-oh-port = <&fman0_oh4>; + }; + }; +}; -- cgit v0.10.2 From 83e2f16e50cffcc1e402663cb3ce34da4f95e5fb Mon Sep 17 00:00:00 2001 From: Marian Chereji Date: Wed, 2 Apr 2014 18:27:21 +0300 Subject: dpa_classifier: Allow adding of header manipulation on miss table relationship The dpa_classifier did not allow the user application to attach header manipulation chains on the miss table relationship action. This is, however supported in FMD, hence now the dpa_classifier will also allow this feature to be used. 
Signed-off-by: Marian Chereji Change-Id: I0174a4a3d9c708b4209cb2f8fa2fe3f724bb2081 Reviewed-on: http://git.am.freescale.net:8181/10644 Tested-by: Review Code-CDREVIEW Reviewed-by: Nicolae-Sebastian Grigore Reviewed-by: Jose Rivera diff --git a/drivers/staging/fsl_dpa_offload/dpa_classifier.c b/drivers/staging/fsl_dpa_offload/dpa_classifier.c index e4610d4..d1281ae 100644 --- a/drivers/staging/fsl_dpa_offload/dpa_classifier.c +++ b/drivers/staging/fsl_dpa_offload/dpa_classifier.c @@ -406,6 +406,7 @@ int dpa_classif_table_modify_miss_action(int td, const struct dpa_cls_tbl_action *miss_action) { int errno; + int old_hmd, hmd; t_Error err; t_FmPcdCcNextEngineParams miss_engine_params; struct dpa_cls_table *ptable; @@ -428,11 +429,35 @@ int dpa_classif_table_modify_miss_action(int td, return -ENOSYS; } + /* + * Check existing header manipulation descriptors and release if + * found. + */ + switch (ptable->miss_action.type) { + case DPA_CLS_TBL_ACTION_ENQ: + old_hmd = ptable->miss_action.enq_params.hmd; + break; + case DPA_CLS_TBL_ACTION_NEXT_TABLE: + old_hmd = ptable->miss_action.next_table_params.hmd; + break; +#if (DPAA_VERSION >= 11) + case DPA_CLS_TBL_ACTION_MCAST: + old_hmd = ptable->miss_action.mcast_params.hmd; + break; +#endif /* (DPAA_VERSION >= 11) */ + default: + old_hmd = DPA_OFFLD_DESC_NONE; + break; + } + dpa_classif_hm_release_chain(old_hmd); + /* Fill the [miss_engine_params] structure w/ data */ errno = action_to_next_engine_params(miss_action, &miss_engine_params, - NULL, ptable->params.distribution, + &hmd, ptable->params.distribution, ptable->params.classification); if (errno < 0) { + /* Lock back the old HM chain. */ + dpa_classif_hm_lock_chain(old_hmd); RELEASE_OBJECT(ptable); log_err("Failed verification of miss action params for table " "td=%d.\n", td); @@ -443,6 +468,8 @@ int dpa_classif_table_modify_miss_action(int td, err = FM_PCD_HashTableModifyMissNextEngine(ptable->params. 
cc_node, &miss_engine_params); if (err != E_OK) { + /* Lock back the old HM chain. */ + dpa_classif_hm_lock_chain(old_hmd); RELEASE_OBJECT(ptable); log_err("FMan driver call failed - " "FM_PCD_HashTableModifyMissNextEngine " @@ -454,6 +481,8 @@ int dpa_classif_table_modify_miss_action(int td, err = FM_PCD_MatchTableModifyMissNextEngine((t_Handle)ptable-> int_cc_node[0].cc_node, &miss_engine_params); if (err != E_OK) { + /* Lock back the old HM chain. */ + dpa_classif_hm_lock_chain(old_hmd); RELEASE_OBJECT(ptable); log_err("FMan driver call failed - " "FM_PCD_MatchTableModifyMissNextEngine (td=%d, " @@ -463,6 +492,7 @@ int dpa_classif_table_modify_miss_action(int td, } } + /* Store Miss Action (including its header manip chain). */ memcpy(&ptable->miss_action, miss_action, sizeof(*miss_action)); RELEASE_OBJECT(ptable); -- cgit v0.10.2 From 81dd51bb373dc88fb91ff595dec5df7f40c8a14b Mon Sep 17 00:00:00 2001 From: Chunhe Lan Date: Tue, 1 Apr 2014 15:04:05 +0800 Subject: powerpc/85xx: Add T4240RDB board support T4240RDB board Specification ---------------------------- Memory subsystem: 6GB DDR3 128MB NOR flash 2GB NAND flash Ethernet: Eight 1G SGMII ports Four 10Gbps SFP+ ports PCIe: Two PCIe slots USB: Two USB2.0 Type A ports SDHC: One SD-card port SATA: One SATA port UART: Dual RJ45 ports Signed-off-by: Chunhe Lan Change-Id: I769d39c25a49508088219fd5a9b27003421b6569 Reviewed-on: http://git.am.freescale.net:8181/10386 Tested-by: Review Code-CDREVIEW Reviewed-by: Tiefei Zang Reviewed-by: Jose Rivera diff --git a/arch/powerpc/boot/dts/t4240rdb.dts b/arch/powerpc/boot/dts/t4240rdb.dts new file mode 100644 index 0000000..934b96a --- /dev/null +++ b/arch/powerpc/boot/dts/t4240rdb.dts @@ -0,0 +1,476 @@ +/* + * T4240RDB Device Tree Source + * + * Copyright 2014 Freescale Semiconductor Inc. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +/include/ "fsl/t4240si-pre.dtsi" + +/ { + model = "fsl,T4240RDB"; + compatible = "fsl,T4240RDB"; + #address-cells = <2>; + #size-cells = <2>; + interrupt-parent = <&mpic>; + + aliases { + sgmii_phy21 = &sgmiiphy21; + sgmii_phy22 = &sgmiiphy22; + sgmii_phy23 = &sgmiiphy23; + sgmii_phy24 = &sgmiiphy24; + sgmii_phy41 = &sgmiiphy41; + sgmii_phy42 = &sgmiiphy42; + sgmii_phy43 = &sgmiiphy43; + sgmii_phy44 = &sgmiiphy44; + ethernet0 = &enet0; + ethernet1 = &enet1; + ethernet2 = &enet2; + ethernet3 = &enet3; + ethernet4 = &enet14; + ethernet5 = &enet15; + ethernet6 = &enet6; + ethernet7 = &enet7; + ethernet8 = &enet8; + ethernet9 = &enet9; + ethernet10 = &enet10; + ethernet11 = &enet11; + ethernet12 = &enet12; + ethernet13 = &enet13; + ethernet14 = &enet4; + ethernet15 = &enet5; + }; + + ifc: localbus@ffe124000 { + reg = <0xf 0xfe124000 0 0x2000>; + ranges = <0 0 0xf 0xe8000000 0x08000000 + 2 0 0xf 0xff800000 0x00010000 + 3 0 0xf 0xffdf0000 0x00008000>; + + nor@0,0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "cfi-flash"; + reg = <0x0 0x0 0x8000000>; + + bank-width = <2>; + device-width = <1>; + }; + + nand@2,0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "fsl,ifc-nand"; + reg = <0x2 0x0 0x10000>; + }; + }; + + memory { + device_type = "memory"; + }; + + dcsr: dcsr@f00000000 { + ranges = <0x00000000 0xf 0x00000000 0x01072000>; + }; + + bportals: bman-portals@ff4000000 { + ranges = <0x0 0xf 0xf4000000 0x2000000>; + }; + + qportals: qman-portals@ff6000000 { + ranges = <0x0 0xf 0xf6000000 0x2000000>; + }; + + lportals: lac-portals@ff8000000 { + ranges = <0x0 0xf 0xf8000000 0x20000>; + }; + + soc: soc@ffe000000 { + ranges = <0x00000000 0xf 0xfe000000 0x1000000>; + reg = <0xf 0xfe000000 0 0x00001000>; + spi@110000 { + flash@0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "sst,sst25wf040"; + reg = <0>; + spi-max-frequency = <40000000>; /* input clock */ + }; + }; + + i2c@118000 { + eeprom@52 { + compatible = 
"at24,24c256"; + reg = <0x52>; + }; + eeprom@54 { + compatible = "at24,24c256"; + reg = <0x54>; + }; + eeprom@56 { + compatible = "at24,24c256"; + reg = <0x56>; + }; + rtc@68 { + compatible = "dallas,ds1374"; + reg = <0x68>; + interrupts = <0x1 0x1 0 0>; + }; + monitor@2f { + compatible = "w83793"; + reg = <0x2f>; + }; + }; + + fman0: fman@400000 { + enet0: ethernet@e0000 { + phy-handle = <&sgmiiphy21>; + phy-connection-type = "sgmii"; + }; + + enet1: ethernet@e2000 { + phy-handle = <&sgmiiphy22>; + phy-connection-type = "sgmii"; + }; + + enet2: ethernet@e4000 { + phy-handle = <&sgmiiphy23>; + phy-connection-type = "sgmii"; + }; + + enet3: ethernet@e6000 { + phy-handle = <&sgmiiphy24>; + phy-connection-type = "sgmii"; + }; + + enet4: ethernet@e8000 { + status = "disabled"; + }; + + enet5: ethernet@ea000 { + status = "disabled"; + }; + + enet6: ethernet@f0000 { /* FM1@TSEC9/FM1@TGEC1 */ + phy-handle = <&xfiphy1>; + phy-connection-type = "xgmii"; + }; + + enet7: ethernet@f2000 { /* FM1@TSEC10/FM1@TGEC2 */ + phy-handle = <&xfiphy2>; + phy-connection-type = "xgmii"; + }; + + mdio@fc000 { + status = "disabled"; + }; + + mdio@fd000 { + status = "disabled"; + }; + + fman0_oh2 { + status = "disabled"; + }; + fman0_oh3 { + status = "disabled"; + }; + fman0_oh4 { + status = "disabled"; + }; + fman0_oh5 { + status = "disabled"; + }; + fman0_oh6 { + status = "disabled"; + }; + }; + + fman1: fman@500000 { + enet8: ethernet@e0000 { + phy-handle = <&sgmiiphy41>; + phy-connection-type = "sgmii"; + }; + + enet9: ethernet@e2000 { + phy-handle = <&sgmiiphy42>; + phy-connection-type = "sgmii"; + }; + + enet10: ethernet@e4000 { + phy-handle = <&sgmiiphy43>; + phy-connection-type = "sgmii"; + }; + + enet11: ethernet@e6000 { + phy-handle = <&sgmiiphy44>; + phy-connection-type = "sgmii"; + }; + + enet12: ethernet@e8000 { + status = "disabled"; + }; + + enet13: ethernet@ea000 { + status = "disabled"; + }; + + enet14: ethernet@f0000 { /* FM2@TSEC9/FM2@TGEC1 */ + phy-handle = <&xfiphy3>; + 
phy-connection-type = "xgmii"; + }; + + enet15: ethernet@f2000 { /* FM2@TSEC10/FM2@TGEC2 */ + phy-handle = <&xfiphy4>; + phy-connection-type = "xgmii"; + }; + + mdio0: mdio@fc000 { + sgmiiphy21: ethernet-phy@0 { + reg = <0x0>; + }; + + sgmiiphy22: ethernet-phy@1 { + reg = <0x1>; + }; + + sgmiiphy23: ethernet-phy@2 { + reg = <0x2>; + }; + + sgmiiphy24: ethernet-phy@3 { + reg = <0x3>; + }; + + sgmiiphy41: ethernet-phy@4 { + reg = <0x4>; + }; + + sgmiiphy42: ethernet-phy@5 { + reg = <0x5>; + }; + + sgmiiphy43: ethernet-phy@6 { + reg = <0x6>; + }; + + sgmiiphy44: ethernet-phy@7 { + reg = <0x7>; + }; + }; + + xmdio0: mdio@fd000 { + xfiphy1: ethernet-phy@10 { + compatible = "ethernet-phy-ieee802.3-c45"; + reg = <0x10>; + }; + + xfiphy2: ethernet-phy@11 { + compatible = "ethernet-phy-ieee802.3-c45"; + reg = <0x11>; + }; + + xfiphy3: ethernet-phy@13 { + compatible = "ethernet-phy-ieee802.3-c45"; + reg = <0x13>; + }; + + xfiphy4: ethernet-phy@12 { + compatible = "ethernet-phy-ieee802.3-c45"; + reg = <0x12>; + }; + }; + + fman1_oh3 { + status = "disabled"; + }; + fman1_oh4 { + status = "disabled"; + }; + fman1_oh5 { + status = "disabled"; + }; + fman1_oh6 { + status = "disabled"; + }; + }; + + sdhc@114000 { + voltage-ranges = <1800 1800 3300 3300>; + }; + }; + + pci0: pcie@ffe240000 { + reg = <0xf 0xfe240000 0 0x10000>; + ranges = <0x02000000 0 0xe0000000 0xc 0x00000000 0x0 0x20000000 + 0x01000000 0 0x00000000 0xf 0xf8000000 0x0 0x00010000>; + pcie@0 { + ranges = <0x02000000 0 0xe0000000 + 0x02000000 0 0xe0000000 + 0 0x20000000 + + 0x01000000 0 0x00000000 + 0x01000000 0 0x00000000 + 0 0x00010000>; + }; + }; + + pci1: pcie@ffe250000 { + reg = <0xf 0xfe250000 0 0x10000>; + ranges = <0x02000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000 + 0x01000000 0x0 0x00000000 0xf 0xf8010000 0x0 0x00010000>; + pcie@0 { + ranges = <0x02000000 0 0xe0000000 + 0x02000000 0 0xe0000000 + 0 0x20000000 + + 0x01000000 0 0x00000000 + 0x01000000 0 0x00000000 + 0 0x00010000>; + }; + }; + + pci2: 
pcie@ffe260000 { + reg = <0xf 0xfe260000 0 0x1000>; + ranges = <0x02000000 0 0xe0000000 0xc 0x40000000 0 0x20000000 + 0x01000000 0 0x00000000 0xf 0xf8020000 0 0x00010000>; + pcie@0 { + ranges = <0x02000000 0 0xe0000000 + 0x02000000 0 0xe0000000 + 0 0x20000000 + + 0x01000000 0 0x00000000 + 0x01000000 0 0x00000000 + 0 0x00010000>; + }; + }; + + pci3: pcie@ffe270000 { + reg = <0xf 0xfe270000 0 0x10000>; + ranges = <0x02000000 0 0xe0000000 0xc 0x60000000 0 0x20000000 + 0x01000000 0 0x00000000 0xf 0xf8030000 0 0x00010000>; + pcie@0 { + ranges = <0x02000000 0 0xe0000000 + 0x02000000 0 0xe0000000 + 0 0x20000000 + + 0x01000000 0 0x00000000 + 0x01000000 0 0x00000000 + 0 0x00010000>; + }; + }; + rio: rapidio@ffe0c0000 { + reg = <0xf 0xfe0c0000 0 0x11000>; + + port1 { + ranges = <0 0 0xc 0x20000000 0 0x10000000>; + }; + port2 { + ranges = <0 0 0xc 0x30000000 0 0x10000000>; + }; + }; + + fsl,dpaa { + compatible = "fsl,t4240-dpaa", "fsl,dpaa"; + ethernet@0 { + compatible = "fsl,t4240-dpa-ethernet", "fsl,dpa-ethernet"; + fsl,fman-mac = <&enet0>; + }; + ethernet@1 { + compatible = "fsl,t4240-dpa-ethernet", "fsl,dpa-ethernet"; + fsl,fman-mac = <&enet1>; + }; + ethernet@2 { + compatible = "fsl,t4240-dpa-ethernet", "fsl,dpa-ethernet"; + fsl,fman-mac = <&enet2>; + }; + ethernet@3 { + compatible = "fsl,t4240-dpa-ethernet", "fsl,dpa-ethernet"; + fsl,fman-mac = <&enet3>; + }; + ethernet@4 { + compatible = "fsl,t4240-dpa-ethernet", "fsl,dpa-ethernet"; + fsl,fman-mac = <&enet4>; + status = "disabled"; + }; + ethernet@5 { + compatible = "fsl,t4240-dpa-ethernet", "fsl,dpa-ethernet"; + fsl,fman-mac = <&enet5>; + status = "disabled"; + }; + ethernet@6 { + compatible = "fsl,t4240-dpa-ethernet", "fsl,dpa-ethernet"; + fsl,fman-mac = <&enet6>; + }; + ethernet@7 { + compatible = "fsl,t4240-dpa-ethernet", "fsl,dpa-ethernet"; + fsl,fman-mac = <&enet7>; + }; + ethernet@8 { + compatible = "fsl,t4240-dpa-ethernet", "fsl,dpa-ethernet"; + fsl,fman-mac = <&enet8>; + }; + ethernet@9 { + compatible = 
"fsl,t4240-dpa-ethernet", "fsl,dpa-ethernet"; + fsl,fman-mac = <&enet9>; + }; + ethernet@10 { + compatible = "fsl,t4240-dpa-ethernet", "fsl,dpa-ethernet"; + fsl,fman-mac = <&enet10>; + }; + ethernet@11 { + compatible = "fsl,t4240-dpa-ethernet", "fsl,dpa-ethernet"; + fsl,fman-mac = <&enet11>; + }; + ethernet@12 { + compatible = "fsl,t4240-dpa-ethernet", "fsl,dpa-ethernet"; + fsl,fman-mac = <&enet12>; + status = "disabled"; + }; + ethernet@13 { + compatible = "fsl,t4240-dpa-ethernet", "fsl,dpa-ethernet"; + fsl,fman-mac = <&enet13>; + status = "disabled"; + }; + ethernet@14 { + compatible = "fsl,t4240-dpa-ethernet", "fsl,dpa-ethernet"; + fsl,fman-mac = <&enet14>; + }; + ethernet@15 { + compatible = "fsl,t4240-dpa-ethernet", "fsl,dpa-ethernet"; + fsl,fman-mac = <&enet15>; + }; + }; +}; + +/include/ "fsl/t4240si-post.dtsi" +/include/ "fsl/qoriq-dpaa-res3.dtsi" +/include/ "fsl/qoriq-qman-ceetm0.dtsi" +/include/ "fsl/qoriq-qman-ceetm1.dtsi" diff --git a/arch/powerpc/configs/85xx/e6500rev2_defconfig b/arch/powerpc/configs/85xx/e6500rev2_defconfig index 9bdce3e..f7a855b 100644 --- a/arch/powerpc/configs/85xx/e6500rev2_defconfig +++ b/arch/powerpc/configs/85xx/e6500rev2_defconfig @@ -106,6 +106,7 @@ CONFIG_FMAN_T4240=y CONFIG_FSL_DPAA_ETH=y CONFIG_E1000E=y CONFIG_FSL_10GBASE_KR=y +CONFIG_VITESSE_PHY=y CONFIG_FIXED_PHY=y # CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set @@ -125,7 +126,8 @@ CONFIG_SPI=y CONFIG_SPI_GPIO=y CONFIG_SPI_FSL_SPI=y CONFIG_SPI_FSL_ESPI=y -# CONFIG_HWMON is not set +CONFIG_HWMON=y +CONFIG_SENSORS_W83793=y CONFIG_VIDEO_OUTPUT_CONTROL=y CONFIG_USB=y CONFIG_USB_EHCI_HCD=y @@ -139,6 +141,7 @@ CONFIG_EDAC=y CONFIG_EDAC_MM_EDAC=y CONFIG_EDAC_MPC85XX=y CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_DS1374=y CONFIG_RTC_DRV_DS3232=y CONFIG_RTC_DRV_CMOS=y CONFIG_DMADEVICES=y diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig index ae08633..ae9fdb51 100644 --- a/arch/powerpc/platforms/85xx/Kconfig +++ 
b/arch/powerpc/platforms/85xx/Kconfig @@ -284,7 +284,7 @@ config CORENET_GENERIC For 32bit kernel, the following boards are supported: P2041 RDB, P3041 DS and P4080 DS For 64bit kernel, the following boards are supported: - T208x QDS and RDB, T4240 QDS and B4 QDS + T208x QDS and RDB, T4240 QDS, T4240 RDB and B4 QDS The following boards are supported for both 32bit and 64bit kernel: P5020 DS, P5040 DS, T104xQDS, T104xRDB diff --git a/arch/powerpc/platforms/85xx/corenet_generic.c b/arch/powerpc/platforms/85xx/corenet_generic.c index 7026526..693f284 100644 --- a/arch/powerpc/platforms/85xx/corenet_generic.c +++ b/arch/powerpc/platforms/85xx/corenet_generic.c @@ -141,6 +141,7 @@ static const char * const boards[] __initconst = { "fsl,T2081QDS", "fsl,T2080RDB", "fsl,T4240QDS", + "fsl,T4240RDB", "fsl,B4860QDS", "fsl,B4420QDS", "fsl,B4220QDS", -- cgit v0.10.2 From 5e5d799febe8d397fdd42bb5c788350cd09bf2a9 Mon Sep 17 00:00:00 2001 From: Zhao Qiang Date: Mon, 14 Apr 2014 13:14:13 +0800 Subject: QE: add PPC64 for QE's dependences PPC64 can also support qe. Signed-off-by: Zhao Qiang Change-Id: Iaa88d2886b40e30932aec6b2bd55de68d464fd5f Reviewed-on: http://git.am.freescale.net:8181/10935 Tested-by: Review Code-CDREVIEW Reviewed-by: Xiaobo Xie Reviewed-by: Jose Rivera diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig index bf9c6d4..be181b6 100644 --- a/arch/powerpc/platforms/Kconfig +++ b/arch/powerpc/platforms/Kconfig @@ -280,7 +280,7 @@ config TAU_AVERAGE config QUICC_ENGINE bool "Freescale QUICC Engine (QE) Support" - depends on FSL_SOC && PPC32 + depends on FSL_SOC && (PPC32 || PPC64) select PPC_LIB_RHEAP select CRC32 help -- cgit v0.10.2 From dc850cb1713746aaa56cd6b1a322e231472d779d Mon Sep 17 00:00:00 2001 From: Zhao Qiang Date: Fri, 11 Apr 2014 11:29:08 +0800 Subject: qe: remove duplicated qe node in t1040si-post.dtsi qe node was duplicated in t1040si-post.dtsi, remove one of them. 
Signed-off-by: Zhao Qiang Change-Id: Ie701cd3f6a2f068b6ba610adc4aeced926dcc168 Reviewed-on: http://git.am.freescale.net:8181/10897 Tested-by: Review Code-CDREVIEW Reviewed-by: Xiaobo Xie Reviewed-by: Jose Rivera diff --git a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi index 696e30b..23d0ae5 100644 --- a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi @@ -757,48 +757,3 @@ sata@221000 { }; }; }; - -&qe { - #address-cells = <1>; - #size-cells = <1>; - device_type = "qe"; - compatible = "fsl,qe"; - fsl,qe-num-riscs = <1>; - fsl,qe-num-snums = <28>; - - qeic: interrupt-controller@80 { - interrupt-controller; - compatible = "fsl,qe-ic"; - #address-cells = <0>; - #interrupt-cells = <1>; - reg = <0x80 0x80>; - interrupts = <95 2 0 0 94 2 0 0>; //high:79 low:78 - }; - - ucc@2000 { - cell-index = <1>; - reg = <0x2000 0x200>; - interrupts = <32>; - interrupt-parent = <&qeic>; - }; - - ucc@2200 { - cell-index = <3>; - reg = <0x2200 0x200>; - interrupts = <34>; - interrupt-parent = <&qeic>; - }; - - muram@10000 { - #address-cells = <1>; - #size-cells = <1>; - compatible = "fsl,qe-muram", "fsl,cpm-muram"; - ranges = <0x0 0x10000 0x6000>; - - data-only@0 { - compatible = "fsl,qe-muram-data", - "fsl,cpm-muram-data"; - reg = <0x0 0x6000>; - }; - }; -}; -- cgit v0.10.2 From 7188e1a04ff61f57fc71593bfdc6862d23407a70 Mon Sep 17 00:00:00 2001 From: Zhao Qiang Date: Mon, 14 Apr 2014 14:49:54 +0800 Subject: QE: Add "ifdef CONFIG_QUICC_ENGINE" for qe qe and qe_ic shouldn't be initialized when QUICC_ENGINE is not selected. 
Signed-off-by: Zhao Qiang Change-Id: If9b1b22499dade52ebc0727b6c0ec923f520cdd1 Reviewed-on: http://git.am.freescale.net:8181/10936 Tested-by: Review Code-CDREVIEW Reviewed-by: Xiaobo Xie Reviewed-by: Jose Rivera diff --git a/arch/powerpc/platforms/85xx/corenet_generic.c b/arch/powerpc/platforms/85xx/corenet_generic.c index 693f284..45a61bb 100644 --- a/arch/powerpc/platforms/85xx/corenet_generic.c +++ b/arch/powerpc/platforms/85xx/corenet_generic.c @@ -42,7 +42,9 @@ void __init corenet_gen_pic_init(void) unsigned int flags = MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU | MPIC_NO_RESET; +#ifdef CONFIG_QUICC_ENGINE struct device_node *np; +#endif if (ppc_md.get_irq == mpic_get_coreint_irq) flags |= MPIC_ENABLE_COREINT; @@ -52,12 +54,14 @@ void __init corenet_gen_pic_init(void) mpic_init(mpic); +#ifdef CONFIG_QUICC_ENGINE np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic"); if (np) { qe_ic_init(np, 0, qe_ic_cascade_low_mpic, qe_ic_cascade_high_mpic); of_node_put(np); } +#endif } /* @@ -108,9 +112,11 @@ static const struct of_device_id of_device_ids[] = { { .compatible = "fsl,qoriq-pcie-v3.0", }, +#ifdef CONFIG_QUICC_ENGINE { .compatible = "fsl,qe", }, +#endif /* The following two are for the Freescale hypervisor */ { .name = "hypervisor", -- cgit v0.10.2 From 857692491a0beed72330e5c13935675c0449b963 Mon Sep 17 00:00:00 2001 From: Jianhua Xie Date: Mon, 14 Apr 2014 17:29:10 +0800 Subject: bonding: make pcd basefqid aligned in FSL LAG Distribution may result in less than hashDistributionNumOfFqids queues if baseFqid unaligned. 
Signed-off-by: Jianhua Xie Change-Id: I3bb9dd597d305759a81a0319957d92c096294c4c Reviewed-on: http://git.am.freescale.net:8181/10949 Tested-by: Review Code-CDREVIEW Reviewed-by: Jiafei Pan Reviewed-by: Jose Rivera diff --git a/drivers/net/bonding/hw_distribution.c b/drivers/net/bonding/hw_distribution.c index 6eeb58a..1403659 100644 --- a/drivers/net/bonding/hw_distribution.c +++ b/drivers/net/bonding/hw_distribution.c @@ -1378,7 +1378,7 @@ int get_oh_info(void) struct platform_device *oh_of_dev, *of_dev; struct device *dpa_oh_dev, *oh_dev; struct device_node *dpa_oh_node = NULL, *oh_node; - int lenp, errno = BOND_OH_SUCCESS, i = 0; + int fqcount, lenp, errno = BOND_OH_SUCCESS, i = 0; const phandle *p_oh_port_handle; const unsigned int *p_port_id; const unsigned int *p_channel_id; @@ -1481,9 +1481,9 @@ int get_oh_info(void) memcpy(poh[i].friendname, dpa_oh_node->full_name + 14, 10); + fqcount = roundup_pow_of_two(FM_MAX_NUM_OF_MACS); if (qman_alloc_fqid_range(&poh[i].pcd_fqids_base, - FM_MAX_NUM_OF_MACS, true, 0) - != FM_MAX_NUM_OF_MACS) { + fqcount, fqcount, 0) != fqcount) { pr_err("error on alloc continuous pcd fqid\n"); return -EINVAL; } -- cgit v0.10.2 From 59283bc18e045b47ae8141d3e3de5754f073f8e2 Mon Sep 17 00:00:00 2001 From: Stefan Sicleru Date: Tue, 15 Apr 2014 11:30:02 +0300 Subject: T1040RDB: Fix: Add L2 switch support and device tree bindings Added port indexes and compatible strings for each port and for the L2 switch node itself. Added L2 switch device tree binding. 
Signed-off-by: Stefan Sicleru Change-Id: I0d1383fbde82698bf6bdbbf275dadd7768bf0f8d Reviewed-on: http://git.am.freescale.net:8181/10978 Tested-by: Review Code-CDREVIEW Reviewed-by: Razvan Stefanescu Reviewed-by: Codrin Constantin Ciubotariu Reviewed-by: Jose Rivera diff --git a/Documentation/devicetree/bindings/powerpc/fsl/l2-switch.txt b/Documentation/devicetree/bindings/powerpc/fsl/l2-switch.txt new file mode 100644 index 0000000..c192f74 --- /dev/null +++ b/Documentation/devicetree/bindings/powerpc/fsl/l2-switch.txt @@ -0,0 +1,99 @@ +T1040 L2switch Device Tree Binding +Copyright (C) 2013 Freescale Semiconductor Inc. + +CONTENTS + - Overview + - L2switch Node + - Port Node + +===================================================================== +Overview + +DESCRIPTION + + T1040 integrates a Gigabit Ethernet switch core with eight 10/100/1000 Mbps + Ethernet ports and two 10/100/1000/2500 Mbps ports. + +===================================================================== +L2switch Node + +Description + + This node specifies the address range of l2switch configuration registers + and interrupts. It also contains a set of child nodes defining the + Ethernet ports. + +PROPERTIES + + - compatible + Usage: required + Value type: + Definition: Must include "vitesse-9953" + + - reg + Usage: required + Value type: + Definition: Standard property which specifies the physical address and + length of the l2switch configuration registers. + + - interrupts + Usage: required + Value type: + Definition: Standard property, specifies the interrupts generated by + this device. + The value of the interrupts property consists of one interrupt + specifier. The format of the specifier is defined by the binding + document describing the node's interrupt parent. + +===================================================================== +Port Node + +Description + + This node specifies the status and connection type of each of the Ethernet + ports. 
+ +PROPERTIES + + - compatible + Usage: required + Value type: + Definition: Must include "vitesse-9953-port" + + - port-index + Usage: required + Value type: + Definition: Specifies the port index. Values 0, 1 are used by the two + internal 10/100/1000/2500 Mbps ports, values 2-9 are used by the + eight external 10/100/1000 Mbps ports. + + - phy-connection-type + Usage: required + Value type: + Definition: Specifies the controller/PHY interface type. + + - status + Usage: optional + Value type: + Definition: Standard property. + Indicates the operational status of the port. "disabled" is used + to indicate that the port is not usable (for example because the + QSGMII link to the PHYs is not available with current RCW). + + - fixed-link + Usage: optional + Value type: + Definition: Specifies link parameters in the absence of a PHY. + where a is emulated phy id - must be unique to among + all specified fixed-links, b is duplex - 0 half, 1 full, c is + link speed - 10/100/1000/2500, d is pause - 0 no pause, 1 pause, + e is asym_pause - 0 no asym_pause, 1 asym_pause. + + - phy-handle + Usage: optional + Value type: + Definition: The phandle for the PHY connected to this l2switch port. + + + Note: All other standard properties (see the ePAPR) are allowed but are + optional. diff --git a/arch/powerpc/boot/dts/fsl/qoriq-l2switch-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-l2switch-0.dtsi new file mode 100644 index 0000000..e15ac3c --- /dev/null +++ b/arch/powerpc/boot/dts/fsl/qoriq-l2switch-0.dtsi @@ -0,0 +1,93 @@ +/* + * T1040 Silicon/SoC L2switch device tree stub [ controller @ offset 0x800000 ] + * + * Copyright 2013 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +l2switch: l2switch@800000 { + #address-cells = <1>; + #size-cells = <0>; + + compatible = "vitesse-9953"; + clock-frequency = <0>; + + reg = <0x800000 0x290000>; + + port@100000 { + compatible = "vitesse-9953-port"; + port-index = <0>; + }; + + port@110000 { + compatible = "vitesse-9953-port"; + port-index = <1>; + }; + + port@120000 { + compatible = "vitesse-9953-port"; + port-index = <2>; + }; + + port@130000 { + compatible = "vitesse-9953-port"; + port-index = <3>; + }; + + port@140000 { + compatible = "vitesse-9953-port"; + port-index = <4>; + }; + + port@150000 { + compatible = "vitesse-9953-port"; + port-index = <5>; + }; + + port@160000 { + compatible = "vitesse-9953-port"; + port-index = <6>; + }; + + port@170000 { + compatible = "vitesse-9953-port"; + port-index = <7>; + }; + + port@180000 { + compatible = "vitesse-9953-port"; + port-index = <8>; + }; + + port@190000 { + compatible = "vitesse-9953-port"; + port-index = <9>; + }; + }; diff --git a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi index 23d0ae5..f48d5c2 100644 --- a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi @@ -638,6 +638,7 @@ sata@221000 { status = "disabled"; }; }; +/include/ "qoriq-l2switch-0.dtsi" l2switch@800000 { interrupts = <26 2 0 0>; -- cgit v0.10.2 From a2a02cc21ad042efca9af777194800d95253762c Mon Sep 17 00:00:00 2001 From: Sandeep Singh Date: Tue, 15 Apr 2014 12:51:52 -0400 Subject: t2080rdb/dts: Add usdpaa device tree - 42G configuration (4x10G + 2x1G) for USDPAA. 
- Also add device tree for shared MAC and macless interface Signed-off-by: Sandeep Singh Change-Id: I6e2fc18f13cea9d71dcaf777d017c119afe77b7c Reviewed-on: http://git.am.freescale.net:8181/11001 Tested-by: Review Code-CDREVIEW Reviewed-by: Vakul Garg Reviewed-by: Jose Rivera diff --git a/arch/powerpc/boot/dts/t2080rdb-usdpaa-shared-interfaces.dts b/arch/powerpc/boot/dts/t2080rdb-usdpaa-shared-interfaces.dts new file mode 100644 index 0000000..babce4d --- /dev/null +++ b/arch/powerpc/boot/dts/t2080rdb-usdpaa-shared-interfaces.dts @@ -0,0 +1,148 @@ +/* + * T2080RDB USDPAA Device Tree Source + * + * Copyright 2014 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/include/ "t2080rdb.dts" + +/ { + /* NB: "bpool-ethernet-seeds" is not set to avoid buffer seeding, + * because apps seed these pools with buffers allocated at + * run-time. + * HOWEVER, the kernel driver requires the buffer-size so + * "fsl,bpool-ethernet-cfg" is set. It also mis-interprets + * things if the base-address is zero (hence the 0xdeadbeef + * values). + */ + bp7: buffer-pool@7 { + compatible = "fsl,t2080-bpool", "fsl,bpool"; + fsl,bpid = <7>; + fsl,bpool-ethernet-cfg = <0 0 0 192 0 0xdeadbeef>; + fsl,bpool-thresholds = <0x400 0xc00 0x0 0x0>; + }; + bp8: buffer-pool@8 { + compatible = "fsl,t2080-bpool", "fsl,bpool"; + fsl,bpid = <8>; + fsl,bpool-ethernet-cfg = <0 0 0 576 0 0xabbaf00d>; + fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>; + }; + bp9: buffer-pool@9 { + compatible = "fsl,t2080-bpool", "fsl,bpool"; + fsl,bpid = <9>; + fsl,bpool-ethernet-cfg = <0 0 0 1728 0 0xfeedabba>; + fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>; + }; + + bp10: buffer-pool@10 { + compatible = "fsl,t2080-bpool", "fsl,bpool"; + fsl,bpid = <10>; + fsl,bpool-thresholds = <0x10 0x30 0x0 0x0>; + }; + bp11: buffer-pool@11 { + compatible = "fsl,t2080-bpool", "fsl,bpool"; + fsl,bpid = <11>; + fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>; + }; + bp12: buffer-pool@12 { + compatible = "fsl,t2080-bpool", "fsl,bpool"; + fsl,bpid = <12>; + fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>; + }; + bp16: buffer-pool@16 { + compatible = "fsl,t2080-bpool", "fsl,bpool"; + 
fsl,bpid = <16>; + fsl,bpool-ethernet-cfg = <0 2048 0 1728 0 0>; + fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>; + }; + bp17: buffer-pool@17 { + compatible = "fsl,t2080-bpool", "fsl,bpool"; + fsl,bpid = <17>; + fsl,bpool-ethernet-cfg = <0 2048 0 1728 0 0>; + fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>; + }; + + fsl,dpaa { + ethernet@0 {/* 10G */ + compatible = "fsl,t2080-dpa-ethernet-init", "fsl,dpa-ethernet-init"; + fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; + fsl,qman-frame-queues-rx = <0x90 1 0x91 1>; + fsl,qman-frame-queues-tx = <0x98 1 0x99 1>; + }; + /* ethernet@1 declared as shared MAC. USDPAA will seed buffers to + * this buffer pool. The ethernet driver will initialize the RX default, + * RX error, TX error, TX confirm and 8 TX Frame queues. On receiving frame + * at this interface, the ethernet driver will do kmap_atomic/kunmap_atomic + * for that frame. */ + ethernet@1 {/* 10G */ + compatible = "fsl,t2080-dpa-ethernet-shared", "fsl,dpa-ethernet-shared"; + fsl,bman-buffer-pools = <&bp17>; + fsl,qman-frame-queues-rx = <0x92 1 0x93 1 0x2000 3>; + fsl,qman-frame-queues-tx = <0 1 0 1 0x3000 8>; + }; + ethernet@2 {/* 1G */ + compatible = "fsl,t2080-dpa-ethernet-init", "fsl,dpa-ethernet-init"; + fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; + fsl,qman-frame-queues-rx = <0x54 1 0x55 1>; + fsl,qman-frame-queues-tx = <0x74 1 0x75 1>; + }; + ethernet@3 {/* 1G */ + compatible = "fsl,t2080-dpa-ethernet-init", "fsl,dpa-ethernet-init"; + fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; + fsl,qman-frame-queues-rx = <0x56 1 0x57 1>; + fsl,qman-frame-queues-tx = <0x76 1 0x77 1>; + }; + ethernet@8 {/* 10G */ + compatible = "fsl,t2080-dpa-ethernet-init", "fsl,dpa-ethernet-init"; + fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; + fsl,qman-frame-queues-rx = <0x5c 1 0x5d 1>; + fsl,qman-frame-queues-tx = <0x7c 1 0x7d 1>; + }; + ethernet@9 {/* 10G */ + compatible = "fsl,t2080-dpa-ethernet-init", "fsl,dpa-ethernet-init"; + fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; + fsl,qman-frame-queues-rx = 
<0x5e 1 0x5f 1>; + fsl,qman-frame-queues-tx = <0x7e 1 0x7f 1>; + }; + /* ethernet@10 declared as MAC-less interface with no "fsl,fman-mac" property. + * USDPAA will seed buffers to this buffer pool and initialize 8 TX Frame + * queues. The ethernet driver will initialize 8 RX default Frame queues. + * On receiving frame at this interface, the ethernet driver will do + * kmap_atomic/kunmap_atomic for that frame. */ + ethernet@10 { + compatible = "fsl,t2080-dpa-ethernet-macless", "fsl,dpa-ethernet-macless"; + fsl,bman-buffer-pools = <&bp16>; + fsl,qman-frame-queues-rx = <4000 8>; + fsl,qman-frame-queues-tx = <4008 8>; + local-mac-address = [00 11 22 33 44 55]; + }; + }; +}; diff --git a/arch/powerpc/boot/dts/t2080rdb-usdpaa.dts b/arch/powerpc/boot/dts/t2080rdb-usdpaa.dts new file mode 100644 index 0000000..c4dac2d --- /dev/null +++ b/arch/powerpc/boot/dts/t2080rdb-usdpaa.dts @@ -0,0 +1,119 @@ +/* + * T2080RDB USDPAA Device Tree Source + * + * Copyright 2014 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. 
+ * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/include/ "t2080rdb.dts" + +/ { + /* NB: "bpool-ethernet-seeds" is not set to avoid buffer seeding, + * because apps seed these pools with buffers allocated at + * run-time. + * HOWEVER, the kernel driver requires the buffer-size so + * "fsl,bpool-ethernet-cfg" is set. It also mis-interprets + * things if the base-address is zero (hence the 0xdeadbeef + * values). 
+ */ + bp7: buffer-pool@7 { + compatible = "fsl,t2080-bpool", "fsl,bpool"; + fsl,bpid = <7>; + fsl,bpool-ethernet-cfg = <0 0 0 192 0 0xdeadbeef>; + fsl,bpool-thresholds = <0x400 0xc00 0x0 0x0>; + }; + bp8: buffer-pool@8 { + compatible = "fsl,t2080-bpool", "fsl,bpool"; + fsl,bpid = <8>; + fsl,bpool-ethernet-cfg = <0 0 0 576 0 0xabbaf00d>; + fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>; + }; + bp9: buffer-pool@9 { + compatible = "fsl,t2080-bpool", "fsl,bpool"; + fsl,bpid = <9>; + fsl,bpool-ethernet-cfg = <0 0 0 1728 0 0xfeedabba>; + fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>; + }; + + bp10: buffer-pool@10 { + compatible = "fsl,t2080-bpool", "fsl,bpool"; + fsl,bpid = <10>; + fsl,bpool-thresholds = <0x10 0x30 0x0 0x0>; + }; + bp11: buffer-pool@11 { + compatible = "fsl,t2080-bpool", "fsl,bpool"; + fsl,bpid = <11>; + fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>; + }; + bp12: buffer-pool@12 { + compatible = "fsl,t2080-bpool", "fsl,bpool"; + fsl,bpid = <12>; + fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>; + }; + + fsl,dpaa { + ethernet@0 {/* 10G */ + compatible = "fsl,t2080-dpa-ethernet-init", "fsl,dpa-ethernet-init"; + fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; + fsl,qman-frame-queues-rx = <0x90 1 0x91 1>; + fsl,qman-frame-queues-tx = <0x98 1 0x99 1>; + }; + ethernet@1 {/* 10G */ + compatible = "fsl,t2080-dpa-ethernet-init", "fsl,dpa-ethernet-init"; + fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; + fsl,qman-frame-queues-rx = <0x92 1 0x93 1>; + fsl,qman-frame-queues-tx = <0x9a 1 0x9b 1>; + }; + ethernet@2 {/* 1G */ + compatible = "fsl,t2080-dpa-ethernet-init", "fsl,dpa-ethernet-init"; + fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; + fsl,qman-frame-queues-rx = <0x54 1 0x55 1>; + fsl,qman-frame-queues-tx = <0x74 1 0x75 1>; + }; + ethernet@3 {/* 1G */ + compatible = "fsl,t2080-dpa-ethernet-init", "fsl,dpa-ethernet-init"; + fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; + fsl,qman-frame-queues-rx = <0x56 1 0x57 1>; + fsl,qman-frame-queues-tx = <0x76 1 0x77 1>; + }; + ethernet@8 {/* 10G 
*/ + compatible = "fsl,t2080-dpa-ethernet-init", "fsl,dpa-ethernet-init"; + fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; + fsl,qman-frame-queues-rx = <0x5c 1 0x5d 1>; + fsl,qman-frame-queues-tx = <0x7c 1 0x7d 1>; + }; + ethernet@9 {/* 10G */ + compatible = "fsl,t2080-dpa-ethernet-init", "fsl,dpa-ethernet-init"; + fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; + fsl,qman-frame-queues-rx = <0x5e 1 0x5f 1>; + fsl,qman-frame-queues-tx = <0x7e 1 0x7f 1>; + }; + }; +}; -- cgit v0.10.2 From 9a119440b8863e6db0cac6e666804211104fb9c5 Mon Sep 17 00:00:00 2001 From: Sandeep Singh Date: Tue, 15 Apr 2014 16:38:33 -0400 Subject: t4240rdb/dts: Add usdpaa device tree - 48G configuration (4x10G + 8x1G) for USDPAA. - Also add device tree for shared MAC and macless interface Signed-off-by: Sandeep Singh Change-Id: Iceb48f0e8a1c0d429119064d9bb02d1678320902 Reviewed-on: http://git.am.freescale.net:8181/11002 Tested-by: Review Code-CDREVIEW Reviewed-by: Vakul Garg Reviewed-by: Jose Rivera diff --git a/arch/powerpc/boot/dts/t4240rdb-usdpaa-shared-interfaces.dts b/arch/powerpc/boot/dts/t4240rdb-usdpaa-shared-interfaces.dts new file mode 100644 index 0000000..689db9c --- /dev/null +++ b/arch/powerpc/boot/dts/t4240rdb-usdpaa-shared-interfaces.dts @@ -0,0 +1,191 @@ +/* + * T4240RDB USDPAA Device Tree Source + * + * Copyright 2014 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/include/ "t4240rdb.dts" + +/ { + /* NB: "bpool-ethernet-seeds" is not set to avoid buffer seeding, + * because apps seed these pools with buffers allocated at + * run-time. + * HOWEVER, the kernel driver requires the buffer-size so + * "fsl,bpool-ethernet-cfg" is set. It also mis-interprets + * things if the base-address is zero (hence the 0xdeadbeef + * values). 
+ */ + bp7: buffer-pool@7 { + compatible = "fsl,t4240-bpool", "fsl,bpool"; + fsl,bpid = <7>; + fsl,bpool-ethernet-cfg = <0 0 0 192 0 0xdeadbeef>; + fsl,bpool-thresholds = <0x400 0xc00 0x0 0x0>; + }; + bp8: buffer-pool@8 { + compatible = "fsl,t4240-bpool", "fsl,bpool"; + fsl,bpid = <8>; + fsl,bpool-ethernet-cfg = <0 0 0 576 0 0xabbaf00d>; + fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>; + }; + bp9: buffer-pool@9 { + compatible = "fsl,t4240-bpool", "fsl,bpool"; + fsl,bpid = <9>; + fsl,bpool-ethernet-cfg = <0 0 0 1728 0 0xfeedabba>; + fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>; + }; + + bp10: buffer-pool@10 { + compatible = "fsl,t4240-bpool", "fsl,bpool"; + fsl,bpid = <10>; + fsl,bpool-thresholds = <0x10 0x30 0x0 0x0>; + }; + bp11: buffer-pool@11 { + compatible = "fsl,t4240-bpool", "fsl,bpool"; + fsl,bpid = <11>; + fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>; + }; + bp12: buffer-pool@12 { + compatible = "fsl,t4240-bpool", "fsl,bpool"; + fsl,bpid = <12>; + fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>; + }; + bp16: buffer-pool@16 { + compatible = "fsl,t4240-bpool", "fsl,bpool"; + fsl,bpid = <16>; + fsl,bpool-ethernet-cfg = <0 2048 0 1728 0 0>; + fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>; + }; + bp17: buffer-pool@17 { + compatible = "fsl,t4240-bpool", "fsl,bpool"; + fsl,bpid = <17>; + fsl,bpool-ethernet-cfg = <0 2048 0 1728 0 0>; + fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>; + }; + + fsl,dpaa { + ethernet@0 { + compatible = "fsl,t4240-dpa-ethernet-init", "fsl,dpa-ethernet-init"; + fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; + fsl,qman-frame-queues-rx = <0x50 1 0x51 1>; + fsl,qman-frame-queues-tx = <0x70 1 0x71 1>; + }; + ethernet@1 { + compatible = "fsl,t4240-dpa-ethernet-init", "fsl,dpa-ethernet-init"; + fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; + fsl,qman-frame-queues-rx = <0x52 1 0x53 1>; + fsl,qman-frame-queues-tx = <0x72 1 0x73 1>; + }; + ethernet@2 { + compatible = "fsl,t4240-dpa-ethernet-init", "fsl,dpa-ethernet-init"; + fsl,bman-buffer-pools = <&bp7 &bp8 
&bp9>; + fsl,qman-frame-queues-rx = <0x54 1 0x55 1>; + fsl,qman-frame-queues-tx = <0x74 1 0x75 1>; + }; + ethernet@3 { + compatible = "fsl,t4240-dpa-ethernet-init", "fsl,dpa-ethernet-init"; + fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; + fsl,qman-frame-queues-rx = <0x56 1 0x57 1>; + fsl,qman-frame-queues-tx = <0x76 1 0x77 1>; + }; + ethernet@6 { + compatible = "fsl,t4240-dpa-ethernet-init", "fsl,dpa-ethernet-init"; + fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; + fsl,qman-frame-queues-rx = <0x5c 1 0x5d 1>; + fsl,qman-frame-queues-tx = <0x7c 1 0x7d 1>; + }; + ethernet@7 { + compatible = "fsl,t4240-dpa-ethernet-init", "fsl,dpa-ethernet-init"; + fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; + fsl,qman-frame-queues-rx = <0x5e 1 0x5f 1>; + fsl,qman-frame-queues-tx = <0x7e 1 0x7f 1>; + }; + ethernet@8 { + compatible = "fsl,t4240-dpa-ethernet-init", "fsl,dpa-ethernet-init"; + fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; + fsl,qman-frame-queues-rx = <0x60 1 0x61 1>; + fsl,qman-frame-queues-tx = <0x80 1 0x81 1>; + }; + ethernet@9 { + compatible = "fsl,t4240-dpa-ethernet-init", "fsl,dpa-ethernet-init"; + fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; + fsl,qman-frame-queues-rx = <0x62 1 0x63 1>; + fsl,qman-frame-queues-tx = <0x82 1 0x83 1>; + }; + ethernet@10 { + compatible = "fsl,t4240-dpa-ethernet-init", "fsl,dpa-ethernet-init"; + fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; + fsl,qman-frame-queues-rx = <0x64 1 0x65 1>; + fsl,qman-frame-queues-tx = <0x84 1 0x85 1>; + }; + ethernet@11 { + compatible = "fsl,t4240-dpa-ethernet-init", "fsl,dpa-ethernet-init"; + fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; + fsl,qman-frame-queues-rx = <0x66 1 0x67 1>; + fsl,qman-frame-queues-tx = <0x86 1 0x87 1>; + }; + ethernet@14 { + compatible = "fsl,t4240-dpa-ethernet-init", "fsl,dpa-ethernet-init"; + fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; + fsl,qman-frame-queues-rx = <0x6c 1 0x6d 1>; + fsl,qman-frame-queues-tx = <0x8c 1 0x8d 1>; + }; + /* ethernet@15 declared as shared MAC. 
USDPAA will seed buffers to + * this buffer pool. The ethernet driver will initialize the RX default, + * RX error, TX error, TX confirm and 8 TX Frame queues. On receiving frame + * at this interface, the ethernet driver will do kmap_atomic/kunmap_atomic + * for that frame. */ + ethernet@15 { + compatible = "fsl,t4240-dpa-ethernet-shared", "fsl,dpa-ethernet-shared"; + fsl,bman-buffer-pools = <&bp17>; + fsl,qman-frame-queues-rx = <0x6e 1 0x6f 1 0x2000 3>; + fsl,qman-frame-queues-tx = <0 1 0 1 0x3000 8>; + }; + /* ethernet@16 declared as MAC-less interface with no "fsl,fman-mac" property. + * USDPAA will seed buffers to this buffer pool and initialize 8 TX Frame + * queues. The ethernet driver will initialize 8 RX default Frame queues. + * On receiving frame at this interface, the ethernet driver will do + * kmap_atomic/kunmap_atomic for that frame. */ + ethernet@16 { + compatible = "fsl,t4240-dpa-ethernet-macless", "fsl,dpa-ethernet-macless"; + fsl,bman-buffer-pools = <&bp16>; + fsl,qman-frame-queues-rx = <4000 8>; + fsl,qman-frame-queues-tx = <4008 8>; + local-mac-address = [00 11 22 33 44 55]; + }; + dpa-fman0-oh@2 { + compatible = "fsl,dpa-oh"; + /* Define frame queues for the OH port*/ + /* */ + fsl,qman-frame-queues-oh = <0x90 1 0x91 1>; + fsl,fman-oh-port = <&fman0_oh2>; + }; + }; +}; diff --git a/arch/powerpc/boot/dts/t4240rdb-usdpaa.dts b/arch/powerpc/boot/dts/t4240rdb-usdpaa.dts new file mode 100644 index 0000000..a9e30eb --- /dev/null +++ b/arch/powerpc/boot/dts/t4240rdb-usdpaa.dts @@ -0,0 +1,208 @@ +/* + * T4240RDB USDPAA Device Tree Source + * + * Copyright 2014 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/include/ "t4240rdb.dts" + +/ { + /* NB: "bpool-ethernet-seeds" is not set to avoid buffer seeding, + * because apps seed these pools with buffers allocated at + * run-time. + * HOWEVER, the kernel driver requires the buffer-size so + * "fsl,bpool-ethernet-cfg" is set. It also mis-interprets + * things if the base-address is zero (hence the 0xdeadbeef + * values). 
+ */ + bp7: buffer-pool@7 { + compatible = "fsl,t4240-bpool", "fsl,bpool"; + fsl,bpid = <7>; + fsl,bpool-ethernet-cfg = <0 0 0 192 0 0xdeadbeef>; + fsl,bpool-thresholds = <0x400 0xc00 0x0 0x0>; + }; + bp8: buffer-pool@8 { + compatible = "fsl,t4240-bpool", "fsl,bpool"; + fsl,bpid = <8>; + fsl,bpool-ethernet-cfg = <0 0 0 576 0 0xabbaf00d>; + fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>; + }; + bp9: buffer-pool@9 { + compatible = "fsl,t4240-bpool", "fsl,bpool"; + fsl,bpid = <9>; + fsl,bpool-ethernet-cfg = <0 0 0 1728 0 0xfeedabba>; + fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>; + }; + + bp10: buffer-pool@10 { + compatible = "fsl,t4240-bpool", "fsl,bpool"; + fsl,bpid = <10>; + fsl,bpool-thresholds = <0x10 0x30 0x0 0x0>; + }; + bp11: buffer-pool@11 { + compatible = "fsl,t4240-bpool", "fsl,bpool"; + fsl,bpid = <11>; + fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>; + }; + bp12: buffer-pool@12 { + compatible = "fsl,t4240-bpool", "fsl,bpool"; + fsl,bpid = <12>; + fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>; + }; + bp13: buffer-pool@13 { + compatible = "fsl,t4240-bpool", "fsl,bpool", "fsl,bpool-ethernet-seeds"; + fsl,bpid = <13>; + fsl,bpool-ethernet-cfg = <0 2048 0 1728 0 0>; + fsl,bpool-thresholds = <0x100 0x300 0x0 0x0>; + }; + + chosen { + dpaa-extended-args { + fman0-extd-args { + cell-index = <0x0>; + compatible = "fsl,fman-extended-args"; + dma-aid-mode = "port"; + fman0_oh3-extd-args { + cell-index = <0x2>; + compatible = "fsl,fman-port-op-extended-args"; + vsp-window = <0x8 0x0>; + }; + fman0_oh4-extd-args { + cell-index = <0x3>; + compatible = "fsl,fman-port-op-extended-args"; + vsp-window = <0x8 0x0>; + }; + }; + }; + }; + + fsl,dpaa { + ethernet@0 { + compatible = "fsl,t4240-dpa-ethernet-init", "fsl,dpa-ethernet-init"; + fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; + fsl,qman-frame-queues-rx = <0x50 1 0x51 1>; + fsl,qman-frame-queues-tx = <0x70 1 0x71 1>; + }; + ethernet@1 { + compatible = "fsl,t4240-dpa-ethernet-init", "fsl,dpa-ethernet-init"; + 
fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; + fsl,qman-frame-queues-rx = <0x52 1 0x53 1>; + fsl,qman-frame-queues-tx = <0x72 1 0x73 1>; + }; + ethernet@2 { + compatible = "fsl,t4240-dpa-ethernet-init", "fsl,dpa-ethernet-init"; + fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; + fsl,qman-frame-queues-rx = <0x54 1 0x55 1>; + fsl,qman-frame-queues-tx = <0x74 1 0x75 1>; + }; + ethernet@3 { + compatible = "fsl,t4240-dpa-ethernet-init", "fsl,dpa-ethernet-init"; + fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; + fsl,qman-frame-queues-rx = <0x56 1 0x57 1>; + fsl,qman-frame-queues-tx = <0x76 1 0x77 1>; + }; + ethernet@6 { + compatible = "fsl,t4240-dpa-ethernet-init", "fsl,dpa-ethernet-init"; + fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; + fsl,qman-frame-queues-rx = <0x5c 1 0x5d 1>; + fsl,qman-frame-queues-tx = <0x7c 1 0x7d 1>; + }; + ethernet@7 { + compatible = "fsl,t4240-dpa-ethernet-init", "fsl,dpa-ethernet-init"; + fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; + fsl,qman-frame-queues-rx = <0x5e 1 0x5f 1>; + fsl,qman-frame-queues-tx = <0x7e 1 0x7f 1>; + }; + ethernet@8 { + compatible = "fsl,t4240-dpa-ethernet-init", "fsl,dpa-ethernet-init"; + fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; + fsl,qman-frame-queues-rx = <0x60 1 0x61 1>; + fsl,qman-frame-queues-tx = <0x80 1 0x81 1>; + }; + ethernet@9 { + compatible = "fsl,t4240-dpa-ethernet-init", "fsl,dpa-ethernet-init"; + fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; + fsl,qman-frame-queues-rx = <0x62 1 0x63 1>; + fsl,qman-frame-queues-tx = <0x82 1 0x83 1>; + }; + ethernet@10 { + compatible = "fsl,t4240-dpa-ethernet-init", "fsl,dpa-ethernet-init"; + fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; + fsl,qman-frame-queues-rx = <0x64 1 0x65 1>; + fsl,qman-frame-queues-tx = <0x84 1 0x85 1>; + }; + ethernet@11 { + compatible = "fsl,t4240-dpa-ethernet-init", "fsl,dpa-ethernet-init"; + fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; + fsl,qman-frame-queues-rx = <0x66 1 0x67 1>; + fsl,qman-frame-queues-tx = <0x86 1 0x87 1>; + }; + ethernet@14 { + compatible = 
"fsl,t4240-dpa-ethernet-init", "fsl,dpa-ethernet-init"; + fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; + fsl,qman-frame-queues-rx = <0x6c 1 0x6d 1>; + fsl,qman-frame-queues-tx = <0x8c 1 0x8d 1>; + }; + ethernet@15 { + compatible = "fsl,t4240-dpa-ethernet-init", "fsl,dpa-ethernet-init"; + fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; + fsl,qman-frame-queues-rx = <0x6e 1 0x6f 1>; + fsl,qman-frame-queues-tx = <0x8e 1 0x8f 1>; + }; + ethernet@16 { + compatible = "fsl,t4240-dpa-ethernet-generic", "fsl,dpa-ethernet-generic"; + fsl,qman-frame-queues-tx = <0x92 1>; + fsl,oh-ports = <&oh3 &oh4>; + local-mac-address = [00 11 22 33 44 66]; + }; + + dpa-fman0-oh@2 { + compatible = "fsl,dpa-oh"; + /* Define frame queues for the OH port*/ + /* */ + fsl,qman-frame-queues-oh = <0x90 1 0x91 1>; + fsl,fman-oh-port = <&fman0_oh2>; + }; + oh3: dpa-fman0-oh@3 { + compatible = "fsl,dpa-oh"; + fsl,bman-buffer-pools = <&bp13>; + fsl,qman-frame-queues-oh = <0x96 1 0x97 1>; + fsl,fman-oh-port = <&fman0_oh3>; + }; + oh4: dpa-fman0-oh@4 { + compatible = "fsl,dpa-oh"; + fsl,bman-buffer-pools = <&bp7 &bp8 &bp9>; + fsl,qman-frame-queues-oh = <0x94 1 0x95 1>; + fsl,fman-oh-port = <&fman0_oh4>; + }; + + }; +}; -- cgit v0.10.2 From 0be682ca768d671c91cfd1379759efcb3b29102a Mon Sep 17 00:00:00 2001 From: Veaceslav Falico Date: Tue, 14 Jan 2014 21:58:50 +0100 Subject: net: add sysfs helpers for netdev_adjacent logic They clean up the code a bit and can be used further. CC: Ding Tianhong CC: "David S. Miller" CC: Eric Dumazet CC: Nicolas Dichtel CC: Cong Wang Signed-off-by: Veaceslav Falico Signed-off-by: David S. 
Miller Change-Id: I4dc650a845c8ea243b9b7daf62f0923748ed2f0a Reviewed-on: http://git.am.freescale.net:8181/11047 Tested-by: Review Code-CDREVIEW Reviewed-by: Mihai Claudiu Caraman Reviewed-by: Jose Rivera diff --git a/net/core/dev.c b/net/core/dev.c index 4ac5786..d518938 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4608,13 +4608,36 @@ struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev) } EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu); +int netdev_adjacent_sysfs_add(struct net_device *dev, + struct net_device *adj_dev, + struct list_head *dev_list) +{ + char linkname[IFNAMSIZ+7]; + sprintf(linkname, dev_list == &dev->adj_list.upper ? + "upper_%s" : "lower_%s", adj_dev->name); + return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj), + linkname); +} +void netdev_adjacent_sysfs_del(struct net_device *dev, + char *name, + struct list_head *dev_list) +{ + char linkname[IFNAMSIZ+7]; + sprintf(linkname, dev_list == &dev->adj_list.upper ? + "upper_%s" : "lower_%s", name); + sysfs_remove_link(&(dev->dev.kobj), linkname); +} + +#define netdev_adjacent_is_neigh_list(dev, dev_list) \ + (dev_list == &dev->adj_list.upper || \ + dev_list == &dev->adj_list.lower) + static int __netdev_adjacent_dev_insert(struct net_device *dev, struct net_device *adj_dev, struct list_head *dev_list, void *private, bool master) { struct netdev_adjacent *adj; - char linkname[IFNAMSIZ+7]; int ret; adj = __netdev_find_adj(dev, adj_dev, dev_list); @@ -4637,16 +4660,8 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev, pr_debug("dev_hold for %s, because of link added from %s to %s\n", adj_dev->name, dev->name, adj_dev->name); - if (dev_list == &dev->adj_list.lower) { - sprintf(linkname, "lower_%s", adj_dev->name); - ret = sysfs_create_link(&(dev->dev.kobj), - &(adj_dev->dev.kobj), linkname); - if (ret) - goto free_adj; - } else if (dev_list == &dev->adj_list.upper) { - sprintf(linkname, "upper_%s", adj_dev->name); - ret = 
sysfs_create_link(&(dev->dev.kobj), - &(adj_dev->dev.kobj), linkname); + if (netdev_adjacent_is_neigh_list(dev, dev_list)) { + ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list); if (ret) goto free_adj; } @@ -4666,14 +4681,8 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev, return 0; remove_symlinks: - if (dev_list == &dev->adj_list.lower) { - sprintf(linkname, "lower_%s", adj_dev->name); - sysfs_remove_link(&(dev->dev.kobj), linkname); - } else if (dev_list == &dev->adj_list.upper) { - sprintf(linkname, "upper_%s", adj_dev->name); - sysfs_remove_link(&(dev->dev.kobj), linkname); - } - + if (netdev_adjacent_is_neigh_list(dev, dev_list)) + netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); free_adj: kfree(adj); dev_put(adj_dev); @@ -4686,7 +4695,6 @@ void __netdev_adjacent_dev_remove(struct net_device *dev, struct list_head *dev_list) { struct netdev_adjacent *adj; - char linkname[IFNAMSIZ+7]; adj = __netdev_find_adj(dev, adj_dev, dev_list); @@ -4706,13 +4714,8 @@ void __netdev_adjacent_dev_remove(struct net_device *dev, if (adj->master) sysfs_remove_link(&(dev->dev.kobj), "master"); - if (dev_list == &dev->adj_list.lower) { - sprintf(linkname, "lower_%s", adj_dev->name); - sysfs_remove_link(&(dev->dev.kobj), linkname); - } else if (dev_list == &dev->adj_list.upper) { - sprintf(linkname, "upper_%s", adj_dev->name); - sysfs_remove_link(&(dev->dev.kobj), linkname); - } + if (netdev_adjacent_is_neigh_list(dev, dev_list)) + netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); list_del_rcu(&adj->list); pr_debug("dev_put for %s, because link removed from %s to %s\n", -- cgit v0.10.2 From 45ce45c69750b93b8262aa66792185bd49150293 Mon Sep 17 00:00:00 2001 From: Veaceslav Falico Date: Tue, 14 Jan 2014 21:58:51 +0100 Subject: net: rename sysfs symlinks on device name change Currently, we don't rename the upper/lower_ifc symlinks in /sys/class/net/*/ , which might result stale/duplicate links/names. 
Fix this by adding netdev_adjacent_rename_links(dev, oldname) which renames all the upper/lower interface's links to dev from the upper/lower_oldname to the new name. We don't need a rollback because only we control these symlinks and if we fail to rename them - sysfs will anyway complain. Reported-by: Ding Tianhong CC: Ding Tianhong CC: "David S. Miller" CC: Eric Dumazet CC: Nicolas Dichtel CC: Cong Wang Signed-off-by: Veaceslav Falico Signed-off-by: David S. Miller (cherry picked and merged from commit 5bb025fae53889cc99a21058c5dd369bf8cce820) Change-Id: I31a2f1800a7a181189c031d53ea1deef0ae137fc Reviewed-on: http://git.am.freescale.net:8181/11048 Tested-by: Review Code-CDREVIEW Reviewed-by: Mihai Claudiu Caraman Reviewed-by: Jose Rivera diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 8fa1219..cfde9a5 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2886,6 +2886,7 @@ void netdev_upper_dev_unlink(struct net_device *dev, struct net_device *upper_dev); void *netdev_lower_dev_get_private_rcu(struct net_device *dev, struct net_device *lower_dev); +void netdev_adjacent_rename_links(struct net_device *dev, char *oldname); void *netdev_lower_dev_get_private(struct net_device *dev, struct net_device *lower_dev); int skb_checksum_help(struct sk_buff *skb); diff --git a/net/core/dev.c b/net/core/dev.c index d518938..df70378 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1118,6 +1118,8 @@ rollback: write_seqcount_end(&devnet_rename_seq); + netdev_adjacent_rename_links(dev, oldname); + write_lock_bh(&dev_base_lock); hlist_del_rcu(&dev->name_hlist); write_unlock_bh(&dev_base_lock); @@ -1137,6 +1139,7 @@ rollback: err = ret; write_seqcount_begin(&devnet_rename_seq); memcpy(dev->name, oldname, IFNAMSIZ); + memcpy(oldname, newname, IFNAMSIZ); goto rollback; } else { pr_err("%s: name change rollback failed: %d\n", @@ -4999,6 +5002,25 @@ void *netdev_lower_dev_get_private_rcu(struct net_device *dev, } 
EXPORT_SYMBOL(netdev_lower_dev_get_private_rcu); +void netdev_adjacent_rename_links(struct net_device *dev, char *oldname) +{ + struct netdev_adjacent *iter; + + list_for_each_entry(iter, &dev->adj_list.upper, list) { + netdev_adjacent_sysfs_del(iter->dev, oldname, + &iter->dev->adj_list.lower); + netdev_adjacent_sysfs_add(iter->dev, dev, + &iter->dev->adj_list.lower); + } + + list_for_each_entry(iter, &dev->adj_list.lower, list) { + netdev_adjacent_sysfs_del(iter->dev, oldname, + &iter->dev->adj_list.upper); + netdev_adjacent_sysfs_add(iter->dev, dev, + &iter->dev->adj_list.upper); + } +} + void *netdev_lower_dev_get_private(struct net_device *dev, struct net_device *lower_dev) { -- cgit v0.10.2 From 6321dc4e2c1f1ac6ea5ef03236dfaa092e73a862 Mon Sep 17 00:00:00 2001 From: Nikhil Badola Date: Tue, 8 Apr 2014 16:40:51 +0530 Subject: fsl/usb: Add Deep Sleep support for USB drv Saves and Restores USB register context when the system goes to deep sleep and resumes from it. This is required only when USB controller and phys are OFF during Deep Sleep Signed-off-by: Nikhil Badola Signed-off-by: Ramneek Mehresh Change-Id: I5af0b680a9c3b4dd014c8266f5e8b00ca56264df Reviewed-on: http://git.am.freescale.net:8181/10994 Tested-by: Review Code-CDREVIEW Reviewed-by: Varun Sethi Reviewed-by: Prabhakar Kushwaha Reviewed-by: Jose Rivera diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c index e3a386a..16225ad 100644 --- a/drivers/usb/host/ehci-fsl.c +++ b/drivers/usb/host/ehci-fsl.c @@ -38,6 +38,7 @@ struct ehci_fsl { struct ehci_hcd ehci; #ifdef CONFIG_PM + struct ehci_regs *saved_regs; /* Saved USB PHY settings, need to restore after deep sleep. 
*/ u32 usb_ctrl; #endif @@ -482,7 +483,43 @@ static int ehci_fsl_setup(struct usb_hcd *hcd) return retval; } + + + #ifdef CONFIG_PM +/* save usb registers */ +static int ehci_fsl_save_context(struct usb_hcd *hcd) +{ + struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd); + struct ehci_hcd *ehci = hcd_to_ehci(hcd); + void __iomem *non_ehci = hcd->regs; + + ehci_fsl->saved_regs = kzalloc(sizeof(struct ehci_regs), GFP_KERNEL); + if (!ehci_fsl->saved_regs) + return -ENOMEM; + _memcpy_fromio(ehci_fsl->saved_regs, ehci->regs, + sizeof(struct ehci_regs)); + ehci_fsl->usb_ctrl = in_be32(non_ehci + FSL_SOC_USB_CTRL); + return 0; + +} + +/*Restore usb registers */ +static int ehci_fsl_restore_context(struct usb_hcd *hcd) +{ + struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd); + struct ehci_hcd *ehci = hcd_to_ehci(hcd); + void __iomem *non_ehci = hcd->regs; + + if (ehci_fsl->saved_regs) { + _memcpy_toio(ehci->regs, ehci_fsl->saved_regs, + sizeof(struct ehci_regs)); + out_be32(non_ehci + FSL_SOC_USB_CTRL, ehci_fsl->usb_ctrl); + kfree(ehci_fsl->saved_regs); + ehci_fsl->saved_regs = NULL; + } + return 0; +} #ifdef CONFIG_PPC_MPC512x static int ehci_fsl_mpc512x_drv_suspend(struct device *dev) @@ -658,6 +695,9 @@ static int ehci_fsl_drv_suspend(struct device *dev) ehci_prepare_ports_for_controller_suspend(hcd_to_ehci(hcd), device_may_wakeup(dev)); + + ehci_fsl_save_context(hcd); + if (!fsl_deep_sleep()) return 0; @@ -671,6 +711,9 @@ static int ehci_fsl_drv_resume(struct device *dev) struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd); struct ehci_hcd *ehci = hcd_to_ehci(hcd); void __iomem *non_ehci = hcd->regs; + + ehci_fsl_restore_context(hcd); + #if defined(CONFIG_FSL_USB2_OTG) || defined(CONFIG_FSL_USB2_OTG_MODULE) struct usb_bus host = hcd->self; #endif -- cgit v0.10.2 From e32e7897039873adafb57263d4253ed3f878575b Mon Sep 17 00:00:00 2001 From: Suresh Gupta Date: Fri, 11 Apr 2014 20:03:22 +0530 Subject: USB : Gadget : fsl: Fix the fault issue on rmmod completion in 
udc_controller->done should be assign with proper value before complete called. The complete called in fsl_udc_release which intern called from usb_del_gadget_udc, so moving assignment before calling usb_del_gadget_udc Signed-off-by: Suresh Gupta Change-Id: I40867db4c11464d7d47e1fd55ed852b38c7d0e47 Reviewed-on: http://git.am.freescale.net:8181/10911 Tested-by: Review Code-CDREVIEW Reviewed-by: Nikhil Badola Reviewed-by: Jose Rivera diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c index fa27a4c..c540dfa 100644 --- a/drivers/usb/gadget/fsl_udc_core.c +++ b/drivers/usb/gadget/fsl_udc_core.c @@ -2536,8 +2536,8 @@ static int __exit fsl_udc_remove(struct platform_device *pdev) if (!udc_controller) return -ENODEV; - usb_del_gadget_udc(&udc_controller->gadget); udc_controller->done = &done; + usb_del_gadget_udc(&udc_controller->gadget); fsl_udc_clk_release(); -- cgit v0.10.2 From 079c2217ab32340438798bc8f3b1512f32591c58 Mon Sep 17 00:00:00 2001 From: Marian Chereji Date: Thu, 17 Apr 2014 15:45:38 +0300 Subject: dpa_offload: Fix usdpaa DTS file corruption for T2080RDB There is a closed bracket ('}') missing in the usdpaa DTS file which is causing a dtc compiler error. 
Signed-off-by: Marian Chereji Change-Id: Ibafca15bfb9e7c03bfda9d9ef05d43bdd233a66c Reviewed-on: http://git.am.freescale.net:8181/11102 Tested-by: Review Code-CDREVIEW Reviewed-by: Aurelian Zanoschi Reviewed-by: Radu-Andrei Bulie Reviewed-by: Jose Rivera diff --git a/drivers/staging/fsl_dpa_offload/dts/t2080rdb-usdpaa.dts b/drivers/staging/fsl_dpa_offload/dts/t2080rdb-usdpaa.dts index ff231ca..269aaea 100644 --- a/drivers/staging/fsl_dpa_offload/dts/t2080rdb-usdpaa.dts +++ b/drivers/staging/fsl_dpa_offload/dts/t2080rdb-usdpaa.dts @@ -139,6 +139,7 @@ fsl,qman-frame-queues-rx = <5000 8>; fsl,qman-frame-queues-tx = <5008 8>; local-mac-address = [00 11 22 33 44 66]; + }; dpa-fman0-oh@2 { compatible = "fsl,dpa-oh"; /* Define frame queues for the OH port*/ -- cgit v0.10.2 From 0dfc5833cea4f87c5a640aff8a8ecb079af4dde2 Mon Sep 17 00:00:00 2001 From: Aurelian Zanoschi Date: Thu, 17 Apr 2014 16:15:55 +0300 Subject: dpa_offload: Fix compat support for SA per DSCP In case of 64bit platforms SA per DSCP feature needs some structure data members in order to correctly copy parameters from user-space to kernel-space. Also values for the three feature parameters need to be correctly copied from one space to another. 
Change-Id: I2d31813431b87be6e61651597acf7ad56ae60148 Signed-off-by: Aurelian Zanoschi Reviewed-on: http://git.am.freescale.net:8181/11104 Tested-by: Review Code-CDREVIEW Reviewed-by: Marian-Cornel Chereji Reviewed-by: Jose Rivera diff --git a/drivers/staging/fsl_dpa_offload/dpa_ipsec_ioctl.h b/drivers/staging/fsl_dpa_offload/dpa_ipsec_ioctl.h index 9d1da46..d76b0ff 100644 --- a/drivers/staging/fsl_dpa_offload/dpa_ipsec_ioctl.h +++ b/drivers/staging/fsl_dpa_offload/dpa_ipsec_ioctl.h @@ -95,6 +95,8 @@ struct ioc_compat_sa_out_params { compat_uptr_t outer_ip_header; compat_uptr_t outer_udp_header; uint16_t post_sec_flow_id; + uint8_t dscp_start; + uint8_t dscp_end; }; struct ioc_compat_sa_in_params { @@ -158,6 +160,7 @@ struct ioc_compat_policy_params { uint8_t dest_prefix_len; uint8_t protocol; bool masked_proto; + bool use_dscp; union { struct dpa_ipsec_l4_params l4; struct dpa_ipsec_icmp_params icmp; diff --git a/drivers/staging/fsl_dpa_offload/wrp_dpa_ipsec.c b/drivers/staging/fsl_dpa_offload/wrp_dpa_ipsec.c index f4b081f..5d04f84 100644 --- a/drivers/staging/fsl_dpa_offload/wrp_dpa_ipsec.c +++ b/drivers/staging/fsl_dpa_offload/wrp_dpa_ipsec.c @@ -118,6 +118,8 @@ static void compat_copy_sa_out_params(struct dpa_ipsec_sa_out_params *prm, prm->outer_udp_header = (void *) compat_ptr(compat_prm->outer_udp_header); prm->post_sec_flow_id = compat_prm->post_sec_flow_id; + prm->dscp_start = compat_prm->dscp_start; + prm->dscp_end = compat_prm->dscp_end; } static void compat_copy_sa_crypto_params(struct dpa_ipsec_sa_crypto_params *prm, @@ -205,6 +207,7 @@ static void compat_copy_dpa_ipsec_add_rem_policy( prm->pol_params.protocol = compat_prm->pol_params.protocol; prm->pol_params.masked_proto = compat_prm->pol_params.masked_proto; + prm->pol_params.use_dscp = compat_prm->pol_params.use_dscp; prm->pol_params.l4 = compat_prm->pol_params.l4; if (compat_prm->pol_params.dir_params.type == DPA_IPSEC_POL_DIR_PARAMS_MANIP) @@ -231,6 +234,7 @@ static void 
compat_copy_dpa_ipsec_add_rem_policy( compat_prm->pol_params.protocol = prm->pol_params.protocol; compat_prm->pol_params.masked_proto = prm->pol_params.masked_proto; + compat_prm->pol_params.use_dscp = prm->pol_params.use_dscp; compat_prm->pol_params.l4 = prm->pol_params.l4; if (prm->pol_params.dir_params.type == DPA_IPSEC_POL_DIR_PARAMS_MANIP) -- cgit v0.10.2 From 153f2d27a28a543f722fdef20553dd65e8decac1 Mon Sep 17 00:00:00 2001 From: Liu Gang Date: Thu, 17 Apr 2014 18:19:16 +0800 Subject: UIO/DMA: Add dma uio private mmap function UIO mmap cannot map an unaligned page address, and the PowerPC DMA register space has an unaligned page base address. So the mmap for DMA register address should be supported via a private function. Signed-off-by: Liu Gang Signed-off-by: Minghuan Lian Change-Id: Ic5672dd4f86031fab34830d47e231fb1c46b2070 Reviewed-on: http://git.am.freescale.net:8181/11081 Tested-by: Review Code-CDREVIEW Reviewed-by: Mingkai Hu Reviewed-by: Jose Rivera diff --git a/drivers/uio/fsl_dma_uio.c b/drivers/uio/fsl_dma_uio.c index f16a009..899523f 100644 --- a/drivers/uio/fsl_dma_uio.c +++ b/drivers/uio/fsl_dma_uio.c @@ -25,6 +25,7 @@ #include #include #include +#include static const char dma_uio_version[] = "DMA UIO driver v1.0"; @@ -82,6 +83,32 @@ static irqreturn_t dma_uio_irq_handler(int irq, struct uio_info *dev_info) return IRQ_HANDLED; } +static int dma_uio_mmap(struct uio_info *info, struct vm_area_struct *vma) +{ + int mi; + struct uio_mem *mem; + + if (vma->vm_pgoff < MAX_UIO_MAPS) { + if (info->mem[vma->vm_pgoff].size == 0) + return -EINVAL; + mi = (int)vma->vm_pgoff; + } else + return -EINVAL; + + mem = info->mem + mi; + + if (vma->vm_end - vma->vm_start > mem->size) + return -EINVAL; + + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + + return remap_pfn_range(vma, + vma->vm_start, + mem->addr >> PAGE_SHIFT, + vma->vm_end - vma->vm_start, + vma->vm_page_prot); +} + static int dma_chan_uio_setup(struct dma_chan *dma_ch) { int ret; @@ -112,6 
+139,7 @@ static int dma_chan_uio_setup(struct dma_chan *dma_ch) info->uio.handler = dma_uio_irq_handler; info->uio.open = dma_uio_open; info->uio.release = dma_uio_release; + info->uio.mmap = dma_uio_mmap; info->uio.priv = dma_ch; ret = uio_register_device(dma_ch->dev, &info->uio); if (ret) { -- cgit v0.10.2 From 8b60f5ea90c49344692a70f62cd4aa349de38b48 Mon Sep 17 00:00:00 2001 From: Mihai Caraman Date: Fri, 18 Apr 2014 15:17:49 +0300 Subject: powerpc/kvm/booke: Fix build break due to stack frame size warning Commit ce11e48b7fdd256ec68b932a89b397a790566031 ("KVM: PPC: E500: Add userspace debug stub support") added "struct thread_struct" to the stack of kvmppc_vcpu_run(). thread_struct is 1152 bytes on my build, compared to 48 bytes for the recently-introduced "struct debug_reg". Use the latter instead. This fixes the following error: cc1: warnings being treated as errors arch/powerpc/kvm/booke.c: In function 'kvmppc_vcpu_run': arch/powerpc/kvm/booke.c:760:1: error: the frame size of 1424 bytes is larger than 1024 bytes make[2]: *** [arch/powerpc/kvm/booke.o] Error 1 make[1]: *** [arch/powerpc/kvm] Error 2 make[1]: *** Waiting for unfinished jobs.... 
Signed-off-by: Scott Wood (cherry picked and merged from upstream commit f5f972102d5c12729f0a35fce266b580aaa03f66) Change-Id: Iaa6996949b07b5f38e9c443071fcb38d71f05a94 Reviewed-on: http://git.am.freescale.net:8181/11191 Tested-by: Review Code-CDREVIEW diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h index a18b4ab..aace905 100644 --- a/arch/powerpc/include/asm/switch_to.h +++ b/arch/powerpc/include/asm/switch_to.h @@ -35,11 +35,7 @@ extern void giveup_vsx(struct task_struct *); extern void enable_kernel_spe(void); extern void giveup_spe(struct task_struct *); extern void load_up_spe(struct task_struct *); -extern void switch_booke_debug_regs(struct thread_struct *new_thread); - -#ifdef CONFIG_PPC_ADV_DEBUG_REGS -extern void switch_booke_debug_regs(struct thread_struct *new_thread); -#endif +extern void switch_booke_debug_regs(struct debug_reg *new_debug); #ifndef CONFIG_SMP extern void discard_lazy_cpu_state(void); diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 3386d8a..4a96556 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -339,7 +339,7 @@ static void set_debug_reg_defaults(struct thread_struct *thread) #endif } -static void prime_debug_regs(struct thread_struct *thread) +static void prime_debug_regs(struct debug_reg *debug) { /* * We could have inherited MSR_DE from userspace, since @@ -348,22 +348,22 @@ static void prime_debug_regs(struct thread_struct *thread) */ mtmsr(mfmsr() & ~MSR_DE); - mtspr(SPRN_IAC1, thread->debug.iac1); - mtspr(SPRN_IAC2, thread->debug.iac2); + mtspr(SPRN_IAC1, debug->iac1); + mtspr(SPRN_IAC2, debug->iac2); #if CONFIG_PPC_ADV_DEBUG_IACS > 2 - mtspr(SPRN_IAC3, thread->debug.iac3); - mtspr(SPRN_IAC4, thread->debug.iac4); + mtspr(SPRN_IAC3, debug->iac3); + mtspr(SPRN_IAC4, debug->iac4); #endif - mtspr(SPRN_DAC1, thread->debug.dac1); - mtspr(SPRN_DAC2, thread->debug.dac2); + mtspr(SPRN_DAC1, debug->dac1); + mtspr(SPRN_DAC2, 
debug->dac2); #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 - mtspr(SPRN_DVC1, thread->debug.dvc1); - mtspr(SPRN_DVC2, thread->debug.dvc2); + mtspr(SPRN_DVC1, debug->dvc1); + mtspr(SPRN_DVC2, debug->dvc2); #endif - mtspr(SPRN_DBCR0, thread->debug.dbcr0); - mtspr(SPRN_DBCR1, thread->debug.dbcr1); + mtspr(SPRN_DBCR0, debug->dbcr0); + mtspr(SPRN_DBCR1, debug->dbcr1); #ifdef CONFIG_BOOKE - mtspr(SPRN_DBCR2, thread->debug.dbcr2); + mtspr(SPRN_DBCR2, debug->dbcr2); #endif } /* @@ -371,11 +371,11 @@ static void prime_debug_regs(struct thread_struct *thread) * debug registers, set the debug registers from the values * stored in the new thread. */ -void switch_booke_debug_regs(struct thread_struct *new_thread) +void switch_booke_debug_regs(struct debug_reg *new_debug) { if ((current->thread.debug.dbcr0 & DBCR0_IDM) - || (new_thread->debug.dbcr0 & DBCR0_IDM)) - prime_debug_regs(new_thread); + || (new_debug->dbcr0 & DBCR0_IDM)) + prime_debug_regs(new_debug); } EXPORT_SYMBOL_GPL(switch_booke_debug_regs); #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ @@ -683,7 +683,7 @@ struct task_struct *__switch_to(struct task_struct *prev, #endif /* CONFIG_SMP */ #ifdef CONFIG_PPC_ADV_DEBUG_REGS - switch_booke_debug_regs(&new->thread); + switch_booke_debug_regs(&new->thread.debug); #else /* * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 7e62965..87c7ad7 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -681,7 +681,7 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu) int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) { int ret, s; - struct thread_struct thread; + struct debug_reg debug; #ifdef CONFIG_PPC_FPU struct thread_fp_state fp; int fpexc_mode; @@ -747,9 +747,9 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) #endif /* Switch to guest debug context */ - thread.debug = vcpu->arch.shadow_dbg_reg; - switch_booke_debug_regs(&thread); - thread.debug = 
current->thread.debug; + debug = vcpu->arch.shadow_dbg_reg; + switch_booke_debug_regs(&debug); + debug = current->thread.debug; current->thread.debug = vcpu->arch.shadow_dbg_reg; kvmppc_fix_ee_before_entry(); @@ -760,8 +760,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) We also get here with interrupts enabled. */ /* Switch back to user space debug context */ - switch_booke_debug_regs(&thread); - current->thread.debug = thread.debug; + switch_booke_debug_regs(&debug); + current->thread.debug = debug; #ifdef CONFIG_PPC_FPU kvmppc_save_guest_fp(vcpu); -- cgit v0.10.2 From 1c0dc06e47e11bf758f3e84ea90c2178a31dbf0f Mon Sep 17 00:00:00 2001 From: Scott Wood Date: Fri, 18 Apr 2014 15:27:47 -0500 Subject: Revert "net: rename sysfs symlinks on device name change" This reverts commit 45ce45c69750b93b8262aa66792185bd49150293. diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index cfde9a5..8fa1219 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2886,7 +2886,6 @@ void netdev_upper_dev_unlink(struct net_device *dev, struct net_device *upper_dev); void *netdev_lower_dev_get_private_rcu(struct net_device *dev, struct net_device *lower_dev); -void netdev_adjacent_rename_links(struct net_device *dev, char *oldname); void *netdev_lower_dev_get_private(struct net_device *dev, struct net_device *lower_dev); int skb_checksum_help(struct sk_buff *skb); diff --git a/net/core/dev.c b/net/core/dev.c index df70378..d518938 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1118,8 +1118,6 @@ rollback: write_seqcount_end(&devnet_rename_seq); - netdev_adjacent_rename_links(dev, oldname); - write_lock_bh(&dev_base_lock); hlist_del_rcu(&dev->name_hlist); write_unlock_bh(&dev_base_lock); @@ -1139,7 +1137,6 @@ rollback: err = ret; write_seqcount_begin(&devnet_rename_seq); memcpy(dev->name, oldname, IFNAMSIZ); - memcpy(oldname, newname, IFNAMSIZ); goto rollback; } else { pr_err("%s: name change rollback failed: %d\n", @@ 
-5002,25 +4999,6 @@ void *netdev_lower_dev_get_private_rcu(struct net_device *dev, } EXPORT_SYMBOL(netdev_lower_dev_get_private_rcu); -void netdev_adjacent_rename_links(struct net_device *dev, char *oldname) -{ - struct netdev_adjacent *iter; - - list_for_each_entry(iter, &dev->adj_list.upper, list) { - netdev_adjacent_sysfs_del(iter->dev, oldname, - &iter->dev->adj_list.lower); - netdev_adjacent_sysfs_add(iter->dev, dev, - &iter->dev->adj_list.lower); - } - - list_for_each_entry(iter, &dev->adj_list.lower, list) { - netdev_adjacent_sysfs_del(iter->dev, oldname, - &iter->dev->adj_list.upper); - netdev_adjacent_sysfs_add(iter->dev, dev, - &iter->dev->adj_list.upper); - } -} - void *netdev_lower_dev_get_private(struct net_device *dev, struct net_device *lower_dev) { -- cgit v0.10.2 From b35a69559c46e066e6f24bb02d5a6090483786e3 Mon Sep 17 00:00:00 2001 From: Scott Wood Date: Fri, 18 Apr 2014 15:27:52 -0500 Subject: Revert "net: add sysfs helpers for netdev_adjacent logic" This reverts commit 0be682ca768d671c91cfd1379759efcb3b29102a. diff --git a/net/core/dev.c b/net/core/dev.c index d518938..4ac5786 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4608,36 +4608,13 @@ struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev) } EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu); -int netdev_adjacent_sysfs_add(struct net_device *dev, - struct net_device *adj_dev, - struct list_head *dev_list) -{ - char linkname[IFNAMSIZ+7]; - sprintf(linkname, dev_list == &dev->adj_list.upper ? - "upper_%s" : "lower_%s", adj_dev->name); - return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj), - linkname); -} -void netdev_adjacent_sysfs_del(struct net_device *dev, - char *name, - struct list_head *dev_list) -{ - char linkname[IFNAMSIZ+7]; - sprintf(linkname, dev_list == &dev->adj_list.upper ? 
- "upper_%s" : "lower_%s", name); - sysfs_remove_link(&(dev->dev.kobj), linkname); -} - -#define netdev_adjacent_is_neigh_list(dev, dev_list) \ - (dev_list == &dev->adj_list.upper || \ - dev_list == &dev->adj_list.lower) - static int __netdev_adjacent_dev_insert(struct net_device *dev, struct net_device *adj_dev, struct list_head *dev_list, void *private, bool master) { struct netdev_adjacent *adj; + char linkname[IFNAMSIZ+7]; int ret; adj = __netdev_find_adj(dev, adj_dev, dev_list); @@ -4660,8 +4637,16 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev, pr_debug("dev_hold for %s, because of link added from %s to %s\n", adj_dev->name, dev->name, adj_dev->name); - if (netdev_adjacent_is_neigh_list(dev, dev_list)) { - ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list); + if (dev_list == &dev->adj_list.lower) { + sprintf(linkname, "lower_%s", adj_dev->name); + ret = sysfs_create_link(&(dev->dev.kobj), + &(adj_dev->dev.kobj), linkname); + if (ret) + goto free_adj; + } else if (dev_list == &dev->adj_list.upper) { + sprintf(linkname, "upper_%s", adj_dev->name); + ret = sysfs_create_link(&(dev->dev.kobj), + &(adj_dev->dev.kobj), linkname); if (ret) goto free_adj; } @@ -4681,8 +4666,14 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev, return 0; remove_symlinks: - if (netdev_adjacent_is_neigh_list(dev, dev_list)) - netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); + if (dev_list == &dev->adj_list.lower) { + sprintf(linkname, "lower_%s", adj_dev->name); + sysfs_remove_link(&(dev->dev.kobj), linkname); + } else if (dev_list == &dev->adj_list.upper) { + sprintf(linkname, "upper_%s", adj_dev->name); + sysfs_remove_link(&(dev->dev.kobj), linkname); + } + free_adj: kfree(adj); dev_put(adj_dev); @@ -4695,6 +4686,7 @@ void __netdev_adjacent_dev_remove(struct net_device *dev, struct list_head *dev_list) { struct netdev_adjacent *adj; + char linkname[IFNAMSIZ+7]; adj = __netdev_find_adj(dev, adj_dev, dev_list); @@ -4714,8 +4706,13 @@ void 
__netdev_adjacent_dev_remove(struct net_device *dev, if (adj->master) sysfs_remove_link(&(dev->dev.kobj), "master"); - if (netdev_adjacent_is_neigh_list(dev, dev_list)) - netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); + if (dev_list == &dev->adj_list.lower) { + sprintf(linkname, "lower_%s", adj_dev->name); + sysfs_remove_link(&(dev->dev.kobj), linkname); + } else if (dev_list == &dev->adj_list.upper) { + sprintf(linkname, "upper_%s", adj_dev->name); + sysfs_remove_link(&(dev->dev.kobj), linkname); + } list_del_rcu(&adj->list); pr_debug("dev_put for %s, because link removed from %s to %s\n", -- cgit v0.10.2