diff options
31 files changed, 452 insertions, 156 deletions
diff --git a/arch/powerpc/boot/dts/t1040d4rdb.dts b/arch/powerpc/boot/dts/t1040d4rdb.dts index 666716f..e6bf1fc 100644 --- a/arch/powerpc/boot/dts/t1040d4rdb.dts +++ b/arch/powerpc/boot/dts/t1040d4rdb.dts @@ -127,27 +127,35 @@ }; phy_qsgmii_0: ethernet-phy@08 { reg = <0x08>; + interrupts = <0 1 0 0>; }; phy_qsgmii_1: ethernet-phy@09 { reg = <0x09>; + interrupts = <0 1 0 0>; }; phy_qsgmii_2: ethernet-phy@0a { reg = <0x0a>; + interrupts = <0 1 0 0>; }; phy_qsgmii_3: ethernet-phy@0b { reg = <0x0b>; + interrupts = <0 1 0 0>; }; phy_qsgmii_4: ethernet-phy@0c { reg = <0x0c>; + interrupts = <0 1 0 0>; }; phy_qsgmii_5: ethernet-phy@0d { reg = <0x0d>; + interrupts = <0 1 0 0>; }; phy_qsgmii_6: ethernet-phy@0e { reg = <0x0e>; + interrupts = <0 1 0 0>; }; phy_qsgmii_7: ethernet-phy@0f { reg = <0x0f>; + interrupts = <0 1 0 0>; }; }; }; diff --git a/arch/powerpc/boot/dts/t1040rdb.dts b/arch/powerpc/boot/dts/t1040rdb.dts index 4a3f207..af1db40 100644 --- a/arch/powerpc/boot/dts/t1040rdb.dts +++ b/arch/powerpc/boot/dts/t1040rdb.dts @@ -213,27 +213,35 @@ }; phy_qsgmii_0: ethernet-phy@04 { reg = <0x04>; + interrupts = <0 1 0 0>; }; phy_qsgmii_1: ethernet-phy@05 { reg = <0x05>; + interrupts = <0 1 0 0>; }; phy_qsgmii_2: ethernet-phy@06 { reg = <0x06>; + interrupts = <0 1 0 0>; }; phy_qsgmii_3: ethernet-phy@07 { reg = <0x07>; + interrupts = <0 1 0 0>; }; phy_qsgmii_4: ethernet-phy@08 { reg = <0x08>; + interrupts = <0 1 0 0>; }; phy_qsgmii_5: ethernet-phy@09 { reg = <0x09>; + interrupts = <0 1 0 0>; }; phy_qsgmii_6: ethernet-phy@0a { reg = <0x0a>; + interrupts = <0 1 0 0>; }; phy_qsgmii_7: ethernet-phy@0b { reg = <0x0b>; + interrupts = <0 1 0 0>; }; }; }; diff --git a/arch/powerpc/boot/dts/t104xqds.dtsi b/arch/powerpc/boot/dts/t104xqds.dtsi index 7474d19..c778a83 100644 --- a/arch/powerpc/boot/dts/t104xqds.dtsi +++ b/arch/powerpc/boot/dts/t104xqds.dtsi @@ -183,15 +183,19 @@ phy_s5_01: ethernet-phy@1c { reg = <0x14>; + interrupts = <1 1 0 0>; }; phy_s5_02: ethernet-phy@1d { reg = 
<0x15>; + interrupts = <1 1 0 0>; }; phy_s5_03: ethernet-phy@1e { reg = <0x16>; + interrupts = <1 1 0 0>; }; phy_s5_04: ethernet-phy@1f { reg = <0x17>; + interrupts = <1 1 0 0>; }; }; @@ -203,15 +207,19 @@ phy_s6_01: ethernet-phy@1c { reg = <0x18>; + interrupts = <1 1 0 0>; }; phy_s6_02: ethernet-phy@1d { reg = <0x19>; + interrupts = <1 1 0 0>; }; phy_s6_03: ethernet-phy@1e { reg = <0x1a>; + interrupts = <1 1 0 0>; }; phy_s6_04: ethernet-phy@1f { reg = <0x1b>; + interrupts = <1 1 0 0>; }; }; diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c index e52e121..360a0ce 100644 --- a/arch/powerpc/mm/fsl_booke_mmu.c +++ b/arch/powerpc/mm/fsl_booke_mmu.c @@ -147,6 +147,18 @@ void settlbcam(int index, unsigned long virt, phys_addr_t phys, loadcam_entry(index); } +void cleartlbcam(unsigned long virt, unsigned int pid) +{ + int i = 0; + for (i = 0; i < NUM_TLBCAMS; i++) { + if (tlbcam_addrs[i].start == virt) { + TLBCAM[i].MAS1 = 0; + loadcam_entry(i); + return; + } + } +} + unsigned long calc_cam_sz(unsigned long ram, unsigned long virt, phys_addr_t phys) { diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h index dbbba8d..5880a74 100644 --- a/arch/powerpc/mm/mmu_decl.h +++ b/arch/powerpc/mm/mmu_decl.h @@ -94,6 +94,8 @@ extern void _tlbia(void); void settlbcam(int index, unsigned long virt, phys_addr_t phys, unsigned long size, unsigned long flags, unsigned int pid); +void cleartlbcam(unsigned long virt, unsigned int pid); + #ifdef CONFIG_PPC32 extern void mapin_ram(void); diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c index 36b2e6e..cd620fb 100644 --- a/drivers/cpufreq/qoriq-cpufreq.c +++ b/drivers/cpufreq/qoriq-cpufreq.c @@ -525,7 +525,7 @@ static int __init qoriq_cpufreq_init(void) ret = cpufreq_register_driver(&qoriq_cpufreq_driver); if (!ret) - pr_info("Freescale PowerPC qoriq CPU frequency scaling driver\n"); + pr_info("Freescale QorIQ CPU frequency scaling driver\n"); return ret; } @@ -539,4 
+539,4 @@ module_exit(qoriq_cpufreq_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Tang Yuantian <Yuantian.Tang@freescale.com>"); -MODULE_DESCRIPTION("cpufreq driver for Freescale e500mc series SoCs"); +MODULE_DESCRIPTION("cpufreq driver for Freescale QorIQ series SoCs"); diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 0f117d0..023a5d8 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -3070,8 +3070,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); /* allocate space for base edesc and hw desc commands, link tables */ - edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes + - sec4_sg_bytes, GFP_DMA | flags); + edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes, + GFP_DMA | flags); if (!edesc) { dev_err(jrdev, "could not allocate extended descriptor\n"); return ERR_PTR(-ENOMEM); @@ -3310,8 +3310,8 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); /* allocate space for base edesc and hw desc commands, link tables */ - edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes + - sec4_sg_bytes, GFP_DMA | flags); + edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes, + GFP_DMA | flags); if (!edesc) { dev_err(jrdev, "could not allocate extended descriptor\n"); return ERR_PTR(-ENOMEM); @@ -3581,8 +3581,8 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request sizeof(struct sec4_sg_entry); /* allocate space for base edesc and hw desc commands, link tables */ - edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes + - sec4_sg_bytes, GFP_DMA | flags); + edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes, + GFP_DMA | flags); if (!edesc) { dev_err(jrdev, "could not allocate extended descriptor\n"); return ERR_PTR(-ENOMEM); diff --git a/drivers/crypto/caam/caamhash.c 
b/drivers/crypto/caam/caamhash.c index e99a45b..fb913c3 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -807,7 +807,7 @@ static int ahash_update_ctx(struct ahash_request *req) * allocate space for base edesc and hw desc commands, * link tables */ - edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + + edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes, GFP_DMA | flags); if (!edesc) { dev_err(jrdev, @@ -835,17 +835,17 @@ static int ahash_update_ctx(struct ahash_request *req) src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + sec4_sg_src_index, chained); - if (*next_buflen) { + if (*next_buflen) scatterwalk_map_and_copy(next_buf, req->src, to_hash - *buflen, *next_buflen, 0); - state->current_buf = !state->current_buf; - } } else { (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN; } + state->current_buf = !state->current_buf; + sh_len = desc_len(sh_desc); desc = edesc->hw_desc; init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | @@ -909,17 +909,18 @@ static int ahash_final_ctx(struct ahash_request *req) state->buflen_1; u32 *sh_desc = ctx->sh_desc_fin, *desc; dma_addr_t ptr = ctx->sh_desc_fin_dma; - int sec4_sg_bytes; + int sec4_sg_bytes, sec4_sg_src_index; int digestsize = crypto_ahash_digestsize(ahash); struct ahash_edesc *edesc; int ret = 0; int sh_len; - sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry); + sec4_sg_src_index = 1 + (buflen ? 
1 : 0); + sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry); /* allocate space for base edesc and hw desc commands, link tables */ - edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + - sec4_sg_bytes, GFP_DMA | flags); + edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes, + GFP_DMA | flags); if (!edesc) { dev_err(jrdev, "could not allocate extended descriptor\n"); return -ENOMEM; @@ -942,7 +943,7 @@ static int ahash_final_ctx(struct ahash_request *req) state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, buf, state->buf_dma, buflen, last_buflen); - (edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN; + (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN; edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, sec4_sg_bytes, DMA_TO_DEVICE); @@ -1005,8 +1006,8 @@ static int ahash_finup_ctx(struct ahash_request *req) sizeof(struct sec4_sg_entry); /* allocate space for base edesc and hw desc commands, link tables */ - edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + - sec4_sg_bytes, GFP_DMA | flags); + edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes, + GFP_DMA | flags); if (!edesc) { dev_err(jrdev, "could not allocate extended descriptor\n"); return -ENOMEM; @@ -1091,8 +1092,8 @@ static int ahash_digest(struct ahash_request *req) sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry); /* allocate space for base edesc and hw desc commands, link tables */ - edesc = kmalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes + - DESC_JOB_IO_LEN, GFP_DMA | flags); + edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes + DESC_JOB_IO_LEN, + GFP_DMA | flags); if (!edesc) { dev_err(jrdev, "could not allocate extended descriptor\n"); return -ENOMEM; @@ -1165,8 +1166,7 @@ static int ahash_final_no_ctx(struct ahash_request *req) int sh_len; /* allocate space for base edesc and hw desc commands, link tables */ - edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN, - GFP_DMA | 
flags); + edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN, GFP_DMA | flags); if (!edesc) { dev_err(jrdev, "could not allocate extended descriptor\n"); return -ENOMEM; @@ -1245,7 +1245,7 @@ static int ahash_update_no_ctx(struct ahash_request *req) * allocate space for base edesc and hw desc commands, * link tables */ - edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + + edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes, GFP_DMA | flags); if (!edesc) { dev_err(jrdev, @@ -1268,9 +1268,10 @@ static int ahash_update_no_ctx(struct ahash_request *req) scatterwalk_map_and_copy(next_buf, req->src, to_hash - *buflen, *next_buflen, 0); - state->current_buf = !state->current_buf; } + state->current_buf = !state->current_buf; + sh_len = desc_len(sh_desc); desc = edesc->hw_desc; init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | @@ -1352,8 +1353,8 @@ static int ahash_finup_no_ctx(struct ahash_request *req) sizeof(struct sec4_sg_entry); /* allocate space for base edesc and hw desc commands, link tables */ - edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + - sec4_sg_bytes, GFP_DMA | flags); + edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes, + GFP_DMA | flags); if (!edesc) { dev_err(jrdev, "could not allocate extended descriptor\n"); return -ENOMEM; @@ -1447,7 +1448,7 @@ static int ahash_update_first(struct ahash_request *req) * allocate space for base edesc and hw desc commands, * link tables */ - edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + + edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes, GFP_DMA | flags); if (!edesc) { dev_err(jrdev, @@ -1544,6 +1545,8 @@ static int ahash_init(struct ahash_request *req) state->current_buf = 0; state->buf_dma = 0; + state->buflen_0 = 0; + state->buflen_1 = 0; return 0; } diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index 0359c71..1615916 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -16,6 
+16,12 @@ #include "qi.h" #endif +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64) +static const bool is_arm = true; +#else +static const bool is_arm; +#endif + /* * Descriptor to instantiate RNG State Handle 0 in normal mode and * load the JDKEK, TDKEK and TDSK registers @@ -488,7 +494,7 @@ static int caam_probe(struct platform_device *pdev) #ifdef CONFIG_DEBUG_FS struct caam_perfmon *perfmon; #endif - u32 mcr, scfgr, comp_params; + u32 scfgr, comp_params; int pg_size; int BLOCK_OFFSET = 0; @@ -537,11 +543,9 @@ static int caam_probe(struct platform_device *pdev) * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel, * long pointers in master configuration register */ - mcr = rd_reg32(&ctrl->mcr); - mcr = (mcr & ~MCFGR_AWCACHE_MASK) | (0x2 << MCFGR_AWCACHE_SHIFT) | - MCFGR_WDENABLE | (sizeof(dma_addr_t) == sizeof(u64) ? - MCFGR_LONG_PTR : 0); - wr_reg32(&ctrl->mcr, mcr); + setbits32(&ctrl->mcr, MCFGR_WDENABLE | MCFGR_LARGE_BURST | + (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0) | + (is_arm ? 0x2 << MCFGR_AWCACHE_SHIFT : 0)); /* * Read the Compile Time paramters and SCFGR to determine @@ -820,7 +824,6 @@ static int caam_resume(struct device *dev) struct caam_drv_private *caam_priv; struct caam_ctrl __iomem *ctrl; struct caam_queue_if __iomem *qi; - u32 mcr; int ret; caam_priv = dev_get_drvdata(dev); @@ -830,11 +833,9 @@ static int caam_resume(struct device *dev) * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel, * long pointers in master configuration register */ - mcr = rd_reg32(&ctrl->mcr); - mcr = (mcr & ~MCFGR_AWCACHE_MASK) | (0x2 << MCFGR_AWCACHE_SHIFT) | - MCFGR_WDENABLE | (sizeof(dma_addr_t) == sizeof(u64) ? - MCFGR_LONG_PTR : 0); - wr_reg32(&ctrl->mcr, mcr); + setbits32(&ctrl->mcr, MCFGR_WDENABLE | MCFGR_LARGE_BURST | + (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0) | + (is_arm ? 
0x2 << MCFGR_AWCACHE_SHIFT : 0)); /* Enable QI interface of SEC */ if (caam_priv->qi_present) diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h index 72193f8..b2715d4 100644 --- a/drivers/crypto/caam/regs.h +++ b/drivers/crypto/caam/regs.h @@ -413,7 +413,8 @@ struct caam_ctrl { #define MCFGR_AXIPIPE_MASK (0xf << MCFGR_AXIPIPE_SHIFT) #define MCFGR_AXIPRI 0x00000008 /* Assert AXI priority sideband */ -#define MCFGR_BURST_64 0x00000001 /* Max burst size */ +#define MCFGR_LARGE_BURST 0x00000004 /* 128/256-byte burst size */ +#define MCFGR_BURST_64 0x00000001 /* 64-byte burst size */ /* JRSTART register offsets */ #define JRSTART_JR0_START 0x00000001 /* Start Job ring 0 */ diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index ff1e2a0..9e35d1b 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -90,10 +90,27 @@ config ASF_IPV6 This option compiles the ASF for IPV6 Firewall and IPsec. +config ASF_LINUX_QOS + default n + bool "ASF Linux QoS Integration" + depends on AS_FASTPATH + select NETFILTER_XT_TARGET_DSCP + select NET_SCHED + select NET_SCH_PRIO + select NET_SCH_TBF + select NET_SCH_DRR + select NET_CLS + select NET_CLS_BASIC + select NET_CLS_U32 + select NET_CLS_FLOW + select NET_SCH_FIFO + select IP_NF_MANGLE + select IP6_NF_MANGLE + config ASF_QOS - default y + default n bool "ASF QoS Support" - depends on AS_FASTPATH + depends on AS_FASTPATH && !ASF_LINUX_QOS && !FSL_DPAA_ETH select NETFILTER_XT_TARGET_DSCP select NET_SCHED select NET_SCH_PRIO @@ -114,23 +131,29 @@ config ASF_EGRESS_QOS config ASF_INGRESS_MARKER default y - bool "DSCP Marking Support" - depends on ASF_QOS && ASF_EGRESS_QOS + bool "ASF DSCP Marking Support" + depends on AS_FASTPATH + depends on ASF_QOS ---help--- - Choose this option if you wish to utilize ASF Marking support. - Currently only DSCP marking is supported. + Choose this option if you wish to utilize + ASF Marking support. + Currently only DSCP marking + is supported. 
config ASF_EGRESS_SCH default y - bool "S/W Scheduler Support" - depends on ASF_QOS && ASF_EGRESS_QOS && !DPA_ETH + bool "ASF S/W Scheduler Support" + depends on ASF_QOS && ASF_EGRESS_QOS && !FSL_DPAA_ETH ---help--- - Choose this option if you wish to utilize ASF S/W based Scheduler support. + Choose this option + if you wish to utilize + ASF S/W based + Scheduler support. config ASF_EGRESS_SHAPER default y - bool "S/W Shaper Support" - depends on ASF_QOS && ASF_EGRESS_QOS && ASF_EGRESS_SCH && !DPA_ETH + bool "ASF S/W Shaper Support" + depends on ASF_QOS && ASF_EGRESS_QOS && ASF_EGRESS_SCH && !FSL_DPAA_ETH ---help--- Choose this option if you wish to utilize ASF S/W based Shaping support. @@ -148,13 +171,15 @@ choice default ASF_SCH_PRIO config ASF_SCH_PRIO - bool "Strict Priority (PRIO)" + bool "ASF Strict Priority (PRIO)" ---help--- - This option compiles the ASF to utilize eTSEC(NON-DPAA) - H/W Scheduler with PRIORITY algorithm. + This option compiles the ASF + to utilize eTSEC(NON-DPAA) H/W + Scheduler with PRIORITY + algorithm. config ASF_SCH_MWRR - bool "Modified Weighted Round-Robin (MWRR)" + bool "ASF Modified Weighted Round-Robin (MWRR)" ---help--- This option compiles the ASF to utilize eTSEC(NON-DPAA) H/W Scheduler with Modified Weighted Round-Robin algorithm. 
diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth_base.c b/drivers/net/ethernet/freescale/dpa/dpaa_eth_base.c index 9a0309d..9f55496 100644 --- a/drivers/net/ethernet/freescale/dpa/dpaa_eth_base.c +++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth_base.c @@ -187,7 +187,7 @@ int dpa_bp_shared_port_seed(struct dpa_bp *bp) /* seed pool with buffers from that memory region */ if (bp->seed_pool) { int count = bp->target_count; - size_t addr = bp->paddr; + dma_addr_t addr = bp->paddr; while (count) { struct bm_buffer bufs[8]; diff --git a/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_cc.c b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_cc.c index f439167..13a5c15 100644 --- a/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_cc.c +++ b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_cc.c @@ -6615,7 +6615,6 @@ t_Error FM_PCD_MatchTableModifyKey(t_Handle h_CcNode, uint16_t keyIndex, { t_FmPcd *p_FmPcd; t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode; - t_List h_List; t_Error err = E_OK; SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE); @@ -6624,18 +6623,16 @@ t_Error FM_PCD_MatchTableModifyKey(t_Handle h_CcNode, uint16_t keyIndex, SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE); - INIT_LIST(&h_List); - err = FmPcdCcNodeTreeTryLock(p_FmPcd, p_CcNode, &h_List); - if (err) + if (!FmPcdLockTryLockAll(p_FmPcd)) { - DBG(TRACE, ("Node's trees lock failed")); + DBG(TRACE, ("FmPcdLockTryLockAll failed")); return ERROR_CODE(E_BUSY); } err = FmPcdCcModifyKey(p_FmPcd, p_CcNode, keyIndex, keySize, p_Key, p_Mask); - FmPcdCcNodeTreeReleaseLock(p_FmPcd, &h_List); + FmPcdLockUnlockAll(p_FmPcd); switch(GET_ERROR_TYPE(err) ) { diff --git a/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm.c b/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm.c index 9224703..caebc2e 100644 --- 
a/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm.c +++ b/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm.c @@ -1024,6 +1024,94 @@ Status: feature not supported } #if defined(CONFIG_COMPAT) + case FM_PCD_IOC_KG_SCHEME_GET_CNTR_COMPAT: +#endif + case FM_PCD_IOC_KG_SCHEME_GET_CNTR: + { + ioc_fm_pcd_kg_scheme_spc_t *param; + + param = (ioc_fm_pcd_kg_scheme_spc_t *) XX_Malloc(sizeof(ioc_fm_pcd_kg_scheme_spc_t)); + if (!param) + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD")); + + memset(param, 0, sizeof(ioc_fm_pcd_kg_scheme_spc_t)); + +#if defined(CONFIG_COMPAT) + if (compat) + { + ioc_compat_fm_pcd_kg_scheme_spc_t *compat_param = NULL; + + compat_param = (ioc_compat_fm_pcd_kg_scheme_spc_t *) XX_Malloc( + sizeof(ioc_compat_fm_pcd_kg_scheme_spc_t)); + if (!compat_param) + { + XX_Free(param); + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD")); + } + + memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_kg_scheme_spc_t)); + + if (copy_from_user(compat_param, (ioc_compat_fm_pcd_kg_scheme_spc_t *) compat_ptr(arg), + sizeof(ioc_compat_fm_pcd_kg_scheme_spc_t))) + { + XX_Free(compat_param); + XX_Free(param); + RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG); + } + + compat_copy_fm_pcd_kg_scheme_spc(compat_param, param, COMPAT_US_TO_K); + + XX_Free(compat_param); + } + else +#endif + { + if (copy_from_user(param, (ioc_fm_pcd_kg_scheme_spc_t *)arg, + sizeof(ioc_fm_pcd_kg_scheme_spc_t))) + { + XX_Free(param); + RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG); + } + } + + param->val = FM_PCD_KgSchemeGetCounter((t_Handle)param->id); + +#if defined(CONFIG_COMPAT) + if (compat) + { + ioc_compat_fm_pcd_kg_scheme_spc_t *compat_param; + + compat_param = (ioc_compat_fm_pcd_kg_scheme_spc_t *) XX_Malloc( + sizeof(ioc_compat_fm_pcd_kg_scheme_spc_t)); + if (!compat_param) + { + XX_Free(param); + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD")); + } + + memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_kg_scheme_spc_t)); + 
compat_copy_fm_pcd_kg_scheme_spc(compat_param, param, COMPAT_K_TO_US); + if (copy_to_user((ioc_compat_fm_pcd_kg_scheme_spc_t *)compat_ptr(arg), + compat_param, + sizeof(ioc_compat_fm_pcd_kg_scheme_spc_t))) + err = E_READ_FAILED; + + XX_Free(compat_param); + } + else +#endif + { + if (copy_to_user((ioc_fm_pcd_kg_scheme_spc_t *)arg, + param, + sizeof(ioc_fm_pcd_kg_scheme_spc_t))) + err = E_READ_FAILED; + } + + XX_Free(param); + break; + } + +#if defined(CONFIG_COMPAT) case FM_PCD_IOC_KG_SCHEME_DELETE_COMPAT: #endif case FM_PCD_IOC_KG_SCHEME_DELETE: diff --git a/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm_compat.c b/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm_compat.c index ea0885a..baf2a33 100644 --- a/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm_compat.c +++ b/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm_compat.c @@ -518,6 +518,7 @@ void compat_copy_fm_pcd_hash_table( param->hash_res_mask = compat_param->hash_res_mask; param->hash_shift = compat_param->hash_shift; param->match_key_size = compat_param->match_key_size; + param->aging_support = compat_param->aging_support; param->id = compat_pcd_id2ptr(compat_param->id); } else @@ -528,6 +529,7 @@ void compat_copy_fm_pcd_hash_table( compat_param->hash_res_mask = param->hash_res_mask; compat_param->hash_shift = param->hash_shift; compat_param->match_key_size = param->match_key_size; + compat_param->aging_support = param->aging_support; compat_param->id = compat_add_ptr2id(param->id, FM_MAP_TYPE_PCD_NODE); } @@ -733,6 +735,22 @@ void compat_copy_fm_pcd_kg_scheme( _fm_cpt_dbg(compat," ...->}\n"); } +void compat_copy_fm_pcd_kg_scheme_spc( + ioc_compat_fm_pcd_kg_scheme_spc_t *compat_param, + ioc_fm_pcd_kg_scheme_spc_t *param, + uint8_t compat) +{ + if (compat == COMPAT_US_TO_K) + { + param->id = compat_pcd_id2ptr(compat_param->id); + param->val = compat_param->val; + } else { + compat_param->id = compat_pcd_ptr2id(param->id); + 
compat_param->val = param->val; + } +} + + void compat_copy_fm_pcd_kg_scheme_select( ioc_compat_fm_pcd_kg_scheme_select_t *compat_param, ioc_fm_pcd_kg_scheme_select_t *param, diff --git a/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm_compat.h b/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm_compat.h index d89a64d..e2f779a 100644 --- a/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm_compat.h +++ b/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm_compat.h @@ -296,6 +296,7 @@ typedef struct ioc_compat_fm_pcd_hash_table_params_t { uint16_t hash_res_mask; uint8_t hash_shift; uint8_t match_key_size; + bool aging_support; ioc_compat_fm_pcd_cc_next_engine_params_t cc_next_engine_params_for_miss; compat_uptr_t id; } ioc_compat_fm_pcd_hash_table_params_t; @@ -537,6 +538,12 @@ typedef struct ioc_compat_fm_vsp_prs_result_params_t { compat_uptr_t p_fm_vsp; compat_uptr_t p_data; } ioc_compat_fm_vsp_prs_result_params_t; + +typedef struct ioc_compat_fm_pcd_kg_scheme_spc_t { + uint32_t val; + compat_uptr_t id; +} ioc_compat_fm_pcd_kg_scheme_spc_t; + #endif /* (DPAA_VERSION >= 11) */ typedef struct ioc_compat_fm_ctrl_mon_counters_params_t { @@ -710,6 +717,11 @@ void compat_copy_fm_vsp_prs_result_params( ioc_compat_fm_vsp_prs_result_params_t *compat_param, ioc_fm_vsp_prs_result_params_t *param, uint8_t compat); + +void compat_copy_fm_pcd_kg_scheme_spc( + ioc_compat_fm_pcd_kg_scheme_spc_t *compat_param, + ioc_fm_pcd_kg_scheme_spc_t *param, + uint8_t compat); #endif /* (DPAA_VERSION >= 11) */ /* } pcd compat functions */ #endif diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 406d0f0..a6f2d77 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -127,7 +127,11 @@ EXPORT_SYMBOL(devfp_tx_hook); #endif #endif +#ifdef CONFIG_PREEMPT_RT_FULL +#define TX_TIMEOUT (5*HZ) +#else #define TX_TIMEOUT (1*HZ) +#endif const char 
gfar_driver_version[] = "1.3"; @@ -975,9 +979,6 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) /* Find the TBI PHY. If it's not there, we don't support SGMII */ priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0); - - priv->dma_endian_le = of_property_read_bool(np, "fsl,dma-endian-le"); - #if defined CONFIG_FSL_GIANFAR_1588 /* Handle IEEE1588 node */ if (!gfar_ptp_init(np, priv)) @@ -2211,8 +2212,6 @@ void gfar_start(struct gfar_private *priv) /* Initialize DMACTRL to have WWR and WOP */ tempval = gfar_read(®s->dmactrl); tempval |= DMACTRL_INIT_SETTINGS; - if (priv->dma_endian_le) - tempval |= DMACTRL_LE; gfar_write(®s->dmactrl, tempval); /* Make sure we aren't stopped */ diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h index 0f5bf84..0b0d7c9 100644 --- a/drivers/net/ethernet/freescale/gianfar.h +++ b/drivers/net/ethernet/freescale/gianfar.h @@ -328,7 +328,6 @@ extern const char gfar_driver_version[]; #define DMACTRL_INIT_SETTINGS 0x000000c3 #define DMACTRL_GRS 0x00000010 #define DMACTRL_GTS 0x00000008 -#define DMACTRL_LE 0x00008000 #define TSTAT_CLEAR_THALT_ALL 0xFF000000 #define TSTAT_CLEAR_THALT 0x80000000 @@ -1380,9 +1379,6 @@ struct gfar_private { /* L2 SRAM alloc of BDs */ bd_l2sram_en:1; - /* little endian dma buffer and descriptor host interface */ - unsigned int dma_endian_le; - /* The total tx and rx ring size for the enabled queues */ unsigned int total_tx_ring_size; unsigned int total_rx_ring_size; diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c index c5633cf..6d6b420 100644 --- a/drivers/pci/host/pci-layerscape.c +++ b/drivers/pci/host/pci-layerscape.c @@ -137,15 +137,6 @@ static void ls_pcie_host_init(struct pcie_port *pp) dw_pcie_setup_rc(pp); - while (!ls_pcie_link_up(pp)) { - usleep_range(100, 1000); - count++; - if (count >= 200) { - dev_err(pp->dev, "phy link never came up\n"); - return; - } - } - if 
(of_device_is_compatible(pcie->dev->of_node, "fsl,ls1021a-pcie")) { /* * LS1021A Workaround for internal TKT228622 diff --git a/drivers/staging/fsl_dce/tests/chunk_comp_def_cf/dce_chunk_comp_def_cf_test.c b/drivers/staging/fsl_dce/tests/chunk_comp_def_cf/dce_chunk_comp_def_cf_test.c index 80e2ca1..7650e08 100644 --- a/drivers/staging/fsl_dce/tests/chunk_comp_def_cf/dce_chunk_comp_def_cf_test.c +++ b/drivers/staging/fsl_dce/tests/chunk_comp_def_cf/dce_chunk_comp_def_cf_test.c @@ -179,7 +179,7 @@ static int do_test(struct dce_test_ctx *ctx, nop_req = kmalloc(sizeof(*nop_req), GFP_KERNEL); if (!nop_req) { ret = -ENOMEM; - goto fail_deflate_nop; + goto fail_nop; } init_completion(&nop_req->cb_done); @@ -188,7 +188,7 @@ static int do_test(struct dce_test_ctx *ctx, ret = fsl_dce_chunk_nop(&ctx->deflate_chunk, 0, nop_req); if (ret) { BUG(); - goto fail_deflate_nop; + goto fail_nop; } pr_info("Sent NOP on deflate path\n"); @@ -199,7 +199,7 @@ static int do_test(struct dce_test_ctx *ctx, ret = fsl_dce_chunk_nop(&ctx->inflate_chunk, 0, nop_req); if (ret) { BUG(); - goto fail_inflate_nop; + goto fail_nop; } pr_info("Sent NOP on inflate path\n"); @@ -212,14 +212,16 @@ static int do_test(struct dce_test_ctx *ctx, def_process_req = kzalloc(sizeof(*def_process_req), GFP_KERNEL); if (!def_process_req) { BUG(); - goto fail_inflate_params; + ret = -ENOMEM; + goto fail_nop; } pr_info("Allocated def_process_req\n"); def_process_req->v_output = vmalloc(output_len); - if (!def_process_req) { + if (!def_process_req->v_output) { BUG(); - goto fail_v_output; + ret = -ENOMEM; + goto fail_deflate_v_output; } init_completion(&def_process_req->cb_done); @@ -228,7 +230,7 @@ static int do_test(struct dce_test_ctx *ctx, &def_process_req->input_data); if (ret) { BUG(); - goto fail_alloc_dce_data_input; + goto fail_deflate_alloc_dce_data_input; } if (verbose_level == 1) { @@ -240,7 +242,7 @@ static int do_test(struct dce_test_ctx *ctx, &def_process_req->output_data); if (ret) { BUG(); - goto 
fail_alloc_dce_data_output; + goto fail_deflate_alloc_dce_data_output; } if (verbose_level == 1) { @@ -252,7 +254,7 @@ static int do_test(struct dce_test_ctx *ctx, &def_process_req->input_data); if (ret) { BUG(); - goto fail_alloc_dce_data_output; + goto fail_deflate_copy_input_to_dce_data; } if (verbose_level == 1) { @@ -263,7 +265,7 @@ static int do_test(struct dce_test_ctx *ctx, ret = dma_map_dce_data(&def_process_req->input_data, DMA_BIDIRECTIONAL); if (ret) { BUG(); - goto fail_alloc_dce_data_output; + goto fail_deflate_dma_map_dce_data_input; } if (verbose_level == 1) { @@ -275,7 +277,7 @@ static int do_test(struct dce_test_ctx *ctx, DMA_BIDIRECTIONAL); if (ret) { BUG(); - goto fail_dma_map_deflate_output_data; + goto fail_deflate_dma_map_dce_data_output; } if (verbose_level == 1) { @@ -288,7 +290,7 @@ static int do_test(struct dce_test_ctx *ctx, DMA_BIDIRECTIONAL); if (ret) { BUG(); - goto fail_output_attach_data_to_sg_deflate; + goto fail_deflate_attach_data_list_to_sg_output; } ret = attach_data_list_to_sg(&def_process_req->dce_cf[1], @@ -296,7 +298,7 @@ static int do_test(struct dce_test_ctx *ctx, DMA_BIDIRECTIONAL); if (ret) { BUG(); - goto fail_input_attach_data_to_sg_deflate; + goto fail_deflate_attach_data_list_to_sg_input; } def_process_req->dce_cf[2].final = 1; @@ -304,7 +306,7 @@ static int do_test(struct dce_test_ctx *ctx, def_process_req->input_fd._format2 = qm_fd_compound; def_process_req->input_fd.cong_weight = 1; qm_fd_addr_set64(&def_process_req->input_fd, - fsl_dce_map(def_process_req->dce_cf)); + fsl_dce_map(def_process_req->dce_cf)); print_dce_fd(def_process_req->input_fd); print_dce_sg(def_process_req->dce_cf[0]); @@ -336,28 +338,28 @@ static int do_test(struct dce_test_ctx *ctx, &def_process_req->input_data, DMA_BIDIRECTIONAL); if (ret) { pr_err("Error %d\n", __LINE__); - goto fail_input_attach_data_to_sg_deflate; + goto fail_deflate_attach_data_list_to_sg_input; } ret = detach_data_list_from_sg(&def_process_req->dce_cf[0], 
&def_process_req->output_data, DMA_BIDIRECTIONAL); if (ret) { pr_err("Error %d\n", __LINE__); - goto fail_output_attach_data_to_sg_deflate; + goto fail_deflate_attach_data_list_to_sg_output; } ret = dma_unmap_dce_data(&def_process_req->output_data, DMA_BIDIRECTIONAL); if (ret) { pr_err("Error %d\n", __LINE__); - goto fail_dma_map_deflate_output_data; + goto fail_deflate_dma_map_dce_data_output; } ret = dma_unmap_dce_data(&def_process_req->input_data, DMA_BIDIRECTIONAL); if (ret) { pr_err("Error %d\n", __LINE__); - goto fail_alloc_dce_data_output; + goto fail_deflate_dma_map_dce_data_input; } pr_info("Got chunk process, status = %d, sg_table[0].length = %d\n", @@ -376,8 +378,10 @@ static int do_test(struct dce_test_ctx *ctx, def_process_req->v_output = vmalloc(def_process_req->dce_cf[0].length); if (!def_process_req->v_output) { pr_err("Error %d\n", __LINE__); - goto fail_alloc_dce_data_output; + ret = -ENOMEM; + goto fail_deflate_copy_input_to_dce_data; } + def_process_req->v_output_size = def_process_req->dce_cf[0].length; ret = copy_output_dce_data_to_buffer(&def_process_req->output_data, @@ -386,7 +390,7 @@ static int do_test(struct dce_test_ctx *ctx, if (ret) { pr_err("Error %d\n", __LINE__); - return ret; + goto fail_deflate_copy_input_to_dce_data; } /* Free dce data deflate operation, but keep vmalloc output */ @@ -398,14 +402,16 @@ static int do_test(struct dce_test_ctx *ctx, inf_process_req = kzalloc(sizeof(*inf_process_req), GFP_KERNEL); if (!inf_process_req) { pr_err("Error %d\n", __LINE__); - return ret; + ret = -ENOMEM; + goto fail_inflate_params; } pr_info("Allocated inf_process_req\n"); inf_process_req->v_output = vmalloc(input_len); - if (!def_process_req) { + if (!inf_process_req->v_output) { pr_err("Error %d\n", __LINE__); - return ret; + ret = -ENOMEM; + goto fail_inflate_v_output; } init_completion(&inf_process_req->cb_done); @@ -415,7 +421,7 @@ static int do_test(struct dce_test_ctx *ctx, &inf_process_req->input_data); if (ret) { pr_err("Error 
%d\n", __LINE__); - return ret; + goto fail_inflate_alloc_dce_data_input; } if (verbose_level == 1) { @@ -427,7 +433,7 @@ static int do_test(struct dce_test_ctx *ctx, &inf_process_req->output_data); if (ret) { pr_err("Error %d\n", __LINE__); - return ret; + goto fail_inflate_alloc_dce_data_output; } if (verbose_level == 1) { @@ -439,7 +445,7 @@ static int do_test(struct dce_test_ctx *ctx, def_process_req->v_output_size, &inf_process_req->input_data); if (ret) { pr_err("Error %d\n", __LINE__); - return ret; + goto fail_inflate_copy_input_to_dce_data; } if (verbose_level == 1) { @@ -450,7 +456,7 @@ static int do_test(struct dce_test_ctx *ctx, ret = dma_map_dce_data(&inf_process_req->input_data, DMA_BIDIRECTIONAL); if (ret) { pr_err("Error %d\n", __LINE__); - return ret; + goto fail_inflate_dma_map_dce_data_input; } if (verbose_level == 1) { @@ -462,7 +468,7 @@ static int do_test(struct dce_test_ctx *ctx, DMA_BIDIRECTIONAL); if (ret) { pr_err("Error %d\n", __LINE__); - return ret; + goto fail_inflate_dma_map_dce_data_output; } if (verbose_level == 1) { @@ -474,21 +480,21 @@ static int do_test(struct dce_test_ctx *ctx, &inf_process_req->output_data, true, DMA_BIDIRECTIONAL); if (ret) { pr_err("Error %d\n", __LINE__); - return ret; + goto fail_inflate_attach_data_list_to_sg_output; } ret = attach_data_list_to_sg(&inf_process_req->dce_cf[1], &inf_process_req->input_data, false, DMA_BIDIRECTIONAL); if (ret) { pr_err("Error %d\n", __LINE__); - return ret; + goto fail_inflate_attach_data_list_to_sg_input; } inf_process_req->dce_cf[2].final = 1; inf_process_req->input_fd._format2 = qm_fd_compound; qm_fd_addr_set64(&inf_process_req->input_fd, - fsl_dce_map(inf_process_req->dce_cf)); + fsl_dce_map(inf_process_req->dce_cf)); print_dce_fd(inf_process_req->input_fd); print_dce_sg(inf_process_req->dce_cf[0]); @@ -503,32 +509,32 @@ static int do_test(struct dce_test_ctx *ctx, pr_info("Output FD\n"); print_dce_fd(inf_process_req->output_fd); - ret = 
dma_unmap_dce_data(&inf_process_req->input_data, - DMA_BIDIRECTIONAL); + ret = detach_data_list_from_sg(&inf_process_req->dce_cf[1], + &inf_process_req->input_data, DMA_BIDIRECTIONAL); if (ret) { pr_err("Error %d\n", __LINE__); - return ret; + goto fail_inflate_attach_data_list_to_sg_input; } - ret = dma_unmap_dce_data(&inf_process_req->output_data, - DMA_BIDIRECTIONAL); + ret = detach_data_list_from_sg(&inf_process_req->dce_cf[0], + &inf_process_req->output_data, DMA_BIDIRECTIONAL); if (ret) { pr_err("Error %d\n", __LINE__); - return ret; + goto fail_inflate_attach_data_list_to_sg_output; } - ret = detach_data_list_from_sg(&inf_process_req->dce_cf[0], - &inf_process_req->output_data, DMA_BIDIRECTIONAL); + ret = dma_unmap_dce_data(&inf_process_req->output_data, + DMA_BIDIRECTIONAL); if (ret) { pr_err("Error %d\n", __LINE__); - return ret; + goto fail_inflate_dma_map_dce_data_output; } - ret = detach_data_list_from_sg(&inf_process_req->dce_cf[1], - &inf_process_req->input_data, DMA_BIDIRECTIONAL); + ret = dma_unmap_dce_data(&inf_process_req->input_data, + DMA_BIDIRECTIONAL); if (ret) { pr_err("Error %d\n", __LINE__); - return ret; + goto fail_inflate_dma_map_dce_data_input; } pr_info("Got chunk process, status = 0x%x, sg_table[0].length = %d\n", @@ -537,7 +543,8 @@ static int do_test(struct dce_test_ctx *ctx, if (inf_process_req->dce_cf[0].length != input_len) { pr_err("Error %d\n", __LINE__); - return ret; + ret = -EINVAL; + goto fail_inflate_copy_input_to_dce_data; } print_dce_sg(inf_process_req->dce_cf[0]); @@ -546,7 +553,8 @@ static int do_test(struct dce_test_ctx *ctx, if (!inf_process_req->v_output) { pr_err("Error %d\n", __LINE__); - return ret; + ret = -ENOMEM; + goto fail_inflate_copy_input_to_dce_data; } inf_process_req->v_output_size = inf_process_req->dce_cf[0].length; @@ -555,14 +563,15 @@ static int do_test(struct dce_test_ctx *ctx, input_len); if (ret) { pr_err("Error %d\n", __LINE__); - return ret; + goto fail_inflate_copy_input_to_dce_data; } - /* 
compare output to orinal data */ + /* compare output to original data */ if (memcmp(inf_process_req->v_output, input_data, input_len)) { pr_err("Error %d\n", __LINE__); - return ret; + ret = -EINVAL; + goto fail_inflate_copy_input_to_dce_data; } - pr_info("Ouput inflate data matched original!\n"); + pr_info("Output inflate data matched original!\n"); /* Free dce data deflate operation, but keep vmalloc output */ free_dce_data(&inf_process_req->output_data); @@ -574,12 +583,13 @@ static int do_test(struct dce_test_ctx *ctx, kfree(def_process_req); ret = destroy_test_ctx(ctx); - if (ret) + if (ret) { pr_err("Error with test\n"); - else - pr_info("Done test loop\n"); + return ret; + } pr_info("Done test loop\n"); + return 0; fail_deflate_process: @@ -587,35 +597,70 @@ fail_deflate_process: &def_process_req->input_data, DMA_BIDIRECTIONAL); -fail_input_attach_data_to_sg_deflate: +fail_deflate_attach_data_list_to_sg_input: detach_data_list_from_sg(&def_process_req->dce_cf[0], &def_process_req->output_data, DMA_BIDIRECTIONAL); -fail_output_attach_data_to_sg_deflate: +fail_deflate_attach_data_list_to_sg_output: dma_unmap_dce_data(&def_process_req->output_data, DMA_BIDIRECTIONAL); -fail_dma_map_deflate_output_data: +fail_deflate_dma_map_dce_data_output: dma_unmap_dce_data(&def_process_req->input_data, DMA_BIDIRECTIONAL); -fail_alloc_dce_data_output: +fail_deflate_dma_map_dce_data_input: +fail_deflate_copy_input_to_dce_data: + free_dce_data(&def_process_req->output_data); + +fail_deflate_alloc_dce_data_output: free_dce_data(&def_process_req->input_data); -fail_alloc_dce_data_input: +fail_deflate_alloc_dce_data_input: vfree(def_process_req->v_output); -fail_v_output: +fail_deflate_v_output: kfree(def_process_req); -fail_inflate_params: - fsl_dce_chunk_destroy(&ctx->inflate_chunk, 0, NULL); - -fail_inflate_nop: -fail_deflate_nop: +fail_nop: destroy_test_ctx(ctx); fail_init_test_ctx: return ret; + +/* this section can't be added before deflate fail section +as it would cause 
seg fault */ +fail_inflate_process: + detach_data_list_from_sg(&inf_process_req->dce_cf[1], + &inf_process_req->input_data, + DMA_BIDIRECTIONAL); + +fail_inflate_attach_data_list_to_sg_input: + detach_data_list_from_sg(&inf_process_req->dce_cf[0], + &inf_process_req->output_data, + DMA_BIDIRECTIONAL); + +fail_inflate_attach_data_list_to_sg_output: + dma_unmap_dce_data(&inf_process_req->output_data, DMA_BIDIRECTIONAL); + +fail_inflate_dma_map_dce_data_output: + dma_unmap_dce_data(&inf_process_req->input_data, DMA_BIDIRECTIONAL); + +fail_inflate_dma_map_dce_data_input: +fail_inflate_copy_input_to_dce_data: + free_dce_data(&inf_process_req->output_data); + +fail_inflate_alloc_dce_data_output: + free_dce_data(&inf_process_req->input_data); + +fail_inflate_alloc_dce_data_input: + vfree(inf_process_req->v_output); + +fail_inflate_v_output: + kfree(inf_process_req); + +fail_inflate_params: + destroy_test_ctx(ctx); + return ret; } struct test_meta_info_t { diff --git a/drivers/staging/fsl_dce/tests/performance_simple/dce_perf_simple.c b/drivers/staging/fsl_dce/tests/performance_simple/dce_perf_simple.c index d91c42c..388f420 100644 --- a/drivers/staging/fsl_dce/tests/performance_simple/dce_perf_simple.c +++ b/drivers/staging/fsl_dce/tests/performance_simple/dce_perf_simple.c @@ -816,8 +816,10 @@ static int do_operation(void) pr_err("fsl_dce_chunk_deflate_params failed %d\n", ret); def_process_req = kzalloc(sizeof(*def_process_req), GFP_KERNEL); - if (!def_process_req) + if (!def_process_req) { pr_err("Line %d\n", __LINE__); + return -ENOMEM; + } init_completion(&def_process_req->cb_done); @@ -951,8 +953,10 @@ done: if (fsl_dce_get_status(def_process_req->output_fd.status) != STREAM_END) goto skip_output_copy; test_data->out_data = vmalloc(def_process_req->dce_cf[0].length); - if (!test_data->out_data) + if (!test_data->out_data) { pr_err("Unable to allocate output data\n"); + return -ENOMEM; + } test_data->out_data_len = def_process_req->dce_cf[0].length; if (!bman_output) 
{ diff --git a/drivers/staging/fsl_dce/tests/performance_simple/dce_sf_perf_simple.c b/drivers/staging/fsl_dce/tests/performance_simple/dce_sf_perf_simple.c index d0e5fe0..10154b1 100644 --- a/drivers/staging/fsl_dce/tests/performance_simple/dce_sf_perf_simple.c +++ b/drivers/staging/fsl_dce/tests/performance_simple/dce_sf_perf_simple.c @@ -926,8 +926,10 @@ static int do_operation(void) for (i = 0; i < chunk_count; i++) { def_process_req = kzalloc(sizeof(*def_process_req), GFP_KERNEL); - if (!def_process_req) + if (!def_process_req) { pr_err("Line %d\n", __LINE__); + return -ENOMEM; + } def_process_req->extra_data_size = i; @@ -1065,6 +1067,11 @@ try_again: } i++; } + + if (!def_process_req) { + pr_err("Line %d\n", __LINE__); + return -EINVAL; + } /* wait for last request to be processed */ wait_for_completion(&def_process_req->cb_done); end_time = mfatb(); @@ -1092,8 +1099,10 @@ done: pr_info("Total output required %d\n", total_out); test_data->out_data_len = total_out; test_data->out_data = vmalloc(total_out); - if (!test_data->out_data) + if (!test_data->out_data) { pr_err("vmalloc FAILED\n"); + return -ENOMEM; + } p_out = test_data->out_data; /* copy output */ diff --git a/drivers/staging/fsl_pme2/pme2_ctrl.c b/drivers/staging/fsl_pme2/pme2_ctrl.c index 388162b..56e2443 100644 --- a/drivers/staging/fsl_pme2/pme2_ctrl.c +++ b/drivers/staging/fsl_pme2/pme2_ctrl.c @@ -138,6 +138,10 @@ static __init int parse_mem_property(struct device_node *node, const char *name, } else if (zero) { /* map as cacheable, non-guarded */ void *tmpp = ioremap_prot(*addr, *sz, 0); + if (!tmpp) { + pr_err("pme: Failed to remap\n"); + return -EINVAL; + } memset(tmpp, 0, *sz); iounmap(tmpp); } diff --git a/drivers/staging/fsl_pme2/pme2_db.c b/drivers/staging/fsl_pme2/pme2_db.c index 50263d5..0cc13fc 100644 --- a/drivers/staging/fsl_pme2/pme2_db.c +++ b/drivers/staging/fsl_pme2/pme2_db.c @@ -295,6 +295,7 @@ comp_frame_free_rx: kfree(rx_data); goto unmap_input_frame; 
single_frame_unmap_frame: + kfree(rx_data); unmap_input_frame: free_tx_data: kfree(tx_data); diff --git a/drivers/staging/fsl_pme2/pme2_high.c b/drivers/staging/fsl_pme2/pme2_high.c index 12ab7a1..e39bfc6 100644 --- a/drivers/staging/fsl_pme2/pme2_high.c +++ b/drivers/staging/fsl_pme2/pme2_high.c @@ -933,6 +933,7 @@ static inline void cb_helper(__always_unused struct qman_portal *portal, if (error) do_flags(ctx, 0, 0, PME_CTX_FLAG_DEAD, 0); token = pop_matching_token(ctx, fd); + BUG_ON(!token); if (likely(token->cmd_type == pme_cmd_scan)) { ctx->cb(ctx, fd, token); } else if (token->cmd_type == pme_cmd_pmtcc) { @@ -1002,6 +1003,7 @@ static void cb_ern(__always_unused struct qman_portal *portal, ctx = data->parent; token = pop_matching_token(ctx, &mr->ern.fd); + BUG_ON(!token); if (likely(token->cmd_type == pme_cmd_scan)) { BUG_ON(!ctx->ern_cb); ctx->ern_cb(ctx, mr, token); diff --git a/drivers/staging/fsl_qbman/fsl_usdpaa.c b/drivers/staging/fsl_qbman/fsl_usdpaa.c index 55223cd..0c54657 100644 --- a/drivers/staging/fsl_qbman/fsl_usdpaa.c +++ b/drivers/staging/fsl_qbman/fsl_usdpaa.c @@ -20,6 +20,7 @@ #include <linux/memblock.h> #include <linux/slab.h> #include <linux/mman.h> +#include <mm/mmu_decl.h> /* Physical address range of the memory reservation, exported for mm/mem.c */ static u64 phys_start; @@ -1117,6 +1118,7 @@ static long ioctl_dma_unmap(struct ctx *ctx, void __user *arg) struct mem_fragment *current_frag; size_t sz; unsigned long base; + unsigned long vaddr; down_write(&current->mm->mmap_sem); vma = find_vma(current->mm, (unsigned long)arg); @@ -1150,9 +1152,19 @@ map_match: } current_frag = map->root_frag; + vaddr = (unsigned long) map->virt_addr; for (i = 0; i < map->frag_count; i++) { DPA_ASSERT(current_frag->refs > 0); --current_frag->refs; + /* + * Make sure we invalidate the TLB entry for + * this fragment, otherwise a remap of a different + * page to this vaddr would give access to an + * incorrect piece of memory + */ + cleartlbcam(vaddr,
mfspr(SPRN_PID)); + + vaddr += current_frag->len; current_frag = list_entry(current_frag->list.prev, struct mem_fragment, list); } diff --git a/drivers/staging/fsl_qbman/qman_high.c b/drivers/staging/fsl_qbman/qman_high.c index d17d301..80604b7 100644 --- a/drivers/staging/fsl_qbman/qman_high.c +++ b/drivers/staging/fsl_qbman/qman_high.c @@ -3397,7 +3397,7 @@ int qman_ceetm_sp_release(struct qm_ceetm_sp *sp) { struct qm_ceetm_sp *p; - if (sp->lni->is_claimed == 1) { + if (sp->lni && sp->lni->is_claimed == 1) { pr_err("The dependency of sub-portal has not been released!\n"); return -EBUSY; } @@ -4456,7 +4456,9 @@ int qman_ceetm_cq_claim(struct qm_ceetm_cq **cq, if (qman_ceetm_configure_cq(&cq_config)) { pr_err("Can't configure the CQ#%d with CCGRID#%d\n", idx, ccg->idx); - return -EINVAL; + list_del(&p->node); + kfree(p); + return -EINVAL; } } @@ -4503,6 +4505,8 @@ int qman_ceetm_cq_claim_A(struct qm_ceetm_cq **cq, if (qman_ceetm_configure_cq(&cq_config)) { pr_err("Can't configure the CQ#%d with CCGRID#%d\n", idx, ccg->idx); + list_del(&p->node); + kfree(p); return -EINVAL; } } @@ -4549,6 +4553,8 @@ int qman_ceetm_cq_claim_B(struct qm_ceetm_cq **cq, if (qman_ceetm_configure_cq(&cq_config)) { pr_err("Can't configure the CQ#%d with CCGRID#%d\n", idx, ccg->idx); + list_del(&p->node); + kfree(p); return -EINVAL; } } @@ -4814,6 +4820,8 @@ int qman_ceetm_lfq_claim(struct qm_ceetm_lfq **lfq, if (qman_ceetm_configure_lfqmt(&lfqmt_config)) { pr_err("Can't configure LFQMT for LFQID#%d @ CQ#%d\n", lfqid, cq->idx); + list_del(&p->node); + kfree(p); return -EINVAL; } *lfq = p; diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 6fabded..09eb648 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -767,7 +767,7 @@ void tbf_hook_fn_register(tbf_add_hook *add, struct Qdisc *tbf_get_inner_qdisc(struct Qdisc *sch); #endif -#ifdef CONFIG_ASF_EGRESS_QOS +#if defined(CONFIG_ASF_EGRESS_QOS) || defined(CONFIG_ASF_LINUX_QOS) typedef int 
asf_qos_fn_hook(struct sk_buff *skb); void asf_qos_fn_register(asf_qos_fn_hook *fn); #endif diff --git a/include/uapi/linux/fmd/Peripherals/fm_pcd_ioctls.h b/include/uapi/linux/fmd/Peripherals/fm_pcd_ioctls.h index b0bd8aa..99f654b 100644 --- a/include/uapi/linux/fmd/Peripherals/fm_pcd_ioctls.h +++ b/include/uapi/linux/fmd/Peripherals/fm_pcd_ioctls.h @@ -375,6 +375,23 @@ typedef struct ioc_fm_pcd_kg_dflt_value_params_t { *//***************************************************************************/ #define FM_PCD_IOC_GET_COUNTER _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(9), ioc_fm_pcd_counters_params_t) +/**************************************************************************//** + + @Function FM_PCD_KgSchemeGetCounter + + @Description Reads scheme packet counter. + + @Param[in] h_Scheme scheme handle as returned by FM_PCD_KgSchemeSet(). + + @Return Counter's current value. + + @Cautions Allowed only following FM_PCD_Init() & FM_PCD_KgSchemeSet(). +*//***************************************************************************/ +#if defined(CONFIG_COMPAT) +#define FM_PCD_IOC_KG_SCHEME_GET_CNTR_COMPAT _IOR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(4), ioc_compat_fm_pcd_kg_scheme_spc_t) +#endif +#define FM_PCD_IOC_KG_SCHEME_GET_CNTR _IOR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(4), ioc_fm_pcd_kg_scheme_spc_t) + #if 0 TODO: unused IOCTL /**************************************************************************//** @@ -1126,6 +1143,15 @@ typedef struct ioc_fm_pcd_kg_scheme_counter_t { counter; clear this field to reset the counter. 
*/ } ioc_fm_pcd_kg_scheme_counter_t; + +/**************************************************************************//** + @Description A structure for retrieving FMKG_SE_SPC +*//***************************************************************************/ +typedef struct ioc_fm_pcd_kg_scheme_spc_t { + uint32_t val; /**< return value */ + void *id; /**< scheme handle */ +} ioc_fm_pcd_kg_scheme_spc_t; + /**************************************************************************//** @Description A structure for defining policer profile parameters as required by keygen (when policer is the next engine after this scheme). @@ -1486,6 +1512,12 @@ typedef struct ioc_fm_pcd_hash_table_params_t { ioc_fm_pcd_cc_next_engine_params_t cc_next_engine_params_for_miss; /**< Parameters for defining the next engine when a key is not matched */ + bool aging_support; /**< TRUE to enable aging support for all keys of this hash table; + Aging status of a key enables the application to monitor if the + key was accessed for a certain period of time, meaning if a + packet that matches this key was received since this bit was last + set by the application */ + void *id; } ioc_fm_pcd_hash_table_params_t; diff --git a/net/core/dev.c b/net/core/dev.c index 647ec24..71d316a 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2470,7 +2470,7 @@ static void dev_gso_skb_destructor(struct sk_buff *skb) * This function segments the given skb and stores the list of segments * in skb->next. 
*/ -static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features) +int dev_gso_segment(struct sk_buff *skb, netdev_features_t features) { struct sk_buff *segs; @@ -2489,6 +2489,13 @@ static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features) return 0; } +EXPORT_SYMBOL(dev_gso_segment); + +void dev_set_skb_destructor(struct sk_buff *skb) +{ + skb->destructor = DEV_GSO_CB(skb)->destructor; +} +EXPORT_SYMBOL(dev_set_skb_destructor); static netdev_features_t harmonize_features(struct sk_buff *skb, const struct net_device *dev, @@ -2556,9 +2563,10 @@ static inline int skb_needs_linearize(struct sk_buff *skb, !(features & NETIF_F_SG))); } -#ifdef CONFIG_ASF_EGRESS_QOS +#if defined(CONFIG_ASF_EGRESS_QOS) || defined(CONFIG_ASF_LINUX_QOS) /* Linux QoS hook to tranfer all packet to ASF QoS */ -static asf_qos_fn_hook *asf_qos_fn; +asf_qos_fn_hook *asf_qos_fn; +EXPORT_SYMBOL(asf_qos_fn); void asf_qos_fn_register(asf_qos_fn_hook *fn) { @@ -2848,7 +2856,7 @@ int dev_queue_xmit(struct sk_buff *skb) skb_update_prio(skb); -#ifdef CONFIG_ASF_EGRESS_QOS +#if defined(CONFIG_ASF_EGRESS_QOS) || defined(CONFIG_ASF_LINUX_QOS) if (asf_qos_fn) { rc = asf_qos_fn(skb); if (!rc) @@ -4445,7 +4453,9 @@ out: softnet_break: sd->time_squeeze++; +#ifndef CONFIG_PREEMPT_RT_FULL __raise_softirq_irqoff(NET_RX_SOFTIRQ); +#endif goto out; } diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index 891070e..a3e1900 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c @@ -736,7 +736,7 @@ static bool tcp_in_window(const struct nf_conn *ct, break; case TCPOPT_TIMESTAMP: state->seen[dir].td_tcptimestamp = - ntohl(*((unsigned long *) + ntohl(*((unsigned int *) (tcpopt + 2))); goto DONE; default: |