Diffstat (limited to 'drivers')
-rw-r--r--  drivers/cpufreq/qoriq-cpufreq.c | 4
-rw-r--r--  drivers/crypto/caam/caamalg.c | 12
-rw-r--r--  drivers/crypto/caam/caamhash.c | 43
-rw-r--r--  drivers/crypto/caam/ctrl.c | 25
-rw-r--r--  drivers/crypto/caam/regs.h | 3
-rw-r--r--  drivers/net/Kconfig | 55
-rw-r--r--  drivers/net/ethernet/freescale/dpa/dpaa_eth_base.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_cc.c | 9
-rw-r--r--  drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm.c | 88
-rw-r--r--  drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm_compat.c | 18
-rw-r--r--  drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm_compat.h | 12
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 9
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.h | 4
-rw-r--r--  drivers/pci/host/pci-layerscape.c | 9
-rw-r--r--  drivers/staging/fsl_dce/tests/chunk_comp_def_cf/dce_chunk_comp_def_cf_test.c | 171
-rw-r--r--  drivers/staging/fsl_dce/tests/performance_simple/dce_perf_simple.c | 8
-rw-r--r--  drivers/staging/fsl_dce/tests/performance_simple/dce_sf_perf_simple.c | 13
-rw-r--r--  drivers/staging/fsl_pme2/pme2_ctrl.c | 4
-rw-r--r--  drivers/staging/fsl_pme2/pme2_db.c | 1
-rw-r--r--  drivers/staging/fsl_pme2/pme2_high.c | 2
-rw-r--r--  drivers/staging/fsl_qbman/fsl_usdpaa.c | 12
-rw-r--r--  drivers/staging/fsl_qbman/qman_high.c | 12
22 files changed, 366 insertions, 150 deletions
diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c
index 36b2e6e..cd620fb 100644
--- a/drivers/cpufreq/qoriq-cpufreq.c
+++ b/drivers/cpufreq/qoriq-cpufreq.c
@@ -525,7 +525,7 @@ static int __init qoriq_cpufreq_init(void)
ret = cpufreq_register_driver(&qoriq_cpufreq_driver);
if (!ret)
- pr_info("Freescale PowerPC qoriq CPU frequency scaling driver\n");
+ pr_info("Freescale QorIQ CPU frequency scaling driver\n");
return ret;
}
@@ -539,4 +539,4 @@ module_exit(qoriq_cpufreq_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Tang Yuantian <Yuantian.Tang@freescale.com>");
-MODULE_DESCRIPTION("cpufreq driver for Freescale e500mc series SoCs");
+MODULE_DESCRIPTION("cpufreq driver for Freescale QorIQ series SoCs");
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 0f117d0..023a5d8 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -3070,8 +3070,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
- sec4_sg_bytes, GFP_DMA | flags);
+ edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
+ GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
return ERR_PTR(-ENOMEM);
@@ -3310,8 +3310,8 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
- sec4_sg_bytes, GFP_DMA | flags);
+ edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
+ GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
return ERR_PTR(-ENOMEM);
@@ -3581,8 +3581,8 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
- sec4_sg_bytes, GFP_DMA | flags);
+ edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
+ GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
return ERR_PTR(-ENOMEM);
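The three hunks above make the same two-part change: kzalloc() instead of kmalloc(), so stale heap contents can never reach a hardware descriptor through a field the driver forgot to initialize, and sizeof(*edesc) instead of sizeof(struct ...), so the size expression stays correct if the pointer's type ever changes. A minimal sketch of the idiom; the struct and function names here are invented, not the driver's:

#include <linux/slab.h>

struct demo_edesc {
        int src_nents;
        u32 hw_desc[16];        /* real descriptors carry a variable tail */
};

static struct demo_edesc *demo_edesc_alloc(size_t tail_bytes, gfp_t flags)
{
        struct demo_edesc *edesc;

        /* kzalloc == kmalloc + zeroing: unwritten fields stay 0 instead
         * of whatever the heap last held; GFP_DMA keeps the allocation
         * in DMA-capable memory for the accelerator. */
        edesc = kzalloc(sizeof(*edesc) + tail_bytes, GFP_DMA | flags);
        return edesc;           /* caller checks for NULL */
}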
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index e99a45b..fb913c3 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -807,7 +807,7 @@ static int ahash_update_ctx(struct ahash_request *req)
* allocate space for base edesc and hw desc commands,
* link tables
*/
- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
sec4_sg_bytes, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev,
@@ -835,17 +835,17 @@ static int ahash_update_ctx(struct ahash_request *req)
src_map_to_sec4_sg(jrdev, req->src, src_nents,
edesc->sec4_sg + sec4_sg_src_index,
chained);
- if (*next_buflen) {
+ if (*next_buflen)
scatterwalk_map_and_copy(next_buf, req->src,
to_hash - *buflen,
*next_buflen, 0);
- state->current_buf = !state->current_buf;
- }
} else {
(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
SEC4_SG_LEN_FIN;
}
+ state->current_buf = !state->current_buf;
+
sh_len = desc_len(sh_desc);
desc = edesc->hw_desc;
init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
@@ -909,17 +909,18 @@ static int ahash_final_ctx(struct ahash_request *req)
state->buflen_1;
u32 *sh_desc = ctx->sh_desc_fin, *desc;
dma_addr_t ptr = ctx->sh_desc_fin_dma;
- int sec4_sg_bytes;
+ int sec4_sg_bytes, sec4_sg_src_index;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
int ret = 0;
int sh_len;
- sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);
+ sec4_sg_src_index = 1 + (buflen ? 1 : 0);
+ sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
- sec4_sg_bytes, GFP_DMA | flags);
+ edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
+ GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
return -ENOMEM;
@@ -942,7 +943,7 @@ static int ahash_final_ctx(struct ahash_request *req)
state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
buf, state->buf_dma, buflen,
last_buflen);
- (edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN;
+ (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes, DMA_TO_DEVICE);
@@ -1005,8 +1006,8 @@ static int ahash_finup_ctx(struct ahash_request *req)
sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
- sec4_sg_bytes, GFP_DMA | flags);
+ edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
+ GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
return -ENOMEM;
@@ -1091,8 +1092,8 @@ static int ahash_digest(struct ahash_request *req)
sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes +
- DESC_JOB_IO_LEN, GFP_DMA | flags);
+ edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes + DESC_JOB_IO_LEN,
+ GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
return -ENOMEM;
@@ -1165,8 +1166,7 @@ static int ahash_final_no_ctx(struct ahash_request *req)
int sh_len;
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN,
- GFP_DMA | flags);
+ edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
return -ENOMEM;
@@ -1245,7 +1245,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
* allocate space for base edesc and hw desc commands,
* link tables
*/
- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
sec4_sg_bytes, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev,
@@ -1268,9 +1268,10 @@ static int ahash_update_no_ctx(struct ahash_request *req)
scatterwalk_map_and_copy(next_buf, req->src,
to_hash - *buflen,
*next_buflen, 0);
- state->current_buf = !state->current_buf;
}
+ state->current_buf = !state->current_buf;
+
sh_len = desc_len(sh_desc);
desc = edesc->hw_desc;
init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
@@ -1352,8 +1353,8 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
- sec4_sg_bytes, GFP_DMA | flags);
+ edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
+ GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
return -ENOMEM;
@@ -1447,7 +1448,7 @@ static int ahash_update_first(struct ahash_request *req)
* allocate space for base edesc and hw desc commands,
* link tables
*/
- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
sec4_sg_bytes, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev,
@@ -1544,6 +1545,8 @@ static int ahash_init(struct ahash_request *req)
state->current_buf = 0;
state->buf_dma = 0;
+ state->buflen_0 = 0;
+ state->buflen_1 = 0;
return 0;
}
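Besides the kzalloc() conversions, the sec4_sg_src_index change in ahash_final_ctx() fixes a scaling bug: the old code indexed the scatter/gather table with sec4_sg_bytes, a byte count, but pointer arithmetic on a struct pointer already multiplies by the element size, so the write landed well past the last entry. A small self-contained illustration under invented names:

#include <stddef.h>
#include <stdint.h>

struct sg_entry {
        uint32_t len;
};

#define SG_LEN_FIN 0x40000000u  /* invented stand-in for SEC4_SG_LEN_FIN */

static void mark_last_entry(struct sg_entry *tbl, size_t nentries)
{
        /* Buggy form: (tbl + nentries * sizeof(struct sg_entry) - 1).
         * Pointer arithmetic already scales by the element size, so
         * indexing with a byte count writes far past the table. */

        /* Fixed form: index with the entry count. */
        tbl[nentries - 1].len |= SG_LEN_FIN;
}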
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 0359c71..1615916 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -16,6 +16,12 @@
#include "qi.h"
#endif
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+static const bool is_arm = true;
+#else
+static const bool is_arm;
+#endif
+
/*
* Descriptor to instantiate RNG State Handle 0 in normal mode and
* load the JDKEK, TDKEK and TDSK registers
@@ -488,7 +494,7 @@ static int caam_probe(struct platform_device *pdev)
#ifdef CONFIG_DEBUG_FS
struct caam_perfmon *perfmon;
#endif
- u32 mcr, scfgr, comp_params;
+ u32 scfgr, comp_params;
int pg_size;
int BLOCK_OFFSET = 0;
@@ -537,11 +543,9 @@ static int caam_probe(struct platform_device *pdev)
* Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
* long pointers in master configuration register
*/
- mcr = rd_reg32(&ctrl->mcr);
- mcr = (mcr & ~MCFGR_AWCACHE_MASK) | (0x2 << MCFGR_AWCACHE_SHIFT) |
- MCFGR_WDENABLE | (sizeof(dma_addr_t) == sizeof(u64) ?
- MCFGR_LONG_PTR : 0);
- wr_reg32(&ctrl->mcr, mcr);
+ setbits32(&ctrl->mcr, MCFGR_WDENABLE | MCFGR_LARGE_BURST |
+ (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0) |
+ (is_arm ? 0x2 << MCFGR_AWCACHE_SHIFT : 0));
/*
* Read the Compile Time parameters and SCFGR to determine
@@ -820,7 +824,6 @@ static int caam_resume(struct device *dev)
struct caam_drv_private *caam_priv;
struct caam_ctrl __iomem *ctrl;
struct caam_queue_if __iomem *qi;
- u32 mcr;
int ret;
caam_priv = dev_get_drvdata(dev);
@@ -830,11 +833,9 @@ static int caam_resume(struct device *dev)
* Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
* long pointers in master configuration register
*/
- mcr = rd_reg32(&ctrl->mcr);
- mcr = (mcr & ~MCFGR_AWCACHE_MASK) | (0x2 << MCFGR_AWCACHE_SHIFT) |
- MCFGR_WDENABLE | (sizeof(dma_addr_t) == sizeof(u64) ?
- MCFGR_LONG_PTR : 0);
- wr_reg32(&ctrl->mcr, mcr);
+ setbits32(&ctrl->mcr, MCFGR_WDENABLE | MCFGR_LARGE_BURST |
+ (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0) |
+ (is_arm ? 0x2 << MCFGR_AWCACHE_SHIFT : 0));
/* Enable QI interface of SEC */
if (caam_priv->qi_present)
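Both hunks above collapse an explicit read/mask/write of the master configuration register into a single setbits32() call that ORs the new bits in (note also that the AWCACHE value is now applied only on ARM, and MCFGR_LARGE_BURST is newly set). A hedged sketch of what the helper amounts to, assuming this driver's rd_reg32()/wr_reg32() accessors from regs.h:

/* Open-coded equivalent of setbits32(); rd_reg32()/wr_reg32() are the
 * driver's endian-aware MMIO accessors. This only ORs bits in: the old
 * code's explicit clearing of the AWCACHE field is gone. */
static inline void demo_setbits32(u32 __iomem *reg, u32 bits)
{
        wr_reg32(reg, rd_reg32(reg) | bits);
}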
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 72193f8..b2715d4 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -413,7 +413,8 @@ struct caam_ctrl {
#define MCFGR_AXIPIPE_MASK (0xf << MCFGR_AXIPIPE_SHIFT)
#define MCFGR_AXIPRI 0x00000008 /* Assert AXI priority sideband */
-#define MCFGR_BURST_64 0x00000001 /* Max burst size */
+#define MCFGR_LARGE_BURST 0x00000004 /* 128/256-byte burst size */
+#define MCFGR_BURST_64 0x00000001 /* 64-byte burst size */
/* JRSTART register offsets */
#define JRSTART_JR0_START 0x00000001 /* Start Job ring 0 */
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index ff1e2a0..9e35d1b 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -90,10 +90,27 @@ config ASF_IPV6
This option compiles the ASF for IPV6
Firewall and IPsec.
+config ASF_LINUX_QOS
+ default n
+ bool "ASF Linux QoS Integration"
+ depends on AS_FASTPATH
+ select NETFILTER_XT_TARGET_DSCP
+ select NET_SCHED
+ select NET_SCH_PRIO
+ select NET_SCH_TBF
+ select NET_SCH_DRR
+ select NET_CLS
+ select NET_CLS_BASIC
+ select NET_CLS_U32
+ select NET_CLS_FLOW
+ select NET_SCH_FIFO
+ select IP_NF_MANGLE
+ select IP6_NF_MANGLE
+
config ASF_QOS
- default y
+ default n
bool "ASF QoS Support"
- depends on AS_FASTPATH
+ depends on AS_FASTPATH && !ASF_LINUX_QOS && !FSL_DPAA_ETH
select NETFILTER_XT_TARGET_DSCP
select NET_SCHED
select NET_SCH_PRIO
@@ -114,23 +131,29 @@ config ASF_EGRESS_QOS
config ASF_INGRESS_MARKER
default y
- bool "DSCP Marking Support"
- depends on ASF_QOS && ASF_EGRESS_QOS
+ bool "ASF DSCP Marking Support"
+ depends on AS_FASTPATH
+ depends on ASF_QOS
---help---
- Choose this option if you wish to utilize ASF Marking support.
- Currently only DSCP marking is supported.
+ Choose this option if you wish to utilize
+ ASF Marking support.
+ Currently only DSCP marking
+ is supported.
config ASF_EGRESS_SCH
default y
- bool "S/W Scheduler Support"
- depends on ASF_QOS && ASF_EGRESS_QOS && !DPA_ETH
+ bool "ASF S/W Scheduler Support"
+ depends on ASF_QOS && ASF_EGRESS_QOS && !FSL_DPAA_ETH
---help---
- Choose this option if you wish to utilize ASF S/W based Scheduler support.
+ Choose this option
+ if you wish to utilize
+ ASF S/W based
+ Scheduler support.
config ASF_EGRESS_SHAPER
default y
- bool "S/W Shaper Support"
- depends on ASF_QOS && ASF_EGRESS_QOS && ASF_EGRESS_SCH && !DPA_ETH
+ bool "ASF S/W Shaper Support"
+ depends on ASF_QOS && ASF_EGRESS_QOS && ASF_EGRESS_SCH && !FSL_DPAA_ETH
---help---
Choose this option if you wish to utilize ASF S/W based Shaping support.
@@ -148,13 +171,15 @@ choice
default ASF_SCH_PRIO
config ASF_SCH_PRIO
- bool "Strict Priority (PRIO)"
+ bool "ASF Strict Priority (PRIO)"
---help---
- This option compiles the ASF to utilize eTSEC(NON-DPAA)
- H/W Scheduler with PRIORITY algorithm.
+ This option compiles the ASF
+ to utilize eTSEC(NON-DPAA) H/W
+ Scheduler with PRIORITY
+ algorithm.
config ASF_SCH_MWRR
- bool "Modified Weighted Round-Robin (MWRR)"
+ bool "ASF Modified Weighted Round-Robin (MWRR)"
---help---
This option compiles the ASF to utilize eTSEC(NON-DPAA)
H/W Scheduler with Modified Weighted Round-Robin algorithm.
diff --git a/drivers/net/ethernet/freescale/dpa/dpaa_eth_base.c b/drivers/net/ethernet/freescale/dpa/dpaa_eth_base.c
index 9a0309d..9f55496 100644
--- a/drivers/net/ethernet/freescale/dpa/dpaa_eth_base.c
+++ b/drivers/net/ethernet/freescale/dpa/dpaa_eth_base.c
@@ -187,7 +187,7 @@ int dpa_bp_shared_port_seed(struct dpa_bp *bp)
/* seed pool with buffers from that memory region */
if (bp->seed_pool) {
int count = bp->target_count;
- size_t addr = bp->paddr;
+ dma_addr_t addr = bp->paddr;
while (count) {
struct bm_buffer bufs[8];
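The size_t to dma_addr_t change matters on 32-bit kernels with wider physical addressing (CONFIG_PHYS_ADDR_T_64BIT, e.g. 36-bit e500 parts): size_t is 32 bits there, so storing a bus address in it silently drops the upper bits. A self-contained userspace illustration of the truncation:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t paddr = 0x900000000ULL;        /* bus address above 4 GiB */
        uint32_t narrow = (uint32_t)paddr;      /* what a 32-bit size_t keeps */

        /* prints "full: 0x900000000  narrowed: 0x0" */
        printf("full: 0x%" PRIx64 "  narrowed: 0x%" PRIx32 "\n",
               paddr, narrow);
        return 0;
}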
diff --git a/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_cc.c b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_cc.c
index f439167..13a5c15 100644
--- a/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_cc.c
+++ b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_cc.c
@@ -6615,7 +6615,6 @@ t_Error FM_PCD_MatchTableModifyKey(t_Handle h_CcNode, uint16_t keyIndex,
{
t_FmPcd *p_FmPcd;
t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
- t_List h_List;
t_Error err = E_OK;
SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE);
@@ -6624,18 +6623,16 @@ t_Error FM_PCD_MatchTableModifyKey(t_Handle h_CcNode, uint16_t keyIndex,
SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE);
- INIT_LIST(&h_List);
- err = FmPcdCcNodeTreeTryLock(p_FmPcd, p_CcNode, &h_List);
- if (err)
+ if (!FmPcdLockTryLockAll(p_FmPcd))
{
- DBG(TRACE, ("Node's trees lock failed"));
+ DBG(TRACE, ("FmPcdLockTryLockAll failed"));
return ERROR_CODE(E_BUSY);
}
err = FmPcdCcModifyKey(p_FmPcd, p_CcNode, keyIndex, keySize, p_Key, p_Mask);
- FmPcdCcNodeTreeReleaseLock(p_FmPcd, &h_List);
+ FmPcdLockUnlockAll(p_FmPcd);
switch(GET_ERROR_TYPE(err)) {
diff --git a/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm.c b/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm.c
index 9224703..caebc2e 100644
--- a/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm.c
+++ b/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm.c
@@ -1024,6 +1024,94 @@ Status: feature not supported
}
#if defined(CONFIG_COMPAT)
+ case FM_PCD_IOC_KG_SCHEME_GET_CNTR_COMPAT:
+#endif
+ case FM_PCD_IOC_KG_SCHEME_GET_CNTR:
+ {
+ ioc_fm_pcd_kg_scheme_spc_t *param;
+
+ param = (ioc_fm_pcd_kg_scheme_spc_t *) XX_Malloc(sizeof(ioc_fm_pcd_kg_scheme_spc_t));
+ if (!param)
+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
+
+ memset(param, 0, sizeof(ioc_fm_pcd_kg_scheme_spc_t));
+
+#if defined(CONFIG_COMPAT)
+ if (compat)
+ {
+ ioc_compat_fm_pcd_kg_scheme_spc_t *compat_param = NULL;
+
+ compat_param = (ioc_compat_fm_pcd_kg_scheme_spc_t *) XX_Malloc(
+ sizeof(ioc_compat_fm_pcd_kg_scheme_spc_t));
+ if (!compat_param)
+ {
+ XX_Free(param);
+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
+ }
+
+ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_kg_scheme_spc_t));
+
+ if (copy_from_user(compat_param, (ioc_compat_fm_pcd_kg_scheme_spc_t *) compat_ptr(arg),
+ sizeof(ioc_compat_fm_pcd_kg_scheme_spc_t)))
+ {
+ XX_Free(compat_param);
+ XX_Free(param);
+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
+ }
+
+ compat_copy_fm_pcd_kg_scheme_spc(compat_param, param, COMPAT_US_TO_K);
+
+ XX_Free(compat_param);
+ }
+ else
+#endif
+ {
+ if (copy_from_user(param, (ioc_fm_pcd_kg_scheme_spc_t *)arg,
+ sizeof(ioc_fm_pcd_kg_scheme_spc_t)))
+ {
+ XX_Free(param);
+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
+ }
+ }
+
+ param->val = FM_PCD_KgSchemeGetCounter((t_Handle)param->id);
+
+#if defined(CONFIG_COMPAT)
+ if (compat)
+ {
+ ioc_compat_fm_pcd_kg_scheme_spc_t *compat_param;
+
+ compat_param = (ioc_compat_fm_pcd_kg_scheme_spc_t *) XX_Malloc(
+ sizeof(ioc_compat_fm_pcd_kg_scheme_spc_t));
+ if (!compat_param)
+ {
+ XX_Free(param);
+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
+ }
+
+ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_kg_scheme_spc_t));
+ compat_copy_fm_pcd_kg_scheme_spc(compat_param, param, COMPAT_K_TO_US);
+ if (copy_to_user((ioc_compat_fm_pcd_kg_scheme_spc_t *)compat_ptr(arg),
+ compat_param,
+ sizeof(ioc_compat_fm_pcd_kg_scheme_spc_t)))
+ err = E_READ_FAILED;
+
+ XX_Free(compat_param);
+ }
+ else
+#endif
+ {
+ if (copy_to_user((ioc_fm_pcd_kg_scheme_spc_t *)arg,
+ param,
+ sizeof(ioc_fm_pcd_kg_scheme_spc_t)))
+ err = E_READ_FAILED;
+ }
+
+ XX_Free(param);
+ break;
+ }
+
+#if defined(CONFIG_COMPAT)
case FM_PCD_IOC_KG_SCHEME_DELETE_COMPAT:
#endif
case FM_PCD_IOC_KG_SCHEME_DELETE:
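The new FM_PCD_IOC_KG_SCHEME_GET_CNTR case follows the usual compat-ioctl shape: copy in a 32-bit-layout struct whose pointers are compat_uptr_t, widen it with compat_ptr() into the native struct, run the operation, then narrow the result back out. A stripped-down sketch of that pattern with invented struct and function names (the real handler above additionally round-trips through XX_Malloc'ed buffers):

#include <linux/compat.h>
#include <linux/uaccess.h>

struct demo_spc { void *id; u32 val; };                 /* native layout */
struct demo_spc_compat { compat_uptr_t id; u32 val; };  /* 32-bit layout */

static u32 demo_read_counter(void *id);                 /* assumed driver op */

static long demo_get_cntr_compat(struct demo_spc_compat __user *uarg)
{
        struct demo_spc_compat c;
        struct demo_spc p;

        if (copy_from_user(&c, uarg, sizeof(c)))
                return -EFAULT;
        p.id = compat_ptr(c.id);        /* widen the 32-bit user pointer */
        p.val = demo_read_counter(p.id);
        c.val = p.val;                  /* narrow the result back out */
        if (copy_to_user(uarg, &c, sizeof(c)))
                return -EFAULT;
        return 0;
}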
diff --git a/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm_compat.c b/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm_compat.c
index ea0885a..baf2a33 100644
--- a/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm_compat.c
+++ b/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm_compat.c
@@ -518,6 +518,7 @@ void compat_copy_fm_pcd_hash_table(
param->hash_res_mask = compat_param->hash_res_mask;
param->hash_shift = compat_param->hash_shift;
param->match_key_size = compat_param->match_key_size;
+ param->aging_support = compat_param->aging_support;
param->id = compat_pcd_id2ptr(compat_param->id);
}
else
@@ -528,6 +529,7 @@ void compat_copy_fm_pcd_hash_table(
compat_param->hash_res_mask = param->hash_res_mask;
compat_param->hash_shift = param->hash_shift;
compat_param->match_key_size = param->match_key_size;
+ compat_param->aging_support = param->aging_support;
compat_param->id = compat_add_ptr2id(param->id, FM_MAP_TYPE_PCD_NODE);
}
@@ -733,6 +735,22 @@ void compat_copy_fm_pcd_kg_scheme(
_fm_cpt_dbg(compat," ...->}\n");
}
+void compat_copy_fm_pcd_kg_scheme_spc(
+ ioc_compat_fm_pcd_kg_scheme_spc_t *compat_param,
+ ioc_fm_pcd_kg_scheme_spc_t *param,
+ uint8_t compat)
+{
+ if (compat == COMPAT_US_TO_K)
+ {
+ param->id = compat_pcd_id2ptr(compat_param->id);
+ param->val = compat_param->val;
+ } else {
+ compat_param->id = compat_pcd_ptr2id(param->id);
+ compat_param->val = param->val;
+ }
+}
+
+
void compat_copy_fm_pcd_kg_scheme_select(
ioc_compat_fm_pcd_kg_scheme_select_t *compat_param,
ioc_fm_pcd_kg_scheme_select_t *param,
diff --git a/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm_compat.h b/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm_compat.h
index d89a64d..e2f779a 100644
--- a/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm_compat.h
+++ b/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm_compat.h
@@ -296,6 +296,7 @@ typedef struct ioc_compat_fm_pcd_hash_table_params_t {
uint16_t hash_res_mask;
uint8_t hash_shift;
uint8_t match_key_size;
+ bool aging_support;
ioc_compat_fm_pcd_cc_next_engine_params_t cc_next_engine_params_for_miss;
compat_uptr_t id;
} ioc_compat_fm_pcd_hash_table_params_t;
@@ -537,6 +538,12 @@ typedef struct ioc_compat_fm_vsp_prs_result_params_t {
compat_uptr_t p_fm_vsp;
compat_uptr_t p_data;
} ioc_compat_fm_vsp_prs_result_params_t;
+
+typedef struct ioc_compat_fm_pcd_kg_scheme_spc_t {
+ uint32_t val;
+ compat_uptr_t id;
+} ioc_compat_fm_pcd_kg_scheme_spc_t;
+
#endif /* (DPAA_VERSION >= 11) */
typedef struct ioc_compat_fm_ctrl_mon_counters_params_t {
@@ -710,6 +717,11 @@ void compat_copy_fm_vsp_prs_result_params(
ioc_compat_fm_vsp_prs_result_params_t *compat_param,
ioc_fm_vsp_prs_result_params_t *param,
uint8_t compat);
+
+void compat_copy_fm_pcd_kg_scheme_spc(
+ ioc_compat_fm_pcd_kg_scheme_spc_t *compat_param,
+ ioc_fm_pcd_kg_scheme_spc_t *param,
+ uint8_t compat);
#endif /* (DPAA_VERSION >= 11) */
/* } pcd compat functions */
#endif
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 406d0f0..a6f2d77 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -127,7 +127,11 @@ EXPORT_SYMBOL(devfp_tx_hook);
#endif
#endif
+#ifdef CONFIG_PREEMPT_RT_FULL
+#define TX_TIMEOUT (5*HZ)
+#else
#define TX_TIMEOUT (1*HZ)
+#endif
const char gfar_driver_version[] = "1.3";
@@ -975,9 +979,6 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
/* Find the TBI PHY. If it's not there, we don't support SGMII */
priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
-
- priv->dma_endian_le = of_property_read_bool(np, "fsl,dma-endian-le");
-
#if defined CONFIG_FSL_GIANFAR_1588
/* Handle IEEE1588 node */
if (!gfar_ptp_init(np, priv))
@@ -2211,8 +2212,6 @@ void gfar_start(struct gfar_private *priv)
/* Initialize DMACTRL to have WWR and WOP */
tempval = gfar_read(&regs->dmactrl);
tempval |= DMACTRL_INIT_SETTINGS;
- if (priv->dma_endian_le)
- tempval |= DMACTRL_LE;
gfar_write(&regs->dmactrl, tempval);
/* Make sure we aren't stopped */
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 0f5bf84..0b0d7c9 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -328,7 +328,6 @@ extern const char gfar_driver_version[];
#define DMACTRL_INIT_SETTINGS 0x000000c3
#define DMACTRL_GRS 0x00000010
#define DMACTRL_GTS 0x00000008
-#define DMACTRL_LE 0x00008000
#define TSTAT_CLEAR_THALT_ALL 0xFF000000
#define TSTAT_CLEAR_THALT 0x80000000
@@ -1380,9 +1379,6 @@ struct gfar_private {
/* L2 SRAM alloc of BDs */
bd_l2sram_en:1;
- /* little endian dma buffer and descriptor host interface */
- unsigned int dma_endian_le;
-
/* The total tx and rx ring size for the enabled queues */
unsigned int total_tx_ring_size;
unsigned int total_rx_ring_size;
diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c
index c5633cf..6d6b420 100644
--- a/drivers/pci/host/pci-layerscape.c
+++ b/drivers/pci/host/pci-layerscape.c
@@ -137,15 +137,6 @@ static void ls_pcie_host_init(struct pcie_port *pp)
dw_pcie_setup_rc(pp);
- while (!ls_pcie_link_up(pp)) {
- usleep_range(100, 1000);
- count++;
- if (count >= 200) {
- dev_err(pp->dev, "phy link never came up\n");
- return;
- }
- }
-
if (of_device_is_compatible(pcie->dev->of_node, "fsl,ls1021a-pcie")) {
/*
* LS1021A Workaround for internal TKT228622
diff --git a/drivers/staging/fsl_dce/tests/chunk_comp_def_cf/dce_chunk_comp_def_cf_test.c b/drivers/staging/fsl_dce/tests/chunk_comp_def_cf/dce_chunk_comp_def_cf_test.c
index 80e2ca1..7650e08 100644
--- a/drivers/staging/fsl_dce/tests/chunk_comp_def_cf/dce_chunk_comp_def_cf_test.c
+++ b/drivers/staging/fsl_dce/tests/chunk_comp_def_cf/dce_chunk_comp_def_cf_test.c
@@ -179,7 +179,7 @@ static int do_test(struct dce_test_ctx *ctx,
nop_req = kmalloc(sizeof(*nop_req), GFP_KERNEL);
if (!nop_req) {
ret = -ENOMEM;
- goto fail_deflate_nop;
+ goto fail_nop;
}
init_completion(&nop_req->cb_done);
@@ -188,7 +188,7 @@ static int do_test(struct dce_test_ctx *ctx,
ret = fsl_dce_chunk_nop(&ctx->deflate_chunk, 0, nop_req);
if (ret) {
BUG();
- goto fail_deflate_nop;
+ goto fail_nop;
}
pr_info("Sent NOP on deflate path\n");
@@ -199,7 +199,7 @@ static int do_test(struct dce_test_ctx *ctx,
ret = fsl_dce_chunk_nop(&ctx->inflate_chunk, 0, nop_req);
if (ret) {
BUG();
- goto fail_inflate_nop;
+ goto fail_nop;
}
pr_info("Sent NOP on inflate path\n");
@@ -212,14 +212,16 @@ static int do_test(struct dce_test_ctx *ctx,
def_process_req = kzalloc(sizeof(*def_process_req), GFP_KERNEL);
if (!def_process_req) {
BUG();
- goto fail_inflate_params;
+ ret = -ENOMEM;
+ goto fail_nop;
}
pr_info("Allocated def_process_req\n");
def_process_req->v_output = vmalloc(output_len);
- if (!def_process_req) {
+ if (!def_process_req->v_output) {
BUG();
- goto fail_v_output;
+ ret = -ENOMEM;
+ goto fail_deflate_v_output;
}
init_completion(&def_process_req->cb_done);
@@ -228,7 +230,7 @@ static int do_test(struct dce_test_ctx *ctx,
&def_process_req->input_data);
if (ret) {
BUG();
- goto fail_alloc_dce_data_input;
+ goto fail_deflate_alloc_dce_data_input;
}
if (verbose_level == 1) {
@@ -240,7 +242,7 @@ static int do_test(struct dce_test_ctx *ctx,
&def_process_req->output_data);
if (ret) {
BUG();
- goto fail_alloc_dce_data_output;
+ goto fail_deflate_alloc_dce_data_output;
}
if (verbose_level == 1) {
@@ -252,7 +254,7 @@ static int do_test(struct dce_test_ctx *ctx,
&def_process_req->input_data);
if (ret) {
BUG();
- goto fail_alloc_dce_data_output;
+ goto fail_deflate_copy_input_to_dce_data;
}
if (verbose_level == 1) {
@@ -263,7 +265,7 @@ static int do_test(struct dce_test_ctx *ctx,
ret = dma_map_dce_data(&def_process_req->input_data, DMA_BIDIRECTIONAL);
if (ret) {
BUG();
- goto fail_alloc_dce_data_output;
+ goto fail_deflate_dma_map_dce_data_input;
}
if (verbose_level == 1) {
@@ -275,7 +277,7 @@ static int do_test(struct dce_test_ctx *ctx,
DMA_BIDIRECTIONAL);
if (ret) {
BUG();
- goto fail_dma_map_deflate_output_data;
+ goto fail_deflate_dma_map_dce_data_output;
}
if (verbose_level == 1) {
@@ -288,7 +290,7 @@ static int do_test(struct dce_test_ctx *ctx,
DMA_BIDIRECTIONAL);
if (ret) {
BUG();
- goto fail_output_attach_data_to_sg_deflate;
+ goto fail_deflate_attach_data_list_to_sg_output;
}
ret = attach_data_list_to_sg(&def_process_req->dce_cf[1],
@@ -296,7 +298,7 @@ static int do_test(struct dce_test_ctx *ctx,
DMA_BIDIRECTIONAL);
if (ret) {
BUG();
- goto fail_input_attach_data_to_sg_deflate;
+ goto fail_deflate_attach_data_list_to_sg_input;
}
def_process_req->dce_cf[2].final = 1;
@@ -304,7 +306,7 @@ static int do_test(struct dce_test_ctx *ctx,
def_process_req->input_fd._format2 = qm_fd_compound;
def_process_req->input_fd.cong_weight = 1;
qm_fd_addr_set64(&def_process_req->input_fd,
- fsl_dce_map(def_process_req->dce_cf));
+ fsl_dce_map(def_process_req->dce_cf));
print_dce_fd(def_process_req->input_fd);
print_dce_sg(def_process_req->dce_cf[0]);
@@ -336,28 +338,28 @@ static int do_test(struct dce_test_ctx *ctx,
&def_process_req->input_data, DMA_BIDIRECTIONAL);
if (ret) {
pr_err("Error %d\n", __LINE__);
- goto fail_input_attach_data_to_sg_deflate;
+ goto fail_deflate_attach_data_list_to_sg_input;
}
ret = detach_data_list_from_sg(&def_process_req->dce_cf[0],
&def_process_req->output_data, DMA_BIDIRECTIONAL);
if (ret) {
pr_err("Error %d\n", __LINE__);
- goto fail_output_attach_data_to_sg_deflate;
+ goto fail_deflate_attach_data_list_to_sg_output;
}
ret = dma_unmap_dce_data(&def_process_req->output_data,
DMA_BIDIRECTIONAL);
if (ret) {
pr_err("Error %d\n", __LINE__);
- goto fail_dma_map_deflate_output_data;
+ goto fail_deflate_dma_map_dce_data_output;
}
ret = dma_unmap_dce_data(&def_process_req->input_data,
DMA_BIDIRECTIONAL);
if (ret) {
pr_err("Error %d\n", __LINE__);
- goto fail_alloc_dce_data_output;
+ goto fail_deflate_dma_map_dce_data_input;
}
pr_info("Got chunk process, status = %d, sg_table[0].length = %d\n",
@@ -376,8 +378,10 @@ static int do_test(struct dce_test_ctx *ctx,
def_process_req->v_output = vmalloc(def_process_req->dce_cf[0].length);
if (!def_process_req->v_output) {
pr_err("Error %d\n", __LINE__);
- goto fail_alloc_dce_data_output;
+ ret = -ENOMEM;
+ goto fail_deflate_copy_input_to_dce_data;
}
+
def_process_req->v_output_size = def_process_req->dce_cf[0].length;
ret = copy_output_dce_data_to_buffer(&def_process_req->output_data,
@@ -386,7 +390,7 @@ static int do_test(struct dce_test_ctx *ctx,
if (ret) {
pr_err("Error %d\n", __LINE__);
- return ret;
+ goto fail_deflate_copy_input_to_dce_data;
}
/* Free dce data deflate operation, but keep vmalloc output */
@@ -398,14 +402,16 @@ static int do_test(struct dce_test_ctx *ctx,
inf_process_req = kzalloc(sizeof(*inf_process_req), GFP_KERNEL);
if (!inf_process_req) {
pr_err("Error %d\n", __LINE__);
- return ret;
+ ret = -ENOMEM;
+ goto fail_inflate_params;
}
pr_info("Allocated inf_process_req\n");
inf_process_req->v_output = vmalloc(input_len);
- if (!def_process_req) {
+ if (!inf_process_req->v_output) {
pr_err("Error %d\n", __LINE__);
- return ret;
+ ret = -ENOMEM;
+ goto fail_inflate_v_output;
}
init_completion(&inf_process_req->cb_done);
@@ -415,7 +421,7 @@ static int do_test(struct dce_test_ctx *ctx,
&inf_process_req->input_data);
if (ret) {
pr_err("Error %d\n", __LINE__);
- return ret;
+ goto fail_inflate_alloc_dce_data_input;
}
if (verbose_level == 1) {
@@ -427,7 +433,7 @@ static int do_test(struct dce_test_ctx *ctx,
&inf_process_req->output_data);
if (ret) {
pr_err("Error %d\n", __LINE__);
- return ret;
+ goto fail_inflate_alloc_dce_data_output;
}
if (verbose_level == 1) {
@@ -439,7 +445,7 @@ static int do_test(struct dce_test_ctx *ctx,
def_process_req->v_output_size, &inf_process_req->input_data);
if (ret) {
pr_err("Error %d\n", __LINE__);
- return ret;
+ goto fail_inflate_copy_input_to_dce_data;
}
if (verbose_level == 1) {
@@ -450,7 +456,7 @@ static int do_test(struct dce_test_ctx *ctx,
ret = dma_map_dce_data(&inf_process_req->input_data, DMA_BIDIRECTIONAL);
if (ret) {
pr_err("Error %d\n", __LINE__);
- return ret;
+ goto fail_inflate_dma_map_dce_data_input;
}
if (verbose_level == 1) {
@@ -462,7 +468,7 @@ static int do_test(struct dce_test_ctx *ctx,
DMA_BIDIRECTIONAL);
if (ret) {
pr_err("Error %d\n", __LINE__);
- return ret;
+ goto fail_inflate_dma_map_dce_data_output;
}
if (verbose_level == 1) {
@@ -474,21 +480,21 @@ static int do_test(struct dce_test_ctx *ctx,
&inf_process_req->output_data, true, DMA_BIDIRECTIONAL);
if (ret) {
pr_err("Error %d\n", __LINE__);
- return ret;
+ goto fail_inflate_attach_data_list_to_sg_output;
}
ret = attach_data_list_to_sg(&inf_process_req->dce_cf[1],
&inf_process_req->input_data, false, DMA_BIDIRECTIONAL);
if (ret) {
pr_err("Error %d\n", __LINE__);
- return ret;
+ goto fail_inflate_attach_data_list_to_sg_input;
}
inf_process_req->dce_cf[2].final = 1;
inf_process_req->input_fd._format2 = qm_fd_compound;
qm_fd_addr_set64(&inf_process_req->input_fd,
- fsl_dce_map(inf_process_req->dce_cf));
+ fsl_dce_map(inf_process_req->dce_cf));
print_dce_fd(inf_process_req->input_fd);
print_dce_sg(inf_process_req->dce_cf[0]);
@@ -503,32 +509,32 @@ static int do_test(struct dce_test_ctx *ctx,
pr_info("Output FD\n");
print_dce_fd(inf_process_req->output_fd);
- ret = dma_unmap_dce_data(&inf_process_req->input_data,
- DMA_BIDIRECTIONAL);
+ ret = detach_data_list_from_sg(&inf_process_req->dce_cf[1],
+ &inf_process_req->input_data, DMA_BIDIRECTIONAL);
if (ret) {
pr_err("Error %d\n", __LINE__);
- return ret;
+ goto fail_inflate_attach_data_list_to_sg_input;
}
- ret = dma_unmap_dce_data(&inf_process_req->output_data,
- DMA_BIDIRECTIONAL);
+ ret = detach_data_list_from_sg(&inf_process_req->dce_cf[0],
+ &inf_process_req->output_data, DMA_BIDIRECTIONAL);
if (ret) {
pr_err("Error %d\n", __LINE__);
- return ret;
+ goto fail_inflate_attach_data_list_to_sg_output;
}
- ret = detach_data_list_from_sg(&inf_process_req->dce_cf[0],
- &inf_process_req->output_data, DMA_BIDIRECTIONAL);
+ ret = dma_unmap_dce_data(&inf_process_req->output_data,
+ DMA_BIDIRECTIONAL);
if (ret) {
pr_err("Error %d\n", __LINE__);
- return ret;
+ goto fail_inflate_dma_map_dce_data_output;
}
- ret = detach_data_list_from_sg(&inf_process_req->dce_cf[1],
- &inf_process_req->input_data, DMA_BIDIRECTIONAL);
+ ret = dma_unmap_dce_data(&inf_process_req->input_data,
+ DMA_BIDIRECTIONAL);
if (ret) {
pr_err("Error %d\n", __LINE__);
- return ret;
+ goto fail_inflate_dma_map_dce_data_input;
}
pr_info("Got chunk process, status = 0x%x, sg_table[0].length = %d\n",
@@ -537,7 +543,8 @@ static int do_test(struct dce_test_ctx *ctx,
if (inf_process_req->dce_cf[0].length != input_len) {
pr_err("Error %d\n", __LINE__);
- return ret;
+ ret = -EINVAL;
+ goto fail_inflate_copy_input_to_dce_data;
}
print_dce_sg(inf_process_req->dce_cf[0]);
@@ -546,7 +553,8 @@ static int do_test(struct dce_test_ctx *ctx,
if (!inf_process_req->v_output) {
pr_err("Error %d\n", __LINE__);
- return ret;
+ ret = -ENOMEM;
+ goto fail_inflate_copy_input_to_dce_data;
}
inf_process_req->v_output_size = inf_process_req->dce_cf[0].length;
@@ -555,14 +563,15 @@ static int do_test(struct dce_test_ctx *ctx,
input_len);
if (ret) {
pr_err("Error %d\n", __LINE__);
- return ret;
+ goto fail_inflate_copy_input_to_dce_data;
}
- /* compare output to orinal data */
+ /* compare output to original data */
if (memcmp(inf_process_req->v_output, input_data, input_len)) {
pr_err("Error %d\n", __LINE__);
- return ret;
+ ret = -EINVAL;
+ goto fail_inflate_copy_input_to_dce_data;
}
- pr_info("Ouput inflate data matched original!\n");
+ pr_info("Output inflate data matched original!\n");
/* Free dce data deflate operation, but keep vmalloc output */
free_dce_data(&inf_process_req->output_data);
@@ -574,12 +583,13 @@ static int do_test(struct dce_test_ctx *ctx,
kfree(def_process_req);
ret = destroy_test_ctx(ctx);
- if (ret)
+ if (ret) {
pr_err("Error with test\n");
- else
- pr_info("Done test loop\n");
+ return ret;
+ }
pr_info("Done test loop\n");
+
return 0;
fail_deflate_process:
@@ -587,35 +597,70 @@ fail_deflate_process:
&def_process_req->input_data,
DMA_BIDIRECTIONAL);
-fail_input_attach_data_to_sg_deflate:
+fail_deflate_attach_data_list_to_sg_input:
detach_data_list_from_sg(&def_process_req->dce_cf[0],
&def_process_req->output_data,
DMA_BIDIRECTIONAL);
-fail_output_attach_data_to_sg_deflate:
+fail_deflate_attach_data_list_to_sg_output:
dma_unmap_dce_data(&def_process_req->output_data, DMA_BIDIRECTIONAL);
-fail_dma_map_deflate_output_data:
+fail_deflate_dma_map_dce_data_output:
dma_unmap_dce_data(&def_process_req->input_data, DMA_BIDIRECTIONAL);
-fail_alloc_dce_data_output:
+fail_deflate_dma_map_dce_data_input:
+fail_deflate_copy_input_to_dce_data:
+ free_dce_data(&def_process_req->output_data);
+
+fail_deflate_alloc_dce_data_output:
free_dce_data(&def_process_req->input_data);
-fail_alloc_dce_data_input:
+fail_deflate_alloc_dce_data_input:
vfree(def_process_req->v_output);
-fail_v_output:
+fail_deflate_v_output:
kfree(def_process_req);
-fail_inflate_params:
- fsl_dce_chunk_destroy(&ctx->inflate_chunk, 0, NULL);
-
-fail_inflate_nop:
-fail_deflate_nop:
+fail_nop:
destroy_test_ctx(ctx);
fail_init_test_ctx:
return ret;
+
+/* This unwind block must come after the deflate failure section:
+ * falling through from those labels would dereference inf_process_req
+ * before it is allocated and cause a segfault. */
+fail_inflate_process:
+ detach_data_list_from_sg(&inf_process_req->dce_cf[1],
+ &inf_process_req->input_data,
+ DMA_BIDIRECTIONAL);
+
+fail_inflate_attach_data_list_to_sg_input:
+ detach_data_list_from_sg(&inf_process_req->dce_cf[0],
+ &inf_process_req->output_data,
+ DMA_BIDIRECTIONAL);
+
+fail_inflate_attach_data_list_to_sg_output:
+ dma_unmap_dce_data(&inf_process_req->output_data, DMA_BIDIRECTIONAL);
+
+fail_inflate_dma_map_dce_data_output:
+ dma_unmap_dce_data(&inf_process_req->input_data, DMA_BIDIRECTIONAL);
+
+fail_inflate_dma_map_dce_data_input:
+fail_inflate_copy_input_to_dce_data:
+ free_dce_data(&inf_process_req->output_data);
+
+fail_inflate_alloc_dce_data_output:
+ free_dce_data(&inf_process_req->input_data);
+
+fail_inflate_alloc_dce_data_input:
+ vfree(inf_process_req->v_output);
+
+fail_inflate_v_output:
+ kfree(inf_process_req);
+
+fail_inflate_params:
+ destroy_test_ctx(ctx);
+ return ret;
}
struct test_meta_info_t {
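The relabeled error paths above follow the standard kernel unwind ladder: one label per acquired resource, named after the step that failed, releasing everything earlier in reverse order, with ret set before every jump (the previous code leaked resources by returning directly). A self-contained model of the shape in plain C, names invented:

#include <stdlib.h>

static int demo_two_buffers(void)
{
        int ret;
        char *in, *out;

        in = malloc(64);
        if (!in) {
                ret = -1;
                goto fail_alloc_in;     /* nothing acquired yet */
        }

        out = malloc(64);
        if (!out) {
                ret = -1;
                goto fail_alloc_out;    /* must undo 'in' only */
        }

        /* ... use in/out ... */

        free(out);
        free(in);
        return 0;

fail_alloc_out:
        free(in);
fail_alloc_in:
        return ret;
}

int main(void)
{
        return demo_two_buffers() ? EXIT_FAILURE : EXIT_SUCCESS;
}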
diff --git a/drivers/staging/fsl_dce/tests/performance_simple/dce_perf_simple.c b/drivers/staging/fsl_dce/tests/performance_simple/dce_perf_simple.c
index d91c42c..388f420 100644
--- a/drivers/staging/fsl_dce/tests/performance_simple/dce_perf_simple.c
+++ b/drivers/staging/fsl_dce/tests/performance_simple/dce_perf_simple.c
@@ -816,8 +816,10 @@ static int do_operation(void)
pr_err("fsl_dce_chunk_deflate_params failed %d\n", ret);
def_process_req = kzalloc(sizeof(*def_process_req), GFP_KERNEL);
- if (!def_process_req)
+ if (!def_process_req) {
pr_err("Line %d\n", __LINE__);
+ return -ENOMEM;
+ }
init_completion(&def_process_req->cb_done);
@@ -951,8 +953,10 @@ done:
if (fsl_dce_get_status(def_process_req->output_fd.status) != STREAM_END)
goto skip_output_copy;
test_data->out_data = vmalloc(def_process_req->dce_cf[0].length);
- if (!test_data->out_data)
+ if (!test_data->out_data) {
pr_err("Unable to allocate output data\n");
+ return -ENOMEM;
+ }
test_data->out_data_len = def_process_req->dce_cf[0].length;
if (!bman_output) {
diff --git a/drivers/staging/fsl_dce/tests/performance_simple/dce_sf_perf_simple.c b/drivers/staging/fsl_dce/tests/performance_simple/dce_sf_perf_simple.c
index d0e5fe0..10154b1 100644
--- a/drivers/staging/fsl_dce/tests/performance_simple/dce_sf_perf_simple.c
+++ b/drivers/staging/fsl_dce/tests/performance_simple/dce_sf_perf_simple.c
@@ -926,8 +926,10 @@ static int do_operation(void)
for (i = 0; i < chunk_count; i++) {
def_process_req = kzalloc(sizeof(*def_process_req), GFP_KERNEL);
- if (!def_process_req)
+ if (!def_process_req) {
pr_err("Line %d\n", __LINE__);
+ return -ENOMEM;
+ }
def_process_req->extra_data_size = i;
@@ -1065,6 +1067,11 @@ try_again:
}
i++;
}
+
+ if (!def_process_req) {
+ pr_err("Line %d\n", __LINE__);
+ return -EINVAL;
+ }
/* wait for last request to be processed */
wait_for_completion(&def_process_req->cb_done);
end_time = mfatb();
@@ -1092,8 +1099,10 @@ done:
pr_info("Total output required %d\n", total_out);
test_data->out_data_len = total_out;
test_data->out_data = vmalloc(total_out);
- if (!test_data->out_data)
+ if (!test_data->out_data) {
pr_err("vmalloc FAILED\n");
+ return -ENOMEM;
+ }
p_out = test_data->out_data;
/* copy output */
diff --git a/drivers/staging/fsl_pme2/pme2_ctrl.c b/drivers/staging/fsl_pme2/pme2_ctrl.c
index 388162b..56e2443 100644
--- a/drivers/staging/fsl_pme2/pme2_ctrl.c
+++ b/drivers/staging/fsl_pme2/pme2_ctrl.c
@@ -138,6 +138,10 @@ static __init int parse_mem_property(struct device_node *node, const char *name,
} else if (zero) {
/* map as cacheable, non-guarded */
void *tmpp = ioremap_prot(*addr, *sz, 0);
+ if (!tmpp) {
+ pr_err("pme: Failed to remap\n");
+ return -EINVAL;
+ }
memset(tmpp, 0, *sz);
iounmap(tmpp);
}
diff --git a/drivers/staging/fsl_pme2/pme2_db.c b/drivers/staging/fsl_pme2/pme2_db.c
index 50263d5..0cc13fc 100644
--- a/drivers/staging/fsl_pme2/pme2_db.c
+++ b/drivers/staging/fsl_pme2/pme2_db.c
@@ -295,6 +295,7 @@ comp_frame_free_rx:
kfree(rx_data);
goto unmap_input_frame;
single_frame_unmap_frame:
+ kfree(rx_data);
unmap_input_frame:
free_tx_data:
kfree(tx_data);
diff --git a/drivers/staging/fsl_pme2/pme2_high.c b/drivers/staging/fsl_pme2/pme2_high.c
index 12ab7a1..e39bfc6 100644
--- a/drivers/staging/fsl_pme2/pme2_high.c
+++ b/drivers/staging/fsl_pme2/pme2_high.c
@@ -933,6 +933,7 @@ static inline void cb_helper(__always_unused struct qman_portal *portal,
if (error)
do_flags(ctx, 0, 0, PME_CTX_FLAG_DEAD, 0);
token = pop_matching_token(ctx, fd);
+ BUG_ON(!token);
if (likely(token->cmd_type == pme_cmd_scan)) {
ctx->cb(ctx, fd, token);
} else if (token->cmd_type == pme_cmd_pmtcc) {
@@ -1002,6 +1003,7 @@ static void cb_ern(__always_unused struct qman_portal *portal,
ctx = data->parent;
token = pop_matching_token(ctx, &mr->ern.fd);
+ BUG_ON(!token);
if (likely(token->cmd_type == pme_cmd_scan)) {
BUG_ON(!ctx->ern_cb);
ctx->ern_cb(ctx, mr, token);
diff --git a/drivers/staging/fsl_qbman/fsl_usdpaa.c b/drivers/staging/fsl_qbman/fsl_usdpaa.c
index 55223cd..0c54657 100644
--- a/drivers/staging/fsl_qbman/fsl_usdpaa.c
+++ b/drivers/staging/fsl_qbman/fsl_usdpaa.c
@@ -20,6 +20,7 @@
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/mman.h>
+#include <mm/mmu_decl.h>
/* Physical address range of the memory reservation, exported for mm/mem.c */
static u64 phys_start;
@@ -1117,6 +1118,7 @@ static long ioctl_dma_unmap(struct ctx *ctx, void __user *arg)
struct mem_fragment *current_frag;
size_t sz;
unsigned long base;
+ unsigned long vaddr;
down_write(&current->mm->mmap_sem);
vma = find_vma(current->mm, (unsigned long)arg);
@@ -1150,9 +1152,19 @@ map_match:
}
current_frag = map->root_frag;
+ vaddr = (unsigned long) map->virt_addr;
for (i = 0; i < map->frag_count; i++) {
DPA_ASSERT(current_frag->refs > 0);
--current_frag->refs;
+ /*
+ * Make sure we invalidate the TLB entry for
+ * this fragment, otherwise a remap of a different
+ * page to this vaddr would give access to an
+ * incorrect piece of memory
+ */
+ cleartlbcam(vaddr, mfspr(SPRN_PID));
+
+ vaddr += current_frag->len;
current_frag = list_entry(current_frag->list.prev,
struct mem_fragment, list);
}
diff --git a/drivers/staging/fsl_qbman/qman_high.c b/drivers/staging/fsl_qbman/qman_high.c
index d17d301..80604b7 100644
--- a/drivers/staging/fsl_qbman/qman_high.c
+++ b/drivers/staging/fsl_qbman/qman_high.c
@@ -3397,7 +3397,7 @@ int qman_ceetm_sp_release(struct qm_ceetm_sp *sp)
{
struct qm_ceetm_sp *p;
- if (sp->lni->is_claimed == 1) {
+ if (sp->lni && sp->lni->is_claimed == 1) {
pr_err("The dependency of sub-portal has not been released!\n");
return -EBUSY;
}
@@ -4456,7 +4456,9 @@ int qman_ceetm_cq_claim(struct qm_ceetm_cq **cq,
if (qman_ceetm_configure_cq(&cq_config)) {
pr_err("Can't configure the CQ#%d with CCGRID#%d\n",
idx, ccg->idx);
- return -EINVAL;
+ list_del(&p->node);
+ kfree(p);
+ return -EINVAL;
}
}
@@ -4503,6 +4505,8 @@ int qman_ceetm_cq_claim_A(struct qm_ceetm_cq **cq,
if (qman_ceetm_configure_cq(&cq_config)) {
pr_err("Can't configure the CQ#%d with CCGRID#%d\n",
idx, ccg->idx);
+ list_del(&p->node);
+ kfree(p);
return -EINVAL;
}
}
@@ -4549,6 +4553,8 @@ int qman_ceetm_cq_claim_B(struct qm_ceetm_cq **cq,
if (qman_ceetm_configure_cq(&cq_config)) {
pr_err("Can't configure the CQ#%d with CCGRID#%d\n",
idx, ccg->idx);
+ list_del(&p->node);
+ kfree(p);
return -EINVAL;
}
}
@@ -4814,6 +4820,8 @@ int qman_ceetm_lfq_claim(struct qm_ceetm_lfq **lfq,
if (qman_ceetm_configure_lfqmt(&lfqmt_config)) {
pr_err("Can't configure LFQMT for LFQID#%d @ CQ#%d\n",
lfqid, cq->idx);
+ list_del(&p->node);
+ kfree(p);
return -EINVAL;
}
*lfq = p;
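Each of the qman_ceetm_cq_claim*() fixes above applies the same rule: once the newly claimed node has been linked into a list, a later hardware-configuration failure must unlink and free it before returning, or the list keeps a stale, half-initialized entry. A hedged sketch of the pattern, with invented names and an assumed configure step:

#include <linux/list.h>
#include <linux/slab.h>

struct demo_cq {
        struct list_head node;
        int idx;
};

static int demo_configure(struct demo_cq *p);   /* assumed HW step */

static int demo_cq_claim(struct list_head *cqs, int idx)
{
        struct demo_cq *p = kzalloc(sizeof(*p), GFP_KERNEL);

        if (!p)
                return -ENOMEM;
        p->idx = idx;
        list_add_tail(&p->node, cqs);

        if (demo_configure(p)) {
                /* Undo the claim: unlink before freeing so the list
                 * never points at freed memory. */
                list_del(&p->node);
                kfree(p);
                return -EINVAL;
        }
        return 0;
}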