summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJ. German Rivera <Jose.G.Rivera@freescale.com>2013-08-27 19:19:56 (GMT)
committerJ. German Rivera <German.Rivera@freescale.com>2013-08-27 19:19:56 (GMT)
commita133e4d9e7412890f61a978a24f80fc085b4eca5 (patch)
tree560a7268eb1d72b32a30c6875f2317697da612aa
parenta4460591eff829d4805dfe2bdf43c041899ad4f2 (diff)
parent0ec09b835c42ccdd05745ecb3f086df57f9b50b9 (diff)
downloadlinux-fsl-qoriq-a133e4d9e7412890f61a978a24f80fc085b4eca5.tar.xz
Merge branch 'sdk-v1.4.x' into sdk-kernel-3.8
-rw-r--r--arch/powerpc/Makefile1
-rw-r--r--arch/powerpc/boot/dts/fsl/b4420si-post.dtsi8
-rw-r--r--arch/powerpc/boot/dts/fsl/b4420si-pre.dtsi23
-rw-r--r--arch/powerpc/boot/dts/fsl/b4860si-post.dtsi20
-rw-r--r--arch/powerpc/boot/dts/fsl/b4860si-pre.dtsi59
-rw-r--r--arch/powerpc/boot/dts/fsl/b4si-post.dtsi5
-rw-r--r--arch/powerpc/configs/85xx/e6500rev2_defconfig2
-rw-r--r--arch/powerpc/include/asm/cputable.h4
-rw-r--r--arch/powerpc/include/asm/ppc_asm.h2
-rw-r--r--arch/powerpc/include/asm/reg.h4
-rw-r--r--arch/powerpc/include/asm/reg_booke.h5
-rw-r--r--arch/powerpc/platforms/85xx/b4_qds.c11
-rw-r--r--arch/powerpc/platforms/85xx/common.c106
-rw-r--r--arch/powerpc/platforms/85xx/p2041_rdb.c11
-rw-r--r--arch/powerpc/platforms/85xx/p3041_ds.c11
-rw-r--r--arch/powerpc/platforms/85xx/p4080_ds.c11
-rw-r--r--arch/powerpc/platforms/85xx/p5020_ds.c11
-rw-r--r--arch/powerpc/platforms/85xx/p5040_ds.c11
-rw-r--r--arch/powerpc/platforms/85xx/smp.c33
-rw-r--r--arch/powerpc/platforms/85xx/t4240_qds.c11
-rw-r--r--drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_cc.c1016
-rw-r--r--drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_cc.h11
-rw-r--r--drivers/net/ethernet/freescale/fman/inc/Peripherals/fm_pcd_ext.h56
-rw-r--r--drivers/net/ethernet/freescale/fman/inc/integrations/T4240/dpaa_integration_ext.h3
-rw-r--r--drivers/net/ethernet/freescale/fman/src/inc/wrapper/lnxwrp_exp_sym.h2
-rw-r--r--drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm.c149
-rw-r--r--drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm_compat.c20
-rw-r--r--drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm_compat.h11
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c31
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h3
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c30
-rw-r--r--drivers/staging/fsl_dpa_offload/dpa_classifier.c23
-rw-r--r--drivers/staging/fsl_dpa_offload/dpa_classifier.h9
-rw-r--r--drivers/staging/fsl_dpa_offload/dpa_stats.c695
-rw-r--r--drivers/staging/fsl_dpa_offload/dpa_stats.h1
-rw-r--r--drivers/staging/fsl_dpa_offload/dpa_stats_ioctl.h12
-rw-r--r--drivers/staging/fsl_dpa_offload/dts/b4860qds-usdpaa-shared-interfaces.dts2
-rw-r--r--drivers/staging/fsl_dpa_offload/wrp_dpa_stats.c950
-rw-r--r--drivers/staging/fsl_qbman/bman_driver.c25
-rw-r--r--drivers/staging/fsl_qbman/bman_private.h1
-rw-r--r--drivers/staging/fsl_qbman/fsl_usdpaa.c358
-rw-r--r--drivers/staging/fsl_qbman/qman_driver.c27
-rw-r--r--drivers/staging/fsl_qbman/qman_high.c34
-rw-r--r--drivers/staging/fsl_qbman/qman_low.h40
-rw-r--r--drivers/staging/fsl_qbman/qman_private.h2
-rw-r--r--include/linux/fsl_dpa_stats.h67
-rw-r--r--include/linux/fsl_usdpaa.h12
-rw-r--r--include/uapi/linux/fmd/Peripherals/fm_pcd_ioctls.h74
48 files changed, 2774 insertions, 1239 deletions
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 82808b5..50b626c 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -91,7 +91,6 @@ CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions)
CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 -mmultiple
-CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power7,-mtune=power4)
CFLAGS-$(CONFIG_CELL_CPU) += $(call cc-option,-mcpu=cell)
CFLAGS-$(CONFIG_POWER4_CPU) += $(call cc-option,-mcpu=power4)
CFLAGS-$(CONFIG_POWER5_CPU) += $(call cc-option,-mcpu=power5)
diff --git a/arch/powerpc/boot/dts/fsl/b4420si-post.dtsi b/arch/powerpc/boot/dts/fsl/b4420si-post.dtsi
index f039c52..0198d22 100644
--- a/arch/powerpc/boot/dts/fsl/b4420si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/b4420si-post.dtsi
@@ -318,5 +318,13 @@
L2: l2-cache-controller@c20000 {
compatible = "fsl,b4420-l2-cache-controller";
+ reg = <0xc20000 0x1000>;
+ next-level-cache = <&cpc>;
+ };
+
+ L2_2: l2-cache-controller@c60000 {
+ compatible = "fsl,b4420-l2-cache-controller";
+ reg = <0xc60000 0x1000>;
+ next-level-cache = <&cpc>;
};
};
diff --git a/arch/powerpc/boot/dts/fsl/b4420si-pre.dtsi b/arch/powerpc/boot/dts/fsl/b4420si-pre.dtsi
index d56ac22..ee7263b 100644
--- a/arch/powerpc/boot/dts/fsl/b4420si-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/b4420si-pre.dtsi
@@ -90,4 +90,27 @@
next-level-cache = <&L2>;
};
};
+
+ dsp-clusters {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dsp-cluster0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,sc3900-cluster";
+ reg = <0>;
+
+ dsp0: dsp@0 {
+ compatible = "fsl,sc3900";
+ reg = <0>;
+ next-level-cache = <&L2_2>;
+ };
+ dsp1: dsp@1 {
+ compatible = "fsl,sc3900";
+ reg = <1>;
+ next-level-cache = <&L2_2>;
+ };
+ };
+ };
};
diff --git a/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi b/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi
index d2192e7..4d35d4f 100644
--- a/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi
@@ -516,6 +516,26 @@
L2: l2-cache-controller@c20000 {
compatible = "fsl,b4860-l2-cache-controller";
+ reg = <0xc20000 0x1000>;
+ next-level-cache = <&cpc>;
+ };
+
+ L2_2: l2-cache-controller@c60000 {
+ compatible = "fsl,b4860-l2-cache-controller";
+ reg = <0xc60000 0x1000>;
+ next-level-cache = <&cpc>;
+ };
+
+ L2_3: l2-cache-controller@ca0000 {
+ compatible = "fsl,b4860-l2-cache-controller";
+ reg = <0xca0000 0x1000>;
+ next-level-cache = <&cpc>;
+ };
+
+ L2_4: l2-cache-controller@ce0000 {
+ compatible = "fsl,b4860-l2-cache-controller";
+ reg = <0xce0000 0x1000>;
+ next-level-cache = <&cpc>;
};
/include/ "qoriq-rman-0.dtsi"
diff --git a/arch/powerpc/boot/dts/fsl/b4860si-pre.dtsi b/arch/powerpc/boot/dts/fsl/b4860si-pre.dtsi
index 6c85eca..e344468 100644
--- a/arch/powerpc/boot/dts/fsl/b4860si-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/b4860si-pre.dtsi
@@ -105,4 +105,63 @@
next-level-cache = <&L2>;
};
};
+
+ dsp-clusters {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dsp-cluster0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,sc3900-cluster";
+ reg = <0>;
+
+ dsp0: dsp@0 {
+ compatible = "fsl,sc3900";
+ reg = <0>;
+ next-level-cache = <&L2_2>;
+ };
+ dsp1: dsp@1 {
+ compatible = "fsl,sc3900";
+ reg = <1>;
+ next-level-cache = <&L2_2>;
+ };
+ };
+
+ dsp-cluster1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,sc3900-cluster";
+ reg = <1>;
+
+ dsp2: dsp@2 {
+ compatible = "fsl,sc3900";
+ reg = <2>;
+ next-level-cache = <&L2_3>;
+ };
+ dsp3: dsp@3 {
+ compatible = "fsl,sc3900";
+ reg = <3>;
+ next-level-cache = <&L2_3>;
+ };
+ };
+
+ dsp-cluster2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,sc3900-cluster";
+ reg = <2>;
+
+ dsp4: dsp@4 {
+ compatible = "fsl,sc3900";
+ reg = <4>;
+ next-level-cache = <&L2_4>;
+ };
+ dsp5: dsp@5 {
+ compatible = "fsl,sc3900";
+ reg = <5>;
+ next-level-cache = <&L2_4>;
+ };
+ };
+ };
};
diff --git a/arch/powerpc/boot/dts/fsl/b4si-post.dtsi b/arch/powerpc/boot/dts/fsl/b4si-post.dtsi
index 6e2781e..55ced62 100644
--- a/arch/powerpc/boot/dts/fsl/b4si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/b4si-post.dtsi
@@ -329,9 +329,4 @@
/include/ "qoriq-duart-1.dtsi"
/include/ "qoriq-sec5.3-0.dtsi"
- L2: l2-cache-controller@c20000 {
- compatible = "fsl,b4-l2-cache-controller";
- reg = <0xc20000 0x1000>;
- next-level-cache = <&cpc>;
- };
};
diff --git a/arch/powerpc/configs/85xx/e6500rev2_defconfig b/arch/powerpc/configs/85xx/e6500rev2_defconfig
index 26a586a..819d403 100644
--- a/arch/powerpc/configs/85xx/e6500rev2_defconfig
+++ b/arch/powerpc/configs/85xx/e6500rev2_defconfig
@@ -1,5 +1,7 @@
CONFIG_PPC64=y
CONFIG_PPC_BOOK3E_64=y
+# CONFIG_FSL_ERRATUM_A_004801 is not set
+# CONFIG_FSL_ERRATUM_A_005337 is not set
CONFIG_ALTIVEC=y
CONFIG_SMP=y
CONFIG_NR_CPUS=24
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index b7d2747..e354127 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -369,12 +369,12 @@ extern const char *powerpc_base_platform;
#define CPU_FTRS_E5500 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
- CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV)
+ CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV | CPU_FTR_CELL_TB_BUG)
#define CPU_FTRS_E6500 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV | CPU_FTR_ALTIVEC_COMP | \
- CPU_FTR_SMT)
+ CPU_FTR_CELL_TB_BUG | CPU_FTR_SMT)
#define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
/* 64-bit CPUs */
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 635fc0b..3a853ae 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -362,7 +362,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_601)
#define ISYNC_601
#endif
-#ifdef CONFIG_PPC_CELL
+#if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E)
#define MFTB(dest) \
90: mftb dest; \
BEGIN_FTR_SECTION_NESTED(96); \
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index f1b79a6..16fbb66 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -995,6 +995,8 @@
#define PVR_8560 0x80200000
#define PVR_VER_E500V1 0x8020
#define PVR_VER_E500V2 0x8021
+#define PVR_VER_E6500 0x8040
+
/*
* For the 8xx processors, all of them report the same PVR family for
* the PowerPC core. The various versions of these processors must be
@@ -1058,7 +1060,7 @@
: "memory")
#ifdef __powerpc64__
-#ifdef CONFIG_PPC_CELL
+#if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E)
#define mftb() ({unsigned long rval; \
asm volatile( \
"90: mftb %0;\n" \
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 2bc7704..5152c36 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -179,6 +179,7 @@
#define SPRN_L2CSR1 0x3FA /* L2 Data Cache Control and Status Register 1 */
#define SPRN_DCCR 0x3FA /* Data Cache Cacheability Register */
#define SPRN_ICCR 0x3FB /* Instruction Cache Cacheability Register */
+#define SPRN_PWRMGTCR0 0x3FB /* Power management control register 0 */
#define SPRN_SVR 0x3FF /* System Version Register */
/*
@@ -225,6 +226,10 @@
#define CCR1_DPC 0x00000100 /* Disable L1 I-Cache/D-Cache parity checking */
#define CCR1_TCS 0x00000080 /* Timer Clock Select */
+/* Bit definitions for PWRMGTCR0. */
+#define PWRMGTCR0_ALTIVEC_IDLE (1 << 22) /* Altivec idle enable */
+#define PWRMGTCR0_PW20_WAIT (1 << 14) /* PW20 state enable bit */
+
/* Bit definitions for the MCSR. */
#define MCSR_MCS 0x80000000 /* Machine Check Summary */
#define MCSR_IB 0x40000000 /* Instruction PLB Error */
diff --git a/arch/powerpc/platforms/85xx/b4_qds.c b/arch/powerpc/platforms/85xx/b4_qds.c
index b497271..d4b6a2c 100644
--- a/arch/powerpc/platforms/85xx/b4_qds.c
+++ b/arch/powerpc/platforms/85xx/b4_qds.c
@@ -39,9 +39,6 @@
static int __init b4_qds_probe(void)
{
unsigned long root = of_get_flat_dt_root();
-#ifdef CONFIG_SMP
- extern struct smp_ops_t smp_85xx_ops;
-#endif
if ((of_flat_dt_is_compatible(root, "fsl,B4860QDS")) ||
(of_flat_dt_is_compatible(root, "fsl,B4420QDS")) ||
@@ -57,14 +54,6 @@ static int __init b4_qds_probe(void)
ppc_md.restart = fsl_hv_restart;
ppc_md.power_off = fsl_hv_halt;
ppc_md.halt = fsl_hv_halt;
-#ifdef CONFIG_SMP
- /*
- * Disable the timebase sync operations because we can't write
- * to the timebase registers under the hypervisor.
- */
- smp_85xx_ops.give_timebase = NULL;
- smp_85xx_ops.take_timebase = NULL;
-#endif
return 1;
}
diff --git a/arch/powerpc/platforms/85xx/common.c b/arch/powerpc/platforms/85xx/common.c
index 1269a10..09978f5 100644
--- a/arch/powerpc/platforms/85xx/common.c
+++ b/arch/powerpc/platforms/85xx/common.c
@@ -7,10 +7,32 @@
*/
#include <linux/of_platform.h>
+#include <asm/time.h>
+
#include <sysdev/cpm2_pic.h>
#include "mpc85xx.h"
+#define MAX_BIT 64
+
+#define ALTIVEC_COUNT_OFFSET 16
+#define ALTIVEC_IDLE_COUNT_MASK 0x003f0000
+#define PW20_COUNT_OFFSET 8
+#define PW20_IDLE_COUNT_MASK 0x00003f00
+
+/*
+ * FIXME - We don't know the AltiVec application scenarios.
+ */
+#define ALTIVEC_IDLE_TIME 1000 /* 1ms */
+
+/*
+ * FIXME - We don't know, what time should we let the core into PW20 state.
+ * because we don't know the current state of the cpu load. And threads are
+ * independent, so we can not know the state of different thread has been
+ * idle.
+ */
+#define PW20_IDLE_TIME 1000 /* 1ms */
+
static struct of_device_id __initdata mpc85xx_common_ids[] = {
{ .type = "soc", },
{ .compatible = "soc", },
@@ -82,3 +104,87 @@ void __init mpc85xx_cpm2_pic_init(void)
irq_set_chained_handler(irq, cpm2_cascade);
}
#endif
+
+static bool has_pw20_altivec_idle(void)
+{
+ u32 pvr;
+
+ pvr = mfspr(SPRN_PVR);
+
+ /* PW20 & AltiVec idle feature only exists for E6500 */
+ if (PVR_VER(pvr) != PVR_VER_E6500)
+ return false;
+
+ /* Fix erratum, e6500 rev1 does not support PW20 & AltiVec idle */
+ if (PVR_REV(pvr) < 0x20)
+ return false;
+
+ return true;
+}
+
+static unsigned int get_idle_ticks_bit(unsigned int us)
+{
+ unsigned int cycle;
+
+ /*
+ * The time control by TB turn over bit, so we need
+ * to be divided by 2.
+ */
+ cycle = (us / 2) * tb_ticks_per_usec;
+
+ return ilog2(cycle) + 1;
+}
+
+static void setup_altivec_idle(void *unused)
+{
+ u32 altivec_idle, bit;
+
+ if (!has_pw20_altivec_idle())
+ return;
+
+ /* Enable Altivec Idle */
+ altivec_idle = mfspr(SPRN_PWRMGTCR0);
+ altivec_idle |= PWRMGTCR0_ALTIVEC_IDLE;
+
+ /* Set Automatic AltiVec Idle Count */
+ /* clear count */
+ altivec_idle &= ~ALTIVEC_IDLE_COUNT_MASK;
+
+ /* set count */
+ bit = get_idle_ticks_bit(ALTIVEC_IDLE_TIME);
+ altivec_idle |= ((MAX_BIT - bit) << ALTIVEC_COUNT_OFFSET);
+
+ mtspr(SPRN_PWRMGTCR0, altivec_idle);
+}
+
+static void setup_pw20_idle(void *unused)
+{
+ u32 pw20_idle, bit;
+
+ if (!has_pw20_altivec_idle())
+ return;
+
+ pw20_idle = mfspr(SPRN_PWRMGTCR0);
+
+ /* set PW20_WAIT bit, enable pw20 */
+ pw20_idle |= PWRMGTCR0_PW20_WAIT;
+
+ /* Set Automatic PW20 Core Idle Count */
+ /* clear count */
+ pw20_idle &= ~PW20_IDLE_COUNT_MASK;
+
+ /* set count */
+ bit = get_idle_ticks_bit(PW20_IDLE_TIME);
+ pw20_idle |= ((MAX_BIT - bit) << PW20_COUNT_OFFSET);
+
+ mtspr(SPRN_PWRMGTCR0, pw20_idle);
+}
+
+static int __init setup_idle_hw_governor(void)
+{
+ on_each_cpu(setup_altivec_idle, NULL, 1);
+ on_each_cpu(setup_pw20_idle, NULL, 1);
+
+ return 0;
+}
+late_initcall(setup_idle_hw_governor);
diff --git a/arch/powerpc/platforms/85xx/p2041_rdb.c b/arch/powerpc/platforms/85xx/p2041_rdb.c
index 6112b09..9ee8758 100644
--- a/arch/powerpc/platforms/85xx/p2041_rdb.c
+++ b/arch/powerpc/platforms/85xx/p2041_rdb.c
@@ -37,9 +37,6 @@
static int __init p2041_rdb_probe(void)
{
unsigned long root = of_get_flat_dt_root();
-#ifdef CONFIG_SMP
- extern struct smp_ops_t smp_85xx_ops;
-#endif
if (of_flat_dt_is_compatible(root, "fsl,P2041RDB"))
return 1;
@@ -51,14 +48,6 @@ static int __init p2041_rdb_probe(void)
ppc_md.restart = fsl_hv_restart;
ppc_md.power_off = fsl_hv_halt;
ppc_md.halt = fsl_hv_halt;
-#ifdef CONFIG_SMP
- /*
- * Disable the timebase sync operations because we can't write
- * to the timebase registers under the hypervisor.
- */
- smp_85xx_ops.give_timebase = NULL;
- smp_85xx_ops.take_timebase = NULL;
-#endif
return 1;
}
diff --git a/arch/powerpc/platforms/85xx/p3041_ds.c b/arch/powerpc/platforms/85xx/p3041_ds.c
index d0517bd..b8af7fb 100644
--- a/arch/powerpc/platforms/85xx/p3041_ds.c
+++ b/arch/powerpc/platforms/85xx/p3041_ds.c
@@ -39,9 +39,6 @@
static int __init p3041_ds_probe(void)
{
unsigned long root = of_get_flat_dt_root();
-#ifdef CONFIG_SMP
- extern struct smp_ops_t smp_85xx_ops;
-#endif
if (of_flat_dt_is_compatible(root, "fsl,P3041DS"))
return 1;
@@ -53,14 +50,6 @@ static int __init p3041_ds_probe(void)
ppc_md.restart = fsl_hv_restart;
ppc_md.power_off = fsl_hv_halt;
ppc_md.halt = fsl_hv_halt;
-#ifdef CONFIG_SMP
- /*
- * Disable the timebase sync operations because we can't write
- * to the timebase registers under the hypervisor.
- */
- smp_85xx_ops.give_timebase = NULL;
- smp_85xx_ops.take_timebase = NULL;
-#endif
return 1;
}
diff --git a/arch/powerpc/platforms/85xx/p4080_ds.c b/arch/powerpc/platforms/85xx/p4080_ds.c
index 15be644..d83b8d0 100644
--- a/arch/powerpc/platforms/85xx/p4080_ds.c
+++ b/arch/powerpc/platforms/85xx/p4080_ds.c
@@ -38,9 +38,6 @@
static int __init p4080_ds_probe(void)
{
unsigned long root = of_get_flat_dt_root();
-#ifdef CONFIG_SMP
- extern struct smp_ops_t smp_85xx_ops;
-#endif
if (of_flat_dt_is_compatible(root, "fsl,P4080DS"))
return 1;
@@ -52,14 +49,6 @@ static int __init p4080_ds_probe(void)
ppc_md.restart = fsl_hv_restart;
ppc_md.power_off = fsl_hv_halt;
ppc_md.halt = fsl_hv_halt;
-#ifdef CONFIG_SMP
- /*
- * Disable the timebase sync operations because we can't write
- * to the timebase registers under the hypervisor.
- */
- smp_85xx_ops.give_timebase = NULL;
- smp_85xx_ops.take_timebase = NULL;
-#endif
return 1;
}
diff --git a/arch/powerpc/platforms/85xx/p5020_ds.c b/arch/powerpc/platforms/85xx/p5020_ds.c
index 7eccc1a..6e1ed9f 100644
--- a/arch/powerpc/platforms/85xx/p5020_ds.c
+++ b/arch/powerpc/platforms/85xx/p5020_ds.c
@@ -39,9 +39,6 @@
static int __init p5020_ds_probe(void)
{
unsigned long root = of_get_flat_dt_root();
-#ifdef CONFIG_SMP
- extern struct smp_ops_t smp_85xx_ops;
-#endif
if (of_flat_dt_is_compatible(root, "fsl,P5020DS"))
return 1;
@@ -53,14 +50,6 @@ static int __init p5020_ds_probe(void)
ppc_md.restart = fsl_hv_restart;
ppc_md.power_off = fsl_hv_halt;
ppc_md.halt = fsl_hv_halt;
-#ifdef CONFIG_SMP
- /*
- * Disable the timebase sync operations because we can't write
- * to the timebase registers under the hypervisor.
- */
- smp_85xx_ops.give_timebase = NULL;
- smp_85xx_ops.take_timebase = NULL;
-#endif
return 1;
}
diff --git a/arch/powerpc/platforms/85xx/p5040_ds.c b/arch/powerpc/platforms/85xx/p5040_ds.c
index e07fd7b..095806a 100644
--- a/arch/powerpc/platforms/85xx/p5040_ds.c
+++ b/arch/powerpc/platforms/85xx/p5040_ds.c
@@ -30,9 +30,6 @@
static int __init p5040_ds_probe(void)
{
unsigned long root = of_get_flat_dt_root();
-#ifdef CONFIG_SMP
- extern struct smp_ops_t smp_85xx_ops;
-#endif
if (of_flat_dt_is_compatible(root, "fsl,P5040DS"))
return 1;
@@ -44,14 +41,6 @@ static int __init p5040_ds_probe(void)
ppc_md.restart = fsl_hv_restart;
ppc_md.power_off = fsl_hv_halt;
ppc_md.halt = fsl_hv_halt;
-#ifdef CONFIG_SMP
- /*
- * Disable the timebase sync operations because we can't write
- * to the timebase registers under the hypervisor.
- */
- smp_85xx_ops.give_timebase = NULL;
- smp_85xx_ops.take_timebase = NULL;
-#endif
return 1;
}
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 30721ac..43cc5c9 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -130,7 +130,30 @@ static void __cpuinit mpc85xx_give_timebase(void)
tb_req = 0;
mpc85xx_timebase_freeze(1);
+#ifdef CONFIG_PPC64
+ /*
+ * e5500/e6500 have a workaround for erratum A-006958 in place
+ * that will reread the timebase until TBL is non-zero.
+ * That would be a bad thing when the timebase is frozen.
+ *
+ * Thus, we read it manually, and instead of checking that
+ * TBL is non-zero, we ensure that TB does not change. We don't
+ * do that for the main mftb implementation, because it requires
+ * a scratch register
+ */
+ {
+ u64 prev;
+
+ asm volatile("mftb %0" : "=r" (timebase));
+
+ do {
+ prev = timebase;
+ asm volatile("mftb %0" : "=r" (timebase));
+ } while (prev != timebase);
+ }
+#else
timebase = get_tb();
+#endif
mb();
tb_valid = 1;
@@ -212,7 +235,7 @@ void platform_cpu_die(unsigned int cpu)
/* enter PH20 status */
setbits32(&((struct ccsr_rcpm_v2 *)guts_regs)->pcph20setr,
1 << cpu_core_index_of_thread(hw_cpu));
- } else if (!rcpmv2) {
+ } else if (!rcpmv2 && guts_regs) {
rcpm = guts_regs;
/* Core Nap Operation */
setbits32(&rcpm->cnapcr, 1 << hw_cpu);
@@ -441,10 +464,6 @@ struct smp_ops_t smp_85xx_ops = {
.cpu_disable = generic_cpu_disable,
.cpu_die = generic_cpu_die,
#endif
-#ifdef CONFIG_KEXEC
- .give_timebase = smp_generic_give_timebase,
- .take_timebase = smp_generic_take_timebase,
-#endif
};
#ifdef CONFIG_KEXEC
@@ -587,6 +606,10 @@ void __init mpc85xx_smp_init(void)
smp_85xx_ops.cause_ipi = doorbell_cause_ipi;
}
+#ifdef CONFIG_HOTPLUG_CPU
+ ppc_md.cpu_die = generic_mach_cpu_die;
+#endif
+
np = of_find_matching_node(NULL, mpc85xx_smp_guts_ids);
if (np) {
if (of_device_is_compatible(np, "fsl,qoriq-rcpm-2.0"))
diff --git a/arch/powerpc/platforms/85xx/t4240_qds.c b/arch/powerpc/platforms/85xx/t4240_qds.c
index f4a215a..c649b48 100644
--- a/arch/powerpc/platforms/85xx/t4240_qds.c
+++ b/arch/powerpc/platforms/85xx/t4240_qds.c
@@ -39,9 +39,6 @@
static int __init t4240_qds_probe(void)
{
unsigned long root = of_get_flat_dt_root();
-#ifdef CONFIG_SMP
- extern struct smp_ops_t smp_85xx_ops;
-#endif
if (of_flat_dt_is_compatible(root, "fsl,T4240QDS"))
return 1;
@@ -53,14 +50,6 @@ static int __init t4240_qds_probe(void)
ppc_md.restart = fsl_hv_restart;
ppc_md.power_off = fsl_hv_halt;
ppc_md.halt = fsl_hv_halt;
-#ifdef CONFIG_SMP
- /*
- * Disable the timebase sync operations because we can't write
- * to the timebase registers under the hypervisor.
- */
- smp_85xx_ops.give_timebase = NULL;
- smp_85xx_ops.take_timebase = NULL;
-#endif
return 1;
}
diff --git a/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_cc.c b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_cc.c
index a68ff12..a8a52b2 100644
--- a/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_cc.c
+++ b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_cc.c
@@ -1192,6 +1192,10 @@ static void DeleteNode(t_FmPcdCcNode *p_CcNode)
p_CcNode->h_Spinlock = NULL;
}
+ /* Restore the original counters pointer instead of the mutual pointer (mutual to all hash buckets) */
+ if (p_CcNode->isHashBucket && (p_CcNode->statisticsMode != e_FM_PCD_CC_STATS_MODE_NONE))
+ p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].p_StatsObj->h_StatsCounters = p_CcNode->h_PrivMissStatsCounters;
+
/* Releasing all currently used statistics objects, including 'miss' entry */
for (i = 0; i < p_CcNode->numOfKeys + 1; i++)
if (p_CcNode->keyAndNextEngineParams[i].p_StatsObj)
@@ -1200,7 +1204,6 @@ static void DeleteNode(t_FmPcdCcNode *p_CcNode)
if (!LIST_IsEmpty(&p_CcNode->availableStatsLst))
{
t_Handle h_FmMuram = FmPcdGetMuramHandle(p_CcNode->h_FmPcd);
-
ASSERT_COND(h_FmMuram);
FreeStatObjects(&p_CcNode->availableStatsLst, h_FmMuram);
@@ -1490,7 +1493,7 @@ t_Error ValidateNextEngineParams(t_Handle h_FmPcd,
(p_FmPcdCcNextEngineParams->statisticsEn))
RETURN_ERROR(MAJOR, E_CONFLICT,
("Statistics are requested for a key, but statistics mode was set"
- "to 'NONE' upon initialization of this match table"));
+ "to 'NONE' upon initialization"));
switch (p_FmPcdCcNextEngineParams->nextEngine)
{
@@ -3397,13 +3400,18 @@ static t_Error ValidateAndCalcStatsParams(t_FmPcdCcNode *p_CcNode,
uint32_t *p_NumOfRanges,
uint32_t *p_CountersArraySize)
{
- e_FmPcdCcStatsMode statisticsMode = p_CcNode->statisticsMode;
+ e_FmPcdCcStatsMode statisticsMode = p_CcNode->statisticsMode;
+ uint32_t i;
UNUSED(p_CcNodeParam);
switch (statisticsMode)
{
case e_FM_PCD_CC_STATS_MODE_NONE:
+ for (i = 0; i < p_CcNode->numOfKeys; i++)
+ if (p_CcNodeParam->keysParams.keyParams[i].ccNextEngineParams.statisticsEn)
+ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
+ ("Statistics cannot be enabled for key %d when statistics mode was set to 'NONE'", i));
return E_OK;
case e_FM_PCD_CC_STATS_MODE_FRAME:
@@ -3983,9 +3991,6 @@ static t_Error MatchTableGetKeyStatistics(t_FmPcdCcNode *p_CcNode,
if (p_CcNode->statisticsMode == e_FM_PCD_CC_STATS_MODE_NONE)
RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Statistics were not enabled for this match table"));
- if (keyIndex >= p_CcNode->numOfKeys)
- RETURN_ERROR(MAJOR, E_INVALID_STATE, ("The provided keyIndex exceeds the number of keys in this match table"));
-
if (!p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj)
RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Statistics were not enabled for this key"));
@@ -5014,377 +5019,11 @@ void FmPcdCcGetAdTablesThatPointOnReplicGroup(t_Handle h_Node,
ASSERT_COND(i != p_CurrentNode->numOfKeys);
}
#endif /* (DPAA_VERSION >= 11) */
-/*********************** End of inter-module routines ************************/
-
-/****************************************/
-/* API Init unit functions */
-/****************************************/
-
-t_Handle FM_PCD_CcRootBuild(t_Handle h_FmPcd, t_FmPcdCcTreeParams *p_PcdGroupsParam)
-{
- t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
- t_Error err = E_OK;
- int i = 0, j = 0, k = 0;
- t_FmPcdCcTree *p_FmPcdCcTree;
- uint8_t numOfEntries;
- t_Handle p_CcTreeTmp;
- t_FmPcdCcGrpParams *p_FmPcdCcGroupParams;
- t_FmPcdCcKeyAndNextEngineParams *p_Params, *p_KeyAndNextEngineParams;
- t_NetEnvParams netEnvParams;
- uint8_t lastOne = 0;
- uint32_t requiredAction = 0;
- t_FmPcdCcNode *p_FmPcdCcNextNode;
- t_CcNodeInformation ccNodeInfo, *p_CcInformation;
-
- SANITY_CHECK_RETURN_VALUE(h_FmPcd,E_INVALID_HANDLE, NULL);
- SANITY_CHECK_RETURN_VALUE(p_PcdGroupsParam,E_INVALID_HANDLE, NULL);
-
- if (p_PcdGroupsParam->numOfGrps > FM_PCD_MAX_NUM_OF_CC_GROUPS)
- {
- REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("numOfGrps should not exceed %d", FM_PCD_MAX_NUM_OF_CC_GROUPS));
- return NULL;
- }
-
- p_FmPcdCcTree = (t_FmPcdCcTree*)XX_Malloc(sizeof(t_FmPcdCcTree));
- if (!p_FmPcdCcTree)
- {
- REPORT_ERROR(MAJOR, E_NO_MEMORY, ("PCD tree structure"));
- return NULL;
- }
- memset(p_FmPcdCcTree, 0, sizeof(t_FmPcdCcTree));
- p_FmPcdCcTree->h_FmPcd = h_FmPcd;
-
- p_Params = (t_FmPcdCcKeyAndNextEngineParams*)XX_Malloc(FM_PCD_MAX_NUM_OF_CC_GROUPS * sizeof(t_FmPcdCcKeyAndNextEngineParams));
- memset(p_Params, 0, FM_PCD_MAX_NUM_OF_CC_GROUPS * sizeof(t_FmPcdCcKeyAndNextEngineParams));
-
- INIT_LIST(&p_FmPcdCcTree->fmPortsLst);
-
-#ifdef FM_CAPWAP_SUPPORT
- if ((p_PcdGroupsParam->numOfGrps == 1) &&
- (p_PcdGroupsParam->ccGrpParams[0].numOfDistinctionUnits == 0) &&
- (p_PcdGroupsParam->ccGrpParams[0].nextEnginePerEntriesInGrp[0].nextEngine == e_FM_PCD_CC) &&
- p_PcdGroupsParam->ccGrpParams[0].nextEnginePerEntriesInGrp[0].params.ccParams.h_CcNode &&
- IsCapwapApplSpecific(p_PcdGroupsParam->ccGrpParams[0].nextEnginePerEntriesInGrp[0].params.ccParams.h_CcNode))
- {
- p_PcdGroupsParam->ccGrpParams[0].nextEnginePerEntriesInGrp[0].h_Manip = FmPcdManipApplSpecificBuild();
- if (!p_PcdGroupsParam->ccGrpParams[0].nextEnginePerEntriesInGrp[0].h_Manip)
- {
- DeleteTree(p_FmPcdCcTree,p_FmPcd);
- XX_Free(p_Params);
- REPORT_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
- return NULL;
- }
- }
-#endif /* FM_CAPWAP_SUPPORT */
-
- numOfEntries = 0;
- p_FmPcdCcTree->netEnvId = FmPcdGetNetEnvId(p_PcdGroupsParam->h_NetEnv);
-
- for (i = 0; i < p_PcdGroupsParam->numOfGrps; i++)
- {
- p_FmPcdCcGroupParams = &p_PcdGroupsParam->ccGrpParams[i];
-
- if (p_FmPcdCcGroupParams->numOfDistinctionUnits > FM_PCD_MAX_NUM_OF_CC_UNITS)
- {
- DeleteTree(p_FmPcdCcTree,p_FmPcd);
- XX_Free(p_Params);
- REPORT_ERROR(MAJOR, E_INVALID_VALUE,
- ("numOfDistinctionUnits (group %d) should not exceed %d", i, FM_PCD_MAX_NUM_OF_CC_UNITS));
- return NULL;
- }
-
- p_FmPcdCcTree->fmPcdGroupParam[i].baseGroupEntry = numOfEntries;
- p_FmPcdCcTree->fmPcdGroupParam[i].numOfEntriesInGroup =(uint8_t)( 0x01 << p_FmPcdCcGroupParams->numOfDistinctionUnits);
- numOfEntries += p_FmPcdCcTree->fmPcdGroupParam[i].numOfEntriesInGroup;
- if (numOfEntries > FM_PCD_MAX_NUM_OF_CC_GROUPS)
- {
- DeleteTree(p_FmPcdCcTree,p_FmPcd);
- XX_Free(p_Params);
- REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("numOfEntries can not be larger than %d", FM_PCD_MAX_NUM_OF_CC_GROUPS));
- return NULL;
- }
-
- if (lastOne)
- {
- if (p_FmPcdCcTree->fmPcdGroupParam[i].numOfEntriesInGroup > lastOne)
- {
- DeleteTree(p_FmPcdCcTree,p_FmPcd);
- XX_Free(p_Params);
- REPORT_ERROR(MAJOR, E_CONFLICT, ("numOfEntries per group must be set in descending order"));
- return NULL;
- }
- }
-
- lastOne = p_FmPcdCcTree->fmPcdGroupParam[i].numOfEntriesInGroup;
-
- netEnvParams.netEnvId = p_FmPcdCcTree->netEnvId;
- netEnvParams.numOfDistinctionUnits = p_FmPcdCcGroupParams->numOfDistinctionUnits;
-
- memcpy(netEnvParams.unitIds,
- &p_FmPcdCcGroupParams->unitIds,
- (sizeof(uint8_t)) * p_FmPcdCcGroupParams->numOfDistinctionUnits);
-
- err = PcdGetUnitsVector(p_FmPcd, &netEnvParams);
- if (err)
- {
- DeleteTree(p_FmPcdCcTree,p_FmPcd);
- XX_Free(p_Params);
- REPORT_ERROR(MAJOR, err, NO_MSG);
- return NULL;
- }
-
- p_FmPcdCcTree->fmPcdGroupParam[i].totalBitsMask = netEnvParams.vector;
- for (j = 0; j < p_FmPcdCcTree->fmPcdGroupParam[i].numOfEntriesInGroup; j++)
- {
- err = ValidateNextEngineParams(h_FmPcd,
- &p_FmPcdCcGroupParams->nextEnginePerEntriesInGrp[j],
- e_FM_PCD_CC_STATS_MODE_NONE);
- if (err)
- {
- DeleteTree(p_FmPcdCcTree,p_FmPcd);
- XX_Free(p_Params);
- REPORT_ERROR(MAJOR, err, (NO_MSG));
- return NULL;
- }
-
- if (p_FmPcdCcGroupParams->nextEnginePerEntriesInGrp[j].h_Manip)
- {
- err = FmPcdManipCheckParamsForCcNextEngine(&p_FmPcdCcGroupParams->nextEnginePerEntriesInGrp[j], &requiredAction);
- if (err)
- {
- DeleteTree(p_FmPcdCcTree,p_FmPcd);
- XX_Free(p_Params);
- REPORT_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
- return NULL;
- }
- }
- p_KeyAndNextEngineParams = p_Params+k;
-
- memcpy(&p_KeyAndNextEngineParams->nextEngineParams,
- &p_FmPcdCcGroupParams->nextEnginePerEntriesInGrp[j],
- sizeof(t_FmPcdCcNextEngineParams));
-
- if ((p_KeyAndNextEngineParams->nextEngineParams.nextEngine == e_FM_PCD_CC)
- && p_KeyAndNextEngineParams->nextEngineParams.h_Manip)
- {
- err = AllocAndFillAdForContLookupManip(p_KeyAndNextEngineParams->nextEngineParams.params.ccParams.h_CcNode);
- if (err)
- {
- DeleteTree(p_FmPcdCcTree,p_FmPcd);
- XX_Free(p_Params);
- REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC Tree"));
- return NULL;
- }
- }
-
- requiredAction |= UPDATE_CC_WITH_TREE;
- p_KeyAndNextEngineParams->requiredAction = requiredAction;
-
- k++;
- }
- }
-
- p_FmPcdCcTree->numOfEntries = (uint8_t)k;
- p_FmPcdCcTree->numOfGrps = p_PcdGroupsParam->numOfGrps;
-
- p_FmPcdCcTree->ccTreeBaseAddr =
- PTR_TO_UINT(FM_MURAM_AllocMem(FmPcdGetMuramHandle(h_FmPcd),
- (uint32_t)( FM_PCD_MAX_NUM_OF_CC_GROUPS * FM_PCD_CC_AD_ENTRY_SIZE),
- FM_PCD_CC_TREE_ADDR_ALIGN));
- if (!p_FmPcdCcTree->ccTreeBaseAddr)
- {
- DeleteTree(p_FmPcdCcTree,p_FmPcd);
- XX_Free(p_Params);
- REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC Tree"));
- return NULL;
- }
- IOMemSet32(UINT_TO_PTR(p_FmPcdCcTree->ccTreeBaseAddr), 0, (uint32_t)(FM_PCD_MAX_NUM_OF_CC_GROUPS * FM_PCD_CC_AD_ENTRY_SIZE));
-
- p_CcTreeTmp = UINT_TO_PTR(p_FmPcdCcTree->ccTreeBaseAddr);
-
- j = 0;
- for (i = 0; i < numOfEntries; i++)
- {
- p_KeyAndNextEngineParams = p_Params + i;
-
- NextStepAd(p_CcTreeTmp,
- NULL,
- &p_KeyAndNextEngineParams->nextEngineParams,
- p_FmPcd);
-
- p_CcTreeTmp = PTR_MOVE(p_CcTreeTmp, FM_PCD_CC_AD_ENTRY_SIZE);
-
- memcpy(&p_FmPcdCcTree->keyAndNextEngineParams[i],
- p_KeyAndNextEngineParams,
- sizeof(t_FmPcdCcKeyAndNextEngineParams));
-
- if (p_FmPcdCcTree->keyAndNextEngineParams[i].nextEngineParams.nextEngine== e_FM_PCD_CC)
- {
- p_FmPcdCcNextNode = (t_FmPcdCcNode*)p_FmPcdCcTree->keyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode;
- p_CcInformation = FindNodeInfoInReleventLst(&p_FmPcdCcNextNode->ccTreeIdLst,
- (t_Handle)p_FmPcdCcTree,
- p_FmPcdCcNextNode->h_Spinlock);
-
- if (!p_CcInformation)
- {
- memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation));
- ccNodeInfo.h_CcNode = (t_Handle)p_FmPcdCcTree;
- ccNodeInfo.index = 1;
- EnqueueNodeInfoToRelevantLst(&p_FmPcdCcNextNode->ccTreeIdLst,
- &ccNodeInfo,
- p_FmPcdCcNextNode->h_Spinlock);
- }
- else
- p_CcInformation->index++;
- }
- }
-
- FmPcdIncNetEnvOwners(h_FmPcd, p_FmPcdCcTree->netEnvId);
- p_CcTreeTmp = UINT_TO_PTR(p_FmPcdCcTree->ccTreeBaseAddr);
-
- if (!FmPcdLockTryLockAll(p_FmPcd))
- {
- FM_PCD_CcRootDelete(p_FmPcdCcTree);
- XX_Free(p_Params);
- DBG(TRACE, ("FmPcdLockTryLockAll failed"));
- return NULL;
- }
-
- for (i = 0; i < numOfEntries; i++)
- {
- if (p_FmPcdCcTree->keyAndNextEngineParams[i].requiredAction)
- {
- err = SetRequiredAction(h_FmPcd,
- p_FmPcdCcTree->keyAndNextEngineParams[i].requiredAction,
- &p_FmPcdCcTree->keyAndNextEngineParams[i],
- p_CcTreeTmp,
- 1,
- p_FmPcdCcTree);
- if (err)
- {
- FmPcdLockUnlockAll(p_FmPcd);
- FM_PCD_CcRootDelete(p_FmPcdCcTree);
- XX_Free(p_Params);
- REPORT_ERROR(MAJOR, E_NO_MEMORY, ("No memory"));
- return NULL;
- }
- p_CcTreeTmp = PTR_MOVE(p_CcTreeTmp, FM_PCD_CC_AD_ENTRY_SIZE);
- }
- }
-
- FmPcdLockUnlockAll(p_FmPcd);
- p_FmPcdCcTree->p_Lock = FmPcdAcquireLock(p_FmPcd);
- if (!p_FmPcdCcTree->p_Lock)
- {
- FM_PCD_CcRootDelete(p_FmPcdCcTree);
- XX_Free(p_Params);
- REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM CC lock"));
- return NULL;
- }
-
- XX_Free(p_Params);
-
- return p_FmPcdCcTree;
-}
-
-t_Error FM_PCD_CcRootDelete(t_Handle h_CcTree)
-{
- t_FmPcd *p_FmPcd;
- t_FmPcdCcTree *p_CcTree = (t_FmPcdCcTree *)h_CcTree;
- int i= 0;
-
- SANITY_CHECK_RETURN_ERROR(p_CcTree,E_INVALID_STATE);
- p_FmPcd = (t_FmPcd *)p_CcTree->h_FmPcd;
- SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
-
- FmPcdDecNetEnvOwners(p_FmPcd, p_CcTree->netEnvId);
-
- if (p_CcTree->owners)
- RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("the tree with this ID can not be removed because this tree is occupied, first - unbind this tree"));
-
- /* Delete reassembly schemes if exist */
- if (p_CcTree->h_IpReassemblyManip)
- {
- FmPcdManipDeleteIpReassmSchemes(p_CcTree->h_IpReassemblyManip);
- FmPcdManipUpdateOwner(p_CcTree->h_IpReassemblyManip, FALSE);
- }
-
- for (i = 0; i <p_CcTree->numOfEntries; i++)
- {
- if (p_CcTree->keyAndNextEngineParams[i].nextEngineParams.nextEngine == e_FM_PCD_CC)
- UpdateNodeOwner(p_CcTree->keyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode, FALSE);
-
- if (p_CcTree->keyAndNextEngineParams[i].nextEngineParams.h_Manip)
- FmPcdManipUpdateOwner(p_CcTree->keyAndNextEngineParams[i].nextEngineParams.h_Manip, FALSE);
-
-#ifdef FM_CAPWAP_SUPPORT
- if ((p_CcTree->numOfGrps == 1) &&
- (p_CcTree->fmPcdGroupParam[0].numOfEntriesInGroup == 1) &&
- (p_CcTree->keyAndNextEngineParams[0].nextEngineParams.nextEngine == e_FM_PCD_CC) &&
- p_CcTree->keyAndNextEngineParams[0].nextEngineParams.params.ccParams.h_CcNode &&
- IsCapwapApplSpecific(p_CcTree->keyAndNextEngineParams[0].nextEngineParams.params.ccParams.h_CcNode))
- {
- if (FM_PCD_ManipNodeDelete(p_CcTree->keyAndNextEngineParams[0].nextEngineParams.h_Manip) != E_OK)
- return E_INVALID_STATE;
- }
-#endif /* FM_CAPWAP_SUPPORT */
-
-#if (DPAA_VERSION >= 11)
- if ((p_CcTree->keyAndNextEngineParams[i].nextEngineParams.nextEngine == e_FM_PCD_FR) &&
- (p_CcTree->keyAndNextEngineParams[i].nextEngineParams.params.frParams.h_FrmReplic))
- FrmReplicGroupUpdateOwner(p_CcTree->keyAndNextEngineParams[i].nextEngineParams.params.frParams.h_FrmReplic,
- FALSE);
-#endif /* (DPAA_VERSION >= 11) */
- }
-
- if (p_CcTree->p_Lock)
- FmPcdReleaseLock(p_CcTree->h_FmPcd, p_CcTree->p_Lock);
-
- DeleteTree(p_CcTree, p_FmPcd);
-
- return E_OK;
-}
-
-t_Error FM_PCD_CcRootModifyNextEngine(t_Handle h_CcTree,
- uint8_t grpId,
- uint8_t index,
- t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams)
-{
- t_FmPcd *p_FmPcd;
- t_FmPcdCcTree *p_CcTree = (t_FmPcdCcTree *)h_CcTree;
- t_Error err = E_OK;
-
- SANITY_CHECK_RETURN_ERROR(p_FmPcdCcNextEngineParams, E_NULL_POINTER);
- SANITY_CHECK_RETURN_ERROR(p_CcTree,E_INVALID_STATE);
- p_FmPcd = (t_FmPcd *)p_CcTree->h_FmPcd;
- SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
-
- if (!FmPcdLockTryLockAll(p_FmPcd))
- {
- DBG(TRACE, ("FmPcdLockTryLockAll failed"));
- return ERROR_CODE(E_BUSY);
- }
-
- err = FmPcdCcModifyNextEngineParamTree(p_FmPcd,
- p_CcTree,
- grpId,
- index,
- p_FmPcdCcNextEngineParams);
- FmPcdLockUnlockAll(p_FmPcd);
-
- if (err)
- {
- RETURN_ERROR(MAJOR, err, NO_MSG);
- }
-
- return E_OK;
-}
-
-t_Handle FM_PCD_MatchTableSet(t_Handle h_FmPcd, t_FmPcdCcNodeParams *p_CcNodeParam)
+t_Error FmPcdCcMatchTableSet(t_Handle h_FmPcd, t_FmPcdCcNode *p_CcNode, t_FmPcdCcNodeParams *p_CcNodeParam)
{
t_FmPcd *p_FmPcd = (t_FmPcd *) h_FmPcd;
- t_FmPcdCcNode *p_CcNode, *p_FmPcdCcNextNode;
+ t_FmPcdCcNode *p_FmPcdCcNextNode;
t_Error err = E_OK;
uint32_t tmp, keySize;
bool glblMask = FALSE;
@@ -5402,15 +5041,9 @@ t_Handle FM_PCD_MatchTableSet(t_Handle h_FmPcd, t_FmPcdCcNodeParams *p_CcNodePar
t_FmPcdCcStatsParams statsParams = {0};
t_Handle h_Manip;
- SANITY_CHECK_RETURN_VALUE(h_FmPcd,E_INVALID_HANDLE,NULL);
-
- p_CcNode = (t_FmPcdCcNode*)XX_Malloc(sizeof(t_FmPcdCcNode));
- if (!p_CcNode)
- {
- REPORT_ERROR(MAJOR, E_NO_MEMORY, ("No memory"));
- return NULL;
- }
- memset(p_CcNode, 0, sizeof(t_FmPcdCcNode));
+ ASSERT_COND(h_FmPcd);
+ ASSERT_COND(p_CcNode);
+ ASSERT_COND(p_CcNodeParam);
p_CcNode->p_GlblMask = (t_Handle)XX_Malloc(CC_GLBL_MASK_SIZE * sizeof(uint8_t));
memset(p_CcNode->p_GlblMask, 0, CC_GLBL_MASK_SIZE * sizeof(uint8_t));
@@ -5430,10 +5063,7 @@ t_Handle FM_PCD_MatchTableSet(t_Handle h_FmPcd, t_FmPcdCcNodeParams *p_CcNodePar
h_FmMuram = FmPcdGetMuramHandle(h_FmPcd);
if (!h_FmMuram)
- {
- REPORT_ERROR(MAJOR, E_INVALID_HANDLE, ("FM MURAM"));
- return NULL;
- }
+ RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("FM MURAM"));
INIT_LIST(&p_CcNode->ccPrevNodesLst);
INIT_LIST(&p_CcNode->ccTreeIdLst);
@@ -5444,8 +5074,7 @@ t_Handle FM_PCD_MatchTableSet(t_Handle h_FmPcd, t_FmPcdCcNodeParams *p_CcNodePar
if (!p_CcNode->h_Spinlock)
{
DeleteNode(p_CcNode);
- REPORT_ERROR(MAJOR, E_NO_MEMORY, ("CC node spinlock"));
- return NULL;
+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("CC node spinlock"));
}
if ((p_CcNodeParam->extractCcParams.type == e_FM_PCD_EXTRACT_BY_HDR) &&
@@ -5466,18 +5095,18 @@ t_Handle FM_PCD_MatchTableSet(t_Handle h_FmPcd, t_FmPcdCcNodeParams *p_CcNodePar
if ((p_CcNodeParam->extractCcParams.extractNonHdr.src == e_FM_PCD_EXTRACT_FROM_FLOW_ID) &&
(p_CcNodeParam->extractCcParams.extractNonHdr.offset != 0))
{
- REPORT_ERROR(MAJOR, E_INVALID_VALUE,
+ DeleteNode(p_CcNode);
+ RETURN_ERROR(MAJOR, E_INVALID_VALUE,
("In the case of the extraction from e_FM_PCD_EXTRACT_FROM_FLOW_ID offset has to be 0"));
- return NULL;
}
icCode = IcDefineCode(p_CcNodeParam);
fromIc = TRUE;
if (icCode == CC_PRIVATE_INFO_NONE)
{
- REPORT_ERROR(MAJOR, E_INVALID_STATE,
+ DeleteNode(p_CcNode);
+ RETURN_ERROR(MAJOR, E_INVALID_STATE,
("user asked extraction from IC and field in internal context or action wasn't initialized in the right way"));
- return NULL;
}
if ((icCode == CC_PRIVATE_INFO_IC_DEQ_FQID_INDEX_LOOKUP) ||
@@ -5503,8 +5132,7 @@ t_Handle FM_PCD_MatchTableSet(t_Handle h_FmPcd, t_FmPcdCcNodeParams *p_CcNodePar
if (err)
{
DeleteNode(p_CcNode);
- REPORT_ERROR(MAJOR, err, NO_MSG);
- return NULL;
+ RETURN_ERROR(MAJOR, err, NO_MSG);
}
switch (p_CcNodeParam->extractCcParams.type)
@@ -5564,8 +5192,7 @@ t_Handle FM_PCD_MatchTableSet(t_Handle h_FmPcd, t_FmPcdCcNodeParams *p_CcNodePar
default:
DeleteNode(p_CcNode);
- REPORT_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
- return NULL;
+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
}
break;
@@ -5587,8 +5214,7 @@ t_Handle FM_PCD_MatchTableSet(t_Handle h_FmPcd, t_FmPcdCcNodeParams *p_CcNodePar
if ((p_CcNode->offset + p_CcNode->sizeOfExtraction) > 8)
{
DeleteNode(p_CcNode);
- REPORT_ERROR(MAJOR, E_INVALID_SELECTION,("when node of the type CC_PC_GENERIC_IC_HASH_INDEXED offset + size can not be bigger then size of HASH 64 bits (8 bytes)"));
- return NULL;
+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION,("when node of the type CC_PC_GENERIC_IC_HASH_INDEXED offset + size can not be bigger then size of HASH 64 bits (8 bytes)"));
}
}
if ((p_CcNode->parseCode == CC_PC_GENERIC_IC_GMASK) ||
@@ -5601,30 +5227,26 @@ t_Handle FM_PCD_MatchTableSet(t_Handle h_FmPcd, t_FmPcdCcNodeParams *p_CcNodePar
default:
DeleteNode(p_CcNode);
- REPORT_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
- return NULL;
+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
}
if (p_CcNode->parseCode == CC_PC_ILLEGAL)
{
DeleteNode(p_CcNode);
- REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("illegal extraction type"));
- return NULL;
+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("illegal extraction type"));
}
if ((p_CcNode->sizeOfExtraction > FM_PCD_MAX_SIZE_OF_KEY) ||
!p_CcNode->sizeOfExtraction)
{
DeleteNode(p_CcNode);
- REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("sizeOfExatrction can not be greater than 56 and not 0"));
- return NULL;
+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("sizeOfExatrction can not be greater than 56 and not 0"));
}
if (p_CcNodeParam->keysParams.keySize != p_CcNode->sizeOfExtraction)
{
DeleteNode(p_CcNode);
- REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("keySize has to be equal to sizeOfExtraction"));
- return NULL;
+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("keySize has to be equal to sizeOfExtraction"));
}
p_CcNode->userSizeOfExtraction = p_CcNode->sizeOfExtraction;
@@ -5636,8 +5258,7 @@ t_Handle FM_PCD_MatchTableSet(t_Handle h_FmPcd, t_FmPcdCcNodeParams *p_CcNodePar
if (err != E_OK)
{
DeleteNode(p_CcNode);
- REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("keySize has to be equal to sizeOfExtraction"));
- return NULL;
+ RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("keySize has to be equal to sizeOfExtraction"));
}
/* Calculating matching table entry size by rounding up the user-defined size of extraction to valid entry size */
@@ -5659,8 +5280,7 @@ t_Handle FM_PCD_MatchTableSet(t_Handle h_FmPcd, t_FmPcdCcNodeParams *p_CcNodePar
if (err != E_OK)
{
DeleteNode(p_CcNode);
- REPORT_ERROR(MAJOR, err, NO_MSG);
- return NULL;
+ RETURN_ERROR(MAJOR, err, NO_MSG);
}
p_CcNode->keysMatchTableMaxSize = matchTableSize;
@@ -5671,8 +5291,7 @@ t_Handle FM_PCD_MatchTableSet(t_Handle h_FmPcd, t_FmPcdCcNodeParams *p_CcNodePar
if (err != E_OK)
{
DeleteNode(p_CcNode);
- REPORT_ERROR(MAJOR, err, NO_MSG);
- return NULL;
+ RETURN_ERROR(MAJOR, err, NO_MSG);
}
}
@@ -5685,8 +5304,7 @@ t_Handle FM_PCD_MatchTableSet(t_Handle h_FmPcd, t_FmPcdCcNodeParams *p_CcNodePar
if (!p_CcNode->h_TmpAd)
{
DeleteNode(p_CcNode);
- REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC action descriptor"));
- return NULL;
+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC action descriptor"));
}
}
else
@@ -5710,8 +5328,7 @@ t_Handle FM_PCD_MatchTableSet(t_Handle h_FmPcd, t_FmPcdCcNodeParams *p_CcNodePar
if (!p_CcNode->h_StatsFLRs)
{
DeleteNode(p_CcNode);
- REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC frame length ranges array"));
- return NULL;
+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC frame length ranges array"));
}
/* Initialize using value received from the user */
@@ -5742,8 +5359,7 @@ t_Handle FM_PCD_MatchTableSet(t_Handle h_FmPcd, t_FmPcdCcNodeParams *p_CcNodePar
if (!p_CcNode->h_KeysMatchTable)
{
DeleteNode(p_CcNode);
- REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC node key match table"));
- return NULL;
+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC node key match table"));
}
IOMemSet32((uint8_t *)p_CcNode->h_KeysMatchTable,
0,
@@ -5758,8 +5374,7 @@ t_Handle FM_PCD_MatchTableSet(t_Handle h_FmPcd, t_FmPcdCcNodeParams *p_CcNodePar
if (!p_CcNode->h_AdTable)
{
DeleteNode(p_CcNode);
- REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC node action descriptors table"));
- return NULL;
+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC node action descriptors table"));
}
IOMemSet32((uint8_t *)p_CcNode->h_AdTable, 0, adTableSize);
@@ -5833,6 +5448,18 @@ t_Handle FM_PCD_MatchTableSet(t_Handle h_FmPcd, t_FmPcdCcNodeParams *p_CcNodePar
p_StatsObj = GetStatsObj(p_CcNode);
ASSERT_COND(p_StatsObj);
+ /* All 'bucket' nodes of a hash table should share the same statistics counters,
+ allocated by the hash table. So, if this node is a bucket of a hash table,
+ we'll replace the locally allocated counters with the shared counters. */
+ if (p_CcNode->isHashBucket)
+ {
+ ASSERT_COND(p_CcNode->h_MissStatsCounters);
+
+ /* Store original counters pointer and replace it with mutual preallocated pointer */
+ p_CcNode->h_PrivMissStatsCounters = p_StatsObj->h_StatsCounters;
+ p_StatsObj->h_StatsCounters = p_CcNode->h_MissStatsCounters;
+ }
+
statsParams.h_StatsAd = p_StatsObj->h_StatsAd;
statsParams.h_StatsCounters = p_StatsObj->h_StatsCounters;
#if (DPAA_VERSION >= 11)
@@ -5909,7 +5536,7 @@ t_Handle FM_PCD_MatchTableSet(t_Handle h_FmPcd, t_FmPcdCcNodeParams *p_CcNodePar
{
FM_PCD_MatchTableDelete((t_Handle)p_CcNode);
DBG(TRACE, ("FmPcdLockTryLockAll failed"));
- return NULL;
+ return ERROR_CODE(E_BUSY);
}
/* Required action for each next engine */
@@ -5927,8 +5554,7 @@ t_Handle FM_PCD_MatchTableSet(t_Handle h_FmPcd, t_FmPcdCcNodeParams *p_CcNodePar
{
FmPcdLockUnlockAll(h_FmPcd);
FM_PCD_MatchTableDelete((t_Handle)p_CcNode);
- REPORT_ERROR(MAJOR, err, NO_MSG);
- return NULL;
+ RETURN_ERROR(MAJOR, err, NO_MSG);
}
p_AdTableTmp = PTR_MOVE(p_AdTableTmp, FM_PCD_CC_AD_ENTRY_SIZE);
}
@@ -5936,6 +5562,407 @@ t_Handle FM_PCD_MatchTableSet(t_Handle h_FmPcd, t_FmPcdCcNodeParams *p_CcNodePar
FmPcdLockUnlockAll(h_FmPcd);
+ return E_OK;
+}
+/*********************** End of inter-module routines ************************/
+
+
+/****************************************/
+/* API Init unit functions */
+/****************************************/
+
+t_Handle FM_PCD_CcRootBuild(t_Handle h_FmPcd, t_FmPcdCcTreeParams *p_PcdGroupsParam)
+{
+ t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
+ t_Error err = E_OK;
+ int i = 0, j = 0, k = 0;
+ t_FmPcdCcTree *p_FmPcdCcTree;
+ uint8_t numOfEntries;
+ t_Handle p_CcTreeTmp;
+ t_FmPcdCcGrpParams *p_FmPcdCcGroupParams;
+ t_FmPcdCcKeyAndNextEngineParams *p_Params, *p_KeyAndNextEngineParams;
+ t_NetEnvParams netEnvParams;
+ uint8_t lastOne = 0;
+ uint32_t requiredAction = 0;
+ t_FmPcdCcNode *p_FmPcdCcNextNode;
+ t_CcNodeInformation ccNodeInfo, *p_CcInformation;
+
+ SANITY_CHECK_RETURN_VALUE(h_FmPcd,E_INVALID_HANDLE, NULL);
+ SANITY_CHECK_RETURN_VALUE(p_PcdGroupsParam,E_INVALID_HANDLE, NULL);
+
+ if (p_PcdGroupsParam->numOfGrps > FM_PCD_MAX_NUM_OF_CC_GROUPS)
+ {
+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("numOfGrps should not exceed %d", FM_PCD_MAX_NUM_OF_CC_GROUPS));
+ return NULL;
+ }
+
+ p_FmPcdCcTree = (t_FmPcdCcTree*)XX_Malloc(sizeof(t_FmPcdCcTree));
+ if (!p_FmPcdCcTree)
+ {
+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("PCD tree structure"));
+ return NULL;
+ }
+ memset(p_FmPcdCcTree, 0, sizeof(t_FmPcdCcTree));
+ p_FmPcdCcTree->h_FmPcd = h_FmPcd;
+
+ p_Params = (t_FmPcdCcKeyAndNextEngineParams*)XX_Malloc(FM_PCD_MAX_NUM_OF_CC_GROUPS * sizeof(t_FmPcdCcKeyAndNextEngineParams));
+ memset(p_Params, 0, FM_PCD_MAX_NUM_OF_CC_GROUPS * sizeof(t_FmPcdCcKeyAndNextEngineParams));
+
+ INIT_LIST(&p_FmPcdCcTree->fmPortsLst);
+
+#ifdef FM_CAPWAP_SUPPORT
+ if ((p_PcdGroupsParam->numOfGrps == 1) &&
+ (p_PcdGroupsParam->ccGrpParams[0].numOfDistinctionUnits == 0) &&
+ (p_PcdGroupsParam->ccGrpParams[0].nextEnginePerEntriesInGrp[0].nextEngine == e_FM_PCD_CC) &&
+ p_PcdGroupsParam->ccGrpParams[0].nextEnginePerEntriesInGrp[0].params.ccParams.h_CcNode &&
+ IsCapwapApplSpecific(p_PcdGroupsParam->ccGrpParams[0].nextEnginePerEntriesInGrp[0].params.ccParams.h_CcNode))
+ {
+ p_PcdGroupsParam->ccGrpParams[0].nextEnginePerEntriesInGrp[0].h_Manip = FmPcdManipApplSpecificBuild();
+ if (!p_PcdGroupsParam->ccGrpParams[0].nextEnginePerEntriesInGrp[0].h_Manip)
+ {
+ DeleteTree(p_FmPcdCcTree,p_FmPcd);
+ XX_Free(p_Params);
+ REPORT_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
+ return NULL;
+ }
+ }
+#endif /* FM_CAPWAP_SUPPORT */
+
+ numOfEntries = 0;
+ p_FmPcdCcTree->netEnvId = FmPcdGetNetEnvId(p_PcdGroupsParam->h_NetEnv);
+
+ for (i = 0; i < p_PcdGroupsParam->numOfGrps; i++)
+ {
+ p_FmPcdCcGroupParams = &p_PcdGroupsParam->ccGrpParams[i];
+
+ if (p_FmPcdCcGroupParams->numOfDistinctionUnits > FM_PCD_MAX_NUM_OF_CC_UNITS)
+ {
+ DeleteTree(p_FmPcdCcTree,p_FmPcd);
+ XX_Free(p_Params);
+ REPORT_ERROR(MAJOR, E_INVALID_VALUE,
+ ("numOfDistinctionUnits (group %d) should not exceed %d", i, FM_PCD_MAX_NUM_OF_CC_UNITS));
+ return NULL;
+ }
+
+ p_FmPcdCcTree->fmPcdGroupParam[i].baseGroupEntry = numOfEntries;
+ p_FmPcdCcTree->fmPcdGroupParam[i].numOfEntriesInGroup =(uint8_t)( 0x01 << p_FmPcdCcGroupParams->numOfDistinctionUnits);
+ numOfEntries += p_FmPcdCcTree->fmPcdGroupParam[i].numOfEntriesInGroup;
+ if (numOfEntries > FM_PCD_MAX_NUM_OF_CC_GROUPS)
+ {
+ DeleteTree(p_FmPcdCcTree,p_FmPcd);
+ XX_Free(p_Params);
+ REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("numOfEntries can not be larger than %d", FM_PCD_MAX_NUM_OF_CC_GROUPS));
+ return NULL;
+ }
+
+ if (lastOne)
+ {
+ if (p_FmPcdCcTree->fmPcdGroupParam[i].numOfEntriesInGroup > lastOne)
+ {
+ DeleteTree(p_FmPcdCcTree,p_FmPcd);
+ XX_Free(p_Params);
+ REPORT_ERROR(MAJOR, E_CONFLICT, ("numOfEntries per group must be set in descending order"));
+ return NULL;
+ }
+ }
+
+ lastOne = p_FmPcdCcTree->fmPcdGroupParam[i].numOfEntriesInGroup;
+
+ netEnvParams.netEnvId = p_FmPcdCcTree->netEnvId;
+ netEnvParams.numOfDistinctionUnits = p_FmPcdCcGroupParams->numOfDistinctionUnits;
+
+ memcpy(netEnvParams.unitIds,
+ &p_FmPcdCcGroupParams->unitIds,
+ (sizeof(uint8_t)) * p_FmPcdCcGroupParams->numOfDistinctionUnits);
+
+ err = PcdGetUnitsVector(p_FmPcd, &netEnvParams);
+ if (err)
+ {
+ DeleteTree(p_FmPcdCcTree,p_FmPcd);
+ XX_Free(p_Params);
+ REPORT_ERROR(MAJOR, err, NO_MSG);
+ return NULL;
+ }
+
+ p_FmPcdCcTree->fmPcdGroupParam[i].totalBitsMask = netEnvParams.vector;
+ for (j = 0; j < p_FmPcdCcTree->fmPcdGroupParam[i].numOfEntriesInGroup; j++)
+ {
+ err = ValidateNextEngineParams(h_FmPcd,
+ &p_FmPcdCcGroupParams->nextEnginePerEntriesInGrp[j],
+ e_FM_PCD_CC_STATS_MODE_NONE);
+ if (err)
+ {
+ DeleteTree(p_FmPcdCcTree,p_FmPcd);
+ XX_Free(p_Params);
+ REPORT_ERROR(MAJOR, err, (NO_MSG));
+ return NULL;
+ }
+
+ if (p_FmPcdCcGroupParams->nextEnginePerEntriesInGrp[j].h_Manip)
+ {
+ err = FmPcdManipCheckParamsForCcNextEngine(&p_FmPcdCcGroupParams->nextEnginePerEntriesInGrp[j], &requiredAction);
+ if (err)
+ {
+ DeleteTree(p_FmPcdCcTree,p_FmPcd);
+ XX_Free(p_Params);
+ REPORT_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
+ return NULL;
+ }
+ }
+ p_KeyAndNextEngineParams = p_Params+k;
+
+ memcpy(&p_KeyAndNextEngineParams->nextEngineParams,
+ &p_FmPcdCcGroupParams->nextEnginePerEntriesInGrp[j],
+ sizeof(t_FmPcdCcNextEngineParams));
+
+ if ((p_KeyAndNextEngineParams->nextEngineParams.nextEngine == e_FM_PCD_CC)
+ && p_KeyAndNextEngineParams->nextEngineParams.h_Manip)
+ {
+ err = AllocAndFillAdForContLookupManip(p_KeyAndNextEngineParams->nextEngineParams.params.ccParams.h_CcNode);
+ if (err)
+ {
+ DeleteTree(p_FmPcdCcTree,p_FmPcd);
+ XX_Free(p_Params);
+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC Tree"));
+ return NULL;
+ }
+ }
+
+ requiredAction |= UPDATE_CC_WITH_TREE;
+ p_KeyAndNextEngineParams->requiredAction = requiredAction;
+
+ k++;
+ }
+ }
+
+ p_FmPcdCcTree->numOfEntries = (uint8_t)k;
+ p_FmPcdCcTree->numOfGrps = p_PcdGroupsParam->numOfGrps;
+
+ p_FmPcdCcTree->ccTreeBaseAddr =
+ PTR_TO_UINT(FM_MURAM_AllocMem(FmPcdGetMuramHandle(h_FmPcd),
+ (uint32_t)( FM_PCD_MAX_NUM_OF_CC_GROUPS * FM_PCD_CC_AD_ENTRY_SIZE),
+ FM_PCD_CC_TREE_ADDR_ALIGN));
+ if (!p_FmPcdCcTree->ccTreeBaseAddr)
+ {
+ DeleteTree(p_FmPcdCcTree,p_FmPcd);
+ XX_Free(p_Params);
+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC Tree"));
+ return NULL;
+ }
+ IOMemSet32(UINT_TO_PTR(p_FmPcdCcTree->ccTreeBaseAddr), 0, (uint32_t)(FM_PCD_MAX_NUM_OF_CC_GROUPS * FM_PCD_CC_AD_ENTRY_SIZE));
+
+ p_CcTreeTmp = UINT_TO_PTR(p_FmPcdCcTree->ccTreeBaseAddr);
+
+ j = 0;
+ for (i = 0; i < numOfEntries; i++)
+ {
+ p_KeyAndNextEngineParams = p_Params + i;
+
+ NextStepAd(p_CcTreeTmp,
+ NULL,
+ &p_KeyAndNextEngineParams->nextEngineParams,
+ p_FmPcd);
+
+ p_CcTreeTmp = PTR_MOVE(p_CcTreeTmp, FM_PCD_CC_AD_ENTRY_SIZE);
+
+ memcpy(&p_FmPcdCcTree->keyAndNextEngineParams[i],
+ p_KeyAndNextEngineParams,
+ sizeof(t_FmPcdCcKeyAndNextEngineParams));
+
+ if (p_FmPcdCcTree->keyAndNextEngineParams[i].nextEngineParams.nextEngine== e_FM_PCD_CC)
+ {
+ p_FmPcdCcNextNode = (t_FmPcdCcNode*)p_FmPcdCcTree->keyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode;
+ p_CcInformation = FindNodeInfoInReleventLst(&p_FmPcdCcNextNode->ccTreeIdLst,
+ (t_Handle)p_FmPcdCcTree,
+ p_FmPcdCcNextNode->h_Spinlock);
+
+ if (!p_CcInformation)
+ {
+ memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation));
+ ccNodeInfo.h_CcNode = (t_Handle)p_FmPcdCcTree;
+ ccNodeInfo.index = 1;
+ EnqueueNodeInfoToRelevantLst(&p_FmPcdCcNextNode->ccTreeIdLst,
+ &ccNodeInfo,
+ p_FmPcdCcNextNode->h_Spinlock);
+ }
+ else
+ p_CcInformation->index++;
+ }
+ }
+
+ FmPcdIncNetEnvOwners(h_FmPcd, p_FmPcdCcTree->netEnvId);
+ p_CcTreeTmp = UINT_TO_PTR(p_FmPcdCcTree->ccTreeBaseAddr);
+
+ if (!FmPcdLockTryLockAll(p_FmPcd))
+ {
+ FM_PCD_CcRootDelete(p_FmPcdCcTree);
+ XX_Free(p_Params);
+ DBG(TRACE, ("FmPcdLockTryLockAll failed"));
+ return NULL;
+ }
+
+ for (i = 0; i < numOfEntries; i++)
+ {
+ if (p_FmPcdCcTree->keyAndNextEngineParams[i].requiredAction)
+ {
+ err = SetRequiredAction(h_FmPcd,
+ p_FmPcdCcTree->keyAndNextEngineParams[i].requiredAction,
+ &p_FmPcdCcTree->keyAndNextEngineParams[i],
+ p_CcTreeTmp,
+ 1,
+ p_FmPcdCcTree);
+ if (err)
+ {
+ FmPcdLockUnlockAll(p_FmPcd);
+ FM_PCD_CcRootDelete(p_FmPcdCcTree);
+ XX_Free(p_Params);
+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("No memory"));
+ return NULL;
+ }
+ p_CcTreeTmp = PTR_MOVE(p_CcTreeTmp, FM_PCD_CC_AD_ENTRY_SIZE);
+ }
+ }
+
+ FmPcdLockUnlockAll(p_FmPcd);
+ p_FmPcdCcTree->p_Lock = FmPcdAcquireLock(p_FmPcd);
+ if (!p_FmPcdCcTree->p_Lock)
+ {
+ FM_PCD_CcRootDelete(p_FmPcdCcTree);
+ XX_Free(p_Params);
+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM CC lock"));
+ return NULL;
+ }
+
+ XX_Free(p_Params);
+
+ return p_FmPcdCcTree;
+}
+
+t_Error FM_PCD_CcRootDelete(t_Handle h_CcTree)
+{
+ t_FmPcd *p_FmPcd;
+ t_FmPcdCcTree *p_CcTree = (t_FmPcdCcTree *)h_CcTree;
+ int i= 0;
+
+ SANITY_CHECK_RETURN_ERROR(p_CcTree,E_INVALID_STATE);
+ p_FmPcd = (t_FmPcd *)p_CcTree->h_FmPcd;
+ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
+
+ FmPcdDecNetEnvOwners(p_FmPcd, p_CcTree->netEnvId);
+
+ if (p_CcTree->owners)
+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("the tree with this ID can not be removed because this tree is occupied, first - unbind this tree"));
+
+ /* Delete reassembly schemes if exist */
+ if (p_CcTree->h_IpReassemblyManip)
+ {
+ FmPcdManipDeleteIpReassmSchemes(p_CcTree->h_IpReassemblyManip);
+ FmPcdManipUpdateOwner(p_CcTree->h_IpReassemblyManip, FALSE);
+ }
+
+ for (i = 0; i <p_CcTree->numOfEntries; i++)
+ {
+ if (p_CcTree->keyAndNextEngineParams[i].nextEngineParams.nextEngine == e_FM_PCD_CC)
+ UpdateNodeOwner(p_CcTree->keyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode, FALSE);
+
+ if (p_CcTree->keyAndNextEngineParams[i].nextEngineParams.h_Manip)
+ FmPcdManipUpdateOwner(p_CcTree->keyAndNextEngineParams[i].nextEngineParams.h_Manip, FALSE);
+
+#ifdef FM_CAPWAP_SUPPORT
+ if ((p_CcTree->numOfGrps == 1) &&
+ (p_CcTree->fmPcdGroupParam[0].numOfEntriesInGroup == 1) &&
+ (p_CcTree->keyAndNextEngineParams[0].nextEngineParams.nextEngine == e_FM_PCD_CC) &&
+ p_CcTree->keyAndNextEngineParams[0].nextEngineParams.params.ccParams.h_CcNode &&
+ IsCapwapApplSpecific(p_CcTree->keyAndNextEngineParams[0].nextEngineParams.params.ccParams.h_CcNode))
+ {
+ if (FM_PCD_ManipNodeDelete(p_CcTree->keyAndNextEngineParams[0].nextEngineParams.h_Manip) != E_OK)
+ return E_INVALID_STATE;
+ }
+#endif /* FM_CAPWAP_SUPPORT */
+
+#if (DPAA_VERSION >= 11)
+ if ((p_CcTree->keyAndNextEngineParams[i].nextEngineParams.nextEngine == e_FM_PCD_FR) &&
+ (p_CcTree->keyAndNextEngineParams[i].nextEngineParams.params.frParams.h_FrmReplic))
+ FrmReplicGroupUpdateOwner(p_CcTree->keyAndNextEngineParams[i].nextEngineParams.params.frParams.h_FrmReplic,
+ FALSE);
+#endif /* (DPAA_VERSION >= 11) */
+ }
+
+ if (p_CcTree->p_Lock)
+ FmPcdReleaseLock(p_CcTree->h_FmPcd, p_CcTree->p_Lock);
+
+ DeleteTree(p_CcTree, p_FmPcd);
+
+ return E_OK;
+}
+
+t_Error FM_PCD_CcRootModifyNextEngine(t_Handle h_CcTree,
+ uint8_t grpId,
+ uint8_t index,
+ t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams)
+{
+ t_FmPcd *p_FmPcd;
+ t_FmPcdCcTree *p_CcTree = (t_FmPcdCcTree *)h_CcTree;
+ t_Error err = E_OK;
+
+ SANITY_CHECK_RETURN_ERROR(p_FmPcdCcNextEngineParams, E_NULL_POINTER);
+ SANITY_CHECK_RETURN_ERROR(p_CcTree,E_INVALID_STATE);
+ p_FmPcd = (t_FmPcd *)p_CcTree->h_FmPcd;
+ SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
+
+ if (!FmPcdLockTryLockAll(p_FmPcd))
+ {
+ DBG(TRACE, ("FmPcdLockTryLockAll failed"));
+ return ERROR_CODE(E_BUSY);
+ }
+
+ err = FmPcdCcModifyNextEngineParamTree(p_FmPcd,
+ p_CcTree,
+ grpId,
+ index,
+ p_FmPcdCcNextEngineParams);
+ FmPcdLockUnlockAll(p_FmPcd);
+
+ if (err)
+ {
+ RETURN_ERROR(MAJOR, err, NO_MSG);
+ }
+
+ return E_OK;
+}
+
+t_Handle FM_PCD_MatchTableSet(t_Handle h_FmPcd, t_FmPcdCcNodeParams *p_CcNodeParam)
+{
+ t_FmPcdCcNode *p_CcNode;
+ t_Error err;
+
+ SANITY_CHECK_RETURN_VALUE(h_FmPcd, E_INVALID_HANDLE, NULL);
+ SANITY_CHECK_RETURN_VALUE(p_CcNodeParam, E_NULL_POINTER, NULL);
+
+ p_CcNode = (t_FmPcdCcNode*)XX_Malloc(sizeof(t_FmPcdCcNode));
+ if (!p_CcNode)
+ {
+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("No memory"));
+ return NULL;
+ }
+ memset(p_CcNode, 0, sizeof(t_FmPcdCcNode));
+
+ err = FmPcdCcMatchTableSet(h_FmPcd, p_CcNode, p_CcNodeParam);
+
+ switch (GET_ERROR_TYPE(err))
+ {
+ case E_OK:
+ break;
+
+ case E_BUSY:
+ DBG(TRACE, ("E_BUSY error"));
+ return NULL;
+
+ default:
+ REPORT_ERROR(MAJOR, err, NO_MSG);
+ return NULL;
+ }
+
return p_CcNode;
}
@@ -6536,6 +6563,9 @@ t_Error FM_PCD_MatchTableGetKeyStatistics(t_Handle h_CcNode,
intFlags = XX_LockIntrSpinlock(p_CcNode->h_Spinlock);
+ if (keyIndex >= p_CcNode->numOfKeys)
+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("The provided keyIndex exceeds the number of keys in this match table"));
+
err = MatchTableGetKeyStatistics(p_CcNode,
keyIndex,
p_KeyStatistics);
@@ -6548,6 +6578,30 @@ t_Error FM_PCD_MatchTableGetKeyStatistics(t_Handle h_CcNode,
return E_OK;
}
+t_Error FM_PCD_MatchTableGetMissStatistics(t_Handle h_CcNode,
+ t_FmPcdCcKeyStatistics *p_MissStatistics)
+{
+ t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
+ uint32_t intFlags;
+ t_Error err;
+
+ SANITY_CHECK_RETURN_ERROR(h_CcNode, E_INVALID_HANDLE);
+ SANITY_CHECK_RETURN_ERROR(p_MissStatistics, E_NULL_POINTER);
+
+ intFlags = XX_LockIntrSpinlock(p_CcNode->h_Spinlock);
+
+ err = MatchTableGetKeyStatistics(p_CcNode,
+ p_CcNode->numOfKeys,
+ p_MissStatistics);
+
+ XX_UnlockIntrSpinlock(p_CcNode->h_Spinlock, intFlags);
+
+ if (err != E_OK)
+ RETURN_ERROR(MAJOR, err, NO_MSG);
+
+ return E_OK;
+}
+
t_Error FM_PCD_MatchTableFindNGetKeyStatistics(t_Handle h_CcNode,
uint8_t keySize,
uint8_t *p_Key,
@@ -6572,6 +6626,8 @@ t_Error FM_PCD_MatchTableFindNGetKeyStatistics(t_Handle h_CcNode
"match table of the provided node"));
}
+ ASSERT_COND(keyIndex < p_CcNode->numOfKeys);
+
err = MatchTableGetKeyStatistics(p_CcNode,
keyIndex,
p_KeyStatistics);
@@ -6624,10 +6680,13 @@ t_Handle FM_PCD_HashTableSet(t_Handle h_FmPcd, t_FmPcdHashTableParams *p_Param)
{
t_FmPcdCcNode *p_CcNodeHashTbl;
t_FmPcdCcNodeParams *p_IndxHashCcNodeParam, *p_ExactMatchCcNodeParam;
- t_Handle h_CcNode;
+ t_FmPcdCcNode *p_CcNode;
+ t_Handle h_MissStatsCounters = NULL;
t_FmPcdCcKeyParams *p_HashKeyParams;
int i;
uint16_t numOfSets, numOfWays, countMask, onesCount = 0;
+ bool statsEnForMiss = FALSE;
+ t_Error err;
SANITY_CHECK_RETURN_VALUE(h_FmPcd, E_INVALID_HANDLE, NULL);
SANITY_CHECK_RETURN_VALUE(p_Param, E_NULL_POINTER, NULL);
@@ -6684,6 +6743,28 @@ t_Handle FM_PCD_HashTableSet(t_Handle h_FmPcd, t_FmPcdHashTableParams *p_Param)
if (p_Param->maxNumOfKeys % numOfSets)
DBG(INFO, ("'maxNumOfKeys' is not a multiple of hash number of ways, so number of ways will be rounded up"));
+ if ((p_Param->statisticsMode == e_FM_PCD_CC_STATS_MODE_FRAME) ||
+ (p_Param->statisticsMode == e_FM_PCD_CC_STATS_MODE_BYTE_AND_FRAME))
+ {
+ /* Allocating a statistics counters table that will be used by all
+ 'miss' entries of the hash table */
+ h_MissStatsCounters = (t_Handle)FM_MURAM_AllocMem(FmPcdGetMuramHandle(h_FmPcd),
+ 2 * FM_PCD_CC_STATS_COUNTER_SIZE,
+ FM_PCD_CC_AD_TABLE_ALIGN);
+ if (!h_MissStatsCounters)
+ {
+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for statistics table for hash miss"));
+ return NULL;
+ }
+ memset(h_MissStatsCounters, 0, (2 * FM_PCD_CC_STATS_COUNTER_SIZE));
+
+ /* Always enable statistics for 'miss', so that a statistics AD will be
+ initialized from the start. We'll store the requested 'statistics enable'
+ value and it will be used when statistics are read by the user. */
+ statsEnForMiss = p_Param->ccNextEngineParamsForMiss.statisticsEn;
+ p_Param->ccNextEngineParamsForMiss.statisticsEn = TRUE;
+ }
+
/* Building exact-match node params, will be used to create the hash buckets */
p_ExactMatchCcNodeParam->extractCcParams.type = e_FM_PCD_EXTRACT_NON_HDR;
@@ -6703,13 +6784,23 @@ t_Handle FM_PCD_HashTableSet(t_Handle h_FmPcd, t_FmPcdHashTableParams *p_Param)
for (i = 0; i < numOfSets; i++)
{
- h_CcNode = FM_PCD_MatchTableSet(h_FmPcd, p_ExactMatchCcNodeParam);
- if (!h_CcNode)
+ /* Each exact-match node will be marked as a 'bucket' and provided with a pointer to statistics counters,
+ to be used for 'miss' entry statistics */
+ p_CcNode = (t_FmPcdCcNode *)XX_Malloc(sizeof(t_FmPcdCcNode));
+ if (!p_CcNode)
+ break;
+ memset(p_CcNode, 0, sizeof(t_FmPcdCcNode));
+
+ p_CcNode->isHashBucket = TRUE;
+ p_CcNode->h_MissStatsCounters = h_MissStatsCounters;
+
+ err = FmPcdCcMatchTableSet(h_FmPcd, p_CcNode, p_ExactMatchCcNodeParam);
+ if (err)
break;
p_HashKeyParams[i].ccNextEngineParams.nextEngine = e_FM_PCD_CC;
p_HashKeyParams[i].ccNextEngineParams.statisticsEn = FALSE;
- p_HashKeyParams[i].ccNextEngineParams.params.ccParams.h_CcNode = h_CcNode;
+ p_HashKeyParams[i].ccNextEngineParams.params.ccParams.h_CcNode = p_CcNode;
}
if (i < numOfSets)
@@ -6717,6 +6808,8 @@ t_Handle FM_PCD_HashTableSet(t_Handle h_FmPcd, t_FmPcdHashTableParams *p_Param)
for (i = i-1; i >=0; i--)
FM_PCD_MatchTableDelete(p_HashKeyParams[i].ccNextEngineParams.params.ccParams.h_CcNode);
+ FM_MURAM_FreeMem(FmPcdGetMuramHandle(h_FmPcd), h_MissStatsCounters);
+
REPORT_ERROR(MAJOR, E_NULL_POINTER, NO_MSG);
XX_Free(p_IndxHashCcNodeParam);
XX_Free(p_ExactMatchCcNodeParam);
@@ -6738,10 +6831,17 @@ t_Handle FM_PCD_HashTableSet(t_Handle h_FmPcd, t_FmPcdHashTableParams *p_Param)
p_IndxHashCcNodeParam->keysParams.keySize = 2;
p_CcNodeHashTbl = FM_PCD_MatchTableSet(h_FmPcd, p_IndxHashCcNodeParam);
-
+
if (p_CcNodeHashTbl)
p_CcNodeHashTbl->kgHashShift = p_Param->kgHashShift;
-
+
+ /* Store the counters allocated for the buckets' 'miss' entries in the hash table,
+ and whether statistics for 'miss' were enabled. */
+ p_CcNodeHashTbl->h_MissStatsCounters = h_MissStatsCounters;
+ p_CcNodeHashTbl->statsEnForMiss = statsEnForMiss;
+
+ XX_Print("Hash 0x%x: 0x%x\n", p_CcNodeHashTbl, h_MissStatsCounters);
+
XX_Free(p_IndxHashCcNodeParam);
XX_Free(p_ExactMatchCcNodeParam);
@@ -6751,12 +6851,14 @@ t_Handle FM_PCD_HashTableSet(t_Handle h_FmPcd, t_FmPcdHashTableParams *p_Param)
t_Error FM_PCD_HashTableDelete(t_Handle h_HashTbl)
{
t_FmPcdCcNode *p_HashTbl = (t_FmPcdCcNode *)h_HashTbl;
- t_Handle *p_HashBuckets;
+ t_Handle h_FmPcd;
+ t_Handle *p_HashBuckets, h_MissStatsCounters;
uint16_t i, numOfBuckets;
t_Error err;
SANITY_CHECK_RETURN_ERROR(p_HashTbl, E_INVALID_HANDLE);
+ /* Store all hash buckets before the hash is freed */
numOfBuckets = p_HashTbl->numOfKeys;
p_HashBuckets = (t_Handle *)XX_Malloc(numOfBuckets * sizeof(t_Handle));
@@ -6766,14 +6868,23 @@ t_Error FM_PCD_HashTableDelete(t_Handle h_HashTbl)
for (i = 0; i < numOfBuckets; i++)
p_HashBuckets[i] = p_HashTbl->keyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode;
+ h_FmPcd = p_HashTbl->h_FmPcd;
+ h_MissStatsCounters = p_HashTbl->h_MissStatsCounters;
+
+ /* Free the hash */
err = FM_PCD_MatchTableDelete(p_HashTbl);
+ /* Free each hash bucket */
for (i = 0; i < numOfBuckets; i++)
err |= FM_PCD_MatchTableDelete(p_HashBuckets[i]);
if (err)
RETURN_ERROR(MAJOR, err, NO_MSG);
+ /* Free statistics counters for 'miss', if these were allocated */
+ if (h_MissStatsCounters)
+ FM_MURAM_FreeMem(FmPcdGetMuramHandle(h_FmPcd), h_MissStatsCounters);
+
XX_Free(p_HashBuckets);
return E_OK;
@@ -6879,11 +6990,29 @@ t_Error FM_PCD_HashTableModifyMissNextEngine(t_Handle h_HashTbl
t_FmPcdCcNode *p_HashTbl = (t_FmPcdCcNode *)h_HashTbl;
t_Handle h_HashBucket;
uint8_t i;
+ bool nullifyMissStats = FALSE;
t_Error err;
SANITY_CHECK_RETURN_ERROR(h_HashTbl, E_INVALID_HANDLE);
SANITY_CHECK_RETURN_ERROR(p_FmPcdCcNextEngineParams, E_NULL_POINTER);
+ if ((!p_HashTbl->h_MissStatsCounters) && (p_FmPcdCcNextEngineParams->statisticsEn))
+ RETURN_ERROR(MAJOR, E_CONFLICT,
+ ("Statistics are requested for a key, but statistics mode was set"
+ "to 'NONE' upon initialization"));
+
+ if (p_HashTbl->h_MissStatsCounters)
+ {
+ if ((!p_HashTbl->statsEnForMiss) && (p_FmPcdCcNextEngineParams->statisticsEn))
+ nullifyMissStats = TRUE;
+
+ if ((p_HashTbl->statsEnForMiss) && (!p_FmPcdCcNextEngineParams->statisticsEn))
+ {
+ p_HashTbl->statsEnForMiss = FALSE;
+ p_FmPcdCcNextEngineParams->statisticsEn = TRUE;
+ }
+ }
+
for (i = 0; i < p_HashTbl->numOfKeys; i++)
{
h_HashBucket = p_HashTbl->keyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode;
@@ -6894,6 +7023,13 @@ t_Error FM_PCD_HashTableModifyMissNextEngine(t_Handle h_HashTbl
RETURN_ERROR(MAJOR, err, NO_MSG);
}
+ if (nullifyMissStats)
+ {
+ memset(p_HashTbl->h_MissStatsCounters, 0, (2 * FM_PCD_CC_STATS_COUNTER_SIZE));
+ memset(p_HashTbl->h_MissStatsCounters, 0, (2 * FM_PCD_CC_STATS_COUNTER_SIZE));
+ p_HashTbl->statsEnForMiss = TRUE;
+ }
+
return E_OK;
}
@@ -6947,3 +7083,21 @@ t_Error FM_PCD_HashTableFindNGetKeyStatistics(t_Handle h_HashTbl
NULL,
p_KeyStatistics);
}
+
+t_Error FM_PCD_HashTableGetMissStatistics(t_Handle h_HashTbl,
+ t_FmPcdCcKeyStatistics *p_MissStatistics)
+{
+ t_FmPcdCcNode *p_HashTbl = (t_FmPcdCcNode *)h_HashTbl;
+ t_Handle h_HashBucket;
+
+ SANITY_CHECK_RETURN_ERROR(p_HashTbl, E_INVALID_HANDLE);
+ SANITY_CHECK_RETURN_ERROR(p_MissStatistics, E_NULL_POINTER);
+
+ if (!p_HashTbl->statsEnForMiss)
+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Statistics were not enabled for miss"));
+
+ h_HashBucket = p_HashTbl->keyAndNextEngineParams[0].nextEngineParams.params.ccParams.h_CcNode;
+
+ return FM_PCD_MatchTableGetMissStatistics(h_HashBucket,
+ p_MissStatistics);
+}
diff --git a/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_cc.h b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_cc.h
index eeb43a2..997eb5d 100644
--- a/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_cc.h
+++ b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_cc.h
@@ -310,6 +310,17 @@ typedef struct
uint32_t numOfStatsFLRs;
uint32_t countersArraySize;
+ bool isHashBucket; /**< Valid for match table node that is a bucket of a hash table only */
+ t_Handle h_MissStatsCounters; /**< Valid for hash table node and match table that is a bucket;
+ Holds the statistics counters allocated by the hash table and
+ are shared by all hash table buckets; */
+ t_Handle h_PrivMissStatsCounters; /**< Valid for match table node that is a bucket of a hash table only;
+ Holds the statistics counters that were allocated for this node
+ and replaced by the shared counters (allocated by the hash table); */
+ bool statsEnForMiss; /**< Valid for hash table node only; TRUE if statistics are currently
+ enabled for hash 'miss', FALSE otherwise; This parameter affects the
+ statistics count returned to the user; a statistics AD is always present for 'miss'
+ for all hash buckets; */
bool glblMaskUpdated;
t_Handle p_GlblMask;
bool lclMask;
diff --git a/drivers/net/ethernet/freescale/fman/inc/Peripherals/fm_pcd_ext.h b/drivers/net/ethernet/freescale/fman/inc/Peripherals/fm_pcd_ext.h
index e2199e4..00750ba 100644
--- a/drivers/net/ethernet/freescale/fman/inc/Peripherals/fm_pcd_ext.h
+++ b/drivers/net/ethernet/freescale/fman/inc/Peripherals/fm_pcd_ext.h
@@ -1058,7 +1058,9 @@ typedef enum e_FmPcdPlcrRateMode {
*//***************************************************************************/
typedef enum e_FmPcdDoneAction {
e_FM_PCD_ENQ_FRAME = 0, /**< Enqueue frame */
- e_FM_PCD_DROP_FRAME /**< Drop frame */
+ e_FM_PCD_DROP_FRAME /**< Mark this frame as error frame and continue
+ to error flow; 'FM_PORT_FRM_ERR_CLS_DISCARD'
+ flag will be set for this frame. */
} e_FmPcdDoneAction;
/**************************************************************************//**
@@ -2070,8 +2072,7 @@ typedef struct t_FmPcdPlcrProfileParams {
/**************************************************************************//**
@Description Parameters for selecting a location for requested manipulation
*//***************************************************************************/
-typedef struct t_FmManipHdrInfo
-{
+typedef struct t_FmManipHdrInfo {
e_NetHeaderType hdr; /**< Header selection */
e_FmPcdHdrIndex hdrIndex; /**< Relevant only for MPLS, VLAN and tunneled IP. Otherwise should be cleared. */
bool byField; /**< TRUE if the location of manipulation is according to some field in the specific header*/
@@ -2565,7 +2566,7 @@ typedef struct t_FmPcdManipParams {
*//***************************************************************************/
typedef struct t_FmPcdManipReassemIpStats {
/* common counters for both IPv4 and IPv6 */
- uint32_t timeout; /**< Counts the number of TimeOut occurrences */
+ uint32_t timeout; /**< Counts the number of timeout occurrences */
uint32_t rfdPoolBusy; /**< Counts the number of failed attempts to allocate
a Reassembly Frame Descriptor */
uint32_t internalBufferBusy; /**< Counts the number of times an internal buffer busy occurred */
@@ -2932,7 +2933,8 @@ t_Error FM_PCD_MatchTableDelete(t_Handle h_CcNode);
@Return E_OK on success; Error code otherwise.
- @Cautions Allowed only following FM_PCD_MatchTableSet().
+ @Cautions Allowed only following FM_PCD_MatchTableSet();
+ Not relevant in the case the node is of type 'INDEXED_LOOKUP'.
*//***************************************************************************/
t_Error FM_PCD_MatchTableModifyMissNextEngine(t_Handle h_CcNode,
t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams);
@@ -3185,6 +3187,29 @@ t_Error FM_PCD_MatchTableGetKeyStatistics(t_Handle h_CcNode,
t_FmPcdCcKeyStatistics *p_KeyStatistics);
/**************************************************************************//**
+ @Function FM_PCD_MatchTableGetMissStatistics
+
+ @Description This routine may be used to get statistics counters of miss entry
+ in a CC Node.
+
+ If 'e_FM_PCD_CC_STATS_MODE_FRAME' and
+ 'e_FM_PCD_CC_STATS_MODE_BYTE_AND_FRAME' were set for this node,
+ these counters reflect how many frames were not matched to any
+ existing key and therefore passed through the miss entry; The
+ total frames count will be returned in the counter of the
+ first range (as only one frame length range was defined).
+
+ @Param[in] h_CcNode A handle to the node
+ @Param[out] p_MissStatistics Statistics counters for 'miss'
+
+ @Return The statistics for 'miss'.
+
+ @Cautions Allowed only following FM_PCD_MatchTableSet().
+*//***************************************************************************/
+t_Error FM_PCD_MatchTableGetMissStatistics(t_Handle h_CcNode,
+ t_FmPcdCcKeyStatistics *p_MissStatistics);
+
+/**************************************************************************//**
@Function FM_PCD_MatchTableFindNGetKeyStatistics
@Description This routine may be used to get statistics counters of specific key
@@ -3436,6 +3461,27 @@ t_Error FM_PCD_HashTableFindNGetKeyStatistics(t_Handle h_HashTbl
t_FmPcdCcKeyStatistics *p_KeyStatistics);
/**************************************************************************//**
+ @Function FM_PCD_HashTableGetMissStatistics
+
+ @Description This routine may be used to get statistics counters of 'miss'
+ entry of a hash table.
+
+ If 'e_FM_PCD_CC_STATS_MODE_FRAME' and
+ 'e_FM_PCD_CC_STATS_MODE_BYTE_AND_FRAME' were set for this node,
+ these counters reflect how many frames were not matched to any
+ existing key and therefore passed through the miss entry;
+
+ @Param[in] h_HashTbl A handle to a hash table
+ @Param[out] p_MissStatistics Statistics counters for 'miss'
+
+ @Return The statistics for 'miss'.
+
+ @Cautions Allowed only following FM_PCD_HashTableSet().
+*//***************************************************************************/
+t_Error FM_PCD_HashTableGetMissStatistics(t_Handle h_HashTbl,
+ t_FmPcdCcKeyStatistics *p_MissStatistics);
+
+/**************************************************************************//**
@Function FM_PCD_ManipNodeSet
@Description This routine should be called for defining a manipulation
diff --git a/drivers/net/ethernet/freescale/fman/inc/integrations/T4240/dpaa_integration_ext.h b/drivers/net/ethernet/freescale/fman/inc/integrations/T4240/dpaa_integration_ext.h
index 52ed8f8..b6780b9 100644
--- a/drivers/net/ethernet/freescale/fman/inc/integrations/T4240/dpaa_integration_ext.h
+++ b/drivers/net/ethernet/freescale/fman/inc/integrations/T4240/dpaa_integration_ext.h
@@ -256,6 +256,9 @@ typedef enum
#define FM_DEQ_PIPELINE_PARAMS_FOR_OP
#define FM_QMI_NO_SINGLE_ECC_EXCEPTION
+
+#define FM_NO_GUARANTEED_RESET_VALUES
+
/* FM errata */
#define FM_HEAVY_TRAFFIC_HANG_ERRATA_FMAN_A005669
#define FM_WRONG_RESET_VALUES_ERRATA_FMAN_A005127
diff --git a/drivers/net/ethernet/freescale/fman/src/inc/wrapper/lnxwrp_exp_sym.h b/drivers/net/ethernet/freescale/fman/src/inc/wrapper/lnxwrp_exp_sym.h
index 4c60893..13c61d2 100644
--- a/drivers/net/ethernet/freescale/fman/src/inc/wrapper/lnxwrp_exp_sym.h
+++ b/drivers/net/ethernet/freescale/fman/src/inc/wrapper/lnxwrp_exp_sym.h
@@ -88,6 +88,8 @@ EXPORT_SYMBOL(FM_PCD_MatchTableGetNextEngine);
EXPORT_SYMBOL(FM_PCD_MatchTableGetKeyCounter);
EXPORT_SYMBOL(FM_PCD_MatchTableGetKeyStatistics);
EXPORT_SYMBOL(FM_PCD_MatchTableFindNGetKeyStatistics);
+EXPORT_SYMBOL(FM_PCD_MatchTableGetMissStatistics);
+EXPORT_SYMBOL(FM_PCD_HashTableGetMissStatistics);
EXPORT_SYMBOL(FM_PCD_HashTableSet);
EXPORT_SYMBOL(FM_PCD_HashTableDelete);
EXPORT_SYMBOL(FM_PCD_HashTableAddKey);
diff --git a/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm.c b/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm.c
index 519c443..3104385 100644
--- a/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm.c
+++ b/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm.c
@@ -1932,6 +1932,155 @@ invalid_port_id:
XX_Free(param);
break;
}
+
+
+#if defined(CONFIG_COMPAT)
+ case FM_PCD_IOC_MATCH_TABLE_GET_MISS_STAT_COMPAT:
+#endif
+ case FM_PCD_IOC_MATCH_TABLE_GET_MISS_STAT:
+ {
+ ioc_fm_pcd_cc_tbl_get_miss_params_t param;
+
+#if defined(CONFIG_COMPAT)
+ if (compat)
+ {
+ ioc_compat_fm_pcd_cc_tbl_get_miss_params_t *compat_param;
+
+ compat_param = (ioc_compat_fm_pcd_cc_tbl_get_miss_params_t *) XX_Malloc(
+ sizeof(ioc_compat_fm_pcd_cc_tbl_get_miss_params_t));
+ if (!compat_param)
+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
+
+ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_cc_tbl_get_miss_params_t));
+ if (copy_from_user(compat_param,
+ (ioc_compat_fm_pcd_cc_tbl_get_miss_params_t *)compat_ptr(arg),
+ sizeof(ioc_compat_fm_pcd_cc_tbl_get_miss_params_t)))
+ {
+ XX_Free(compat_param);
+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
+ }
+
+ compat_copy_fm_pcd_cc_tbl_get_miss(compat_param, &param, COMPAT_US_TO_K);
+
+ XX_Free(compat_param);
+ }
+ else
+#endif
+ {
+ if (copy_from_user(&param, (ioc_fm_pcd_cc_tbl_get_miss_params_t *)arg,
+ sizeof(ioc_fm_pcd_cc_tbl_get_miss_params_t)))
+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
+ }
+
+
+ err = FM_PCD_MatchTableGetMissStatistics((t_Handle) param.id,
+ (t_FmPcdCcKeyStatistics *) &param.miss_statistics);
+
+#if defined(CONFIG_COMPAT)
+ if (compat)
+ {
+ ioc_compat_fm_pcd_cc_tbl_get_miss_params_t *compat_param;
+
+ compat_param = (ioc_compat_fm_pcd_cc_tbl_get_miss_params_t*) XX_Malloc(
+ sizeof(ioc_compat_fm_pcd_cc_tbl_get_miss_params_t));
+ if (!compat_param)
+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
+
+ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_cc_tbl_get_miss_params_t));
+ compat_copy_fm_pcd_cc_tbl_get_miss(compat_param, &param, COMPAT_K_TO_US);
+ if (copy_to_user((ioc_compat_fm_pcd_cc_tbl_get_miss_params_t*) compat_ptr(arg),
+ compat_param,
+ sizeof(ioc_compat_fm_pcd_cc_tbl_get_miss_params_t)))
+ RETURN_ERROR(MINOR, E_READ_FAILED, NO_MSG);
+
+ XX_Free(compat_param);
+ }
+ else
+#endif
+ {
+ if (copy_to_user((ioc_fm_pcd_hash_table_params_t *)arg,
+ &param,
+ sizeof(ioc_fm_pcd_cc_tbl_get_miss_params_t)))
+ RETURN_ERROR(MINOR, E_READ_FAILED, NO_MSG);
+ }
+
+ break;
+ }
+
+
+#if defined(CONFIG_COMPAT)
+ case FM_PCD_IOC_HASH_TABLE_GET_MISS_STAT_COMPAT:
+#endif
+ case FM_PCD_IOC_HASH_TABLE_GET_MISS_STAT:
+ {
+ ioc_fm_pcd_cc_tbl_get_miss_params_t param;
+
+#if defined(CONFIG_COMPAT)
+ if (compat)
+ {
+ ioc_compat_fm_pcd_cc_tbl_get_miss_params_t *compat_param;
+
+ compat_param = (ioc_compat_fm_pcd_cc_tbl_get_miss_params_t *) XX_Malloc(
+ sizeof(ioc_compat_fm_pcd_cc_tbl_get_miss_params_t));
+ if (!compat_param)
+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
+
+ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_cc_tbl_get_miss_params_t));
+ if (copy_from_user(compat_param,
+ (ioc_compat_fm_pcd_cc_tbl_get_miss_params_t *)compat_ptr(arg),
+ sizeof(ioc_compat_fm_pcd_cc_tbl_get_miss_params_t)))
+ {
+ XX_Free(compat_param);
+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
+ }
+
+ compat_copy_fm_pcd_cc_tbl_get_miss(compat_param, &param, COMPAT_US_TO_K);
+
+ XX_Free(compat_param);
+ }
+ else
+#endif
+ {
+ if (copy_from_user(&param, (ioc_fm_pcd_cc_tbl_get_miss_params_t *)arg,
+ sizeof(ioc_fm_pcd_cc_tbl_get_miss_params_t)))
+ RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG);
+ }
+
+
+ err = FM_PCD_HashTableGetMissStatistics((t_Handle) param.id,
+ (t_FmPcdCcKeyStatistics *) &param.miss_statistics);
+
+#if defined(CONFIG_COMPAT)
+ if (compat)
+ {
+ ioc_compat_fm_pcd_cc_tbl_get_miss_params_t *compat_param;
+
+ compat_param = (ioc_compat_fm_pcd_cc_tbl_get_miss_params_t*) XX_Malloc(
+ sizeof(ioc_compat_fm_pcd_cc_tbl_get_miss_params_t));
+ if (!compat_param)
+ RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD"));
+
+ memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_cc_tbl_get_miss_params_t));
+ compat_copy_fm_pcd_cc_tbl_get_miss(compat_param, &param, COMPAT_K_TO_US);
+ if (copy_to_user((ioc_compat_fm_pcd_cc_tbl_get_miss_params_t*) compat_ptr(arg),
+ compat_param,
+ sizeof(ioc_compat_fm_pcd_cc_tbl_get_miss_params_t)))
+ RETURN_ERROR(MINOR, E_READ_FAILED, NO_MSG);
+
+ XX_Free(compat_param);
+ }
+ else
+#endif
+ {
+ if (copy_to_user((ioc_fm_pcd_hash_table_params_t *)arg,
+ &param,
+ sizeof(ioc_fm_pcd_cc_tbl_get_miss_params_t)))
+ RETURN_ERROR(MINOR, E_READ_FAILED, NO_MSG);
+ }
+
+ break;
+ }
+
#if defined(CONFIG_COMPAT)
case FM_PCD_IOC_HASH_TABLE_SET_COMPAT:
#endif
diff --git a/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm_compat.c b/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm_compat.c
index 68142b6..3c1ac30 100644
--- a/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm_compat.c
+++ b/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm_compat.c
@@ -506,7 +506,7 @@ void compat_copy_fm_pcd_hash_table(
{
if (compat == COMPAT_US_TO_K)
{
- param-> max_num_of_keys = compat_param->max_num_of_keys;
+ param->max_num_of_keys = compat_param->max_num_of_keys;
param->statistics_mode = compat_param->statistics_mode;
param->kg_hash_shift = compat_param->kg_hash_shift;
param->hash_res_mask = compat_param->hash_res_mask;
@@ -516,7 +516,7 @@ void compat_copy_fm_pcd_hash_table(
}
else
{
- compat_param-> max_num_of_keys = param->max_num_of_keys;
+ compat_param->max_num_of_keys = param->max_num_of_keys;
compat_param->statistics_mode = param->statistics_mode;
compat_param->kg_hash_shift = param->kg_hash_shift;
compat_param->hash_res_mask = param->hash_res_mask;
@@ -844,6 +844,22 @@ void compat_copy_fm_port_vsp_alloc_params(
}
#endif /* (DPAA_VERSION >= 11) */
+void compat_copy_fm_pcd_cc_tbl_get_miss(
+ ioc_compat_fm_pcd_cc_tbl_get_miss_params_t *compat_param,
+ ioc_fm_pcd_cc_tbl_get_miss_params_t *param,
+ uint8_t compat)
+{
+ if (compat == COMPAT_US_TO_K)
+ {
+ param->id = compat_pcd_id2ptr(compat_param->id);
+ memcpy(&param->miss_statistics, &compat_param->miss_statistics, sizeof(ioc_fm_pcd_cc_key_statistics_t));
+ } else {
+ compat_param->id = compat_add_ptr2id(param->id, FM_MAP_TYPE_PCD_NODE);
+ memcpy(&compat_param->miss_statistics, &param->miss_statistics, sizeof(ioc_fm_pcd_cc_key_statistics_t));
+ }
+}
+
+
void compat_copy_fm_pcd_net_env(
ioc_compat_fm_pcd_net_env_params_t *compat_param,
ioc_fm_pcd_net_env_params_t *param,
diff --git a/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm_compat.h b/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm_compat.h
index f8655db..ae19b68 100644
--- a/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm_compat.h
+++ b/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm_compat.h
@@ -521,6 +521,12 @@ typedef struct ioc_compat_fm_ctrl_mon_counters_params_t {
compat_uptr_t p_mon;
} ioc_compat_fm_ctrl_mon_counters_params_t;
+typedef struct ioc_compat_fm_pcd_cc_tbl_get_miss_params_t {
+ compat_uptr_t id;
+ ioc_fm_pcd_cc_key_statistics_t miss_statistics;
+} ioc_compat_fm_pcd_cc_tbl_get_miss_params_t;
+
+
/* } pcd compat structures */
void compat_obj_delete(
@@ -568,6 +574,11 @@ void compat_copy_fm_pcd_cc_tree(
ioc_fm_pcd_cc_tree_params_t *param,
uint8_t compat);
+void compat_copy_fm_pcd_cc_tbl_get_miss(
+ ioc_compat_fm_pcd_cc_tbl_get_miss_params_t *compat_param,
+ ioc_fm_pcd_cc_tbl_get_miss_params_t *param,
+ uint8_t compat);
+
void compat_fm_pcd_prs_sw(
ioc_compat_fm_pcd_prs_sw_params_t *compat_param,
ioc_fm_pcd_prs_sw_params_t *param,
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index eb60e10..8e3b8e5 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -902,6 +902,9 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
FSL_GIANFAR_DEV_HAS_36BIT_ADDR;
+ /* default pause frame settings */
+ priv->rx_pause = priv->tx_pause = true;
+
ctype = of_get_property(np, "phy-connection-type", NULL);
/* We only care about rgmii-id. The rest are autodetected */
@@ -1232,8 +1235,10 @@ static int gfar_probe(struct platform_device *ofdev)
/* We need to delay at least 3 TX clocks */
udelay(2);
- tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
- gfar_write(&regs->maccfg1, tempval);
+ /* the soft reset bit is not self-resetting, so we need to
+ * clear it before resuming normal operation
+ */
+ gfar_write(&regs->maccfg1, 0);
/* Initialize MACCFG2. */
tempval = MACCFG2_INIT_SETTINGS;
@@ -3983,6 +3988,25 @@ static irqreturn_t gfar_interrupt(int irq, void *grp_id)
return IRQ_HANDLED;
}
+/* toggle pause frame settings */
+void gfar_configure_pause(struct gfar_private *priv, bool en)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 tempval = gfar_read(&regs->maccfg1);
+
+ if (en && priv->rx_pause)
+ tempval |= MACCFG1_RX_FLOW;
+ else
+ tempval &= ~MACCFG1_RX_FLOW;
+
+ if (en && priv->tx_pause)
+ tempval |= MACCFG1_TX_FLOW;
+ else
+ tempval &= ~MACCFG1_TX_FLOW;
+
+ gfar_write(&regs->maccfg1, tempval);
+}
+
/* Called every time the controller might need to be made
* aware of new link state. The PHY code conveys this
* information through variables in the phydev structure, and this
@@ -4014,6 +4038,9 @@ static void adjust_link(struct net_device *dev)
else
tempval |= MACCFG2_FULL_DUPLEX;
+ /* update pause frame settings */
+ gfar_configure_pause(priv, !!phydev->duplex);
+
priv->oldduplex = phydev->duplex;
}
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index a856230..e74dfeb 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -1319,6 +1319,8 @@ struct gfar_private {
int oldspeed;
int oldduplex;
int oldlink;
+ bool rx_pause;
+ bool tx_pause;
struct gfar_irqinfo *irqinfo[MAXGROUPS][GFAR_NUM_IRQS];
@@ -1444,6 +1446,7 @@ void gfar_init_sysfs(struct net_device *dev);
int gfar_set_features(struct net_device *dev, netdev_features_t features);
extern void gfar_check_rx_parser_mode(struct gfar_private *priv);
extern void gfar_vlan_mode(struct net_device *dev, netdev_features_t features);
+void gfar_configure_pause(struct gfar_private *priv, bool en);
extern const struct ethtool_ops gfar_ethtool_ops;
extern struct list_head gfar_recycle_queues;
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 565b26d..c03c5cd 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -539,6 +539,34 @@ static int gfar_sringparam(struct net_device *dev,
return err;
}
+static void gfar_gpauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ pause->autoneg = AUTONEG_ENABLE;
+ if (priv->rx_pause)
+ pause->rx_pause = 1;
+ if (priv->tx_pause)
+ pause->tx_pause = 1;
+}
+
+static int gfar_spauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phydev;
+
+ priv->rx_pause = !!pause->rx_pause;
+ priv->tx_pause = !!pause->tx_pause;
+
+ /* update h/w settings, if link is up */
+ if (phydev && phydev->link)
+ gfar_configure_pause(priv, !!phydev->duplex);
+
+ return 0;
+}
+
int gfar_set_features(struct net_device *dev, netdev_features_t features)
{
struct gfar_private *priv = netdev_priv(dev);
@@ -1872,6 +1900,8 @@ const struct ethtool_ops gfar_ethtool_ops = {
.set_coalesce = gfar_scoalesce,
.get_ringparam = gfar_gringparam,
.set_ringparam = gfar_sringparam,
+ .get_pauseparam = gfar_gpauseparam,
+ .set_pauseparam = gfar_spauseparam,
.get_strings = gfar_gstrings,
.get_sset_count = gfar_sset_count,
.get_ethtool_stats = gfar_fill_stats,
diff --git a/drivers/staging/fsl_dpa_offload/dpa_classifier.c b/drivers/staging/fsl_dpa_offload/dpa_classifier.c
index a39fc69..5056b2e 100644
--- a/drivers/staging/fsl_dpa_offload/dpa_classifier.c
+++ b/drivers/staging/fsl_dpa_offload/dpa_classifier.c
@@ -462,6 +462,9 @@ int dpa_classif_table_modify_miss_action(int td,
return -EBUSY;
}
}
+
+ memcpy(&ptable->miss_action, miss_action, sizeof(*miss_action));
+
RELEASE_OBJECT(ptable);
dpa_cls_dbg(("DEBUG: dpa_classifier %s (%d) <--\n", __func__,
@@ -3230,6 +3233,26 @@ static inline void key_apply_mask(const struct dpa_offload_lookup_key *key,
new_key[i] = key->byte[i] & key->mask[i];
}
+int dpa_classif_get_miss_action(int td, struct dpa_cls_tbl_action *miss_action)
+{
+ struct dpa_cls_table *ptable;
+
+ if (!miss_action)
+ return -EINVAL;
+
+ LOCK_OBJECT(table_array, td, ptable, -EINVAL);
+ if (ptable->miss_action.type == DPA_CLS_TBL_ACTION_NONE) {
+ /* No miss action was specified for this table */
+ RELEASE_OBJECT(ptable);
+ return -ENODEV;
+ } else
+ memcpy(miss_action, &ptable->miss_action, sizeof(*miss_action));
+
+ RELEASE_OBJECT(ptable);
+
+ return 0;
+}
+
static int nat_hm_check_params(const struct dpa_cls_hm_nat_params *nat_params)
{
unsigned int ip_ver = 0;
diff --git a/drivers/staging/fsl_dpa_offload/dpa_classifier.h b/drivers/staging/fsl_dpa_offload/dpa_classifier.h
index 0667782..659e1b7 100644
--- a/drivers/staging/fsl_dpa_offload/dpa_classifier.h
+++ b/drivers/staging/fsl_dpa_offload/dpa_classifier.h
@@ -206,6 +206,9 @@ struct dpa_cls_table {
/* (Initial) parameters of the DPA Classifier table. */
struct dpa_cls_tbl_params params;
+ /* Table miss action. */
+ struct dpa_cls_tbl_action miss_action;
+
/* Access control object for this table to avoid race conditions. */
struct mutex access;
};
@@ -675,6 +678,12 @@ int dpa_classif_import_static_hm(void *hm, int next_hmd, int *hmd);
void *dpa_classif_get_static_hm_handle(int hmd);
/*
+ * Provides details about the miss action configured on a classification
+ * table.
+ */
+int dpa_classif_get_miss_action(int td, struct dpa_cls_tbl_action *miss_action);
+
+/*
* Locks a header manipulation chain (marks as "used"). The header manipulation
* operations cannot be removed as long as they are locked. The function
* provides the FMan driver handle of the manip node which is chain head.
diff --git a/drivers/staging/fsl_dpa_offload/dpa_stats.c b/drivers/staging/fsl_dpa_offload/dpa_stats.c
index b911732..ef6dd1a 100644
--- a/drivers/staging/fsl_dpa_offload/dpa_stats.c
+++ b/drivers/staging/fsl_dpa_offload/dpa_stats.c
@@ -114,6 +114,46 @@ static int check_dpa_stats_params(const struct dpa_stats_params *params)
return 0;
}
+static int set_cnt_classif_tbl_retrieve_func(struct dpa_stats_cnt_cb *cnt_cb)
+{
+ switch (cnt_cb->tbl_cb.type) {
+ case DPA_CLS_TBL_HASH:
+ cnt_cb->f_get_cnt_stats = get_cnt_cls_tbl_hash_stats;
+ break;
+ case DPA_CLS_TBL_INDEXED:
+ cnt_cb->f_get_cnt_stats = get_cnt_cls_tbl_index_stats;
+ break;
+ case DPA_CLS_TBL_EXACT_MATCH:
+ cnt_cb->f_get_cnt_stats = get_cnt_cls_tbl_match_stats;
+ break;
+ default:
+ log_err("Unsupported DPA Classifier table type %d\n",
+ cnt_cb->tbl_cb.type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int set_cnt_classif_node_retrieve_func(struct dpa_stats_cnt_cb *cnt_cb,
+ enum dpa_stats_classif_node_type ccnode_type)
+{
+ switch (ccnode_type) {
+ case DPA_CLS_TBL_HASH:
+ cnt_cb->f_get_cnt_stats = get_cnt_ccnode_hash_stats;
+ break;
+ case DPA_CLS_TBL_INDEXED:
+ cnt_cb->f_get_cnt_stats = get_cnt_ccnode_index_stats;
+ break;
+ case DPA_CLS_TBL_EXACT_MATCH:
+ cnt_cb->f_get_cnt_stats = get_cnt_ccnode_match_stats;
+ break;
+ default:
+ log_err("Unsupported Classification Node type %d", ccnode_type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int check_tbl_cls_counter(struct dpa_stats_cnt_cb *cnt_cb,
struct dpa_stats_lookup_key *entry)
{
@@ -132,7 +172,6 @@ static int check_tbl_cls_counter(struct dpa_stats_cnt_cb *cnt_cb,
dump_lookup_key(&entry->key);
return -EIO;
}
- cnt_cb->f_get_cnt_stats = get_cnt_cls_tbl_hash_stats;
break;
case DPA_CLS_TBL_INDEXED:
err = FM_PCD_MatchTableGetKeyStatistics(
@@ -145,7 +184,6 @@ static int check_tbl_cls_counter(struct dpa_stats_cnt_cb *cnt_cb,
dump_lookup_key(&entry->key);
return -EIO;
}
- cnt_cb->f_get_cnt_stats = get_cnt_cls_tbl_index_stats;
break;
case DPA_CLS_TBL_EXACT_MATCH:
err = FM_PCD_MatchTableFindNGetKeyStatistics(entry->cc_node,
@@ -159,7 +197,6 @@ static int check_tbl_cls_counter(struct dpa_stats_cnt_cb *cnt_cb,
dump_lookup_key(&entry->key);
return -EINVAL;
}
- cnt_cb->f_get_cnt_stats = get_cnt_cls_tbl_match_stats;
break;
default:
log_err("Unsupported DPA Classifier table type %d\n",
@@ -189,7 +226,6 @@ static int check_ccnode_counter(struct dpa_stats_cnt_cb *cnt_cb,
dump_lookup_key(key);
return -EIO;
}
- cnt_cb->f_get_cnt_stats = get_cnt_ccnode_hash_stats;
break;
case DPA_STATS_CLASSIF_NODE_INDEXED:
err = FM_PCD_MatchTableGetKeyStatistics(
@@ -203,7 +239,6 @@ static int check_ccnode_counter(struct dpa_stats_cnt_cb *cnt_cb,
dump_lookup_key(key);
return -EIO;
}
- cnt_cb->f_get_cnt_stats = get_cnt_ccnode_index_stats;
break;
case DPA_STATS_CLASSIF_NODE_EXACT_MATCH:
err = FM_PCD_MatchTableFindNGetKeyStatistics(
@@ -217,7 +252,6 @@ static int check_ccnode_counter(struct dpa_stats_cnt_cb *cnt_cb,
dump_lookup_key(key);
return -EINVAL;
}
- cnt_cb->f_get_cnt_stats = get_cnt_ccnode_match_stats;
break;
default:
log_err("Unsupported Classification Node type %d",
@@ -227,6 +261,47 @@ static int check_ccnode_counter(struct dpa_stats_cnt_cb *cnt_cb,
return 0;
}
+static int check_ccnode_miss_counter(void *cc_node, uint32_t id,
+ enum dpa_stats_classif_node_type ccnode_type)
+{
+ t_FmPcdCcKeyStatistics stats;
+ int err;
+
+ switch (ccnode_type) {
+ case DPA_STATS_CLASSIF_NODE_HASH:
+ err = FM_PCD_HashTableGetMissStatistics(cc_node, &stats);
+ if (err != 0) {
+ log_err("Check failed for Classification Node counter "
+ "id %d due to incorrect parameters: handle="
+ "0x%p\n", id, cc_node);
+ return -EIO;
+ }
+ break;
+ case DPA_STATS_CLASSIF_NODE_INDEXED:
+ err = FM_PCD_MatchTableGetMissStatistics(cc_node, &stats);
+ if (err != 0) {
+ log_err("Check failed for Classification Node counter "
+ "id %d due to incorrect parameters: handle=0x%p"
+ "\n", id, cc_node);
+ return -EIO;
+ }
+ break;
+ case DPA_STATS_CLASSIF_NODE_EXACT_MATCH:
+ err = FM_PCD_MatchTableGetMissStatistics(cc_node, &stats);
+ if (err != 0) {
+ log_err("Check failed for Classification Node counter "
+ "id %d due to incorrect parameters: handle=0x%p"
+ "\n", id, cc_node);
+ return -EINVAL;
+ }
+ break;
+ default:
+ log_err("Unsupported Classification Node type %d", ccnode_type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int get_new_cnt(struct dpa_stats *dpa_stats,
struct dpa_stats_cnt_cb **cnt_cb)
{
@@ -344,8 +419,7 @@ static int put_cnt(struct dpa_stats *dpa_stats, struct dpa_stats_cnt_cb *cnt_cb)
}
/* Mark the Counter id as 'not used' */
- dpa_stats->used_cnt_ids[cnt_cb->index] =
- DPA_OFFLD_INVALID_OBJECT_ID;
+ dpa_stats->used_cnt_ids[cnt_cb->index] = DPA_OFFLD_INVALID_OBJECT_ID;
/* Clear all 'cnt_cb' information */
cnt_cb->index = DPA_OFFLD_INVALID_OBJECT_ID;
@@ -627,7 +701,7 @@ static int free_resources(void)
/* Sanity check */
if (!gbl_dpa_stats) {
log_err("DPA Stats component is not initialized\n");
- return;
+ return 0;
}
dpa_stats = gbl_dpa_stats;
@@ -1149,11 +1223,20 @@ static int set_frag_manip(int td, struct dpa_stats_lookup_key *entry)
struct t_FmPcdManipStats stats;
int err = 0;
- err = dpa_classif_table_lookup_by_key(td, &entry->key, &action);
- if (err != 0) {
- log_err("Cannot retrieve next action parameters from table "
- "%d\n", td);
- return -EINVAL;
+ if (entry->miss_key) {
+ err = dpa_classif_get_miss_action(td, &action);
+ if (err != 0) {
+ log_err("Cannot retrieve miss action parameters from "
+ "table %d\n", td);
+ return -EINVAL;
+ }
+ } else {
+ err = dpa_classif_table_lookup_by_key(td, &entry->key, &action);
+ if (err != 0) {
+ log_err("Cannot retrieve next action parameters from "
+ "table %d\n", td);
+ return -EINVAL;
+ }
}
if (action.type != DPA_CLS_TBL_ACTION_ENQ) {
@@ -1175,7 +1258,6 @@ static int set_frag_manip(int td, struct dpa_stats_lookup_key *entry)
log_err("Invalid Fragmentation manip handle\n");
return -EINVAL;
}
-
return 0;
}
@@ -1447,13 +1529,6 @@ static int set_cnt_classif_tbl_cb(struct dpa_stats_cnt_cb *cnt_cb,
return -EINVAL;
}
- /* Copy the key descriptor */
- err = copy_key_descriptor(&prm.key, &cnt_tbl_cb->keys[0].key);
- if (err != 0) {
- log_err("Cannot copy key descriptor from user parameters\n");
- return -EINVAL;
- }
-
/* Store CcNode handle and set number of keys to one */
cnt_tbl_cb->keys[0].cc_node = cls_tbl.cc_node;
cnt_tbl_cb->keys[0].valid = TRUE;
@@ -1462,11 +1537,35 @@ static int set_cnt_classif_tbl_cb(struct dpa_stats_cnt_cb *cnt_cb,
/* Store DPA Classifier Table type */
cnt_tbl_cb->type = cls_tbl.type;
- /* Check the Classifier Table counter */
- err = check_tbl_cls_counter(cnt_cb, &cnt_tbl_cb->keys[0]);
+ /* Set retrieve function depending on table type */
+ err = set_cnt_classif_tbl_retrieve_func(cnt_cb);
if (err != 0)
return -EINVAL;
+ /* Determine if counter is for 'miss' entry or for a valid key */
+ if (!prm.key) {
+ cnt_tbl_cb->keys[0].miss_key = TRUE;
+
+ /* Check the Classifier Table counter parameters for "miss" */
+ err = check_ccnode_miss_counter(cnt_tbl_cb->keys[0].cc_node,
+ cnt_cb->id, cnt_tbl_cb->type);
+ if (err != 0)
+ return -EINVAL;
+ } else {
+ /* Copy the key descriptor */
+ err = copy_key_descriptor(prm.key, &cnt_tbl_cb->keys[0].key);
+ if (err != 0) {
+ log_err("Cannot copy key descriptor from user "
+ "parameters\n");
+ return -EINVAL;
+ }
+
+ /* Check the Classifier Table counter */
+ err = check_tbl_cls_counter(cnt_cb, &cnt_tbl_cb->keys[0]);
+ if (err != 0)
+ return -EINVAL;
+ }
+
if (frag_stats) {
err = set_frag_manip(prm.td, &cnt_tbl_cb->keys[0]);
if (err < 0) {
@@ -1518,23 +1617,40 @@ static int set_cnt_ccnode_cb(struct dpa_stats_cnt_cb *cnt_cb,
return -EFAULT;
}
- /* Copy the key descriptor */
- err = copy_key_descriptor(&prm.key, &cnt_cb->ccnode_cb.keys[0]);
- if (err != 0) {
- log_err("Cannot copy key descriptor from user parameters\n");
- return -EINVAL;
- }
-
/* Store CcNode handle and set number of keys to one */
cnt_cb->ccnode_cb.cc_node = prm.cc_node;
cnt_cb->members_num = 1;
- /* Check the Classifier Node counter parameters */
- err = check_ccnode_counter(cnt_cb,
- prm.ccnode_type, &cnt_cb->ccnode_cb.keys[0]);
+ /* Set retrieve function depending on counter type */
+ err = set_cnt_classif_node_retrieve_func(cnt_cb, prm.ccnode_type);
if (err != 0)
return -EINVAL;
+ if (!params->classif_node_params.key) {
+ /* Set the key byte to NULL, to mark it for 'miss' entry */
+ cnt_cb->ccnode_cb.keys[0].byte = NULL;
+
+ /* Check the Classifier Node counter parameters for 'miss' */
+ err = check_ccnode_miss_counter(cnt_cb->ccnode_cb.cc_node,
+ cnt_cb->id, prm.ccnode_type);
+ if (err != 0)
+ return -EINVAL;
+ } else {
+ /* Copy the key descriptor */
+ err = copy_key_descriptor(prm.key, &cnt_cb->ccnode_cb.keys[0]);
+ if (err != 0) {
+ log_err("Cannot copy key descriptor from user "
+ "parameters\n");
+ return -EINVAL;
+ }
+
+ /* Check the Classifier Node counter parameters */
+ err = check_ccnode_counter(cnt_cb, prm.ccnode_type,
+ &cnt_cb->ccnode_cb.keys[0]);
+ if (err != 0)
+ return -EINVAL;
+ }
+
/* Map Classif Node counter selection to CcNode statistics */
cnt_sel_to_stats(&cnt_cb->info,
dpa_stats->stats_sel[DPA_STATS_CNT_CLASSIF_NODE],
@@ -1605,7 +1721,7 @@ static int set_cnt_traffic_mng_cb(struct dpa_stats_cnt_cb *cnt_cb,
struct dpa_stats *dpa_stats = cnt_cb->dpa_stats;
uint32_t cnt_sel = params->traffic_mng_params.cnt_sel;
uint32_t cnt_src = params->traffic_mng_params.src;
- uint64_t frames, bytes;
+ u64 frames = 0, bytes = 0;
int err = 0;
if (!dpa_stats) {
@@ -1618,10 +1734,19 @@ static int set_cnt_traffic_mng_cb(struct dpa_stats_cnt_cb *cnt_cb,
return -EFAULT;
}
- if (cnt_sel == 0 || cnt_sel > DPA_STATS_CNT_NUM_ALL) {
- log_err("Parameter cnt_sel %d must be in range (1 - %d) for "
- "counter id %d\n", cnt_sel, DPA_STATS_CNT_NUM_ALL,
- cnt_cb->id);
+ /* check the counter src */
+ if (cnt_src > DPA_STATS_CNT_TRAFFIC_CG) {
+ log_err("Parameter src %d must be in range (%d - %d) for "
+ "counter id %d\n", cnt_src, DPA_STATS_CNT_TRAFFIC_CLASS,
+ DPA_STATS_CNT_TRAFFIC_CG, cnt_cb->id);
+ return -EINVAL;
+ }
+
+ /* check the counter selection */
+ if (cnt_sel > DPA_STATS_CNT_NUM_ALL) {
+ log_err("Parameter cnt_sel %d must be in range (%d - %d) for "
+ "counter id %d\n", cnt_sel, DPA_STATS_CNT_NUM_OF_BYTES,
+ DPA_STATS_CNT_NUM_ALL, cnt_cb->id);
return -EINVAL;
}
@@ -1659,12 +1784,11 @@ static int set_cnt_traffic_mng_cb(struct dpa_stats_cnt_cb *cnt_cb,
return -EINVAL;
}
- /* Decrease one to obtain the mask for all statistics */
+ /* Set number of statistics that will be written by this counter */
if (cnt_sel == DPA_STATS_CNT_NUM_ALL)
- cnt_sel -= 1;
-
- cnt_sel_to_stats(&cnt_cb->info,
- dpa_stats->stats_sel[DPA_STATS_CNT_TRAFFIC_MNG], cnt_sel);
+ cnt_cb->info.stats_num = 2;
+ else
+ cnt_cb->info.stats_num = 1;
/* Set number of bytes that will be written by this counter */
cnt_cb->bytes_num = STATS_VAL_SIZE * cnt_cb->info.stats_num;
@@ -1899,36 +2023,49 @@ static int set_cls_cnt_plcr_cb(struct dpa_stats_cnt_cb *cnt_cb,
}
static int set_cls_cnt_classif_tbl_pair(
- struct dpa_stats_cnt_classif_tbl_cb *cnt_tbl_cb, int td,
+ struct dpa_stats_cnt_cb *cnt_cb, int td,
const struct dpa_offload_lookup_key_pair *pair,
struct dpa_stats_lookup_key *lookup_key)
{
+ struct dpa_stats_cnt_classif_tbl_cb *cnt_tbl_cb = &cnt_cb->tbl_cb;
struct dpa_cls_tbl_params cls_tbl;
struct dpa_offload_lookup_key tbl_key;
struct dpa_cls_tbl_action action;
int err = 0;
- /* Check that key byte is not NULL */
- if (!pair->first_key.byte) {
- log_err("First key descriptor byte of the user pair cannot be "
- "NULL for table descriptor %d\n", td);
- return -EFAULT;
- }
+ /* If either the entire 'pair' or the first key is NULL, then retrieve
+ * the action associated with the 'miss action '*/
+ if ((!pair) || (pair && !pair->first_key)) {
+ err = dpa_classif_get_miss_action(td, &action);
+ if (err != 0) {
+ log_err("Cannot retrieve miss action parameters for "
+ "table descriptor %d\n", td);
+ return -EINVAL;
+ }
+ } else {
+ /* Check that key byte is not NULL */
+ if (!pair->first_key->byte) {
+ log_err("First key descriptor byte of the user pair "
+ "cannot be NULL for table descriptor %d\n", td);
+ return -EFAULT;
+ }
- /* Copy first key descriptor parameters*/
- err = copy_key_descriptor(&pair->first_key, &tbl_key);
- if (err != 0) {
- log_err("Cannot copy first key descriptor of the user pair\n");
- return -EINVAL;
- }
+ /* Copy first key descriptor parameters*/
+ err = copy_key_descriptor(pair->first_key, &tbl_key);
+ if (err != 0) {
+ log_err("Cannot copy second key descriptor of "
+ "the user pair\n");
+ return -EINVAL;
+ }
- /* Use the first key of the pair to lookup in the classifier
- * table the next table connected on a "next-action" */
- err = dpa_classif_table_lookup_by_key(td, &tbl_key, &action);
- if (err != 0) {
- log_err("Cannot retrieve next action parameters for table "
- "descriptor %d\n", td);
- return -EINVAL;
+ /* Use the first key of the pair to lookup in the classifier
+ * table the next table connected on a "next-action" */
+ err = dpa_classif_table_lookup_by_key(td, &tbl_key, &action);
+ if (err != 0) {
+ log_err("Cannot retrieve next action parameters for "
+ "table descriptor %d\n", td);
+ return -EINVAL;
+ }
}
if (action.type != DPA_CLS_TBL_ACTION_NEXT_TABLE) {
@@ -1948,23 +2085,41 @@ static int set_cls_cnt_classif_tbl_pair(
/* Store DPA Classifier Table type */
cnt_tbl_cb->type = cls_tbl.type;
+ /* Set retrieve function depending on table type */
+ set_cnt_classif_tbl_retrieve_func(cnt_cb);
+
/* Store CcNode handle */
lookup_key->cc_node = cls_tbl.cc_node;
- /* Set as lookup key the second key descriptor from the pair */
- err = copy_key_descriptor(&pair->second_key, &lookup_key->key);
- if (err != 0) {
- log_err("Cannot copy second key descriptor of the user pair\n");
- return -EINVAL;
+ if (!pair || (pair && !pair->second_key)) {
+ /* Set as the key as "for miss" */
+ lookup_key->miss_key = TRUE;
+
+ /* Check the Classifier Table counter parameters for "miss" */
+ err = check_ccnode_miss_counter(lookup_key->cc_node,
+ cnt_cb->id, cnt_tbl_cb->type);
+ } else {
+ lookup_key->miss_key = FALSE;
+
+ /* Set as lookup key the second key descriptor from the pair */
+ err = copy_key_descriptor(pair->second_key, &lookup_key->key);
+ if (err != 0) {
+ log_err("Cannot copy second key descriptor of "
+ "the user pair\n");
+ return -EINVAL;
+ }
+
+ /* Check the Classifier Table counter */
+ err = check_tbl_cls_counter(cnt_cb, lookup_key);
}
- return 0;
+ return err;
}
static int set_cls_cnt_classif_tbl_cb(struct dpa_stats_cnt_cb *cnt_cb,
const struct dpa_stats_cls_cnt_params *params)
{
- struct dpa_stats_cnt_classif_tbl_cb *cnt_tbl_cb = &cnt_cb->tbl_cb;
+ struct dpa_stats_cnt_classif_tbl_cb *tbl_cb = &cnt_cb->tbl_cb;
struct dpa_stats_cls_cnt_classif_tbl prm = params->classif_tbl_params;
struct dpa_stats *dpa_stats = cnt_cb->dpa_stats;
struct dpa_cls_tbl_params cls_tbl;
@@ -2004,11 +2159,17 @@ static int set_cls_cnt_classif_tbl_cb(struct dpa_stats_cnt_cb *cnt_cb,
return -EINVAL;
}
- cnt_tbl_cb->td = params->classif_tbl_params.td;
+ tbl_cb->td = params->classif_tbl_params.td;
cnt_cb->members_num = params->class_members;
switch (prm.key_type) {
case DPA_STATS_CLASSIF_SINGLE_KEY:
+ if (!prm.keys) {
+ log_err("Pointer to the array of keys cannot be NULL "
+ "for counter id %d\n", cnt_cb->id);
+ return -EINVAL;
+ }
+
/* Get CcNode from table descriptor */
err = dpa_classif_table_get_params(prm.td, &cls_tbl);
if (err != 0) {
@@ -2018,21 +2179,37 @@ static int set_cls_cnt_classif_tbl_cb(struct dpa_stats_cnt_cb *cnt_cb,
}
/* Store DPA Classifier Table type */
- cnt_tbl_cb->type = cls_tbl.type;
+ tbl_cb->type = cls_tbl.type;
+
+ /* Set retrieve function depending on table type */
+ set_cnt_classif_tbl_retrieve_func(cnt_cb);
for (i = 0; i < params->class_members; i++) {
/* Store CcNode handle */
- cnt_tbl_cb->keys[i].cc_node = cls_tbl.cc_node;
+ tbl_cb->keys[i].cc_node = cls_tbl.cc_node;
+
+ /* Determine if key represents a 'miss' entry */
+ if (!prm.keys[i]) {
+ tbl_cb->keys[i].miss_key = TRUE;
+ tbl_cb->keys[i].valid = TRUE;
+
+ err = check_ccnode_miss_counter(
+ tbl_cb->keys[i].cc_node,
+ cnt_cb->id, tbl_cb->type);
+ if (err != 0)
+ return -EINVAL;
+ continue;
+ }
- if (!prm.keys[i].byte) {
+ if (!prm.keys[i]->byte) {
/* Key is not valid for now */
- cnt_tbl_cb->keys[i].valid = FALSE;
+ tbl_cb->keys[i].valid = FALSE;
continue;
}
/* Copy the key descriptor */
- err = copy_key_descriptor(&prm.keys[i],
- &cnt_tbl_cb->keys[i].key);
+ err = copy_key_descriptor(prm.keys[i],
+ &tbl_cb->keys[i].key);
if (err != 0) {
log_err("Cannot copy key descriptor from user "
"parameters\n");
@@ -2040,37 +2217,39 @@ static int set_cls_cnt_classif_tbl_cb(struct dpa_stats_cnt_cb *cnt_cb,
}
/* Check the Classifier Table counter */
- err = check_tbl_cls_counter(cnt_cb,
- &cnt_tbl_cb->keys[i]);
+ err = check_tbl_cls_counter(cnt_cb, &tbl_cb->keys[i]);
if (err != 0)
return -EINVAL;
- cnt_tbl_cb->keys[i].valid = TRUE;
+ tbl_cb->keys[i].valid = TRUE;
}
break;
case DPA_STATS_CLASSIF_PAIR_KEY:
+ if (!prm.pairs) {
+ log_err("Pointer to the array of pairs cannot be NULL "
+ "for counter id %d\n", cnt_cb->id);
+ return -EINVAL;
+ }
+
for (i = 0; i < params->class_members; i++) {
- if (!prm.pairs[i].first_key.byte) {
- /* Key is not valid for now */
- cnt_tbl_cb->keys[i].valid = FALSE;
- continue;
+ if (prm.pairs[i]) {
+ if (prm.pairs[i]->first_key) {
+ if (!prm.pairs[i]->first_key->byte) {
+ /* Key is not valid for now */
+ tbl_cb->keys[i].valid = FALSE;
+ continue;
+ }
+ }
}
- err = set_cls_cnt_classif_tbl_pair(cnt_tbl_cb, prm.td,
- &prm.pairs[i], &cnt_tbl_cb->keys[i]);
+ err = set_cls_cnt_classif_tbl_pair(cnt_cb, prm.td,
+ prm.pairs[i], &tbl_cb->keys[i]);
if (err != 0) {
log_err("Cannot set classifier table pair key "
"for counter id %d\n", cnt_cb->id);
return -EINVAL;
}
-
- /* Check the Classifier Table counter */
- err = check_tbl_cls_counter(cnt_cb,
- &cnt_tbl_cb->keys[i]);
- if (err != 0)
- return -EINVAL;
-
- cnt_tbl_cb->keys[i].valid = TRUE;
+ tbl_cb->keys[i].valid = TRUE;
}
break;
default:
@@ -2084,7 +2263,7 @@ static int set_cls_cnt_classif_tbl_cb(struct dpa_stats_cnt_cb *cnt_cb,
if (frag_stats) {
/* For every valid key, retrieve the hmcd */
for (i = 0; i < params->class_members; i++) {
- if (!cnt_tbl_cb->keys[i].valid)
+ if (!tbl_cb->keys[i].valid)
continue;
err = set_frag_manip(prm.td, &cnt_cb->tbl_cb.keys[i]);
@@ -2140,24 +2319,46 @@ static int set_cls_cnt_ccnode_cb(struct dpa_stats_cnt_cb *cnt_cb,
return -EFAULT;
}
+ if (!prm.keys) {
+ log_err("Pointer to the array of keys cannot be NULL "
+ "for counter id %d\n", cnt_cb->id);
+ return -EINVAL;
+ }
+
cnt_cb->ccnode_cb.cc_node = prm.cc_node;
cnt_cb->members_num = params->class_members;
+ /* Set retrieve function depending on counter type */
+ err = set_cnt_classif_node_retrieve_func(cnt_cb, prm.ccnode_type);
+ if (err != 0)
+ return -EINVAL;
+
for (i = 0; i < params->class_members; i++) {
- /* Copy the key descriptor */
- err = copy_key_descriptor(&prm.keys[i],
- &cnt_cb->ccnode_cb.keys[i]);
- if (err != 0) {
- log_err("Cannot copy key descriptor from user "
- "parameters\n");
- return -EINVAL;
- }
+ if (!prm.keys[i]) {
+ /* Set the key byte to NULL, to mark it for 'miss' */
+ cnt_cb->ccnode_cb.keys[i].byte = NULL;
- /* Check the Classifier Node counter parameters */
- err = check_ccnode_counter(cnt_cb,
- prm.ccnode_type, &cnt_cb->ccnode_cb.keys[i]);
- if (err != 0)
- return -EINVAL;
+ /* Check the Classifier Node counter parameters */
+ err = check_ccnode_miss_counter(prm.cc_node,
+ cnt_cb->id, prm.ccnode_type);
+ if (err != 0)
+ return -EINVAL;
+ } else {
+ /* Copy the key descriptor */
+ err = copy_key_descriptor(prm.keys[i],
+ &cnt_cb->ccnode_cb.keys[i]);
+ if (err != 0) {
+ log_err("Cannot copy key descriptor from user "
+ "parameters\n");
+ return -EINVAL;
+ }
+
+ /* Check the Classifier Node counter parameters */
+ err = check_ccnode_counter(cnt_cb, prm.ccnode_type,
+ &cnt_cb->ccnode_cb.keys[i]);
+ if (err != 0)
+ return -EINVAL;
+ }
}
/* Map Classif Node counter selection to CcNode statistics */
@@ -2241,7 +2442,7 @@ static int set_cls_cnt_traffic_mng_cb(struct dpa_stats_cnt_cb *cnt_cb,
uint32_t cnt_sel = params->traffic_mng_params.cnt_sel;
uint32_t cnt_src = params->traffic_mng_params.src;
uint32_t i = 0;
- uint64_t frames, bytes;
+ u64 frames = 0, bytes = 0;
int err = 0;
if (!dpa_stats) {
@@ -2249,69 +2450,75 @@ static int set_cls_cnt_traffic_mng_cb(struct dpa_stats_cnt_cb *cnt_cb,
return -EFAULT;
}
- /* First check the counter src */
- if (cnt_src > DPA_STATS_CNT_TRAFFIC_CG ||
- cnt_src < DPA_STATS_CNT_TRAFFIC_CLASS) {
- log_err("Parameter src %d must be in range (%d - %d) for "
- "counter id %d\n", cnt_src, DPA_STATS_CNT_TRAFFIC_CLASS,
- DPA_STATS_CNT_TRAFFIC_CG, cnt_cb->id);
- return -EINVAL;
- }
-
- /* Then check the counter selection */
- if (cnt_sel == 0 || cnt_sel > DPA_STATS_CNT_NUM_ALL) {
- log_err("Parameter cnt_sel %d must be in range (1 - %d) for "
- "counter id %d\n", cnt_sel, DPA_STATS_CNT_NUM_ALL,
- cnt_cb->id);
+ /* check the counter selection */
+ if (cnt_sel > DPA_STATS_CNT_NUM_ALL) {
+ log_err("Parameter cnt_sel %d must be in range (%d - %d) for "
+ "counter id %d\n", cnt_sel, DPA_STATS_CNT_NUM_OF_BYTES,
+ DPA_STATS_CNT_NUM_ALL, cnt_cb->id);
return -EINVAL;
}
cnt_cb->members_num = params->class_members;
- /* Check the user provided Traffic Manager object */
- for (i = 0; i < params->class_members; i++) {
- if (!params->traffic_mng_params.traffic_mng[i]) {
- log_err("Parameter traffic_mng handle cannot be NULL "
- "for member %d\n", i);
- return -EFAULT;
- }
- cnt_cb->gen_cb.objs[i] =
+ /* check the counter src and set the get function */
+ switch (cnt_src) {
+ case DPA_STATS_CNT_TRAFFIC_CLASS:
+ cnt_cb->f_get_cnt_stats = get_cnt_traffic_mng_cq_stats;
+ for (i = 0; i < params->class_members; i++) {
+ if (!params->traffic_mng_params.traffic_mng[i]) {
+ log_err("Parameter traffic_mng handle cannot be"
+ " NULL for member %d\n", i);
+ return -EFAULT;
+ }
+ cnt_cb->gen_cb.objs[i] =
params->traffic_mng_params.traffic_mng[i];
-
- switch (cnt_src) {
- case DPA_STATS_CNT_TRAFFIC_CLASS:
- cnt_cb->f_get_cnt_stats = get_cnt_traffic_mng_cq_stats;
+ /* Check the user provided Traffic Manager object */
err = qman_ceetm_cq_get_dequeue_statistics(
- params->traffic_mng_params.traffic_mng[i], 0,
- &frames, &bytes);
+ cnt_cb->gen_cb.objs[i], 0, &frames,
+ &bytes);
if (err < 0) {
log_err("Invalid Traffic Manager qm_ceetm_cq "
"object for counter id %d\n",
cnt_cb->id);
return -EINVAL;
}
- break;
- case DPA_STATS_CNT_TRAFFIC_CG:
- cnt_cb->f_get_cnt_stats = get_cnt_traffic_mng_ccg_stats;
+ }
+ break;
+
+ case DPA_STATS_CNT_TRAFFIC_CG:
+ cnt_cb->f_get_cnt_stats = get_cnt_traffic_mng_ccg_stats;
+ for (i = 0; i < params->class_members; i++) {
+ if (!params->traffic_mng_params.traffic_mng[i]) {
+ log_err("Parameter traffic_mng handle cannot be"
+ " NULL for member %d\n", i);
+ return -EFAULT;
+ }
+ cnt_cb->gen_cb.objs[i] =
+ params->traffic_mng_params.traffic_mng[i];
+ /* Check the user provided Traffic Manager object */
err = qman_ceetm_ccg_get_reject_statistics(
- params->traffic_mng_params.traffic_mng[i], 0,
- &frames, &bytes);
+ cnt_cb->gen_cb.objs[i], 0, &frames,
+ &bytes);
if (err < 0) {
- log_err("Invalid Traffic Manager qm_ceetm_ccg "
- "object for counter id %d\n",
- cnt_cb->id);
+ log_err("Invalid Traffic Manager qm_ceetm_cq "
+ "object for counter id %d\n",
+ cnt_cb->id);
return -EINVAL;
}
- break;
}
+ break;
+ default:
+ log_err("Parameter src %d must be in range (%d - %d) for "
+ "counter id %d\n", cnt_src, DPA_STATS_CNT_TRAFFIC_CLASS,
+ DPA_STATS_CNT_TRAFFIC_CG, cnt_cb->id);
+ return -EINVAL;
}
- /* Decrease one to obtain the mask for all statistics */
+ /* Set number of statistics that will be written by this counter */
if (cnt_sel == DPA_STATS_CNT_NUM_ALL)
- cnt_sel -= 1;
-
- cnt_sel_to_stats(&cnt_cb->info,
- dpa_stats->stats_sel[DPA_STATS_CNT_TRAFFIC_MNG], cnt_sel);
+ cnt_cb->info.stats_num = 2;
+ else
+ cnt_cb->info.stats_num = 1;
/* Set number of bytes that will be written by this counter */
cnt_cb->bytes_num = cnt_cb->members_num *
@@ -2321,8 +2528,7 @@ static int set_cls_cnt_traffic_mng_cb(struct dpa_stats_cnt_cb *cnt_cb,
}
int set_classif_tbl_member(const struct dpa_stats_cls_member_params *prm,
- int member_index,
- struct dpa_stats_cnt_cb *cnt_cb)
+ int mbr_idx, struct dpa_stats_cnt_cb *cnt_cb)
{
struct dpa_stats_cnt_classif_tbl_cb *tbl_cb = &cnt_cb->tbl_cb;
uint32_t i = 0;
@@ -2337,67 +2543,80 @@ int set_classif_tbl_member(const struct dpa_stats_cls_member_params *prm,
}
/* Check that member index does not exceeds class size */
- if (member_index < 0 || member_index >= cnt_cb->members_num) {
+ if (mbr_idx < 0 || mbr_idx >= cnt_cb->members_num) {
log_err("Parameter member_index %d must be in range (0 - %d) "
- "for counter id %d\n", member_index,
+ "for counter id %d\n", mbr_idx,
cnt_cb->members_num - 1, cnt_cb->id);
return -EINVAL;
}
/* Release the old key memory */
- kfree(tbl_cb->keys[member_index].key.byte);
- tbl_cb->keys[member_index].key.byte = NULL;
+ kfree(tbl_cb->keys[mbr_idx].key.byte);
+ tbl_cb->keys[mbr_idx].key.byte = NULL;
- kfree(tbl_cb->keys[member_index].key.mask);
- tbl_cb->keys[member_index].key.mask = NULL;
+ kfree(tbl_cb->keys[mbr_idx].key.mask);
+ tbl_cb->keys[mbr_idx].key.mask = NULL;
/* Reset the statistics */
for (i = 0; i < cnt_cb->info.stats_num; i++) {
- cnt_cb->info.stats[member_index][i] = 0;
- cnt_cb->info.last_stats[member_index][i] = 0;
- }
-
- if ((prm->type == DPA_STATS_CLS_MEMBER_SINGLE_KEY && !prm->key.byte) ||
- (prm->type == DPA_STATS_CLS_MEMBER_PAIR_KEY &&
- !prm->pair.first_key.byte)) {
- /* Mark the key as invalid */
- tbl_cb->keys[member_index].valid = FALSE;
- return 0;
- } else {
- tbl_cb->keys[member_index].valid = TRUE;
-
- if (prm->type == DPA_STATS_CLS_MEMBER_SINGLE_KEY) {
+ cnt_cb->info.stats[mbr_idx][i] = 0;
+ cnt_cb->info.last_stats[mbr_idx][i] = 0;
+ }
+
+ if (prm->type == DPA_STATS_CLS_MEMBER_SINGLE_KEY) {
+ if (!prm->key) {
+ /* Mark the key as 'miss' entry */
+ tbl_cb->keys[mbr_idx].miss_key = TRUE;
+ tbl_cb->keys[mbr_idx].valid = TRUE;
+ return 0;
+ } else if (!prm->key->byte) {
+ /* Mark the key as invalid */
+ tbl_cb->keys[mbr_idx].valid = FALSE;
+ tbl_cb->keys[mbr_idx].miss_key = FALSE;
+ return 0;
+ } else {
/* Copy the key descriptor */
- err = copy_key_descriptor(&prm->key,
- &tbl_cb->keys[member_index].key);
+ err = copy_key_descriptor(prm->key,
+ &tbl_cb->keys[mbr_idx].key);
if (err != 0) {
log_err("Cannot copy key descriptor from user "
"parameters\n");
return -EINVAL;
}
- } else {
- err = set_cls_cnt_classif_tbl_pair(tbl_cb, tbl_cb->td,
- &prm->pair, &tbl_cb->keys[member_index]);
- if (err != 0) {
- log_err("Cannot configure the pair key for "
- "counter id %d of member %d\n",
- cnt_cb->id, member_index);
- return -EINVAL;
- }
}
- if (cnt_cb->f_get_cnt_stats != get_cnt_cls_tbl_frag_stats) {
+ } else {
+ if (prm->pair)
+ if (prm->pair->first_key)
+ if (!prm->pair->first_key->byte) {
+ /* Mark the key as invalid */
+ tbl_cb->keys[mbr_idx].valid = FALSE;
+ tbl_cb->keys[mbr_idx].miss_key = FALSE;
+ return 0;
+ }
+ err = set_cls_cnt_classif_tbl_pair(cnt_cb, tbl_cb->td,
+ prm->pair, &tbl_cb->keys[mbr_idx]);
+ if (err != 0) {
+ log_err("Cannot configure the pair key for counter id "
+ "%d of member %d\n", cnt_cb->id, mbr_idx);
+ return -EINVAL;
+ }
+ }
+
+ tbl_cb->keys[mbr_idx].valid = TRUE;
+
+ if (cnt_cb->f_get_cnt_stats != get_cnt_cls_tbl_frag_stats) {
+ if (!tbl_cb->keys[mbr_idx].miss_key) {
err = check_tbl_cls_counter(cnt_cb,
- &tbl_cb->keys[member_index]);
+ &tbl_cb->keys[mbr_idx]);
if (err != 0)
return -EINVAL;
- } else{
- err = set_frag_manip(tbl_cb->td,
- &tbl_cb->keys[member_index]);
- if (err < 0) {
- log_err("Invalid Fragmentation manip handle for"
- " counter id %d\n", cnt_cb->id);
- return -EINVAL;
- }
+ }
+ } else{
+ err = set_frag_manip(tbl_cb->td, &tbl_cb->keys[mbr_idx]);
+ if (err < 0) {
+ log_err("Invalid Fragmentation manip handle for"
+ " counter id %d\n", cnt_cb->id);
+ return -EINVAL;
}
}
@@ -2648,11 +2867,19 @@ static int get_cnt_cls_tbl_match_stats(struct dpa_stats_req_cb *req_cb,
cnt_cb->info.stats_num;
continue;
}
- err = FM_PCD_MatchTableFindNGetKeyStatistics(
- cnt_cb->tbl_cb.keys[i].cc_node,
- cnt_cb->tbl_cb.keys[i].key.size,
- cnt_cb->tbl_cb.keys[i].key.byte,
- cnt_cb->tbl_cb.keys[i].key.mask, &stats);
+
+ if (cnt_cb->tbl_cb.keys[i].miss_key) {
+ err = FM_PCD_MatchTableGetMissStatistics(
+ cnt_cb->tbl_cb.keys[i].cc_node, &stats);
+ } else {
+ err = FM_PCD_MatchTableFindNGetKeyStatistics(
+ cnt_cb->tbl_cb.keys[i].cc_node,
+ cnt_cb->tbl_cb.keys[i].key.size,
+ cnt_cb->tbl_cb.keys[i].key.byte,
+ cnt_cb->tbl_cb.keys[i].key.mask,
+ &stats);
+ }
+
if (err != 0) {
log_err("Cannot retrieve Classifier Exact Match Table "
"statistics for counter id %d\n", cnt_cb->id);
@@ -2682,11 +2909,17 @@ static int get_cnt_cls_tbl_hash_stats(struct dpa_stats_req_cb *req_cb,
cnt_cb->info.stats_num;
continue;
}
- err = FM_PCD_HashTableFindNGetKeyStatistics(
- cnt_cb->tbl_cb.keys[i].cc_node,
- cnt_cb->tbl_cb.keys[i].key.size,
- cnt_cb->tbl_cb.keys[i].key.byte,
- &stats);
+
+ if (cnt_cb->tbl_cb.keys[i].miss_key) {
+ err = FM_PCD_HashTableGetMissStatistics(
+ cnt_cb->tbl_cb.keys[i].cc_node, &stats);
+ } else {
+ err = FM_PCD_HashTableFindNGetKeyStatistics(
+ cnt_cb->tbl_cb.keys[i].cc_node,
+ cnt_cb->tbl_cb.keys[i].key.size,
+ cnt_cb->tbl_cb.keys[i].key.byte,
+ &stats);
+ }
if (err != 0) {
log_err("Cannot retrieve Classifier Hash Table "
"statistics for counter id %d\n", cnt_cb->id);
@@ -2716,10 +2949,17 @@ static int get_cnt_cls_tbl_index_stats(struct dpa_stats_req_cb *req_cb,
cnt_cb->info.stats_num;
continue;
}
- err = FM_PCD_MatchTableGetKeyStatistics(
- cnt_cb->tbl_cb.keys[i].cc_node,
- cnt_cb->tbl_cb.keys[i].key.byte[0],
- &stats);
+
+ if (cnt_cb->tbl_cb.keys[i].miss_key) {
+ err = FM_PCD_MatchTableGetMissStatistics(
+ cnt_cb->tbl_cb.keys[i].cc_node, &stats);
+ } else {
+ err = FM_PCD_MatchTableGetKeyStatistics(
+ cnt_cb->tbl_cb.keys[i].cc_node,
+ cnt_cb->tbl_cb.keys[i].key.byte[0],
+ &stats);
+ }
+
if (err != 0) {
log_err("Cannot retrieve Classifier Indexed Table "
"statistics for counter id %d\n", cnt_cb->id);
@@ -2772,11 +3012,16 @@ static int get_cnt_ccnode_match_stats(struct dpa_stats_req_cb *req_cb,
int err = 0;
for (i = 0; i < cnt_cb->members_num; i++) {
- err = FM_PCD_MatchTableFindNGetKeyStatistics(
+ if (!cnt_cb->ccnode_cb.keys[i].byte) {
+ err = FM_PCD_MatchTableGetMissStatistics(
+ cnt_cb->ccnode_cb.cc_node, &stats);
+ } else {
+ err = FM_PCD_MatchTableFindNGetKeyStatistics(
cnt_cb->ccnode_cb.cc_node,
cnt_cb->ccnode_cb.keys[i].size,
cnt_cb->ccnode_cb.keys[i].byte,
cnt_cb->ccnode_cb.keys[i].mask, &stats);
+ }
if (err != 0) {
log_err("Cannot retrieve Classification Cc Node Exact "
"Match statistics for counter id %d\n",
@@ -2797,10 +3042,16 @@ static int get_cnt_ccnode_hash_stats(struct dpa_stats_req_cb *req_cb,
int err = 0;
for (i = 0; i < cnt_cb->members_num; i++) {
- err = FM_PCD_HashTableFindNGetKeyStatistics(
+ if (!cnt_cb->ccnode_cb.keys[i].byte) {
+ err = FM_PCD_HashTableGetMissStatistics(
+ cnt_cb->ccnode_cb.cc_node, &stats);
+ } else {
+ err = FM_PCD_HashTableFindNGetKeyStatistics(
cnt_cb->ccnode_cb.cc_node,
cnt_cb->ccnode_cb.keys[i].size,
cnt_cb->ccnode_cb.keys[i].byte, &stats);
+ }
+
if (err != 0) {
log_err("Cannot retrieve Classification Cc Node Hash "
"statistics for counter id %d\n", cnt_cb->id);
@@ -2820,9 +3071,14 @@ static int get_cnt_ccnode_index_stats(struct dpa_stats_req_cb *req_cb,
int err = 0;
for (i = 0; i < cnt_cb->members_num; i++) {
- err = FM_PCD_MatchTableGetKeyStatistics(
+ if (!cnt_cb->ccnode_cb.keys[i].byte) {
+ err = FM_PCD_MatchTableGetMissStatistics(
+ cnt_cb->ccnode_cb.cc_node, &stats);
+ } else {
+ err = FM_PCD_MatchTableGetKeyStatistics(
cnt_cb->ccnode_cb.cc_node,
cnt_cb->ccnode_cb.keys[i].byte[0], &stats);
+ }
if (err != 0) {
log_err("Cannot retrieve Classification Cc Node Index "
"statistics for counter id %d\n", cnt_cb->id);
@@ -2871,14 +3127,14 @@ static int get_cnt_traffic_mng_cq_stats(struct dpa_stats_req_cb *req_cb,
struct dpa_stats_cnt_cb *cnt_cb)
{
uint32_t i = 0;
- uint64_t stats_val[2];
+ u64 stats_val[2];
int err = 0;
for (i = 0; i < cnt_cb->members_num; i++) {
/* Retrieve statistics for the current member */
err = qman_ceetm_cq_get_dequeue_statistics(
cnt_cb->gen_cb.objs[i], 0,
- stats_val[1], stats_val[0]);
+ &stats_val[1], &stats_val[0]);
if (err < 0) {
log_err("Cannot retrieve Traffic Manager Class Queue "
"statistics for counter id %d\n", cnt_cb->id);
@@ -2893,13 +3149,13 @@ static int get_cnt_traffic_mng_ccg_stats(struct dpa_stats_req_cb *req_cb,
struct dpa_stats_cnt_cb *cnt_cb)
{
uint32_t i = 0;
- uint64_t stats_val[2];
+ u64 stats_val[2];
int err = 0;
for (i = 0; i < cnt_cb->members_num; i++) {
err = qman_ceetm_ccg_get_reject_statistics(
cnt_cb->gen_cb.objs[i], 0,
- stats_val[1], stats_val[0]);
+ &stats_val[1], &stats_val[0]);
if (err < 0) {
log_err("Cannot retrieve Traffic Manager Class "
"Congestion Group statistics for counter id "
@@ -3269,6 +3525,7 @@ int dpa_stats_create_class_counter(int dpa_stats_id,
break;
case DPA_STATS_CNT_CLASSIF_TBL:
cnt_cb->type = DPA_STATS_CNT_CLASSIF_TBL;
+ cnt_cb->f_get_cnt_stats = get_cnt_cls_tbl_match_stats;
err = set_cls_cnt_classif_tbl_cb(cnt_cb, params);
if (err != 0) {
@@ -3392,7 +3649,7 @@ int dpa_stats_modify_class_counter(int dpa_stats_cnt_id,
}
if (params->type == DPA_STATS_CLS_MEMBER_SINGLE_KEY ||
- params->type == DPA_STATS_CLS_MEMBER_PAIR_KEY) {
+ params->type == DPA_STATS_CLS_MEMBER_PAIR_KEY) {
/* Modify classifier table class member */
err = set_classif_tbl_member(params, member_index, cnt_cb);
if (err < 0) {
@@ -3533,8 +3790,8 @@ int dpa_stats_get_counters(struct dpa_stats_cnt_request_params params,
for (i = 0; i < params.cnts_ids_len; i++) {
if (params.cnts_ids[i] == DPA_OFFLD_INVALID_OBJECT_ID ||
params.cnts_ids[i] > dpa_stats->config.max_counters) {
- log_err("Counter id (cnt_ids[%d]) %d is not initialized "
- "or is greater than maximum counters %d\n", i,
+ log_err("Counter id (cnt_ids[%d]) %d is not initialized"
+ " or is greater than maximum counters %d\n", i,
params.cnts_ids[i],
dpa_stats->config.max_counters);
return -EINVAL;
diff --git a/drivers/staging/fsl_dpa_offload/dpa_stats.h b/drivers/staging/fsl_dpa_offload/dpa_stats.h
index a429258..5843dca 100644
--- a/drivers/staging/fsl_dpa_offload/dpa_stats.h
+++ b/drivers/staging/fsl_dpa_offload/dpa_stats.h
@@ -114,6 +114,7 @@ struct dpa_stats_lookup_key {
struct dpa_offload_lookup_key key; /* Key descriptor */
bool valid; /* Lookup key is valid */
void *frag; /* Fragmentation handle corresponding to this key */
+ bool miss_key; /* Provide statistics for miss entry */
};
/* DPA Stats Classif Table control block */
diff --git a/drivers/staging/fsl_dpa_offload/dpa_stats_ioctl.h b/drivers/staging/fsl_dpa_offload/dpa_stats_ioctl.h
index 070a6f0..d375a0f 100644
--- a/drivers/staging/fsl_dpa_offload/dpa_stats_ioctl.h
+++ b/drivers/staging/fsl_dpa_offload/dpa_stats_ioctl.h
@@ -116,14 +116,14 @@ struct compat_ioc_dpa_offld_lookup_key {
struct dpa_stats_compat_cnt_classif_tbl {
int td;
- struct compat_ioc_dpa_offld_lookup_key key;
+ compat_uptr_t key;
unsigned int cnt_sel;
};
struct dpa_stats_compat_cnt_classif_node {
compat_uptr_t cc_node;
enum dpa_stats_classif_node_type ccnode_type;
- struct compat_ioc_dpa_offld_lookup_key key;
+ compat_uptr_t key;
unsigned int cnt_sel;
};
@@ -202,15 +202,15 @@ struct compat_ioc_dpa_stats_cls_cnt_params {
};
struct compat_ioc_dpa_offld_lookup_key_pair {
- struct compat_ioc_dpa_offld_lookup_key first_key;
- struct compat_ioc_dpa_offld_lookup_key second_key;
+ compat_uptr_t first_key;
+ compat_uptr_t second_key;
};
struct dpa_stats_compat_cls_member_params {
enum dpa_stats_cls_member_type type;
union {
- struct compat_ioc_dpa_offld_lookup_key key;
- struct compat_ioc_dpa_offld_lookup_key_pair pair;
+ compat_uptr_t key;
+ compat_uptr_t pair;
int sa_id;
};
};
diff --git a/drivers/staging/fsl_dpa_offload/dts/b4860qds-usdpaa-shared-interfaces.dts b/drivers/staging/fsl_dpa_offload/dts/b4860qds-usdpaa-shared-interfaces.dts
index 537d3f2..7baa9b1 100644
--- a/drivers/staging/fsl_dpa_offload/dts/b4860qds-usdpaa-shared-interfaces.dts
+++ b/drivers/staging/fsl_dpa_offload/dts/b4860qds-usdpaa-shared-interfaces.dts
@@ -149,7 +149,7 @@
/* Define frame queues for the OH port*/
/* <OH Rx error, OH Rx default> */
fsl,qman-frame-queues-oh = <0x6e 1 0x6f 1>;
- fsl,bman-buffer-pools = <&bp9>;
+ fsl,bman-buffer-pools = <&bp16>;
fsl,fman-oh-port = <&fman0_oh2>;
};
dpa_fman0_oh3: dpa-fman0-oh@3 {
diff --git a/drivers/staging/fsl_dpa_offload/wrp_dpa_stats.c b/drivers/staging/fsl_dpa_offload/wrp_dpa_stats.c
index 5c2cb90..0fdcdc5 100644
--- a/drivers/staging/fsl_dpa_offload/wrp_dpa_stats.c
+++ b/drivers/staging/fsl_dpa_offload/wrp_dpa_stats.c
@@ -90,7 +90,10 @@ static long wrp_dpa_stats_do_ioctl(struct file *filp,
unsigned int cmd, unsigned long args);
static int copy_key_descriptor(struct dpa_offload_lookup_key *src,
- struct dpa_offload_lookup_key *dst);
+ struct dpa_offload_lookup_key **dst);
+
+static int copy_pair_descriptor(struct dpa_offload_lookup_key_pair *src,
+ struct dpa_offload_lookup_key_pair **dst);
static int copy_class_members(void *objs, unsigned int size, void *dst);
@@ -103,8 +106,12 @@ static long wrp_dpa_stats_do_compat_ioctl(struct file *filp,
unsigned long args);
static int copy_key_descriptor_compatcpy(
- struct dpa_offload_lookup_key *kprm,
- const struct compat_ioc_dpa_offld_lookup_key *uprm);
+ struct dpa_offload_lookup_key **kprm,
+ compat_uptr_t uparam);
+
+static int copy_pair_descriptor_compatcpy(
+ struct dpa_offload_lookup_key_pair **ks_pair,
+ struct compat_ioc_dpa_offld_lookup_key_pair pair);
static void dpa_stats_init_compatcpy(
struct ioc_dpa_stats_params *kprm,
@@ -625,39 +632,70 @@ static long do_ioctl_stats_free(void *args)
static int do_ioctl_stats_create_counter(void *args)
{
struct ioc_dpa_stats_cnt_params prm;
- struct dpa_offload_lookup_key key;
+ struct dpa_offload_lookup_key *us_key = NULL;
long ret = 0;
if (copy_from_user(&prm, args, sizeof(prm))) {
- log_err("Cannot copy from user the counter parameters\n");
+ log_err("Could not copy counter parameters\n");
return -EINVAL;
}
- if (prm.cnt_params.type == DPA_STATS_CNT_CLASSIF_NODE)
- ret = copy_key_descriptor(
- &prm.cnt_params.classif_node_params.key, &key);
- else if (prm.cnt_params.type == DPA_STATS_CNT_CLASSIF_TBL)
- ret = copy_key_descriptor(
- &prm.cnt_params.classif_tbl_params.key, &key);
- if (ret != 0) {
- log_err("Cannot copy the key descriptor\n");
- return -EINVAL;
+ if (prm.cnt_params.type == DPA_STATS_CNT_CLASSIF_NODE &&
+ prm.cnt_params.classif_node_params.key) {
+ /* Save user-space provided key */
+ us_key = prm.cnt_params.classif_node_params.key;
+
+ /* Override user-space pointers with kernel memory */
+ ret = copy_key_descriptor(us_key,
+ &prm.cnt_params.classif_node_params.key);
+ if (ret != 0) {
+ log_err("Could not copy the key descriptor\n");
+ /* helper frees its own allocations on failure; key still holds the user pointer */
+ return ret;
+ }
+ }
+
+ if (prm.cnt_params.type == DPA_STATS_CNT_CLASSIF_TBL &&
+ prm.cnt_params.classif_tbl_params.key) {
+ /* Save user-space provided key */
+ us_key = prm.cnt_params.classif_tbl_params.key;
+
+ /* Override user-space pointers with kernel memory */
+ ret = copy_key_descriptor(us_key,
+ &prm.cnt_params.classif_tbl_params.key);
+ if (ret != 0) {
+ log_err("Could not copy the key descriptor\n");
+ /* helper frees its own allocations on failure; key still holds the user pointer */
+ return ret;
+ }
}
ret = dpa_stats_create_counter(prm.stats_id,
&prm.cnt_params, &prm.cnt_id);
- if (ret < 0)
- return ret;
- if (copy_to_user(args, &prm, sizeof(prm))) {
- log_err("Cannot copy to user the counter parameters\n");
- ret = -EINVAL;
+ if (prm.cnt_params.type == DPA_STATS_CNT_CLASSIF_NODE &&
+ prm.cnt_params.classif_node_params.key) {
+ /* Release kernel-allocated memory */
+ kfree(prm.cnt_params.classif_node_params.key->byte);
+ kfree(prm.cnt_params.classif_node_params.key->mask);
+ kfree(prm.cnt_params.classif_node_params.key);
+ /* Restore user-provided key */
+ prm.cnt_params.classif_node_params.key = us_key;
}
- if (prm.cnt_params.type == DPA_STATS_CNT_CLASSIF_NODE ||
- prm.cnt_params.type == DPA_STATS_CNT_CLASSIF_TBL) {
- kfree(key.byte);
- kfree(key.mask);
+ if (prm.cnt_params.type == DPA_STATS_CNT_CLASSIF_TBL &&
+ prm.cnt_params.classif_tbl_params.key) {
+ /* Release kernel-allocated memory */
+ kfree(prm.cnt_params.classif_tbl_params.key->byte);
+ kfree(prm.cnt_params.classif_tbl_params.key->mask);
+ kfree(prm.cnt_params.classif_tbl_params.key);
+ /* Restore user-provided key */
+ prm.cnt_params.classif_tbl_params.key = us_key;
+ }
+
+ if (copy_to_user(args, &prm, sizeof(prm))) {
+ log_err("Could not copy to user the Counter ID\n");
+ ret = -EINVAL;
}
return ret;
@@ -702,14 +740,14 @@ static int do_ioctl_stats_compat_create_counter(void *args)
&kprm.cnt_params.classif_tbl_params,
&uprm.cnt_params.classif_tbl_params);
if (ret < 0)
- return ret;
+ goto compat_create_counter_cleanup;
break;
case DPA_STATS_CNT_CLASSIF_NODE:
ret = dpa_stats_ccnode_cnt_compatcpy(
&kprm.cnt_params.classif_node_params,
&uprm.cnt_params.classif_node_params);
if (ret < 0)
- return ret;
+ goto compat_create_counter_cleanup;
break;
case DPA_STATS_CNT_IPSEC:
memcpy(&kprm.cnt_params.ipsec_params,
@@ -728,7 +766,7 @@ static int do_ioctl_stats_compat_create_counter(void *args)
ret = dpa_stats_create_counter(kprm.stats_id,
&kprm.cnt_params, &kprm.cnt_id);
if (ret < 0)
- return ret;
+ goto compat_create_counter_cleanup;
uprm.cnt_id = kprm.cnt_id;
@@ -737,14 +775,20 @@ static int do_ioctl_stats_compat_create_counter(void *args)
ret = -EINVAL;
}
- if (kprm.cnt_params.type == DPA_STATS_CNT_CLASSIF_NODE) {
- kfree(kprm.cnt_params.classif_node_params.key.byte);
- kfree(kprm.cnt_params.classif_node_params.key.mask);
- } else if (kprm.cnt_params.type == DPA_STATS_CNT_CLASSIF_TBL) {
- kfree(kprm.cnt_params.classif_tbl_params.key.byte);
- kfree(kprm.cnt_params.classif_tbl_params.key.mask);
+compat_create_counter_cleanup:
+ if (kprm.cnt_params.type == DPA_STATS_CNT_CLASSIF_NODE &&
+ kprm.cnt_params.classif_node_params.key) {
+ kfree(kprm.cnt_params.classif_node_params.key->byte);
+ kfree(kprm.cnt_params.classif_node_params.key->mask);
+ kfree(kprm.cnt_params.classif_node_params.key);
}
+ if (kprm.cnt_params.type == DPA_STATS_CNT_CLASSIF_TBL &&
+ kprm.cnt_params.classif_tbl_params.key) {
+ kfree(kprm.cnt_params.classif_tbl_params.key->byte);
+ kfree(kprm.cnt_params.classif_tbl_params.key->mask);
+ kfree(kprm.cnt_params.classif_tbl_params.key);
+ }
return ret;
}
#endif
@@ -754,9 +798,10 @@ static int do_ioctl_stats_create_class_counter(void *args)
struct ioc_dpa_stats_cls_cnt_params prm;
struct dpa_stats_cls_cnt_classif_node *cnode;
struct dpa_stats_cls_cnt_classif_tbl *tbl;
- struct dpa_offload_lookup_key key;
- struct dpa_stats_cnt_eth_src *eth_src = NULL;
- uint32_t i = 0, eth_src_size = 0;
+ struct dpa_offload_lookup_key **us_keys = NULL;
+ struct dpa_offload_lookup_key_pair **us_pairs = NULL;
+ uint32_t i = 0;
+ unsigned int cls_mbrs;
void *cls_objs = NULL;
int *sa_ids = NULL;
long ret = 0;
@@ -766,13 +811,14 @@ static int do_ioctl_stats_create_class_counter(void *args)
return -EINVAL;
}
+ cls_mbrs = prm.cnt_params.class_members;
+
switch (prm.cnt_params.type) {
- case DPA_STATS_CNT_ETH:
- eth_src_size = prm.cnt_params.class_members *
- sizeof(struct dpa_stats_cnt_eth_src);
+ case DPA_STATS_CNT_ETH: {
+ struct dpa_stats_cnt_eth_src *eth_src = NULL;
/* Allocate memory to store the sources array */
- eth_src = kmalloc(eth_src_size, GFP_KERNEL);
+ eth_src = kmalloc(sizeof(*eth_src) * cls_mbrs, GFP_KERNEL);
if (!eth_src) {
log_err("Cannot allocate memory for Ethernet sources "
"array\n");
@@ -781,17 +827,17 @@ static int do_ioctl_stats_create_class_counter(void *args)
if (copy_from_user(eth_src,
prm.cnt_params.eth_params.src,
- eth_src_size)) {
+ sizeof(*eth_src) * cls_mbrs)) {
log_err("Cannot copy array of Ethernet sources\n");
kfree(eth_src);
return -EBUSY;
}
prm.cnt_params.eth_params.src = eth_src;
break;
+ }
case DPA_STATS_CNT_REASS:
- ret = copy_class_members(cls_objs,
- prm.cnt_params.class_members,
- prm.cnt_params.reass_params.reass);
+ ret = copy_class_members(cls_objs, cls_mbrs,
+ prm.cnt_params.reass_params.reass);
if (ret < 0) {
log_err("Cannot copy array of Reassembly objects\n");
kfree(cls_objs);
@@ -799,8 +845,7 @@ static int do_ioctl_stats_create_class_counter(void *args)
}
break;
case DPA_STATS_CNT_FRAG:
- ret = copy_class_members(cls_objs,
- prm.cnt_params.class_members,
+ ret = copy_class_members(cls_objs, cls_mbrs,
prm.cnt_params.frag_params.frag);
if (ret < 0) {
log_err("Cannot copy array of Fragmentation objects\n");
@@ -809,8 +854,7 @@ static int do_ioctl_stats_create_class_counter(void *args)
}
break;
case DPA_STATS_CNT_POLICER:
- ret = copy_class_members(cls_objs,
- prm.cnt_params.class_members,
+ ret = copy_class_members(cls_objs, cls_mbrs,
prm.cnt_params.plcr_params.plcr);
if (ret < 0) {
log_err("Cannot copy array of Policer objects\n");
@@ -822,36 +866,50 @@ static int do_ioctl_stats_create_class_counter(void *args)
tbl = &prm.cnt_params.classif_tbl_params;
if (tbl->key_type == DPA_STATS_CLASSIF_SINGLE_KEY) {
- for (i = 0; i < prm.cnt_params.class_members; i++) {
- if (!tbl->keys[i].byte)
- continue;
+ /* Save array of user-space provided key pointers */
+ us_keys = tbl->keys;
+
+ /* Override user-space pointers with kernel memory */
+ tbl->keys = kzalloc(cls_mbrs *
+ sizeof(*tbl->keys), GFP_KERNEL);
+ if (!tbl->keys) {
+ log_err("Cannot allocate kernel memory for "
+ "lookup keys array\n");
+ return -ENOMEM;
+ }
- ret = copy_key_descriptor(&tbl->keys[i], &key);
+ for (i = 0; i < cls_mbrs; i++) {
+ if (!us_keys[i])
+ continue;
+ ret = copy_key_descriptor(us_keys[i],
+ &tbl->keys[i]);
if (ret != 0) {
- log_err("Cannot copy the key descriptor"
- "\n");
- return -EINVAL;
+ log_err("Cannot copy key descriptor\n");
+ goto create_cls_counter_cleanup;
}
}
} else if (tbl->key_type == DPA_STATS_CLASSIF_PAIR_KEY) {
- for (i = 0; i < prm.cnt_params.class_members; i++) {
- if (!tbl->pairs[i].first_key.byte)
- continue;
-
- ret = copy_key_descriptor(
- &tbl->pairs[i].first_key, &key);
- if (ret != 0) {
- log_err("Cannot copy the first key "
- "descriptor of pair-key\n");
- return -EINVAL;
- }
+ /* Save array of user-space provided pairs pointers */
+ us_pairs = tbl->pairs;
+
+ /* Override user-space pointers with kernel memory */
+ tbl->pairs = kzalloc(cls_mbrs *
+ sizeof(*tbl->pairs), GFP_KERNEL);
+ if (!tbl->pairs) {
+ log_err("Cannot allocate kernel memory for "
+ "lookup pairs array\n");
+ return -ENOMEM;
+ }
- ret = copy_key_descriptor(
- &tbl->pairs[i].second_key, &key);
+ for (i = 0; i < cls_mbrs; i++) {
+ if (!us_pairs[i])
+ continue;
+ ret = copy_pair_descriptor(us_pairs[i],
+ &tbl->pairs[i]);
if (ret != 0) {
- log_err("Cannot copy the second key "
- "descriptor of pair-key\n");
- return -EINVAL;
+ log_err("Could not copy the "
+ "pair key descriptor\n");
+ goto create_cls_counter_cleanup;
}
}
}
@@ -859,11 +917,28 @@ static int do_ioctl_stats_create_class_counter(void *args)
case DPA_STATS_CNT_CLASSIF_NODE:
cnode = &prm.cnt_params.classif_node_params;
- for (i = 0; i < prm.cnt_params.class_members; i++) {
- ret = copy_key_descriptor(&cnode->keys[i], &key);
+ if (!cnode->keys) {
+ log_err("Pointer to array of keys can't be NULL\n");
+ return -EINVAL;
+ }
+ /* Save array of user-space provided key pointers */
+ us_keys = cnode->keys;
+
+ /* Override user-space pointers with kernel memory */
+ cnode->keys = kzalloc(cls_mbrs *
+ sizeof(*cnode->keys), GFP_KERNEL);
+ if (!cnode->keys) {
+ log_err("No more memory to store array of keys\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < cls_mbrs; i++) {
+ if (!us_keys[i])
+ continue;
+ ret = copy_key_descriptor(us_keys[i], &cnode->keys[i]);
if (ret != 0) {
log_err("Cannot copy the key descriptor\n");
- return -EINVAL;
+ goto create_cls_counter_cleanup;
}
}
break;
@@ -891,17 +966,10 @@ static int do_ioctl_stats_create_class_counter(void *args)
ret = dpa_stats_create_class_counter(prm.stats_id,
&prm.cnt_params, &prm.cnt_id);
- if (ret < 0)
- return ret;
-
- if (copy_to_user(args, &prm, sizeof(prm))) {
- log_err("Cannot copy to user class counter parameters\n");
- ret = -EINVAL;
- }
-
+create_cls_counter_cleanup:
switch (prm.cnt_params.type) {
case DPA_STATS_CNT_ETH:
- kfree(eth_src);
+ kfree(prm.cnt_params.eth_params.src);
break;
case DPA_STATS_CNT_REASS:
case DPA_STATS_CNT_FRAG:
@@ -911,25 +979,53 @@ static int do_ioctl_stats_create_class_counter(void *args)
case DPA_STATS_CNT_CLASSIF_TBL:
tbl = &prm.cnt_params.classif_tbl_params;
- for (i = 0; i < prm.cnt_params.class_members; i++) {
- if (tbl->key_type == DPA_STATS_CLASSIF_SINGLE_KEY) {
- kfree(tbl->keys[i].byte);
- kfree(tbl->keys[i].mask);
+ if (tbl->key_type == DPA_STATS_CLASSIF_SINGLE_KEY) {
+ for (i = 0; i < cls_mbrs; i++) {
+ if (!tbl->keys[i])
+ continue;
+ kfree(tbl->keys[i]->byte);
+ kfree(tbl->keys[i]->mask);
+ kfree(tbl->keys[i]);
}
+ kfree(tbl->keys);
+ /* Restore user-space pointers */
+ tbl->keys = us_keys;
+ }
+
+ if (tbl->key_type == DPA_STATS_CLASSIF_PAIR_KEY) {
+ for (i = 0; i < cls_mbrs; i++) {
+ if (!tbl->pairs[i])
+ continue;
- if (tbl->key_type == DPA_STATS_CLASSIF_PAIR_KEY) {
- kfree(tbl->pairs[i].first_key.byte);
- kfree(tbl->pairs[i].first_key.mask);
- kfree(tbl->pairs[i].second_key.byte);
- kfree(tbl->pairs[i].second_key.mask);
+ if (tbl->pairs[i]->first_key) {
+ kfree(tbl->pairs[i]->first_key->byte);
+ kfree(tbl->pairs[i]->first_key->mask);
+ kfree(tbl->pairs[i]->first_key);
+ }
+ if (tbl->pairs[i]->second_key) {
+ kfree(tbl->pairs[i]->second_key->byte);
+ kfree(tbl->pairs[i]->second_key->mask);
+ kfree(tbl->pairs[i]->second_key);
+ }
+ kfree(tbl->pairs[i]);
}
+ kfree(tbl->pairs);
+ tbl->pairs = us_pairs;
+ }
break;
case DPA_STATS_CNT_CLASSIF_NODE:
- for (i = 0; i < prm.cnt_params.class_members; i++) {
- kfree(prm.cnt_params.classif_node_params.keys[i].byte);
- kfree(prm.cnt_params.classif_node_params.keys[i].mask);
+ cnode = &prm.cnt_params.classif_node_params;
+
+ for (i = 0; i < cls_mbrs; i++) {
+ if (!cnode->keys[i])
+ continue;
+ /* Free allocated memory */
+ kfree(cnode->keys[i]->byte);
+ kfree(cnode->keys[i]->mask);
+ kfree(cnode->keys[i]);
}
+ kfree(cnode->keys);
+ cnode->keys = us_keys;
break;
case DPA_STATS_CNT_IPSEC:
kfree(sa_ids);
@@ -939,6 +1035,11 @@ static int do_ioctl_stats_create_class_counter(void *args)
break;
}
+ if (copy_to_user(args, &prm, sizeof(prm))) {
+ log_err("Cannot copy to user class counter parameters\n");
+ ret = -EINVAL;
+ }
+
return ret;
}
@@ -988,46 +1089,18 @@ static int do_ioctl_stats_compat_create_class_counter(void *args)
return ret;
break;
case DPA_STATS_CNT_CLASSIF_TBL:
- {
- struct dpa_stats_cls_cnt_classif_tbl *tbl =
- &kprm_cls->classif_tbl_params;
-
- ret = dpa_stats_tbl_cls_compatcpy(tbl,
+ ret = dpa_stats_tbl_cls_compatcpy(&kprm_cls->classif_tbl_params,
&uprm_cls->classif_tbl_params, kprm_cls->class_members);
if (!ret)
break;
-
- if (tbl->key_type == DPA_STATS_CLASSIF_SINGLE_KEY) {
- for (i = 0; i < kprm_cls->class_members; i++) {
- kfree(tbl->keys[i].byte);
- kfree(tbl->keys[i].mask);
- }
- kfree(tbl->keys);
-
- } else if (tbl->key_type == DPA_STATS_CLASSIF_PAIR_KEY) {
- for (i = 0; i < kprm_cls->class_members; i++) {
- kfree(tbl->pairs[i].first_key.byte);
- kfree(tbl->pairs[i].first_key.mask);
- kfree(tbl->pairs[i].second_key.byte);
- kfree(tbl->pairs[i].second_key.mask);
- }
- kfree(tbl->pairs);
- }
- return ret;
- }
+ goto compat_create_cls_counter_cleanup;
case DPA_STATS_CNT_CLASSIF_NODE:
ret = dpa_stats_ccnode_cls_compatcpy(
- &kprm_cls->classif_node_params,
- &uprm_cls->ccnode_params,
- kprm_cls->class_members);
+ &kprm_cls->classif_node_params,
+ &uprm_cls->ccnode_params, kprm_cls->class_members);
if (!ret)
break;
- for (i = 0; i < kprm_cls->class_members; i++) {
- kfree(kprm_cls->classif_node_params.keys[i].byte);
- kfree(kprm_cls->classif_node_params.keys[i].mask);
- }
- kfree(kprm_cls->classif_node_params.keys);
- return ret;
+ goto compat_create_cls_counter_cleanup;
case DPA_STATS_CNT_IPSEC:
ret = dpa_stats_ipsec_cls_compatcpy(&kprm_cls->ipsec_params,
&uprm_cls->ipsec_params, kprm_cls->class_members);
@@ -1038,10 +1111,10 @@ static int do_ioctl_stats_compat_create_class_counter(void *args)
break;
}
- ret = dpa_stats_create_class_counter(
- kprm.stats_id, kprm_cls, &kprm.cnt_id);
+ ret = dpa_stats_create_class_counter(kprm.stats_id,
+ kprm_cls, &kprm.cnt_id);
if (ret < 0)
- return ret;
+ goto compat_create_cls_counter_cleanup;
uprm.cnt_id = kprm.cnt_id;
@@ -1050,6 +1123,7 @@ static int do_ioctl_stats_compat_create_class_counter(void *args)
ret = -EINVAL;
}
+compat_create_cls_counter_cleanup:
switch (uprm.cnt_params.type) {
case DPA_STATS_CNT_ETH:
kfree(kprm_cls->eth_params.src);
@@ -1070,17 +1144,29 @@ static int do_ioctl_stats_compat_create_class_counter(void *args)
if (tbl->key_type == DPA_STATS_CLASSIF_SINGLE_KEY) {
for (i = 0; i < kprm_cls->class_members; i++) {
- kfree(tbl->keys[i].byte);
- kfree(tbl->keys[i].mask);
+ if (!tbl->keys[i])
+ continue;
+ kfree(tbl->keys[i]->byte);
+ kfree(tbl->keys[i]->mask);
+ kfree(tbl->keys[i]);
}
kfree(tbl->keys);
} else if (tbl->key_type == DPA_STATS_CLASSIF_PAIR_KEY) {
for (i = 0; i < kprm_cls->class_members; i++) {
- kfree(tbl->pairs[i].first_key.byte);
- kfree(tbl->pairs[i].first_key.mask);
- kfree(tbl->pairs[i].second_key.byte);
- kfree(tbl->pairs[i].second_key.mask);
+ if (!tbl->pairs[i])
+ continue;
+ if (tbl->pairs[i]->first_key) {
+ kfree(tbl->pairs[i]->first_key->byte);
+ kfree(tbl->pairs[i]->first_key->mask);
+ kfree(tbl->pairs[i]->first_key);
+ }
+ if (tbl->pairs[i]->second_key) {
+ kfree(tbl->pairs[i]->second_key->byte);
+ kfree(tbl->pairs[i]->second_key->mask);
+ kfree(tbl->pairs[i]->second_key);
+ }
+ kfree(tbl->pairs[i]);
}
kfree(tbl->pairs);
}
@@ -1088,11 +1174,15 @@ static int do_ioctl_stats_compat_create_class_counter(void *args)
}
case DPA_STATS_CNT_CLASSIF_NODE:
for (i = 0; i < kprm_cls->class_members; i++) {
- kfree(kprm_cls->classif_node_params.keys[i].byte);
- kfree(kprm_cls->classif_node_params.keys[i].mask);
+ if (!kprm_cls->classif_node_params.keys[i])
+ continue;
+ kfree(kprm_cls->classif_node_params.keys[i]->byte);
+ kfree(kprm_cls->classif_node_params.keys[i]->mask);
+ kfree(kprm_cls->classif_node_params.keys[i]);
}
kfree(kprm_cls->classif_node_params.keys);
break;
+
case DPA_STATS_CNT_IPSEC:
kfree(kprm_cls->ipsec_params.sa_id);
break;
@@ -1108,8 +1198,9 @@ static int do_ioctl_stats_compat_create_class_counter(void *args)
static int do_ioctl_stats_modify_class_counter(void *args)
{
struct ioc_dpa_stats_cls_member_params prm;
- struct dpa_offload_lookup_key key;
- int ret;
+ struct dpa_offload_lookup_key *us_key = NULL;
+ struct dpa_offload_lookup_key_pair *us_pair = NULL;
+ int ret = 0;
if (copy_from_user(&prm, args, sizeof(prm))) {
log_err("Cannot copy from user the class counter parameters\n");
@@ -1118,32 +1209,32 @@ static int do_ioctl_stats_modify_class_counter(void *args)
switch (prm.params.type) {
case DPA_STATS_CLS_MEMBER_SINGLE_KEY:
- if (prm.params.key.byte) {
- ret = copy_key_descriptor(&prm.params.key, &key);
- if (ret != 0) {
- log_err("Cannot copy the key descriptor\n");
- return -EINVAL;
- }
+ if (!prm.params.key)
+ break;
+
+ /* Save user-space provided key */
+ us_key = prm.params.key;
+
+ /* Override user-space pointers with kernel memory */
+ ret = copy_key_descriptor(us_key, &prm.params.key);
+ if (ret != 0) {
+ log_err("Could not copy the key descriptor\n");
+ goto modify_counter_cleanup;
}
+
break;
case DPA_STATS_CLS_MEMBER_PAIR_KEY:
- if (prm.params.pair.first_key.byte &&
- prm.params.pair.first_key.mask) {
- ret = copy_key_descriptor(
- &prm.params.pair.first_key, &key);
- if (ret != 0) {
- log_err("Cannot copy the first key descriptor "
- "of the pair-key\n");
- return -EINVAL;
- }
+ if (!prm.params.pair)
+ break;
- ret = copy_key_descriptor(
- &prm.params.pair.second_key, &key);
- if (ret != 0) {
- log_err("Cannot copy the second key descriptor "
- "of the pair-key\n");
- return -EINVAL;
- }
+ /* Save array of user-space provided pairs pointers */
+ us_pair = prm.params.pair;
+
+ /* Override user-space pointers with kernel memory */
+ ret = copy_pair_descriptor(us_pair, &prm.params.pair);
+ if (ret != 0) {
+ log_err("Could not copy the pair key descriptor\n");
+ goto modify_counter_cleanup;
}
break;
case DPA_STATS_CLS_MEMBER_SA_ID:
@@ -1154,32 +1245,53 @@ static int do_ioctl_stats_modify_class_counter(void *args)
ret = dpa_stats_modify_class_counter(prm.cnt_id,
&prm.params, prm.member_index);
- if (ret < 0)
- return ret;
-
+modify_counter_cleanup:
switch (prm.params.type) {
case DPA_STATS_CLS_MEMBER_SINGLE_KEY:
- kfree(prm.params.key.byte);
- kfree(prm.params.key.mask);
+ if (prm.params.key && prm.params.key != us_key) {
+ /* Release kernel-allocated memory */
+ kfree(prm.params.key->byte);
+ kfree(prm.params.key->mask);
+ kfree(prm.params.key);
+ /* Restore user-provided key */
+ prm.params.key = us_key;
+ }
break;
case DPA_STATS_CLS_MEMBER_PAIR_KEY:
- kfree(prm.params.pair.first_key.byte);
- kfree(prm.params.pair.first_key.mask);
- kfree(prm.params.pair.second_key.byte);
- kfree(prm.params.pair.second_key.mask);
+ if (prm.params.pair && prm.params.pair != us_pair) {
+ if (prm.params.pair->first_key) {
+ /* Release kernel-allocated memory */
+ kfree(prm.params.pair->first_key->byte);
+ kfree(prm.params.pair->first_key->mask);
+ kfree(prm.params.pair->first_key);
+ }
+ if (prm.params.pair->second_key) {
+ /* Release kernel-allocated memory */
+ kfree(prm.params.pair->second_key->byte);
+ kfree(prm.params.pair->second_key->mask);
+ kfree(prm.params.pair->second_key);
+ }
+ kfree(prm.params.pair);
+ /* Restore the user-provided pair pointer; the
+ * freed kernel copy must not be dereferenced */
+ prm.params.pair = us_pair;
+
+ }
break;
case DPA_STATS_CLS_MEMBER_SA_ID:
break;
default:
+ log_err("Invalid class member type\n");
break;
}
if (copy_to_user(args, &prm, sizeof(prm))) {
- log_err("Cannot copy to user the class counter result\n");
- return -EBUSY;
+ log_err("Could not write "
+ "dpa_stats_modify_class_counter result\n");
+ ret = -EBUSY;
}
- return 0;
+ return ret;
}
#ifdef CONFIG_COMPAT
@@ -1187,10 +1299,11 @@ static int do_ioctl_stats_compat_modify_class_counter(void *args)
{
struct ioc_dpa_stats_cls_member_params kprm;
struct compat_ioc_dpa_stats_cls_member_params uprm;
+ struct compat_ioc_dpa_offld_lookup_key_pair pair;
int ret;
if (copy_from_user(&uprm, args, sizeof(uprm))) {
- log_err("Cannot copy from user the class counter parameters\n");
+ log_err("Cannot copy from user the modify counter parameters\n");
return -EINVAL;
}
@@ -1201,35 +1314,35 @@ static int do_ioctl_stats_compat_modify_class_counter(void *args)
switch (kprm.params.type) {
case DPA_STATS_CLS_MEMBER_SINGLE_KEY:
- if (compat_ptr(uprm.params.key.byte)) {
- ret = copy_key_descriptor_compatcpy(
- &kprm.params.key,
- &uprm.params.key);
- if (ret < 0) {
- log_err("Cannot copy the key descriptor\n");
- return ret;
- }
-
+ if (!compat_ptr(uprm.params.key))
+ break;
+ /* Copy user-provided key descriptor */
+ ret = copy_key_descriptor_compatcpy(&kprm.params.key,
+ uprm.params.key);
+ if (ret < 0) {
+ log_err("Cannot copy the key descriptor\n");
+ goto compat_modify_counter_cleanup;
}
break;
case DPA_STATS_CLS_MEMBER_PAIR_KEY:
- if (compat_ptr(uprm.params.pair.first_key.byte)) {
- ret = copy_key_descriptor_compatcpy(
- &kprm.params.pair.first_key,
- &uprm.params.pair.first_key);
- if (ret < 0)
- return ret;
+ if (!compat_ptr(uprm.params.pair))
+ break;
- ret = copy_key_descriptor_compatcpy(
- &kprm.params.pair.second_key,
- &uprm.params.pair.second_key);
- if (ret != 0) {
- log_err("Cannot copy the key descriptor of the "
- "pair-key\n");
- return -EINVAL;
- }
+ if (copy_from_user(&pair, compat_ptr(uprm.params.pair),
+ (sizeof(pair)))) {
+ log_err("Cannot copy from user array of "
+ "lookup pairs\n");
+ return -EBUSY;
+ }
+
+ /* Copy user-provided lookup pair descriptor */
+ ret = copy_pair_descriptor_compatcpy(&kprm.params.pair, pair);
+ if (ret < 0) {
+ log_err("Cannot copy the pair key descriptor\n");
+ goto compat_modify_counter_cleanup;
}
break;
+
case DPA_STATS_CLS_MEMBER_SA_ID:
kprm.params.sa_id = uprm.params.sa_id;
break;
@@ -1238,34 +1351,45 @@ static int do_ioctl_stats_compat_modify_class_counter(void *args)
}
ret = dpa_stats_modify_class_counter(kprm.cnt_id,
- &kprm.params, kprm.member_index);
+ &kprm.params, kprm.member_index);
if (ret < 0)
- return ret;
+ goto compat_modify_counter_cleanup;
uprm.cnt_id = kprm.cnt_id;
+ if (copy_to_user(args, &uprm, sizeof(uprm))) {
+ log_err("Cannot copy to user class counter result\n");
+ return -EBUSY;
+ }
+
+compat_modify_counter_cleanup:
switch (kprm.params.type) {
case DPA_STATS_CLS_MEMBER_SINGLE_KEY:
- kfree(kprm.params.key.byte);
- kfree(kprm.params.key.mask);
+ if (!kprm.params.key)
+ break;
+ kfree(kprm.params.key->byte);
+ kfree(kprm.params.key->mask);
+ kfree(kprm.params.key);
break;
case DPA_STATS_CLS_MEMBER_PAIR_KEY:
- kfree(kprm.params.pair.first_key.byte);
- kfree(kprm.params.pair.first_key.mask);
- kfree(kprm.params.pair.second_key.byte);
- kfree(kprm.params.pair.second_key.mask);
+ if (!kprm.params.pair)
+ break;
+ if (kprm.params.pair->first_key) {
+ kfree(kprm.params.pair->first_key->byte);
+ kfree(kprm.params.pair->first_key->mask);
+ kfree(kprm.params.pair->first_key);
+ }
+ if (kprm.params.pair->second_key) {
+ kfree(kprm.params.pair->second_key->byte);
+ kfree(kprm.params.pair->second_key->mask);
+ kfree(kprm.params.pair->second_key);
+ }
break;
case DPA_STATS_CLS_MEMBER_SA_ID:
break;
default:
break;
}
-
- if (copy_to_user(args, &uprm, sizeof(uprm))) {
- log_err("Cannot copy to user class counter result\n");
- return -EBUSY;
- }
-
return 0;
}
#endif
@@ -1720,89 +1844,196 @@ static long store_get_cnts_async_params(
}
static int copy_key_descriptor(struct dpa_offload_lookup_key *src,
- struct dpa_offload_lookup_key *tmp)
+ struct dpa_offload_lookup_key **ks_key)
{
- if (!src->byte) {
- log_err("Key descriptor byte from user cannot be NULL\n");
- return -EINVAL;
- }
+ struct dpa_offload_lookup_key *tmp = NULL;
- /* Allocate memory to store the key byte array */
- tmp->byte = kmalloc(src->size, GFP_KERNEL);
- if (!tmp->byte) {
- log_err("Cannot allocate memory for key descriptor byte\n");
+ /* Allocate kernel memory for key descriptor */
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp) {
+ log_err("Cannot allocate kernel memory for key descriptor\n");
return -ENOMEM;
}
- if (copy_from_user(tmp->byte, src->byte, src->size)) {
- log_err("Cannot copy from user the key descriptor byte\n");
- kfree(tmp->byte);
- return -EBUSY;
+ if (src->byte) {
+ /* Allocate memory to store the key byte array */
+ tmp->byte = kmalloc(src->size, GFP_KERNEL);
+ if (!tmp->byte) {
+ log_err("Cannot allocate memory for key descriptor byte\n");
+ kfree(tmp);
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(tmp->byte, src->byte, src->size)) {
+ log_err("Cannot copy from user the key "
+ "descriptor byte\n");
+ kfree(tmp->byte);
+ kfree(tmp);
+ return -EBUSY;
+ }
}
- src->byte = tmp->byte;
if (src->mask) {
/* Allocate memory to store the key mask array */
tmp->mask = kmalloc(src->size, GFP_KERNEL);
if (!tmp->mask) {
- log_err("Cannot allocate memory for key descriptor "
- "mask\n");
+ log_err("Cannot allocate memory for key "
+ "descriptor mask\n");
kfree(tmp->byte);
+ kfree(tmp);
return -ENOMEM;
}
if (copy_from_user(tmp->mask, src->mask, src->size)) {
- log_err("Cannot copy from user the key descriptor "
- "mask\n");
+ log_err("Cannot copy from user the "
+ "key descriptor mask\n");
kfree(tmp->byte);
kfree(tmp->mask);
+ kfree(tmp);
return -EBUSY;
}
- src->mask = tmp->mask;
}
+
+ tmp->size = src->size;
+ *ks_key = tmp;
+ return 0;
+}
+
+static int copy_pair_descriptor(struct dpa_offload_lookup_key_pair *src,
+ struct dpa_offload_lookup_key_pair **ks_pair)
+{
+ struct dpa_offload_lookup_key_pair *tmp;
+ int ret = 0;
+
+ /* Allocate kernel memory for pair descriptor*/
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp) {
+ log_err("Cannot allocate kernel memory for pair descriptor\n");
+ return -ENOMEM;
+ }
+
+ if (src->first_key) {
+ ret = copy_key_descriptor(src->first_key, &tmp->first_key);
+ if (ret != 0) {
+ log_err("Could not copy the first key descriptor\n");
+ kfree(tmp);
+ return ret;
+ }
+ }
+
+ if (src->second_key) {
+ ret = copy_key_descriptor(src->second_key, &tmp->second_key);
+ if (ret != 0) {
+ log_err("Could not copy the second key descriptor\n");
+ kfree(tmp);
+ return ret;
+ }
+ }
+ *ks_pair = tmp;
return 0;
}
#ifdef CONFIG_COMPAT
static int copy_key_descriptor_compatcpy(
- struct dpa_offload_lookup_key *kparam,
- const struct compat_ioc_dpa_offld_lookup_key *uparam)
+ struct dpa_offload_lookup_key **ks_key, compat_uptr_t uparam)
{
- BUG_ON(uparam->size <= 0);
+ struct compat_ioc_dpa_offld_lookup_key key;
+ struct dpa_offload_lookup_key *kparam;
- kparam->size = uparam->size;
+ if (copy_from_user(&key, compat_ptr(uparam),
+ sizeof(struct compat_ioc_dpa_offld_lookup_key))) {
+ log_err("Cannot copy from user key descriptor\n");
+ return -EBUSY;
+ }
- /* Allocate memory to store the key byte array */
- kparam->byte = kmalloc(kparam->size, GFP_KERNEL);
- if (!kparam->byte) {
- log_err("Cannot allocate memory for key descriptor byte\n");
+ /* Allocate kernel memory for key descriptor */
+ kparam = kzalloc(sizeof(*kparam), GFP_KERNEL);
+ if (!kparam) {
+ log_err("Cannot allocate kernel memory for key descriptor\n");
return -ENOMEM;
}
- if (copy_from_user(kparam->byte, compat_ptr(uparam->byte),
- uparam->size)) {
- log_err("Cannot copy from user the key descriptor byte\n");
- return -EBUSY;
+ if (compat_ptr(key.byte)) {
+ /* Allocate memory to store the key byte array */
+ kparam->byte = kmalloc(key.size, GFP_KERNEL);
+ if (!kparam->byte) {
+ log_err("Cannot allocate memory for key descriptor "
+ "byte\n");
+ kfree(kparam);
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(kparam->byte,
+ compat_ptr(key.byte), key.size)) {
+ log_err("Cannot copy from user the key descriptor "
+ "byte\n");
+ kfree(kparam->byte);
+ kfree(kparam);
+ return -EBUSY;
+ }
}
- if (compat_ptr(uparam->mask)) {
+
+ if (compat_ptr(key.mask)) {
/* Allocate memory to store the key mask array */
- kparam->mask = kmalloc(kparam->size, GFP_KERNEL);
+ kparam->mask = kmalloc(key.size, GFP_KERNEL);
if (!kparam->mask) {
log_err("Cannot allocate memory for key descriptor "
"mask\n");
kfree(kparam->byte);
+ kfree(kparam);
return -ENOMEM;
}
- if (copy_from_user(kparam->mask, compat_ptr(uparam->mask),
- uparam->size)) {
+ if (copy_from_user(kparam->mask,
+ compat_ptr(key.mask), key.size)) {
log_err("Cannot copy from user the key descriptor "
"mask\n");
+ kfree(kparam->byte);
+ kfree(kparam->mask);
+ kfree(kparam);
return -EBUSY;
}
- } else
- kparam->mask = NULL;
+ }
+ kparam->size = key.size;
+ *ks_key = kparam;
+ return 0;
+}
+
+static int copy_pair_descriptor_compatcpy(
+ struct dpa_offload_lookup_key_pair **ks_pair,
+ struct compat_ioc_dpa_offld_lookup_key_pair pair)
+{
+ struct dpa_offload_lookup_key_pair *kpair;
+ int ret = 0;
+ /* Allocate kernel memory for lookup pair descriptor */
+ kpair = kzalloc(sizeof(*kpair), GFP_KERNEL);
+ if (!kpair) {
+ log_err("Cannot allocate kernel memory for pair descriptor\n");
+ return -ENOMEM;
+ }
+
+ if (compat_ptr(pair.first_key)) {
+ /* Copy user-provided key descriptor */
+ ret = copy_key_descriptor_compatcpy(
+ &kpair->first_key, pair.first_key);
+ if (ret != 0) {
+ log_err("Cannot copy first key of the pair\n");
+ kfree(kpair);
+ return ret;
+ }
+ }
+
+ if (compat_ptr(pair.second_key)) {
+ ret = copy_key_descriptor_compatcpy(
+ &kpair->second_key, pair.second_key);
+ if (ret != 0) {
+ log_err("Cannot copy second key of the pair\n");
+ kfree(kpair);
+ return ret;
+ }
+ }
+ *ks_pair = kpair;
return 0;
}
#endif
@@ -1859,12 +2090,25 @@ static void dpa_stats_plcr_cnt_compatcpy(struct dpa_stats_cnt_plcr *kprm,
kprm->cnt_sel = uprm->cnt_sel;
}
+
static long dpa_stats_tbl_cnt_compatcpy(struct dpa_stats_cnt_classif_tbl *kprm,
struct dpa_stats_compat_cnt_classif_tbl *uprm)
{
kprm->td = uprm->td;
kprm->cnt_sel = uprm->cnt_sel;
- return copy_key_descriptor_compatcpy(&kprm->key, &uprm->key);
+ /* If different than NULL, it will be overwritten */
+ kprm->key = compat_ptr(uprm->key);
+
+ if (compat_ptr(uprm->key)) {
+ /*
+ * copy_key_descriptor_compatcpy() allocates the
+ * kernel key descriptor itself; allocating one
+ * here as well would leak it.
+ */
+ kprm->key = NULL;
+ return copy_key_descriptor_compatcpy(&kprm->key, uprm->key);
+ }
+ return 0;
}
static long dpa_stats_ccnode_cnt_compatcpy(
@@ -1874,7 +2118,19 @@ static long dpa_stats_ccnode_cnt_compatcpy(
kprm->cnt_sel = uprm->cnt_sel;
kprm->ccnode_type = uprm->ccnode_type;
kprm->cc_node = compat_get_id2ptr(uprm->cc_node, FM_MAP_TYPE_PCD_NODE);
- return copy_key_descriptor_compatcpy(&kprm->key, &uprm->key);
+ /* If different than NULL, it will be overwritten */
+ kprm->key = compat_ptr(uprm->key);
+
+ if (compat_ptr(uprm->key)) {
+ /*
+ * copy_key_descriptor_compatcpy() allocates the
+ * kernel key descriptor itself; allocating one
+ * here as well would leak it.
+ */
+ kprm->key = NULL;
+ return copy_key_descriptor_compatcpy(&kprm->key, uprm->key);
+ }
+ return 0;
}
static long dpa_stats_eth_cls_compatcpy(struct dpa_stats_cls_cnt_eth *kprm,
@@ -2016,102 +2272,117 @@ static long dpa_stats_tbl_cls_compatcpy(
struct dpa_stats_compat_cls_cnt_classif_tbl *uprm,
uint32_t cls_members)
{
- struct compat_ioc_dpa_offld_lookup_key *keys;
- struct compat_ioc_dpa_offld_lookup_key_pair *pairs;
- uint32_t size = 0, i;
+ struct compat_ioc_dpa_offld_lookup_key_pair pair;
+ compat_uptr_t *us_keys;
+ uint32_t i;
long ret;
kprm->cnt_sel = uprm->cnt_sel;
kprm->td = uprm->td;
kprm->key_type = uprm->key_type;
+ /* Allocate memory to store array of user-space keys descriptors */
+ us_keys = kzalloc(sizeof(compat_uptr_t) * cls_members, GFP_KERNEL);
+ if (!us_keys) {
+ log_err("Cannot allocate memory array of lookup keys\n");
+ return -ENOMEM;
+ }
+
if (kprm->key_type == DPA_STATS_CLASSIF_SINGLE_KEY) {
- size = sizeof(struct dpa_offload_lookup_key) * cls_members;
- kprm->keys = kzalloc(size, GFP_KERNEL);
- if (!kprm->keys) {
- log_err("Cannot allocate kernel memory for lookup keys "
- "array\n");
- return -ENOMEM;
+ if (copy_from_user(us_keys, compat_ptr(uprm->keys),
+ (sizeof(compat_uptr_t) * cls_members))) {
+ log_err("Cannot copy from user-space array of keys "
+ "descriptors\n");
+ kfree(us_keys);
+ return -EBUSY;
}
- size = sizeof(struct compat_ioc_dpa_offld_lookup_key) *
- cls_members;
- keys = kzalloc(size, GFP_KERNEL);
- if (!keys) {
- log_err("Cannot allocate memory for lookup keys "
+ /* Allocate memory for array of kernel-space keys descriptors */
+ kprm->keys = kzalloc((sizeof(*kprm->keys) * cls_members),
+ GFP_KERNEL);
+ if (!kprm->keys) {
+ log_err("Cannot allocate kernel memory for lookup keys "
"array\n");
+ kfree(us_keys);
return -ENOMEM;
}
-
- if (copy_from_user(keys, (compat_ptr)(uprm->keys), size)) {
- log_err("Cannot copy from user array of lookup keys\n");
- kfree(keys);
- return -EBUSY;
- }
-
for (i = 0; i < cls_members; i++) {
- if (!compat_ptr(keys[i].byte))
+ if (!compat_ptr(us_keys[i]))
continue;
-
+ /* Copy user-provided key descriptor */
ret = copy_key_descriptor_compatcpy(&kprm->keys[i],
- &keys[i]);
+ us_keys[i]);
if (ret != 0) {
log_err("Cannot copy the key descriptor\n");
- kfree(keys);
- return -EINVAL;
+ kfree(us_keys);
+ return ret;
}
}
- kfree(keys);
- } else if (kprm->key_type == DPA_STATS_CLASSIF_PAIR_KEY) {
- size = sizeof(struct dpa_offload_lookup_key_pair) * cls_members;
- kprm->pairs = kzalloc(size, GFP_KERNEL);
- if (!kprm->pairs) {
- log_err("Cannot allocate kernel memory for pair lookup "
- "keys array\n");
- return -ENOMEM;
+ kfree(us_keys);
+ }
+
+ if (kprm->key_type == DPA_STATS_CLASSIF_PAIR_KEY) {
+ if (copy_from_user(us_keys, compat_ptr(uprm->pairs),
+ (sizeof(compat_uptr_t) * cls_members))) {
+ log_err("Cannot copy from user-space array of pairs "
+ "descriptors\n");
+ kfree(us_keys);
+ return -EBUSY;
}
- size = sizeof(struct compat_ioc_dpa_offld_lookup_key_pair) *
- cls_members;
- pairs = kzalloc(size, GFP_KERNEL);
- if (!pairs) {
- log_err("Cannot allocate memory for pair lookup keys "
+ /* Allocate memory for array of kernel-space pairs descriptors*/
+ kprm->pairs = kzalloc((sizeof(*kprm->pairs) * cls_members),
+ GFP_KERNEL);
+ if (!kprm->pairs) {
+ log_err("Cannot allocate kernel memory for lookup pairs"
"array\n");
+ kfree(us_keys);
return -ENOMEM;
}
- if (copy_from_user(pairs, (compat_ptr)(uprm->pairs), size)) {
- log_err("Cannot copy from user array of pair lookup "
- "keys\n");
- kfree(pairs);
- return -EBUSY;
- }
-
for (i = 0; i < cls_members; i++) {
- if (!compat_ptr(pairs[i].first_key.byte))
+ if (!compat_ptr(us_keys[i]))
continue;
- ret = copy_key_descriptor_compatcpy(
- &kprm->pairs[i].first_key,
- &pairs[i].first_key);
- if (ret != 0) {
- log_err("Cannot copy the key descriptor for the"
- " first lookup key\n");
- kfree(pairs);
- return -EINVAL;
+ /* Allocate memory for kernel pair descriptor */
+ kprm->pairs[i] = kzalloc(sizeof(*kprm->pairs[i]),
+ GFP_KERNEL);
+ if (!kprm->pairs[i]) {
+ log_err("Cannot allocate kernel memory for pair"
+ " descriptor\n");
+ return -ENOMEM;
}
- ret = copy_key_descriptor_compatcpy(
- &kprm->pairs[i].second_key,
- &pairs[i].second_key);
- if (ret != 0) {
- log_err("Cannot copy the key descriptor for the"
- " second lookup key\n", uprm->td);
- kfree(pairs);
- return -EINVAL;
+ if (copy_from_user(&pair, compat_ptr(us_keys[i]),
+ (sizeof(pair)))) {
+ log_err("Cannot copy pair descriptor\n");
+ return -EBUSY;
+ }
+
+ if (compat_ptr(pair.first_key)) {
+ /* Copy user-provided first key descriptor */
+ ret = copy_key_descriptor_compatcpy(
+ &kprm->pairs[i]->first_key,
+ pair.first_key);
+ if (ret != 0) {
+ log_err("Cannot copy first key\n");
+ kfree(us_keys);
+ return ret;
+ }
+ }
+
+ if (compat_ptr(pair.second_key)) {
+ /* Copy user-provided second key descriptor */
+ ret = copy_key_descriptor_compatcpy(
+ &kprm->pairs[i]->second_key,
+ pair.second_key);
+ if (ret != 0) {
+ log_err("Cannot copy second key\n");
+ kfree(us_keys);
+ return ret;
+ }
}
}
- kfree(pairs);
}
return 0;
}
@@ -2121,45 +2392,50 @@ static long dpa_stats_ccnode_cls_compatcpy(
struct dpa_stats_compat_cls_cnt_classif_node *uprm,
uint32_t cls_members)
{
- struct compat_ioc_dpa_offld_lookup_key *keys;
- uint32_t size, i;
+ compat_uptr_t *us_keys;
+ uint32_t i;
long ret = 0;
kprm->cc_node = compat_get_id2ptr(uprm->cc_node, FM_MAP_TYPE_PCD_NODE);
kprm->cnt_sel = uprm->cnt_sel;
kprm->ccnode_type = uprm->ccnode_type;
- size = sizeof(struct dpa_offload_lookup_key) * cls_members;
- kprm->keys = kzalloc(size, GFP_KERNEL);
- if (!kprm->keys) {
- log_err("Cannot allocate kernel memory for lookup keys "
- "array\n");
+ /* Allocate memory to store array of user-space keys descriptors */
+ us_keys = kzalloc(sizeof(compat_uptr_t) * cls_members, GFP_KERNEL);
+ if (!us_keys) {
+ log_err("Cannot allocate memory array of lookup keys\n");
return -ENOMEM;
}
- size = sizeof(struct compat_ioc_dpa_offld_lookup_key) * cls_members;
- keys = kzalloc(size, GFP_KERNEL);
- if (!keys) {
- log_err("Cannot allocate memory for lookup keys array\n");
- return -ENOMEM;
- }
-
- if (copy_from_user(keys, (compat_ptr)(uprm->keys), size)) {
- log_err("Cannot copy from user array of lookup keys\n");
- kfree(keys);
+ if (copy_from_user(us_keys, compat_ptr(uprm->keys),
+ (sizeof(compat_uptr_t) * cls_members))) {
+ log_err("Cannot copy from user-space array of keys "
+ "descriptors\n");
+ kfree(us_keys);
return -EBUSY;
}
+ /* Allocate memory to store array of kernel-space keys descriptors */
+ kprm->keys = kzalloc((sizeof(*kprm->keys) * cls_members), GFP_KERNEL);
+ if (!kprm->keys) {
+ log_err("Cannot allocate kernel memory for lookup keys "
+ "array\n");
+ kfree(us_keys);
+ return -ENOMEM;
+ }
for (i = 0; i < cls_members; i++) {
- ret = copy_key_descriptor_compatcpy(&kprm->keys[i], &keys[i]);
+ if (!compat_ptr(us_keys[i]))
+ continue;
+ /* Copy user-provided key descriptor */
+ ret = copy_key_descriptor_compatcpy(&kprm->keys[i], us_keys[i]);
if (ret != 0) {
log_err("Cannot copy the key descriptor\n");
- kfree(keys);
- return -EINVAL;
+ kfree(us_keys);
+ return ret;
}
}
- kfree(keys);
- return ret;
+ kfree(us_keys);
+ return 0;
}
static long dpa_stats_ipsec_cls_compatcpy(struct dpa_stats_cls_cnt_ipsec *kprm,
diff --git a/drivers/staging/fsl_qbman/bman_driver.c b/drivers/staging/fsl_qbman/bman_driver.c
index 35e690b..ed7dde0 100644
--- a/drivers/staging/fsl_qbman/bman_driver.c
+++ b/drivers/staging/fsl_qbman/bman_driver.c
@@ -198,11 +198,34 @@ static struct bm_portal_config *get_pcfg(struct list_head *list)
return pcfg;
}
+static struct bm_portal_config *get_pcfg_idx(struct list_head *list,
+ uint32_t idx)
+{
+ struct bm_portal_config *pcfg;
+ if (list_empty(list))
+ return NULL;
+ list_for_each_entry(pcfg, list, list) {
+ if (pcfg->public_cfg.index == idx) {
+ list_del(&pcfg->list);
+ return pcfg;
+ }
+ }
+ return NULL;
+}
+
struct bm_portal_config *bm_get_unused_portal(void)
{
+ return bm_get_unused_portal_idx(QBMAN_ANY_PORTAL_IDX);
+}
+
+struct bm_portal_config *bm_get_unused_portal_idx(uint32_t idx)
+{
struct bm_portal_config *ret;
spin_lock(&unused_pcfgs_lock);
- ret = get_pcfg(&unused_pcfgs);
+ if (idx == QBMAN_ANY_PORTAL_IDX)
+ ret = get_pcfg(&unused_pcfgs);
+ else
+ ret = get_pcfg_idx(&unused_pcfgs, idx);
spin_unlock(&unused_pcfgs_lock);
return ret;
}
diff --git a/drivers/staging/fsl_qbman/bman_private.h b/drivers/staging/fsl_qbman/bman_private.h
index 9114308..7e54701 100644
--- a/drivers/staging/fsl_qbman/bman_private.h
+++ b/drivers/staging/fsl_qbman/bman_private.h
@@ -80,6 +80,7 @@ const struct bm_portal_config *bman_destroy_affine_portal(void);
/* Hooks from fsl_usdpaa.c to bman_driver.c */
struct bm_portal_config *bm_get_unused_portal(void);
+struct bm_portal_config *bm_get_unused_portal_idx(uint32_t idx);
void bm_put_unused_portal(struct bm_portal_config *pcfg);
void bm_set_liodns(struct bm_portal_config *pcfg);
diff --git a/drivers/staging/fsl_qbman/fsl_usdpaa.c b/drivers/staging/fsl_qbman/fsl_usdpaa.c
index f2891ef..d069a8a 100644
--- a/drivers/staging/fsl_qbman/fsl_usdpaa.c
+++ b/drivers/staging/fsl_qbman/fsl_usdpaa.c
@@ -50,6 +50,7 @@ struct mem_fragment {
unsigned long pfn_base; /* PFN version of 'base' */
unsigned long pfn_len; /* PFN version of 'len' */
unsigned int refs; /* zero if unmapped */
+ u64 root_len; /* Size of the original fragment */
struct list_head list;
/* if mapped, flags+name captured at creation time */
u32 flags;
@@ -64,7 +65,9 @@ struct mem_fragment {
* ioctl(USDPAA_IOCTL_DMA_MAP), though the actual mapping then happens via a
* mmap(). */
struct mem_mapping {
- struct mem_fragment *frag;
+ struct mem_fragment *root_frag;
+ u32 frag_count;
+ u64 total_size;
struct list_head list;
};
@@ -169,12 +172,28 @@ static const struct alloc_backend {
}
};
+/* Determines the largest acceptable page size for a given size
+ The sizes are determined by what the TLB1 acceptable page sizes are */
/* Determines the largest acceptable page size for a given size.
   The candidates are the TLB1-supported sizes: 4K * 4^n, from 4K up
   to 1G.  Returns 0 when the request is smaller than one 4K page. */
static u32 largest_page_size(u32 size)
{
	int shift;

	if (size < 4096)
		return 0;
	/* Walk the supported sizes downwards: 1G, 256M, ..., 16K, 4K */
	for (shift = 30; shift >= 12; shift -= 2)
		if (size >= (1u << shift))
			return 1u << shift;
	return 0;
}
+
/* Helper for ioctl_dma_map() when we have a larger fragment than we need. This
* splits the fragment into 4 and returns the upper-most. (The caller can loop
* until it has a suitable fragment size.) */
static struct mem_fragment *split_frag(struct mem_fragment *frag)
{
struct mem_fragment *x[3];
+
x[0] = kmalloc(sizeof(struct mem_fragment), GFP_KERNEL);
x[1] = kmalloc(sizeof(struct mem_fragment), GFP_KERNEL);
x[2] = kmalloc(sizeof(struct mem_fragment), GFP_KERNEL);
@@ -196,6 +215,7 @@ static struct mem_fragment *split_frag(struct mem_fragment *frag)
x[2]->pfn_base = x[1]->pfn_base + frag->pfn_len;
x[0]->pfn_len = x[1]->pfn_len = x[2]->pfn_len = frag->pfn_len;
x[0]->refs = x[1]->refs = x[2]->refs = 0;
+ x[0]->root_len = x[1]->root_len = x[2]->root_len = frag->root_len;
list_add(&x[0]->list, &frag->list);
list_add(&x[1]->list, &x[0]->list);
list_add(&x[2]->list, &x[1]->list);
@@ -211,12 +231,16 @@ static struct mem_fragment *merge_frag(struct mem_fragment *frag)
uint64_t newlen = frag->len << 2;
uint64_t newbase = frag->base & ~(newlen - 1);
struct mem_fragment *tmp, *leftmost = frag, *rightmost = frag;
+
+ /* If this fragment is already at root size don't allow merge */
+ if (frag->len == frag->root_len)
+ return NULL;
/* Scan left until we find the start */
tmp = list_entry(frag->list.prev, struct mem_fragment, list);
while ((&tmp->list != &mem_list) && (tmp->base >= newbase)) {
if (tmp->refs)
return NULL;
- if (tmp->len != tmp->len)
+ if (tmp->len != frag->len)
return NULL;
leftmost = tmp;
tmp = list_entry(tmp->list.prev, struct mem_fragment, list);
@@ -226,7 +250,7 @@ static struct mem_fragment *merge_frag(struct mem_fragment *frag)
while ((&tmp->list != &mem_list) && (tmp->base < (newbase + newlen))) {
if (tmp->refs)
return NULL;
- if (tmp->len != tmp->len)
+ if (tmp->len != frag->len)
return NULL;
rightmost = tmp;
tmp = list_entry(tmp->list.next, struct mem_fragment, list);
@@ -251,15 +275,6 @@ static struct mem_fragment *merge_frag(struct mem_fragment *frag)
return frag;
}
-/* Helper to verify that 'sz' is (4096 * 4^x) for some x. */
-static int is_good_size(u64 sz)
-{
- int log = ilog2(phys_size);
- if ((phys_size & (phys_size - 1)) || (log < 12) || (log & 1))
- return 0;
- return 1;
-}
-
/* Hook from arch/powerpc/mm/mem.c */
int usdpaa_test_fault(unsigned long pfn, u64 *phys_addr, u64 *size)
{
@@ -333,7 +348,8 @@ static int init_qm_portal(struct qm_portal_config *config,
}
/* Initialize the EQCR */
- if (qm_eqcr_init(portal, qm_eqcr_pvb, qm_eqcr_cce)) {
+ if (qm_eqcr_init(portal, qm_eqcr_pvb,
+ portal->eqcr.use_eqcr_ci_stashing ? 3 : 0, 1)) {
pr_err("Qman EQCR initialisation failed\n");
return 1;
}
@@ -457,6 +473,30 @@ static bool check_portal_channel(void *ctx, u32 channel)
return false;
}
+__maybe_unused static void dump_frags(void)
+{
+ struct mem_fragment *frag;
+ int i = 0;
+ list_for_each_entry(frag, &mem_list, list) {
+ pr_info("FRAG %d: base 0x%llx len 0x%llx root_len 0x%llx\n",
+ i, frag->base, frag->len, frag->root_len);
+ ++i;
+ }
+}
+
+
+__maybe_unused static void dump_frags(void)
+{
+ struct mem_fragment *frag;
+ int i = 0;
+ list_for_each_entry(frag, &mem_list, list) {
+ pr_info("FRAG %d: base 0x%llx len 0x%llx root_len 0x%llx\n",
+ i, frag->base, frag->len, frag->root_len);
+ ++i;
+ }
+}
+
+
static int usdpaa_release(struct inode *inode, struct file *filp)
{
struct ctx *ctx = filp->private_data;
@@ -556,15 +596,23 @@ static int usdpaa_release(struct inode *inode, struct file *filp)
/* Release any DMA regions */
spin_lock(&mem_lock);
list_for_each_entry_safe(map, tmpmap, &ctx->maps, list) {
- if (map->frag->has_locking && (map->frag->owner == map)) {
- map->frag->owner = NULL;
- wake_up(&map->frag->wq);
+ struct mem_fragment *current_frag = map->root_frag;
+ int i;
+ if (map->root_frag->has_locking &&
+ (map->root_frag->owner == map)) {
+ map->root_frag->owner = NULL;
+ wake_up(&map->root_frag->wq);
}
- if (!--map->frag->refs) {
- struct mem_fragment *frag = map->frag;
- do {
- frag = merge_frag(frag);
- } while (frag);
+ /* Check each fragment and merge if the ref count is 0 */
+ for (i = 0; i < map->frag_count; i++) {
+ if (!--current_frag->refs) {
+ struct mem_fragment *frag = current_frag;
+ do {
+ frag = merge_frag(frag);
+ } while (frag);
+ }
+ current_frag = list_entry(current_frag->list.next,
+ struct mem_fragment, list);
}
list_del(&map->list);
kfree(map);
@@ -601,12 +649,19 @@ static int check_mmap_dma(struct ctx *ctx, struct vm_area_struct *vma,
struct mem_mapping *map;
list_for_each_entry(map, &ctx->maps, list) {
- if (map->frag->pfn_base == vma->vm_pgoff) {
- *match = 1;
- if (map->frag->len != (vma->vm_end - vma->vm_start))
- return -EINVAL;
- *pfn = map->frag->pfn_base;
- return 0;
+ int i;
+ struct mem_fragment *frag = map->root_frag;
+
+ for (i = 0; i < map->frag_count; i++) {
+ if (frag->pfn_base == vma->vm_pgoff) {
+ *match = 1;
+ if (frag->len != (vma->vm_end - vma->vm_start))
+ return -EINVAL;
+ *pfn = frag->pfn_base;
+ return 0;
+ }
+ frag = list_entry(frag->list.next, struct mem_fragment,
+ list);
}
}
*match = 0;
@@ -687,7 +742,7 @@ static unsigned long usdpaa_get_unmapped_area(struct file *file,
{
struct vm_area_struct *vma;
- if (!is_good_size(len))
+ if (len % PAGE_SIZE)
return -EINVAL;
addr = USDPAA_MEM_ROUNDUP(addr, len);
@@ -831,15 +886,20 @@ static long ioctl_id_reserve(struct ctx *ctx, void __user *arg)
static long ioctl_dma_map(struct file *fp, struct ctx *ctx,
struct usdpaa_ioctl_dma_map *i)
{
- struct mem_fragment *frag;
+ struct mem_fragment *frag, *start_frag, *next_frag;
struct mem_mapping *map, *tmp;
- u64 search_size;
- int ret = 0;
- if (i->len && !is_good_size(i->len))
+ int ret = 0, k;
+ u32 largest_page, so_far = 0;
+ int frag_count = 0;
+ unsigned long next_addr = PAGE_SIZE;
+
+ if (i->len && i->len % PAGE_SIZE)
return -EINVAL;
+
map = kmalloc(sizeof(*map), GFP_KERNEL);
if (!map)
return -ENOMEM;
+
spin_lock(&mem_lock);
if (i->flags & USDPAA_DMA_FLAG_SHARE) {
list_for_each_entry(frag, &mem_list, list) {
@@ -853,19 +913,23 @@ static long ioctl_dma_map(struct file *fp, struct ctx *ctx,
ret = -EBUSY;
goto out;
}
+ /* Check if this has already been mapped
+ to this process */
list_for_each_entry(tmp, &ctx->maps, list)
- if (tmp->frag == frag) {
+ if (tmp->root_frag == frag) {
ret = -EBUSY;
goto out;
}
i->has_locking = frag->has_locking;
i->did_create = 0;
i->len = frag->len;
+ start_frag = frag;
goto do_map;
}
}
/* No matching entry */
if (!(i->flags & USDPAA_DMA_FLAG_CREATE)) {
+ pr_err("ioctl_dma_map() No matching entry\n");
ret = -ENOMEM;
goto out;
}
@@ -875,52 +939,124 @@ static long ioctl_dma_map(struct file *fp, struct ctx *ctx,
ret = -EINVAL;
goto out;
}
- /* We search for the required size and if that fails, for the next
- * biggest size, etc. */
- for (search_size = i->len; search_size <= phys_size;
- search_size <<= 2) {
+ /* Verify there is sufficient space to do the mapping */
+ down_write(&current->mm->mmap_sem);
+ next_addr = usdpaa_get_unmapped_area(fp, next_addr, i->len, 0, 0);
+ up_write(&current->mm->mmap_sem);
+
+ if (next_addr & ~PAGE_MASK) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Find one or more contiguous fragments that satisfy the total length
+ trying to minimize the number of fragments
+ compute the largest page size that the allocation could use */
+ largest_page = largest_page_size(i->len);
+ start_frag = NULL;
+ while (largest_page &&
+ largest_page <= largest_page_size(phys_size) &&
+ start_frag == NULL) {
+ /* Search the list for a frag of that size */
list_for_each_entry(frag, &mem_list, list) {
- if (!frag->refs && (frag->len == search_size)) {
- while (frag->len > i->len) {
- frag = split_frag(frag);
- if (!frag) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!frag->refs && (frag->len == largest_page)) {
+ /* See if the next x fragments are free
+ and can accommodate the size */
+ u32 found_size = largest_page;
+ next_frag = list_entry(frag->list.next,
+ struct mem_fragment,
+ list);
+ /* If the fragment is too small check
+ if the neighbours can support it */
+ while (found_size < i->len) {
+ if (&mem_list == &next_frag->list)
+ break; /* End of list */
+ if (next_frag->refs != 0 ||
+ next_frag->len == 0)
+ break; /* not enough space */
+ found_size += next_frag->len;
+ next_frag = list_entry(next_frag->list.next,
+ struct mem_fragment, list);
+ }
+ if (found_size >= i->len) {
+ /* Success! there is enough contiguous
+ free space */
+ start_frag = frag;
+ break;
}
- frag->flags = i->flags;
- strncpy(frag->name, i->name,
- USDPAA_DMA_NAME_MAX);
- frag->has_locking = i->has_locking;
- init_waitqueue_head(&frag->wq);
- frag->owner = NULL;
- i->did_create = 1;
- goto do_map;
}
- }
	} /* next frag loop */
	/* Couldn't satisfy the request with this
	   page size, try a larger one */
	largest_page <<= 2;
+ }
+ if (start_frag == NULL) {
+ /* Couldn't find proper amount of space */
+ ret = -ENOMEM;
+ goto out;
}
- ret = -ENOMEM;
- goto out;
-
+ i->did_create = 1;
do_map:
- map->frag = frag;
- frag->refs++;
+ /* We may need to divide the final fragment to accommodate the mapping */
+ next_frag = start_frag;
+ while (so_far != i->len) {
+ BUG_ON(next_frag->len == 0);
+ while ((next_frag->len + so_far) > i->len) {
+ /* Split frag until they match */
+ split_frag(next_frag);
+ }
+ so_far += next_frag->len;
+ ++frag_count;
+ next_frag = list_entry(next_frag->list.next,
+ struct mem_fragment, list);
+ }
+
+ /* we need to reserve start count fragments starting at start frag */
+ next_frag = start_frag;
+ for (k = 0; k < frag_count; k++) {
+ next_frag->refs++;
+ next_frag = list_entry(next_frag->list.next,
+ struct mem_fragment, list);
+ }
+
+ start_frag->flags = i->flags;
+ strncpy(start_frag->name, i->name, USDPAA_DMA_NAME_MAX);
+ start_frag->has_locking = i->has_locking;
+ init_waitqueue_head(&start_frag->wq);
+ start_frag->owner = NULL;
+
+ /* Setup the map entry */
+ map->root_frag = start_frag;
+ map->total_size = i->len;
+ map->frag_count = frag_count;
list_add(&map->list, &ctx->maps);
- i->phys_addr = frag->base;
-
+ i->phys_addr = start_frag->base;
out:
spin_unlock(&mem_lock);
if (!ret) {
unsigned long longret;
- down_write(&current->mm->mmap_sem);
- longret = do_mmap_pgoff(fp, PAGE_SIZE, map->frag->len, PROT_READ |
- (i->flags & USDPAA_DMA_FLAG_RDONLY ? 0 : PROT_WRITE),
- MAP_SHARED, map->frag->pfn_base);
- up_write(&current->mm->mmap_sem);
- if (longret & ~PAGE_MASK)
- ret = (int)longret;
- else
- i->ptr = (void *)longret;
+ unsigned long next_addr = PAGE_SIZE;
+ next_frag = start_frag;
+ for (k = 0; k < frag_count; k++) {
+ down_write(&current->mm->mmap_sem);
+ longret = do_mmap_pgoff(fp, next_addr, next_frag->len,
+ PROT_READ |
+ (i->flags &
+ USDPAA_DMA_FLAG_RDONLY ? 0
+ : PROT_WRITE),
+ MAP_SHARED,
+ next_frag->pfn_base);
+ up_write(&current->mm->mmap_sem);
+ if (longret & ~PAGE_MASK)
+ ret = (int)longret;
+ else {
+ if (k == 0)
+ i->ptr = (void *)longret;
+ else
+ BUG_ON(next_addr != longret);
+ next_addr = longret + next_frag->len;
+ }
+ next_frag = list_entry(next_frag->list.next,
+ struct mem_fragment, list);
+ }
} else
kfree(map);
return ret;
@@ -940,12 +1076,12 @@ static long ioctl_dma_unmap(struct ctx *ctx, void __user *arg)
}
spin_lock(&mem_lock);
list_for_each_entry(map, &ctx->maps, list) {
- if (map->frag->pfn_base == vma->vm_pgoff) {
+ if (map->root_frag->pfn_base == vma->vm_pgoff) {
/* Drop the map lock if we hold it */
- if (map->frag->has_locking &&
- (map->frag->owner == map)) {
- map->frag->owner = NULL;
- wake_up(&map->frag->wq);
+ if (map->root_frag->has_locking &&
+ (map->root_frag->owner == map)) {
+ map->root_frag->owner = NULL;
+ wake_up(&map->root_frag->wq);
}
goto map_match;
}
@@ -982,8 +1118,8 @@ static int test_lock(struct mem_mapping *map)
{
int ret = 0;
spin_lock(&mem_lock);
- if (!map->frag->owner) {
- map->frag->owner = map;
+ if (!map->root_frag->owner) {
+ map->root_frag->owner = map;
ret = 1;
}
spin_unlock(&mem_lock);
@@ -1003,7 +1139,7 @@ static long ioctl_dma_lock(struct ctx *ctx, void __user *arg)
}
spin_lock(&mem_lock);
list_for_each_entry(map, &ctx->maps, list) {
- if (map->frag->pfn_base == vma->vm_pgoff)
+ if (map->root_frag->pfn_base == vma->vm_pgoff)
goto map_match;
}
map = NULL;
@@ -1011,9 +1147,9 @@ map_match:
spin_unlock(&mem_lock);
up_read(&current->mm->mmap_sem);
- if (!map->frag->has_locking)
+ if (!map->root_frag->has_locking)
return -ENODEV;
- return wait_event_interruptible(map->frag->wq, test_lock(map));
+ return wait_event_interruptible(map->root_frag->wq, test_lock(map));
}
static long ioctl_dma_unlock(struct ctx *ctx, void __user *arg)
@@ -1029,12 +1165,12 @@ static long ioctl_dma_unlock(struct ctx *ctx, void __user *arg)
else {
spin_lock(&mem_lock);
list_for_each_entry(map, &ctx->maps, list) {
- if (map->frag->pfn_base == vma->vm_pgoff) {
- if (!map->frag->has_locking)
+ if (map->root_frag->pfn_base == vma->vm_pgoff) {
+ if (!map->root_frag->has_locking)
ret = -ENODEV;
- else if (map->frag->owner == map) {
- map->frag->owner = NULL;
- wake_up(&map->frag->wq);
+ else if (map->root_frag->owner == map) {
+ map->root_frag->owner = NULL;
+ wake_up(&map->root_frag->wq);
ret = 0;
} else
ret = -EBUSY;
@@ -1083,7 +1219,8 @@ static long ioctl_portal_map(struct file *fp, struct ctx *ctx,
return -ENOMEM;
memcpy(&mapping->user, arg, sizeof(mapping->user));
if (mapping->user.type == usdpaa_portal_qman) {
- mapping->qportal = qm_get_unused_portal();
+ mapping->qportal =
+ qm_get_unused_portal_idx(mapping->user.index);
if (!mapping->qportal) {
ret = -ENODEV;
goto err_get_portal;
@@ -1091,13 +1228,16 @@ static long ioctl_portal_map(struct file *fp, struct ctx *ctx,
mapping->phys = &mapping->qportal->addr_phys[0];
mapping->user.channel = mapping->qportal->public_cfg.channel;
mapping->user.pools = mapping->qportal->public_cfg.pools;
+ mapping->user.index = mapping->qportal->public_cfg.index;
} else if (mapping->user.type == usdpaa_portal_bman) {
- mapping->bportal = bm_get_unused_portal();
+ mapping->bportal =
+ bm_get_unused_portal_idx(mapping->user.index);
if (!mapping->bportal) {
ret = -ENODEV;
goto err_get_portal;
}
mapping->phys = &mapping->bportal->addr_phys[0];
+ mapping->user.index = mapping->bportal->public_cfg.index;
} else {
ret = -EINVAL;
goto err_copy_from_user;
@@ -1281,11 +1421,13 @@ static long usdpaa_ioctl_compat(struct file *fp, unsigned int cmd,
if (copy_from_user(&input, a, sizeof(input)))
return -EFAULT;
converted.type = input.type;
+ converted.index = input.index;
ret = ioctl_portal_map(fp, ctx, &converted);
input.addr.cinh = ptr_to_compat(converted.addr.cinh);
input.addr.cena = ptr_to_compat(converted.addr.cena);
input.channel = converted.channel;
input.pools = converted.pools;
+ input.index = converted.index;
if (copy_to_user(a, &input, sizeof(input)))
return -EFAULT;
return ret;
@@ -1383,12 +1525,12 @@ __init void fsl_usdpaa_init_early(void)
pr_info("No USDPAA memory, no 'usdpaa_mem' bootarg\n");
return;
}
- if (!is_good_size(phys_size)) {
- pr_err("'usdpaa_mem' bootarg must be 4096*4^x\n");
+ if (phys_size % PAGE_SIZE) {
+ pr_err("'usdpaa_mem' bootarg must be a multiple of page size\n");
phys_size = 0;
return;
}
- phys_start = memblock_alloc(phys_size, phys_size);
+ phys_start = memblock_alloc(phys_size, largest_page_size(phys_size));
if (!phys_start) {
pr_err("Failed to reserve USDPAA region (sz:%llx)\n",
phys_size);
@@ -1406,25 +1548,39 @@ static int __init usdpaa_init(void)
{
struct mem_fragment *frag;
int ret;
+ u64 tmp_size = phys_size;
+ u64 tmp_start = phys_start;
+ u64 tmp_pfn_size = pfn_size;
+ u64 tmp_pfn_start = pfn_start;
pr_info("Freescale USDPAA process driver\n");
if (!phys_start) {
pr_warn("fsl-usdpaa: no region found\n");
return 0;
}
- frag = kmalloc(sizeof(*frag), GFP_KERNEL);
- if (!frag) {
- pr_err("Failed to setup USDPAA memory accounting\n");
- return -ENOMEM;
+
+ while (tmp_size != 0) {
+ u32 frag_size = largest_page_size(tmp_size);
+ frag = kmalloc(sizeof(*frag), GFP_KERNEL);
+ if (!frag) {
+ pr_err("Failed to setup USDPAA memory accounting\n");
+ return -ENOMEM;
+ }
+ frag->base = tmp_start;
+ frag->len = frag->root_len = frag_size;
+ frag->pfn_base = tmp_pfn_start;
+ frag->pfn_len = frag_size / PAGE_SIZE;
+ frag->refs = 0;
+ init_waitqueue_head(&frag->wq);
+ frag->owner = NULL;
+ list_add(&frag->list, &mem_list);
+
+ /* Adjust for this frag */
+ tmp_start += frag_size;
+ tmp_size -= frag_size;
+ tmp_pfn_start += frag_size / PAGE_SIZE;
+ tmp_pfn_size -= frag_size / PAGE_SIZE;
}
- frag->base = phys_start;
- frag->len = phys_size;
- frag->pfn_base = pfn_start;
- frag->pfn_len = pfn_size;
- frag->refs = 0;
- init_waitqueue_head(&frag->wq);
- frag->owner = NULL;
- list_add(&frag->list, &mem_list);
ret = misc_register(&usdpaa_miscdev);
if (ret)
pr_err("fsl-usdpaa: failed to register misc device\n");
diff --git a/drivers/staging/fsl_qbman/qman_driver.c b/drivers/staging/fsl_qbman/qman_driver.c
index 7b6108a..a1aab54 100644
--- a/drivers/staging/fsl_qbman/qman_driver.c
+++ b/drivers/staging/fsl_qbman/qman_driver.c
@@ -455,6 +455,21 @@ static struct qm_portal_config *get_pcfg(struct list_head *list)
return pcfg;
}
+static struct qm_portal_config *get_pcfg_idx(struct list_head *list, u32 idx)
+{
+ struct qm_portal_config *pcfg;
+ if (list_empty(list))
+ return NULL;
+ list_for_each_entry(pcfg, list, list) {
+ if (pcfg->public_cfg.index == idx) {
+ list_del(&pcfg->list);
+ return pcfg;
+ }
+ }
+ return NULL;
+}
+
+
static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
{
int ret;
@@ -531,11 +546,14 @@ _iommu_domain_free:
iommu_domain_free(pcfg->iommu_domain);
}
-struct qm_portal_config *qm_get_unused_portal(void)
+struct qm_portal_config *qm_get_unused_portal_idx(u32 idx)
{
struct qm_portal_config *ret;
spin_lock(&unused_pcfgs_lock);
- ret = get_pcfg(&unused_pcfgs);
+ if (idx == QBMAN_ANY_PORTAL_IDX)
+ ret = get_pcfg(&unused_pcfgs);
+ else
+ ret = get_pcfg_idx(&unused_pcfgs, idx);
spin_unlock(&unused_pcfgs_lock);
/* Bind stashing LIODNs to the CPU we are currently executing on, and
 * set the portal to use the stashing request queue corresponding to the
@@ -550,6 +568,11 @@ struct qm_portal_config *qm_get_unused_portal(void)
return ret;
}
+struct qm_portal_config *qm_get_unused_portal()
+{
+ return qm_get_unused_portal_idx(QBMAN_ANY_PORTAL_IDX);
+}
+
void qm_put_unused_portal(struct qm_portal_config *pcfg)
{
spin_lock(&unused_pcfgs_lock);
diff --git a/drivers/staging/fsl_qbman/qman_high.c b/drivers/staging/fsl_qbman/qman_high.c
index 8dbb0ce..f8f0524 100644
--- a/drivers/staging/fsl_qbman/qman_high.c
+++ b/drivers/staging/fsl_qbman/qman_high.c
@@ -359,9 +359,6 @@ loop:
goto loop;
}
-
-
-
struct qman_portal *qman_create_portal(
struct qman_portal *portal,
const struct qm_portal_config *config,
@@ -380,12 +377,20 @@ struct qman_portal *qman_create_portal(
__p = &portal->p;
+ portal->p.eqcr.use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ?
+ 1 : 0);
+
/* prep the low-level portal struct with the mapped addresses from the
* config, everything that follows depends on it and "config" is more
* for (de)reference... */
__p->addr.addr_ce = config->addr_virt[DPA_PORTAL_CE];
__p->addr.addr_ci = config->addr_virt[DPA_PORTAL_CI];
- if (qm_eqcr_init(__p, qm_eqcr_pvb, qm_eqcr_cce)) {
+ /*
+ * If CI-stashing is used, the current defaults use a threshold of 3,
+ * and stash with high-than-DQRR priority.
+ */
+ if (qm_eqcr_init(__p, qm_eqcr_pvb,
+ portal->p.eqcr.use_eqcr_ci_stashing ? 3 : 0, 1)) {
pr_err("Qman EQCR initialisation failed\n");
goto fail_eqcr;
}
@@ -1983,10 +1988,23 @@ static inline struct qm_eqcr_entry *try_eq_start(struct qman_portal **p,
(*p)->eqci_owned = fq;
}
#endif
- avail = qm_eqcr_get_avail(&(*p)->p);
- if (avail < 2)
- update_eqcr_ci(*p, avail);
- eq = qm_eqcr_start(&(*p)->p);
+ if ((*p)->p.eqcr.use_eqcr_ci_stashing) {
+ /*
+ * The stashing case is easy, only update if we need to in
+ * order to try and liberate ring entries.
+ */
+ eq = qm_eqcr_start_stash(&(*p)->p);
+ } else {
+ /*
+ * The non-stashing case is harder, need to prefetch ahead of
+ * time.
+ */
+ avail = qm_eqcr_get_avail(&(*p)->p);
+ if (avail < 2)
+ update_eqcr_ci(*p, avail);
+ eq = qm_eqcr_start_no_stash(&(*p)->p);
+ }
+
if (unlikely(!eq)) {
#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
diff --git a/drivers/staging/fsl_qbman/qman_low.h b/drivers/staging/fsl_qbman/qman_low.h
index d63c722..0b5f16c 100644
--- a/drivers/staging/fsl_qbman/qman_low.h
+++ b/drivers/staging/fsl_qbman/qman_low.h
@@ -131,10 +131,6 @@ enum qm_eqcr_pmode { /* matches QCSP_CFG::EPM */
qm_eqcr_pce = 1, /* PI index, cache-enabled */
qm_eqcr_pvb = 2 /* valid-bit */
};
-enum qm_eqcr_cmode { /* s/w-only */
- qm_eqcr_cci, /* CI index, cache-inhibited */
- qm_eqcr_cce /* CI index, cache-enabled */
-};
enum qm_dqrr_dmode { /* matches QCSP_CFG::DP */
qm_dqrr_dpush = 0, /* SDQCR + VDQCR */
qm_dqrr_dpull = 1 /* PDQCR */
@@ -170,10 +166,10 @@ enum qm_mr_cmode { /* matches QCSP_CFG::MM */
struct qm_eqcr {
struct qm_eqcr_entry *ring, *cursor;
u8 ci, available, ithresh, vbit;
+ u32 use_eqcr_ci_stashing;
#ifdef CONFIG_FSL_DPA_CHECKING
u32 busy;
enum qm_eqcr_pmode pmode;
- enum qm_eqcr_cmode cmode;
#endif
};
@@ -283,7 +279,8 @@ static inline void EQCR_INC(struct qm_eqcr *eqcr)
static inline int qm_eqcr_init(struct qm_portal *portal,
enum qm_eqcr_pmode pmode,
- __maybe_unused enum qm_eqcr_cmode cmode)
+ unsigned int eq_stash_thresh,
+ int eq_stash_prio)
{
/* This use of 'register', as well as all other occurances, is because
* it has been observed to generate much faster code with gcc than is
@@ -305,9 +302,10 @@ static inline int qm_eqcr_init(struct qm_portal *portal,
#ifdef CONFIG_FSL_DPA_CHECKING
eqcr->busy = 0;
eqcr->pmode = pmode;
- eqcr->cmode = cmode;
#endif
cfg = (qm_in(CFG) & 0x00ffffff) |
+ (eq_stash_thresh << 28) | /* QCSP_CFG: EST */
+ (eq_stash_prio << 26) | /* QCSP_CFG: EP */
((pmode & 0x3) << 24); /* QCSP_CFG::EPM */
qm_out(CFG, cfg);
return 0;
@@ -328,7 +326,8 @@ static inline void qm_eqcr_finish(struct qm_portal *portal)
pr_crit("EQCR destroyed unquiesced\n");
}
-static inline struct qm_eqcr_entry *qm_eqcr_start(struct qm_portal *portal)
+static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal
+ *portal)
{
register struct qm_eqcr *eqcr = &portal->eqcr;
DPA_ASSERT(!eqcr->busy);
@@ -343,6 +342,28 @@ static inline struct qm_eqcr_entry *qm_eqcr_start(struct qm_portal *portal)
return eqcr->cursor;
}
+static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
+ *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ u8 diff, old_ci;
+
+ DPA_ASSERT(!eqcr->busy);
+ if (!eqcr->available) {
+ old_ci = eqcr->ci;
+ eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
+ diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+ eqcr->available += diff;
+ if (!diff)
+ return NULL;
+ }
+#ifdef CONFIG_FSL_DPA_CHECKING
+ eqcr->busy = 1;
+#endif
+ dcbz_64(eqcr->cursor);
+ return eqcr->cursor;
+}
+
static inline void qm_eqcr_abort(struct qm_portal *portal)
{
__maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr;
@@ -436,7 +457,6 @@ static inline u8 qm_eqcr_cci_update(struct qm_portal *portal)
{
register struct qm_eqcr *eqcr = &portal->eqcr;
u8 diff, old_ci = eqcr->ci;
- DPA_ASSERT(eqcr->cmode == qm_eqcr_cci);
eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
eqcr->available += diff;
@@ -446,7 +466,6 @@ static inline u8 qm_eqcr_cci_update(struct qm_portal *portal)
static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal)
{
__maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr;
- DPA_ASSERT(eqcr->cmode == qm_eqcr_cce);
qm_cl_touch_ro(EQCR_CI);
}
@@ -454,7 +473,6 @@ static inline u8 qm_eqcr_cce_update(struct qm_portal *portal)
{
register struct qm_eqcr *eqcr = &portal->eqcr;
u8 diff, old_ci = eqcr->ci;
- DPA_ASSERT(eqcr->cmode == qm_eqcr_cce);
eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
qm_cl_invalidate(EQCR_CI);
diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
diff --git a/drivers/staging/fsl_qbman/qman_private.h b/drivers/staging/fsl_qbman/qman_private.h
index 7f35dcf..d2bdd1e 100644
--- a/drivers/staging/fsl_qbman/qman_private.h
+++ b/drivers/staging/fsl_qbman/qman_private.h
@@ -219,6 +219,8 @@ void qman_destroy_portal(struct qman_portal *qm);
/* Hooks from fsl_usdpaa.c to qman_driver.c */
struct qm_portal_config *qm_get_unused_portal(void);
+struct qm_portal_config *qm_get_unused_portal_idx(uint32_t idx);
+
void qm_put_unused_portal(struct qm_portal_config *pcfg);
void qm_set_liodns(struct qm_portal_config *pcfg);
diff --git a/include/linux/fsl_dpa_stats.h b/include/linux/fsl_dpa_stats.h
index ba11791..b8fe850 100644
--- a/include/linux/fsl_dpa_stats.h
+++ b/include/linux/fsl_dpa_stats.h
@@ -396,8 +396,11 @@ struct dpa_stats_cnt_classif_tbl {
/* Table descriptor */
int td;
- /* Key to identify a specific entry */
- struct dpa_offload_lookup_key key;
+ /*
+ * Pointer to a key that identifies a specific entry or NULL in order
+ * to obtain statistics for miss entry
+ */
+ struct dpa_offload_lookup_key *key;
/*
* Single or multiple selection of Classifier Table counters
@@ -429,8 +432,11 @@ struct dpa_stats_cnt_classif_node {
/* The type of FMAN Classification Node */
enum dpa_stats_classif_node_type ccnode_type;
- /* Key to identify a specific entry */
- struct dpa_offload_lookup_key key;
+ /*
+ * Pointer to a key that identifies a specific entry or NULL in order
+ * to obtain statistics for miss entry
+ */
+ struct dpa_offload_lookup_key *key;
/*
* Single or multiple selection of Classifier
@@ -576,11 +582,17 @@ enum dpa_stats_classif_key_type {
/* DPA Stats Classification counter - pair of keys */
struct dpa_offload_lookup_key_pair {
- /* Key to identify the first entry */
- struct dpa_offload_lookup_key first_key;
+ /*
+ * Pointer to a key that identifies the first entry or NULL in order
+ * to identify the miss entry of the first table
+ */
+ struct dpa_offload_lookup_key *first_key;
- /* Key to identify the entry connected to the first entry */
- struct dpa_offload_lookup_key second_key;
+ /*
+ * Pointer to a key that identifies the entry connected to the first
+ * entry, or NULL in order to identify the miss entry
+ */
+ struct dpa_offload_lookup_key *second_key;
};
/* DPA Stats Classifier Table class counter parameters */
@@ -601,18 +613,28 @@ struct dpa_stats_cls_cnt_classif_tbl {
*/
/*
- * Array of keys to identify specific entries. A key can be
- * 'invalidated' by providing the 'byte' and 'mask' pointers
- * set to NULL.
+ * Pointer to an array of keys, where each element of the array
+ * can either be a key that identifies a specific entry or NULL
+ * in order to obtain the statistics for the miss entry. A key
+ * can be 'invalidated' by providing the 'byte' pointer set
+ * to NULL.
*/
- struct dpa_offload_lookup_key *keys;
+ struct dpa_offload_lookup_key **keys;
/*
* Array of 'pair-keys' to identify specific entries. A key pair
* can be 'invalidated' by providing the 'byte' and 'mask'
* pointers of the first key set to NULL
*/
- struct dpa_offload_lookup_key_pair *pairs;
+
+ /*
+ * Pointer to an array of 'pair-keys', where each element of the
+ * array can either be a 'pair-key' that identifies a specific
+ * entry or NULL in order to obtain the statistics for the
+ * miss entry. A key pair can be 'invalidated' by providing the
+ * 'byte' pointer of the first key set to NULL.
+ */
+ struct dpa_offload_lookup_key_pair **pairs;
};
/*
@@ -636,7 +658,7 @@ struct dpa_stats_cls_cnt_classif_node {
enum dpa_stats_classif_node_type ccnode_type;
/* Array of keys to identify specific entries */
- struct dpa_offload_lookup_key *keys;
+ struct dpa_offload_lookup_key **keys;
/*
* Single or multiple selection of Classifier counters
@@ -739,17 +761,20 @@ struct dpa_stats_cls_member_params {
union {
/*
- * Key to set or update in case the byte and mask pointers are
- * not NULL, or class member to invalidate otherwise
+ * Pointer to a key to set or update in case the byte pointer is
+ * not NULL, or class member to invalidate otherwise. The
+ * pointer can be NULL, in which case it represents the miss
+ * entry.
*/
- struct dpa_offload_lookup_key key;
+ struct dpa_offload_lookup_key *key;
/*
- * Key to set or update in case the byte and mask pointers of
- * the first key are not NULL, or class member to invalidate
- * otherwise
+ * Pointer to a 'pair-key' to set or update in case the byte
+ * pointer of the first key is not NULL, or class member to
+ * invalidate otherwise. The pointer can be NULL, in which case
+ * it represents the miss entry.
*/
- struct dpa_offload_lookup_key_pair pair;
+ struct dpa_offload_lookup_key_pair *pair;
/*
* Security association identifier to set or update or class
diff --git a/include/linux/fsl_usdpaa.h b/include/linux/fsl_usdpaa.h
index de017a6..fbf9480 100644
--- a/include/linux/fsl_usdpaa.h
+++ b/include/linux/fsl_usdpaa.h
@@ -165,9 +165,17 @@ enum usdpaa_portal_type {
usdpaa_portal_bman,
};
+#define QBMAN_ANY_PORTAL_IDX 0xffffffff
+
struct usdpaa_ioctl_portal_map {
/* Input parameter, is a qman or bman portal required. */
+
enum usdpaa_portal_type type;
+ /* Specifies a specific portal index to map or QBMAN_ANY_PORTAL_IDX
+ for don't care. The portal index will be populated by the
+ driver when the ioctl() successfully completes */
+ uint32_t index;
+
/* Return value if the map succeeds, this gives the mapped
* cache-inhibited (cinh) and cache-enabled (cena) addresses. */
struct usdpaa_portal_map {
@@ -183,6 +191,10 @@ struct usdpaa_ioctl_portal_map {
struct compat_usdpaa_ioctl_portal_map {
/* Input parameter, is a qman or bman portal required. */
enum usdpaa_portal_type type;
+ /* Specifies a specific portal index to map or QBMAN_ANY_PORTAL_IDX
+ for don't care. The portal index will be populated by the
+ driver when the ioctl() successfully completes */
+ uint32_t index;
/* Return value if the map succeeds, this gives the mapped
* cache-inhibited (cinh) and cache-enabled (cena) addresses. */
struct usdpaa_portal_map_compat {
diff --git a/include/uapi/linux/fmd/Peripherals/fm_pcd_ioctls.h b/include/uapi/linux/fmd/Peripherals/fm_pcd_ioctls.h
index 7925905..542f935 100644
--- a/include/uapi/linux/fmd/Peripherals/fm_pcd_ioctls.h
+++ b/include/uapi/linux/fmd/Peripherals/fm_pcd_ioctls.h
@@ -2107,6 +2107,80 @@ typedef struct ioc_fm_pcd_frm_replic_member_params_t {
} ioc_fm_pcd_frm_replic_member_params_t;
#endif /* DPAA_VERSION >= 11 */
+
+typedef struct ioc_fm_pcd_cc_key_statistics_t {
+ uint32_t byte_count; /**< This counter reflects byte count of frames that
+ were matched by this key. */
+ uint32_t frame_count; /**< This counter reflects count of frames that
+ were matched by this key. */
+#if (DPAA_VERSION >= 11)
+ uint32_t frame_length_range_count[IOC_FM_PCD_CC_STATS_MAX_NUM_OF_FLR];
+ /**< These counters reflect how many frames matched
+ this key in 'RMON' statistics mode:
+ Each counter holds the number of frames of a
+ specific frames length range, according to the
+ ranges provided at initialization. */
+#endif /* (DPAA_VERSION >= 11) */
+} ioc_fm_pcd_cc_key_statistics_t;
+
+
+typedef struct ioc_fm_pcd_cc_tbl_get_miss_params_t {
+ void *id;
+ ioc_fm_pcd_cc_key_statistics_t miss_statistics;
+} ioc_fm_pcd_cc_tbl_get_miss_params_t;
+
+
+/**************************************************************************//**
+ @Function FM_PCD_MatchTableGetMissStatistics
+
+ @Description This routine may be used to get statistics counters of miss entry
+ in a CC Node.
+
+ If 'e_FM_PCD_CC_STATS_MODE_FRAME' and
+ 'e_FM_PCD_CC_STATS_MODE_BYTE_AND_FRAME' were set for this node,
+ these counters reflect how many frames were not matched to any
+ existing key and therefore passed through the miss entry; The
+ total frames count will be returned in the counter of the
+ first range (as only one frame length range was defined).
+
+ @Param[in] h_CcNode A handle to the node
+ @Param[out] p_MissStatistics Statistics counters for 'miss'
+
+ @Return E_OK on success; Error code otherwise.
+
+ @Cautions Allowed only following FM_PCD_MatchTableSet().
+*//***************************************************************************/
+
+#if defined(CONFIG_COMPAT)
+#define FM_PCD_IOC_MATCH_TABLE_GET_MISS_STAT_COMPAT _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(12), ioc_compat_fm_pcd_cc_tbl_get_miss_params_t)
+#endif
+#define FM_PCD_IOC_MATCH_TABLE_GET_MISS_STAT _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(12), ioc_fm_pcd_cc_tbl_get_miss_params_t)
+
+/**************************************************************************//**
+ @Function FM_PCD_HashTableGetMissStatistics
+
+ @Description This routine may be used to get statistics counters of 'miss'
+ entry of a hash table.
+
+ If 'e_FM_PCD_CC_STATS_MODE_FRAME' and
+ 'e_FM_PCD_CC_STATS_MODE_BYTE_AND_FRAME' were set for this node,
+ these counters reflect how many frames were not matched to any
+ existing key and therefore passed through the miss entry;
+
+ @Param[in] h_HashTbl A handle to a hash table
+ @Param[out] p_MissStatistics Statistics counters for 'miss'
+
+ @Return E_OK on success; Error code otherwise.
+
+ @Cautions Allowed only following FM_PCD_HashTableSet().
+*//***************************************************************************/
+
+#if defined(CONFIG_COMPAT)
+#define FM_PCD_IOC_HASH_TABLE_GET_MISS_STAT_COMPAT _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(13), ioc_compat_fm_pcd_cc_tbl_get_miss_params_t)
+#endif
+#define FM_PCD_IOC_HASH_TABLE_GET_MISS_STAT _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(13), ioc_fm_pcd_cc_tbl_get_miss_params_t)
+
+
/**************************************************************************//**
@Function FM_PCD_NetEnvCharacteristicsSet