From 69aa12734fa9d6f00a49c94edd609a413948359c Mon Sep 17 00:00:00 2001 From: Minghuan Lian Date: Tue, 30 Jul 2013 11:34:27 +0800 Subject: powerpc/dts: fix sRIO and RMan error interrupts for b4860 For B4 platform, MPIC EISR register is in reversed bitmap order, instead of "Error interrupt source 0-31. Bit 0 represents SRC0." the correct ordering is "Error interrupt source 0-31. Bit 0 represents SRC31." This patch is to fix sRIO and RMan EISR bit value of error interrupts in dts node. Signed-off-by: Minghuan Lian Change-Id: I3eacf5ebee6da5ac847d6ab93fe1e38a07e57176 Reviewed-on: http://git.am.freescale.net:8181/3616 Tested-by: Review Code-CDREVIEW Reviewed-by: Wood Scott-B07421 Reviewed-by: Fleming Andrew-AFLEMING Reviewed-by: Schmitt Richard-B43082 Tested-by: Schmitt Richard-B43082 diff --git a/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi b/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi index a166d1c..d2192e7 100644 --- a/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi @@ -376,7 +376,7 @@ &rio { compatible = "fsl,srio"; - interrupts = <16 2 1 11>; + interrupts = <16 2 1 20>; #address-cells = <2>; #size-cells = <2>; fsl,iommu-parent = <&pamu0>; @@ -521,5 +521,6 @@ /include/ "qoriq-rman-0.dtsi" rman: rman@1e0000 { fsl,qman-channels-id = <0x820 0x821>; + interrupts = <16 2 1 20>; }; }; -- cgit v0.10.2 From 87f9e29a73beda4296a024645c3a7250a8f6876a Mon Sep 17 00:00:00 2001 From: Scott Wood Date: Fri, 2 Aug 2013 13:48:57 -0500 Subject: powerpc/e6500: skip rev1 errata workaround on rev2 "default y" prevents the mere absence of those config lines from turning off the errata. 
Signed-off-by: Scott Wood Change-Id: Ib825e8c5941f422990a997a79412669bda677d79 Reviewed-on: http://git.am.freescale.net:8181/3748 Tested-by: Review Code-CDREVIEW Reviewed-by: Rivera Jose-B46482 diff --git a/arch/powerpc/configs/85xx/e6500rev2_defconfig b/arch/powerpc/configs/85xx/e6500rev2_defconfig index 26a586a..819d403 100644 --- a/arch/powerpc/configs/85xx/e6500rev2_defconfig +++ b/arch/powerpc/configs/85xx/e6500rev2_defconfig @@ -1,5 +1,7 @@ CONFIG_PPC64=y CONFIG_PPC_BOOK3E_64=y +# CONFIG_FSL_ERRATUM_A_004801 is not set +# CONFIG_FSL_ERRATUM_A_005337 is not set CONFIG_ALTIVEC=y CONFIG_SMP=y CONFIG_NR_CPUS=24 -- cgit v0.10.2 From b441fdd11651990ba285843c8de0fd81667861c6 Mon Sep 17 00:00:00 2001 From: Scott Wood Date: Fri, 12 Jul 2013 19:11:47 -0500 Subject: powerpc/fsl-booke: Work around erratum A-006958 Erratum A-006958 says that 64-bit mftb is not atomic -- it's subject to a similar race condition as doing mftbu/mftbl on 32-bit. The lower half of timebase is updated before the upper half; thus, we can share the workaround for a similar bug on Cell. This workaround involves looping if the lower half of timebase is zero, thus avoiding the need for a scratch register (other than CR0). This workaround must be avoided when the timebase is frozen, such as during the timebase sync code. This deals with kernel and vdso accesses, but other userspace accesses will of course need to be fixed elsewhere. Signed-off-by: Scott Wood Change-Id: I88da5fef252872ba17c4496ed1a053dc4be645af --- v2: Share the Cell workaround instead of using the workaround suggested by the erratum. 
Reviewed-on: http://git.am.freescale.net:8181/3749 Reviewed-by: Rivera Jose-B46482 Tested-by: Rivera Jose-B46482 diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index b7d2747..e354127 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -369,12 +369,12 @@ extern const char *powerpc_base_platform; #define CPU_FTRS_E5500 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \ CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \ CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ - CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV) + CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV | CPU_FTR_CELL_TB_BUG) #define CPU_FTRS_E6500 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \ CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \ CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV | CPU_FTR_ALTIVEC_COMP | \ - CPU_FTR_SMT) + CPU_FTR_CELL_TB_BUG | CPU_FTR_SMT) #define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN) /* 64-bit CPUs */ diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 635fc0b..3a853ae 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -362,7 +362,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_601) #define ISYNC_601 #endif -#ifdef CONFIG_PPC_CELL +#if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E) #define MFTB(dest) \ 90: mftb dest; \ BEGIN_FTR_SECTION_NESTED(96); \ diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index f1b79a6..cad9faa 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -1058,7 +1058,7 @@ : "memory") #ifdef __powerpc64__ -#ifdef CONFIG_PPC_CELL +#if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E) #define mftb() ({unsigned long rval; \ asm volatile( \ "90: mftb %0;\n" \ diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c index 30721ac..68f65ca 100644 --- 
a/arch/powerpc/platforms/85xx/smp.c +++ b/arch/powerpc/platforms/85xx/smp.c @@ -130,7 +130,30 @@ static void __cpuinit mpc85xx_give_timebase(void) tb_req = 0; mpc85xx_timebase_freeze(1); +#ifdef CONFIG_PPC64 + /* + * e5500/e6500 have a workaround for erratum A-006958 in place + * that will reread the timebase until TBL is non-zero. + * That would be a bad thing when the timebase is frozen. + * + * Thus, we read it manually, and instead of checking that + * TBL is non-zero, we ensure that TB does not change. We don't + * do that for the main mftb implementation, because it requires + * a scratch register + */ + { + u64 prev; + + asm volatile("mftb %0" : "=r" (timebase)); + + do { + prev = timebase; + asm volatile("mftb %0" : "=r" (timebase)); + } while (prev != timebase); + } +#else timebase = get_tb(); +#endif mb(); tb_valid = 1; -- cgit v0.10.2 From 963ad71d782051f37f4bf7b937798d069a01ad3e Mon Sep 17 00:00:00 2001 From: Shaveta Leekha Date: Thu, 1 Aug 2013 17:14:00 +0530 Subject: powerpc/b4860: Add StarCore/SC3900 nodes in B4860 device files - Add dsp nodes in B4860 pre silicon device file - Add L2 cache nodes for dsp/Starcores in B4860 post silicon file Signed-off-by: Shaveta Leekha Change-Id: I10329607b97961a44f46bc814032fc9faa0e7f96 Reviewed-on: http://git.am.freescale.net:8181/3693 Tested-by: Review Code-CDREVIEW Reviewed-by: Sethi Varun-B16395 Reviewed-by: Wood Scott-B07421 Reviewed-by: Fleming Andrew-AFLEMING Reviewed-by: Rivera Jose-B46482 Tested-by: Rivera Jose-B46482 diff --git a/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi b/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi index d2192e7..80d0f90 100644 --- a/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi @@ -518,6 +518,24 @@ compatible = "fsl,b4860-l2-cache-controller"; }; + L2_2: l2-cache-controller@c60000 { + compatible = "fsl,b4860-l2-cache-controller"; + reg = <0xc60000 0x1000>; + next-level-cache = <&cpc>; + }; + + L2_3: l2-cache-controller@ca0000 { + 
compatible = "fsl,b4860-l2-cache-controller"; + reg = <0xca0000 0x1000>; + next-level-cache = <&cpc>; + }; + + L2_4: l2-cache-controller@ce0000 { + compatible = "fsl,b4860-l2-cache-controller"; + reg = <0xce0000 0x1000>; + next-level-cache = <&cpc>; + }; + /include/ "qoriq-rman-0.dtsi" rman: rman@1e0000 { fsl,qman-channels-id = <0x820 0x821>; diff --git a/arch/powerpc/boot/dts/fsl/b4860si-pre.dtsi b/arch/powerpc/boot/dts/fsl/b4860si-pre.dtsi index 6c85eca..61f89b8 100644 --- a/arch/powerpc/boot/dts/fsl/b4860si-pre.dtsi +++ b/arch/powerpc/boot/dts/fsl/b4860si-pre.dtsi @@ -105,4 +105,63 @@ next-level-cache = <&L2>; }; }; + + dsp-clusters { + #address-cells = <1>; + #size-cells = <0>; + + dsp-cluster0 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,sc3900-cluster"; + reg = <0>; + + dsp0: dsp@0 { + compatible = "fsl,sc3900"; + reg = <0>; + next-level-cache = <&L2_2>; + }; + dsp1: dsp@1 { + compatible = "fsl,sc3900"; + reg = <1>; + next-level-cache = <&L2_2>; + }; + }; + + dsp-cluster1 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,sc3900-cluster"; + reg = <1>; + + dsp2: dsp@2 { + compatible = "fsl,sc3900"; + reg = <2>; + next-level-cache = <&L2_2>; + }; + dsp3: dsp@3 { + compatible = "fsl,sc3900"; + reg = <3>; + next-level-cache = <&L2_2>; + }; + }; + + dsp-cluster2 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,sc3900-cluster"; + reg = <2>; + + dsp4: dsp@4 { + compatible = "fsl,sc3900"; + reg = <4>; + next-level-cache = <&L2_2>; + }; + dsp5: dsp@5 { + compatible = "fsl,sc3900"; + reg = <5>; + next-level-cache = <&L2_2>; + }; + }; + }; }; -- cgit v0.10.2 From 605d2ad9a561593fa1187b72568fc9b353815a2f Mon Sep 17 00:00:00 2001 From: Mandy Lavi Date: Tue, 30 Jul 2013 14:26:03 +0300 Subject: fmd: support retrieval of more statistics counters - miss entry in a CC Node - miss entry in the hash table Signed-off-by: Mandy Lavi Change-Id: Iacbebff6f8f7ef1fa9c97b24d1072515f5098860 Reviewed-on: 
http://git.am.freescale.net:8181/3828 Reviewed-by: Lavi Mandy-R52568 Reviewed-by: Chereji Marian-Cornel-R27762 Reviewed-by: Schmitt Richard-B43082 Tested-by: Schmitt Richard-B43082 diff --git a/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_cc.c b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_cc.c index a68ff12..a8a52b2 100644 --- a/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_cc.c +++ b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_cc.c @@ -1192,6 +1192,10 @@ static void DeleteNode(t_FmPcdCcNode *p_CcNode) p_CcNode->h_Spinlock = NULL; } + /* Restore the original counters pointer instead of the mutual pointer (mutual to all hash buckets) */ + if (p_CcNode->isHashBucket && (p_CcNode->statisticsMode != e_FM_PCD_CC_STATS_MODE_NONE)) + p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].p_StatsObj->h_StatsCounters = p_CcNode->h_PrivMissStatsCounters; + /* Releasing all currently used statistics objects, including 'miss' entry */ for (i = 0; i < p_CcNode->numOfKeys + 1; i++) if (p_CcNode->keyAndNextEngineParams[i].p_StatsObj) @@ -1200,7 +1204,6 @@ static void DeleteNode(t_FmPcdCcNode *p_CcNode) if (!LIST_IsEmpty(&p_CcNode->availableStatsLst)) { t_Handle h_FmMuram = FmPcdGetMuramHandle(p_CcNode->h_FmPcd); - ASSERT_COND(h_FmMuram); FreeStatObjects(&p_CcNode->availableStatsLst, h_FmMuram); @@ -1490,7 +1493,7 @@ t_Error ValidateNextEngineParams(t_Handle h_FmPcd, (p_FmPcdCcNextEngineParams->statisticsEn)) RETURN_ERROR(MAJOR, E_CONFLICT, ("Statistics are requested for a key, but statistics mode was set" - "to 'NONE' upon initialization of this match table")); + "to 'NONE' upon initialization")); switch (p_FmPcdCcNextEngineParams->nextEngine) { @@ -3397,13 +3400,18 @@ static t_Error ValidateAndCalcStatsParams(t_FmPcdCcNode *p_CcNode, uint32_t *p_NumOfRanges, uint32_t *p_CountersArraySize) { - e_FmPcdCcStatsMode statisticsMode = p_CcNode->statisticsMode; + e_FmPcdCcStatsMode statisticsMode = p_CcNode->statisticsMode; + uint32_t 
i; UNUSED(p_CcNodeParam); switch (statisticsMode) { case e_FM_PCD_CC_STATS_MODE_NONE: + for (i = 0; i < p_CcNode->numOfKeys; i++) + if (p_CcNodeParam->keysParams.keyParams[i].ccNextEngineParams.statisticsEn) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, + ("Statistics cannot be enabled for key %d when statistics mode was set to 'NONE'", i)); return E_OK; case e_FM_PCD_CC_STATS_MODE_FRAME: @@ -3983,9 +3991,6 @@ static t_Error MatchTableGetKeyStatistics(t_FmPcdCcNode *p_CcNode, if (p_CcNode->statisticsMode == e_FM_PCD_CC_STATS_MODE_NONE) RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Statistics were not enabled for this match table")); - if (keyIndex >= p_CcNode->numOfKeys) - RETURN_ERROR(MAJOR, E_INVALID_STATE, ("The provided keyIndex exceeds the number of keys in this match table")); - if (!p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj) RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Statistics were not enabled for this key")); @@ -5014,927 +5019,949 @@ void FmPcdCcGetAdTablesThatPointOnReplicGroup(t_Handle h_Node, ASSERT_COND(i != p_CurrentNode->numOfKeys); } #endif /* (DPAA_VERSION >= 11) */ -/*********************** End of inter-module routines ************************/ - -/****************************************/ -/* API Init unit functions */ -/****************************************/ - -t_Handle FM_PCD_CcRootBuild(t_Handle h_FmPcd, t_FmPcdCcTreeParams *p_PcdGroupsParam) +t_Error FmPcdCcMatchTableSet(t_Handle h_FmPcd, t_FmPcdCcNode *p_CcNode, t_FmPcdCcNodeParams *p_CcNodeParam) { - t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; - t_Error err = E_OK; - int i = 0, j = 0, k = 0; - t_FmPcdCcTree *p_FmPcdCcTree; - uint8_t numOfEntries; - t_Handle p_CcTreeTmp; - t_FmPcdCcGrpParams *p_FmPcdCcGroupParams; - t_FmPcdCcKeyAndNextEngineParams *p_Params, *p_KeyAndNextEngineParams; - t_NetEnvParams netEnvParams; - uint8_t lastOne = 0; - uint32_t requiredAction = 0; - t_FmPcdCcNode *p_FmPcdCcNextNode; - t_CcNodeInformation ccNodeInfo, *p_CcInformation; + t_FmPcd *p_FmPcd = (t_FmPcd *) h_FmPcd; + 
t_FmPcdCcNode *p_FmPcdCcNextNode; + t_Error err = E_OK; + uint32_t tmp, keySize; + bool glblMask = FALSE; + t_FmPcdCcKeyParams *p_KeyParams; + t_Handle h_FmMuram, p_KeysMatchTblTmp, p_AdTableTmp; +#if (DPAA_VERSION >= 11) + t_Handle h_StatsFLRs; +#endif /* (DPAA_VERSION >= 11) */ + bool fullField = FALSE; + ccPrivateInfo_t icCode = CC_PRIVATE_INFO_NONE; + bool isKeyTblAlloc, fromIc = FALSE; + uint32_t matchTableSize, adTableSize; + t_CcNodeInformation ccNodeInfo, *p_CcInformation; + t_FmPcdStatsObj *p_StatsObj; + t_FmPcdCcStatsParams statsParams = {0}; + t_Handle h_Manip; - SANITY_CHECK_RETURN_VALUE(h_FmPcd,E_INVALID_HANDLE, NULL); - SANITY_CHECK_RETURN_VALUE(p_PcdGroupsParam,E_INVALID_HANDLE, NULL); + ASSERT_COND(h_FmPcd); + ASSERT_COND(p_CcNode); + ASSERT_COND(p_CcNodeParam); - if (p_PcdGroupsParam->numOfGrps > FM_PCD_MAX_NUM_OF_CC_GROUPS) - { - REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("numOfGrps should not exceed %d", FM_PCD_MAX_NUM_OF_CC_GROUPS)); - return NULL; - } + p_CcNode->p_GlblMask = (t_Handle)XX_Malloc(CC_GLBL_MASK_SIZE * sizeof(uint8_t)); + memset(p_CcNode->p_GlblMask, 0, CC_GLBL_MASK_SIZE * sizeof(uint8_t)); - p_FmPcdCcTree = (t_FmPcdCcTree*)XX_Malloc(sizeof(t_FmPcdCcTree)); - if (!p_FmPcdCcTree) - { - REPORT_ERROR(MAJOR, E_NO_MEMORY, ("PCD tree structure")); - return NULL; - } - memset(p_FmPcdCcTree, 0, sizeof(t_FmPcdCcTree)); - p_FmPcdCcTree->h_FmPcd = h_FmPcd; + p_CcNode->h_FmPcd = h_FmPcd; + p_CcNode->numOfKeys = p_CcNodeParam->keysParams.numOfKeys; + p_CcNode->maxNumOfKeys = p_CcNodeParam->keysParams.maxNumOfKeys; + p_CcNode->maskSupport = p_CcNodeParam->keysParams.maskSupport; + p_CcNode->statisticsMode = p_CcNodeParam->keysParams.statisticsMode; - p_Params = (t_FmPcdCcKeyAndNextEngineParams*)XX_Malloc(FM_PCD_MAX_NUM_OF_CC_GROUPS * sizeof(t_FmPcdCcKeyAndNextEngineParams)); - memset(p_Params, 0, FM_PCD_MAX_NUM_OF_CC_GROUPS * sizeof(t_FmPcdCcKeyAndNextEngineParams)); + /* For backward compatibility - even if statistics mode is nullified, + we'll fix 
it to frame mode so we can support per-key request for + statistics using 'statisticsEn' in next engine parameters */ + if (!p_CcNode->maxNumOfKeys && + (p_CcNode->statisticsMode == e_FM_PCD_CC_STATS_MODE_NONE)) + p_CcNode->statisticsMode = e_FM_PCD_CC_STATS_MODE_FRAME; - INIT_LIST(&p_FmPcdCcTree->fmPortsLst); + h_FmMuram = FmPcdGetMuramHandle(h_FmPcd); + if (!h_FmMuram) + RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("FM MURAM")); -#ifdef FM_CAPWAP_SUPPORT - if ((p_PcdGroupsParam->numOfGrps == 1) && - (p_PcdGroupsParam->ccGrpParams[0].numOfDistinctionUnits == 0) && - (p_PcdGroupsParam->ccGrpParams[0].nextEnginePerEntriesInGrp[0].nextEngine == e_FM_PCD_CC) && - p_PcdGroupsParam->ccGrpParams[0].nextEnginePerEntriesInGrp[0].params.ccParams.h_CcNode && - IsCapwapApplSpecific(p_PcdGroupsParam->ccGrpParams[0].nextEnginePerEntriesInGrp[0].params.ccParams.h_CcNode)) + INIT_LIST(&p_CcNode->ccPrevNodesLst); + INIT_LIST(&p_CcNode->ccTreeIdLst); + INIT_LIST(&p_CcNode->ccTreesLst); + INIT_LIST(&p_CcNode->availableStatsLst); + + p_CcNode->h_Spinlock = XX_InitSpinlock(); + if (!p_CcNode->h_Spinlock) { - p_PcdGroupsParam->ccGrpParams[0].nextEnginePerEntriesInGrp[0].h_Manip = FmPcdManipApplSpecificBuild(); - if (!p_PcdGroupsParam->ccGrpParams[0].nextEnginePerEntriesInGrp[0].h_Manip) - { - DeleteTree(p_FmPcdCcTree,p_FmPcd); - XX_Free(p_Params); - REPORT_ERROR(MAJOR, E_INVALID_STATE, NO_MSG); - return NULL; - } + DeleteNode(p_CcNode); + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("CC node spinlock")); } -#endif /* FM_CAPWAP_SUPPORT */ - - numOfEntries = 0; - p_FmPcdCcTree->netEnvId = FmPcdGetNetEnvId(p_PcdGroupsParam->h_NetEnv); - for (i = 0; i < p_PcdGroupsParam->numOfGrps; i++) + if ((p_CcNodeParam->extractCcParams.type == e_FM_PCD_EXTRACT_BY_HDR) && + ((p_CcNodeParam->extractCcParams.extractByHdr.hdr == HEADER_TYPE_IPv4) || + (p_CcNodeParam->extractCcParams.extractByHdr.hdr == HEADER_TYPE_IPv6)) && + (p_CcNodeParam->extractCcParams.extractByHdr.type == e_FM_PCD_EXTRACT_FULL_FIELD) && + 
((p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fullField.ipv6 == NET_HEADER_FIELD_IPv6_HOP_LIMIT) || + (p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fullField.ipv4 == NET_HEADER_FIELD_IPv4_TTL))) { - p_FmPcdCcGroupParams = &p_PcdGroupsParam->ccGrpParams[i]; - - if (p_FmPcdCcGroupParams->numOfDistinctionUnits > FM_PCD_MAX_NUM_OF_CC_UNITS) - { - DeleteTree(p_FmPcdCcTree,p_FmPcd); - XX_Free(p_Params); - REPORT_ERROR(MAJOR, E_INVALID_VALUE, - ("numOfDistinctionUnits (group %d) should not exceed %d", i, FM_PCD_MAX_NUM_OF_CC_UNITS)); - return NULL; - } - - p_FmPcdCcTree->fmPcdGroupParam[i].baseGroupEntry = numOfEntries; - p_FmPcdCcTree->fmPcdGroupParam[i].numOfEntriesInGroup =(uint8_t)( 0x01 << p_FmPcdCcGroupParams->numOfDistinctionUnits); - numOfEntries += p_FmPcdCcTree->fmPcdGroupParam[i].numOfEntriesInGroup; - if (numOfEntries > FM_PCD_MAX_NUM_OF_CC_GROUPS) + err = Ipv4TtlOrIpv6HopLimitCheckParams(h_FmPcd, p_CcNodeParam, p_CcNode, &isKeyTblAlloc); + glblMask = FALSE; + } + else if ((p_CcNodeParam->extractCcParams.type == e_FM_PCD_EXTRACT_NON_HDR) && + ((p_CcNodeParam->extractCcParams.extractNonHdr.src == e_FM_PCD_EXTRACT_FROM_KEY) || + (p_CcNodeParam->extractCcParams.extractNonHdr.src == e_FM_PCD_EXTRACT_FROM_HASH) || + (p_CcNodeParam->extractCcParams.extractNonHdr.src == e_FM_PCD_EXTRACT_FROM_FLOW_ID))) + { + if ((p_CcNodeParam->extractCcParams.extractNonHdr.src == e_FM_PCD_EXTRACT_FROM_FLOW_ID) && + (p_CcNodeParam->extractCcParams.extractNonHdr.offset != 0)) { - DeleteTree(p_FmPcdCcTree,p_FmPcd); - XX_Free(p_Params); - REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("numOfEntries can not be larger than %d", FM_PCD_MAX_NUM_OF_CC_GROUPS)); - return NULL; + DeleteNode(p_CcNode); + RETURN_ERROR(MAJOR, E_INVALID_VALUE, + ("In the case of the extraction from e_FM_PCD_EXTRACT_FROM_FLOW_ID offset has to be 0")); } - if (lastOne) + icCode = IcDefineCode(p_CcNodeParam); + fromIc = TRUE; + if (icCode == CC_PRIVATE_INFO_NONE) { - if 
(p_FmPcdCcTree->fmPcdGroupParam[i].numOfEntriesInGroup > lastOne) - { - DeleteTree(p_FmPcdCcTree,p_FmPcd); - XX_Free(p_Params); - REPORT_ERROR(MAJOR, E_CONFLICT, ("numOfEntries per group must be set in descending order")); - return NULL; - } + DeleteNode(p_CcNode); + RETURN_ERROR(MAJOR, E_INVALID_STATE, + ("user asked extraction from IC and field in internal context or action wasn't initialized in the right way")); } - lastOne = p_FmPcdCcTree->fmPcdGroupParam[i].numOfEntriesInGroup; - - netEnvParams.netEnvId = p_FmPcdCcTree->netEnvId; - netEnvParams.numOfDistinctionUnits = p_FmPcdCcGroupParams->numOfDistinctionUnits; - - memcpy(netEnvParams.unitIds, - &p_FmPcdCcGroupParams->unitIds, - (sizeof(uint8_t)) * p_FmPcdCcGroupParams->numOfDistinctionUnits); - - err = PcdGetUnitsVector(p_FmPcd, &netEnvParams); - if (err) + if ((icCode == CC_PRIVATE_INFO_IC_DEQ_FQID_INDEX_LOOKUP) || + (icCode == CC_PRIVATE_INFO_IC_HASH_INDEX_LOOKUP)) { - DeleteTree(p_FmPcdCcTree,p_FmPcd); - XX_Free(p_Params); - REPORT_ERROR(MAJOR, err, NO_MSG); - return NULL; + err = IcHashIndexedCheckParams(h_FmPcd, p_CcNodeParam, p_CcNode, &isKeyTblAlloc); + glblMask = TRUE; } - - p_FmPcdCcTree->fmPcdGroupParam[i].totalBitsMask = netEnvParams.vector; - for (j = 0; j < p_FmPcdCcTree->fmPcdGroupParam[i].numOfEntriesInGroup; j++) + else { - err = ValidateNextEngineParams(h_FmPcd, - &p_FmPcdCcGroupParams->nextEnginePerEntriesInGrp[j], - e_FM_PCD_CC_STATS_MODE_NONE); - if (err) - { - DeleteTree(p_FmPcdCcTree,p_FmPcd); - XX_Free(p_Params); - REPORT_ERROR(MAJOR, err, (NO_MSG)); - return NULL; - } - - if (p_FmPcdCcGroupParams->nextEnginePerEntriesInGrp[j].h_Manip) - { - err = FmPcdManipCheckParamsForCcNextEngine(&p_FmPcdCcGroupParams->nextEnginePerEntriesInGrp[j], &requiredAction); - if (err) - { - DeleteTree(p_FmPcdCcTree,p_FmPcd); - XX_Free(p_Params); - REPORT_ERROR(MAJOR, E_INVALID_STATE, NO_MSG); - return NULL; - } - } - p_KeyAndNextEngineParams = p_Params+k; - - 
memcpy(&p_KeyAndNextEngineParams->nextEngineParams, - &p_FmPcdCcGroupParams->nextEnginePerEntriesInGrp[j], - sizeof(t_FmPcdCcNextEngineParams)); - - if ((p_KeyAndNextEngineParams->nextEngineParams.nextEngine == e_FM_PCD_CC) - && p_KeyAndNextEngineParams->nextEngineParams.h_Manip) - { - err = AllocAndFillAdForContLookupManip(p_KeyAndNextEngineParams->nextEngineParams.params.ccParams.h_CcNode); - if (err) - { - DeleteTree(p_FmPcdCcTree,p_FmPcd); - XX_Free(p_Params); - REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC Tree")); - return NULL; - } - } - - requiredAction |= UPDATE_CC_WITH_TREE; - p_KeyAndNextEngineParams->requiredAction = requiredAction; - - k++; + err = CheckParams(h_FmPcd, p_CcNodeParam, p_CcNode, &isKeyTblAlloc); + if (p_CcNode->glblMaskSize) + glblMask = TRUE; } } - - p_FmPcdCcTree->numOfEntries = (uint8_t)k; - p_FmPcdCcTree->numOfGrps = p_PcdGroupsParam->numOfGrps; - - p_FmPcdCcTree->ccTreeBaseAddr = - PTR_TO_UINT(FM_MURAM_AllocMem(FmPcdGetMuramHandle(h_FmPcd), - (uint32_t)( FM_PCD_MAX_NUM_OF_CC_GROUPS * FM_PCD_CC_AD_ENTRY_SIZE), - FM_PCD_CC_TREE_ADDR_ALIGN)); - if (!p_FmPcdCcTree->ccTreeBaseAddr) + else { - DeleteTree(p_FmPcdCcTree,p_FmPcd); - XX_Free(p_Params); - REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC Tree")); - return NULL; + err = CheckParams(h_FmPcd, p_CcNodeParam, p_CcNode, &isKeyTblAlloc); + if (p_CcNode->glblMaskSize) + glblMask = TRUE; } - IOMemSet32(UINT_TO_PTR(p_FmPcdCcTree->ccTreeBaseAddr), 0, (uint32_t)(FM_PCD_MAX_NUM_OF_CC_GROUPS * FM_PCD_CC_AD_ENTRY_SIZE)); - - p_CcTreeTmp = UINT_TO_PTR(p_FmPcdCcTree->ccTreeBaseAddr); - j = 0; - for (i = 0; i < numOfEntries; i++) + if (err) { - p_KeyAndNextEngineParams = p_Params + i; + DeleteNode(p_CcNode); + RETURN_ERROR(MAJOR, err, NO_MSG); + } - NextStepAd(p_CcTreeTmp, - NULL, - &p_KeyAndNextEngineParams->nextEngineParams, - p_FmPcd); + switch (p_CcNodeParam->extractCcParams.type) + { + case (e_FM_PCD_EXTRACT_BY_HDR): + switch 
(p_CcNodeParam->extractCcParams.extractByHdr.type) + { + case (e_FM_PCD_EXTRACT_FULL_FIELD): + p_CcNode->parseCode = + GetFullFieldParseCode(p_CcNodeParam->extractCcParams.extractByHdr.hdr, + p_CcNodeParam->extractCcParams.extractByHdr.hdrIndex, + p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fullField); + GetSizeHeaderField(p_CcNodeParam->extractCcParams.extractByHdr.hdr, + p_CcNodeParam->extractCcParams.extractByHdr.hdrIndex, + p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fullField, + &p_CcNode->sizeOfExtraction); + fullField = TRUE; + if ((p_CcNode->parseCode != CC_PC_FF_TCI1) && + (p_CcNode->parseCode != CC_PC_FF_TCI2) && + (p_CcNode->parseCode != CC_PC_FF_MPLS1) && + (p_CcNode->parseCode != CC_PC_FF_MPLS1) && + (p_CcNode->parseCode != CC_PC_FF_IPV4IPTOS_TC1) && + (p_CcNode->parseCode != CC_PC_FF_IPV4IPTOS_TC2) && + (p_CcNode->parseCode != CC_PC_FF_IPTOS_IPV6TC1_IPV6FLOW1) && + (p_CcNode->parseCode != CC_PC_FF_IPDSCP) && + (p_CcNode->parseCode != CC_PC_FF_IPTOS_IPV6TC2_IPV6FLOW2) && + glblMask) + { + glblMask = FALSE; + p_CcNode->glblMaskSize = 4; + p_CcNode->lclMask = TRUE; + } + break; - p_CcTreeTmp = PTR_MOVE(p_CcTreeTmp, FM_PCD_CC_AD_ENTRY_SIZE); + case (e_FM_PCD_EXTRACT_FROM_HDR): + p_CcNode->sizeOfExtraction = p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fromHdr.size; + p_CcNode->offset = p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fromHdr.offset; + p_CcNode->userOffset = p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fromHdr.offset; + p_CcNode->parseCode = + GetPrParseCode(p_CcNodeParam->extractCcParams.extractByHdr.hdr, + p_CcNodeParam->extractCcParams.extractByHdr.hdrIndex, + p_CcNode->offset,glblMask, + &p_CcNode->prsArrayOffset); + break; - memcpy(&p_FmPcdCcTree->keyAndNextEngineParams[i], - p_KeyAndNextEngineParams, - sizeof(t_FmPcdCcKeyAndNextEngineParams)); + case (e_FM_PCD_EXTRACT_FROM_FIELD): + p_CcNode->offset = 
p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fromField.offset; + p_CcNode->userOffset = p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fromField.offset; + p_CcNode->sizeOfExtraction = p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fromField.size; + p_CcNode->parseCode = + GetFieldParseCode(p_CcNodeParam->extractCcParams.extractByHdr.hdr, + p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fromField.field, + p_CcNode->offset, + &p_CcNode->prsArrayOffset, + p_CcNodeParam->extractCcParams.extractByHdr.hdrIndex); + break; - if (p_FmPcdCcTree->keyAndNextEngineParams[i].nextEngineParams.nextEngine== e_FM_PCD_CC) - { - p_FmPcdCcNextNode = (t_FmPcdCcNode*)p_FmPcdCcTree->keyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode; - p_CcInformation = FindNodeInfoInReleventLst(&p_FmPcdCcNextNode->ccTreeIdLst, - (t_Handle)p_FmPcdCcTree, - p_FmPcdCcNextNode->h_Spinlock); + default: + DeleteNode(p_CcNode); + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG); + } + break; - if (!p_CcInformation) + case (e_FM_PCD_EXTRACT_NON_HDR): + /* get the field code for the generic extract */ + p_CcNode->sizeOfExtraction = p_CcNodeParam->extractCcParams.extractNonHdr.size; + p_CcNode->offset = p_CcNodeParam->extractCcParams.extractNonHdr.offset; + p_CcNode->userOffset = p_CcNodeParam->extractCcParams.extractNonHdr.offset; + p_CcNode->parseCode = + GetGenParseCode(h_FmPcd, + p_CcNodeParam->extractCcParams.extractNonHdr.src, + p_CcNode->offset, + glblMask, + &p_CcNode->prsArrayOffset, + fromIc,icCode); + + if (p_CcNode->parseCode == CC_PC_GENERIC_IC_HASH_INDEXED) { - memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation)); - ccNodeInfo.h_CcNode = (t_Handle)p_FmPcdCcTree; - ccNodeInfo.index = 1; - EnqueueNodeInfoToRelevantLst(&p_FmPcdCcNextNode->ccTreeIdLst, - &ccNodeInfo, - p_FmPcdCcNextNode->h_Spinlock); + if ((p_CcNode->offset + p_CcNode->sizeOfExtraction) > 8) + { + DeleteNode(p_CcNode); + RETURN_ERROR(MAJOR, 
E_INVALID_SELECTION,("when node of the type CC_PC_GENERIC_IC_HASH_INDEXED offset + size can not be bigger then size of HASH 64 bits (8 bytes)")); + } } - else - p_CcInformation->index++; - } - } + if ((p_CcNode->parseCode == CC_PC_GENERIC_IC_GMASK) || + (p_CcNode->parseCode == CC_PC_GENERIC_IC_HASH_INDEXED)) + { + p_CcNode->offset += p_CcNode->prsArrayOffset; + p_CcNode->prsArrayOffset = 0; + } + break; - FmPcdIncNetEnvOwners(h_FmPcd, p_FmPcdCcTree->netEnvId); - p_CcTreeTmp = UINT_TO_PTR(p_FmPcdCcTree->ccTreeBaseAddr); + default: + DeleteNode(p_CcNode); + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG); + } - if (!FmPcdLockTryLockAll(p_FmPcd)) + if (p_CcNode->parseCode == CC_PC_ILLEGAL) { - FM_PCD_CcRootDelete(p_FmPcdCcTree); - XX_Free(p_Params); - DBG(TRACE, ("FmPcdLockTryLockAll failed")); - return NULL; + DeleteNode(p_CcNode); + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("illegal extraction type")); } - for (i = 0; i < numOfEntries; i++) + if ((p_CcNode->sizeOfExtraction > FM_PCD_MAX_SIZE_OF_KEY) || + !p_CcNode->sizeOfExtraction) { - if (p_FmPcdCcTree->keyAndNextEngineParams[i].requiredAction) - { - err = SetRequiredAction(h_FmPcd, - p_FmPcdCcTree->keyAndNextEngineParams[i].requiredAction, - &p_FmPcdCcTree->keyAndNextEngineParams[i], - p_CcTreeTmp, - 1, - p_FmPcdCcTree); - if (err) - { - FmPcdLockUnlockAll(p_FmPcd); - FM_PCD_CcRootDelete(p_FmPcdCcTree); - XX_Free(p_Params); - REPORT_ERROR(MAJOR, E_NO_MEMORY, ("No memory")); - return NULL; - } - p_CcTreeTmp = PTR_MOVE(p_CcTreeTmp, FM_PCD_CC_AD_ENTRY_SIZE); - } + DeleteNode(p_CcNode); + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("sizeOfExatrction can not be greater than 56 and not 0")); } - FmPcdLockUnlockAll(p_FmPcd); - p_FmPcdCcTree->p_Lock = FmPcdAcquireLock(p_FmPcd); - if (!p_FmPcdCcTree->p_Lock) + if (p_CcNodeParam->keysParams.keySize != p_CcNode->sizeOfExtraction) { - FM_PCD_CcRootDelete(p_FmPcdCcTree); - XX_Free(p_Params); - REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM CC lock")); - return NULL; + DeleteNode(p_CcNode); + 
RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("keySize has to be equal to sizeOfExtraction")); } - XX_Free(p_Params); - - return p_FmPcdCcTree; -} + p_CcNode->userSizeOfExtraction = p_CcNode->sizeOfExtraction; -t_Error FM_PCD_CcRootDelete(t_Handle h_CcTree) -{ - t_FmPcd *p_FmPcd; - t_FmPcdCcTree *p_CcTree = (t_FmPcdCcTree *)h_CcTree; - int i= 0; + if (!glblMask) + memset(p_CcNode->p_GlblMask, 0xff, CC_GLBL_MASK_SIZE * sizeof(uint8_t)); - SANITY_CHECK_RETURN_ERROR(p_CcTree,E_INVALID_STATE); - p_FmPcd = (t_FmPcd *)p_CcTree->h_FmPcd; - SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + err = CheckAndSetManipParamsWithCcNodeParams(p_CcNode); + if (err != E_OK) + { + DeleteNode(p_CcNode); + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("keySize has to be equal to sizeOfExtraction")); + } - FmPcdDecNetEnvOwners(p_FmPcd, p_CcTree->netEnvId); + /* Calculating matching table entry size by rounding up the user-defined size of extraction to valid entry size */ + GetCcExtractKeySize(p_CcNode->sizeOfExtraction, &p_CcNode->ccKeySizeAccExtraction); - if (p_CcTree->owners) - RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("the tree with this ID can not be removed because this tree is occupied, first - unbind this tree")); + /* If local mask is used, it is stored next to each key in the keys match table */ + if (p_CcNode->lclMask) + keySize = (uint32_t)(2 * p_CcNode->ccKeySizeAccExtraction); + else + keySize = p_CcNode->ccKeySizeAccExtraction; - /* Delete reassembly schemes if exist */ - if (p_CcTree->h_IpReassemblyManip) + /* Update CC shadow with maximal size required by this node */ + if (p_CcNode->maxNumOfKeys) { - FmPcdManipDeleteIpReassmSchemes(p_CcTree->h_IpReassemblyManip); - FmPcdManipUpdateOwner(p_CcTree->h_IpReassemblyManip, FALSE); - } + err = CalcAndUpdateCcShadow(p_CcNode, + isKeyTblAlloc, + &matchTableSize, + &adTableSize); + if (err != E_OK) + { + DeleteNode(p_CcNode); + RETURN_ERROR(MAJOR, err, NO_MSG); + } - for (i = 0; i < p_CcTree->numOfEntries; i++) - { - if 
(p_CcTree->keyAndNextEngineParams[i].nextEngineParams.nextEngine == e_FM_PCD_CC) - UpdateNodeOwner(p_CcTree->keyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode, FALSE); + p_CcNode->keysMatchTableMaxSize = matchTableSize; - if (p_CcTree->keyAndNextEngineParams[i].nextEngineParams.h_Manip) - FmPcdManipUpdateOwner(p_CcTree->keyAndNextEngineParams[i].nextEngineParams.h_Manip, FALSE); + if (p_CcNode->statisticsMode != e_FM_PCD_CC_STATS_MODE_NONE) + { + err = AllocStatsObjs(p_CcNode); + if (err != E_OK) + { + DeleteNode(p_CcNode); + RETURN_ERROR(MAJOR, err, NO_MSG); + } + } -#ifdef FM_CAPWAP_SUPPORT - if ((p_CcTree->numOfGrps == 1) && - (p_CcTree->fmPcdGroupParam[0].numOfEntriesInGroup == 1) && - (p_CcTree->keyAndNextEngineParams[0].nextEngineParams.nextEngine == e_FM_PCD_CC) && - p_CcTree->keyAndNextEngineParams[0].nextEngineParams.params.ccParams.h_CcNode && - IsCapwapApplSpecific(p_CcTree->keyAndNextEngineParams[0].nextEngineParams.params.ccParams.h_CcNode)) + /* If manipulation will be initialized before this node, it will use the table + descriptor in the AD table of previous node and this node will need an extra + AD as his table descriptor. 
*/ + p_CcNode->h_TmpAd = (t_Handle)FM_MURAM_AllocMem(h_FmMuram, + FM_PCD_CC_AD_ENTRY_SIZE, + FM_PCD_CC_AD_TABLE_ALIGN); + if (!p_CcNode->h_TmpAd) { - if (FM_PCD_ManipNodeDelete(p_CcTree->keyAndNextEngineParams[0].nextEngineParams.h_Manip) != E_OK) - return E_INVALID_STATE; + DeleteNode(p_CcNode); + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC action descriptor")); } -#endif /* FM_CAPWAP_SUPPORT */ + } + else + { + matchTableSize = (uint32_t)(keySize * sizeof(uint8_t) * (p_CcNode->numOfKeys + 1)); + adTableSize = (uint32_t)(FM_PCD_CC_AD_ENTRY_SIZE * (p_CcNode->numOfKeys + 1)); + } #if (DPAA_VERSION >= 11) - if ((p_CcTree->keyAndNextEngineParams[i].nextEngineParams.nextEngine == e_FM_PCD_FR) && - (p_CcTree->keyAndNextEngineParams[i].nextEngineParams.params.frParams.h_FrmReplic)) - FrmReplicGroupUpdateOwner(p_CcTree->keyAndNextEngineParams[i].nextEngineParams.params.frParams.h_FrmReplic, - FALSE); -#endif /* (DPAA_VERSION >= 11) */ - } + switch (p_CcNode->statisticsMode) + { - if (p_CcTree->p_Lock) - FmPcdReleaseLock(p_CcTree->h_FmPcd, p_CcTree->p_Lock); + case e_FM_PCD_CC_STATS_MODE_RMON: + /* If RMON statistics or RMON conditional statistics modes are requested, + allocate frame length ranges array */ + p_CcNode->h_StatsFLRs = + FM_MURAM_AllocMem(h_FmMuram, + (uint32_t)(p_CcNode->numOfStatsFLRs) * FM_PCD_CC_STATS_FLR_SIZE, + FM_PCD_CC_AD_TABLE_ALIGN); - DeleteTree(p_CcTree, p_FmPcd); + if (!p_CcNode->h_StatsFLRs) + { + DeleteNode(p_CcNode); + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC frame length ranges array")); + } - return E_OK; -} + /* Initialize using value received from the user */ + for (tmp = 0; tmp < p_CcNode->numOfStatsFLRs; tmp++) + { + h_StatsFLRs = PTR_MOVE(p_CcNode->h_StatsFLRs, tmp * FM_PCD_CC_STATS_FLR_SIZE); -t_Error FM_PCD_CcRootModifyNextEngine(t_Handle h_CcTree, - uint8_t grpId, - uint8_t index, - t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams) -{ - t_FmPcd *p_FmPcd; - t_FmPcdCcTree *p_CcTree = (t_FmPcdCcTree 
*)h_CcTree; - t_Error err = E_OK; + Mem2IOCpy32(h_StatsFLRs, + &(p_CcNodeParam->keysParams.frameLengthRanges[tmp]), + FM_PCD_CC_STATS_FLR_SIZE); + } + break; - SANITY_CHECK_RETURN_ERROR(p_FmPcdCcNextEngineParams, E_NULL_POINTER); - SANITY_CHECK_RETURN_ERROR(p_CcTree,E_INVALID_STATE); - p_FmPcd = (t_FmPcd *)p_CcTree->h_FmPcd; - SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + default: + break; + } +#endif /* (DPAA_VERSION >= 11) */ - if (!FmPcdLockTryLockAll(p_FmPcd)) + + /* Allocate keys match table. Not required for some CC nodes, for example for IPv4 TTL + identification, IPv6 hop count identification, etc. */ + if (isKeyTblAlloc) { - DBG(TRACE, ("FmPcdLockTryLockAll failed")); - return ERROR_CODE(E_BUSY); + p_CcNode->h_KeysMatchTable = + (t_Handle)FM_MURAM_AllocMem(h_FmMuram, + matchTableSize, + FM_PCD_CC_KEYS_MATCH_TABLE_ALIGN); + if (!p_CcNode->h_KeysMatchTable) + { + DeleteNode(p_CcNode); + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC node key match table")); + } + IOMemSet32((uint8_t *)p_CcNode->h_KeysMatchTable, + 0, + matchTableSize); } - err = FmPcdCcModifyNextEngineParamTree(p_FmPcd, - p_CcTree, - grpId, - index, - p_FmPcdCcNextEngineParams); - FmPcdLockUnlockAll(p_FmPcd); - - if (err) + /* Allocate action descriptors table */ + p_CcNode->h_AdTable = + (t_Handle)FM_MURAM_AllocMem(h_FmMuram, + adTableSize, + FM_PCD_CC_AD_TABLE_ALIGN); + if (!p_CcNode->h_AdTable) { - RETURN_ERROR(MAJOR, err, NO_MSG); + DeleteNode(p_CcNode); + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC node action descriptors table")); } + IOMemSet32((uint8_t *)p_CcNode->h_AdTable, 0, adTableSize); - return E_OK; -} + p_KeysMatchTblTmp = p_CcNode->h_KeysMatchTable; + p_AdTableTmp = p_CcNode->h_AdTable; -t_Handle FM_PCD_MatchTableSet(t_Handle h_FmPcd, t_FmPcdCcNodeParams *p_CcNodeParam) -{ - t_FmPcd *p_FmPcd = (t_FmPcd *) h_FmPcd; - t_FmPcdCcNode *p_CcNode, *p_FmPcdCcNextNode; - t_Error err = E_OK; - uint32_t tmp, keySize; - bool glblMask = FALSE; - 
t_FmPcdCcKeyParams *p_KeyParams; - t_Handle h_FmMuram, p_KeysMatchTblTmp, p_AdTableTmp; -#if (DPAA_VERSION >= 11) - t_Handle h_StatsFLRs; -#endif /* (DPAA_VERSION >= 11) */ - bool fullField = FALSE; - ccPrivateInfo_t icCode = CC_PRIVATE_INFO_NONE; - bool isKeyTblAlloc, fromIc = FALSE; - uint32_t matchTableSize, adTableSize; - t_CcNodeInformation ccNodeInfo, *p_CcInformation; - t_FmPcdStatsObj *p_StatsObj; - t_FmPcdCcStatsParams statsParams = {0}; - t_Handle h_Manip; + /* For each key, create the key and the next step AD */ + for (tmp = 0; tmp < p_CcNode->numOfKeys; tmp++) + { + p_KeyParams = &p_CcNodeParam->keysParams.keyParams[tmp]; - SANITY_CHECK_RETURN_VALUE(h_FmPcd,E_INVALID_HANDLE,NULL); + if (p_KeysMatchTblTmp) + { + /* Copy the key */ + Mem2IOCpy32((void*)p_KeysMatchTblTmp, p_KeyParams->p_Key, p_CcNode->sizeOfExtraction); - p_CcNode = (t_FmPcdCcNode*)XX_Malloc(sizeof(t_FmPcdCcNode)); - if (!p_CcNode) - { - REPORT_ERROR(MAJOR, E_NO_MEMORY, ("No memory")); - return NULL; - } - memset(p_CcNode, 0, sizeof(t_FmPcdCcNode)); + /* Copy the key mask or initialize it to 0xFF..F */ + if (p_CcNode->lclMask && p_KeyParams->p_Mask) + { + Mem2IOCpy32(PTR_MOVE(p_KeysMatchTblTmp, + p_CcNode->ccKeySizeAccExtraction), /* User's size of extraction rounded up to a valid matching table entry size */ + p_KeyParams->p_Mask, + p_CcNode->sizeOfExtraction); /* Exact size of extraction as received from the user */ + } + else if (p_CcNode->lclMask) + { + IOMemSet32(PTR_MOVE(p_KeysMatchTblTmp, + p_CcNode->ccKeySizeAccExtraction), /* User's size of extraction rounded up to a valid matching table entry size */ + 0xff, + p_CcNode->sizeOfExtraction); /* Exact size of extraction as received from the user */ + } - p_CcNode->p_GlblMask = (t_Handle)XX_Malloc(CC_GLBL_MASK_SIZE * sizeof(uint8_t)); - memset(p_CcNode->p_GlblMask, 0, CC_GLBL_MASK_SIZE * sizeof(uint8_t)); + p_KeysMatchTblTmp = PTR_MOVE(p_KeysMatchTblTmp, keySize * sizeof(uint8_t)); + } - p_CcNode->h_FmPcd = h_FmPcd; - 
p_CcNode->numOfKeys = p_CcNodeParam->keysParams.numOfKeys; - p_CcNode->maxNumOfKeys = p_CcNodeParam->keysParams.maxNumOfKeys; - p_CcNode->maskSupport = p_CcNodeParam->keysParams.maskSupport; - p_CcNode->statisticsMode = p_CcNodeParam->keysParams.statisticsMode; + /* Create the next action descriptor in the match table */ + if (p_KeyParams->ccNextEngineParams.statisticsEn) + { + p_StatsObj = GetStatsObj(p_CcNode); + ASSERT_COND(p_StatsObj); - /* For backward compatibility - even if statistics mode is nullified, - we'll fix it to frame mode so we can support per-key request for - statistics using 'statisticsEn' in next engine parameters */ - if (!p_CcNode->maxNumOfKeys && - (p_CcNode->statisticsMode == e_FM_PCD_CC_STATS_MODE_NONE)) - p_CcNode->statisticsMode = e_FM_PCD_CC_STATS_MODE_FRAME; + statsParams.h_StatsAd = p_StatsObj->h_StatsAd; + statsParams.h_StatsCounters = p_StatsObj->h_StatsCounters; +#if (DPAA_VERSION >= 11) + statsParams.h_StatsFLRs = p_CcNode->h_StatsFLRs; - h_FmMuram = FmPcdGetMuramHandle(h_FmPcd); - if (!h_FmMuram) - { - REPORT_ERROR(MAJOR, E_INVALID_HANDLE, ("FM MURAM")); - return NULL; - } +#endif /* (DPAA_VERSION >= 11) */ + NextStepAd(p_AdTableTmp, + &statsParams, + &p_KeyParams->ccNextEngineParams, + p_FmPcd); - INIT_LIST(&p_CcNode->ccPrevNodesLst); - INIT_LIST(&p_CcNode->ccTreeIdLst); - INIT_LIST(&p_CcNode->ccTreesLst); - INIT_LIST(&p_CcNode->availableStatsLst); + p_CcNode->keyAndNextEngineParams[tmp].p_StatsObj = p_StatsObj; + } + else + { + NextStepAd(p_AdTableTmp, + NULL, + &p_KeyParams->ccNextEngineParams, + p_FmPcd); - p_CcNode->h_Spinlock = XX_InitSpinlock(); - if (!p_CcNode->h_Spinlock) - { - DeleteNode(p_CcNode); - REPORT_ERROR(MAJOR, E_NO_MEMORY, ("CC node spinlock")); - return NULL; - } + p_CcNode->keyAndNextEngineParams[tmp].p_StatsObj = NULL; + } - if ((p_CcNodeParam->extractCcParams.type == e_FM_PCD_EXTRACT_BY_HDR) && - ((p_CcNodeParam->extractCcParams.extractByHdr.hdr == HEADER_TYPE_IPv4) || - 
(p_CcNodeParam->extractCcParams.extractByHdr.hdr == HEADER_TYPE_IPv6)) && - (p_CcNodeParam->extractCcParams.extractByHdr.type == e_FM_PCD_EXTRACT_FULL_FIELD) && - ((p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fullField.ipv6 == NET_HEADER_FIELD_IPv6_HOP_LIMIT) || - (p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fullField.ipv4 == NET_HEADER_FIELD_IPv4_TTL))) - { - err = Ipv4TtlOrIpv6HopLimitCheckParams(h_FmPcd, p_CcNodeParam, p_CcNode, &isKeyTblAlloc); - glblMask = FALSE; + p_AdTableTmp = PTR_MOVE(p_AdTableTmp, FM_PCD_CC_AD_ENTRY_SIZE); } - else if ((p_CcNodeParam->extractCcParams.type == e_FM_PCD_EXTRACT_NON_HDR) && - ((p_CcNodeParam->extractCcParams.extractNonHdr.src == e_FM_PCD_EXTRACT_FROM_KEY) || - (p_CcNodeParam->extractCcParams.extractNonHdr.src == e_FM_PCD_EXTRACT_FROM_HASH) || - (p_CcNodeParam->extractCcParams.extractNonHdr.src == e_FM_PCD_EXTRACT_FROM_FLOW_ID))) + + /* Update next engine for the 'miss' entry */ + if (p_CcNodeParam->keysParams.ccNextEngineParamsForMiss.statisticsEn) { - if ((p_CcNodeParam->extractCcParams.extractNonHdr.src == e_FM_PCD_EXTRACT_FROM_FLOW_ID) && - (p_CcNodeParam->extractCcParams.extractNonHdr.offset != 0)) - { - REPORT_ERROR(MAJOR, E_INVALID_VALUE, - ("In the case of the extraction from e_FM_PCD_EXTRACT_FROM_FLOW_ID offset has to be 0")); - return NULL; - } + p_StatsObj = GetStatsObj(p_CcNode); + ASSERT_COND(p_StatsObj); - icCode = IcDefineCode(p_CcNodeParam); - fromIc = TRUE; - if (icCode == CC_PRIVATE_INFO_NONE) + /* All 'bucket' nodes of a hash table should share the same statistics counters, + allocated by the hash table. So, if this node is a bucket of a hash table, + we'll replace the locally allocated counters with the shared counters. 
*/ + if (p_CcNode->isHashBucket) { - REPORT_ERROR(MAJOR, E_INVALID_STATE, - ("user asked extraction from IC and field in internal context or action wasn't initialized in the right way")); - return NULL; - } + ASSERT_COND(p_CcNode->h_MissStatsCounters); - if ((icCode == CC_PRIVATE_INFO_IC_DEQ_FQID_INDEX_LOOKUP) || - (icCode == CC_PRIVATE_INFO_IC_HASH_INDEX_LOOKUP)) - { - err = IcHashIndexedCheckParams(h_FmPcd, p_CcNodeParam, p_CcNode, &isKeyTblAlloc); - glblMask = TRUE; - } - else - { - err = CheckParams(h_FmPcd, p_CcNodeParam, p_CcNode, &isKeyTblAlloc); - if (p_CcNode->glblMaskSize) - glblMask = TRUE; + /* Store original counters pointer and replace it with mutual preallocated pointer */ + p_CcNode->h_PrivMissStatsCounters = p_StatsObj->h_StatsCounters; + p_StatsObj->h_StatsCounters = p_CcNode->h_MissStatsCounters; } + + statsParams.h_StatsAd = p_StatsObj->h_StatsAd; + statsParams.h_StatsCounters = p_StatsObj->h_StatsCounters; +#if (DPAA_VERSION >= 11) + statsParams.h_StatsFLRs = p_CcNode->h_StatsFLRs; + +#endif /* (DPAA_VERSION >= 11) */ + + NextStepAd(p_AdTableTmp, + &statsParams, + &p_CcNodeParam->keysParams.ccNextEngineParamsForMiss, + p_FmPcd); + + p_CcNode->keyAndNextEngineParams[tmp].p_StatsObj = p_StatsObj; } else { - err = CheckParams(h_FmPcd, p_CcNodeParam, p_CcNode, &isKeyTblAlloc); - if (p_CcNode->glblMaskSize) - glblMask = TRUE; - } + NextStepAd(p_AdTableTmp, + NULL, + &p_CcNodeParam->keysParams.ccNextEngineParamsForMiss, + p_FmPcd); - if (err) - { - DeleteNode(p_CcNode); - REPORT_ERROR(MAJOR, err, NO_MSG); - return NULL; + p_CcNode->keyAndNextEngineParams[tmp].p_StatsObj = NULL; } - switch (p_CcNodeParam->extractCcParams.type) + /* This parameter will be used to initialize the "key length" field in the action descriptor + that points to this node and it should be 0 for full field extraction */ + if (fullField == TRUE) + p_CcNode->sizeOfExtraction = 0; + + for (tmp = 0; tmp < MIN(p_CcNode->numOfKeys + 1, CC_MAX_NUM_OF_KEYS); tmp++) { - case 
(e_FM_PCD_EXTRACT_BY_HDR): - switch (p_CcNodeParam->extractCcParams.extractByHdr.type) + if (p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.nextEngine == e_FM_PCD_CC) + { + p_FmPcdCcNextNode = (t_FmPcdCcNode*)p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.params.ccParams.h_CcNode; + p_CcInformation = FindNodeInfoInReleventLst(&p_FmPcdCcNextNode->ccPrevNodesLst, + (t_Handle)p_CcNode, + p_FmPcdCcNextNode->h_Spinlock); + if (!p_CcInformation) { - case (e_FM_PCD_EXTRACT_FULL_FIELD): - p_CcNode->parseCode = - GetFullFieldParseCode(p_CcNodeParam->extractCcParams.extractByHdr.hdr, - p_CcNodeParam->extractCcParams.extractByHdr.hdrIndex, - p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fullField); - GetSizeHeaderField(p_CcNodeParam->extractCcParams.extractByHdr.hdr, - p_CcNodeParam->extractCcParams.extractByHdr.hdrIndex, - p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fullField, - &p_CcNode->sizeOfExtraction); - fullField = TRUE; - if ((p_CcNode->parseCode != CC_PC_FF_TCI1) && - (p_CcNode->parseCode != CC_PC_FF_TCI2) && - (p_CcNode->parseCode != CC_PC_FF_MPLS1) && - (p_CcNode->parseCode != CC_PC_FF_MPLS1) && - (p_CcNode->parseCode != CC_PC_FF_IPV4IPTOS_TC1) && - (p_CcNode->parseCode != CC_PC_FF_IPV4IPTOS_TC2) && - (p_CcNode->parseCode != CC_PC_FF_IPTOS_IPV6TC1_IPV6FLOW1) && - (p_CcNode->parseCode != CC_PC_FF_IPDSCP) && - (p_CcNode->parseCode != CC_PC_FF_IPTOS_IPV6TC2_IPV6FLOW2) && - glblMask) - { - glblMask = FALSE; - p_CcNode->glblMaskSize = 4; - p_CcNode->lclMask = TRUE; - } - break; - - case (e_FM_PCD_EXTRACT_FROM_HDR): - p_CcNode->sizeOfExtraction = p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fromHdr.size; - p_CcNode->offset = p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fromHdr.offset; - p_CcNode->userOffset = p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fromHdr.offset; - p_CcNode->parseCode = - GetPrParseCode(p_CcNodeParam->extractCcParams.extractByHdr.hdr, - 
p_CcNodeParam->extractCcParams.extractByHdr.hdrIndex, - p_CcNode->offset,glblMask, - &p_CcNode->prsArrayOffset); - break; - - case (e_FM_PCD_EXTRACT_FROM_FIELD): - p_CcNode->offset = p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fromField.offset; - p_CcNode->userOffset = p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fromField.offset; - p_CcNode->sizeOfExtraction = p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fromField.size; - p_CcNode->parseCode = - GetFieldParseCode(p_CcNodeParam->extractCcParams.extractByHdr.hdr, - p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fromField.field, - p_CcNode->offset, - &p_CcNode->prsArrayOffset, - p_CcNodeParam->extractCcParams.extractByHdr.hdrIndex); - break; - - default: - DeleteNode(p_CcNode); - REPORT_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG); - return NULL; + memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation)); + ccNodeInfo.h_CcNode = (t_Handle)p_CcNode; + ccNodeInfo.index = 1; + EnqueueNodeInfoToRelevantLst(&p_FmPcdCcNextNode->ccPrevNodesLst, + &ccNodeInfo, + p_FmPcdCcNextNode->h_Spinlock); } - break; - - case (e_FM_PCD_EXTRACT_NON_HDR): - /* get the field code for the generic extract */ - p_CcNode->sizeOfExtraction = p_CcNodeParam->extractCcParams.extractNonHdr.size; - p_CcNode->offset = p_CcNodeParam->extractCcParams.extractNonHdr.offset; - p_CcNode->userOffset = p_CcNodeParam->extractCcParams.extractNonHdr.offset; - p_CcNode->parseCode = - GetGenParseCode(h_FmPcd, - p_CcNodeParam->extractCcParams.extractNonHdr.src, - p_CcNode->offset, - glblMask, - &p_CcNode->prsArrayOffset, - fromIc,icCode); + else + p_CcInformation->index++; - if (p_CcNode->parseCode == CC_PC_GENERIC_IC_HASH_INDEXED) + if (p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.h_Manip) { - if ((p_CcNode->offset + p_CcNode->sizeOfExtraction) > 8) + h_Manip = p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.h_Manip; + p_CcInformation = 
FindNodeInfoInReleventLst(FmPcdManipGetNodeLstPointedOnThisManip(h_Manip), + (t_Handle)p_CcNode, + FmPcdManipGetSpinlock(h_Manip)); + if (!p_CcInformation) { - DeleteNode(p_CcNode); - REPORT_ERROR(MAJOR, E_INVALID_SELECTION,("when node of the type CC_PC_GENERIC_IC_HASH_INDEXED offset + size can not be bigger then size of HASH 64 bits (8 bytes)")); - return NULL; + memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation)); + ccNodeInfo.h_CcNode = (t_Handle)p_CcNode; + ccNodeInfo.index = 1; + EnqueueNodeInfoToRelevantLst(FmPcdManipGetNodeLstPointedOnThisManip(h_Manip), + &ccNodeInfo, + FmPcdManipGetSpinlock(h_Manip)); } + else + p_CcInformation->index++; } - if ((p_CcNode->parseCode == CC_PC_GENERIC_IC_GMASK) || - (p_CcNode->parseCode == CC_PC_GENERIC_IC_HASH_INDEXED)) - { - p_CcNode->offset += p_CcNode->prsArrayOffset; - p_CcNode->prsArrayOffset = 0; - } - break; - - default: - DeleteNode(p_CcNode); - REPORT_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG); - return NULL; + } } - if (p_CcNode->parseCode == CC_PC_ILLEGAL) - { - DeleteNode(p_CcNode); - REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("illegal extraction type")); - return NULL; - } + p_AdTableTmp = p_CcNode->h_AdTable; - if ((p_CcNode->sizeOfExtraction > FM_PCD_MAX_SIZE_OF_KEY) || - !p_CcNode->sizeOfExtraction) + if (!FmPcdLockTryLockAll(h_FmPcd)) { - DeleteNode(p_CcNode); - REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("sizeOfExatrction can not be greater than 56 and not 0")); - return NULL; + FM_PCD_MatchTableDelete((t_Handle)p_CcNode); + DBG(TRACE, ("FmPcdLockTryLockAll failed")); + return ERROR_CODE(E_BUSY); } - if (p_CcNodeParam->keysParams.keySize != p_CcNode->sizeOfExtraction) + /* Required action for each next engine */ + for (tmp = 0; tmp < MIN(p_CcNode->numOfKeys + 1, CC_MAX_NUM_OF_KEYS); tmp++) { - DeleteNode(p_CcNode); - REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("keySize has to be equal to sizeOfExtraction")); - return NULL; + if (p_CcNode->keyAndNextEngineParams[tmp].requiredAction) + { + err = SetRequiredAction(h_FmPcd, + 
p_CcNode->keyAndNextEngineParams[tmp].requiredAction, + &p_CcNode->keyAndNextEngineParams[tmp], + p_AdTableTmp, + 1, + NULL); + if (err) + { + FmPcdLockUnlockAll(h_FmPcd); + FM_PCD_MatchTableDelete((t_Handle)p_CcNode); + RETURN_ERROR(MAJOR, err, NO_MSG); + } + p_AdTableTmp = PTR_MOVE(p_AdTableTmp, FM_PCD_CC_AD_ENTRY_SIZE); + } } - p_CcNode->userSizeOfExtraction = p_CcNode->sizeOfExtraction; + FmPcdLockUnlockAll(h_FmPcd); - if (!glblMask) - memset(p_CcNode->p_GlblMask, 0xff, CC_GLBL_MASK_SIZE * sizeof(uint8_t)); + return E_OK; +} +/*********************** End of inter-module routines ************************/ - err = CheckAndSetManipParamsWithCcNodeParams(p_CcNode); - if (err != E_OK) + +/****************************************/ +/* API Init unit functions */ +/****************************************/ + +t_Handle FM_PCD_CcRootBuild(t_Handle h_FmPcd, t_FmPcdCcTreeParams *p_PcdGroupsParam) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; + t_Error err = E_OK; + int i = 0, j = 0, k = 0; + t_FmPcdCcTree *p_FmPcdCcTree; + uint8_t numOfEntries; + t_Handle p_CcTreeTmp; + t_FmPcdCcGrpParams *p_FmPcdCcGroupParams; + t_FmPcdCcKeyAndNextEngineParams *p_Params, *p_KeyAndNextEngineParams; + t_NetEnvParams netEnvParams; + uint8_t lastOne = 0; + uint32_t requiredAction = 0; + t_FmPcdCcNode *p_FmPcdCcNextNode; + t_CcNodeInformation ccNodeInfo, *p_CcInformation; + + SANITY_CHECK_RETURN_VALUE(h_FmPcd,E_INVALID_HANDLE, NULL); + SANITY_CHECK_RETURN_VALUE(p_PcdGroupsParam,E_INVALID_HANDLE, NULL); + + if (p_PcdGroupsParam->numOfGrps > FM_PCD_MAX_NUM_OF_CC_GROUPS) { - DeleteNode(p_CcNode); - REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("keySize has to be equal to sizeOfExtraction")); + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("numOfGrps should not exceed %d", FM_PCD_MAX_NUM_OF_CC_GROUPS)); return NULL; } - /* Calculating matching table entry size by rounding up the user-defined size of extraction to valid entry size */ - GetCcExtractKeySize(p_CcNode->sizeOfExtraction, 
&p_CcNode->ccKeySizeAccExtraction); - - /* If local mask is used, it is stored next to each key in the keys match table */ - if (p_CcNode->lclMask) - keySize = (uint32_t)(2 * p_CcNode->ccKeySizeAccExtraction); - else - keySize = p_CcNode->ccKeySizeAccExtraction; - - /* Update CC shadow with maximal size required by this node */ - if (p_CcNode->maxNumOfKeys) + p_FmPcdCcTree = (t_FmPcdCcTree*)XX_Malloc(sizeof(t_FmPcdCcTree)); + if (!p_FmPcdCcTree) { - err = CalcAndUpdateCcShadow(p_CcNode, - isKeyTblAlloc, - &matchTableSize, - &adTableSize); - if (err != E_OK) - { - DeleteNode(p_CcNode); - REPORT_ERROR(MAJOR, err, NO_MSG); - return NULL; - } + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("PCD tree structure")); + return NULL; + } + memset(p_FmPcdCcTree, 0, sizeof(t_FmPcdCcTree)); + p_FmPcdCcTree->h_FmPcd = h_FmPcd; - p_CcNode->keysMatchTableMaxSize = matchTableSize; + p_Params = (t_FmPcdCcKeyAndNextEngineParams*)XX_Malloc(FM_PCD_MAX_NUM_OF_CC_GROUPS * sizeof(t_FmPcdCcKeyAndNextEngineParams)); + memset(p_Params, 0, FM_PCD_MAX_NUM_OF_CC_GROUPS * sizeof(t_FmPcdCcKeyAndNextEngineParams)); - if (p_CcNode->statisticsMode != e_FM_PCD_CC_STATS_MODE_NONE) - { - err = AllocStatsObjs(p_CcNode); - if (err != E_OK) - { - DeleteNode(p_CcNode); - REPORT_ERROR(MAJOR, err, NO_MSG); - return NULL; - } - } + INIT_LIST(&p_FmPcdCcTree->fmPortsLst); - /* If manipulation will be initialized before this node, it will use the table - descriptor in the AD table of previous node and this node will need an extra - AD as his table descriptor. 
*/ - p_CcNode->h_TmpAd = (t_Handle)FM_MURAM_AllocMem(h_FmMuram, - FM_PCD_CC_AD_ENTRY_SIZE, - FM_PCD_CC_AD_TABLE_ALIGN); - if (!p_CcNode->h_TmpAd) +#ifdef FM_CAPWAP_SUPPORT + if ((p_PcdGroupsParam->numOfGrps == 1) && + (p_PcdGroupsParam->ccGrpParams[0].numOfDistinctionUnits == 0) && + (p_PcdGroupsParam->ccGrpParams[0].nextEnginePerEntriesInGrp[0].nextEngine == e_FM_PCD_CC) && + p_PcdGroupsParam->ccGrpParams[0].nextEnginePerEntriesInGrp[0].params.ccParams.h_CcNode && + IsCapwapApplSpecific(p_PcdGroupsParam->ccGrpParams[0].nextEnginePerEntriesInGrp[0].params.ccParams.h_CcNode)) + { + p_PcdGroupsParam->ccGrpParams[0].nextEnginePerEntriesInGrp[0].h_Manip = FmPcdManipApplSpecificBuild(); + if (!p_PcdGroupsParam->ccGrpParams[0].nextEnginePerEntriesInGrp[0].h_Manip) { - DeleteNode(p_CcNode); - REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC action descriptor")); + DeleteTree(p_FmPcdCcTree,p_FmPcd); + XX_Free(p_Params); + REPORT_ERROR(MAJOR, E_INVALID_STATE, NO_MSG); return NULL; } } - else - { - matchTableSize = (uint32_t)(keySize * sizeof(uint8_t) * (p_CcNode->numOfKeys + 1)); - adTableSize = (uint32_t)(FM_PCD_CC_AD_ENTRY_SIZE * (p_CcNode->numOfKeys + 1)); - } +#endif /* FM_CAPWAP_SUPPORT */ -#if (DPAA_VERSION >= 11) - switch (p_CcNode->statisticsMode) + numOfEntries = 0; + p_FmPcdCcTree->netEnvId = FmPcdGetNetEnvId(p_PcdGroupsParam->h_NetEnv); + + for (i = 0; i < p_PcdGroupsParam->numOfGrps; i++) { + p_FmPcdCcGroupParams = &p_PcdGroupsParam->ccGrpParams[i]; - case e_FM_PCD_CC_STATS_MODE_RMON: - /* If RMON statistics or RMON conditional statistics modes are requested, - allocate frame length ranges array */ - p_CcNode->h_StatsFLRs = - FM_MURAM_AllocMem(h_FmMuram, - (uint32_t)(p_CcNode->numOfStatsFLRs) * FM_PCD_CC_STATS_FLR_SIZE, - FM_PCD_CC_AD_TABLE_ALIGN); + if (p_FmPcdCcGroupParams->numOfDistinctionUnits > FM_PCD_MAX_NUM_OF_CC_UNITS) + { + DeleteTree(p_FmPcdCcTree,p_FmPcd); + XX_Free(p_Params); + REPORT_ERROR(MAJOR, E_INVALID_VALUE, + ("numOfDistinctionUnits 
(group %d) should not exceed %d", i, FM_PCD_MAX_NUM_OF_CC_UNITS)); + return NULL; + } - if (!p_CcNode->h_StatsFLRs) + p_FmPcdCcTree->fmPcdGroupParam[i].baseGroupEntry = numOfEntries; + p_FmPcdCcTree->fmPcdGroupParam[i].numOfEntriesInGroup =(uint8_t)( 0x01 << p_FmPcdCcGroupParams->numOfDistinctionUnits); + numOfEntries += p_FmPcdCcTree->fmPcdGroupParam[i].numOfEntriesInGroup; + if (numOfEntries > FM_PCD_MAX_NUM_OF_CC_GROUPS) + { + DeleteTree(p_FmPcdCcTree,p_FmPcd); + XX_Free(p_Params); + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("numOfEntries can not be larger than %d", FM_PCD_MAX_NUM_OF_CC_GROUPS)); + return NULL; + } + + if (lastOne) + { + if (p_FmPcdCcTree->fmPcdGroupParam[i].numOfEntriesInGroup > lastOne) { - DeleteNode(p_CcNode); - REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC frame length ranges array")); + DeleteTree(p_FmPcdCcTree,p_FmPcd); + XX_Free(p_Params); + REPORT_ERROR(MAJOR, E_CONFLICT, ("numOfEntries per group must be set in descending order")); return NULL; } + } - /* Initialize using value received from the user */ - for (tmp = 0; tmp < p_CcNode->numOfStatsFLRs; tmp++) - { - h_StatsFLRs = PTR_MOVE(p_CcNode->h_StatsFLRs, tmp * FM_PCD_CC_STATS_FLR_SIZE); - - Mem2IOCpy32(h_StatsFLRs, - &(p_CcNodeParam->keysParams.frameLengthRanges[tmp]), - FM_PCD_CC_STATS_FLR_SIZE); - } - break; + lastOne = p_FmPcdCcTree->fmPcdGroupParam[i].numOfEntriesInGroup; - default: - break; - } -#endif /* (DPAA_VERSION >= 11) */ + netEnvParams.netEnvId = p_FmPcdCcTree->netEnvId; + netEnvParams.numOfDistinctionUnits = p_FmPcdCcGroupParams->numOfDistinctionUnits; + memcpy(netEnvParams.unitIds, + &p_FmPcdCcGroupParams->unitIds, + (sizeof(uint8_t)) * p_FmPcdCcGroupParams->numOfDistinctionUnits); - /* Allocate keys match table. Not required for some CC nodes, for example for IPv4 TTL - identification, IPv6 hop count identification, etc. 
*/ - if (isKeyTblAlloc) - { - p_CcNode->h_KeysMatchTable = - (t_Handle)FM_MURAM_AllocMem(h_FmMuram, - matchTableSize, - FM_PCD_CC_KEYS_MATCH_TABLE_ALIGN); - if (!p_CcNode->h_KeysMatchTable) + err = PcdGetUnitsVector(p_FmPcd, &netEnvParams); + if (err) { - DeleteNode(p_CcNode); - REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC node key match table")); + DeleteTree(p_FmPcdCcTree,p_FmPcd); + XX_Free(p_Params); + REPORT_ERROR(MAJOR, err, NO_MSG); return NULL; } - IOMemSet32((uint8_t *)p_CcNode->h_KeysMatchTable, - 0, - matchTableSize); - } - - /* Allocate action descriptors table */ - p_CcNode->h_AdTable = - (t_Handle)FM_MURAM_AllocMem(h_FmMuram, - adTableSize, - FM_PCD_CC_AD_TABLE_ALIGN); - if (!p_CcNode->h_AdTable) - { - DeleteNode(p_CcNode); - REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC node action descriptors table")); - return NULL; - } - IOMemSet32((uint8_t *)p_CcNode->h_AdTable, 0, adTableSize); - - p_KeysMatchTblTmp = p_CcNode->h_KeysMatchTable; - p_AdTableTmp = p_CcNode->h_AdTable; - - /* For each key, create the key and the next step AD */ - for (tmp = 0; tmp < p_CcNode->numOfKeys; tmp++) - { - p_KeyParams = &p_CcNodeParam->keysParams.keyParams[tmp]; - if (p_KeysMatchTblTmp) + p_FmPcdCcTree->fmPcdGroupParam[i].totalBitsMask = netEnvParams.vector; + for (j = 0; j < p_FmPcdCcTree->fmPcdGroupParam[i].numOfEntriesInGroup; j++) { - /* Copy the key */ - Mem2IOCpy32((void*)p_KeysMatchTblTmp, p_KeyParams->p_Key, p_CcNode->sizeOfExtraction); - - /* Copy the key mask or initialize it to 0xFF..F */ - if (p_CcNode->lclMask && p_KeyParams->p_Mask) + err = ValidateNextEngineParams(h_FmPcd, + &p_FmPcdCcGroupParams->nextEnginePerEntriesInGrp[j], + e_FM_PCD_CC_STATS_MODE_NONE); + if (err) { - Mem2IOCpy32(PTR_MOVE(p_KeysMatchTblTmp, - p_CcNode->ccKeySizeAccExtraction), /* User's size of extraction rounded up to a valid matching table entry size */ - p_KeyParams->p_Mask, - p_CcNode->sizeOfExtraction); /* Exact size of extraction as received from the 
user */ + DeleteTree(p_FmPcdCcTree,p_FmPcd); + XX_Free(p_Params); + REPORT_ERROR(MAJOR, err, (NO_MSG)); + return NULL; } - else if (p_CcNode->lclMask) + + if (p_FmPcdCcGroupParams->nextEnginePerEntriesInGrp[j].h_Manip) { - IOMemSet32(PTR_MOVE(p_KeysMatchTblTmp, - p_CcNode->ccKeySizeAccExtraction), /* User's size of extraction rounded up to a valid matching table entry size */ - 0xff, - p_CcNode->sizeOfExtraction); /* Exact size of extraction as received from the user */ + err = FmPcdManipCheckParamsForCcNextEngine(&p_FmPcdCcGroupParams->nextEnginePerEntriesInGrp[j], &requiredAction); + if (err) + { + DeleteTree(p_FmPcdCcTree,p_FmPcd); + XX_Free(p_Params); + REPORT_ERROR(MAJOR, E_INVALID_STATE, NO_MSG); + return NULL; + } } + p_KeyAndNextEngineParams = p_Params+k; - p_KeysMatchTblTmp = PTR_MOVE(p_KeysMatchTblTmp, keySize * sizeof(uint8_t)); - } - - /* Create the next action descriptor in the match table */ - if (p_KeyParams->ccNextEngineParams.statisticsEn) - { - p_StatsObj = GetStatsObj(p_CcNode); - ASSERT_COND(p_StatsObj); - - statsParams.h_StatsAd = p_StatsObj->h_StatsAd; - statsParams.h_StatsCounters = p_StatsObj->h_StatsCounters; -#if (DPAA_VERSION >= 11) - statsParams.h_StatsFLRs = p_CcNode->h_StatsFLRs; + memcpy(&p_KeyAndNextEngineParams->nextEngineParams, + &p_FmPcdCcGroupParams->nextEnginePerEntriesInGrp[j], + sizeof(t_FmPcdCcNextEngineParams)); -#endif /* (DPAA_VERSION >= 11) */ - NextStepAd(p_AdTableTmp, - &statsParams, - &p_KeyParams->ccNextEngineParams, - p_FmPcd); + if ((p_KeyAndNextEngineParams->nextEngineParams.nextEngine == e_FM_PCD_CC) + && p_KeyAndNextEngineParams->nextEngineParams.h_Manip) + { + err = AllocAndFillAdForContLookupManip(p_KeyAndNextEngineParams->nextEngineParams.params.ccParams.h_CcNode); + if (err) + { + DeleteTree(p_FmPcdCcTree,p_FmPcd); + XX_Free(p_Params); + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC Tree")); + return NULL; + } + } - p_CcNode->keyAndNextEngineParams[tmp].p_StatsObj = p_StatsObj; - } - else - { - 
NextStepAd(p_AdTableTmp, - NULL, - &p_KeyParams->ccNextEngineParams, - p_FmPcd); + requiredAction |= UPDATE_CC_WITH_TREE; + p_KeyAndNextEngineParams->requiredAction = requiredAction; - p_CcNode->keyAndNextEngineParams[tmp].p_StatsObj = NULL; + k++; } - - p_AdTableTmp = PTR_MOVE(p_AdTableTmp, FM_PCD_CC_AD_ENTRY_SIZE); } - /* Update next engine for the 'miss' entry */ - if (p_CcNodeParam->keysParams.ccNextEngineParamsForMiss.statisticsEn) - { - p_StatsObj = GetStatsObj(p_CcNode); - ASSERT_COND(p_StatsObj); - - statsParams.h_StatsAd = p_StatsObj->h_StatsAd; - statsParams.h_StatsCounters = p_StatsObj->h_StatsCounters; -#if (DPAA_VERSION >= 11) - statsParams.h_StatsFLRs = p_CcNode->h_StatsFLRs; + p_FmPcdCcTree->numOfEntries = (uint8_t)k; + p_FmPcdCcTree->numOfGrps = p_PcdGroupsParam->numOfGrps; -#endif /* (DPAA_VERSION >= 11) */ + p_FmPcdCcTree->ccTreeBaseAddr = + PTR_TO_UINT(FM_MURAM_AllocMem(FmPcdGetMuramHandle(h_FmPcd), + (uint32_t)( FM_PCD_MAX_NUM_OF_CC_GROUPS * FM_PCD_CC_AD_ENTRY_SIZE), + FM_PCD_CC_TREE_ADDR_ALIGN)); + if (!p_FmPcdCcTree->ccTreeBaseAddr) + { + DeleteTree(p_FmPcdCcTree,p_FmPcd); + XX_Free(p_Params); + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC Tree")); + return NULL; + } + IOMemSet32(UINT_TO_PTR(p_FmPcdCcTree->ccTreeBaseAddr), 0, (uint32_t)(FM_PCD_MAX_NUM_OF_CC_GROUPS * FM_PCD_CC_AD_ENTRY_SIZE)); - NextStepAd(p_AdTableTmp, - &statsParams, - &p_CcNodeParam->keysParams.ccNextEngineParamsForMiss, - p_FmPcd); + p_CcTreeTmp = UINT_TO_PTR(p_FmPcdCcTree->ccTreeBaseAddr); - p_CcNode->keyAndNextEngineParams[tmp].p_StatsObj = p_StatsObj; - } - else + j = 0; + for (i = 0; i < numOfEntries; i++) { - NextStepAd(p_AdTableTmp, + p_KeyAndNextEngineParams = p_Params + i; + + NextStepAd(p_CcTreeTmp, NULL, - &p_CcNodeParam->keysParams.ccNextEngineParamsForMiss, + &p_KeyAndNextEngineParams->nextEngineParams, p_FmPcd); - p_CcNode->keyAndNextEngineParams[tmp].p_StatsObj = NULL; - } + p_CcTreeTmp = PTR_MOVE(p_CcTreeTmp, FM_PCD_CC_AD_ENTRY_SIZE); - /* This 
parameter will be used to initialize the "key length" field in the action descriptor - that points to this node and it should be 0 for full field extraction */ - if (fullField == TRUE) - p_CcNode->sizeOfExtraction = 0; + memcpy(&p_FmPcdCcTree->keyAndNextEngineParams[i], + p_KeyAndNextEngineParams, + sizeof(t_FmPcdCcKeyAndNextEngineParams)); - for (tmp = 0; tmp < MIN(p_CcNode->numOfKeys + 1, CC_MAX_NUM_OF_KEYS); tmp++) - { - if (p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.nextEngine == e_FM_PCD_CC) + if (p_FmPcdCcTree->keyAndNextEngineParams[i].nextEngineParams.nextEngine== e_FM_PCD_CC) { - p_FmPcdCcNextNode = (t_FmPcdCcNode*)p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.params.ccParams.h_CcNode; - p_CcInformation = FindNodeInfoInReleventLst(&p_FmPcdCcNextNode->ccPrevNodesLst, - (t_Handle)p_CcNode, + p_FmPcdCcNextNode = (t_FmPcdCcNode*)p_FmPcdCcTree->keyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode; + p_CcInformation = FindNodeInfoInReleventLst(&p_FmPcdCcNextNode->ccTreeIdLst, + (t_Handle)p_FmPcdCcTree, p_FmPcdCcNextNode->h_Spinlock); + if (!p_CcInformation) { memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation)); - ccNodeInfo.h_CcNode = (t_Handle)p_CcNode; + ccNodeInfo.h_CcNode = (t_Handle)p_FmPcdCcTree; ccNodeInfo.index = 1; - EnqueueNodeInfoToRelevantLst(&p_FmPcdCcNextNode->ccPrevNodesLst, + EnqueueNodeInfoToRelevantLst(&p_FmPcdCcNextNode->ccTreeIdLst, &ccNodeInfo, p_FmPcdCcNextNode->h_Spinlock); } else p_CcInformation->index++; - - if (p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.h_Manip) - { - h_Manip = p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.h_Manip; - p_CcInformation = FindNodeInfoInReleventLst(FmPcdManipGetNodeLstPointedOnThisManip(h_Manip), - (t_Handle)p_CcNode, - FmPcdManipGetSpinlock(h_Manip)); - if (!p_CcInformation) - { - memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation)); - ccNodeInfo.h_CcNode = (t_Handle)p_CcNode; - ccNodeInfo.index = 1; - 
EnqueueNodeInfoToRelevantLst(FmPcdManipGetNodeLstPointedOnThisManip(h_Manip), - &ccNodeInfo, - FmPcdManipGetSpinlock(h_Manip)); - } - else - p_CcInformation->index++; - } } } - p_AdTableTmp = p_CcNode->h_AdTable; + FmPcdIncNetEnvOwners(h_FmPcd, p_FmPcdCcTree->netEnvId); + p_CcTreeTmp = UINT_TO_PTR(p_FmPcdCcTree->ccTreeBaseAddr); - if (!FmPcdLockTryLockAll(h_FmPcd)) + if (!FmPcdLockTryLockAll(p_FmPcd)) { - FM_PCD_MatchTableDelete((t_Handle)p_CcNode); - DBG(TRACE, ("FmPcdLockTryLockAll failed")); - return NULL; + FM_PCD_CcRootDelete(p_FmPcdCcTree); + XX_Free(p_Params); + DBG(TRACE, ("FmPcdLockTryLockAll failed")); + return NULL; } - /* Required action for each next engine */ - for (tmp = 0; tmp < MIN(p_CcNode->numOfKeys + 1, CC_MAX_NUM_OF_KEYS); tmp++) + for (i = 0; i < numOfEntries; i++) { - if (p_CcNode->keyAndNextEngineParams[tmp].requiredAction) + if (p_FmPcdCcTree->keyAndNextEngineParams[i].requiredAction) { err = SetRequiredAction(h_FmPcd, - p_CcNode->keyAndNextEngineParams[tmp].requiredAction, - &p_CcNode->keyAndNextEngineParams[tmp], - p_AdTableTmp, + p_FmPcdCcTree->keyAndNextEngineParams[i].requiredAction, + &p_FmPcdCcTree->keyAndNextEngineParams[i], + p_CcTreeTmp, 1, - NULL); + p_FmPcdCcTree); if (err) { - FmPcdLockUnlockAll(h_FmPcd); - FM_PCD_MatchTableDelete((t_Handle)p_CcNode); - REPORT_ERROR(MAJOR, err, NO_MSG); + FmPcdLockUnlockAll(p_FmPcd); + FM_PCD_CcRootDelete(p_FmPcdCcTree); + XX_Free(p_Params); + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("No memory")); return NULL; } - p_AdTableTmp = PTR_MOVE(p_AdTableTmp, FM_PCD_CC_AD_ENTRY_SIZE); + p_CcTreeTmp = PTR_MOVE(p_CcTreeTmp, FM_PCD_CC_AD_ENTRY_SIZE); } } - FmPcdLockUnlockAll(h_FmPcd); + FmPcdLockUnlockAll(p_FmPcd); + p_FmPcdCcTree->p_Lock = FmPcdAcquireLock(p_FmPcd); + if (!p_FmPcdCcTree->p_Lock) + { + FM_PCD_CcRootDelete(p_FmPcdCcTree); + XX_Free(p_Params); + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM CC lock")); + return NULL; + } + + XX_Free(p_Params); + + return p_FmPcdCcTree; +} + +t_Error 
FM_PCD_CcRootDelete(t_Handle h_CcTree) +{ + t_FmPcd *p_FmPcd; + t_FmPcdCcTree *p_CcTree = (t_FmPcdCcTree *)h_CcTree; + int i= 0; + + SANITY_CHECK_RETURN_ERROR(p_CcTree,E_INVALID_STATE); + p_FmPcd = (t_FmPcd *)p_CcTree->h_FmPcd; + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + + FmPcdDecNetEnvOwners(p_FmPcd, p_CcTree->netEnvId); + + if (p_CcTree->owners) + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("the tree with this ID can not be removed because this tree is occupied, first - unbind this tree")); + + /* Delete reassembly schemes if exist */ + if (p_CcTree->h_IpReassemblyManip) + { + FmPcdManipDeleteIpReassmSchemes(p_CcTree->h_IpReassemblyManip); + FmPcdManipUpdateOwner(p_CcTree->h_IpReassemblyManip, FALSE); + } + + for (i = 0; i numOfEntries; i++) + { + if (p_CcTree->keyAndNextEngineParams[i].nextEngineParams.nextEngine == e_FM_PCD_CC) + UpdateNodeOwner(p_CcTree->keyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode, FALSE); + + if (p_CcTree->keyAndNextEngineParams[i].nextEngineParams.h_Manip) + FmPcdManipUpdateOwner(p_CcTree->keyAndNextEngineParams[i].nextEngineParams.h_Manip, FALSE); + +#ifdef FM_CAPWAP_SUPPORT + if ((p_CcTree->numOfGrps == 1) && + (p_CcTree->fmPcdGroupParam[0].numOfEntriesInGroup == 1) && + (p_CcTree->keyAndNextEngineParams[0].nextEngineParams.nextEngine == e_FM_PCD_CC) && + p_CcTree->keyAndNextEngineParams[0].nextEngineParams.params.ccParams.h_CcNode && + IsCapwapApplSpecific(p_CcTree->keyAndNextEngineParams[0].nextEngineParams.params.ccParams.h_CcNode)) + { + if (FM_PCD_ManipNodeDelete(p_CcTree->keyAndNextEngineParams[0].nextEngineParams.h_Manip) != E_OK) + return E_INVALID_STATE; + } +#endif /* FM_CAPWAP_SUPPORT */ + +#if (DPAA_VERSION >= 11) + if ((p_CcTree->keyAndNextEngineParams[i].nextEngineParams.nextEngine == e_FM_PCD_FR) && + (p_CcTree->keyAndNextEngineParams[i].nextEngineParams.params.frParams.h_FrmReplic)) + 
FrmReplicGroupUpdateOwner(p_CcTree->keyAndNextEngineParams[i].nextEngineParams.params.frParams.h_FrmReplic, + FALSE); +#endif /* (DPAA_VERSION >= 11) */ + } + + if (p_CcTree->p_Lock) + FmPcdReleaseLock(p_CcTree->h_FmPcd, p_CcTree->p_Lock); + + DeleteTree(p_CcTree, p_FmPcd); + + return E_OK; +} + +t_Error FM_PCD_CcRootModifyNextEngine(t_Handle h_CcTree, + uint8_t grpId, + uint8_t index, + t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams) +{ + t_FmPcd *p_FmPcd; + t_FmPcdCcTree *p_CcTree = (t_FmPcdCcTree *)h_CcTree; + t_Error err = E_OK; + + SANITY_CHECK_RETURN_ERROR(p_FmPcdCcNextEngineParams, E_NULL_POINTER); + SANITY_CHECK_RETURN_ERROR(p_CcTree,E_INVALID_STATE); + p_FmPcd = (t_FmPcd *)p_CcTree->h_FmPcd; + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + + if (!FmPcdLockTryLockAll(p_FmPcd)) + { + DBG(TRACE, ("FmPcdLockTryLockAll failed")); + return ERROR_CODE(E_BUSY); + } + + err = FmPcdCcModifyNextEngineParamTree(p_FmPcd, + p_CcTree, + grpId, + index, + p_FmPcdCcNextEngineParams); + FmPcdLockUnlockAll(p_FmPcd); + + if (err) + { + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + return E_OK; +} + +t_Handle FM_PCD_MatchTableSet(t_Handle h_FmPcd, t_FmPcdCcNodeParams *p_CcNodeParam) +{ + t_FmPcdCcNode *p_CcNode; + t_Error err; + + SANITY_CHECK_RETURN_VALUE(h_FmPcd, E_INVALID_HANDLE, NULL); + SANITY_CHECK_RETURN_VALUE(p_CcNodeParam, E_NULL_POINTER, NULL); + + p_CcNode = (t_FmPcdCcNode*)XX_Malloc(sizeof(t_FmPcdCcNode)); + if (!p_CcNode) + { + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("No memory")); + return NULL; + } + memset(p_CcNode, 0, sizeof(t_FmPcdCcNode)); + + err = FmPcdCcMatchTableSet(h_FmPcd, p_CcNode, p_CcNodeParam); + + switch (GET_ERROR_TYPE(err)) + { + case E_OK: + break; + + case E_BUSY: + DBG(TRACE, ("E_BUSY error")); + return NULL; + + default: + REPORT_ERROR(MAJOR, err, NO_MSG); + return NULL; + } return p_CcNode; } @@ -6536,6 +6563,9 @@ t_Error FM_PCD_MatchTableGetKeyStatistics(t_Handle h_CcNode, intFlags = XX_LockIntrSpinlock(p_CcNode->h_Spinlock); + if 
(keyIndex >= p_CcNode->numOfKeys) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("The provided keyIndex exceeds the number of keys in this match table")); + err = MatchTableGetKeyStatistics(p_CcNode, keyIndex, p_KeyStatistics); @@ -6548,6 +6578,30 @@ t_Error FM_PCD_MatchTableGetKeyStatistics(t_Handle h_CcNode, return E_OK; } +t_Error FM_PCD_MatchTableGetMissStatistics(t_Handle h_CcNode, + t_FmPcdCcKeyStatistics *p_MissStatistics) +{ + t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode; + uint32_t intFlags; + t_Error err; + + SANITY_CHECK_RETURN_ERROR(h_CcNode, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_MissStatistics, E_NULL_POINTER); + + intFlags = XX_LockIntrSpinlock(p_CcNode->h_Spinlock); + + err = MatchTableGetKeyStatistics(p_CcNode, + p_CcNode->numOfKeys, + p_MissStatistics); + + XX_UnlockIntrSpinlock(p_CcNode->h_Spinlock, intFlags); + + if (err != E_OK) + RETURN_ERROR(MAJOR, err, NO_MSG); + + return E_OK; +} + t_Error FM_PCD_MatchTableFindNGetKeyStatistics(t_Handle h_CcNode, uint8_t keySize, uint8_t *p_Key, @@ -6572,6 +6626,8 @@ t_Error FM_PCD_MatchTableFindNGetKeyStatistics(t_Handle h_CcNode "match table of the provided node")); } + ASSERT_COND(keyIndex < p_CcNode->numOfKeys); + err = MatchTableGetKeyStatistics(p_CcNode, keyIndex, p_KeyStatistics); @@ -6624,10 +6680,13 @@ t_Handle FM_PCD_HashTableSet(t_Handle h_FmPcd, t_FmPcdHashTableParams *p_Param) { t_FmPcdCcNode *p_CcNodeHashTbl; t_FmPcdCcNodeParams *p_IndxHashCcNodeParam, *p_ExactMatchCcNodeParam; - t_Handle h_CcNode; + t_FmPcdCcNode *p_CcNode; + t_Handle h_MissStatsCounters = NULL; t_FmPcdCcKeyParams *p_HashKeyParams; int i; uint16_t numOfSets, numOfWays, countMask, onesCount = 0; + bool statsEnForMiss = FALSE; + t_Error err; SANITY_CHECK_RETURN_VALUE(h_FmPcd, E_INVALID_HANDLE, NULL); SANITY_CHECK_RETURN_VALUE(p_Param, E_NULL_POINTER, NULL); @@ -6684,6 +6743,28 @@ t_Handle FM_PCD_HashTableSet(t_Handle h_FmPcd, t_FmPcdHashTableParams *p_Param) if (p_Param->maxNumOfKeys % numOfSets) DBG(INFO, 
("'maxNumOfKeys' is not a multiple of hash number of ways, so number of ways will be rounded up")); + if ((p_Param->statisticsMode == e_FM_PCD_CC_STATS_MODE_FRAME) || + (p_Param->statisticsMode == e_FM_PCD_CC_STATS_MODE_BYTE_AND_FRAME)) + { + /* Allocating a statistics counters table that will be used by all + 'miss' entries of the hash table */ + h_MissStatsCounters = (t_Handle)FM_MURAM_AllocMem(FmPcdGetMuramHandle(h_FmPcd), + 2 * FM_PCD_CC_STATS_COUNTER_SIZE, + FM_PCD_CC_AD_TABLE_ALIGN); + if (!h_MissStatsCounters) + { + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for statistics table for hash miss")); + return NULL; + } + memset(h_MissStatsCounters, 0, (2 * FM_PCD_CC_STATS_COUNTER_SIZE)); + + /* Always enable statistics for 'miss', so that a statistics AD will be + initialized from the start. We'll store the requested 'statistics enable' + value and it will be used when statistics are read by the user. */ + statsEnForMiss = p_Param->ccNextEngineParamsForMiss.statisticsEn; + p_Param->ccNextEngineParamsForMiss.statisticsEn = TRUE; + } + /* Building exact-match node params, will be used to create the hash buckets */ p_ExactMatchCcNodeParam->extractCcParams.type = e_FM_PCD_EXTRACT_NON_HDR; @@ -6703,13 +6784,23 @@ t_Handle FM_PCD_HashTableSet(t_Handle h_FmPcd, t_FmPcdHashTableParams *p_Param) for (i = 0; i < numOfSets; i++) { - h_CcNode = FM_PCD_MatchTableSet(h_FmPcd, p_ExactMatchCcNodeParam); - if (!h_CcNode) + /* Each exact-match node will be marked as a 'bucket' and provided with a pointer to statistics counters, + to be used for 'miss' entry statistics */ + p_CcNode = (t_FmPcdCcNode *)XX_Malloc(sizeof(t_FmPcdCcNode)); + if (!p_CcNode) + break; + memset(p_CcNode, 0, sizeof(t_FmPcdCcNode)); + + p_CcNode->isHashBucket = TRUE; + p_CcNode->h_MissStatsCounters = h_MissStatsCounters; + + err = FmPcdCcMatchTableSet(h_FmPcd, p_CcNode, p_ExactMatchCcNodeParam); + if (err) break; p_HashKeyParams[i].ccNextEngineParams.nextEngine = e_FM_PCD_CC; 
p_HashKeyParams[i].ccNextEngineParams.statisticsEn = FALSE; - p_HashKeyParams[i].ccNextEngineParams.params.ccParams.h_CcNode = h_CcNode; + p_HashKeyParams[i].ccNextEngineParams.params.ccParams.h_CcNode = p_CcNode; } if (i < numOfSets) @@ -6717,6 +6808,8 @@ t_Handle FM_PCD_HashTableSet(t_Handle h_FmPcd, t_FmPcdHashTableParams *p_Param) for (i = i-1; i >=0; i--) FM_PCD_MatchTableDelete(p_HashKeyParams[i].ccNextEngineParams.params.ccParams.h_CcNode); + FM_MURAM_FreeMem(FmPcdGetMuramHandle(h_FmPcd), h_MissStatsCounters); + REPORT_ERROR(MAJOR, E_NULL_POINTER, NO_MSG); XX_Free(p_IndxHashCcNodeParam); XX_Free(p_ExactMatchCcNodeParam); @@ -6738,10 +6831,17 @@ t_Handle FM_PCD_HashTableSet(t_Handle h_FmPcd, t_FmPcdHashTableParams *p_Param) p_IndxHashCcNodeParam->keysParams.keySize = 2; p_CcNodeHashTbl = FM_PCD_MatchTableSet(h_FmPcd, p_IndxHashCcNodeParam); - + if (p_CcNodeHashTbl) p_CcNodeHashTbl->kgHashShift = p_Param->kgHashShift; - + + /* Storing the allocated counters for buckets 'miss' in the hash table + and is statistics for miss wre enabled. 
*/ + p_CcNodeHashTbl->h_MissStatsCounters = h_MissStatsCounters; + p_CcNodeHashTbl->statsEnForMiss = statsEnForMiss; + + XX_Print("Hash 0x%x: 0x%x\n", p_CcNodeHashTbl, h_MissStatsCounters); + XX_Free(p_IndxHashCcNodeParam); XX_Free(p_ExactMatchCcNodeParam); @@ -6751,12 +6851,14 @@ t_Handle FM_PCD_HashTableSet(t_Handle h_FmPcd, t_FmPcdHashTableParams *p_Param) t_Error FM_PCD_HashTableDelete(t_Handle h_HashTbl) { t_FmPcdCcNode *p_HashTbl = (t_FmPcdCcNode *)h_HashTbl; - t_Handle *p_HashBuckets; + t_Handle h_FmPcd; + t_Handle *p_HashBuckets, h_MissStatsCounters; uint16_t i, numOfBuckets; t_Error err; SANITY_CHECK_RETURN_ERROR(p_HashTbl, E_INVALID_HANDLE); + /* Store all hash buckets before the hash is freed */ numOfBuckets = p_HashTbl->numOfKeys; p_HashBuckets = (t_Handle *)XX_Malloc(numOfBuckets * sizeof(t_Handle)); @@ -6766,14 +6868,23 @@ t_Error FM_PCD_HashTableDelete(t_Handle h_HashTbl) for (i = 0; i < numOfBuckets; i++) p_HashBuckets[i] = p_HashTbl->keyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode; + h_FmPcd = p_HashTbl->h_FmPcd; + h_MissStatsCounters = p_HashTbl->h_MissStatsCounters; + + /* Free the hash */ err = FM_PCD_MatchTableDelete(p_HashTbl); + /* Free each hash bucket */ for (i = 0; i < numOfBuckets; i++) err |= FM_PCD_MatchTableDelete(p_HashBuckets[i]); if (err) RETURN_ERROR(MAJOR, err, NO_MSG); + /* Free statistics counters for 'miss', id these were allocated */ + if (h_MissStatsCounters) + FM_MURAM_FreeMem(FmPcdGetMuramHandle(h_FmPcd), h_MissStatsCounters); + XX_Free(p_HashBuckets); return E_OK; @@ -6879,11 +6990,29 @@ t_Error FM_PCD_HashTableModifyMissNextEngine(t_Handle h_HashTbl t_FmPcdCcNode *p_HashTbl = (t_FmPcdCcNode *)h_HashTbl; t_Handle h_HashBucket; uint8_t i; + bool nullifyMissStats = FALSE; t_Error err; SANITY_CHECK_RETURN_ERROR(h_HashTbl, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_FmPcdCcNextEngineParams, E_NULL_POINTER); + if ((!p_HashTbl->h_MissStatsCounters) && (p_FmPcdCcNextEngineParams->statisticsEn)) + 
RETURN_ERROR(MAJOR, E_CONFLICT, + ("Statistics are requested for a key, but statistics mode was set" + "to 'NONE' upon initialization")); + + if (p_HashTbl->h_MissStatsCounters) + { + if ((!p_HashTbl->statsEnForMiss) && (p_FmPcdCcNextEngineParams->statisticsEn)) + nullifyMissStats = TRUE; + + if ((p_HashTbl->statsEnForMiss) && (!p_FmPcdCcNextEngineParams->statisticsEn)) + { + p_HashTbl->statsEnForMiss = FALSE; + p_FmPcdCcNextEngineParams->statisticsEn = TRUE; + } + } + for (i = 0; i < p_HashTbl->numOfKeys; i++) { h_HashBucket = p_HashTbl->keyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode; @@ -6894,6 +7023,13 @@ t_Error FM_PCD_HashTableModifyMissNextEngine(t_Handle h_HashTbl RETURN_ERROR(MAJOR, err, NO_MSG); } + if (nullifyMissStats) + { + memset(p_HashTbl->h_MissStatsCounters, 0, (2 * FM_PCD_CC_STATS_COUNTER_SIZE)); + memset(p_HashTbl->h_MissStatsCounters, 0, (2 * FM_PCD_CC_STATS_COUNTER_SIZE)); + p_HashTbl->statsEnForMiss = TRUE; + } + return E_OK; } @@ -6947,3 +7083,21 @@ t_Error FM_PCD_HashTableFindNGetKeyStatistics(t_Handle h_HashTbl NULL, p_KeyStatistics); } + +t_Error FM_PCD_HashTableGetMissStatistics(t_Handle h_HashTbl, + t_FmPcdCcKeyStatistics *p_MissStatistics) +{ + t_FmPcdCcNode *p_HashTbl = (t_FmPcdCcNode *)h_HashTbl; + t_Handle h_HashBucket; + + SANITY_CHECK_RETURN_ERROR(p_HashTbl, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_MissStatistics, E_NULL_POINTER); + + if (!p_HashTbl->statsEnForMiss) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Statistics were not enabled for miss")); + + h_HashBucket = p_HashTbl->keyAndNextEngineParams[0].nextEngineParams.params.ccParams.h_CcNode; + + return FM_PCD_MatchTableGetMissStatistics(h_HashBucket, + p_MissStatistics); +} diff --git a/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_cc.h b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_cc.h index eeb43a2..997eb5d 100644 --- a/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_cc.h +++ 
b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_cc.h @@ -310,6 +310,17 @@ typedef struct uint32_t numOfStatsFLRs; uint32_t countersArraySize; + bool isHashBucket; /**< Valid for match table node that is a bucket of a hash table only */ + t_Handle h_MissStatsCounters; /**< Valid for hash table node and match table that is a bucket; + Holds the statistics counters allocated by the hash table and + are shared by all hash table buckets; */ + t_Handle h_PrivMissStatsCounters; /**< Valid for match table node that is a bucket of a hash table only; + Holds the statistics counters that were allocated for this node + and replaced by the shared counters (allocated by the hash table); */ + bool statsEnForMiss; /**< Valid for hash table node only; TRUE is statistics are currently + enabled for hash 'miss', FALSE otherwise; This parameter effects the + returned statistics count to user, statistics AD always present for 'miss' + for all hash buckets; */ bool glblMaskUpdated; t_Handle p_GlblMask; bool lclMask; diff --git a/drivers/net/ethernet/freescale/fman/inc/Peripherals/fm_pcd_ext.h b/drivers/net/ethernet/freescale/fman/inc/Peripherals/fm_pcd_ext.h index 60edfd2..d9bea95 100644 --- a/drivers/net/ethernet/freescale/fman/inc/Peripherals/fm_pcd_ext.h +++ b/drivers/net/ethernet/freescale/fman/inc/Peripherals/fm_pcd_ext.h @@ -1068,7 +1068,9 @@ typedef enum e_FmPcdPlcrRateMode { *//***************************************************************************/ typedef enum e_FmPcdDoneAction { e_FM_PCD_ENQ_FRAME = 0, /**< Enqueue frame */ - e_FM_PCD_DROP_FRAME /**< Drop frame */ + e_FM_PCD_DROP_FRAME /**< Mark this frame as error frame and continue + to error flow; 'FM_PORT_FRM_ERR_CLS_DISCARD' + flag will be set for this frame. 
*/ } e_FmPcdDoneAction; /**************************************************************************//** @@ -2080,8 +2082,7 @@ typedef struct t_FmPcdPlcrProfileParams { /**************************************************************************//** @Description Parameters for selecting a location for requested manipulation *//***************************************************************************/ -typedef struct t_FmManipHdrInfo -{ +typedef struct t_FmManipHdrInfo { e_NetHeaderType hdr; /**< Header selection */ e_FmPcdHdrIndex hdrIndex; /**< Relevant only for MPLS, VLAN and tunneled IP. Otherwise should be cleared. */ bool byField; /**< TRUE if the location of manipulation is according to some field in the specific header*/ @@ -2575,7 +2576,7 @@ typedef struct t_FmPcdManipParams { *//***************************************************************************/ typedef struct t_FmPcdManipReassemIpStats { /* common counters for both IPv4 and IPv6 */ - uint32_t timeout; /**< Counts the number of TimeOut occurrences */ + uint32_t timeout; /**< Counts the number of timeout occurrences */ uint32_t rfdPoolBusy; /**< Counts the number of failed attempts to allocate a Reassembly Frame Descriptor */ uint32_t internalBufferBusy; /**< Counts the number of times an internal buffer busy occurred */ @@ -2942,7 +2943,8 @@ t_Error FM_PCD_MatchTableDelete(t_Handle h_CcNode); @Return E_OK on success; Error code otherwise. - @Cautions Allowed only following FM_PCD_MatchTableSet(). + @Cautions Allowed only following FM_PCD_MatchTableSet(); + Not relevant in the case the node is of type 'INDEXED_LOOKUP'. 
*//***************************************************************************/ t_Error FM_PCD_MatchTableModifyMissNextEngine(t_Handle h_CcNode, t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams); @@ -3195,6 +3197,29 @@ t_Error FM_PCD_MatchTableGetKeyStatistics(t_Handle h_CcNode, t_FmPcdCcKeyStatistics *p_KeyStatistics); /**************************************************************************//** + @Function FM_PCD_MatchTableGetMissStatistics + + @Description This routine may be used to get statistics counters of miss entry + in a CC Node. + + If 'e_FM_PCD_CC_STATS_MODE_FRAME' and + 'e_FM_PCD_CC_STATS_MODE_BYTE_AND_FRAME' were set for this node, + these counters reflect how many frames were not matched to any + existing key and therefore passed through the miss entry; The + total frames count will be returned in the counter of the + first range (as only one frame length range was defined). + + @Param[in] h_CcNode A handle to the node + @Param[out] p_MissStatistics Statistics counters for 'miss' + + @Return The statistics for 'miss'. + + @Cautions Allowed only following FM_PCD_MatchTableSet(). +*//***************************************************************************/ +t_Error FM_PCD_MatchTableGetMissStatistics(t_Handle h_CcNode, + t_FmPcdCcKeyStatistics *p_MissStatistics); + +/**************************************************************************//** @Function FM_PCD_MatchTableFindNGetKeyStatistics @Description This routine may be used to get statistics counters of specific key @@ -3446,6 +3471,27 @@ t_Error FM_PCD_HashTableFindNGetKeyStatistics(t_Handle h_HashTbl t_FmPcdCcKeyStatistics *p_KeyStatistics); /**************************************************************************//** + @Function FM_PCD_HashTableGetMissStatistics + + @Description This routine may be used to get statistics counters of 'miss' + entry of the a hash table. 
+ + If 'e_FM_PCD_CC_STATS_MODE_FRAME' and + 'e_FM_PCD_CC_STATS_MODE_BYTE_AND_FRAME' were set for this node, + these counters reflect how many frames were not matched to any + existing key and therefore passed through the miss entry; + + @Param[in] h_HashTbl A handle to a hash table + @Param[out] p_MissStatistics Statistics counters for 'miss' + + @Return The statistics for 'miss'. + + @Cautions Allowed only following FM_PCD_HashTableSet(). +*//***************************************************************************/ +t_Error FM_PCD_HashTableGetMissStatistics(t_Handle h_HashTbl, + t_FmPcdCcKeyStatistics *p_MissStatistics); + +/**************************************************************************//** @Function FM_PCD_ManipNodeSet @Description This routine should be called for defining a manipulation diff --git a/drivers/net/ethernet/freescale/fman/src/inc/wrapper/lnxwrp_exp_sym.h b/drivers/net/ethernet/freescale/fman/src/inc/wrapper/lnxwrp_exp_sym.h index 4c60893..13c61d2 100644 --- a/drivers/net/ethernet/freescale/fman/src/inc/wrapper/lnxwrp_exp_sym.h +++ b/drivers/net/ethernet/freescale/fman/src/inc/wrapper/lnxwrp_exp_sym.h @@ -88,6 +88,8 @@ EXPORT_SYMBOL(FM_PCD_MatchTableGetNextEngine); EXPORT_SYMBOL(FM_PCD_MatchTableGetKeyCounter); EXPORT_SYMBOL(FM_PCD_MatchTableGetKeyStatistics); EXPORT_SYMBOL(FM_PCD_MatchTableFindNGetKeyStatistics); +EXPORT_SYMBOL(FM_PCD_MatchTableGetMissStatistics); +EXPORT_SYMBOL(FM_PCD_HashTableGetMissStatistics); EXPORT_SYMBOL(FM_PCD_HashTableSet); EXPORT_SYMBOL(FM_PCD_HashTableDelete); EXPORT_SYMBOL(FM_PCD_HashTableAddKey); -- cgit v0.10.2 From 56fefd712b54327e340ece05a7f115a1d5af5fff Mon Sep 17 00:00:00 2001 From: Marian Chereji Date: Tue, 30 Jul 2013 18:21:39 +0300 Subject: dpa_offload: Add possibility to query classification table miss action details Added a new function to query classification table miss action details. 
This function is only advertised as internal API and is needed by the DPA Stats component to acquire Miss Action Statistics. Signed-off-by: Marian Chereji Change-Id: If9f506316c887bc44ebd5363c526a8f4fda3da1c Reviewed-on: http://git.am.freescale.net:8181/3829 Reviewed-by: Schmitt Richard-B43082 Tested-by: Schmitt Richard-B43082 diff --git a/drivers/staging/fsl_dpa_offload/dpa_classifier.c b/drivers/staging/fsl_dpa_offload/dpa_classifier.c index a39fc69..5056b2e 100644 --- a/drivers/staging/fsl_dpa_offload/dpa_classifier.c +++ b/drivers/staging/fsl_dpa_offload/dpa_classifier.c @@ -462,6 +462,9 @@ int dpa_classif_table_modify_miss_action(int td, return -EBUSY; } } + + memcpy(&ptable->miss_action, miss_action, sizeof(*miss_action)); + RELEASE_OBJECT(ptable); dpa_cls_dbg(("DEBUG: dpa_classifier %s (%d) <--\n", __func__, @@ -3230,6 +3233,26 @@ static inline void key_apply_mask(const struct dpa_offload_lookup_key *key, new_key[i] = key->byte[i] & key->mask[i]; } +int dpa_classif_get_miss_action(int td, struct dpa_cls_tbl_action *miss_action) +{ + struct dpa_cls_table *ptable; + + if (!miss_action) + return -EINVAL; + + LOCK_OBJECT(table_array, td, ptable, -EINVAL); + if (ptable->miss_action.type == DPA_CLS_TBL_ACTION_NONE) { + /* No miss action was specified for this table */ + RELEASE_OBJECT(ptable); + return -ENODEV; + } else + memcpy(miss_action, &ptable->miss_action, sizeof(*miss_action)); + + RELEASE_OBJECT(ptable); + + return 0; +} + static int nat_hm_check_params(const struct dpa_cls_hm_nat_params *nat_params) { unsigned int ip_ver = 0; diff --git a/drivers/staging/fsl_dpa_offload/dpa_classifier.h b/drivers/staging/fsl_dpa_offload/dpa_classifier.h index 0667782..659e1b7 100644 --- a/drivers/staging/fsl_dpa_offload/dpa_classifier.h +++ b/drivers/staging/fsl_dpa_offload/dpa_classifier.h @@ -206,6 +206,9 @@ struct dpa_cls_table { /* (Initial) parameters of the DPA Classifier table. */ struct dpa_cls_tbl_params params; + /* Table miss action. 
*/ + struct dpa_cls_tbl_action miss_action; + /* Access control object for this table to avoid race conditions. */ struct mutex access; }; @@ -675,6 +678,12 @@ int dpa_classif_import_static_hm(void *hm, int next_hmd, int *hmd); void *dpa_classif_get_static_hm_handle(int hmd); /* + * Provides details about the miss action configured on a classification + * table. + */ +int dpa_classif_get_miss_action(int td, struct dpa_cls_tbl_action *miss_action); + +/* * Locks a header manipulation chain (marks as "used"). The header manipulation * operations cannot be removed as long as they are locked. The function * provides the FMan driver handle of the manip node which is chain head. -- cgit v0.10.2 From af2dcedf4b69a94ed42689be537d3f66e3bb2cbd Mon Sep 17 00:00:00 2001 From: Anca Jeanina FLOAREA Date: Fri, 2 Aug 2013 14:56:08 +0300 Subject: dpa_offload: Add support in DPA Stats for miss statistics Added support in DPA Stats to retrieve the statistics for the miss-entry of either a Classification Node or a Classification Table. The user can request statistics for miss-entry by providing the key set to NULL. The Classification counter API was changed in order to allow the user to provide NULL instead of a valid key. 
Signed-off-by: Anca Jeanina FLOAREA Change-Id: Id871766e5cc7b494c934096829e12af15ded7544 Reviewed-on: http://git.am.freescale.net:8181/3830 Reviewed-by: Zanoschi Aurelian-B43522 Reviewed-by: Chereji Marian-Cornel-R27762 Reviewed-by: Schmitt Richard-B43082 Tested-by: Schmitt Richard-B43082 diff --git a/drivers/staging/fsl_dpa_offload/dpa_stats.c b/drivers/staging/fsl_dpa_offload/dpa_stats.c index b911732..f82fcc3 100644 --- a/drivers/staging/fsl_dpa_offload/dpa_stats.c +++ b/drivers/staging/fsl_dpa_offload/dpa_stats.c @@ -114,6 +114,46 @@ static int check_dpa_stats_params(const struct dpa_stats_params *params) return 0; } +static int set_cnt_classif_tbl_retrieve_func(struct dpa_stats_cnt_cb *cnt_cb) +{ + switch (cnt_cb->tbl_cb.type) { + case DPA_CLS_TBL_HASH: + cnt_cb->f_get_cnt_stats = get_cnt_cls_tbl_hash_stats; + break; + case DPA_CLS_TBL_INDEXED: + cnt_cb->f_get_cnt_stats = get_cnt_cls_tbl_index_stats; + break; + case DPA_CLS_TBL_EXACT_MATCH: + cnt_cb->f_get_cnt_stats = get_cnt_cls_tbl_match_stats; + break; + default: + log_err("Unsupported DPA Classifier table type %d\n", + cnt_cb->tbl_cb.type); + return -EINVAL; + } + return 0; +} + +static int set_cnt_classif_node_retrieve_func(struct dpa_stats_cnt_cb *cnt_cb, + enum dpa_stats_classif_node_type ccnode_type) +{ + switch (ccnode_type) { + case DPA_CLS_TBL_HASH: + cnt_cb->f_get_cnt_stats = get_cnt_ccnode_hash_stats; + break; + case DPA_CLS_TBL_INDEXED: + cnt_cb->f_get_cnt_stats = get_cnt_ccnode_index_stats; + break; + case DPA_CLS_TBL_EXACT_MATCH: + cnt_cb->f_get_cnt_stats = get_cnt_ccnode_match_stats; + break; + default: + log_err("Unsupported Classification Node type %d", ccnode_type); + return -EINVAL; + } + return 0; +} + static int check_tbl_cls_counter(struct dpa_stats_cnt_cb *cnt_cb, struct dpa_stats_lookup_key *entry) { @@ -132,7 +172,6 @@ static int check_tbl_cls_counter(struct dpa_stats_cnt_cb *cnt_cb, dump_lookup_key(&entry->key); return -EIO; } - cnt_cb->f_get_cnt_stats = get_cnt_cls_tbl_hash_stats; 
break; case DPA_CLS_TBL_INDEXED: err = FM_PCD_MatchTableGetKeyStatistics( @@ -145,7 +184,6 @@ static int check_tbl_cls_counter(struct dpa_stats_cnt_cb *cnt_cb, dump_lookup_key(&entry->key); return -EIO; } - cnt_cb->f_get_cnt_stats = get_cnt_cls_tbl_index_stats; break; case DPA_CLS_TBL_EXACT_MATCH: err = FM_PCD_MatchTableFindNGetKeyStatistics(entry->cc_node, @@ -159,7 +197,6 @@ static int check_tbl_cls_counter(struct dpa_stats_cnt_cb *cnt_cb, dump_lookup_key(&entry->key); return -EINVAL; } - cnt_cb->f_get_cnt_stats = get_cnt_cls_tbl_match_stats; break; default: log_err("Unsupported DPA Classifier table type %d\n", @@ -189,7 +226,6 @@ static int check_ccnode_counter(struct dpa_stats_cnt_cb *cnt_cb, dump_lookup_key(key); return -EIO; } - cnt_cb->f_get_cnt_stats = get_cnt_ccnode_hash_stats; break; case DPA_STATS_CLASSIF_NODE_INDEXED: err = FM_PCD_MatchTableGetKeyStatistics( @@ -203,7 +239,6 @@ static int check_ccnode_counter(struct dpa_stats_cnt_cb *cnt_cb, dump_lookup_key(key); return -EIO; } - cnt_cb->f_get_cnt_stats = get_cnt_ccnode_index_stats; break; case DPA_STATS_CLASSIF_NODE_EXACT_MATCH: err = FM_PCD_MatchTableFindNGetKeyStatistics( @@ -217,7 +252,6 @@ static int check_ccnode_counter(struct dpa_stats_cnt_cb *cnt_cb, dump_lookup_key(key); return -EINVAL; } - cnt_cb->f_get_cnt_stats = get_cnt_ccnode_match_stats; break; default: log_err("Unsupported Classification Node type %d", @@ -227,6 +261,47 @@ static int check_ccnode_counter(struct dpa_stats_cnt_cb *cnt_cb, return 0; } +static int check_ccnode_miss_counter(void *cc_node, uint32_t id, + enum dpa_stats_classif_node_type ccnode_type) +{ + t_FmPcdCcKeyStatistics stats; + int err; + + switch (ccnode_type) { + case DPA_STATS_CLASSIF_NODE_HASH: + err = FM_PCD_HashTableGetMissStatistics(cc_node, &stats); + if (err != 0) { + log_err("Check failed for Classification Node counter " + "id %d due to incorrect parameters: handle=" + "0x%p\n", id, cc_node); + return -EIO; + } + break; + case DPA_STATS_CLASSIF_NODE_INDEXED: 
+ err = FM_PCD_MatchTableGetMissStatistics(cc_node, &stats); + if (err != 0) { + log_err("Check failed for Classification Node counter " + "id %d due to incorrect parameters: handle=0x%p" + "\n", id, cc_node); + return -EIO; + } + break; + case DPA_STATS_CLASSIF_NODE_EXACT_MATCH: + err = FM_PCD_MatchTableGetMissStatistics(cc_node, &stats); + if (err != 0) { + log_err("Check failed for Classification Node counter " + "id %d due to incorrect parameters: handle=0x%p" + "\n", id, cc_node); + return -EINVAL; + } + break; + default: + log_err("Unsupported Classification Node type %d", ccnode_type); + return -EINVAL; + } + return 0; +} + static int get_new_cnt(struct dpa_stats *dpa_stats, struct dpa_stats_cnt_cb **cnt_cb) { @@ -344,8 +419,7 @@ static int put_cnt(struct dpa_stats *dpa_stats, struct dpa_stats_cnt_cb *cnt_cb) } /* Mark the Counter id as 'not used' */ - dpa_stats->used_cnt_ids[cnt_cb->index] = - DPA_OFFLD_INVALID_OBJECT_ID; + dpa_stats->used_cnt_ids[cnt_cb->index] = DPA_OFFLD_INVALID_OBJECT_ID; /* Clear all 'cnt_cb' information */ cnt_cb->index = DPA_OFFLD_INVALID_OBJECT_ID; @@ -627,7 +701,7 @@ static int free_resources(void) /* Sanity check */ if (!gbl_dpa_stats) { log_err("DPA Stats component is not initialized\n"); - return; + return 0; } dpa_stats = gbl_dpa_stats; @@ -1149,11 +1223,20 @@ static int set_frag_manip(int td, struct dpa_stats_lookup_key *entry) struct t_FmPcdManipStats stats; int err = 0; - err = dpa_classif_table_lookup_by_key(td, &entry->key, &action); - if (err != 0) { - log_err("Cannot retrieve next action parameters from table " - "%d\n", td); - return -EINVAL; + if (entry->miss_key) { + err = dpa_classif_get_miss_action(td, &action); + if (err != 0) { + log_err("Cannot retrieve miss action parameters from " + "table %d\n", td); + return -EINVAL; + } + } else { + err = dpa_classif_table_lookup_by_key(td, &entry->key, &action); + if (err != 0) { + log_err("Cannot retrieve next action parameters from " + "table %d\n", td); + return -EINVAL; 
+ } } if (action.type != DPA_CLS_TBL_ACTION_ENQ) { @@ -1175,7 +1258,6 @@ static int set_frag_manip(int td, struct dpa_stats_lookup_key *entry) log_err("Invalid Fragmentation manip handle\n"); return -EINVAL; } - return 0; } @@ -1447,13 +1529,6 @@ static int set_cnt_classif_tbl_cb(struct dpa_stats_cnt_cb *cnt_cb, return -EINVAL; } - /* Copy the key descriptor */ - err = copy_key_descriptor(&prm.key, &cnt_tbl_cb->keys[0].key); - if (err != 0) { - log_err("Cannot copy key descriptor from user parameters\n"); - return -EINVAL; - } - /* Store CcNode handle and set number of keys to one */ cnt_tbl_cb->keys[0].cc_node = cls_tbl.cc_node; cnt_tbl_cb->keys[0].valid = TRUE; @@ -1462,11 +1537,35 @@ static int set_cnt_classif_tbl_cb(struct dpa_stats_cnt_cb *cnt_cb, /* Store DPA Classifier Table type */ cnt_tbl_cb->type = cls_tbl.type; - /* Check the Classifier Table counter */ - err = check_tbl_cls_counter(cnt_cb, &cnt_tbl_cb->keys[0]); + /* Set retrieve function depending on table type */ + err = set_cnt_classif_tbl_retrieve_func(cnt_cb); if (err != 0) return -EINVAL; + /* Determine if counter is for 'miss' entry or for a valid key */ + if (!prm.key) { + cnt_tbl_cb->keys[0].miss_key = TRUE; + + /* Check the Classifier Table counter parameters for "miss" */ + err = check_ccnode_miss_counter(cnt_tbl_cb->keys[0].cc_node, + cnt_cb->id, cnt_tbl_cb->type); + if (err != 0) + return -EINVAL; + } else { + /* Copy the key descriptor */ + err = copy_key_descriptor(prm.key, &cnt_tbl_cb->keys[0].key); + if (err != 0) { + log_err("Cannot copy key descriptor from user " + "parameters\n"); + return -EINVAL; + } + + /* Check the Classifier Table counter */ + err = check_tbl_cls_counter(cnt_cb, &cnt_tbl_cb->keys[0]); + if (err != 0) + return -EINVAL; + } + if (frag_stats) { err = set_frag_manip(prm.td, &cnt_tbl_cb->keys[0]); if (err < 0) { @@ -1518,23 +1617,40 @@ static int set_cnt_ccnode_cb(struct dpa_stats_cnt_cb *cnt_cb, return -EFAULT; } - /* Copy the key descriptor */ - err = 
copy_key_descriptor(&prm.key, &cnt_cb->ccnode_cb.keys[0]); - if (err != 0) { - log_err("Cannot copy key descriptor from user parameters\n"); - return -EINVAL; - } - /* Store CcNode handle and set number of keys to one */ cnt_cb->ccnode_cb.cc_node = prm.cc_node; cnt_cb->members_num = 1; - /* Check the Classifier Node counter parameters */ - err = check_ccnode_counter(cnt_cb, - prm.ccnode_type, &cnt_cb->ccnode_cb.keys[0]); + /* Set retrieve function depending on counter type */ + err = set_cnt_classif_node_retrieve_func(cnt_cb, prm.ccnode_type); if (err != 0) return -EINVAL; + if (!params->classif_node_params.key) { + /* Set the key byte to NULL, to mark it for 'miss' entry */ + cnt_cb->ccnode_cb.keys[0].byte = NULL; + + /* Check the Classifier Node counter parameters for 'miss' */ + err = check_ccnode_miss_counter(cnt_cb->ccnode_cb.cc_node, + cnt_cb->id, prm.ccnode_type); + if (err != 0) + return -EINVAL; + } else { + /* Copy the key descriptor */ + err = copy_key_descriptor(prm.key, &cnt_cb->ccnode_cb.keys[0]); + if (err != 0) { + log_err("Cannot copy key descriptor from user " + "parameters\n"); + return -EINVAL; + } + + /* Check the Classifier Node counter parameters */ + err = check_ccnode_counter(cnt_cb, prm.ccnode_type, + &cnt_cb->ccnode_cb.keys[0]); + if (err != 0) + return -EINVAL; + } + /* Map Classif Node counter selection to CcNode statistics */ cnt_sel_to_stats(&cnt_cb->info, dpa_stats->stats_sel[DPA_STATS_CNT_CLASSIF_NODE], @@ -1899,36 +2015,49 @@ static int set_cls_cnt_plcr_cb(struct dpa_stats_cnt_cb *cnt_cb, } static int set_cls_cnt_classif_tbl_pair( - struct dpa_stats_cnt_classif_tbl_cb *cnt_tbl_cb, int td, + struct dpa_stats_cnt_cb *cnt_cb, int td, const struct dpa_offload_lookup_key_pair *pair, struct dpa_stats_lookup_key *lookup_key) { + struct dpa_stats_cnt_classif_tbl_cb *cnt_tbl_cb = &cnt_cb->tbl_cb; struct dpa_cls_tbl_params cls_tbl; struct dpa_offload_lookup_key tbl_key; struct dpa_cls_tbl_action action; int err = 0; - /* Check that key byte 
is not NULL */ - if (!pair->first_key.byte) { - log_err("First key descriptor byte of the user pair cannot be " - "NULL for table descriptor %d\n", td); - return -EFAULT; - } + /* If either the entire 'pair' or the first key is NULL, then retrieve + * the action associated with the 'miss action '*/ + if ((!pair) || (pair && !pair->first_key)) { + err = dpa_classif_get_miss_action(td, &action); + if (err != 0) { + log_err("Cannot retrieve miss action parameters for " + "table descriptor %d\n", td); + return -EINVAL; + } + } else { + /* Check that key byte is not NULL */ + if (!pair->first_key->byte) { + log_err("First key descriptor byte of the user pair " + "cannot be NULL for table descriptor %d\n", td); + return -EFAULT; + } - /* Copy first key descriptor parameters*/ - err = copy_key_descriptor(&pair->first_key, &tbl_key); - if (err != 0) { - log_err("Cannot copy first key descriptor of the user pair\n"); - return -EINVAL; - } + /* Copy first key descriptor parameters*/ + err = copy_key_descriptor(pair->first_key, &tbl_key); + if (err != 0) { + log_err("Cannot copy second key descriptor of " + "the user pair\n"); + return -EINVAL; + } - /* Use the first key of the pair to lookup in the classifier - * table the next table connected on a "next-action" */ - err = dpa_classif_table_lookup_by_key(td, &tbl_key, &action); - if (err != 0) { - log_err("Cannot retrieve next action parameters for table " - "descriptor %d\n", td); - return -EINVAL; + /* Use the first key of the pair to lookup in the classifier + * table the next table connected on a "next-action" */ + err = dpa_classif_table_lookup_by_key(td, &tbl_key, &action); + if (err != 0) { + log_err("Cannot retrieve next action parameters for " + "table descriptor %d\n", td); + return -EINVAL; + } } if (action.type != DPA_CLS_TBL_ACTION_NEXT_TABLE) { @@ -1948,23 +2077,41 @@ static int set_cls_cnt_classif_tbl_pair( /* Store DPA Classifier Table type */ cnt_tbl_cb->type = cls_tbl.type; + /* Set retrieve function 
depending on table type */ + set_cnt_classif_tbl_retrieve_func(cnt_cb); + /* Store CcNode handle */ lookup_key->cc_node = cls_tbl.cc_node; - /* Set as lookup key the second key descriptor from the pair */ - err = copy_key_descriptor(&pair->second_key, &lookup_key->key); - if (err != 0) { - log_err("Cannot copy second key descriptor of the user pair\n"); - return -EINVAL; + if (!pair || (pair && !pair->second_key)) { + /* Set as the key as "for miss" */ + lookup_key->miss_key = TRUE; + + /* Check the Classifier Table counter parameters for "miss" */ + err = check_ccnode_miss_counter(lookup_key->cc_node, + cnt_cb->id, cnt_tbl_cb->type); + } else { + lookup_key->miss_key = FALSE; + + /* Set as lookup key the second key descriptor from the pair */ + err = copy_key_descriptor(pair->second_key, &lookup_key->key); + if (err != 0) { + log_err("Cannot copy second key descriptor of " + "the user pair\n"); + return -EINVAL; + } + + /* Check the Classifier Table counter */ + err = check_tbl_cls_counter(cnt_cb, lookup_key); } - return 0; + return err; } static int set_cls_cnt_classif_tbl_cb(struct dpa_stats_cnt_cb *cnt_cb, const struct dpa_stats_cls_cnt_params *params) { - struct dpa_stats_cnt_classif_tbl_cb *cnt_tbl_cb = &cnt_cb->tbl_cb; + struct dpa_stats_cnt_classif_tbl_cb *tbl_cb = &cnt_cb->tbl_cb; struct dpa_stats_cls_cnt_classif_tbl prm = params->classif_tbl_params; struct dpa_stats *dpa_stats = cnt_cb->dpa_stats; struct dpa_cls_tbl_params cls_tbl; @@ -2004,11 +2151,17 @@ static int set_cls_cnt_classif_tbl_cb(struct dpa_stats_cnt_cb *cnt_cb, return -EINVAL; } - cnt_tbl_cb->td = params->classif_tbl_params.td; + tbl_cb->td = params->classif_tbl_params.td; cnt_cb->members_num = params->class_members; switch (prm.key_type) { case DPA_STATS_CLASSIF_SINGLE_KEY: + if (!prm.keys) { + log_err("Pointer to the array of keys cannot be NULL " + "for counter id %d\n", cnt_cb->id); + return -EINVAL; + } + /* Get CcNode from table descriptor */ err = dpa_classif_table_get_params(prm.td, 
&cls_tbl); if (err != 0) { @@ -2018,21 +2171,37 @@ static int set_cls_cnt_classif_tbl_cb(struct dpa_stats_cnt_cb *cnt_cb, } /* Store DPA Classifier Table type */ - cnt_tbl_cb->type = cls_tbl.type; + tbl_cb->type = cls_tbl.type; + + /* Set retrieve function depending on table type */ + set_cnt_classif_tbl_retrieve_func(cnt_cb); for (i = 0; i < params->class_members; i++) { /* Store CcNode handle */ - cnt_tbl_cb->keys[i].cc_node = cls_tbl.cc_node; + tbl_cb->keys[i].cc_node = cls_tbl.cc_node; + + /* Determine if key represents a 'miss' entry */ + if (!prm.keys[i]) { + tbl_cb->keys[i].miss_key = TRUE; + tbl_cb->keys[i].valid = TRUE; + + err = check_ccnode_miss_counter( + tbl_cb->keys[i].cc_node, + cnt_cb->id, tbl_cb->type); + if (err != 0) + return -EINVAL; + continue; + } - if (!prm.keys[i].byte) { + if (!prm.keys[i]->byte) { /* Key is not valid for now */ - cnt_tbl_cb->keys[i].valid = FALSE; + tbl_cb->keys[i].valid = FALSE; continue; } /* Copy the key descriptor */ - err = copy_key_descriptor(&prm.keys[i], - &cnt_tbl_cb->keys[i].key); + err = copy_key_descriptor(prm.keys[i], + &tbl_cb->keys[i].key); if (err != 0) { log_err("Cannot copy key descriptor from user " "parameters\n"); @@ -2040,37 +2209,39 @@ static int set_cls_cnt_classif_tbl_cb(struct dpa_stats_cnt_cb *cnt_cb, } /* Check the Classifier Table counter */ - err = check_tbl_cls_counter(cnt_cb, - &cnt_tbl_cb->keys[i]); + err = check_tbl_cls_counter(cnt_cb, &tbl_cb->keys[i]); if (err != 0) return -EINVAL; - cnt_tbl_cb->keys[i].valid = TRUE; + tbl_cb->keys[i].valid = TRUE; } break; case DPA_STATS_CLASSIF_PAIR_KEY: + if (!prm.pairs) { + log_err("Pointer to the array of pairs cannot be NULL " + "for counter id %d\n", cnt_cb->id); + return -EINVAL; + } + for (i = 0; i < params->class_members; i++) { - if (!prm.pairs[i].first_key.byte) { - /* Key is not valid for now */ - cnt_tbl_cb->keys[i].valid = FALSE; - continue; + if (prm.pairs[i]) { + if (prm.pairs[i]->first_key) { + if (!prm.pairs[i]->first_key->byte) { + /* 
Key is not valid for now */ + tbl_cb->keys[i].valid = FALSE; + continue; + } + } } - err = set_cls_cnt_classif_tbl_pair(cnt_tbl_cb, prm.td, - &prm.pairs[i], &cnt_tbl_cb->keys[i]); + err = set_cls_cnt_classif_tbl_pair(cnt_cb, prm.td, + prm.pairs[i], &tbl_cb->keys[i]); if (err != 0) { log_err("Cannot set classifier table pair key " "for counter id %d\n", cnt_cb->id); return -EINVAL; } - - /* Check the Classifier Table counter */ - err = check_tbl_cls_counter(cnt_cb, - &cnt_tbl_cb->keys[i]); - if (err != 0) - return -EINVAL; - - cnt_tbl_cb->keys[i].valid = TRUE; + tbl_cb->keys[i].valid = TRUE; } break; default: @@ -2084,7 +2255,7 @@ static int set_cls_cnt_classif_tbl_cb(struct dpa_stats_cnt_cb *cnt_cb, if (frag_stats) { /* For every valid key, retrieve the hmcd */ for (i = 0; i < params->class_members; i++) { - if (!cnt_tbl_cb->keys[i].valid) + if (!tbl_cb->keys[i].valid) continue; err = set_frag_manip(prm.td, &cnt_cb->tbl_cb.keys[i]); @@ -2140,24 +2311,46 @@ static int set_cls_cnt_ccnode_cb(struct dpa_stats_cnt_cb *cnt_cb, return -EFAULT; } + if (!prm.keys) { + log_err("Pointer to the array of keys cannot be NULL " + "for counter id %d\n", cnt_cb->id); + return -EINVAL; + } + cnt_cb->ccnode_cb.cc_node = prm.cc_node; cnt_cb->members_num = params->class_members; + /* Set retrieve function depending on counter type */ + err = set_cnt_classif_node_retrieve_func(cnt_cb, prm.ccnode_type); + if (err != 0) + return -EINVAL; + for (i = 0; i < params->class_members; i++) { - /* Copy the key descriptor */ - err = copy_key_descriptor(&prm.keys[i], - &cnt_cb->ccnode_cb.keys[i]); - if (err != 0) { - log_err("Cannot copy key descriptor from user " - "parameters\n"); - return -EINVAL; - } + if (!prm.keys[i]) { + /* Set the key byte to NULL, to mark it for 'miss' */ + cnt_cb->ccnode_cb.keys[i].byte = NULL; - /* Check the Classifier Node counter parameters */ - err = check_ccnode_counter(cnt_cb, - prm.ccnode_type, &cnt_cb->ccnode_cb.keys[i]); - if (err != 0) - return -EINVAL; + /* 
Check the Classifier Node counter parameters */ + err = check_ccnode_miss_counter(prm.cc_node, + cnt_cb->id, prm.ccnode_type); + if (err != 0) + return -EINVAL; + } else { + /* Copy the key descriptor */ + err = copy_key_descriptor(prm.keys[i], + &cnt_cb->ccnode_cb.keys[i]); + if (err != 0) { + log_err("Cannot copy key descriptor from user " + "parameters\n"); + return -EINVAL; + } + + /* Check the Classifier Node counter parameters */ + err = check_ccnode_counter(cnt_cb, prm.ccnode_type, + &cnt_cb->ccnode_cb.keys[i]); + if (err != 0) + return -EINVAL; + } } /* Map Classif Node counter selection to CcNode statistics */ @@ -2321,8 +2514,7 @@ static int set_cls_cnt_traffic_mng_cb(struct dpa_stats_cnt_cb *cnt_cb, } int set_classif_tbl_member(const struct dpa_stats_cls_member_params *prm, - int member_index, - struct dpa_stats_cnt_cb *cnt_cb) + int mbr_idx, struct dpa_stats_cnt_cb *cnt_cb) { struct dpa_stats_cnt_classif_tbl_cb *tbl_cb = &cnt_cb->tbl_cb; uint32_t i = 0; @@ -2337,67 +2529,80 @@ int set_classif_tbl_member(const struct dpa_stats_cls_member_params *prm, } /* Check that member index does not exceeds class size */ - if (member_index < 0 || member_index >= cnt_cb->members_num) { + if (mbr_idx < 0 || mbr_idx >= cnt_cb->members_num) { log_err("Parameter member_index %d must be in range (0 - %d) " - "for counter id %d\n", member_index, + "for counter id %d\n", mbr_idx, cnt_cb->members_num - 1, cnt_cb->id); return -EINVAL; } /* Release the old key memory */ - kfree(tbl_cb->keys[member_index].key.byte); - tbl_cb->keys[member_index].key.byte = NULL; + kfree(tbl_cb->keys[mbr_idx].key.byte); + tbl_cb->keys[mbr_idx].key.byte = NULL; - kfree(tbl_cb->keys[member_index].key.mask); - tbl_cb->keys[member_index].key.mask = NULL; + kfree(tbl_cb->keys[mbr_idx].key.mask); + tbl_cb->keys[mbr_idx].key.mask = NULL; /* Reset the statistics */ for (i = 0; i < cnt_cb->info.stats_num; i++) { - cnt_cb->info.stats[member_index][i] = 0; - cnt_cb->info.last_stats[member_index][i] = 0; - } 
- - if ((prm->type == DPA_STATS_CLS_MEMBER_SINGLE_KEY && !prm->key.byte) || - (prm->type == DPA_STATS_CLS_MEMBER_PAIR_KEY && - !prm->pair.first_key.byte)) { - /* Mark the key as invalid */ - tbl_cb->keys[member_index].valid = FALSE; - return 0; - } else { - tbl_cb->keys[member_index].valid = TRUE; - - if (prm->type == DPA_STATS_CLS_MEMBER_SINGLE_KEY) { + cnt_cb->info.stats[mbr_idx][i] = 0; + cnt_cb->info.last_stats[mbr_idx][i] = 0; + } + + if (prm->type == DPA_STATS_CLS_MEMBER_SINGLE_KEY) { + if (!prm->key) { + /* Mark the key as 'miss' entry */ + tbl_cb->keys[mbr_idx].miss_key = TRUE; + tbl_cb->keys[mbr_idx].valid = TRUE; + return 0; + } else if (!prm->key->byte) { + /* Mark the key as invalid */ + tbl_cb->keys[mbr_idx].valid = FALSE; + tbl_cb->keys[mbr_idx].miss_key = FALSE; + return 0; + } else { /* Copy the key descriptor */ - err = copy_key_descriptor(&prm->key, - &tbl_cb->keys[member_index].key); + err = copy_key_descriptor(prm->key, + &tbl_cb->keys[mbr_idx].key); if (err != 0) { log_err("Cannot copy key descriptor from user " "parameters\n"); return -EINVAL; } - } else { - err = set_cls_cnt_classif_tbl_pair(tbl_cb, tbl_cb->td, - &prm->pair, &tbl_cb->keys[member_index]); - if (err != 0) { - log_err("Cannot configure the pair key for " - "counter id %d of member %d\n", - cnt_cb->id, member_index); - return -EINVAL; - } } - if (cnt_cb->f_get_cnt_stats != get_cnt_cls_tbl_frag_stats) { + } else { + if (prm->pair) + if (prm->pair->first_key) + if (!prm->pair->first_key->byte) { + /* Mark the key as invalid */ + tbl_cb->keys[mbr_idx].valid = FALSE; + tbl_cb->keys[mbr_idx].miss_key = FALSE; + return 0; + } + err = set_cls_cnt_classif_tbl_pair(cnt_cb, tbl_cb->td, + prm->pair, &tbl_cb->keys[mbr_idx]); + if (err != 0) { + log_err("Cannot configure the pair key for counter id " + "%d of member %d\n", cnt_cb->id, mbr_idx); + return -EINVAL; + } + } + + tbl_cb->keys[mbr_idx].valid = TRUE; + + if (cnt_cb->f_get_cnt_stats != get_cnt_cls_tbl_frag_stats) { + if 
(!tbl_cb->keys[mbr_idx].miss_key) { err = check_tbl_cls_counter(cnt_cb, - &tbl_cb->keys[member_index]); + &tbl_cb->keys[mbr_idx]); if (err != 0) return -EINVAL; - } else{ - err = set_frag_manip(tbl_cb->td, - &tbl_cb->keys[member_index]); - if (err < 0) { - log_err("Invalid Fragmentation manip handle for" - " counter id %d\n", cnt_cb->id); - return -EINVAL; - } + } + } else{ + err = set_frag_manip(tbl_cb->td, &tbl_cb->keys[mbr_idx]); + if (err < 0) { + log_err("Invalid Fragmentation manip handle for" + " counter id %d\n", cnt_cb->id); + return -EINVAL; } } @@ -2648,11 +2853,19 @@ static int get_cnt_cls_tbl_match_stats(struct dpa_stats_req_cb *req_cb, cnt_cb->info.stats_num; continue; } - err = FM_PCD_MatchTableFindNGetKeyStatistics( - cnt_cb->tbl_cb.keys[i].cc_node, - cnt_cb->tbl_cb.keys[i].key.size, - cnt_cb->tbl_cb.keys[i].key.byte, - cnt_cb->tbl_cb.keys[i].key.mask, &stats); + + if (cnt_cb->tbl_cb.keys[i].miss_key) { + err = FM_PCD_MatchTableGetMissStatistics( + cnt_cb->tbl_cb.keys[i].cc_node, &stats); + } else { + err = FM_PCD_MatchTableFindNGetKeyStatistics( + cnt_cb->tbl_cb.keys[i].cc_node, + cnt_cb->tbl_cb.keys[i].key.size, + cnt_cb->tbl_cb.keys[i].key.byte, + cnt_cb->tbl_cb.keys[i].key.mask, + &stats); + } + if (err != 0) { log_err("Cannot retrieve Classifier Exact Match Table " "statistics for counter id %d\n", cnt_cb->id); @@ -2682,11 +2895,17 @@ static int get_cnt_cls_tbl_hash_stats(struct dpa_stats_req_cb *req_cb, cnt_cb->info.stats_num; continue; } - err = FM_PCD_HashTableFindNGetKeyStatistics( - cnt_cb->tbl_cb.keys[i].cc_node, - cnt_cb->tbl_cb.keys[i].key.size, - cnt_cb->tbl_cb.keys[i].key.byte, - &stats); + + if (cnt_cb->tbl_cb.keys[i].miss_key) { + err = FM_PCD_HashTableGetMissStatistics( + cnt_cb->tbl_cb.keys[i].cc_node, &stats); + } else { + err = FM_PCD_HashTableFindNGetKeyStatistics( + cnt_cb->tbl_cb.keys[i].cc_node, + cnt_cb->tbl_cb.keys[i].key.size, + cnt_cb->tbl_cb.keys[i].key.byte, + &stats); + } if (err != 0) { log_err("Cannot retrieve 
Classifier Hash Table " "statistics for counter id %d\n", cnt_cb->id); @@ -2716,10 +2935,17 @@ static int get_cnt_cls_tbl_index_stats(struct dpa_stats_req_cb *req_cb, cnt_cb->info.stats_num; continue; } - err = FM_PCD_MatchTableGetKeyStatistics( - cnt_cb->tbl_cb.keys[i].cc_node, - cnt_cb->tbl_cb.keys[i].key.byte[0], - &stats); + + if (cnt_cb->tbl_cb.keys[i].miss_key) { + err = FM_PCD_MatchTableGetMissStatistics( + cnt_cb->tbl_cb.keys[i].cc_node, &stats); + } else { + err = FM_PCD_MatchTableGetKeyStatistics( + cnt_cb->tbl_cb.keys[i].cc_node, + cnt_cb->tbl_cb.keys[i].key.byte[0], + &stats); + } + if (err != 0) { log_err("Cannot retrieve Classifier Indexed Table " "statistics for counter id %d\n", cnt_cb->id); @@ -2772,11 +2998,16 @@ static int get_cnt_ccnode_match_stats(struct dpa_stats_req_cb *req_cb, int err = 0; for (i = 0; i < cnt_cb->members_num; i++) { - err = FM_PCD_MatchTableFindNGetKeyStatistics( + if (!cnt_cb->ccnode_cb.keys[i].byte) { + err = FM_PCD_MatchTableGetMissStatistics( + cnt_cb->ccnode_cb.cc_node, &stats); + } else { + err = FM_PCD_MatchTableFindNGetKeyStatistics( cnt_cb->ccnode_cb.cc_node, cnt_cb->ccnode_cb.keys[i].size, cnt_cb->ccnode_cb.keys[i].byte, cnt_cb->ccnode_cb.keys[i].mask, &stats); + } if (err != 0) { log_err("Cannot retrieve Classification Cc Node Exact " "Match statistics for counter id %d\n", @@ -2797,10 +3028,16 @@ static int get_cnt_ccnode_hash_stats(struct dpa_stats_req_cb *req_cb, int err = 0; for (i = 0; i < cnt_cb->members_num; i++) { - err = FM_PCD_HashTableFindNGetKeyStatistics( + if (!cnt_cb->ccnode_cb.keys[i].byte) { + err = FM_PCD_HashTableGetMissStatistics( + cnt_cb->ccnode_cb.cc_node, &stats); + } else { + err = FM_PCD_HashTableFindNGetKeyStatistics( cnt_cb->ccnode_cb.cc_node, cnt_cb->ccnode_cb.keys[i].size, cnt_cb->ccnode_cb.keys[i].byte, &stats); + } + if (err != 0) { log_err("Cannot retrieve Classification Cc Node Hash " "statistics for counter id %d\n", cnt_cb->id); @@ -2820,9 +3057,14 @@ static int 
get_cnt_ccnode_index_stats(struct dpa_stats_req_cb *req_cb, int err = 0; for (i = 0; i < cnt_cb->members_num; i++) { - err = FM_PCD_MatchTableGetKeyStatistics( + if (!cnt_cb->ccnode_cb.keys[i].byte) { + err = FM_PCD_MatchTableGetMissStatistics( + cnt_cb->ccnode_cb.cc_node, &stats); + } else { + err = FM_PCD_MatchTableGetKeyStatistics( cnt_cb->ccnode_cb.cc_node, cnt_cb->ccnode_cb.keys[i].byte[0], &stats); + } if (err != 0) { log_err("Cannot retrieve Classification Cc Node Index " "statistics for counter id %d\n", cnt_cb->id); @@ -3269,6 +3511,7 @@ int dpa_stats_create_class_counter(int dpa_stats_id, break; case DPA_STATS_CNT_CLASSIF_TBL: cnt_cb->type = DPA_STATS_CNT_CLASSIF_TBL; + cnt_cb->f_get_cnt_stats = get_cnt_cls_tbl_match_stats; err = set_cls_cnt_classif_tbl_cb(cnt_cb, params); if (err != 0) { @@ -3392,7 +3635,7 @@ int dpa_stats_modify_class_counter(int dpa_stats_cnt_id, } if (params->type == DPA_STATS_CLS_MEMBER_SINGLE_KEY || - params->type == DPA_STATS_CLS_MEMBER_PAIR_KEY) { + params->type == DPA_STATS_CLS_MEMBER_PAIR_KEY) { /* Modify classifier table class member */ err = set_classif_tbl_member(params, member_index, cnt_cb); if (err < 0) { diff --git a/drivers/staging/fsl_dpa_offload/dpa_stats.h b/drivers/staging/fsl_dpa_offload/dpa_stats.h index a429258..5843dca 100644 --- a/drivers/staging/fsl_dpa_offload/dpa_stats.h +++ b/drivers/staging/fsl_dpa_offload/dpa_stats.h @@ -114,6 +114,7 @@ struct dpa_stats_lookup_key { struct dpa_offload_lookup_key key; /* Key descriptor */ bool valid; /* Lookup key is valid */ void *frag; /* Fragmentation handle corresponding to this key */ + bool miss_key; /* Provide statistics for miss entry */ }; /* DPA Stats Classif Table control block */ diff --git a/drivers/staging/fsl_dpa_offload/dpa_stats_ioctl.h b/drivers/staging/fsl_dpa_offload/dpa_stats_ioctl.h index 070a6f0..d375a0f 100644 --- a/drivers/staging/fsl_dpa_offload/dpa_stats_ioctl.h +++ b/drivers/staging/fsl_dpa_offload/dpa_stats_ioctl.h @@ -116,14 +116,14 @@ struct 
compat_ioc_dpa_offld_lookup_key { struct dpa_stats_compat_cnt_classif_tbl { int td; - struct compat_ioc_dpa_offld_lookup_key key; + compat_uptr_t key; unsigned int cnt_sel; }; struct dpa_stats_compat_cnt_classif_node { compat_uptr_t cc_node; enum dpa_stats_classif_node_type ccnode_type; - struct compat_ioc_dpa_offld_lookup_key key; + compat_uptr_t key; unsigned int cnt_sel; }; @@ -202,15 +202,15 @@ struct compat_ioc_dpa_stats_cls_cnt_params { }; struct compat_ioc_dpa_offld_lookup_key_pair { - struct compat_ioc_dpa_offld_lookup_key first_key; - struct compat_ioc_dpa_offld_lookup_key second_key; + compat_uptr_t first_key; + compat_uptr_t second_key; }; struct dpa_stats_compat_cls_member_params { enum dpa_stats_cls_member_type type; union { - struct compat_ioc_dpa_offld_lookup_key key; - struct compat_ioc_dpa_offld_lookup_key_pair pair; + compat_uptr_t key; + compat_uptr_t pair; int sa_id; }; }; diff --git a/drivers/staging/fsl_dpa_offload/wrp_dpa_stats.c b/drivers/staging/fsl_dpa_offload/wrp_dpa_stats.c index 5c2cb90..0fdcdc5 100644 --- a/drivers/staging/fsl_dpa_offload/wrp_dpa_stats.c +++ b/drivers/staging/fsl_dpa_offload/wrp_dpa_stats.c @@ -90,7 +90,10 @@ static long wrp_dpa_stats_do_ioctl(struct file *filp, unsigned int cmd, unsigned long args); static int copy_key_descriptor(struct dpa_offload_lookup_key *src, - struct dpa_offload_lookup_key *dst); + struct dpa_offload_lookup_key **dst); + +static int copy_pair_descriptor(struct dpa_offload_lookup_key_pair *src, + struct dpa_offload_lookup_key_pair **dst); static int copy_class_members(void *objs, unsigned int size, void *dst); @@ -103,8 +106,12 @@ static long wrp_dpa_stats_do_compat_ioctl(struct file *filp, unsigned long args); static int copy_key_descriptor_compatcpy( - struct dpa_offload_lookup_key *kprm, - const struct compat_ioc_dpa_offld_lookup_key *uprm); + struct dpa_offload_lookup_key **kprm, + compat_uptr_t uparam); + +static int copy_pair_descriptor_compatcpy( + struct dpa_offload_lookup_key_pair 
**ks_pair, + struct compat_ioc_dpa_offld_lookup_key_pair pair); static void dpa_stats_init_compatcpy( struct ioc_dpa_stats_params *kprm, @@ -625,39 +632,70 @@ static long do_ioctl_stats_free(void *args) static int do_ioctl_stats_create_counter(void *args) { struct ioc_dpa_stats_cnt_params prm; - struct dpa_offload_lookup_key key; + struct dpa_offload_lookup_key *us_key = NULL; long ret = 0; if (copy_from_user(&prm, args, sizeof(prm))) { - log_err("Cannot copy from user the counter parameters\n"); + log_err("Could not copy counter parameters\n"); return -EINVAL; } - if (prm.cnt_params.type == DPA_STATS_CNT_CLASSIF_NODE) - ret = copy_key_descriptor( - &prm.cnt_params.classif_node_params.key, &key); - else if (prm.cnt_params.type == DPA_STATS_CNT_CLASSIF_TBL) - ret = copy_key_descriptor( - &prm.cnt_params.classif_tbl_params.key, &key); - if (ret != 0) { - log_err("Cannot copy the key descriptor\n"); - return -EINVAL; + if (prm.cnt_params.type == DPA_STATS_CNT_CLASSIF_NODE && + prm.cnt_params.classif_node_params.key) { + /* Save user-space provided key */ + us_key = prm.cnt_params.classif_node_params.key; + + /* Override user-space pointers with kernel memory */ + ret = copy_key_descriptor(us_key, + &prm.cnt_params.classif_node_params.key); + if (ret != 0) { + log_err("Could not copy the key descriptor\n"); + kfree(prm.cnt_params.classif_node_params.key); + return ret; + } + } + + if (prm.cnt_params.type == DPA_STATS_CNT_CLASSIF_TBL && + prm.cnt_params.classif_tbl_params.key) { + /* Save user-space provided key */ + us_key = prm.cnt_params.classif_tbl_params.key; + + /* Override user-space pointers with kernel memory */ + ret = copy_key_descriptor(us_key, + &prm.cnt_params.classif_tbl_params.key); + if (ret != 0) { + log_err("Could not copy the key descriptor\n"); + kfree(prm.cnt_params.classif_tbl_params.key); + return ret; + } } ret = dpa_stats_create_counter(prm.stats_id, &prm.cnt_params, &prm.cnt_id); - if (ret < 0) - return ret; - if (copy_to_user(args, &prm, 
sizeof(prm))) { - log_err("Cannot copy to user the counter parameters\n"); - ret = -EINVAL; + if (prm.cnt_params.type == DPA_STATS_CNT_CLASSIF_NODE && + prm.cnt_params.classif_node_params.key) { + /* Release kernel-allocated memory */ + kfree(prm.cnt_params.classif_node_params.key->byte); + kfree(prm.cnt_params.classif_node_params.key->mask); + kfree(prm.cnt_params.classif_node_params.key); + /* Restore user-provided key */ + prm.cnt_params.classif_node_params.key = us_key; } - if (prm.cnt_params.type == DPA_STATS_CNT_CLASSIF_NODE || - prm.cnt_params.type == DPA_STATS_CNT_CLASSIF_TBL) { - kfree(key.byte); - kfree(key.mask); + if (prm.cnt_params.type == DPA_STATS_CNT_CLASSIF_TBL && + prm.cnt_params.classif_tbl_params.key) { + /* Release kernel-allocated memory */ + kfree(prm.cnt_params.classif_tbl_params.key->byte); + kfree(prm.cnt_params.classif_tbl_params.key->mask); + kfree(prm.cnt_params.classif_tbl_params.key); + /* Restore user-provided key */ + prm.cnt_params.classif_tbl_params.key = us_key; + } + + if (copy_to_user(args, &prm, sizeof(prm))) { + log_err("Could not copy to user the Counter ID\n"); + ret = -EINVAL; } return ret; @@ -702,14 +740,14 @@ static int do_ioctl_stats_compat_create_counter(void *args) &kprm.cnt_params.classif_tbl_params, &uprm.cnt_params.classif_tbl_params); if (ret < 0) - return ret; + goto compat_create_counter_cleanup; break; case DPA_STATS_CNT_CLASSIF_NODE: ret = dpa_stats_ccnode_cnt_compatcpy( &kprm.cnt_params.classif_node_params, &uprm.cnt_params.classif_node_params); if (ret < 0) - return ret; + goto compat_create_counter_cleanup; break; case DPA_STATS_CNT_IPSEC: memcpy(&kprm.cnt_params.ipsec_params, @@ -728,7 +766,7 @@ static int do_ioctl_stats_compat_create_counter(void *args) ret = dpa_stats_create_counter(kprm.stats_id, &kprm.cnt_params, &kprm.cnt_id); if (ret < 0) - return ret; + goto compat_create_counter_cleanup; uprm.cnt_id = kprm.cnt_id; @@ -737,14 +775,20 @@ static int do_ioctl_stats_compat_create_counter(void *args) 
ret = -EINVAL; } - if (kprm.cnt_params.type == DPA_STATS_CNT_CLASSIF_NODE) { - kfree(kprm.cnt_params.classif_node_params.key.byte); - kfree(kprm.cnt_params.classif_node_params.key.mask); - } else if (kprm.cnt_params.type == DPA_STATS_CNT_CLASSIF_TBL) { - kfree(kprm.cnt_params.classif_tbl_params.key.byte); - kfree(kprm.cnt_params.classif_tbl_params.key.mask); +compat_create_counter_cleanup: + if (kprm.cnt_params.type == DPA_STATS_CNT_CLASSIF_NODE && + compat_ptr(uprm.cnt_params.classif_node_params.key)) { + kfree(kprm.cnt_params.classif_node_params.key->byte); + kfree(kprm.cnt_params.classif_node_params.key->mask); + kfree(kprm.cnt_params.classif_node_params.key); } + if (kprm.cnt_params.type == DPA_STATS_CNT_CLASSIF_TBL && + compat_ptr(uprm.cnt_params.classif_tbl_params.key)) { + kfree(kprm.cnt_params.classif_tbl_params.key->byte); + kfree(kprm.cnt_params.classif_tbl_params.key->mask); + kfree(kprm.cnt_params.classif_tbl_params.key); + } return ret; } #endif @@ -754,9 +798,10 @@ static int do_ioctl_stats_create_class_counter(void *args) struct ioc_dpa_stats_cls_cnt_params prm; struct dpa_stats_cls_cnt_classif_node *cnode; struct dpa_stats_cls_cnt_classif_tbl *tbl; - struct dpa_offload_lookup_key key; - struct dpa_stats_cnt_eth_src *eth_src = NULL; - uint32_t i = 0, eth_src_size = 0; + struct dpa_offload_lookup_key **us_keys = NULL; + struct dpa_offload_lookup_key_pair **us_pairs = NULL; + uint32_t i = 0; + unsigned int cls_mbrs; void *cls_objs = NULL; int *sa_ids = NULL; long ret = 0; @@ -766,13 +811,14 @@ static int do_ioctl_stats_create_class_counter(void *args) return -EINVAL; } + cls_mbrs = prm.cnt_params.class_members; + switch (prm.cnt_params.type) { - case DPA_STATS_CNT_ETH: - eth_src_size = prm.cnt_params.class_members * - sizeof(struct dpa_stats_cnt_eth_src); + case DPA_STATS_CNT_ETH: { + struct dpa_stats_cnt_eth_src *eth_src = NULL; /* Allocate memory to store the sources array */ - eth_src = kmalloc(eth_src_size, GFP_KERNEL); + eth_src = 
kmalloc(sizeof(*eth_src) * cls_mbrs, GFP_KERNEL); if (!eth_src) { log_err("Cannot allocate memory for Ethernet sources " "array\n"); @@ -781,17 +827,17 @@ static int do_ioctl_stats_create_class_counter(void *args) if (copy_from_user(eth_src, prm.cnt_params.eth_params.src, - eth_src_size)) { + sizeof(*eth_src) * cls_mbrs)) { log_err("Cannot copy array of Ethernet sources\n"); kfree(eth_src); return -EBUSY; } prm.cnt_params.eth_params.src = eth_src; break; + } case DPA_STATS_CNT_REASS: - ret = copy_class_members(cls_objs, - prm.cnt_params.class_members, - prm.cnt_params.reass_params.reass); + ret = copy_class_members(cls_objs, cls_mbrs, + prm.cnt_params.reass_params.reass); if (ret < 0) { log_err("Cannot copy array of Reassembly objects\n"); kfree(cls_objs); @@ -799,8 +845,7 @@ static int do_ioctl_stats_create_class_counter(void *args) } break; case DPA_STATS_CNT_FRAG: - ret = copy_class_members(cls_objs, - prm.cnt_params.class_members, + ret = copy_class_members(cls_objs, cls_mbrs, prm.cnt_params.frag_params.frag); if (ret < 0) { log_err("Cannot copy array of Fragmentation objects\n"); @@ -809,8 +854,7 @@ static int do_ioctl_stats_create_class_counter(void *args) } break; case DPA_STATS_CNT_POLICER: - ret = copy_class_members(cls_objs, - prm.cnt_params.class_members, + ret = copy_class_members(cls_objs, cls_mbrs, prm.cnt_params.plcr_params.plcr); if (ret < 0) { log_err("Cannot copy array of Policer objects\n"); @@ -822,36 +866,50 @@ static int do_ioctl_stats_create_class_counter(void *args) tbl = &prm.cnt_params.classif_tbl_params; if (tbl->key_type == DPA_STATS_CLASSIF_SINGLE_KEY) { - for (i = 0; i < prm.cnt_params.class_members; i++) { - if (!tbl->keys[i].byte) - continue; + /* Save array of user-space provided key pointers */ + us_keys = tbl->keys; + + /* Override user-space pointers with kernel memory */ + tbl->keys = kzalloc(cls_mbrs * + sizeof(**tbl->keys), GFP_KERNEL); + if (!tbl->keys) { + log_err("Cannot allocate kernel memory for " + "lookup keys 
array\n"); + return -ENOMEM; + } - ret = copy_key_descriptor(&tbl->keys[i], &key); + for (i = 0; i < cls_mbrs; i++) { + if (!us_keys[i]) + continue; + ret = copy_key_descriptor(us_keys[i], + &tbl->keys[i]); if (ret != 0) { - log_err("Cannot copy the key descriptor" - "\n"); - return -EINVAL; + log_err("Cannot copy key descriptor\n"); + goto create_cls_counter_cleanup; } } } else if (tbl->key_type == DPA_STATS_CLASSIF_PAIR_KEY) { - for (i = 0; i < prm.cnt_params.class_members; i++) { - if (!tbl->pairs[i].first_key.byte) - continue; - - ret = copy_key_descriptor( - &tbl->pairs[i].first_key, &key); - if (ret != 0) { - log_err("Cannot copy the first key " - "descriptor of pair-key\n"); - return -EINVAL; - } + /* Save array of user-space provided pairs pointers */ + us_pairs = tbl->pairs; + + /* Override user-space pointers with kernel memory */ + tbl->pairs = kzalloc(cls_mbrs * + sizeof(**tbl->pairs), GFP_KERNEL); + if (!tbl->pairs) { + log_err("Cannot allocate kernel memory for " + "lookup pairs array\n"); + return -ENOMEM; + } - ret = copy_key_descriptor( - &tbl->pairs[i].second_key, &key); + for (i = 0; i < cls_mbrs; i++) { + if (!us_pairs[i]) + continue; + ret = copy_pair_descriptor(us_pairs[i], + &tbl->pairs[i]); if (ret != 0) { - log_err("Cannot copy the second key " - "descriptor of pair-key\n"); - return -EINVAL; + log_err("Could not copy the " + "pair key descriptor\n"); + goto create_cls_counter_cleanup; } } } @@ -859,11 +917,28 @@ static int do_ioctl_stats_create_class_counter(void *args) case DPA_STATS_CNT_CLASSIF_NODE: cnode = &prm.cnt_params.classif_node_params; - for (i = 0; i < prm.cnt_params.class_members; i++) { - ret = copy_key_descriptor(&cnode->keys[i], &key); + if (!cnode->keys) { + log_err("Pointer to array of keys can't be NULL\n"); + return -EINVAL; + } + /* Save array of user-space provided key pointers */ + us_keys = cnode->keys; + + /* Override user-space pointers with kernel memory */ + cnode->keys = kzalloc(cls_mbrs * + 
sizeof(**cnode->keys), GFP_KERNEL); + if (!cnode->keys) { + log_err("No more memory to store array of keys\n"); + return -ENOMEM; + } + + for (i = 0; i < cls_mbrs; i++) { + if (!us_keys[i]) + continue; + ret = copy_key_descriptor(us_keys[i], &cnode->keys[i]); if (ret != 0) { log_err("Cannot copy the key descriptor\n"); - return -EINVAL; + goto create_cls_counter_cleanup; } } break; @@ -891,17 +966,10 @@ static int do_ioctl_stats_create_class_counter(void *args) ret = dpa_stats_create_class_counter(prm.stats_id, &prm.cnt_params, &prm.cnt_id); - if (ret < 0) - return ret; - - if (copy_to_user(args, &prm, sizeof(prm))) { - log_err("Cannot copy to user class counter parameters\n"); - ret = -EINVAL; - } - +create_cls_counter_cleanup: switch (prm.cnt_params.type) { case DPA_STATS_CNT_ETH: - kfree(eth_src); + kfree(prm.cnt_params.eth_params.src); break; case DPA_STATS_CNT_REASS: case DPA_STATS_CNT_FRAG: @@ -911,25 +979,53 @@ static int do_ioctl_stats_create_class_counter(void *args) case DPA_STATS_CNT_CLASSIF_TBL: tbl = &prm.cnt_params.classif_tbl_params; - for (i = 0; i < prm.cnt_params.class_members; i++) { - if (tbl->key_type == DPA_STATS_CLASSIF_SINGLE_KEY) { - kfree(tbl->keys[i].byte); - kfree(tbl->keys[i].mask); + if (tbl->key_type == DPA_STATS_CLASSIF_SINGLE_KEY) { + for (i = 0; i < cls_mbrs; i++) { + if (!tbl->keys[i]) + continue; + /* Free allocated memory */ + kfree(tbl->keys[i]->byte); + kfree(tbl->keys[i]->mask); + kfree(tbl->keys[i]); } + /* Restore user-space pointers */ + tbl->keys = us_keys; + } + + if (tbl->key_type == DPA_STATS_CLASSIF_PAIR_KEY) { + for (i = 0; i < cls_mbrs; i++) { + if (!tbl->pairs[i]) + continue; - if (tbl->key_type == DPA_STATS_CLASSIF_PAIR_KEY) { - kfree(tbl->pairs[i].first_key.byte); - kfree(tbl->pairs[i].first_key.mask); - kfree(tbl->pairs[i].second_key.byte); - kfree(tbl->pairs[i].second_key.mask); + if (tbl->pairs[i]->first_key) { + kfree(tbl->pairs[i]->first_key->byte); + kfree(tbl->pairs[i]->first_key->mask); + 
kfree(tbl->pairs[i]->first_key); + } + + if (tbl->pairs[i]->second_key) { + kfree(tbl->pairs[i]->second_key->byte); + kfree(tbl->pairs[i]->second_key->mask); + kfree(tbl->pairs[i]->second_key); + } } + /* Restore user-space pointers */ + tbl->keys = us_keys; } break; case DPA_STATS_CNT_CLASSIF_NODE: - for (i = 0; i < prm.cnt_params.class_members; i++) { - kfree(prm.cnt_params.classif_node_params.keys[i].byte); - kfree(prm.cnt_params.classif_node_params.keys[i].mask); + cnode = &prm.cnt_params.classif_node_params; + + for (i = 0; i < cls_mbrs; i++) { + if (!cnode->keys[i]) + continue; + /* Free allocated memory */ + kfree(cnode->keys[i]->byte); + kfree(cnode->keys[i]->mask); + kfree(cnode->keys[i]); } + /* Restore user-space pointers */ + tbl->keys = us_keys; break; case DPA_STATS_CNT_IPSEC: kfree(sa_ids); @@ -939,6 +1035,11 @@ static int do_ioctl_stats_create_class_counter(void *args) break; } + if (copy_to_user(args, &prm, sizeof(prm))) { + log_err("Cannot copy to user class counter parameters\n"); + ret = -EINVAL; + } + return ret; } @@ -988,46 +1089,18 @@ static int do_ioctl_stats_compat_create_class_counter(void *args) return ret; break; case DPA_STATS_CNT_CLASSIF_TBL: - { - struct dpa_stats_cls_cnt_classif_tbl *tbl = - &kprm_cls->classif_tbl_params; - - ret = dpa_stats_tbl_cls_compatcpy(tbl, + ret = dpa_stats_tbl_cls_compatcpy(&kprm_cls->classif_tbl_params, &uprm_cls->classif_tbl_params, kprm_cls->class_members); if (!ret) break; - - if (tbl->key_type == DPA_STATS_CLASSIF_SINGLE_KEY) { - for (i = 0; i < kprm_cls->class_members; i++) { - kfree(tbl->keys[i].byte); - kfree(tbl->keys[i].mask); - } - kfree(tbl->keys); - - } else if (tbl->key_type == DPA_STATS_CLASSIF_PAIR_KEY) { - for (i = 0; i < kprm_cls->class_members; i++) { - kfree(tbl->pairs[i].first_key.byte); - kfree(tbl->pairs[i].first_key.mask); - kfree(tbl->pairs[i].second_key.byte); - kfree(tbl->pairs[i].second_key.mask); - } - kfree(tbl->pairs); - } - return ret; - } + goto 
compat_create_cls_counter_cleanup; case DPA_STATS_CNT_CLASSIF_NODE: ret = dpa_stats_ccnode_cls_compatcpy( - &kprm_cls->classif_node_params, - &uprm_cls->ccnode_params, - kprm_cls->class_members); + &kprm_cls->classif_node_params, + &uprm_cls->ccnode_params, kprm_cls->class_members); if (!ret) break; - for (i = 0; i < kprm_cls->class_members; i++) { - kfree(kprm_cls->classif_node_params.keys[i].byte); - kfree(kprm_cls->classif_node_params.keys[i].mask); - } - kfree(kprm_cls->classif_node_params.keys); - return ret; + goto compat_create_cls_counter_cleanup; case DPA_STATS_CNT_IPSEC: ret = dpa_stats_ipsec_cls_compatcpy(&kprm_cls->ipsec_params, &uprm_cls->ipsec_params, kprm_cls->class_members); @@ -1038,10 +1111,10 @@ static int do_ioctl_stats_compat_create_class_counter(void *args) break; } - ret = dpa_stats_create_class_counter( - kprm.stats_id, kprm_cls, &kprm.cnt_id); + ret = dpa_stats_create_class_counter(kprm.stats_id, + kprm_cls, &kprm.cnt_id); if (ret < 0) - return ret; + goto compat_create_cls_counter_cleanup; uprm.cnt_id = kprm.cnt_id; @@ -1050,6 +1123,7 @@ static int do_ioctl_stats_compat_create_class_counter(void *args) ret = -EINVAL; } +compat_create_cls_counter_cleanup: switch (uprm.cnt_params.type) { case DPA_STATS_CNT_ETH: kfree(kprm_cls->eth_params.src); @@ -1070,17 +1144,29 @@ static int do_ioctl_stats_compat_create_class_counter(void *args) if (tbl->key_type == DPA_STATS_CLASSIF_SINGLE_KEY) { for (i = 0; i < kprm_cls->class_members; i++) { - kfree(tbl->keys[i].byte); - kfree(tbl->keys[i].mask); + if (!tbl->keys[i]) + continue; + kfree(tbl->keys[i]->byte); + kfree(tbl->keys[i]->mask); + kfree(tbl->keys[i]); } kfree(tbl->keys); } else if (tbl->key_type == DPA_STATS_CLASSIF_PAIR_KEY) { for (i = 0; i < kprm_cls->class_members; i++) { - kfree(tbl->pairs[i].first_key.byte); - kfree(tbl->pairs[i].first_key.mask); - kfree(tbl->pairs[i].second_key.byte); - kfree(tbl->pairs[i].second_key.mask); + if (!tbl->pairs[i]) + continue; + if (tbl->pairs[i]->first_key) 
{ + kfree(tbl->pairs[i]->first_key->byte); + kfree(tbl->pairs[i]->first_key->mask); + kfree(tbl->pairs[i]->first_key); + } + if (tbl->pairs[i]->second_key) { + kfree(tbl->pairs[i]->second_key->byte); + kfree(tbl->pairs[i]->second_key->mask); + kfree(tbl->pairs[i]->second_key); + } + kfree(tbl->pairs[i]); } kfree(tbl->pairs); } @@ -1088,11 +1174,15 @@ static int do_ioctl_stats_compat_create_class_counter(void *args) } case DPA_STATS_CNT_CLASSIF_NODE: for (i = 0; i < kprm_cls->class_members; i++) { - kfree(kprm_cls->classif_node_params.keys[i].byte); - kfree(kprm_cls->classif_node_params.keys[i].mask); + if (!kprm_cls->classif_node_params.keys[i]) + continue; + kfree(kprm_cls->classif_node_params.keys[i]->byte); + kfree(kprm_cls->classif_node_params.keys[i]->mask); + kfree(kprm_cls->classif_node_params.keys[i]); } kfree(kprm_cls->classif_node_params.keys); break; + case DPA_STATS_CNT_IPSEC: kfree(kprm_cls->ipsec_params.sa_id); break; @@ -1108,8 +1198,9 @@ static int do_ioctl_stats_compat_create_class_counter(void *args) static int do_ioctl_stats_modify_class_counter(void *args) { struct ioc_dpa_stats_cls_member_params prm; - struct dpa_offload_lookup_key key; - int ret; + struct dpa_offload_lookup_key *us_key = NULL; + struct dpa_offload_lookup_key_pair *us_pair = NULL; + int ret = 0; if (copy_from_user(&prm, args, sizeof(prm))) { log_err("Cannot copy from user the class counter parameters\n"); @@ -1118,32 +1209,32 @@ static int do_ioctl_stats_modify_class_counter(void *args) switch (prm.params.type) { case DPA_STATS_CLS_MEMBER_SINGLE_KEY: - if (prm.params.key.byte) { - ret = copy_key_descriptor(&prm.params.key, &key); - if (ret != 0) { - log_err("Cannot copy the key descriptor\n"); - return -EINVAL; - } + if (!prm.params.key) + break; + + /* Save user-space provided key */ + us_key = prm.params.key; + + /* Override user-space pointers with kernel memory */ + ret = copy_key_descriptor(us_key, &prm.params.key); + if (ret != 0) { + log_err("Could not copy the key 
descriptor\n"); + goto modify_counter_cleanup; } + break; case DPA_STATS_CLS_MEMBER_PAIR_KEY: - if (prm.params.pair.first_key.byte && - prm.params.pair.first_key.mask) { - ret = copy_key_descriptor( - &prm.params.pair.first_key, &key); - if (ret != 0) { - log_err("Cannot copy the first key descriptor " - "of the pair-key\n"); - return -EINVAL; - } + if (!prm.params.pair) + break; - ret = copy_key_descriptor( - &prm.params.pair.second_key, &key); - if (ret != 0) { - log_err("Cannot copy the second key descriptor " - "of the pair-key\n"); - return -EINVAL; - } + /* Save array of user-space provided pairs pointers */ + us_pair = prm.params.pair; + + /* Override user-space pointers with kernel memory */ + ret = copy_pair_descriptor(us_pair, &prm.params.pair); + if (ret != 0) { + log_err("Could not copy the pair key descriptor\n"); + goto modify_counter_cleanup; } break; case DPA_STATS_CLS_MEMBER_SA_ID: @@ -1154,32 +1245,53 @@ static int do_ioctl_stats_modify_class_counter(void *args) ret = dpa_stats_modify_class_counter(prm.cnt_id, &prm.params, prm.member_index); - if (ret < 0) - return ret; - +modify_counter_cleanup: switch (prm.params.type) { case DPA_STATS_CLS_MEMBER_SINGLE_KEY: - kfree(prm.params.key.byte); - kfree(prm.params.key.mask); + if (prm.params.key) { + /* Release kernel-allocated memory */ + kfree(prm.params.key->byte); + kfree(prm.params.key->mask); + kfree(prm.params.key); + /* Restore user-provided key */ + prm.params.key = us_key; + } break; case DPA_STATS_CLS_MEMBER_PAIR_KEY: - kfree(prm.params.pair.first_key.byte); - kfree(prm.params.pair.first_key.mask); - kfree(prm.params.pair.second_key.byte); - kfree(prm.params.pair.second_key.mask); + if (prm.params.pair) { + if (prm.params.pair->first_key) { + /* Release kernel-allocated memory */ + kfree(prm.params.pair->first_key->byte); + kfree(prm.params.pair->first_key->mask); + kfree(prm.params.pair->first_key); + } + if (prm.params.pair->second_key) { + /* Release kernel-allocated memory */ + 
kfree(prm.params.pair->second_key->byte); + kfree(prm.params.pair->second_key->mask); + kfree(prm.params.pair->second_key); + } + kfree(prm.params.pair); + /* Restore user-provided key */ + prm.params.pair->first_key = us_pair->first_key; + prm.params.pair->second_key = us_pair->second_key; + prm.params.pair = us_pair; + } break; case DPA_STATS_CLS_MEMBER_SA_ID: break; default: + log_err("Invalid class member type\n"); break; } if (copy_to_user(args, &prm, sizeof(prm))) { - log_err("Cannot copy to user the class counter result\n"); - return -EBUSY; + log_err("Could not write " + "dpa_stats_modify_class_counter result\n"); + ret = -EBUSY; } - return 0; + return ret; } #ifdef CONFIG_COMPAT @@ -1187,10 +1299,11 @@ static int do_ioctl_stats_compat_modify_class_counter(void *args) { struct ioc_dpa_stats_cls_member_params kprm; struct compat_ioc_dpa_stats_cls_member_params uprm; + struct compat_ioc_dpa_offld_lookup_key_pair pair; int ret; if (copy_from_user(&uprm, args, sizeof(uprm))) { - log_err("Cannot copy from user the class counter parameters\n"); + log_err("Cannot copy from user the modify counter parameters\n"); return -EINVAL; } @@ -1201,35 +1314,35 @@ static int do_ioctl_stats_compat_modify_class_counter(void *args) switch (kprm.params.type) { case DPA_STATS_CLS_MEMBER_SINGLE_KEY: - if (compat_ptr(uprm.params.key.byte)) { - ret = copy_key_descriptor_compatcpy( - &kprm.params.key, - &uprm.params.key); - if (ret < 0) { - log_err("Cannot copy the key descriptor\n"); - return ret; - } - + if (!compat_ptr(uprm.params.key)) + break; + /* Copy user-provided key descriptor */ + ret = copy_key_descriptor_compatcpy(&kprm.params.key, + uprm.params.key); + if (ret < 0) { + log_err("Cannot copy the key descriptor\n"); + goto compat_modify_counter_cleanup; } break; case DPA_STATS_CLS_MEMBER_PAIR_KEY: - if (compat_ptr(uprm.params.pair.first_key.byte)) { - ret = copy_key_descriptor_compatcpy( - &kprm.params.pair.first_key, - &uprm.params.pair.first_key); - if (ret < 0) - return 
ret; + if (!compat_ptr(uprm.params.pair)) + break; - ret = copy_key_descriptor_compatcpy( - &kprm.params.pair.second_key, - &uprm.params.pair.second_key); - if (ret != 0) { - log_err("Cannot copy the key descriptor of the " - "pair-key\n"); - return -EINVAL; - } + if (copy_from_user(&pair, compat_ptr(uprm.params.pair), + (sizeof(pair)))) { + log_err("Cannot copy from user array of " + "lookup pairs\n"); + return -EBUSY; + } + + /* Copy user-provided lookup pair descriptor */ + ret = copy_pair_descriptor_compatcpy(&kprm.params.pair, pair); + if (ret < 0) { + log_err("Cannot copy the pair key descriptor\n"); + goto compat_modify_counter_cleanup; } break; + case DPA_STATS_CLS_MEMBER_SA_ID: kprm.params.sa_id = uprm.params.sa_id; break; @@ -1238,34 +1351,45 @@ static int do_ioctl_stats_compat_modify_class_counter(void *args) } ret = dpa_stats_modify_class_counter(kprm.cnt_id, - &kprm.params, kprm.member_index); + &kprm.params, kprm.member_index); if (ret < 0) - return ret; + goto compat_modify_counter_cleanup; uprm.cnt_id = kprm.cnt_id; + if (copy_to_user(args, &uprm, sizeof(uprm))) { + log_err("Cannot copy to user class counter result\n"); + return -EBUSY; + } + +compat_modify_counter_cleanup: switch (kprm.params.type) { case DPA_STATS_CLS_MEMBER_SINGLE_KEY: - kfree(kprm.params.key.byte); - kfree(kprm.params.key.mask); + if (!kprm.params.key) + break; + kfree(kprm.params.key->byte); + kfree(kprm.params.key->mask); + kfree(kprm.params.key); break; case DPA_STATS_CLS_MEMBER_PAIR_KEY: - kfree(kprm.params.pair.first_key.byte); - kfree(kprm.params.pair.first_key.mask); - kfree(kprm.params.pair.second_key.byte); - kfree(kprm.params.pair.second_key.mask); + if (!kprm.params.pair) + break; + if (kprm.params.pair->first_key) { + kfree(kprm.params.pair->first_key->byte); + kfree(kprm.params.pair->first_key->mask); + kfree(kprm.params.pair->first_key); + } + if (kprm.params.pair->second_key) { + kfree(kprm.params.pair->second_key->byte); + 
kfree(kprm.params.pair->second_key->mask); + kfree(kprm.params.pair->second_key); + } break; case DPA_STATS_CLS_MEMBER_SA_ID: break; default: break; } - - if (copy_to_user(args, &uprm, sizeof(uprm))) { - log_err("Cannot copy to user class counter result\n"); - return -EBUSY; - } - return 0; } #endif @@ -1720,89 +1844,196 @@ static long store_get_cnts_async_params( } static int copy_key_descriptor(struct dpa_offload_lookup_key *src, - struct dpa_offload_lookup_key *tmp) + struct dpa_offload_lookup_key **ks_key) { - if (!src->byte) { - log_err("Key descriptor byte from user cannot be NULL\n"); - return -EINVAL; - } + struct dpa_offload_lookup_key *tmp = NULL; - /* Allocate memory to store the key byte array */ - tmp->byte = kmalloc(src->size, GFP_KERNEL); - if (!tmp->byte) { - log_err("Cannot allocate memory for key descriptor byte\n"); + /* Allocate kernel memory for key descriptor */ + tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); + if (!tmp) { + log_err("Cannot allocate kernel memory for key descriptor\n"); return -ENOMEM; } - if (copy_from_user(tmp->byte, src->byte, src->size)) { - log_err("Cannot copy from user the key descriptor byte\n"); - kfree(tmp->byte); - return -EBUSY; + if (src->byte) { + /* Allocate memory to store the key byte array */ + tmp->byte = kmalloc(src->size, GFP_KERNEL); + if (!tmp->byte) { + log_err("Cannot allocate memory for key " + "descriptor byte\n"); + return -ENOMEM; + } + + if (copy_from_user(tmp->byte, src->byte, src->size)) { + log_err("Cannot copy from user the key " + "descriptor byte\n"); + kfree(tmp->byte); + kfree(tmp); + return -EBUSY; + } } - src->byte = tmp->byte; if (src->mask) { /* Allocate memory to store the key mask array */ tmp->mask = kmalloc(src->size, GFP_KERNEL); if (!tmp->mask) { - log_err("Cannot allocate memory for key descriptor " - "mask\n"); + log_err("Cannot allocate memory for key " + "descriptor mask\n"); kfree(tmp->byte); + kfree(tmp); return -ENOMEM; } if (copy_from_user(tmp->mask, src->mask, src->size)) { - 
log_err("Cannot copy from user the key descriptor " - "mask\n"); + log_err("Cannot copy from user the " + "key descriptor mask\n"); kfree(tmp->byte); kfree(tmp->mask); + kfree(tmp); return -EBUSY; } - src->mask = tmp->mask; } + + tmp->size = src->size; + *ks_key = tmp; + return 0; +} + +static int copy_pair_descriptor(struct dpa_offload_lookup_key_pair *src, + struct dpa_offload_lookup_key_pair **ks_pair) +{ + struct dpa_offload_lookup_key_pair *tmp; + int ret = 0; + + /* Allocate kernel memory for pair descriptor*/ + tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); + if (!tmp) { + log_err("Cannot allocate kernel memory for pair descriptor\n"); + return -ENOMEM; + } + + if (src->first_key) { + ret = copy_key_descriptor(src->first_key, &tmp->first_key); + if (ret != 0) { + log_err("Could not copy the first key descriptor\n"); + kfree(tmp); + return ret; + } + } + + if (src->second_key) { + ret = copy_key_descriptor(src->second_key, &tmp->second_key); + if (ret != 0) { + log_err("Could not copy the second key descriptor\n"); + kfree(tmp); + return ret; + } + } + *ks_pair = tmp; return 0; } #ifdef CONFIG_COMPAT static int copy_key_descriptor_compatcpy( - struct dpa_offload_lookup_key *kparam, - const struct compat_ioc_dpa_offld_lookup_key *uparam) + struct dpa_offload_lookup_key **ks_key, compat_uptr_t uparam) { - BUG_ON(uparam->size <= 0); + struct compat_ioc_dpa_offld_lookup_key key; + struct dpa_offload_lookup_key *kparam; - kparam->size = uparam->size; + if (copy_from_user(&key, (compat_ptr)(uparam), + sizeof(struct compat_ioc_dpa_offld_lookup_key))) { + log_err("Cannot copy from user key descriptor\n"); + return -EBUSY; + } - /* Allocate memory to store the key byte array */ - kparam->byte = kmalloc(kparam->size, GFP_KERNEL); - if (!kparam->byte) { - log_err("Cannot allocate memory for key descriptor byte\n"); + /* Allocate kernel memory for key descriptor */ + kparam = kzalloc(sizeof(*kparam), GFP_KERNEL); + if (!kparam) { + log_err("Cannot allocate kernel memory for 
key descriptor\n"); return -ENOMEM; } - if (copy_from_user(kparam->byte, compat_ptr(uparam->byte), - uparam->size)) { - log_err("Cannot copy from user the key descriptor byte\n"); - return -EBUSY; + if (compat_ptr(key.byte)) { + /* Allocate memory to store the key byte array */ + kparam->byte = kmalloc(key.size, GFP_KERNEL); + if (!kparam->byte) { + log_err("Cannot allocate memory for key descriptor " + "byte\n"); + kfree(kparam); + return -ENOMEM; + } + + if (copy_from_user(kparam->byte, + compat_ptr(key.byte), key.size)) { + log_err("Cannot copy from user the key descriptor " + "byte\n"); + kfree(kparam->byte); + kfree(kparam); + return -EBUSY; + } } - if (compat_ptr(uparam->mask)) { + + if (compat_ptr(key.mask)) { /* Allocate memory to store the key mask array */ - kparam->mask = kmalloc(kparam->size, GFP_KERNEL); + kparam->mask = kmalloc(key.size, GFP_KERNEL); if (!kparam->mask) { log_err("Cannot allocate memory for key descriptor " "mask\n"); kfree(kparam->byte); + kfree(kparam); return -ENOMEM; } - if (copy_from_user(kparam->mask, compat_ptr(uparam->mask), - uparam->size)) { + if (copy_from_user(kparam->mask, + compat_ptr(key.mask), key.size)) { log_err("Cannot copy from user the key descriptor " "mask\n"); + kfree(kparam->byte); + kfree(kparam->mask); + kfree(kparam); return -EBUSY; } - } else - kparam->mask = NULL; + } + kparam->size = key.size; + *ks_key = kparam; + return 0; +} + +static int copy_pair_descriptor_compatcpy( + struct dpa_offload_lookup_key_pair **ks_pair, + struct compat_ioc_dpa_offld_lookup_key_pair pair) +{ + struct dpa_offload_lookup_key_pair *kpair; + int ret = 0; + /* Allocate kernel memory for lookup pair descriptor */ + kpair = kzalloc(sizeof(*kpair), GFP_KERNEL); + if (!kpair) { + log_err("Cannot allocate kernel memory for pair descriptor\n"); + return -ENOMEM; + } + + if (compat_ptr(pair.first_key)) { + /* Copy user-provided key descriptor */ + ret = copy_key_descriptor_compatcpy( + &kpair->first_key, pair.first_key); + if (ret != 
0) { + log_err("Cannot copy first key of the pair\n"); + kfree(kpair); + return ret; + } + } + + if (compat_ptr(pair.second_key)) { + ret = copy_key_descriptor_compatcpy( + &kpair->second_key, pair.second_key); + if (ret != 0) { + log_err("Cannot copy second key of the pair\n"); + kfree(kpair); + return ret; + } + } + *ks_pair = kpair; return 0; } #endif @@ -1859,12 +2090,25 @@ static void dpa_stats_plcr_cnt_compatcpy(struct dpa_stats_cnt_plcr *kprm, kprm->cnt_sel = uprm->cnt_sel; } + static long dpa_stats_tbl_cnt_compatcpy(struct dpa_stats_cnt_classif_tbl *kprm, struct dpa_stats_compat_cnt_classif_tbl *uprm) { kprm->td = uprm->td; kprm->cnt_sel = uprm->cnt_sel; - return copy_key_descriptor_compatcpy(&kprm->key, &uprm->key); + /* If different than NULL, it will be overwritten */ + kprm->key = compat_ptr(uprm->key); + + if (compat_ptr(uprm->key)) { + /* Allocate memory for kernel-space key descriptor */ + kprm->key = kmalloc(sizeof(*kprm->key), GFP_KERNEL); + if (!kprm->key) { + log_err("Cannot allocate memory for key descriptor\n"); + return -ENOMEM; + } + return copy_key_descriptor_compatcpy(&kprm->key, uprm->key); + } + return 0; } static long dpa_stats_ccnode_cnt_compatcpy( @@ -1874,7 +2118,19 @@ static long dpa_stats_ccnode_cnt_compatcpy( kprm->cnt_sel = uprm->cnt_sel; kprm->ccnode_type = uprm->ccnode_type; kprm->cc_node = compat_get_id2ptr(uprm->cc_node, FM_MAP_TYPE_PCD_NODE); - return copy_key_descriptor_compatcpy(&kprm->key, &uprm->key); + /* If different than NULL, it will be overwritten */ + kprm->key = compat_ptr(uprm->key); + + if (compat_ptr(uprm->key)) { + /* Allocate memory for kernel-space key descriptor */ + kprm->key = kmalloc(sizeof(*kprm->key), GFP_KERNEL); + if (!kprm->key) { + log_err("Cannot allocate memory for key descriptor\n"); + return -ENOMEM; + } + return copy_key_descriptor_compatcpy(&kprm->key, uprm->key); + } + return 0; } static long dpa_stats_eth_cls_compatcpy(struct dpa_stats_cls_cnt_eth *kprm, @@ -2016,102 +2272,117 @@ static long 
dpa_stats_tbl_cls_compatcpy( struct dpa_stats_compat_cls_cnt_classif_tbl *uprm, uint32_t cls_members) { - struct compat_ioc_dpa_offld_lookup_key *keys; - struct compat_ioc_dpa_offld_lookup_key_pair *pairs; - uint32_t size = 0, i; + struct compat_ioc_dpa_offld_lookup_key_pair pair; + compat_uptr_t *us_keys; + uint32_t i; long ret; kprm->cnt_sel = uprm->cnt_sel; kprm->td = uprm->td; kprm->key_type = uprm->key_type; + /* Allocate memory to store array of user-space keys descriptors */ + us_keys = kzalloc(sizeof(compat_uptr_t) * cls_members, GFP_KERNEL); + if (!us_keys) { + log_err("Cannot allocate memory array of lookup keys\n"); + return -ENOMEM; + } + if (kprm->key_type == DPA_STATS_CLASSIF_SINGLE_KEY) { - size = sizeof(struct dpa_offload_lookup_key) * cls_members; - kprm->keys = kzalloc(size, GFP_KERNEL); - if (!kprm->keys) { - log_err("Cannot allocate kernel memory for lookup keys " - "array\n"); - return -ENOMEM; + if (copy_from_user(us_keys, compat_ptr(uprm->keys), + (sizeof(compat_uptr_t) * cls_members))) { + log_err("Cannot copy from user-space array of keys " + "descriptors\n"); + kfree(us_keys); + return -EBUSY; } - size = sizeof(struct compat_ioc_dpa_offld_lookup_key) * - cls_members; - keys = kzalloc(size, GFP_KERNEL); - if (!keys) { - log_err("Cannot allocate memory for lookup keys " + /* Allocate memory for array of kernel-space keys descriptors */ + kprm->keys = kzalloc((sizeof(*kprm->keys) * cls_members), + GFP_KERNEL); + if (!kprm->keys) { + log_err("Cannot allocate kernel memory for lookup keys " "array\n"); + kfree(us_keys); return -ENOMEM; } - - if (copy_from_user(keys, (compat_ptr)(uprm->keys), size)) { - log_err("Cannot copy from user array of lookup keys\n"); - kfree(keys); - return -EBUSY; - } - for (i = 0; i < cls_members; i++) { - if (!compat_ptr(keys[i].byte)) + if (!compat_ptr(us_keys[i])) continue; - + /* Copy user-provided key descriptor */ ret = copy_key_descriptor_compatcpy(&kprm->keys[i], - &keys[i]); + us_keys[i]); if (ret != 0) { 
log_err("Cannot copy the key descriptor\n"); - kfree(keys); - return -EINVAL; + kfree(us_keys); + return ret; } } - kfree(keys); - } else if (kprm->key_type == DPA_STATS_CLASSIF_PAIR_KEY) { - size = sizeof(struct dpa_offload_lookup_key_pair) * cls_members; - kprm->pairs = kzalloc(size, GFP_KERNEL); - if (!kprm->pairs) { - log_err("Cannot allocate kernel memory for pair lookup " - "keys array\n"); - return -ENOMEM; + kfree(us_keys); + } + + if (kprm->key_type == DPA_STATS_CLASSIF_PAIR_KEY) { + if (copy_from_user(us_keys, compat_ptr(uprm->pairs), + (sizeof(compat_uptr_t) * cls_members))) { + log_err("Cannot copy from user-space array of pairs " + "descriptors\n"); + kfree(us_keys); + return -EBUSY; } - size = sizeof(struct compat_ioc_dpa_offld_lookup_key_pair) * - cls_members; - pairs = kzalloc(size, GFP_KERNEL); - if (!pairs) { - log_err("Cannot allocate memory for pair lookup keys " + /* Allocate memory for array of kernel-space pairs descriptors*/ + kprm->pairs = kzalloc((sizeof(*kprm->pairs) * cls_members), + GFP_KERNEL); + if (!kprm->pairs) { + log_err("Cannot allocate kernel memory for lookup pairs" "array\n"); + kfree(us_keys); return -ENOMEM; } - if (copy_from_user(pairs, (compat_ptr)(uprm->pairs), size)) { - log_err("Cannot copy from user array of pair lookup " - "keys\n"); - kfree(pairs); - return -EBUSY; - } - for (i = 0; i < cls_members; i++) { - if (!compat_ptr(pairs[i].first_key.byte)) + if (!compat_ptr(us_keys[i])) continue; - ret = copy_key_descriptor_compatcpy( - &kprm->pairs[i].first_key, - &pairs[i].first_key); - if (ret != 0) { - log_err("Cannot copy the key descriptor for the" - " first lookup key\n"); - kfree(pairs); - return -EINVAL; + /* Allocate memory for kernel pair descriptor */ + kprm->pairs[i] = kzalloc(sizeof(*kprm->pairs[i]), + GFP_KERNEL); + if (!kprm->pairs[i]) { + log_err("Cannot allocate kernel memory for pair" + " descriptor\n"); + return -ENOMEM; } - ret = copy_key_descriptor_compatcpy( - &kprm->pairs[i].second_key, - 
&pairs[i].second_key); - if (ret != 0) { - log_err("Cannot copy the key descriptor for the" - " second lookup key\n", uprm->td); - kfree(pairs); - return -EINVAL; + if (copy_from_user(&pair, compat_ptr(us_keys[i]), + (sizeof(pair)))) { + log_err("Cannot copy pair descriptor\n"); + return -EBUSY; + } + + if (compat_ptr(pair.first_key)) { + /* Copy user-provided first key descriptor */ + ret = copy_key_descriptor_compatcpy( + &kprm->pairs[i]->first_key, + pair.first_key); + if (ret != 0) { + log_err("Cannot copy first key\n"); + kfree(us_keys); + return ret; + } + } + + if (compat_ptr(pair.second_key)) { + /* Copy user-provided second key descriptor */ + ret = copy_key_descriptor_compatcpy( + &kprm->pairs[i]->second_key, + pair.second_key); + if (ret != 0) { + log_err("Cannot copy second key\n"); + kfree(us_keys); + return ret; + } } } - kfree(pairs); } return 0; } @@ -2121,45 +2392,50 @@ static long dpa_stats_ccnode_cls_compatcpy( struct dpa_stats_compat_cls_cnt_classif_node *uprm, uint32_t cls_members) { - struct compat_ioc_dpa_offld_lookup_key *keys; - uint32_t size, i; + compat_uptr_t *us_keys; + uint32_t i; long ret = 0; kprm->cc_node = compat_get_id2ptr(uprm->cc_node, FM_MAP_TYPE_PCD_NODE); kprm->cnt_sel = uprm->cnt_sel; kprm->ccnode_type = uprm->ccnode_type; - size = sizeof(struct dpa_offload_lookup_key) * cls_members; - kprm->keys = kzalloc(size, GFP_KERNEL); - if (!kprm->keys) { - log_err("Cannot allocate kernel memory for lookup keys " - "array\n"); + /* Allocate memory to store array of user-space keys descriptors */ + us_keys = kzalloc(sizeof(compat_uptr_t) * cls_members, GFP_KERNEL); + if (!us_keys) { + log_err("Cannot allocate memory array of lookup keys\n"); return -ENOMEM; } - size = sizeof(struct compat_ioc_dpa_offld_lookup_key) * cls_members; - keys = kzalloc(size, GFP_KERNEL); - if (!keys) { - log_err("Cannot allocate memory for lookup keys array\n"); - return -ENOMEM; - } - - if (copy_from_user(keys, (compat_ptr)(uprm->keys), size)) { - 
log_err("Cannot copy from user array of lookup keys\n"); - kfree(keys); + if (copy_from_user(us_keys, compat_ptr(uprm->keys), + (sizeof(compat_uptr_t) * cls_members))) { + log_err("Cannot copy from user-space array of keys " + "descriptors\n"); + kfree(us_keys); return -EBUSY; } + /* Allocate memory to store array of kernel-space keys descriptors */ + kprm->keys = kzalloc((sizeof(*kprm->keys) * cls_members), GFP_KERNEL); + if (!kprm->keys) { + log_err("Cannot allocate kernel memory for lookup keys " + "array\n"); + kfree(us_keys); + return -ENOMEM; + } for (i = 0; i < cls_members; i++) { - ret = copy_key_descriptor_compatcpy(&kprm->keys[i], &keys[i]); + if (!compat_ptr(us_keys[i])) + continue; + /* Copy user-provided key descriptor */ + ret = copy_key_descriptor_compatcpy(&kprm->keys[i], us_keys[i]); if (ret != 0) { log_err("Cannot copy the key descriptor\n"); - kfree(keys); - return -EINVAL; + kfree(us_keys); + return ret; } } - kfree(keys); - return ret; + kfree(us_keys); + return 0; } static long dpa_stats_ipsec_cls_compatcpy(struct dpa_stats_cls_cnt_ipsec *kprm, diff --git a/include/linux/fsl_dpa_stats.h b/include/linux/fsl_dpa_stats.h index ba11791..b8fe850 100644 --- a/include/linux/fsl_dpa_stats.h +++ b/include/linux/fsl_dpa_stats.h @@ -396,8 +396,11 @@ struct dpa_stats_cnt_classif_tbl { /* Table descriptor */ int td; - /* Key to identify a specific entry */ - struct dpa_offload_lookup_key key; + /* + * Pointer to a key that identifies a specific entry or NULL in order + * to obtain statistics for miss entry + */ + struct dpa_offload_lookup_key *key; /* * Single or multiple selection of Classifier Table counters @@ -429,8 +432,11 @@ struct dpa_stats_cnt_classif_node { /* The type of FMAN Classification Node */ enum dpa_stats_classif_node_type ccnode_type; - /* Key to identify a specific entry */ - struct dpa_offload_lookup_key key; + /* + * Pointer to a key that identifies a specific entry or NULL in order + * to obtain statistics for miss entry + */ + 
struct dpa_offload_lookup_key *key; /* * Single or multiple selection of Classifier @@ -576,11 +582,17 @@ enum dpa_stats_classif_key_type { /* DPA Stats Classification counter - pair of keys */ struct dpa_offload_lookup_key_pair { - /* Key to identify the first entry */ - struct dpa_offload_lookup_key first_key; + /* + * Pointer to a key that identifies the first entry or NULL in order + * to identify the miss entry of the first table + */ + struct dpa_offload_lookup_key *first_key; - /* Key to identify the entry connected to the first entry */ - struct dpa_offload_lookup_key second_key; + /* + * Pointer to a key that identifies the entry connected to the first + * entry or NULL in order to identify the miss entry + */ + struct dpa_offload_lookup_key *second_key; }; /* DPA Stats Classifier Table class counter parameters */ @@ -601,18 +613,28 @@ struct dpa_stats_cls_cnt_classif_tbl { */ /* - * Array of keys to identify specific entries. A key can be - * 'invalidated' by providing the 'byte' and 'mask' pointers - * set to NULL. + * Pointer to an array of keys, where each element of the array + * can either be a key that identifies a specific entry or NULL + * in order to obtain the statistics for the miss entry. A key + * can be 'invalidated' by providing the 'byte' pointer set + * to NULL. */ - struct dpa_offload_lookup_key *keys; + struct dpa_offload_lookup_key **keys; /* * Array of 'pair-keys' to identify specific entries. A key pair * can be 'invalidated' by providing the 'byte' and 'mask' * pointers of the first key set to NULL */ - struct dpa_offload_lookup_key_pair *pairs; + + /* + * Pointer to an array of 'pair-keys', where each element of the + * array can either be a 'pair-key' that identifies a specific + * entry or NULL in order to obtain the statistics for the + * miss entry. A key pair can be 'invalidated' by providing the + * 'byte' pointer of the first key set to NULL. 
+ */ + struct dpa_offload_lookup_key_pair **pairs; }; /* @@ -636,7 +658,7 @@ struct dpa_stats_cls_cnt_classif_node { enum dpa_stats_classif_node_type ccnode_type; /* Array of keys to identify specific entries */ - struct dpa_offload_lookup_key *keys; + struct dpa_offload_lookup_key **keys; /* * Single or multiple selection of Classifier counters @@ -739,17 +761,20 @@ struct dpa_stats_cls_member_params { union { /* - * Key to set or update in case the byte and mask pointers are - * not NULL, or class member to invalidate otherwise + * Pointer to a key to set or update in case the byte pointer is + * not NULL, or class member to invalidate otherwise. The + * pointer can be NULL, in which case it represents the miss + * entry. */ - struct dpa_offload_lookup_key key; + struct dpa_offload_lookup_key *key; /* - * Key to set or update in case the byte and mask pointers of - * the first key are not NULL, or class member to invalidate - * otherwise + * Pointer to a 'pair-key' to set or update in case the byte + * pointer of the first key is not NULL, or class member to + * invalidate otherwise. The pointer can be NULL, in which case + * it represents the miss entry. */ - struct dpa_offload_lookup_key_pair pair; + struct dpa_offload_lookup_key_pair *pair; /* * Security association identifier to set or update or class -- cgit v0.10.2 From b5414961f740becc012f6c63592dc5be2894c84b Mon Sep 17 00:00:00 2001 From: Mandy Lavi Date: Mon, 5 Aug 2013 14:53:00 +0300 Subject: fmd: add T4/B4 definition FM_NO_GUARANTEED_RESET_VALUES Apparently reset values of B4/T4 in fmd are not completely reliable. will use this definition to force fman init values. 
Signed-off-by: Mandy Lavi Change-Id: Id0b16cf7faf8c53ab9741bcd370e27e91a9ab2a0 Reviewed-on: http://git.am.freescale.net:8181/3904 Reviewed-by: Chereji Marian-Cornel-R27762 Tested-by: Schmitt Richard-B43082 Reviewed-by: Schmitt Richard-B43082 diff --git a/drivers/net/ethernet/freescale/fman/inc/integrations/T4240/dpaa_integration_ext.h b/drivers/net/ethernet/freescale/fman/inc/integrations/T4240/dpaa_integration_ext.h index 83ae8ba..64349ea 100644 --- a/drivers/net/ethernet/freescale/fman/inc/integrations/T4240/dpaa_integration_ext.h +++ b/drivers/net/ethernet/freescale/fman/inc/integrations/T4240/dpaa_integration_ext.h @@ -256,6 +256,9 @@ typedef enum #define FM_DEQ_PIPELINE_PARAMS_FOR_OP #define FM_QMI_NO_SINGLE_ECC_EXCEPTION + +#define FM_NO_GUARANTEED_RESET_VALUES + /* FM errata */ #define FM_HEAVY_TRAFFIC_HANG_ERRATA_FMAN_A005669 #define FM_WRONG_RESET_VALUES_ERRATA_FMAN_A005127 -- cgit v0.10.2 From 35fbdcf1075faeac0fba5c15acbbab710d682237 Mon Sep 17 00:00:00 2001 From: Mandy Lavi Date: Wed, 31 Jul 2013 14:32:12 +0300 Subject: fmd: add ioctl support to both xxTableGetMissStatistics functions FM_PCD_MatchTableGetMissStatistics and FM_PCD_HashTableGetMissStatistics Signed-off-by: Mandy Lavi Change-Id: I56a02300f4603245344effc898ff9fdd137d8e4c Reviewed-on: http://git.am.freescale.net:8181/3905 Reviewed-by: Schmitt Richard-B43082 Tested-by: Schmitt Richard-B43082 diff --git a/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm.c b/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm.c index 3c372a3..f87529d 100644 --- a/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm.c +++ b/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm.c @@ -1946,6 +1946,155 @@ invalid_port_id: XX_Free(param); break; } + + +#if defined(CONFIG_COMPAT) + case FM_PCD_IOC_MATCH_TABLE_GET_MISS_STAT_COMPAT: +#endif + case FM_PCD_IOC_MATCH_TABLE_GET_MISS_STAT: + { + ioc_fm_pcd_cc_tbl_get_miss_params_t param; + +#if defined(CONFIG_COMPAT) + if 
(compat) + { + ioc_compat_fm_pcd_cc_tbl_get_miss_params_t *compat_param; + + compat_param = (ioc_compat_fm_pcd_cc_tbl_get_miss_params_t *) XX_Malloc( + sizeof(ioc_compat_fm_pcd_cc_tbl_get_miss_params_t)); + if (!compat_param) + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD")); + + memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_cc_tbl_get_miss_params_t)); + if (copy_from_user(compat_param, + (ioc_compat_fm_pcd_cc_tbl_get_miss_params_t *)compat_ptr(arg), + sizeof(ioc_compat_fm_pcd_cc_tbl_get_miss_params_t))) + { + XX_Free(compat_param); + RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG); + } + + compat_copy_fm_pcd_cc_tbl_get_miss(compat_param, ¶m, COMPAT_US_TO_K); + + XX_Free(compat_param); + } + else +#endif + { + if (copy_from_user(¶m, (ioc_fm_pcd_cc_tbl_get_miss_params_t *)arg, + sizeof(ioc_fm_pcd_cc_tbl_get_miss_params_t))) + RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG); + } + + + err = FM_PCD_MatchTableGetMissStatistics((t_Handle) param.id, + (t_FmPcdCcKeyStatistics *) ¶m.miss_statistics); + +#if defined(CONFIG_COMPAT) + if (compat) + { + ioc_compat_fm_pcd_cc_tbl_get_miss_params_t *compat_param; + + compat_param = (ioc_compat_fm_pcd_cc_tbl_get_miss_params_t*) XX_Malloc( + sizeof(ioc_compat_fm_pcd_cc_tbl_get_miss_params_t)); + if (!compat_param) + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD")); + + memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_cc_tbl_get_miss_params_t)); + compat_copy_fm_pcd_cc_tbl_get_miss(compat_param, ¶m, COMPAT_K_TO_US); + if (copy_to_user((ioc_compat_fm_pcd_cc_tbl_get_miss_params_t*) compat_ptr(arg), + compat_param, + sizeof(ioc_compat_fm_pcd_cc_tbl_get_miss_params_t))) + RETURN_ERROR(MINOR, E_READ_FAILED, NO_MSG); + + XX_Free(compat_param); + } + else +#endif + { + if (copy_to_user((ioc_fm_pcd_hash_table_params_t *)arg, + ¶m, + sizeof(ioc_fm_pcd_cc_tbl_get_miss_params_t))) + RETURN_ERROR(MINOR, E_READ_FAILED, NO_MSG); + } + + break; + } + + +#if defined(CONFIG_COMPAT) + case FM_PCD_IOC_HASH_TABLE_GET_MISS_STAT_COMPAT: +#endif + case 
FM_PCD_IOC_HASH_TABLE_GET_MISS_STAT: + { + ioc_fm_pcd_cc_tbl_get_miss_params_t param; + +#if defined(CONFIG_COMPAT) + if (compat) + { + ioc_compat_fm_pcd_cc_tbl_get_miss_params_t *compat_param; + + compat_param = (ioc_compat_fm_pcd_cc_tbl_get_miss_params_t *) XX_Malloc( + sizeof(ioc_compat_fm_pcd_cc_tbl_get_miss_params_t)); + if (!compat_param) + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD")); + + memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_cc_tbl_get_miss_params_t)); + if (copy_from_user(compat_param, + (ioc_compat_fm_pcd_cc_tbl_get_miss_params_t *)compat_ptr(arg), + sizeof(ioc_compat_fm_pcd_cc_tbl_get_miss_params_t))) + { + XX_Free(compat_param); + RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG); + } + + compat_copy_fm_pcd_cc_tbl_get_miss(compat_param, ¶m, COMPAT_US_TO_K); + + XX_Free(compat_param); + } + else +#endif + { + if (copy_from_user(¶m, (ioc_fm_pcd_cc_tbl_get_miss_params_t *)arg, + sizeof(ioc_fm_pcd_cc_tbl_get_miss_params_t))) + RETURN_ERROR(MINOR, E_WRITE_FAILED, NO_MSG); + } + + + err = FM_PCD_HashTableGetMissStatistics((t_Handle) param.id, + (t_FmPcdCcKeyStatistics *) ¶m.miss_statistics); + +#if defined(CONFIG_COMPAT) + if (compat) + { + ioc_compat_fm_pcd_cc_tbl_get_miss_params_t *compat_param; + + compat_param = (ioc_compat_fm_pcd_cc_tbl_get_miss_params_t*) XX_Malloc( + sizeof(ioc_compat_fm_pcd_cc_tbl_get_miss_params_t)); + if (!compat_param) + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD")); + + memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_cc_tbl_get_miss_params_t)); + compat_copy_fm_pcd_cc_tbl_get_miss(compat_param, ¶m, COMPAT_K_TO_US); + if (copy_to_user((ioc_compat_fm_pcd_cc_tbl_get_miss_params_t*) compat_ptr(arg), + compat_param, + sizeof(ioc_compat_fm_pcd_cc_tbl_get_miss_params_t))) + RETURN_ERROR(MINOR, E_READ_FAILED, NO_MSG); + + XX_Free(compat_param); + } + else +#endif + { + if (copy_to_user((ioc_fm_pcd_hash_table_params_t *)arg, + ¶m, + sizeof(ioc_fm_pcd_cc_tbl_get_miss_params_t))) + RETURN_ERROR(MINOR, E_READ_FAILED, NO_MSG); 
+ } + + break; + } + #if defined(CONFIG_COMPAT) case FM_PCD_IOC_HASH_TABLE_SET_COMPAT: #endif diff --git a/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm_compat.c b/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm_compat.c index 68142b6..3c1ac30 100644 --- a/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm_compat.c +++ b/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm_compat.c @@ -506,7 +506,7 @@ void compat_copy_fm_pcd_hash_table( { if (compat == COMPAT_US_TO_K) { - param-> max_num_of_keys = compat_param->max_num_of_keys; + param->max_num_of_keys = compat_param->max_num_of_keys; param->statistics_mode = compat_param->statistics_mode; param->kg_hash_shift = compat_param->kg_hash_shift; param->hash_res_mask = compat_param->hash_res_mask; @@ -516,7 +516,7 @@ void compat_copy_fm_pcd_hash_table( } else { - compat_param-> max_num_of_keys = param->max_num_of_keys; + compat_param->max_num_of_keys = param->max_num_of_keys; compat_param->statistics_mode = param->statistics_mode; compat_param->kg_hash_shift = param->kg_hash_shift; compat_param->hash_res_mask = param->hash_res_mask; @@ -844,6 +844,22 @@ void compat_copy_fm_port_vsp_alloc_params( } #endif /* (DPAA_VERSION >= 11) */ +void compat_copy_fm_pcd_cc_tbl_get_miss( + ioc_compat_fm_pcd_cc_tbl_get_miss_params_t *compat_param, + ioc_fm_pcd_cc_tbl_get_miss_params_t *param, + uint8_t compat) +{ + if (compat == COMPAT_US_TO_K) + { + param->id = compat_pcd_id2ptr(compat_param->id); + memcpy(¶m->miss_statistics, &compat_param->miss_statistics, sizeof(ioc_fm_pcd_cc_key_statistics_t)); + } else { + compat_param->id = compat_add_ptr2id(param->id, FM_MAP_TYPE_PCD_NODE); + memcpy(&compat_param->miss_statistics, ¶m->miss_statistics, sizeof(ioc_fm_pcd_cc_key_statistics_t)); + } +} + + void compat_copy_fm_pcd_net_env( ioc_compat_fm_pcd_net_env_params_t *compat_param, ioc_fm_pcd_net_env_params_t *param, diff --git 
a/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm_compat.h b/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm_compat.h index f8655db..ae19b68 100644 --- a/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm_compat.h +++ b/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm_compat.h @@ -521,6 +521,12 @@ typedef struct ioc_compat_fm_ctrl_mon_counters_params_t { compat_uptr_t p_mon; } ioc_compat_fm_ctrl_mon_counters_params_t; +typedef struct ioc_compat_fm_pcd_cc_tbl_get_miss_params_t { + compat_uptr_t id; + ioc_fm_pcd_cc_key_statistics_t miss_statistics; +} ioc_compat_fm_pcd_cc_tbl_get_miss_params_t; + + /* } pcd compat structures */ void compat_obj_delete( @@ -568,6 +574,11 @@ void compat_copy_fm_pcd_cc_tree( ioc_fm_pcd_cc_tree_params_t *param, uint8_t compat); +void compat_copy_fm_pcd_cc_tbl_get_miss( + ioc_compat_fm_pcd_cc_tbl_get_miss_params_t *compat_param, + ioc_fm_pcd_cc_tbl_get_miss_params_t *param, + uint8_t compat); + void compat_fm_pcd_prs_sw( ioc_compat_fm_pcd_prs_sw_params_t *compat_param, ioc_fm_pcd_prs_sw_params_t *param, diff --git a/include/uapi/linux/fmd/Peripherals/fm_pcd_ioctls.h b/include/uapi/linux/fmd/Peripherals/fm_pcd_ioctls.h index 93bbebb..8c07f18 100644 --- a/include/uapi/linux/fmd/Peripherals/fm_pcd_ioctls.h +++ b/include/uapi/linux/fmd/Peripherals/fm_pcd_ioctls.h @@ -2117,6 +2117,80 @@ typedef struct ioc_fm_pcd_frm_replic_member_params_t { } ioc_fm_pcd_frm_replic_member_params_t; #endif /* DPAA_VERSION >= 11 */ + +typedef struct ioc_fm_pcd_cc_key_statistics_t { + uint32_t byte_count; /**< This counter reflects byte count of frames that + were matched by this key. */ + uint32_t frame_count; /**< This counter reflects count of frames that + were matched by this key. 
*/ +#if (DPAA_VERSION >= 11) + uint32_t frame_length_range_count[IOC_FM_PCD_CC_STATS_MAX_NUM_OF_FLR]; + /**< These counters reflect how many frames matched + this key in 'RMON' statistics mode: + Each counter holds the number of frames of a + specific frames length range, according to the + ranges provided at initialization. */ +#endif /* (DPAA_VERSION >= 11) */ +} ioc_fm_pcd_cc_key_statistics_t; + + +typedef struct ioc_fm_pcd_cc_tbl_get_miss_params_t { + void *id; + ioc_fm_pcd_cc_key_statistics_t miss_statistics; +} ioc_fm_pcd_cc_tbl_get_miss_params_t; + + +/**************************************************************************//** + @Function FM_PCD_MatchTableGetMissStatistics + + @Description This routine may be used to get statistics counters of miss entry + in a CC Node. + + If 'e_FM_PCD_CC_STATS_MODE_FRAME' and + 'e_FM_PCD_CC_STATS_MODE_BYTE_AND_FRAME' were set for this node, + these counters reflect how many frames were not matched to any + existing key and therefore passed through the miss entry; The + total frames count will be returned in the counter of the + first range (as only one frame length range was defined). + + @Param[in] h_CcNode A handle to the node + @Param[out] p_MissStatistics Statistics counters for 'miss' + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PCD_MatchTableSet(). +*//***************************************************************************/ + +#if defined(CONFIG_COMPAT) +#define FM_PCD_IOC_MATCH_TABLE_GET_MISS_STAT_COMPAT _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(12), ioc_compat_fm_pcd_cc_tbl_get_miss_params_t) +#endif +#define FM_PCD_IOC_MATCH_TABLE_GET_MISS_STAT _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(12), ioc_fm_pcd_cc_tbl_get_miss_params_t) + +/**************************************************************************//** + @Function FM_PCD_HashTableGetMissStatistics + + @Description This routine may be used to get statistics counters of 'miss' + entry of the a hash table. 
+ + If 'e_FM_PCD_CC_STATS_MODE_FRAME' and + 'e_FM_PCD_CC_STATS_MODE_BYTE_AND_FRAME' were set for this node, + these counters reflect how many frames were not matched to any + existing key and therefore passed through the miss entry; + + @Param[in] h_HashTbl A handle to a hash table + @Param[out] p_MissStatistics Statistics counters for 'miss' + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PCD_HashTableSet(). +*//***************************************************************************/ + +#if defined(CONFIG_COMPAT) +#define FM_PCD_IOC_HASH_TABLE_GET_MISS_STAT_COMPAT _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(13), ioc_compat_fm_pcd_cc_tbl_get_miss_params_t) +#endif +#define FM_PCD_IOC_HASH_TABLE_GET_MISS_STAT _IOWR(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(13), ioc_fm_pcd_cc_tbl_get_miss_params_t) + + /**************************************************************************//** @Function FM_PCD_NetEnvCharacteristicsSet -- cgit v0.10.2 From ffbd247893eb0baf52925b2d7b4a3dbc2dfd3b8e Mon Sep 17 00:00:00 2001 From: Radu Bulie Date: Fri, 12 Jul 2013 12:16:10 +0000 Subject: dpa_offload: Copy storage profile id to userspace during look-up by key or by ref operations Not all parameters were copied to userspace in classifier wrapper during look-up by key or by ref operations. The patch fixes the above issue. 
Change-Id: I4c33455a327c8dfd6fd0fa28d13cc7e28b856714 Signed-off-by: Radu Bulie Reviewed-on: http://git.am.freescale.net:8181/3281 Tested-by: Review Code-CDREVIEW Reviewed-by: Chereji Marian-Cornel-R27762 Reviewed-by: Fleming Andrew-AFLEMING diff --git a/drivers/staging/fsl_dpa_offload/wrp_dpa_classifier.c b/drivers/staging/fsl_dpa_offload/wrp_dpa_classifier.c index 6d59682..26cab69 100644 --- a/drivers/staging/fsl_dpa_offload/wrp_dpa_classifier.c +++ b/drivers/staging/fsl_dpa_offload/wrp_dpa_classifier.c @@ -2411,7 +2411,8 @@ int dpa_cls_tbl_action_params_rcompatcpy( uparam->enq_params.new_fqid = kparam->enq_params.new_fqid; uparam->enq_params.hmd = kparam->enq_params.hmd; - + uparam->enq_params.new_rel_vsp_id = + kparam->enq_params.new_rel_vsp_id; if (kparam->enq_params.policer_params) { BUG_ON(!compat_ptr(uparam->enq_params.policer_params)); if (copy_to_user( -- cgit v0.10.2 From 39d855d7071f77ac8dd697ed34f75c4be94f2f07 Mon Sep 17 00:00:00 2001 From: Radu Bulie Date: Wed, 17 Jul 2013 09:18:35 +0000 Subject: dpa_offload: Copy multicast group and header manipulation descriptors to userspace during look-up by key or by ref operations Multicast group and hmanip descriptors were not copied to userspace in classifier wrapper during look-up by key or by ref operations. The patch fixes the above issue. 
Signed-off-by: Radu Bulie Change-Id: Ibba7869bcbe339a3ef23bd4071a3aaa682fe7b9d Reviewed-on: http://git.am.freescale.net:8181/3353 Reviewed-by: Bulie Radu-Andrei-B37577 Tested-by: Review Code-CDREVIEW Reviewed-by: Chereji Marian-Cornel-R27762 Reviewed-by: Fleming Andrew-AFLEMING Reviewed-by: Schmitt Richard-B43082 Tested-by: Schmitt Richard-B43082 diff --git a/drivers/staging/fsl_dpa_offload/wrp_dpa_classifier.c b/drivers/staging/fsl_dpa_offload/wrp_dpa_classifier.c index 26cab69..1e35b09 100644 --- a/drivers/staging/fsl_dpa_offload/wrp_dpa_classifier.c +++ b/drivers/staging/fsl_dpa_offload/wrp_dpa_classifier.c @@ -2430,6 +2430,10 @@ int dpa_cls_tbl_action_params_rcompatcpy( kparam->next_table_params.next_td; uparam->next_table_params.hmd = kparam->next_table_params.hmd; break; + case DPA_CLS_TBL_ACTION_MCAST: + uparam->mcast_params.grpd = kparam->mcast_params.grpd; + uparam->mcast_params.hmd = kparam->mcast_params.hmd; + break; default: break; } -- cgit v0.10.2 From afaabd75ae3bf93350dcbd81c2c348d5da3d8e58 Mon Sep 17 00:00:00 2001 From: Andrei Varvara Date: Thu, 18 Jul 2013 12:25:04 +0300 Subject: crypto: caam - Add define for NULL encryption This define is required for making descriptors with NULL encryption. 
Change-Id: If71ee727f18f1ea23a6bbe67b03046d526897a52 Signed-off-by: Andrei Varvara Change-Id: I61ef1a7d6bcf115f99f5160f76154c09e5f7f7da Reviewed-on: http://git.am.freescale.net:8181/3472 Tested-by: Review Code-CDREVIEW Reviewed-by: Fleming Andrew-AFLEMING Reviewed-by: Schmitt Richard-B43082 Tested-by: Schmitt Richard-B43082 diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h index 53b296f..7ddc851 100644 --- a/drivers/crypto/caam/desc.h +++ b/drivers/crypto/caam/desc.h @@ -463,6 +463,7 @@ struct sec4_sg_entry { #define OP_PCL_IPSEC_DES_IV64 0x0100 #define OP_PCL_IPSEC_DES 0x0200 #define OP_PCL_IPSEC_3DES 0x0300 +#define OP_PCL_IPSEC_NULL_ENC 0x0b00 #define OP_PCL_IPSEC_AES_CBC 0x0c00 #define OP_PCL_IPSEC_AES_CTR 0x0d00 #define OP_PCL_IPSEC_AES_XTS 0x1600 -- cgit v0.10.2 From d21323fbdf5ae3ade5e1427a1b6fdaae670ffbbf Mon Sep 17 00:00:00 2001 From: Andrei Varvara Date: Thu, 18 Jul 2013 15:10:16 +0300 Subject: dpa_offload: dpa_ipsec - Add support for NULL encryption The dpa ipsec component was not supporting ESP with NULL encryption. The patch is adding this suport. 
Signed-off-by: Andrei Varvara Change-Id: I89111367e60c4592289687b0a13d84ac2911d23c Reviewed-on: http://git.am.freescale.net:8181/3473 Tested-by: Review Code-CDREVIEW Reviewed-by: Fleming Andrew-AFLEMING Reviewed-by: Schmitt Richard-B43082 Tested-by: Schmitt Richard-B43082 diff --git a/drivers/staging/fsl_dpa_offload/dpa_ipsec.c b/drivers/staging/fsl_dpa_offload/dpa_ipsec.c index fb076f4..63f067f 100644 --- a/drivers/staging/fsl_dpa_offload/dpa_ipsec.c +++ b/drivers/staging/fsl_dpa_offload/dpa_ipsec.c @@ -808,7 +808,7 @@ static int init_sa_manager(struct dpa_ipsec *dpa_ipsec) return -ENOMEM; } - /* fill with ids */ + /* fill with IDs */ for (i = 0; i < sa_mng->max_num_sa; i++) if (cq_put_4bytes(sa_mng->sa_id_cq, i) < 0) { pr_err("Could not fill SA ID management CQ\n"); @@ -865,7 +865,8 @@ static int init_sa_manager(struct dpa_ipsec *dpa_ipsec) return -ENOMEM; } - /* Allocate space for the SEC descriptor which is holding the + /* + * Allocate space for the SEC descriptor which is holding the * preheader information and the share descriptor. * Required 64 byte align. 
*/ @@ -2310,7 +2311,7 @@ static int create_sa_fq_pair(struct dpa_ipsec_sa *sa, } static inline int set_cipher_auth_alg(enum dpa_ipsec_cipher_alg alg_suite, - uint16_t *cipher, uint16_t *auth) + uint16_t *cipher, uint16_t *auth) { *cipher = ipsec_algs[alg_suite].enc_alg; *auth = ipsec_algs[alg_suite].auth_alg; @@ -2969,7 +2970,7 @@ static int check_sa_params(struct dpa_ipsec_sa_params *sa_params) /* * check crypto params: * - an authentication key must always be provided - * - a cipher key must be provided if alg != NULL encryption + * - a cipher key must be provided if algorithm != NULL encryption */ err = set_cipher_auth_alg(sa_params->crypto_params.alg_suite, @@ -2977,20 +2978,23 @@ static int check_sa_params(struct dpa_ipsec_sa_params *sa_params) if (err < 0) return err; - if (sa_params->crypto_params.auth_key == NULL) { + if (!sa_params->crypto_params.auth_key || + sa_params->crypto_params.auth_key_len == 0) { pr_err("A valid authentication key must be provided\n"); return -EINVAL; } - /* TODO: check cipher_key ONLY if alg != null encryption */ - if (sa_params->crypto_params.cipher_key == NULL) { + /* Check cipher_key only if the cipher algorithm isn't NULL encryption*/ + if (cipher_alg != OP_PCL_IPSEC_NULL_ENC && + (!sa_params->crypto_params.cipher_key || + sa_params->crypto_params.cipher_key_len == 0)) { pr_err("A valid cipher key must be provided\n"); return -EINVAL; } if (sa_params->sa_dir == DPA_IPSEC_OUTBOUND) { - if ((sa_params->sa_out_params.ip_hdr_size == 0) || - (sa_params->sa_out_params.outer_ip_header == NULL)) { + if (sa_params->sa_out_params.ip_hdr_size == 0 || + !sa_params->sa_out_params.outer_ip_header) { pr_err("Transport mode is not currently supported." 
"Specify a valid encapsulation header\n"); return -EINVAL; diff --git a/drivers/staging/fsl_dpa_offload/dpa_ipsec.h b/drivers/staging/fsl_dpa_offload/dpa_ipsec.h index c08419f..25eca88 100644 --- a/drivers/staging/fsl_dpa_offload/dpa_ipsec.h +++ b/drivers/staging/fsl_dpa_offload/dpa_ipsec.h @@ -76,21 +76,21 @@ /* DPA_IPSEC_CIPHER_ALG_3DES_CBC_HMAC_SHA_512_256 */ \ IPSEC_ALGS_ENTRY(3DES, HMAC_SHA2_512_256), \ /* DPA_IPSEC_CIPHER_ALG_NULL_ENC_HMAC_96_MD5_128 */ \ - IPSEC_ALGS_ENTRY(INVALID_ALG_ID, HMAC_MD5_96), \ + IPSEC_ALGS_ENTRY(NULL_ENC, HMAC_MD5_96), \ /* DPA_IPSEC_CIPHER_ALG_NULL_ENC_HMAC_96_SHA_160 */ \ - IPSEC_ALGS_ENTRY(INVALID_ALG_ID, HMAC_SHA1_96), \ + IPSEC_ALGS_ENTRY(NULL_ENC, HMAC_SHA1_96), \ /* DPA_IPSEC_CIPHER_ALG_NULL_ENC_AES_XCBC_MAC_96 */ \ - IPSEC_ALGS_ENTRY(INVALID_ALG_ID, AES_XCBC_MAC_96), \ + IPSEC_ALGS_ENTRY(NULL_ENC, AES_XCBC_MAC_96), \ /* DPA_IPSEC_CIPHER_ALG_NULL_ENC_HMAC_MD5_128 */ \ - IPSEC_ALGS_ENTRY(INVALID_ALG_ID, HMAC_MD5_128), \ + IPSEC_ALGS_ENTRY(NULL_ENC, HMAC_MD5_128), \ /* DPA_IPSEC_CIPHER_ALG_NULL_ENC_HMAC_SHA_160 */ \ - IPSEC_ALGS_ENTRY(INVALID_ALG_ID, HMAC_SHA1_160), \ + IPSEC_ALGS_ENTRY(NULL_ENC, HMAC_SHA1_160), \ /* DPA_IPSEC_CIPHER_ALG_NULL_ENC_HMAC_SHA_256_128 */ \ - IPSEC_ALGS_ENTRY(INVALID_ALG_ID, HMAC_SHA2_256_128), \ + IPSEC_ALGS_ENTRY(NULL_ENC, HMAC_SHA2_256_128), \ /* DPA_IPSEC_CIPHER_ALG_NULL_ENC_HMAC_SHA_384_192 */ \ - IPSEC_ALGS_ENTRY(INVALID_ALG_ID, HMAC_SHA2_384_192), \ + IPSEC_ALGS_ENTRY(NULL_ENC, HMAC_SHA2_384_192), \ /* DPA_IPSEC_CIPHER_ALG_NULL_ENC_HMAC_SHA_512_256 */ \ - IPSEC_ALGS_ENTRY(INVALID_ALG_ID, HMAC_SHA2_512_256), \ + IPSEC_ALGS_ENTRY(NULL_ENC, HMAC_SHA2_512_256), \ /* DPA_IPSEC_CIPHER_ALG_AES_CBC_HMAC_96_MD5_128 */ \ IPSEC_ALGS_ENTRY(AES_CBC, HMAC_MD5_96), \ /* DPA_IPSEC_CIPHER_ALG_AES_CBC_HMAC_96_SHA_160 */ \ -- cgit v0.10.2 From 114290031bd35a31b7cd60681d984e63e0e70840 Mon Sep 17 00:00:00 2001 From: Andrei Varvara Date: Thu, 18 Jul 2013 16:06:26 +0300 Subject: dpa_offload: dpa_ipsec - Update 
create SA function to set hmd to invalid DPA Classifier API was updated by adding a new attribute (hmd) to the struct dpa_cls_tbl_next_table_desc. DPA IPSec is using the classifier feature for linking two tables, if inbound policy verification is required but does not require the hmd option. A minor update had to be made to set the hmd to -1 (not valid) otherwise the 0 value would have been considered valid by classifier and crash since no hmd is required here. Signed-off-by: Andrei Varvara Change-Id: I76eccbf6a60b7d563540de539be5144112c1567c Reviewed-on: http://git.am.freescale.net:8181/3474 Tested-by: Review Code-CDREVIEW Reviewed-by: Fleming Andrew-AFLEMING Reviewed-by: Schmitt Richard-B43082 Tested-by: Schmitt Richard-B43082 diff --git a/drivers/staging/fsl_dpa_offload/dpa_ipsec.c b/drivers/staging/fsl_dpa_offload/dpa_ipsec.c index 63f067f..797bee5 100644 --- a/drivers/staging/fsl_dpa_offload/dpa_ipsec.c +++ b/drivers/staging/fsl_dpa_offload/dpa_ipsec.c @@ -3439,6 +3439,7 @@ int dpa_ipsec_create_sa(int dpa_ipsec_id, memset(&action, 0, sizeof(action)); action.type = DPA_CLS_TBL_ACTION_NEXT_TABLE; action.next_table_params.next_td = inbpol_td; + action.next_table_params.hmd = DPA_OFFLD_DESC_NONE; action.enable_statistics = FALSE; err = set_flow_id_action(sa, &action); if (err < 0) { -- cgit v0.10.2 From a402d57e6cddc8bc8709ee0bd4769e9a9d6f0309 Mon Sep 17 00:00:00 2001 From: Geoff Thorpe Date: Fri, 5 Jul 2013 16:28:37 -0400 Subject: qman: remove eqcr_cmode It was unused, and confusing efforts to implement support for EQCR_CI-stashing. 
Signed-off-by: Geoff Thorpe Change-Id: Ia390a4b4b54efba60b4e9b12a73b7d1ec88bf530 Reviewed-on: http://git.am.freescale.net:8181/3697 Tested-by: Review Code-CDREVIEW Reviewed-by: Wang Haiying-R54964 Reviewed-by: Schmitt Richard-B43082 diff --git a/drivers/staging/fsl_qbman/qman_high.c b/drivers/staging/fsl_qbman/qman_high.c index 22f512b..9f969bd 100644 --- a/drivers/staging/fsl_qbman/qman_high.c +++ b/drivers/staging/fsl_qbman/qman_high.c @@ -387,7 +387,7 @@ struct qman_portal *qman_create_portal( * for (de)reference... */ __p->addr.addr_ce = config->addr_virt[DPA_PORTAL_CE]; __p->addr.addr_ci = config->addr_virt[DPA_PORTAL_CI]; - if (qm_eqcr_init(__p, qm_eqcr_pvb, qm_eqcr_cce)) { + if (qm_eqcr_init(__p, qm_eqcr_pvb)) { pr_err("Qman EQCR initialisation failed\n"); goto fail_eqcr; } diff --git a/drivers/staging/fsl_qbman/qman_low.h b/drivers/staging/fsl_qbman/qman_low.h index 1205ac7..a610394 100644 --- a/drivers/staging/fsl_qbman/qman_low.h +++ b/drivers/staging/fsl_qbman/qman_low.h @@ -131,10 +131,6 @@ enum qm_eqcr_pmode { /* matches QCSP_CFG::EPM */ qm_eqcr_pce = 1, /* PI index, cache-enabled */ qm_eqcr_pvb = 2 /* valid-bit */ }; -enum qm_eqcr_cmode { /* s/w-only */ - qm_eqcr_cci, /* CI index, cache-inhibited */ - qm_eqcr_cce /* CI index, cache-enabled */ -}; enum qm_dqrr_dmode { /* matches QCSP_CFG::DP */ qm_dqrr_dpush = 0, /* SDQCR + VDQCR */ qm_dqrr_dpull = 1 /* PDQCR */ @@ -173,7 +169,6 @@ struct qm_eqcr { #ifdef CONFIG_FSL_DPA_CHECKING u32 busy; enum qm_eqcr_pmode pmode; - enum qm_eqcr_cmode cmode; #endif }; @@ -282,8 +277,7 @@ static inline void EQCR_INC(struct qm_eqcr *eqcr) } static inline int qm_eqcr_init(struct qm_portal *portal, - enum qm_eqcr_pmode pmode, - __maybe_unused enum qm_eqcr_cmode cmode) + enum qm_eqcr_pmode pmode) { /* This use of 'register', as well as all other occurances, is because * it has been observed to generate much faster code with gcc than is @@ -305,7 +299,6 @@ static inline int qm_eqcr_init(struct qm_portal *portal, #ifdef 
CONFIG_FSL_DPA_CHECKING eqcr->busy = 0; eqcr->pmode = pmode; - eqcr->cmode = cmode; #endif cfg = (qm_in(CFG) & 0x00ffffff) | ((pmode & 0x3) << 24); /* QCSP_CFG::EPM */ @@ -436,7 +429,6 @@ static inline u8 qm_eqcr_cci_update(struct qm_portal *portal) { register struct qm_eqcr *eqcr = &portal->eqcr; u8 diff, old_ci = eqcr->ci; - DPA_ASSERT(eqcr->cmode == qm_eqcr_cci); eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1); diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci); eqcr->available += diff; @@ -446,7 +438,6 @@ static inline u8 qm_eqcr_cci_update(struct qm_portal *portal) static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal) { __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr; - DPA_ASSERT(eqcr->cmode == qm_eqcr_cce); qm_cl_touch_ro(EQCR_CI); } @@ -454,7 +445,6 @@ static inline u8 qm_eqcr_cce_update(struct qm_portal *portal) { register struct qm_eqcr *eqcr = &portal->eqcr; u8 diff, old_ci = eqcr->ci; - DPA_ASSERT(eqcr->cmode == qm_eqcr_cce); eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1); qm_cl_invalidate(EQCR_CI); diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci); -- cgit v0.10.2 From aa57f7af3c9857ba40c8e1dd8dbefd5f0aa67d94 Mon Sep 17 00:00:00 2001 From: Haiying Wang Date: Mon, 8 Jul 2013 12:09:12 -0400 Subject: qman: Add CI-stashing support for QMan rev3.0 or later Signed-off-by: Geoff Thorpe Signed-off-by: Haiying Wang Change-Id: Icda6a8547559ab45ceea7160cde566ca022e92e8 Reviewed-on: http://git.am.freescale.net:8181/3698 Reviewed-by: Schmitt Richard-B43082 Tested-by: Schmitt Richard-B43082 diff --git a/drivers/staging/fsl_qbman/fsl_usdpaa.c b/drivers/staging/fsl_qbman/fsl_usdpaa.c index bce680f..0167b7b 100644 --- a/drivers/staging/fsl_qbman/fsl_usdpaa.c +++ b/drivers/staging/fsl_qbman/fsl_usdpaa.c @@ -326,7 +326,8 @@ static int init_qm_portal(struct qm_portal_config *config, qm_dqrr_cdc_consume_n(portal, 0xffff); /* Initialize the EQCR */ - if (qm_eqcr_init(portal, qm_eqcr_pvb, qm_eqcr_cce)) { + if (qm_eqcr_init(portal, 
qm_eqcr_pvb, + portal->eqcr.use_eqcr_ci_stashing ? 3 : 0, 1)) { pr_err("Qman EQCR initialisation failed\n"); return 1; } diff --git a/drivers/staging/fsl_qbman/qman_high.c b/drivers/staging/fsl_qbman/qman_high.c index 9f969bd..eaa2e39 100644 --- a/drivers/staging/fsl_qbman/qman_high.c +++ b/drivers/staging/fsl_qbman/qman_high.c @@ -361,9 +361,6 @@ loop: goto loop; } - - - struct qman_portal *qman_create_portal( struct qman_portal *portal, const struct qm_portal_config *config, @@ -382,12 +379,20 @@ struct qman_portal *qman_create_portal( __p = &portal->p; + portal->p.eqcr.use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ? + 1 : 0); + /* prep the low-level portal struct with the mapped addresses from the * config, everything that follows depends on it and "config" is more * for (de)reference... */ __p->addr.addr_ce = config->addr_virt[DPA_PORTAL_CE]; __p->addr.addr_ci = config->addr_virt[DPA_PORTAL_CI]; - if (qm_eqcr_init(__p, qm_eqcr_pvb)) { + /* + * If CI-stashing is used, the current defaults use a threshold of 3, + * and stash with high-than-DQRR priority. + */ + if (qm_eqcr_init(__p, qm_eqcr_pvb, + portal->p.eqcr.use_eqcr_ci_stashing ? 3 : 0, 1)) { pr_err("Qman EQCR initialisation failed\n"); goto fail_eqcr; } @@ -1986,10 +1991,23 @@ static inline struct qm_eqcr_entry *try_eq_start(struct qman_portal **p, (*p)->eqci_owned = fq; } #endif - avail = qm_eqcr_get_avail(&(*p)->p); - if (avail < 2) - update_eqcr_ci(*p, avail); - eq = qm_eqcr_start(&(*p)->p); + if ((*p)->p.eqcr.use_eqcr_ci_stashing) { + /* + * The stashing case is easy, only update if we need to in + * order to try and liberate ring entries. + */ + eq = qm_eqcr_start_stash(&(*p)->p); + } else { + /* + * The non-stashing case is harder, need to prefetch ahead of + * time. 
+ */ + avail = qm_eqcr_get_avail(&(*p)->p); + if (avail < 2) + update_eqcr_ci(*p, avail); + eq = qm_eqcr_start_no_stash(&(*p)->p); + } + if (unlikely(!eq)) { #ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) && diff --git a/drivers/staging/fsl_qbman/qman_low.h b/drivers/staging/fsl_qbman/qman_low.h index a610394..715c3a8 100644 --- a/drivers/staging/fsl_qbman/qman_low.h +++ b/drivers/staging/fsl_qbman/qman_low.h @@ -166,6 +166,7 @@ enum qm_mr_cmode { /* matches QCSP_CFG::MM */ struct qm_eqcr { struct qm_eqcr_entry *ring, *cursor; u8 ci, available, ithresh, vbit; + u32 use_eqcr_ci_stashing; #ifdef CONFIG_FSL_DPA_CHECKING u32 busy; enum qm_eqcr_pmode pmode; @@ -277,7 +278,9 @@ static inline void EQCR_INC(struct qm_eqcr *eqcr) } static inline int qm_eqcr_init(struct qm_portal *portal, - enum qm_eqcr_pmode pmode) + enum qm_eqcr_pmode pmode, + unsigned int eq_stash_thresh, + int eq_stash_prio) { /* This use of 'register', as well as all other occurances, is because * it has been observed to generate much faster code with gcc than is @@ -301,6 +304,8 @@ static inline int qm_eqcr_init(struct qm_portal *portal, eqcr->pmode = pmode; #endif cfg = (qm_in(CFG) & 0x00ffffff) | + (eq_stash_thresh << 28) | /* QCSP_CFG: EST */ + (eq_stash_prio << 26) | /* QCSP_CFG: EP */ ((pmode & 0x3) << 24); /* QCSP_CFG::EPM */ qm_out(CFG, cfg); return 0; @@ -321,7 +326,8 @@ static inline void qm_eqcr_finish(struct qm_portal *portal) pr_crit("EQCR destroyed unquiesced\n"); } -static inline struct qm_eqcr_entry *qm_eqcr_start(struct qm_portal *portal) +static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal + *portal) { register struct qm_eqcr *eqcr = &portal->eqcr; DPA_ASSERT(!eqcr->busy); @@ -336,6 +342,28 @@ static inline struct qm_eqcr_entry *qm_eqcr_start(struct qm_portal *portal) return eqcr->cursor; } +static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal + *portal) +{ + register struct qm_eqcr *eqcr = &portal->eqcr; + 
u8 diff, old_ci; + + DPA_ASSERT(!eqcr->busy); + if (!eqcr->available) { + old_ci = eqcr->ci; + eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1); + diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci); + eqcr->available += diff; + if (!diff) + return NULL; + } +#ifdef CONFIG_FSL_DPA_CHECKING + eqcr->busy = 1; +#endif + dcbz_64(eqcr->cursor); + return eqcr->cursor; +} + static inline void qm_eqcr_abort(struct qm_portal *portal) { __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr; -- cgit v0.10.2 From 7f25123acd0cdbf95b1cf7d66b74d03f3b30f6ae Mon Sep 17 00:00:00 2001 From: Mandy Lavi Date: Sun, 7 Jul 2013 13:56:29 +0300 Subject: fmd: better support debug in preemptible kernel Prevent sleeping function to be called from invalid context Signed-off-by: Mandy Lavi Change-Id: I1fa7619489248c823d55a96b00cc4b7bd75bbf6d Reviewed-on: http://git.am.freescale.net:8181/3430 Tested-by: Review Code-CDREVIEW Reviewed-by: Fleming Andrew-AFLEMING diff --git a/drivers/net/ethernet/freescale/fman/src/xx/xx_linux.c b/drivers/net/ethernet/freescale/fman/src/xx/xx_linux.c index 9c2371d..132459b 100644 --- a/drivers/net/ethernet/freescale/fman/src/xx/xx_linux.c +++ b/drivers/net/ethernet/freescale/fman/src/xx/xx_linux.c @@ -168,7 +168,7 @@ void XX_Print(char *str, ...) #ifdef CONFIG_SMP if (vsnprintf (buf, BUF_SIZE, str, args) >= BUF_SIZE) printk(KERN_WARNING "Illegal string to print!\n more than %d characters.\n\tString was not printed completelly.\n", BUF_SIZE); - printk (KERN_CRIT "cpu%d: %s",raw_smp_processor_id(), buf); + printk(KERN_CRIT "cpu%d/%d: %s", get_hard_smp_processor_id(raw_smp_processor_id()), raw_smp_processor_id(), buf); #else vprintk(str, args); #endif /* CONFIG_SMP */ @@ -186,7 +186,8 @@ void XX_Fprint(void *file, char *str, ...) 
#ifdef CONFIG_SMP if (vsnprintf (buf, BUF_SIZE, str, args) >= BUF_SIZE) printk(KERN_WARNING "Illegal string to print!\n more than %d characters.\n\tString was not printed completelly.\n", BUF_SIZE); - printk (KERN_CRIT "cpu%d: %s", raw_smp_processor_id(), buf); + printk (KERN_CRIT "cpu%d/%d: %s",hard_smp_processor_id(), smp_processor_id(), buf); + #else vprintk(str, args); #endif /* CONFIG_SMP */ @@ -353,14 +354,14 @@ uint32_t XX_DisableAllIntr(void) { unsigned long flags; - local_irq_save(flags); + local_irq_save_nort(flags); return (uint32_t)flags; } void XX_RestoreAllIntr(uint32_t flags) { - local_irq_restore((unsigned long)flags); + local_irq_restore_nort((unsigned long)flags); } t_Error XX_Call( uint32_t qid, t_Error (* f)(t_Handle), t_Handle id, t_Handle appId, uint16_t flags ) -- cgit v0.10.2 From 17b2f42e8fee7ad98cc8f7ed7e50aa5b76a3acab Mon Sep 17 00:00:00 2001 From: Mandy Lavi Date: Thu, 30 May 2013 10:32:43 +0300 Subject: fmd: add more details to fman_chosen_node_device_tree Documentaion Signed-off-by: Mandy Lavi Change-Id: I8bf5700cadb8e40dd68728f8aa78ba1a4d70f054 Reviewed-on: http://git.am.freescale.net:8181/3429 Tested-by: Review Code-CDREVIEW Reviewed-by: Fleming Andrew-AFLEMING diff --git a/Documentation/devicetree/bindings/powerpc/fsl/fman_chosen_node_device_tree.txt b/Documentation/devicetree/bindings/powerpc/fsl/fman_chosen_node_device_tree.txt old mode 100755 new mode 100644 index 39df24a..abdd9ad --- a/Documentation/devicetree/bindings/powerpc/fsl/fman_chosen_node_device_tree.txt +++ b/Documentation/devicetree/bindings/powerpc/fsl/fman_chosen_node_device_tree.txt @@ -120,7 +120,11 @@ PROPERTIES Usage: optional Value type: Definition: Specifies the number of profiles for this port (first value) - and the dfault virtual port eelative id (second value). + and the dfault virtual port relative id (second value). 
Note that the + kernel FMan driver automatically initializes the default VSP (using + the configuration from the Linux dpaa Ethernet Driver of the equivalent + port) while the rest of the VSPs should be initialized by the user + (using the appropriate IOCTLs). For more information please refer to the FMan User's guide - errors-to-discard -- cgit v0.10.2 From c1efd40363ac2bb4737ae9a22d047eda8510bfe8 Mon Sep 17 00:00:00 2001 From: Mandy Lavi Date: Tue, 16 Jul 2013 11:03:57 +0300 Subject: fmd: handle erratum A006981 Changes open dma allocation to workaround erratum A006981 Also verify that it applies only to B4-rev1 while rev2 should have this erratum fixed Signed-off-by: Mandy Lavi Change-Id: Icc0552269da9ad80763e8ad1394d6ad67ee143a8 Reviewed-on: http://git.am.freescale.net:8181/3428 Tested-by: Review Code-CDREVIEW Reviewed-by: Fleming Andrew-AFLEMING diff --git a/drivers/net/ethernet/freescale/fman/Peripherals/FM/Port/fm_port.c b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Port/fm_port.c index 6b92941..b2dbcac 100644 --- a/drivers/net/ethernet/freescale/fman/Peripherals/FM/Port/fm_port.c +++ b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Port/fm_port.c @@ -2444,6 +2444,17 @@ t_Handle FM_PORT_Config(t_FmPortParams *p_FmPortParams) } } +#ifdef FM_HEAVY_TRAFFIC_SEQUENCER_HANG_ERRATA_FMAN_A006981 + if ((p_FmPort->fmRevInfo.majorRev == 6) && + (p_FmPort->fmRevInfo.minorRev == 0) && + ((p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) || + (p_FmPort->portType == e_FM_PORT_TYPE_TX))) + { + p_FmPort->openDmas.num = 16; + p_FmPort->openDmas.extra = 0; + } +#endif /* FM_HEAVY_TRAFFIC_SEQUENCER_HANG_ERRATA_FMAN_A006981 */ + if (p_FmPort->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND) p_FmPort->p_FmPortDriverParam->syncReq = DEFAULT_PORT_syncReqForHc; else diff --git a/drivers/net/ethernet/freescale/fman/Peripherals/FM/fm.c b/drivers/net/ethernet/freescale/fman/Peripherals/FM/fm.c index 2fdc4c8..a44bdab 100644 --- 
a/drivers/net/ethernet/freescale/fman/Peripherals/FM/fm.c +++ b/drivers/net/ethernet/freescale/fman/Peripherals/FM/fm.c @@ -3313,6 +3313,10 @@ t_Error FmSetNumOfOpenDmas(t_Handle h_Fm, p_Fm->p_FmStateStruct->fmId)); #else if ((p_Fm->p_FmStateStruct->revInfo.majorRev >= 6) && +#ifdef FM_HEAVY_TRAFFIC_SEQUENCER_HANG_ERRATA_FMAN_A006981 + !((p_Fm->p_FmStateStruct->revInfo.majorRev == 6) && + (p_Fm->p_FmStateStruct->revInfo.minorRev == 0)) && +#endif /* FM_HEAVY_TRAFFIC_SEQUENCER_HANG_ERRATA_FMAN_A006981 */ (p_Fm->p_FmStateStruct->accumulatedNumOfOpenDmas - oldVal + numOfOpenDmas > DMA_THRESH_MAX_COMMQ + 1)) RETURN_ERROR(MAJOR, E_NOT_AVAILABLE, ("Requested numOfOpenDmas for fm%d exceeds DMA Command queue (%d)", diff --git a/drivers/net/ethernet/freescale/fman/inc/integrations/T4240/dpaa_integration_ext.h b/drivers/net/ethernet/freescale/fman/inc/integrations/T4240/dpaa_integration_ext.h index 64349ea..b6780b9 100644 --- a/drivers/net/ethernet/freescale/fman/inc/integrations/T4240/dpaa_integration_ext.h +++ b/drivers/net/ethernet/freescale/fman/inc/integrations/T4240/dpaa_integration_ext.h @@ -264,6 +264,7 @@ typedef enum #define FM_WRONG_RESET_VALUES_ERRATA_FMAN_A005127 #define FM_RX_FIFO_CORRUPT_ERRATA_10GMAC_A006320 #define FM_OP_NO_VSP_NO_RELEASE_ERRATA_FMAN_A006675 +#define FM_HEAVY_TRAFFIC_SEQUENCER_HANG_ERRATA_FMAN_A006981 #define FM_BCB_ERRATA_BMI_SW001 #define FM_LEN_CHECK_ERRATA_FMAN_SW002 -- cgit v0.10.2 From f3d9ad20b340df7c4ed1d7b8ce6a54d4272b3821 Mon Sep 17 00:00:00 2001 From: Mandy Lavi Date: Mon, 24 Jun 2013 15:54:35 +0300 Subject: fmd: remove unused macros Signed-off-by: Mandy Lavi Change-Id: If2fda57aaf42719d0a3aae2256d513856583913b Reviewed-on: http://git.am.freescale.net:8181/3427 Tested-by: Review Code-CDREVIEW Reviewed-by: Fleming Andrew-AFLEMING diff --git a/drivers/net/ethernet/freescale/fman/inc/Peripherals/fm_pcd_ext.h b/drivers/net/ethernet/freescale/fman/inc/Peripherals/fm_pcd_ext.h index d9bea95..00750ba 100644 --- 
a/drivers/net/ethernet/freescale/fman/inc/Peripherals/fm_pcd_ext.h +++ b/drivers/net/ethernet/freescale/fman/inc/Peripherals/fm_pcd_ext.h @@ -99,16 +99,6 @@ #define FM_PCD_SW_PRS_SIZE 0x00000800 /**< Total size of SW parser area */ #define FM_PCD_PRS_SW_OFFSET 0x00000040 /**< Size of illegal addresses at the beginning of the SW parser area */ -#if (DPAA_VERSION >= 11) -#define FM_PCD_PRS_SW_PATCHES_SIZE 0x00000240 /**< Number of bytes saved for patches */ -#else -#define FM_PCD_PRS_SW_PATCHES_SIZE 0x00000200 /**< Number of bytes saved for patches */ -#endif /* (DPAA_VERSION >= 11) */ - -#define FM_PCD_PRS_SW_TAIL_SIZE 4 /**< Number of bytes that must be cleared at - the end of the SW parser area */ -#define FM_SW_PRS_MAX_IMAGE_SIZE (FM_PCD_SW_PRS_SIZE-FM_PCD_PRS_SW_OFFSET-FM_PCD_PRS_SW_TAIL_SIZE-FM_PCD_PRS_SW_PATCHES_SIZE) - /**< Maximum size of SW parser code */ #define FM_PCD_MAX_MANIP_INSRT_TEMPLATE_SIZE 128 /**< Maximum size of insertion template for insert manipulation */ diff --git a/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm.c b/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm.c index f87529d..d896b27 100644 --- a/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm.c +++ b/drivers/net/ethernet/freescale/fman/src/wrapper/lnxwrp_ioctls_fm.c @@ -126,18 +126,6 @@ #error Error: please synchronize IOC_ defines! #endif -#if CMP_IOC_DEFINE(FM_PCD_PRS_SW_PATCHES_SIZE) -#error Error: please synchronize IOC_ defines! -#endif - -#if CMP_IOC_DEFINE(FM_PCD_PRS_SW_TAIL_SIZE) -#error Error: please synchronize IOC_ defines! -#endif - -#if CMP_IOC_DEFINE(FM_SW_PRS_MAX_IMAGE_SIZE) -#error Error: please synchronize IOC_ defines! -#endif - #if CMP_IOC_DEFINE(FM_PCD_MAX_MANIP_INSRT_TEMPLATE_SIZE) #error Error: please synchronize IOC_ defines! 
#endif diff --git a/include/uapi/linux/fmd/Peripherals/fm_pcd_ioctls.h b/include/uapi/linux/fmd/Peripherals/fm_pcd_ioctls.h index 8c07f18..542f935 100644 --- a/include/uapi/linux/fmd/Peripherals/fm_pcd_ioctls.h +++ b/include/uapi/linux/fmd/Peripherals/fm_pcd_ioctls.h @@ -78,7 +78,7 @@ #define IOC_FM_PCD_PRS_NUM_OF_HDRS 16 /**< Number of headers supported by HW parser */ #define IOC_FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS (32 - IOC_FM_PCD_MAX_NUM_OF_PRIVATE_HDRS) - /**< Number of distinction units is limited by + /**< Number of distinction units is limited by register size (32 bits) minus reserved bits for private headers. */ #define IOC_FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS 4 /**< Maximum number of interchangeable headers @@ -96,16 +96,6 @@ #define IOC_FM_PCD_SW_PRS_SIZE 0x00000800 /**< Total size of SW parser area */ #define IOC_FM_PCD_PRS_SW_OFFSET 0x00000040 /**< Size of illegal addresses at the beginning of the SW parser area */ -#if DPAA_VERSION >= 11 -#define IOC_FM_PCD_PRS_SW_PATCHES_SIZE 0x00000240 /**< Number of bytes saved for patches */ -#else -#define IOC_FM_PCD_PRS_SW_PATCHES_SIZE 0x00000200 /**< Number of bytes saved for patches */ -#endif - -#define IOC_FM_PCD_PRS_SW_TAIL_SIZE 4 /**< Number of bytes that must be cleared at - the end of the SW parser area */ -#define IOC_FM_SW_PRS_MAX_IMAGE_SIZE (IOC_FM_PCD_SW_PRS_SIZE-IOC_FM_PCD_PRS_SW_OFFSET-IOC_FM_PCD_PRS_SW_TAIL_SIZE-IOC_FM_PCD_PRS_SW_PATCHES_SIZE) - /**< Maximum size of SW parser code */ #define IOC_FM_PCD_MAX_MANIP_INSRT_TEMPLATE_SIZE 128 /**< Maximum size of insertion template for insert manipulation */ -- cgit v0.10.2 From a1ba3e25a3a9c67e6cb64669ef2df7694954a483 Mon Sep 17 00:00:00 2001 From: Alexandru Badicioiu Date: Thu, 22 Aug 2013 10:27:42 +0300 Subject: dpa_offload : fix kernel segfault in ipsec_offload application Buffer pool id for frames enqueued to ipsec_offload inbound macless interface must match the interface bpid. 
Signed-off-by: Alexandru Badicioiu diff --git a/drivers/staging/fsl_dpa_offload/dts/b4860qds-usdpaa-shared-interfaces.dts b/drivers/staging/fsl_dpa_offload/dts/b4860qds-usdpaa-shared-interfaces.dts index 537d3f2..7baa9b1 100644 --- a/drivers/staging/fsl_dpa_offload/dts/b4860qds-usdpaa-shared-interfaces.dts +++ b/drivers/staging/fsl_dpa_offload/dts/b4860qds-usdpaa-shared-interfaces.dts @@ -149,7 +149,7 @@ /* Define frame queues for the OH port*/ /* */ fsl,qman-frame-queues-oh = <0x6e 1 0x6f 1>; - fsl,bman-buffer-pools = <&bp9>; + fsl,bman-buffer-pools = <&bp16>; fsl,fman-oh-port = <&fman0_oh2>; }; dpa_fman0_oh3: dpa-fman0-oh@3 { -- cgit v0.10.2 From 0b8dc3c3c79d1b7ccd6bcffc0cabc8dde66ef478 Mon Sep 17 00:00:00 2001 From: Poonam Aggrwal Date: Wed, 14 Aug 2013 04:47:00 +0530 Subject: Updates to device trees for B4860 for DSP clusters and their L2 caches B4860 has 1 PPC core cluster and 3 DSP core clusters. Similarly B4420 has 1 PPC core cluster and 1 DSP core cluster. Each DSP core cluster consists of 2 SC3900 cores and a shared L2 cache. 1. Add DSP clusters for B4420 2. Reorganized the L2 cache nodes such that they now appear in only the soc specific dtsi files(b4860si-post.dtsi and b4420si-post.dtsi). Earlier they were shown partly in common b4si-post.dtsi and si specific b4860si-post.dtsi files . 3. 
Fixed an issue in b4860si-pre.dtsi, now DSP cluster correctly point to their respective L2 caches Signed-off-by: Poonam Aggrwal Signed-off-by: Shaveta Leekha Change-Id: Ie09007f4c596fc5947e0b4b005225b8b1f9aa443 Reviewed-on: http://git.am.freescale.net:8181/4005 Reviewed-by: Sethi Varun-B16395 Reviewed-by: Rivera Jose-B46482 Tested-by: Rivera Jose-B46482 diff --git a/arch/powerpc/boot/dts/fsl/b4420si-post.dtsi b/arch/powerpc/boot/dts/fsl/b4420si-post.dtsi index f039c52..0198d22 100644 --- a/arch/powerpc/boot/dts/fsl/b4420si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/b4420si-post.dtsi @@ -318,5 +318,13 @@ L2: l2-cache-controller@c20000 { compatible = "fsl,b4420-l2-cache-controller"; + reg = <0xc20000 0x1000>; + next-level-cache = <&cpc>; + }; + + L2_2: l2-cache-controller@c60000 { + compatible = "fsl,b4420-l2-cache-controller"; + reg = <0xc60000 0x1000>; + next-level-cache = <&cpc>; }; }; diff --git a/arch/powerpc/boot/dts/fsl/b4420si-pre.dtsi b/arch/powerpc/boot/dts/fsl/b4420si-pre.dtsi index d56ac22..ee7263b 100644 --- a/arch/powerpc/boot/dts/fsl/b4420si-pre.dtsi +++ b/arch/powerpc/boot/dts/fsl/b4420si-pre.dtsi @@ -90,4 +90,27 @@ next-level-cache = <&L2>; }; }; + + dsp-clusters { + #address-cells = <1>; + #size-cells = <0>; + + dsp-cluster0 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,sc3900-cluster"; + reg = <0>; + + dsp0: dsp@0 { + compatible = "fsl,sc3900"; + reg = <0>; + next-level-cache = <&L2_2>; + }; + dsp1: dsp@1 { + compatible = "fsl,sc3900"; + reg = <1>; + next-level-cache = <&L2_2>; + }; + }; + }; }; diff --git a/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi b/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi index 80d0f90..4d35d4f 100644 --- a/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi @@ -516,6 +516,8 @@ L2: l2-cache-controller@c20000 { compatible = "fsl,b4860-l2-cache-controller"; + reg = <0xc20000 0x1000>; + next-level-cache = <&cpc>; }; L2_2: l2-cache-controller@c60000 { diff --git 
a/arch/powerpc/boot/dts/fsl/b4860si-pre.dtsi b/arch/powerpc/boot/dts/fsl/b4860si-pre.dtsi index 61f89b8..e344468 100644 --- a/arch/powerpc/boot/dts/fsl/b4860si-pre.dtsi +++ b/arch/powerpc/boot/dts/fsl/b4860si-pre.dtsi @@ -137,12 +137,12 @@ dsp2: dsp@2 { compatible = "fsl,sc3900"; reg = <2>; - next-level-cache = <&L2_2>; + next-level-cache = <&L2_3>; }; dsp3: dsp@3 { compatible = "fsl,sc3900"; reg = <3>; - next-level-cache = <&L2_2>; + next-level-cache = <&L2_3>; }; }; @@ -155,12 +155,12 @@ dsp4: dsp@4 { compatible = "fsl,sc3900"; reg = <4>; - next-level-cache = <&L2_2>; + next-level-cache = <&L2_4>; }; dsp5: dsp@5 { compatible = "fsl,sc3900"; reg = <5>; - next-level-cache = <&L2_2>; + next-level-cache = <&L2_4>; }; }; }; diff --git a/arch/powerpc/boot/dts/fsl/b4si-post.dtsi b/arch/powerpc/boot/dts/fsl/b4si-post.dtsi index 6e2781e..55ced62 100644 --- a/arch/powerpc/boot/dts/fsl/b4si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/b4si-post.dtsi @@ -329,9 +329,4 @@ /include/ "qoriq-duart-1.dtsi" /include/ "qoriq-sec5.3-0.dtsi" - L2: l2-cache-controller@c20000 { - compatible = "fsl,b4-l2-cache-controller"; - reg = <0xc20000 0x1000>; - next-level-cache = <&cpc>; - }; }; -- cgit v0.10.2 From be3b76372527f0a1b823168c92023dc4b02603b5 Mon Sep 17 00:00:00 2001 From: Varun Sethi Date: Mon, 22 Jul 2013 19:20:25 +0530 Subject: Introduce an API for setting operation mapping index per window. This API can be used for setting operation mapping per DMA window. 
Signed-off-by: Varun Sethi Change-Id: Iea6d7993f09bddbaae94c475fd192f5106784bde Reviewed-on: http://git.am.freescale.net:8181/3440 Reviewed-by: Yoder Stuart-B08248 Reviewed-by: Fleming Andrew-AFLEMING Tested-by: Fleming Andrew-AFLEMING Conflicts: drivers/iommu/fsl_pamu.c drivers/iommu/fsl_pamu.h drivers/iommu/fsl_pamu_domain.c Change-Id: Iea6d7993f09bddbaae94c475fd192f5106784bde Reviewed-on: http://git.am.freescale.net:8181/3440 Reviewed-by: Schmitt Richard-B43082 Tested-by: Schmitt Richard-B43082 diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c index 1f69fd1..a646275 100644 --- a/drivers/iommu/fsl_pamu.c +++ b/drivers/iommu/fsl_pamu.c @@ -295,6 +295,18 @@ int pamu_update_paace_stash(int liodn, u32 subwin, u32 value) } set_bf(paace->impl_attr, PAACE_IA_CID, value); + switch (field) { + case PAACE_STASH_FIELD: + set_bf(paace->impl_attr, PAACE_IA_CID, value); + break; + case PAACE_OMI_FIELD: + set_bf(paace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED); + paace->op_encode.index_ot.omi = value; + break; + default: + pr_debug("Invalid field, can't update\n"); + return -EINVAL; + } mb(); return 0; diff --git a/drivers/iommu/fsl_pamu.h b/drivers/iommu/fsl_pamu.h index 83cbd26..480ec23 100644 --- a/drivers/iommu/fsl_pamu.h +++ b/drivers/iommu/fsl_pamu.h @@ -384,6 +384,12 @@ struct ome { #define EOE_WWSAOL 0x1e /* Write with stash allocate only and lock */ #define EOE_VALID 0x80 +enum paace_field { + PAACE_STASH_FIELD, + PAACE_OMI_FIELD, + PAACE_FIELD_MAX, +}; + /* Function prototypes */ int pamu_domain_init(void); int pamu_enable_liodn(int liodn); diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c index 5b54505..07ec977 100644 --- a/drivers/iommu/fsl_pamu_domain.c +++ b/drivers/iommu/fsl_pamu_domain.c @@ -794,6 +794,49 @@ static int configure_domain_geometry(struct iommu_domain *domain, void *data) return 0; } +/* Set the domain operation mapping attribute */ +static int configure_domain_op_map(struct fsl_dma_domain *dma_domain, + 
void *data) +{ + struct dma_window *wnd; + unsigned long flags; + struct pamu_attr_info attr_info; + int ret, i; + struct iommu_omi_attribute *omi_attr = data; + + spin_lock_irqsave(&dma_domain->domain_lock, flags); + + if (!dma_domain->win_arr) { + pr_err("Number of windows not configured\n"); + spin_unlock_irqrestore(&dma_domain->domain_lock, flags); + return -ENODEV; + } + + if (omi_attr->omi >= OMI_MAX) { + pr_err("Invalid operation mapping index\n"); + spin_unlock_irqrestore(&dma_domain->domain_lock, flags); + return -EINVAL; + } + + if (~omi_attr->window == 0) { + wnd = &dma_domain->win_arr[0]; + for (i = 0; i < dma_domain->win_cnt; i++) + wnd[i].omi = omi_attr->omi; + } else { + wnd = &dma_domain->win_arr[omi_attr->window]; + wnd->omi = omi_attr->omi; + } + + attr_info.window = omi_attr->window; + attr_info.field = PAACE_OMI_FIELD; + attr_info.value = omi_attr->omi; + ret = update_domain_attr(dma_domain, &attr_info); + + spin_unlock_irqrestore(&dma_domain->domain_lock, flags); + + return ret; +} + /* Set the domain stash attribute */ static int configure_domain_stash(struct fsl_dma_domain *dma_domain, void *data) { @@ -869,6 +912,9 @@ static int fsl_pamu_set_domain_attr(struct iommu_domain *domain, case DOMAIN_ATTR_PAMU_ENABLE: ret = configure_domain_dma_state(dma_domain, *(int *)data); break; + case DOMAIN_ATTR_PAMU_OP_MAP: + ret = configure_domain_op_map(dma_domain, data); + break; default: pr_err("Unsupported attribute type\n"); ret = -EINVAL; @@ -896,6 +942,30 @@ static int fsl_pamu_get_domain_attr(struct iommu_domain *domain, case DOMAIN_ATTR_FSL_PAMUV1: *(int *)data = DOMAIN_ATTR_FSL_PAMUV1; break; + case DOMAIN_ATTR_PAMU_STASH: { + struct iommu_stash_attribute *stash_attr = data; + struct dma_window *wnd; + + if (stash_attr->window >= dma_domain->win_cnt || + ~stash_attr->window == 0) + return -EINVAL; + + wnd = &dma_domain->win_arr[stash_attr->window]; + memcpy(stash_attr, &wnd->stash_attr, sizeof(struct iommu_stash_attribute)); + break; + } + case 
DOMAIN_ATTR_PAMU_OP_MAP: { + struct iommu_omi_attribute *omi_attr = data; + struct dma_window *wnd; + + if (omi_attr->window >= dma_domain->win_cnt || + ~omi_attr->window == 0) + return -EINVAL; + + wnd = &dma_domain->win_arr[omi_attr->window]; + omi_attr->omi = wnd->omi; + break; + } default: pr_err("Unsupported attribute type\n"); ret = -EINVAL; diff --git a/include/linux/iommu.h b/include/linux/iommu.h index f6c0fc7..bfcc382 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -59,6 +59,11 @@ struct iommu_stash_attribute { u32 cache; /* cache to stash to: L1,L2,L3 */ }; +struct iommu_omi_attribute { + u32 omi; /* index in the operation mapping table */ + u32 window; /* ~0 indicates all windows */ +}; + struct iommu_domain_geometry { dma_addr_t aperture_start; /* First address that can be mapped */ dma_addr_t aperture_end; /* Last address that can be mapped */ @@ -96,6 +101,7 @@ enum iommu_attr { DOMAIN_ATTR_PAMU_STASH, DOMAIN_ATTR_PAMU_ENABLE, DOMAIN_ATTR_FSL_PAMUV1, + DOMAIN_ATTR_PAMU_OP_MAP, DOMAIN_ATTR_MAX, }; -- cgit v0.10.2 From 99baf88f7b5da4642d76b60f781cd3eaee5b3822 Mon Sep 17 00:00:00 2001 From: Varun Sethi Date: Mon, 22 Jul 2013 18:33:12 +0530 Subject: Move operation mapping index to iommu.h. Represent operation mappings as an enum. These have been moved to iommu.h to support IOMMU API for setting operation mappings per window. 
Signed-off-by: Varun Sethi Change-Id: I900274f8ed703b9e10a4b3fb7d6653bd8c3a080d Reviewed-on: http://git.am.freescale.net:8181/3438 Reviewed-by: Yoder Stuart-B08248 Reviewed-by: Fleming Andrew-AFLEMING Tested-by: Fleming Andrew-AFLEMING diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c index a646275..9fb4ee2 100644 --- a/drivers/iommu/fsl_pamu.c +++ b/drivers/iommu/fsl_pamu.c @@ -35,11 +35,6 @@ #include "fsl_pamu.h" -/* define indexes for each operation mapping scenario */ -#define OMI_QMAN 0x00 -#define OMI_FMAN 0x01 -#define OMI_QMAN_PRIV 0x02 -#define OMI_CAAM 0x03 /* Handling access violations */ #define make64(high, low) (((u64)(high) << 32) | (low)) diff --git a/include/linux/iommu.h b/include/linux/iommu.h index bfcc382..9318d9f 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -81,6 +81,14 @@ struct iommu_domain { #define IOMMU_CAP_CACHE_COHERENCY 0x1 #define IOMMU_CAP_INTR_REMAP 0x2 /* isolates device intrs */ +/* define indexes for each operation mapping scenario */ +enum omap_index { + OMI_QMAN, + OMI_FMAN, + OMI_QMAN_PRIV, + OMI_CAAM, +}; + /* * Following constraints are specifc to PAMUV1: * -aperture must be power of 2, and naturally aligned -- cgit v0.10.2 From 3e9416a16c9c71bae0ecbe476bd3f832bab52f81 Mon Sep 17 00:00:00 2001 From: Varun Sethi Date: Mon, 22 Jul 2013 19:11:28 +0530 Subject: Make stash id and operation mapping index per window attributes. Stash ID and operation mapping can now be set per dma window. 
Signed-off-by: Varun Sethi Change-Id: I987abbcba0575fea1b43843c2bce342f4eae4df2 Reviewed-on: http://git.am.freescale.net:8181/3439 Reviewed-by: Yoder Stuart-B08248 Reviewed-by: Fleming Andrew-AFLEMING Tested-by: Fleming Andrew-AFLEMING Conflicts: drivers/iommu/fsl_pamu.c drivers/iommu/fsl_pamu.h drivers/iommu/fsl_pamu_domain.c diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c index 9fb4ee2..ddd0836 100644 --- a/drivers/iommu/fsl_pamu.c +++ b/drivers/iommu/fsl_pamu.c @@ -273,7 +273,7 @@ void pamu_free_subwins(int liodn) * Function used for updating stash destination for the coressponding * LIODN. */ -int pamu_update_paace_stash(int liodn, u32 subwin, u32 value) +int pamu_update_paace_field(int liodn, u32 subwin, int field, u32 value) { struct paace *paace; @@ -288,7 +288,6 @@ int pamu_update_paace_stash(int liodn, u32 subwin, u32 value) return -ENOENT; } } - set_bf(paace->impl_attr, PAACE_IA_CID, value); switch (field) { case PAACE_STASH_FIELD: diff --git a/drivers/iommu/fsl_pamu.h b/drivers/iommu/fsl_pamu.h index 480ec23..fd9dd6d 100644 --- a/drivers/iommu/fsl_pamu.h +++ b/drivers/iommu/fsl_pamu.h @@ -404,7 +404,7 @@ int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin_addr, u32 get_stash_id(u32 stash_dest_hint, u32 vcpu); void get_ome_index(u32 *omi_index, struct device *dev); -int pamu_update_paace_stash(int liodn, u32 subwin, u32 value); +int pamu_update_paace_field(int liodn, u32 subwin, int field, u32 value); int pamu_disable_spaace(int liodn, u32 subwin); u32 pamu_get_max_subwin_cnt(void); diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c index 07ec977..99c182d 100644 --- a/drivers/iommu/fsl_pamu_domain.c +++ b/drivers/iommu/fsl_pamu_domain.c @@ -122,10 +122,10 @@ static int map_subwins(int liodn, struct fsl_dma_domain *dma_domain) spin_lock(&iommu_lock); ret = pamu_config_spaace(liodn, dma_domain->win_cnt, i, sub_win_ptr[i].size, - ~(u32)0, + sub_win_ptr[i].omi, rpn, dma_domain->snoop_id, - 
dma_domain->stash_id, + sub_win_ptr[i].stash_id, (i > 0) ? 1 : 0, sub_win_ptr[i].prot); spin_unlock(&iommu_lock); @@ -149,9 +149,9 @@ static int map_win(int liodn, struct fsl_dma_domain *dma_domain) spin_lock(&iommu_lock); ret = pamu_config_ppaace(liodn, wnd_addr, wnd->size, - ~(u32)0, + wnd->omi, wnd->paddr >> PAMU_PAGE_SHIFT, - dma_domain->snoop_id, dma_domain->stash_id, + dma_domain->snoop_id, wnd->stash_id, 0, wnd->prot); spin_unlock(&iommu_lock); if (ret) @@ -181,10 +181,10 @@ static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr if (dma_domain->win_cnt > 1) { ret = pamu_config_spaace(liodn, dma_domain->win_cnt, wnd_nr, wnd->size, - ~(u32)0, + wnd->omi, wnd->paddr >> PAMU_PAGE_SHIFT, dma_domain->snoop_id, - dma_domain->stash_id, + wnd->stash_id, (wnd_nr > 0) ? 1 : 0, wnd->prot); if (ret) @@ -196,9 +196,9 @@ static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr ret = pamu_config_ppaace(liodn, wnd_addr, wnd->size, - ~(u32)0, + wnd->omi, wnd->paddr >> PAMU_PAGE_SHIFT, - dma_domain->snoop_id, dma_domain->stash_id, + dma_domain->snoop_id, wnd->stash_id, 0, wnd->prot); if (ret) pr_err("Window reconfiguration failed for liodn %d\n", liodn); @@ -209,29 +209,31 @@ static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr return ret; } -static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain, - u32 val) +struct pamu_attr_info { + u32 window; + int field; + u32 value; +}; + +static int update_liodn_attr(int liodn, struct fsl_dma_domain *dma_domain, + struct pamu_attr_info *attr_info) { int ret = 0, i; spin_lock(&iommu_lock); - if (!dma_domain->win_cnt) { - ret = pamu_update_paace_stash(liodn, 0, val); - if (ret) { - pr_err("Failed to update PAACE field for liodn %d\n ", liodn); - spin_unlock(&iommu_lock); - return ret; - } - } else { + + + if (~attr_info->window == 0) { for (i = 0; i < dma_domain->win_cnt; i++) { - ret = pamu_update_paace_stash(liodn, i, val); - if (ret) { - 
pr_err("Failed to update SPAACE %d field for liodn %d\n ", i, liodn); - spin_unlock(&iommu_lock); - return ret; - } + ret = pamu_update_paace_field(liodn, i, attr_info->field, + attr_info->value); + if (ret) + break; } - } + } else + ret = pamu_update_paace_field(liodn, attr_info->window, attr_info->field, + attr_info->value); + spin_unlock(&iommu_lock); return ret; @@ -263,7 +265,7 @@ static int pamu_set_liodn(int liodn, struct device *dev, if (!ret) ret = pamu_config_ppaace(liodn, window_addr, window_size, omi_index, 0, dma_domain->snoop_id, - dma_domain->stash_id, win_cnt, 0); + ~(u32)0, win_cnt, 0); spin_unlock(&iommu_lock); if (ret) { pr_err("PAMU PAACE configuration failed for liodn %d, win_cnt =%d\n", liodn, win_cnt); @@ -279,7 +281,7 @@ static int pamu_set_liodn(int liodn, struct device *dev, ret = pamu_config_spaace(liodn, win_cnt, i, subwin_size, omi_index, 0, dma_domain->snoop_id, - dma_domain->stash_id, + ~(u32)0, 0, 0); spin_unlock(&iommu_lock); if (ret) { @@ -320,7 +322,6 @@ static struct fsl_dma_domain *iommu_alloc_dma_domain(void) if (!domain) return NULL; - domain->stash_id = ~(u32)0; domain->snoop_id = ~(u32)0; domain->win_cnt = pamu_get_max_subwin_cnt(); domain->geom_size = 0; @@ -476,15 +477,20 @@ static int pamu_set_domain_geometry(struct fsl_dma_domain *dma_domain, return ret; } -/* Update stash destination for all LIODNs associated with the domain */ -static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val) +/* + * Update attribute for all LIODNs associated with the domain + * + */ +static int update_domain_attr(struct fsl_dma_domain *dma_domain, + struct pamu_attr_info *attr_info) { struct device_domain_info *info; int ret = 0; if (!list_empty(&dma_domain->devices)) { list_for_each_entry(info, &dma_domain->devices, link) { - ret = update_liodn_stash(info->liodn, dma_domain, val); + ret = update_liodn_attr(info->liodn, dma_domain, + attr_info); if (ret) break; } @@ -841,23 +847,45 @@ static int configure_domain_op_map(struct 
fsl_dma_domain *dma_domain, static int configure_domain_stash(struct fsl_dma_domain *dma_domain, void *data) { struct iommu_stash_attribute *stash_attr = data; + struct dma_window *wnd; unsigned long flags; - int ret; + u32 stash_id; + int ret, i; + struct pamu_attr_info attr_info; spin_lock_irqsave(&dma_domain->domain_lock, flags); - memcpy(&dma_domain->dma_stash, stash_attr, - sizeof(struct iommu_stash_attribute)); + if (!dma_domain->win_arr) { + pr_err("Number of windows not configured\n"); + spin_unlock_irqrestore(&dma_domain->domain_lock, flags); + return -ENODEV; + } - dma_domain->stash_id = get_stash_id(stash_attr->cache, + stash_id = get_stash_id(stash_attr->cache, stash_attr->cpu); - if (dma_domain->stash_id == ~(u32)0) { + if (~stash_id == 0) { pr_err("Invalid stash attributes\n"); spin_unlock_irqrestore(&dma_domain->domain_lock, flags); return -EINVAL; } - ret = update_domain_stash(dma_domain, dma_domain->stash_id); + if (~stash_attr->window == 0) { + wnd = &dma_domain->win_arr[0]; + for (i = 0; i < dma_domain->win_cnt; i++) { + wnd[i].stash_id = stash_id; + memcpy(&wnd[i].stash_attr, stash_attr, sizeof(struct iommu_stash_attribute)); + wnd[i].stash_attr.window = i; + } + } else { + wnd = &dma_domain->win_arr[stash_attr->window]; + wnd->stash_id = stash_id; + memcpy(&wnd->stash_attr, stash_attr, sizeof(struct iommu_stash_attribute)); + } + + attr_info.window = stash_attr->window; + attr_info.field = PAACE_STASH_FIELD; + attr_info.value = stash_id; + ret = update_domain_attr(dma_domain, &attr_info); spin_unlock_irqrestore(&dma_domain->domain_lock, flags); @@ -932,10 +960,6 @@ static int fsl_pamu_get_domain_attr(struct iommu_domain *domain, switch (attr_type) { - case DOMAIN_ATTR_PAMU_STASH: - memcpy((struct iommu_stash_attribute *) data, &dma_domain->dma_stash, - sizeof(struct iommu_stash_attribute)); - break; case DOMAIN_ATTR_PAMU_ENABLE: *(int *)data = dma_domain->enabled; break; @@ -1128,6 +1152,16 @@ static void fsl_pamu_remove_device(struct device 
*dev) iommu_group_remove_device(dev); } +static void dma_domain_init_windows(struct fsl_dma_domain *dma_domain) +{ + int i; + + for (i = 0; i < dma_domain->win_cnt; i++) { + dma_domain->win_arr[i].stash_id = ~(u32)0; + dma_domain->win_arr[i].omi = ~(u32)0; + } +} + static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count) { struct fsl_dma_domain *dma_domain = domain->priv; @@ -1171,6 +1205,7 @@ static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count) return -ENOMEM; } dma_domain->win_cnt = w_count; + dma_domain_init_windows(dma_domain); } spin_unlock_irqrestore(&dma_domain->domain_lock, flags); diff --git a/drivers/iommu/fsl_pamu_domain.h b/drivers/iommu/fsl_pamu_domain.h index 52dede7..1d55d25 100644 --- a/drivers/iommu/fsl_pamu_domain.h +++ b/drivers/iommu/fsl_pamu_domain.h @@ -22,10 +22,13 @@ #include "fsl_pamu.h" struct dma_window { - phys_addr_t paddr; + phys_addr_t paddr; u64 size; - int valid; - int prot; + struct iommu_stash_attribute stash_attr; + int valid; + int prot; + u32 stash_id; + u32 omi; }; struct fsl_dma_domain { @@ -67,9 +70,6 @@ struct fsl_dma_domain { */ int mapped; int enabled; - /* stash_id obtained from the stash attribute details */ - u32 stash_id; - struct iommu_stash_attribute dma_stash; u32 snoop_id; struct iommu_domain *iommu_domain; spinlock_t domain_lock; diff --git a/drivers/staging/fsl_qbman/qman_driver.c b/drivers/staging/fsl_qbman/qman_driver.c index 4c9f6c6..875c0df 100644 --- a/drivers/staging/fsl_qbman/qman_driver.c +++ b/drivers/staging/fsl_qbman/qman_driver.c @@ -487,6 +487,7 @@ static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu) } stash_attr.cpu = cpu; stash_attr.cache = IOMMU_ATTR_CACHE_L1; + stash_attr.window = ~(u32)0; ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_PAMU_STASH, &stash_attr); if (ret < 0) { diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 9318d9f..afdf87b 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -56,7 
+56,8 @@ enum stash_target { struct iommu_stash_attribute { u32 cpu; /* cpu number */ - u32 cache; /* cache to stash to: L1,L2,L3 */ + u32 cache; /* cache to stash to L1,L2,L3 */ + u32 window; /* ~0 indicates all windows */ }; struct iommu_omi_attribute { -- cgit v0.10.2 From 5db32addf766b3b58f4860860e27aef6edd23d95 Mon Sep 17 00:00:00 2001 From: Varun Sethi Date: Mon, 22 Jul 2013 19:33:54 +0530 Subject: Add operation mapping for PMAN. Setup and operation mapping index for PMAN. Signed-off-by: Varun Sethi Change-Id: I4384a247491293260c1da1d4cf6cfc3b2bec2034 Reviewed-on: http://git.am.freescale.net:8181/3441 Reviewed-by: Yoder Stuart-B08248 Reviewed-by: Fleming Andrew-AFLEMING Tested-by: Fleming Andrew-AFLEMING diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c index ddd0836..e85c039 100644 --- a/drivers/iommu/fsl_pamu.c +++ b/drivers/iommu/fsl_pamu.c @@ -617,6 +617,7 @@ found_cpu_node: #define QMAN_PORTAL_PAACE 2 #define BMAN_PAACE 3 #define FMAN_PAACE 4 +#define PMAN_PAACE 5 /** * Setup operation mapping and stash destinations for QMAN and QMAN portal. 
@@ -652,6 +653,10 @@ static void setup_dpaa_paace(struct paace *ppaace, int paace_type) set_bf(ppaace->impl_attr, PAACE_IA_CID, get_stash_id(IOMMU_ATTR_CACHE_L3, 0)); break; + case PMAN_PAACE: + set_bf(ppaace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED); + ppaace->op_encode.index_ot.omi = OMI_PMAN; + break; } } @@ -696,6 +701,13 @@ static void __init setup_omt(struct ome *omt) ome = &omt[OMI_CAAM]; ome->moe[IOE_READ_IDX] = EOE_VALID | EOE_READI; ome->moe[IOE_WRITE_IDX] = EOE_VALID | EOE_WRITE; + + /* Configure OMI_PMAN */ + ome = &omt[OMI_PMAN]; + ome->moe[IOE_DIRECT0_IDX] = EOE_LDEC | EOE_VALID; + ome->moe[IOE_DIRECT1_IDX] = EOE_LDEC | EOE_VALID; + + } /* @@ -815,6 +827,8 @@ static void __init setup_liodns(void) setup_dpaa_paace(ppaace, QMAN_PAACE); if (of_device_is_compatible(node, "fsl,bman")) setup_dpaa_paace(ppaace, BMAN_PAACE); + if (of_device_is_compatible(node, "fsl,pman")) + setup_dpaa_paace(ppaace, PMAN_PAACE); #ifdef CONFIG_FSL_FMAN_CPC_STASH if (of_device_is_compatible(node, "fsl,fman-port-10g-rx") || of_device_is_compatible(node, "fsl,fman-port-1g-rx")) diff --git a/include/linux/iommu.h b/include/linux/iommu.h index afdf87b..42a2508 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -88,6 +88,8 @@ enum omap_index { OMI_FMAN, OMI_QMAN_PRIV, OMI_CAAM, + OMI_PMAN, + OMI_MAX, }; /* -- cgit v0.10.2 From 6bfea4daf3ef082e1d401c4a8e3807a6e28bc68a Mon Sep 17 00:00:00 2001 From: Varun Sethi Date: Mon, 22 Jul 2013 19:35:49 +0530 Subject: Add operation mapping for DMA controller and Maple. These are required for ALU DSP stashing use case. 
Signed-off-by: Varun Sethi Change-Id: I7efacb81b60d23e6e5f91632547f8b9a04028a1f Reviewed-on: http://git.am.freescale.net:8181/3442 Reviewed-by: Yoder Stuart-B08248 Reviewed-by: Fleming Andrew-AFLEMING Tested-by: Fleming Andrew-AFLEMING diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c index e85c039..4d466d6 100644 --- a/drivers/iommu/fsl_pamu.c +++ b/drivers/iommu/fsl_pamu.c @@ -707,7 +707,26 @@ static void __init setup_omt(struct ome *omt) ome->moe[IOE_DIRECT0_IDX] = EOE_LDEC | EOE_VALID; ome->moe[IOE_DIRECT1_IDX] = EOE_LDEC | EOE_VALID; + /* Configure OMI_DMA */ + ome = &omt[OMI_DMA]; + ome->moe[IOE_READ_IDX] = EOE_VALID | EOE_RSA; + ome->moe[IOE_EREAD0_IDX] = EOE_VALID | EOE_RSA; + ome->moe[IOE_WRITE_IDX] = EOE_VALID | EOE_WWSA; + ome->moe[IOE_EWRITE0_IDX] = EOE_VALID | EOE_WWSA; + + /* Configure OMI_DMA_READI */ + ome = &omt[OMI_DMA_READI]; + ome->moe[IOE_READ_IDX] = EOE_VALID | EOE_READI; + ome->moe[IOE_EREAD0_IDX] = EOE_VALID | EOE_READI; + ome->moe[IOE_WRITE_IDX] = EOE_VALID | EOE_WWSA; + ome->moe[IOE_EWRITE0_IDX] = EOE_VALID | EOE_WWSA; + /* Configure OMI_MAPLE */ + ome = &omt[OMI_MAPLE]; + ome->moe[IOE_READ_IDX] = EOE_VALID | EOE_RSA; + ome->moe[IOE_EREAD0_IDX] = EOE_VALID | EOE_RSA; + ome->moe[IOE_WRITE_IDX] = EOE_VALID | EOE_WWSA; + ome->moe[IOE_EWRITE0_IDX] = EOE_VALID | EOE_WWSA; } /* diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 42a2508..5153c0c 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -89,6 +89,9 @@ enum omap_index { OMI_QMAN_PRIV, OMI_CAAM, OMI_PMAN, + OMI_DMA, + OMI_DMA_READI, + OMI_MAPLE, OMI_MAX, }; -- cgit v0.10.2 From 4124c6d7744e1bc02ce5c55eb8ce7c009520f5b3 Mon Sep 17 00:00:00 2001 From: Wang Dongsheng Date: Fri, 9 Aug 2013 10:23:00 +0800 Subject: powerpc/85xx: add hardware automatically enter altivec idle state Each core's AltiVec unit may be placed into a power savings mode by turning off power to the unit. 
Core hardware will automatically power down the AltiVec unit after no AltiVec instructions have executed in N cycles. The AltiVec power-control is triggered by hardware. Signed-off-by: Wang Dongsheng Change-Id: I613fba4492d3d65dcf903d13735bc9e45e5d443c Reviewed-on: http://git.am.freescale.net:8181/3731 Tested-by: Review Code-CDREVIEW Reviewed-by: Wood Scott-B07421 Reviewed-by: Rivera Jose-B46482 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index cad9faa..16fbb66 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -995,6 +995,8 @@ #define PVR_8560 0x80200000 #define PVR_VER_E500V1 0x8020 #define PVR_VER_E500V2 0x8021 +#define PVR_VER_E6500 0x8040 + /* * For the 8xx processors, all of them report the same PVR family for * the PowerPC core. The various versions of these processors must be diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h index 2bc7704..885ffdb 100644 --- a/arch/powerpc/include/asm/reg_booke.h +++ b/arch/powerpc/include/asm/reg_booke.h @@ -179,6 +179,7 @@ #define SPRN_L2CSR1 0x3FA /* L2 Data Cache Control and Status Register 1 */ #define SPRN_DCCR 0x3FA /* Data Cache Cacheability Register */ #define SPRN_ICCR 0x3FB /* Instruction Cache Cacheability Register */ +#define SPRN_PWRMGTCR0 0x3FB /* Power management control register 0 */ #define SPRN_SVR 0x3FF /* System Version Register */ /* @@ -225,6 +226,9 @@ #define CCR1_DPC 0x00000100 /* Disable L1 I-Cache/D-Cache parity checking */ #define CCR1_TCS 0x00000080 /* Timer Clock Select */ +/* Bit definitions for PWRMGTCR0. */ +#define PWRMGTCR0_ALTIVEC_IDLE (1 << 22) /* Altivec idle enable */ + /* Bit definitions for the MCSR. 
*/ #define MCSR_MCS 0x80000000 /* Machine Check Summary */ #define MCSR_IB 0x40000000 /* Instruction PLB Error */ diff --git a/arch/powerpc/platforms/85xx/common.c b/arch/powerpc/platforms/85xx/common.c index 1269a10..43025d0 100644 --- a/arch/powerpc/platforms/85xx/common.c +++ b/arch/powerpc/platforms/85xx/common.c @@ -7,10 +7,22 @@ */ #include +#include + #include #include "mpc85xx.h" +#define MAX_BIT 64 + +#define ALTIVEC_COUNT_OFFSET 16 +#define ALTIVEC_IDLE_COUNT_MASK 0x003f0000 + +/* + * FIXME - We don't know the AltiVec application scenarios. + */ +#define ALTIVEC_IDLE_TIME 1000 /* 1ms */ + static struct of_device_id __initdata mpc85xx_common_ids[] = { { .type = "soc", }, { .compatible = "soc", }, @@ -82,3 +94,53 @@ void __init mpc85xx_cpm2_pic_init(void) irq_set_chained_handler(irq, cpm2_cascade); } #endif + +static unsigned int get_idle_ticks_bit(unsigned int us) +{ + unsigned int cycle; + + /* + * The time control by TB turn over bit, so we need + * to be divided by 2. + */ + cycle = (us / 2) * tb_ticks_per_usec; + + return ilog2(cycle) + 1; +} + +static void setup_altivec_idle(void *unused) +{ + u32 altivec_idle, pvr, bit; + + pvr = mfspr(SPRN_PVR); + + /* AltiVec idle feature only exists for E6500 */ + if (PVR_VER(pvr) != PVR_VER_E6500) + return; + + /* Fix erratum, e6500 rev1 not support altivec idle */ + if (PVR_REV(pvr) < 0x20) + return; + + /* Enable Altivec Idle */ + altivec_idle = mfspr(SPRN_PWRMGTCR0); + altivec_idle |= PWRMGTCR0_ALTIVEC_IDLE; + + /* Set Automatic AltiVec Idle Count */ + /* clear count */ + altivec_idle &= ~ALTIVEC_IDLE_COUNT_MASK; + + /* set count */ + bit = get_idle_ticks_bit(ALTIVEC_IDLE_TIME); + altivec_idle |= ((MAX_BIT - bit) << ALTIVEC_COUNT_OFFSET); + + mtspr(SPRN_PWRMGTCR0, altivec_idle); +} + +static int __init setup_idle_hw_governor(void) +{ + on_each_cpu(setup_altivec_idle, NULL, 1); + + return 0; +} +late_initcall(setup_idle_hw_governor); -- cgit v0.10.2 From adbc4a23b8b75801076ff366f709186e2a0a07a8 Mon Sep 17 
00:00:00 2001 From: Wang Dongsheng Date: Thu, 15 Aug 2013 10:52:23 +0800 Subject: powerpc/85xx: add hardware automatically enter pw20 state Using hardware features make core automatically enter PW20 state. Set a TB count to hardware, the effective count begins when PW10 is entered. When the effective period has expired, the core will proceed from PW10 to PW20 if no exit conditions have occurred during the period. Signed-off-by: Wang Dongsheng Change-Id: I199d6bedeb045f421da69d8ce99c45bea517fc32 Reviewed-on: http://git.am.freescale.net:8181/3948 Tested-by: Review Code-CDREVIEW Reviewed-by: Wood Scott-B07421 Reviewed-by: Rivera Jose-B46482 diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h index 885ffdb..5152c36 100644 --- a/arch/powerpc/include/asm/reg_booke.h +++ b/arch/powerpc/include/asm/reg_booke.h @@ -228,6 +228,7 @@ /* Bit definitions for PWRMGTCR0. */ #define PWRMGTCR0_ALTIVEC_IDLE (1 << 22) /* Altivec idle enable */ +#define PWRMGTCR0_PW20_WAIT (1 << 14) /* PW20 state enable bit */ /* Bit definitions for the MCSR. */ #define MCSR_MCS 0x80000000 /* Machine Check Summary */ diff --git a/arch/powerpc/platforms/85xx/common.c b/arch/powerpc/platforms/85xx/common.c index 43025d0..09978f5 100644 --- a/arch/powerpc/platforms/85xx/common.c +++ b/arch/powerpc/platforms/85xx/common.c @@ -17,12 +17,22 @@ #define ALTIVEC_COUNT_OFFSET 16 #define ALTIVEC_IDLE_COUNT_MASK 0x003f0000 +#define PW20_COUNT_OFFSET 8 +#define PW20_IDLE_COUNT_MASK 0x00003f00 /* * FIXME - We don't know the AltiVec application scenarios. */ #define ALTIVEC_IDLE_TIME 1000 /* 1ms */ +/* + * FIXME - We don't know, what time should we let the core into PW20 state. + * because we don't know the current state of the cpu load. And threads are + * independent, so we can not know the state of different thread has been + * idle. 
+ */ +#define PW20_IDLE_TIME 1000 /* 1ms */ + static struct of_device_id __initdata mpc85xx_common_ids[] = { { .type = "soc", }, { .compatible = "soc", }, @@ -95,6 +105,23 @@ void __init mpc85xx_cpm2_pic_init(void) } #endif +static bool has_pw20_altivec_idle(void) +{ + u32 pvr; + + pvr = mfspr(SPRN_PVR); + + /* PW20 & AltiVec idle feature only exists for E6500 */ + if (PVR_VER(pvr) != PVR_VER_E6500) + return false; + + /* Fix erratum, e6500 rev1 does not support PW20 & AltiVec idle */ + if (PVR_REV(pvr) < 0x20) + return false; + + return true; +} + static unsigned int get_idle_ticks_bit(unsigned int us) { unsigned int cycle; @@ -110,16 +137,9 @@ static unsigned int get_idle_ticks_bit(unsigned int us) static void setup_altivec_idle(void *unused) { - u32 altivec_idle, pvr, bit; - - pvr = mfspr(SPRN_PVR); + u32 altivec_idle, bit; - /* AltiVec idle feature only exists for E6500 */ - if (PVR_VER(pvr) != PVR_VER_E6500) - return; - - /* Fix erratum, e6500 rev1 not support altivec idle */ - if (PVR_REV(pvr) < 0x20) + if (!has_pw20_altivec_idle()) return; /* Enable Altivec Idle */ @@ -137,9 +157,33 @@ static void setup_altivec_idle(void *unused) mtspr(SPRN_PWRMGTCR0, altivec_idle); } +static void setup_pw20_idle(void *unused) +{ + u32 pw20_idle, bit; + + if (!has_pw20_altivec_idle()) + return; + + pw20_idle = mfspr(SPRN_PWRMGTCR0); + + /* set PW20_WAIT bit, enable pw20 */ + pw20_idle |= PWRMGTCR0_PW20_WAIT; + + /* Set Automatic PW20 Core Idle Count */ + /* clear count */ + pw20_idle &= ~PW20_IDLE_COUNT_MASK; + + /* set count */ + bit = get_idle_ticks_bit(PW20_IDLE_TIME); + pw20_idle |= ((MAX_BIT - bit) << PW20_COUNT_OFFSET); + + mtspr(SPRN_PWRMGTCR0, pw20_idle); +} + static int __init setup_idle_hw_governor(void) { on_each_cpu(setup_altivec_idle, NULL, 1); + on_each_cpu(setup_pw20_idle, NULL, 1); return 0; } -- cgit v0.10.2 From 77b906e591b171f06d505c39e0651779a9c75bea Mon Sep 17 00:00:00 2001 From: Roy Pledge Date: Tue, 13 Aug 2013 11:25:33 -0400 Subject: Add API to 
allocate specific portals based on index. This allows an application to get back the exact portal it was previously using by specifing the portals index value. Signed-off-by: Roy Pledge Change-Id: I8233816f0519731eb65b3671d68a01266eee42dd Reviewed-on: http://git.am.freescale.net:8181/4002 Tested-by: Review Code-CDREVIEW Reviewed-by: Ladouceur Jeffrey-R11498 Reviewed-by: Wang Haiying-R54964 Reviewed-by: Rivera Jose-B46482 diff --git a/drivers/staging/fsl_qbman/bman_driver.c b/drivers/staging/fsl_qbman/bman_driver.c index 06b77ed..27a5b0f 100644 --- a/drivers/staging/fsl_qbman/bman_driver.c +++ b/drivers/staging/fsl_qbman/bman_driver.c @@ -198,11 +198,34 @@ static struct bm_portal_config *get_pcfg(struct list_head *list) return pcfg; } +static struct bm_portal_config *get_pcfg_idx(struct list_head *list, + uint32_t idx) +{ + struct bm_portal_config *pcfg; + if (list_empty(list)) + return NULL; + list_for_each_entry(pcfg, list, list) { + if (pcfg->public_cfg.index == idx) { + list_del(&pcfg->list); + return pcfg; + } + } + return NULL; +} + struct bm_portal_config *bm_get_unused_portal(void) { + return bm_get_unused_portal_idx(QBMAN_ANY_PORTAL_IDX); +} + +struct bm_portal_config *bm_get_unused_portal_idx(uint32_t idx) +{ struct bm_portal_config *ret; spin_lock(&unused_pcfgs_lock); - ret = get_pcfg(&unused_pcfgs); + if (idx == QBMAN_ANY_PORTAL_IDX) + ret = get_pcfg(&unused_pcfgs); + else + ret = get_pcfg_idx(&unused_pcfgs, idx); spin_unlock(&unused_pcfgs_lock); return ret; } diff --git a/drivers/staging/fsl_qbman/bman_private.h b/drivers/staging/fsl_qbman/bman_private.h index 53aa21b..2df9857 100644 --- a/drivers/staging/fsl_qbman/bman_private.h +++ b/drivers/staging/fsl_qbman/bman_private.h @@ -80,6 +80,7 @@ const struct bm_portal_config *bman_destroy_affine_portal(void); /* Hooks from fsl_usdpaa.c to bman_driver.c */ struct bm_portal_config *bm_get_unused_portal(void); +struct bm_portal_config *bm_get_unused_portal_idx(uint32_t idx); void bm_put_unused_portal(struct 
bm_portal_config *pcfg); void bm_set_liodns(struct bm_portal_config *pcfg); diff --git a/drivers/staging/fsl_qbman/fsl_usdpaa.c b/drivers/staging/fsl_qbman/fsl_usdpaa.c index 0167b7b..aebbc15 100644 --- a/drivers/staging/fsl_qbman/fsl_usdpaa.c +++ b/drivers/staging/fsl_qbman/fsl_usdpaa.c @@ -1047,7 +1047,8 @@ static long ioctl_portal_map(struct file *fp, struct ctx *ctx, return -ENOMEM; memcpy(&mapping->user, arg, sizeof(mapping->user)); if (mapping->user.type == usdpaa_portal_qman) { - mapping->qportal = qm_get_unused_portal(); + mapping->qportal = + qm_get_unused_portal_idx(mapping->user.index); if (!mapping->qportal) { ret = -ENODEV; goto err_get_portal; @@ -1055,13 +1056,16 @@ static long ioctl_portal_map(struct file *fp, struct ctx *ctx, mapping->phys = &mapping->qportal->addr_phys[0]; mapping->user.channel = mapping->qportal->public_cfg.channel; mapping->user.pools = mapping->qportal->public_cfg.pools; + mapping->user.index = mapping->qportal->public_cfg.index; } else if (mapping->user.type == usdpaa_portal_bman) { - mapping->bportal = bm_get_unused_portal(); + mapping->bportal = + bm_get_unused_portal_idx(mapping->user.index); if (!mapping->bportal) { ret = -ENODEV; goto err_get_portal; } mapping->phys = &mapping->bportal->addr_phys[0]; + mapping->user.index = mapping->bportal->public_cfg.index; } else { ret = -EINVAL; goto err_copy_from_user; @@ -1255,11 +1259,13 @@ static long usdpaa_ioctl_compat(struct file *fp, unsigned int cmd, if (copy_from_user(&input, a, sizeof(input))) return -EFAULT; converted.type = input.type; + converted.index = input.index; ret = ioctl_portal_map(fp, ctx, &converted); input.addr.cinh = ptr_to_compat(converted.addr.cinh); input.addr.cena = ptr_to_compat(converted.addr.cena); input.channel = converted.channel; input.pools = converted.pools; + input.index = converted.index; if (copy_to_user(a, &input, sizeof(input))) return -EFAULT; return ret; diff --git a/drivers/staging/fsl_qbman/qman_driver.c 
b/drivers/staging/fsl_qbman/qman_driver.c index 875c0df..b2f60d7 100644 --- a/drivers/staging/fsl_qbman/qman_driver.c +++ b/drivers/staging/fsl_qbman/qman_driver.c @@ -454,6 +454,21 @@ static struct qm_portal_config *get_pcfg(struct list_head *list) return pcfg; } +static struct qm_portal_config *get_pcfg_idx(struct list_head *list, u32 idx) +{ + struct qm_portal_config *pcfg; + if (list_empty(list)) + return NULL; + list_for_each_entry(pcfg, list, list) { + if (pcfg->public_cfg.index == idx) { + list_del(&pcfg->list); + return pcfg; + } + } + return NULL; +} + + static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu) { int ret; @@ -530,11 +545,14 @@ _iommu_domain_free: iommu_domain_free(pcfg->iommu_domain); } -struct qm_portal_config *qm_get_unused_portal(void) +struct qm_portal_config *qm_get_unused_portal_idx(u32 idx) { struct qm_portal_config *ret; spin_lock(&unused_pcfgs_lock); - ret = get_pcfg(&unused_pcfgs); + if (idx == QBMAN_ANY_PORTAL_IDX) + ret = get_pcfg(&unused_pcfgs); + else + ret = get_pcfg_idx(&unused_pcfgs, idx); spin_unlock(&unused_pcfgs_lock); /* Bind stashing LIODNs to the CPU we are currently executing on, and * set the portal to use the stashing request queue corresonding to the @@ -549,6 +567,11 @@ struct qm_portal_config *qm_get_unused_portal(void) return ret; } +struct qm_portal_config *qm_get_unused_portal() +{ + return qm_get_unused_portal_idx(QBMAN_ANY_PORTAL_IDX); +} + void qm_put_unused_portal(struct qm_portal_config *pcfg) { spin_lock(&unused_pcfgs_lock); diff --git a/drivers/staging/fsl_qbman/qman_private.h b/drivers/staging/fsl_qbman/qman_private.h index 6179943..a701006 100644 --- a/drivers/staging/fsl_qbman/qman_private.h +++ b/drivers/staging/fsl_qbman/qman_private.h @@ -218,6 +218,8 @@ void qman_destroy_portal(struct qman_portal *qm); /* Hooks from fsl_usdpaa.c to qman_driver.c */ struct qm_portal_config *qm_get_unused_portal(void); +struct qm_portal_config *qm_get_unused_portal_idx(uint32_t idx); + void 
qm_put_unused_portal(struct qm_portal_config *pcfg); void qm_set_liodns(struct qm_portal_config *pcfg); diff --git a/include/linux/fsl_usdpaa.h b/include/linux/fsl_usdpaa.h index de017a6..fbf9480 100644 --- a/include/linux/fsl_usdpaa.h +++ b/include/linux/fsl_usdpaa.h @@ -165,9 +165,17 @@ enum usdpaa_portal_type { usdpaa_portal_bman, }; +#define QBMAN_ANY_PORTAL_IDX 0xffffffff + struct usdpaa_ioctl_portal_map { /* Input parameter, is a qman or bman portal required. */ + enum usdpaa_portal_type type; + /* Specifes a specific portal index to map or QBMAN_ANY_PORTAL_IDX + for don't care. The portal index will be populated by the + driver when the ioctl() successfully completes */ + uint32_t index; + /* Return value if the map succeeds, this gives the mapped * cache-inhibited (cinh) and cache-enabled (cena) addresses. */ struct usdpaa_portal_map { @@ -183,6 +191,10 @@ struct usdpaa_ioctl_portal_map { struct compat_usdpaa_ioctl_portal_map { /* Input parameter, is a qman or bman portal required. */ enum usdpaa_portal_type type; + /* Specifes a specific portal index to map or QBMAN_ANY_PORTAL_IDX + for don't care. The portal index will be populated by the + driver when the ioctl() successfully completes */ + uint32_t index; /* Return value if the map succeeds, this gives the mapped * cache-inhibited (cinh) and cache-enabled (cena) addresses. */ struct usdpaa_portal_map_compat { -- cgit v0.10.2 From 1f8783124e8be23a5d6941c640ab1538d3cd589b Mon Sep 17 00:00:00 2001 From: Roy Pledge Date: Wed, 7 Aug 2013 14:56:53 -0400 Subject: Modify USDPAA DMA mapping code to allow non power of 4 mappings This patch modifies the USDPAA code to allow non power of 4 DMA maps. The code will use multiple TLB1 entries if needed. DMA maps are still phyically and virually contiguous. 
Signed-off-by: Roy Pledge Change-Id: I42942067059a3c06f0b0d031d266d228295c7c45 Reviewed-on: http://git.am.freescale.net:8181/3857 Tested-by: Review Code-CDREVIEW Reviewed-by: Wang Haiying-R54964 Reviewed-by: Rivera Jose-B46482 diff --git a/drivers/staging/fsl_qbman/fsl_usdpaa.c b/drivers/staging/fsl_qbman/fsl_usdpaa.c index aebbc15..033cfbb 100644 --- a/drivers/staging/fsl_qbman/fsl_usdpaa.c +++ b/drivers/staging/fsl_qbman/fsl_usdpaa.c @@ -50,6 +50,7 @@ struct mem_fragment { unsigned long pfn_base; /* PFN version of 'base' */ unsigned long pfn_len; /* PFN version of 'len' */ unsigned int refs; /* zero if unmapped */ + u64 root_len; /* Size of the orignal fragment */ struct list_head list; /* if mapped, flags+name captured at creation time */ u32 flags; @@ -64,7 +65,9 @@ struct mem_fragment { * ioctl(USDPAA_IOCTL_DMA_MAP), though the actual mapping then happens via a * mmap(). */ struct mem_mapping { - struct mem_fragment *frag; + struct mem_fragment *root_frag; + u32 frag_count; + u64 total_size; struct list_head list; }; @@ -167,12 +170,28 @@ static const struct alloc_backend { } }; +/* Determines the largest acceptable page size for a given size + The sizes are determined by what the TLB1 acceptable page sizes are */ +static u32 largest_page_size(u32 size) +{ + int shift = 30; /* Start at 1G size */ + if (size < 4096) + return 0; + do { + if (size >= (1<= 12); /* Up to 4k */ + return 0; +} + /* Helper for ioctl_dma_map() when we have a larger fragment than we need. This * splits the fragment into 4 and returns the upper-most. (The caller can loop * until it has a suitable fragment size.) 
*/ static struct mem_fragment *split_frag(struct mem_fragment *frag) { struct mem_fragment *x[3]; + x[0] = kmalloc(sizeof(struct mem_fragment), GFP_KERNEL); x[1] = kmalloc(sizeof(struct mem_fragment), GFP_KERNEL); x[2] = kmalloc(sizeof(struct mem_fragment), GFP_KERNEL); @@ -194,6 +213,7 @@ static struct mem_fragment *split_frag(struct mem_fragment *frag) x[2]->pfn_base = x[1]->pfn_base + frag->pfn_len; x[0]->pfn_len = x[1]->pfn_len = x[2]->pfn_len = frag->pfn_len; x[0]->refs = x[1]->refs = x[2]->refs = 0; + x[0]->root_len = x[1]->root_len = x[2]->root_len = frag->root_len; list_add(&x[0]->list, &frag->list); list_add(&x[1]->list, &x[0]->list); list_add(&x[2]->list, &x[1]->list); @@ -209,12 +229,16 @@ static struct mem_fragment *merge_frag(struct mem_fragment *frag) uint64_t newlen = frag->len << 2; uint64_t newbase = frag->base & ~(newlen - 1); struct mem_fragment *tmp, *leftmost = frag, *rightmost = frag; + + /* If this fragment is already at root size don't allow merge */ + if (frag->len == frag->root_len) + return NULL; /* Scan left until we find the start */ tmp = list_entry(frag->list.prev, struct mem_fragment, list); while ((&tmp->list != &mem_list) && (tmp->base >= newbase)) { if (tmp->refs) return NULL; - if (tmp->len != tmp->len) + if (tmp->len != frag->len) return NULL; leftmost = tmp; tmp = list_entry(tmp->list.prev, struct mem_fragment, list); @@ -224,7 +248,7 @@ static struct mem_fragment *merge_frag(struct mem_fragment *frag) while ((&tmp->list != &mem_list) && (tmp->base < (newbase + newlen))) { if (tmp->refs) return NULL; - if (tmp->len != tmp->len) + if (tmp->len != frag->len) return NULL; rightmost = tmp; tmp = list_entry(tmp->list.next, struct mem_fragment, list); @@ -249,15 +273,6 @@ static struct mem_fragment *merge_frag(struct mem_fragment *frag) return frag; } -/* Helper to verify that 'sz' is (4096 * 4^x) for some x. 
*/ -static int is_good_size(u64 sz) -{ - int log = ilog2(phys_size); - if ((phys_size & (phys_size - 1)) || (log < 12) || (log & 1)) - return 0; - return 1; -} - /* Hook from arch/powerpc/mm/mem.c */ int usdpaa_test_fault(unsigned long pfn, u64 *phys_addr, u64 *size) { @@ -444,6 +459,18 @@ static bool check_channel_device(void *_ctx, u32 channel) +__maybe_unused static void dump_frags(void) +{ + struct mem_fragment *frag; + int i = 0; + list_for_each_entry(frag, &mem_list, list) { + pr_info("FRAG %d: base 0x%llx len 0x%llx root_len 0x%llx\n", + i, frag->base, frag->len, frag->root_len); + ++i; + } +} + + static int usdpaa_release(struct inode *inode, struct file *filp) { struct ctx *ctx = filp->private_data; @@ -522,15 +549,23 @@ static int usdpaa_release(struct inode *inode, struct file *filp) /* Release any DMA regions */ spin_lock(&mem_lock); list_for_each_entry_safe(map, tmpmap, &ctx->maps, list) { - if (map->frag->has_locking && (map->frag->owner == map)) { - map->frag->owner = NULL; - wake_up(&map->frag->wq); + struct mem_fragment *current_frag = map->root_frag; + int i; + if (map->root_frag->has_locking && + (map->root_frag->owner == map)) { + map->root_frag->owner = NULL; + wake_up(&map->root_frag->wq); } - if (!--map->frag->refs) { - struct mem_fragment *frag = map->frag; - do { - frag = merge_frag(frag); - } while (frag); + /* Check each fragment and merge if the ref count is 0 */ + for (i = 0; i < map->frag_count; i++) { + if (!--current_frag->refs) { + struct mem_fragment *frag = current_frag; + do { + frag = merge_frag(frag); + } while (frag); + } + current_frag = list_entry(current_frag->list.next, + struct mem_fragment, list); } list_del(&map->list); kfree(map); @@ -567,12 +602,19 @@ static int check_mmap_dma(struct ctx *ctx, struct vm_area_struct *vma, struct mem_mapping *map; list_for_each_entry(map, &ctx->maps, list) { - if (map->frag->pfn_base == vma->vm_pgoff) { - *match = 1; - if (map->frag->len != (vma->vm_end - vma->vm_start)) - return 
-EINVAL; - *pfn = map->frag->pfn_base; - return 0; + int i; + struct mem_fragment *frag = map->root_frag; + + for (i = 0; i < map->frag_count; i++) { + if (frag->pfn_base == vma->vm_pgoff) { + *match = 1; + if (frag->len != (vma->vm_end - vma->vm_start)) + return -EINVAL; + *pfn = frag->pfn_base; + return 0; + } + frag = list_entry(frag->list.next, struct mem_fragment, + list); } } *match = 0; @@ -653,7 +695,7 @@ static unsigned long usdpaa_get_unmapped_area(struct file *file, { struct vm_area_struct *vma; - if (!is_good_size(len)) + if (len % PAGE_SIZE) return -EINVAL; addr = USDPAA_MEM_ROUNDUP(addr, len); @@ -795,15 +837,20 @@ static long ioctl_id_reserve(struct ctx *ctx, void __user *arg) static long ioctl_dma_map(struct file *fp, struct ctx *ctx, struct usdpaa_ioctl_dma_map *i) { - struct mem_fragment *frag; + struct mem_fragment *frag, *start_frag, *next_frag; struct mem_mapping *map, *tmp; - u64 search_size; - int ret = 0; - if (i->len && !is_good_size(i->len)) + int ret = 0, k; + u32 largest_page, so_far = 0; + int frag_count = 0; + unsigned long next_addr = PAGE_SIZE; + + if (i->len && i->len % PAGE_SIZE) return -EINVAL; + map = kmalloc(sizeof(*map), GFP_KERNEL); if (!map) return -ENOMEM; + spin_lock(&mem_lock); if (i->flags & USDPAA_DMA_FLAG_SHARE) { list_for_each_entry(frag, &mem_list, list) { @@ -817,19 +864,23 @@ static long ioctl_dma_map(struct file *fp, struct ctx *ctx, ret = -EBUSY; goto out; } + /* Check if this has already been mapped + to this process */ list_for_each_entry(tmp, &ctx->maps, list) - if (tmp->frag == frag) { + if (tmp->root_frag == frag) { ret = -EBUSY; goto out; } i->has_locking = frag->has_locking; i->did_create = 0; i->len = frag->len; + start_frag = frag; goto do_map; } } /* No matching entry */ if (!(i->flags & USDPAA_DMA_FLAG_CREATE)) { + pr_err("ioctl_dma_map() No matching entry\n"); ret = -ENOMEM; goto out; } @@ -839,52 +890,124 @@ static long ioctl_dma_map(struct file *fp, struct ctx *ctx, ret = -EINVAL; goto out; } - /* We 
search for the required size and if that fails, for the next - * biggest size, etc. */ - for (search_size = i->len; search_size <= phys_size; - search_size <<= 2) { + /* Verify there is sufficent space to do the mapping */ + down_write(¤t->mm->mmap_sem); + next_addr = usdpaa_get_unmapped_area(fp, next_addr, i->len, 0, 0); + up_write(¤t->mm->mmap_sem); + + if (next_addr & ~PAGE_MASK) { + ret = -ENOMEM; + goto out; + } + + /* Find one of more contiguous fragments that satisfy the total length + trying to minimize the number of fragments + compute the largest page size that the allocation could use */ + largest_page = largest_page_size(i->len); + start_frag = NULL; + while (largest_page && + largest_page <= largest_page_size(phys_size) && + start_frag == NULL) { + /* Search the list for a frag of that size */ list_for_each_entry(frag, &mem_list, list) { - if (!frag->refs && (frag->len == search_size)) { - while (frag->len > i->len) { - frag = split_frag(frag); - if (!frag) { - ret = -ENOMEM; - goto out; - } + if (!frag->refs && (frag->len == largest_page)) { + /* See if the next x fragments are free + and can accomidate the size */ + u32 found_size = largest_page; + next_frag = list_entry(frag->list.next, + struct mem_fragment, + list); + /* If the fragement is too small check + if the neighbours cab support it */ + while (found_size < i->len) { + if (&mem_list == &next_frag->list) + break; /* End of list */ + if (next_frag->refs != 0 || + next_frag->len == 0) + break; /* not enough space */ + found_size += next_frag->len; + } + if (found_size >= i->len) { + /* Success! 
there is enough contigous + free space */ + start_frag = frag; + break; } - frag->flags = i->flags; - strncpy(frag->name, i->name, - USDPAA_DMA_NAME_MAX); - frag->has_locking = i->has_locking; - init_waitqueue_head(&frag->wq); - frag->owner = NULL; - i->did_create = 1; - goto do_map; } - } + } /* next frag loop */ + /* Couldn't statisfy the request with this + largest page size, try a smaller one */ + largest_page <<= 2; + } + if (start_frag == NULL) { + /* Couldn't find proper amount of space */ + ret = -ENOMEM; + goto out; } - ret = -ENOMEM; - goto out; - + i->did_create = 1; do_map: - map->frag = frag; - frag->refs++; + /* We may need to divide the final fragment to accomidate the mapping */ + next_frag = start_frag; + while (so_far != i->len) { + BUG_ON(next_frag->len == 0); + while ((next_frag->len + so_far) > i->len) { + /* Split frag until they match */ + split_frag(next_frag); + } + so_far += next_frag->len; + ++frag_count; + next_frag = list_entry(next_frag->list.next, + struct mem_fragment, list); + } + + /* we need to reserve start count fragments starting at start frag */ + next_frag = start_frag; + for (k = 0; k < frag_count; k++) { + next_frag->refs++; + next_frag = list_entry(next_frag->list.next, + struct mem_fragment, list); + } + + start_frag->flags = i->flags; + strncpy(start_frag->name, i->name, USDPAA_DMA_NAME_MAX); + start_frag->has_locking = i->has_locking; + init_waitqueue_head(&start_frag->wq); + start_frag->owner = NULL; + + /* Setup the map entry */ + map->root_frag = start_frag; + map->total_size = i->len; + map->frag_count = frag_count; list_add(&map->list, &ctx->maps); - i->phys_addr = frag->base; - + i->phys_addr = start_frag->base; out: spin_unlock(&mem_lock); if (!ret) { unsigned long longret; - down_write(¤t->mm->mmap_sem); - longret = do_mmap_pgoff(fp, PAGE_SIZE, map->frag->len, PROT_READ | - (i->flags & USDPAA_DMA_FLAG_RDONLY ? 
0 : PROT_WRITE), - MAP_SHARED, map->frag->pfn_base); - up_write(¤t->mm->mmap_sem); - if (longret & ~PAGE_MASK) - ret = (int)longret; - else - i->ptr = (void *)longret; + unsigned long next_addr = PAGE_SIZE; + next_frag = start_frag; + for (k = 0; k < frag_count; k++) { + down_write(¤t->mm->mmap_sem); + longret = do_mmap_pgoff(fp, next_addr, next_frag->len, + PROT_READ | + (i->flags & + USDPAA_DMA_FLAG_RDONLY ? 0 + : PROT_WRITE), + MAP_SHARED, + next_frag->pfn_base); + up_write(¤t->mm->mmap_sem); + if (longret & ~PAGE_MASK) + ret = (int)longret; + else { + if (k == 0) + i->ptr = (void *)longret; + else + BUG_ON(next_addr != longret); + next_addr = longret + next_frag->len; + } + next_frag = list_entry(next_frag->list.next, + struct mem_fragment, list); + } } else kfree(map); return ret; @@ -904,12 +1027,12 @@ static long ioctl_dma_unmap(struct ctx *ctx, void __user *arg) } spin_lock(&mem_lock); list_for_each_entry(map, &ctx->maps, list) { - if (map->frag->pfn_base == vma->vm_pgoff) { + if (map->root_frag->pfn_base == vma->vm_pgoff) { /* Drop the map lock if we hold it */ - if (map->frag->has_locking && - (map->frag->owner == map)) { - map->frag->owner = NULL; - wake_up(&map->frag->wq); + if (map->root_frag->has_locking && + (map->root_frag->owner == map)) { + map->root_frag->owner = NULL; + wake_up(&map->root_frag->wq); } goto map_match; } @@ -946,8 +1069,8 @@ static int test_lock(struct mem_mapping *map) { int ret = 0; spin_lock(&mem_lock); - if (!map->frag->owner) { - map->frag->owner = map; + if (!map->root_frag->owner) { + map->root_frag->owner = map; ret = 1; } spin_unlock(&mem_lock); @@ -967,7 +1090,7 @@ static long ioctl_dma_lock(struct ctx *ctx, void __user *arg) } spin_lock(&mem_lock); list_for_each_entry(map, &ctx->maps, list) { - if (map->frag->pfn_base == vma->vm_pgoff) + if (map->root_frag->pfn_base == vma->vm_pgoff) goto map_match; } map = NULL; @@ -975,9 +1098,9 @@ map_match: spin_unlock(&mem_lock); up_read(¤t->mm->mmap_sem); - if 
(!map->frag->has_locking) + if (!map->root_frag->has_locking) return -ENODEV; - return wait_event_interruptible(map->frag->wq, test_lock(map)); + return wait_event_interruptible(map->root_frag->wq, test_lock(map)); } static long ioctl_dma_unlock(struct ctx *ctx, void __user *arg) @@ -993,12 +1116,12 @@ static long ioctl_dma_unlock(struct ctx *ctx, void __user *arg) else { spin_lock(&mem_lock); list_for_each_entry(map, &ctx->maps, list) { - if (map->frag->pfn_base == vma->vm_pgoff) { - if (!map->frag->has_locking) + if (map->root_frag->pfn_base == vma->vm_pgoff) { + if (!map->root_frag->has_locking) ret = -ENODEV; - else if (map->frag->owner == map) { - map->frag->owner = NULL; - wake_up(&map->frag->wq); + else if (map->root_frag->owner == map) { + map->root_frag->owner = NULL; + wake_up(&map->root_frag->wq); ret = 0; } else ret = -EBUSY; @@ -1383,12 +1506,12 @@ __init void fsl_usdpaa_init_early(void) pr_info("No USDPAA memory, no 'usdpaa_mem' bootarg\n"); return; } - if (!is_good_size(phys_size)) { - pr_err("'usdpaa_mem' bootarg must be 4096*4^x\n"); + if (phys_size % PAGE_SIZE) { + pr_err("'usdpaa_mem' bootarg must be a multiple of page size\n"); phys_size = 0; return; } - phys_start = memblock_alloc(phys_size, phys_size); + phys_start = memblock_alloc(phys_size, largest_page_size(phys_size)); if (!phys_start) { pr_err("Failed to reserve USDPAA region (sz:%llx)\n", phys_size); @@ -1406,25 +1529,39 @@ static int __init usdpaa_init(void) { struct mem_fragment *frag; int ret; + u64 tmp_size = phys_size; + u64 tmp_start = phys_start; + u64 tmp_pfn_size = pfn_size; + u64 tmp_pfn_start = pfn_start; pr_info("Freescale USDPAA process driver\n"); if (!phys_start) { pr_warn("fsl-usdpaa: no region found\n"); return 0; } - frag = kmalloc(sizeof(*frag), GFP_KERNEL); - if (!frag) { - pr_err("Failed to setup USDPAA memory accounting\n"); - return -ENOMEM; + + while (tmp_size != 0) { + u32 frag_size = largest_page_size(tmp_size); + frag = kmalloc(sizeof(*frag), GFP_KERNEL); + if 
(!frag) { + pr_err("Failed to setup USDPAA memory accounting\n"); + return -ENOMEM; + } + frag->base = tmp_start; + frag->len = frag->root_len = frag_size; + frag->pfn_base = tmp_pfn_start; + frag->pfn_len = frag_size / PAGE_SIZE; + frag->refs = 0; + init_waitqueue_head(&frag->wq); + frag->owner = NULL; + list_add(&frag->list, &mem_list); + + /* Adjust for this frag */ + tmp_start += frag_size; + tmp_size -= frag_size; + tmp_pfn_start += frag_size / PAGE_SIZE; + tmp_pfn_size -= frag_size / PAGE_SIZE; } - frag->base = phys_start; - frag->len = phys_size; - frag->pfn_base = pfn_start; - frag->pfn_len = pfn_size; - frag->refs = 0; - init_waitqueue_head(&frag->wq); - frag->owner = NULL; - list_add(&frag->list, &mem_list); ret = misc_register(&usdpaa_miscdev); if (ret) pr_err("fsl-usdpaa: failed to register misc device\n"); -- cgit v0.10.2