diff options
author | Mandy Lavi <mandy.lavi@freescale.com> | 2013-03-24 16:40:18 (GMT) |
---|---|---|
committer | Fleming Andrew-AFLEMING <AFLEMING@freescale.com> | 2013-04-08 23:07:26 (GMT) |
commit | 3cc514986cb4e457458cc826288b6c2107c97907 (patch) | |
tree | 2985e48afc5dd20f44d51ef6aafdae917ce2698f /drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd | |
parent | 5767d52cec831b0e2927d34e91acd738cc1cb0ef (diff) | |
download | linux-fsl-qoriq-3cc514986cb4e457458cc826288b6c2107c97907.tar.xz |
fmd: fmd19 integration
Add the fmd19 codebase, plus a minimal set of sources from dpaa-eth
that are necessary for standalone (bare) compilation.
Change-Id: I390df8717671204e3d98a987135393bef4534e95
Signed-off-by: Mandy Lavi <mandy.lavi@freescale.com>
Signed-off-by: Bogdan Hamciuc <bogdan.hamciuc@freescale.com>
Reviewed-on: http://git.am.freescale.net:8181/1029
Reviewed-by: Fleming Andrew-AFLEMING <AFLEMING@freescale.com>
Tested-by: Fleming Andrew-AFLEMING <AFLEMING@freescale.com>
Diffstat (limited to 'drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd')
19 files changed, 23636 insertions, 0 deletions
diff --git a/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/Makefile b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/Makefile new file mode 100644 index 0000000..70c7d7e --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/Makefile @@ -0,0 +1,19 @@ +# +# Makefile for the Freescale Ethernet controllers +# +EXTRA_CFLAGS += -DVERSION=\"\" +# +#Include netcomm SW specific definitions +include $(srctree)/drivers/net/ethernet/freescale/fman/ncsw_config.mk + +NCSW_FM_INC = $(srctree)/drivers/net/ethernet/freescale/fman/Peripherals/FM/inc + +EXTRA_CFLAGS += -I$(NCSW_FM_INC) + +obj-y += fsl-ncsw-Pcd.o + +fsl-ncsw-Pcd-objs := fman_kg.o fman_prs.o fm_cc.o fm_kg.o fm_pcd.o fm_plcr.o fm_prs.o fm_manip.o + +ifeq ($(CONFIG_FMAN_T4240),y) +fsl-ncsw-Pcd-objs += fm_replic.o +endif diff --git a/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/crc64.h b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/crc64.h new file mode 100644 index 0000000..335ee68 --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/crc64.h @@ -0,0 +1,360 @@ +/* + * Copyright 2008-2012 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + + /**************************************************************************//** + @File crc64.h + + @Description brief This file contains the CRC64 Table, and __inline__ + functions used for calculating crc. 
+*//***************************************************************************/ +#ifndef __CRC64_H +#define __CRC64_H + +#include "std_ext.h" + + +#define BITS_PER_BYTE 8 + +#define CRC64_EXPON_ECMA_182 0xC96C5795D7870F42ULL +#define CRC64_DEFAULT_INITVAL 0xFFFFFFFFFFFFFFFFULL + +#define CRC64_BYTE_MASK 0xFF +#define CRC64_TABLE_ENTRIES ( 1 << BITS_PER_BYTE ) +#define CRC64_ODD_MASK 1 + + +/** + \brief '64 bit crc' Table + */ +struct crc64_t { + uint64_t initial; /**< Initial seed */ + uint64_t table[CRC64_TABLE_ENTRIES]; /**< CRC table entries */ +}; + + +static struct crc64_t CRC64_ECMA_182 = { + CRC64_DEFAULT_INITVAL, + { + 0x0000000000000000ULL, + 0xb32e4cbe03a75f6fULL, + 0xf4843657a840a05bULL, + 0x47aa7ae9abe7ff34ULL, + 0x7bd0c384ff8f5e33ULL, + 0xc8fe8f3afc28015cULL, + 0x8f54f5d357cffe68ULL, + 0x3c7ab96d5468a107ULL, + 0xf7a18709ff1ebc66ULL, + 0x448fcbb7fcb9e309ULL, + 0x0325b15e575e1c3dULL, + 0xb00bfde054f94352ULL, + 0x8c71448d0091e255ULL, + 0x3f5f08330336bd3aULL, + 0x78f572daa8d1420eULL, + 0xcbdb3e64ab761d61ULL, + 0x7d9ba13851336649ULL, + 0xceb5ed8652943926ULL, + 0x891f976ff973c612ULL, + 0x3a31dbd1fad4997dULL, + 0x064b62bcaebc387aULL, + 0xb5652e02ad1b6715ULL, + 0xf2cf54eb06fc9821ULL, + 0x41e11855055bc74eULL, + 0x8a3a2631ae2dda2fULL, + 0x39146a8fad8a8540ULL, + 0x7ebe1066066d7a74ULL, + 0xcd905cd805ca251bULL, + 0xf1eae5b551a2841cULL, + 0x42c4a90b5205db73ULL, + 0x056ed3e2f9e22447ULL, + 0xb6409f5cfa457b28ULL, + 0xfb374270a266cc92ULL, + 0x48190ecea1c193fdULL, + 0x0fb374270a266cc9ULL, + 0xbc9d3899098133a6ULL, + 0x80e781f45de992a1ULL, + 0x33c9cd4a5e4ecdceULL, + 0x7463b7a3f5a932faULL, + 0xc74dfb1df60e6d95ULL, + 0x0c96c5795d7870f4ULL, + 0xbfb889c75edf2f9bULL, + 0xf812f32ef538d0afULL, + 0x4b3cbf90f69f8fc0ULL, + 0x774606fda2f72ec7ULL, + 0xc4684a43a15071a8ULL, + 0x83c230aa0ab78e9cULL, + 0x30ec7c140910d1f3ULL, + 0x86ace348f355aadbULL, + 0x3582aff6f0f2f5b4ULL, + 0x7228d51f5b150a80ULL, + 0xc10699a158b255efULL, + 0xfd7c20cc0cdaf4e8ULL, + 0x4e526c720f7dab87ULL, + 
0x09f8169ba49a54b3ULL, + 0xbad65a25a73d0bdcULL, + 0x710d64410c4b16bdULL, + 0xc22328ff0fec49d2ULL, + 0x85895216a40bb6e6ULL, + 0x36a71ea8a7ace989ULL, + 0x0adda7c5f3c4488eULL, + 0xb9f3eb7bf06317e1ULL, + 0xfe5991925b84e8d5ULL, + 0x4d77dd2c5823b7baULL, + 0x64b62bcaebc387a1ULL, + 0xd7986774e864d8ceULL, + 0x90321d9d438327faULL, + 0x231c512340247895ULL, + 0x1f66e84e144cd992ULL, + 0xac48a4f017eb86fdULL, + 0xebe2de19bc0c79c9ULL, + 0x58cc92a7bfab26a6ULL, + 0x9317acc314dd3bc7ULL, + 0x2039e07d177a64a8ULL, + 0x67939a94bc9d9b9cULL, + 0xd4bdd62abf3ac4f3ULL, + 0xe8c76f47eb5265f4ULL, + 0x5be923f9e8f53a9bULL, + 0x1c4359104312c5afULL, + 0xaf6d15ae40b59ac0ULL, + 0x192d8af2baf0e1e8ULL, + 0xaa03c64cb957be87ULL, + 0xeda9bca512b041b3ULL, + 0x5e87f01b11171edcULL, + 0x62fd4976457fbfdbULL, + 0xd1d305c846d8e0b4ULL, + 0x96797f21ed3f1f80ULL, + 0x2557339fee9840efULL, + 0xee8c0dfb45ee5d8eULL, + 0x5da24145464902e1ULL, + 0x1a083bacedaefdd5ULL, + 0xa9267712ee09a2baULL, + 0x955cce7fba6103bdULL, + 0x267282c1b9c65cd2ULL, + 0x61d8f8281221a3e6ULL, + 0xd2f6b4961186fc89ULL, + 0x9f8169ba49a54b33ULL, + 0x2caf25044a02145cULL, + 0x6b055fede1e5eb68ULL, + 0xd82b1353e242b407ULL, + 0xe451aa3eb62a1500ULL, + 0x577fe680b58d4a6fULL, + 0x10d59c691e6ab55bULL, + 0xa3fbd0d71dcdea34ULL, + 0x6820eeb3b6bbf755ULL, + 0xdb0ea20db51ca83aULL, + 0x9ca4d8e41efb570eULL, + 0x2f8a945a1d5c0861ULL, + 0x13f02d374934a966ULL, + 0xa0de61894a93f609ULL, + 0xe7741b60e174093dULL, + 0x545a57dee2d35652ULL, + 0xe21ac88218962d7aULL, + 0x5134843c1b317215ULL, + 0x169efed5b0d68d21ULL, + 0xa5b0b26bb371d24eULL, + 0x99ca0b06e7197349ULL, + 0x2ae447b8e4be2c26ULL, + 0x6d4e3d514f59d312ULL, + 0xde6071ef4cfe8c7dULL, + 0x15bb4f8be788911cULL, + 0xa6950335e42fce73ULL, + 0xe13f79dc4fc83147ULL, + 0x521135624c6f6e28ULL, + 0x6e6b8c0f1807cf2fULL, + 0xdd45c0b11ba09040ULL, + 0x9aefba58b0476f74ULL, + 0x29c1f6e6b3e0301bULL, + 0xc96c5795d7870f42ULL, + 0x7a421b2bd420502dULL, + 0x3de861c27fc7af19ULL, + 0x8ec62d7c7c60f076ULL, + 0xb2bc941128085171ULL, + 0x0192d8af2baf0e1eULL, + 
0x4638a2468048f12aULL, + 0xf516eef883efae45ULL, + 0x3ecdd09c2899b324ULL, + 0x8de39c222b3eec4bULL, + 0xca49e6cb80d9137fULL, + 0x7967aa75837e4c10ULL, + 0x451d1318d716ed17ULL, + 0xf6335fa6d4b1b278ULL, + 0xb199254f7f564d4cULL, + 0x02b769f17cf11223ULL, + 0xb4f7f6ad86b4690bULL, + 0x07d9ba1385133664ULL, + 0x4073c0fa2ef4c950ULL, + 0xf35d8c442d53963fULL, + 0xcf273529793b3738ULL, + 0x7c0979977a9c6857ULL, + 0x3ba3037ed17b9763ULL, + 0x888d4fc0d2dcc80cULL, + 0x435671a479aad56dULL, + 0xf0783d1a7a0d8a02ULL, + 0xb7d247f3d1ea7536ULL, + 0x04fc0b4dd24d2a59ULL, + 0x3886b22086258b5eULL, + 0x8ba8fe9e8582d431ULL, + 0xcc0284772e652b05ULL, + 0x7f2cc8c92dc2746aULL, + 0x325b15e575e1c3d0ULL, + 0x8175595b76469cbfULL, + 0xc6df23b2dda1638bULL, + 0x75f16f0cde063ce4ULL, + 0x498bd6618a6e9de3ULL, + 0xfaa59adf89c9c28cULL, + 0xbd0fe036222e3db8ULL, + 0x0e21ac88218962d7ULL, + 0xc5fa92ec8aff7fb6ULL, + 0x76d4de52895820d9ULL, + 0x317ea4bb22bfdfedULL, + 0x8250e80521188082ULL, + 0xbe2a516875702185ULL, + 0x0d041dd676d77eeaULL, + 0x4aae673fdd3081deULL, + 0xf9802b81de97deb1ULL, + 0x4fc0b4dd24d2a599ULL, + 0xfceef8632775faf6ULL, + 0xbb44828a8c9205c2ULL, + 0x086ace348f355aadULL, + 0x34107759db5dfbaaULL, + 0x873e3be7d8faa4c5ULL, + 0xc094410e731d5bf1ULL, + 0x73ba0db070ba049eULL, + 0xb86133d4dbcc19ffULL, + 0x0b4f7f6ad86b4690ULL, + 0x4ce50583738cb9a4ULL, + 0xffcb493d702be6cbULL, + 0xc3b1f050244347ccULL, + 0x709fbcee27e418a3ULL, + 0x3735c6078c03e797ULL, + 0x841b8ab98fa4b8f8ULL, + 0xadda7c5f3c4488e3ULL, + 0x1ef430e13fe3d78cULL, + 0x595e4a08940428b8ULL, + 0xea7006b697a377d7ULL, + 0xd60abfdbc3cbd6d0ULL, + 0x6524f365c06c89bfULL, + 0x228e898c6b8b768bULL, + 0x91a0c532682c29e4ULL, + 0x5a7bfb56c35a3485ULL, + 0xe955b7e8c0fd6beaULL, + 0xaeffcd016b1a94deULL, + 0x1dd181bf68bdcbb1ULL, + 0x21ab38d23cd56ab6ULL, + 0x9285746c3f7235d9ULL, + 0xd52f0e859495caedULL, + 0x6601423b97329582ULL, + 0xd041dd676d77eeaaULL, + 0x636f91d96ed0b1c5ULL, + 0x24c5eb30c5374ef1ULL, + 0x97eba78ec690119eULL, + 0xab911ee392f8b099ULL, + 0x18bf525d915feff6ULL, + 
0x5f1528b43ab810c2ULL, + 0xec3b640a391f4fadULL, + 0x27e05a6e926952ccULL, + 0x94ce16d091ce0da3ULL, + 0xd3646c393a29f297ULL, + 0x604a2087398eadf8ULL, + 0x5c3099ea6de60cffULL, + 0xef1ed5546e415390ULL, + 0xa8b4afbdc5a6aca4ULL, + 0x1b9ae303c601f3cbULL, + 0x56ed3e2f9e224471ULL, + 0xe5c372919d851b1eULL, + 0xa26908783662e42aULL, + 0x114744c635c5bb45ULL, + 0x2d3dfdab61ad1a42ULL, + 0x9e13b115620a452dULL, + 0xd9b9cbfcc9edba19ULL, + 0x6a978742ca4ae576ULL, + 0xa14cb926613cf817ULL, + 0x1262f598629ba778ULL, + 0x55c88f71c97c584cULL, + 0xe6e6c3cfcadb0723ULL, + 0xda9c7aa29eb3a624ULL, + 0x69b2361c9d14f94bULL, + 0x2e184cf536f3067fULL, + 0x9d36004b35545910ULL, + 0x2b769f17cf112238ULL, + 0x9858d3a9ccb67d57ULL, + 0xdff2a94067518263ULL, + 0x6cdce5fe64f6dd0cULL, + 0x50a65c93309e7c0bULL, + 0xe388102d33392364ULL, + 0xa4226ac498dedc50ULL, + 0x170c267a9b79833fULL, + 0xdcd7181e300f9e5eULL, + 0x6ff954a033a8c131ULL, + 0x28532e49984f3e05ULL, + 0x9b7d62f79be8616aULL, + 0xa707db9acf80c06dULL, + 0x14299724cc279f02ULL, + 0x5383edcd67c06036ULL, + 0xe0ada17364673f59ULL + } +}; + + +/** + \brief Initializes the crc seed + */ +static __inline__ uint64_t crc64_init(void) +{ + return CRC64_ECMA_182.initial; +} + +/** + \brief Computes 64 bit the crc + \param[in] data Pointer to the Data in the frame + \param[in] len Length of the Data + \param[in] crc seed + \return calculated crc + */ +static __inline__ uint64_t crc64_compute(void const *data, + uint32_t len, + uint64_t seed) +{ + uint32_t i; + uint64_t crc = seed; + uint8_t *bdata = (uint8_t *) data; + + for (i = 0; i < len; i++) + crc = + CRC64_ECMA_182. 
+ table[(crc ^ *bdata++) & CRC64_BYTE_MASK] ^ (crc >> 8); + + return crc; +} + + +#endif /* __CRC64_H */ diff --git a/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_cc.c b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_cc.c new file mode 100644 index 0000000..85810a9 --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_cc.c @@ -0,0 +1,6940 @@ +/* + * Copyright 2008-2012 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + +/****************************************************************************** + @File fm_cc.c + + @Description FM Coarse Classifier implementation +*//***************************************************************************/ +#include "std_ext.h" +#include "error_ext.h" +#include "string_ext.h" +#include "debug_ext.h" +#include "fm_pcd_ext.h" +#include "fm_muram_ext.h" + +#include "fm_common.h" +#include "fm_pcd.h" +#include "fm_hc.h" +#include "fm_cc.h" +#include "crc64.h" + + +/****************************************/ +/* static functions */ +/****************************************/ + + +static t_Error CcRootTryLock(t_Handle h_FmPcdCcTree) +{ + t_FmPcdCcTree *p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmPcdCcTree; + + ASSERT_COND(h_FmPcdCcTree); + + if (FmPcdLockTryLock(p_FmPcdCcTree->p_Lock)) + return E_OK; + + return ERROR_CODE(E_BUSY); +} + +static void CcRootReleaseLock(t_Handle h_FmPcdCcTree) +{ + t_FmPcdCcTree *p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmPcdCcTree; + + ASSERT_COND(h_FmPcdCcTree); + + FmPcdLockUnlock(p_FmPcdCcTree->p_Lock); +} + +static void UpdateNodeOwner(t_FmPcdCcNode *p_CcNode, bool add) +{ + uint32_t intFlags; + + ASSERT_COND(p_CcNode); + + intFlags = XX_LockIntrSpinlock(p_CcNode->h_Spinlock); + + if (add) + p_CcNode->owners++; + else + { + ASSERT_COND(p_CcNode->owners); + p_CcNode->owners--; + } + + XX_UnlockIntrSpinlock(p_CcNode->h_Spinlock, intFlags); +} + +static __inline__ t_FmPcdStatsObj* DequeueStatsObj(t_List 
*p_List) +{ + t_FmPcdStatsObj *p_StatsObj = NULL; + t_List *p_Next; + + if (!LIST_IsEmpty(p_List)) + { + p_Next = LIST_FIRST(p_List); + p_StatsObj = LIST_OBJECT(p_Next, t_FmPcdStatsObj, node); + ASSERT_COND(p_StatsObj); + LIST_DelAndInit(p_Next); + } + + return p_StatsObj; +} + +static __inline__ void EnqueueStatsObj(t_List *p_List, + t_FmPcdStatsObj *p_StatsObj) +{ + LIST_AddToTail(&p_StatsObj->node, p_List); +} + +static void FreeStatObjects(t_List *p_List, + t_Handle h_FmMuram) +{ + t_FmPcdStatsObj *p_StatsObj; + + while (!LIST_IsEmpty(p_List)) + { + p_StatsObj = DequeueStatsObj(p_List); + ASSERT_COND(p_StatsObj); + + FM_MURAM_FreeMem(h_FmMuram, p_StatsObj->h_StatsAd); + FM_MURAM_FreeMem(h_FmMuram, p_StatsObj->h_StatsCounters); + + XX_Free(p_StatsObj); + } +} + +static t_FmPcdStatsObj* GetStatsObj(t_FmPcdCcNode *p_CcNode) +{ + t_FmPcdStatsObj* p_StatsObj; + t_Handle h_FmMuram; + + ASSERT_COND(p_CcNode); + + /* If 'maxNumOfKeys' was passed, all statistics object were preallocated + upon node initialization */ + if (p_CcNode->maxNumOfKeys) + { + p_StatsObj = DequeueStatsObj(&p_CcNode->availableStatsLst); + } + else + { + h_FmMuram = ((t_FmPcd *)(p_CcNode->h_FmPcd))->h_FmMuram; + ASSERT_COND(h_FmMuram); + + p_StatsObj = XX_Malloc(sizeof(t_FmPcdStatsObj)); + if (!p_StatsObj) + { + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("statistics object")); + return NULL; + } + + p_StatsObj->h_StatsAd = (t_Handle)FM_MURAM_AllocMem(h_FmMuram, + FM_PCD_CC_AD_ENTRY_SIZE, + FM_PCD_CC_AD_TABLE_ALIGN); + if (!p_StatsObj->h_StatsAd) + { + XX_Free(p_StatsObj); + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for statistics ADs")); + return NULL; + } + IOMemSet32(p_StatsObj->h_StatsAd, 0, FM_PCD_CC_AD_ENTRY_SIZE); + + p_StatsObj->h_StatsCounters = (t_Handle)FM_MURAM_AllocMem(h_FmMuram, + p_CcNode->countersArraySize, + FM_PCD_CC_AD_TABLE_ALIGN); + if (!p_StatsObj->h_StatsCounters) + { + FM_MURAM_FreeMem(h_FmMuram, p_StatsObj->h_StatsAd); + XX_Free(p_StatsObj); + REPORT_ERROR(MAJOR, 
E_NO_MEMORY, ("MURAM allocation for statistics counters")); + return NULL; + } + IOMemSet32(p_StatsObj->h_StatsCounters, 0, p_CcNode->countersArraySize); + } + + return p_StatsObj; +} + +static void PutStatsObj(t_FmPcdCcNode *p_CcNode, + t_FmPcdStatsObj *p_StatsObj) +{ + t_Handle h_FmMuram; + + ASSERT_COND(p_CcNode); + ASSERT_COND(p_StatsObj); + + /* If 'maxNumOfKeys' was passed, all statistics object were preallocated + upon node initialization and now will be enqueued back to the list */ + if (p_CcNode->maxNumOfKeys) + { + /* Nullify counters */ + IOMemSet32(p_StatsObj->h_StatsCounters, 0, p_CcNode->countersArraySize); + + EnqueueStatsObj(&p_CcNode->availableStatsLst, p_StatsObj); + } + else + { + h_FmMuram = ((t_FmPcd *)(p_CcNode->h_FmPcd))->h_FmMuram; + ASSERT_COND(h_FmMuram); + + FM_MURAM_FreeMem(h_FmMuram, p_StatsObj->h_StatsAd); + FM_MURAM_FreeMem(h_FmMuram, p_StatsObj->h_StatsCounters); + + XX_Free(p_StatsObj); + } +} + +static void SetStatsCounters(t_AdOfTypeStats *p_StatsAd, + uint32_t statsCountersAddr) +{ + uint32_t tmp = (statsCountersAddr & FM_PCD_AD_STATS_COUNTERS_ADDR_MASK); + + WRITE_UINT32(p_StatsAd->statsTableAddr, tmp); +} + + +static void UpdateStatsAd(t_FmPcdCcStatsParams *p_FmPcdCcStatsParams, + t_Handle h_Ad, + uint64_t physicalMuramBase) +{ + t_AdOfTypeStats *p_StatsAd; + uint32_t statsCountersAddr, nextActionAddr, tmp; +#if (DPAA_VERSION >= 11) + uint32_t frameLengthRangesAddr; +#endif /* (DPAA_VERSION >= 11) */ + + p_StatsAd = (t_AdOfTypeStats *)p_FmPcdCcStatsParams->h_StatsAd; + + tmp = FM_PCD_AD_STATS_TYPE; + +#if (DPAA_VERSION >= 11) + if (p_FmPcdCcStatsParams->h_StatsFLRs) + { + frameLengthRangesAddr = (uint32_t)((XX_VirtToPhys(p_FmPcdCcStatsParams->h_StatsFLRs) - physicalMuramBase)); + tmp |= (frameLengthRangesAddr & FM_PCD_AD_STATS_FLR_ADDR_MASK); + } +#endif /* (DPAA_VERSION >= 11) */ + WRITE_UINT32(p_StatsAd->profileTableAddr, tmp); + + nextActionAddr = (uint32_t)((XX_VirtToPhys(h_Ad) - physicalMuramBase)); + tmp = 0; + tmp |= 
(uint32_t)((nextActionAddr << FM_PCD_AD_STATS_NEXT_ACTION_SHIFT) & FM_PCD_AD_STATS_NEXT_ACTION_MASK); + tmp |= (FM_PCD_AD_STATS_NAD_EN | FM_PCD_AD_STATS_OP_CODE); + +#if (DPAA_VERSION >= 11) + if (p_FmPcdCcStatsParams->h_StatsFLRs) + tmp |= FM_PCD_AD_STATS_FLR_EN; +#endif /* (DPAA_VERSION >= 11) */ + + WRITE_UINT32(p_StatsAd->nextActionIndx, tmp); + + statsCountersAddr = (uint32_t)((XX_VirtToPhys(p_FmPcdCcStatsParams->h_StatsCounters) - physicalMuramBase)); + SetStatsCounters(p_StatsAd, statsCountersAddr); +} + +static void FillAdOfTypeContLookup(t_Handle h_Ad, + t_FmPcdCcStatsParams *p_FmPcdCcStatsParams, + t_Handle h_FmPcd, + t_Handle p_CcNode, + t_Handle h_Manip, + t_Handle h_FrmReplic) +{ + t_FmPcdCcNode *p_Node = (t_FmPcdCcNode *)p_CcNode; + t_AdOfTypeContLookup *p_AdContLookup = (t_AdOfTypeContLookup *)h_Ad; + t_Handle h_TmpAd; + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + uint32_t tmpReg32; + t_Handle p_AdNewPtr = NULL; + + UNUSED(h_Manip); + UNUSED(h_FrmReplic); + + /* there are 3 cases handled in this routine of building a "Continue lookup" type AD. + * Case 1: No Manip. The action descriptor is built within the match table. + * p_AdResult = p_AdNewPtr; + * Case 2: Manip exists. A new AD is created - p_AdNewPtr. It is initialized + * either in the FmPcdManipUpdateAdResultForCc routine or it was already + * initialized and returned here. + * p_AdResult (within the match table) will be initialized after + * this routine returns and point to the existing AD. + * Case 3: Manip exists. The action descriptor is built within the match table. + * FmPcdManipUpdateAdContLookupForCc returns a NULL p_AdNewPtr. + */ + + /* As default, the "new" ptr is the current one. i.e. 
the content of the result + * AD will be written into the match table itself (case (1))*/ + p_AdNewPtr = p_AdContLookup; + + /* Initialize an action descriptor, if current statistics mode requires an Ad */ + if (p_FmPcdCcStatsParams) + { + ASSERT_COND(p_FmPcdCcStatsParams->h_StatsAd); + ASSERT_COND(p_FmPcdCcStatsParams->h_StatsCounters); + + /* Swapping addresses between statistics Ad and the current lookup AD */ + h_TmpAd = p_FmPcdCcStatsParams->h_StatsAd; + p_FmPcdCcStatsParams->h_StatsAd = h_Ad; + h_Ad = h_TmpAd; + + p_AdNewPtr = h_Ad; + p_AdContLookup = h_Ad; + + /* Init statistics Ad and connect current lookup AD as 'next action' from statistics Ad */ + UpdateStatsAd(p_FmPcdCcStatsParams, + h_Ad, + p_FmPcd->physicalMuramBase); + } + +#if DPAA_VERSION >= 11 + if (h_Manip && h_FrmReplic) + FmPcdManipUpdateAdContLookupForCc(h_Manip, + h_Ad, + &p_AdNewPtr, + (uint32_t)((XX_VirtToPhys(FrmReplicGroupGetSourceTableDescriptor(h_FrmReplic)) - p_FmPcd->physicalMuramBase))); + else if (h_FrmReplic) + FrmReplicGroupUpdateAd(h_FrmReplic, h_Ad, &p_AdNewPtr); + else +#endif /* (DPAA_VERSION >= 11) */ + if (h_Manip) + FmPcdManipUpdateAdContLookupForCc(h_Manip, + h_Ad, + &p_AdNewPtr, + +#ifdef FM_CAPWAP_SUPPORT + /*no check for opcode of manip - this step can be reached only with capwap_applic_specific*/ + (uint32_t)((XX_VirtToPhys(p_Node->h_AdTable) - p_FmPcd->physicalMuramBase)) +#else /* not FM_CAPWAP_SUPPORT */ + (uint32_t)((XX_VirtToPhys(p_Node->h_Ad) - p_FmPcd->physicalMuramBase)) +#endif /* not FM_CAPWAP_SUPPORT */ + ); + + /* if (p_AdNewPtr = NULL) --> Done. (case (3)) */ + if (p_AdNewPtr) + { + /* cases (1) & (2) */ + tmpReg32 = 0; + tmpReg32 |= FM_PCD_AD_CONT_LOOKUP_TYPE; + tmpReg32 |= p_Node->sizeOfExtraction ? 
((p_Node->sizeOfExtraction - 1) << 24) : 0; + tmpReg32 |= (uint32_t)(XX_VirtToPhys(p_Node->h_AdTable) - p_FmPcd->physicalMuramBase); + WRITE_UINT32(p_AdContLookup->ccAdBase, tmpReg32); + + tmpReg32 = 0; + tmpReg32 |= p_Node->numOfKeys << 24; + tmpReg32 |= (p_Node->lclMask ? FM_PCD_AD_CONT_LOOKUP_LCL_MASK : 0); + tmpReg32 |= p_Node->h_KeysMatchTable ? + (uint32_t)(XX_VirtToPhys(p_Node->h_KeysMatchTable) - p_FmPcd->physicalMuramBase) : 0; + WRITE_UINT32(p_AdContLookup->matchTblPtr, tmpReg32); + + tmpReg32 = 0; + tmpReg32 |= p_Node->prsArrayOffset << 24; + tmpReg32 |= p_Node->offset << 16; + tmpReg32 |= p_Node->parseCode; + WRITE_UINT32(p_AdContLookup->pcAndOffsets, tmpReg32); + + Mem2IOCpy32((void*)&p_AdContLookup->gmask, p_Node->p_GlblMask, CC_GLBL_MASK_SIZE); + } +} + +static t_Error AllocAndFillAdForContLookupManip(t_Handle h_CcNode) +{ + t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode; + uint32_t intFlags; + + ASSERT_COND(p_CcNode); + + intFlags = XX_LockIntrSpinlock(p_CcNode->h_Spinlock); + + if (!p_CcNode->h_Ad) + { + p_CcNode->h_Ad = (t_Handle)FM_MURAM_AllocMem(((t_FmPcd *)(p_CcNode->h_FmPcd))->h_FmMuram, + FM_PCD_CC_AD_ENTRY_SIZE, + FM_PCD_CC_AD_TABLE_ALIGN); + + XX_UnlockIntrSpinlock(p_CcNode->h_Spinlock, intFlags); + + if (!p_CcNode->h_Ad) + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC action descriptor")); + + IOMemSet32(p_CcNode->h_Ad, 0, FM_PCD_CC_AD_ENTRY_SIZE); + + FillAdOfTypeContLookup(p_CcNode->h_Ad, + NULL, + p_CcNode->h_FmPcd, + p_CcNode, + NULL, + NULL); + } + else + XX_UnlockIntrSpinlock(p_CcNode->h_Spinlock, intFlags); + + return E_OK; +} + +static t_Error SetRequiredAction(t_Handle h_FmPcd, + uint32_t requiredAction, + t_FmPcdCcKeyAndNextEngineParams *p_CcKeyAndNextEngineParamsTmp, + t_Handle h_AdTmp, + uint16_t numOfEntries, + t_Handle h_Tree) +{ + t_AdOfTypeResult *p_AdTmp = (t_AdOfTypeResult *)h_AdTmp; + uint32_t tmpReg32; + t_Error err; + t_FmPcdCcNode *p_CcNode; + int i = 0; + uint16_t tmp = 0; + uint16_t profileId; + 
uint8_t relativeSchemeId, physicalSchemeId; + t_CcNodeInformation ccNodeInfo; + + for (i = 0; i < numOfEntries; i++) + { + if (i == 0) + h_AdTmp = PTR_MOVE(h_AdTmp, i*FM_PCD_CC_AD_ENTRY_SIZE); + else + h_AdTmp = PTR_MOVE(h_AdTmp, FM_PCD_CC_AD_ENTRY_SIZE); + + switch (p_CcKeyAndNextEngineParamsTmp[i].nextEngineParams.nextEngine) + { + case (e_FM_PCD_CC): + if (requiredAction) + { + p_CcNode = p_CcKeyAndNextEngineParamsTmp[i].nextEngineParams.params.ccParams.h_CcNode; + ASSERT_COND(p_CcNode); + if (p_CcNode->shadowAction == requiredAction) + break; + if ((requiredAction & UPDATE_CC_WITH_TREE) && !(p_CcNode->shadowAction & UPDATE_CC_WITH_TREE)) + { + + ASSERT_COND(LIST_NumOfObjs(&p_CcNode->ccTreesLst) == 0); + if (p_CcNode->shadowAction & UPDATE_CC_WITH_DELETE_TREE) + p_CcNode->shadowAction &= ~UPDATE_CC_WITH_DELETE_TREE; + memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation)); + ccNodeInfo.h_CcNode = h_Tree; + EnqueueNodeInfoToRelevantLst(&p_CcNode->ccTreesLst, &ccNodeInfo, NULL); + p_CcKeyAndNextEngineParamsTmp[i].shadowAction |= UPDATE_CC_WITH_TREE; + } + if ((requiredAction & UPDATE_CC_WITH_DELETE_TREE) && !(p_CcNode->shadowAction & UPDATE_CC_WITH_DELETE_TREE)) + { + ASSERT_COND(LIST_NumOfObjs(&p_CcNode->ccTreesLst) == 1); + if (p_CcNode->shadowAction & UPDATE_CC_WITH_TREE) + p_CcNode->shadowAction &= ~UPDATE_CC_WITH_TREE; + DequeueNodeInfoFromRelevantLst(&p_CcNode->ccTreesLst, h_Tree, NULL); + p_CcKeyAndNextEngineParamsTmp[i].shadowAction |= UPDATE_CC_WITH_DELETE_TREE; + } + if (p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].nextEngineParams.nextEngine != e_FM_PCD_INVALID) + tmp = (uint8_t)(p_CcNode->numOfKeys + 1); + else + tmp = p_CcNode->numOfKeys; + err = SetRequiredAction(h_FmPcd, + requiredAction, + p_CcNode->keyAndNextEngineParams, + p_CcNode->h_AdTable, + tmp, + h_Tree); + if (err != E_OK) + return err; + p_CcNode->shadowAction |= requiredAction; + } + break; + + case (e_FM_PCD_KG): + if ((requiredAction & UPDATE_NIA_ENQ_WITHOUT_DMA) && 
!(p_CcKeyAndNextEngineParamsTmp[i].shadowAction & UPDATE_NIA_ENQ_WITHOUT_DMA)) + { + physicalSchemeId = FmPcdKgGetSchemeId(p_CcKeyAndNextEngineParamsTmp[i].nextEngineParams.params.kgParams.h_DirectScheme); + relativeSchemeId = FmPcdKgGetRelativeSchemeId(h_FmPcd, physicalSchemeId); + if (relativeSchemeId == FM_PCD_KG_NUM_OF_SCHEMES) + RETURN_ERROR(MAJOR, E_NOT_IN_RANGE, NO_MSG); + if (!FmPcdKgIsSchemeValidSw(p_CcKeyAndNextEngineParamsTmp[i].nextEngineParams.params.kgParams.h_DirectScheme)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid direct scheme.")); + if (!KgIsSchemeAlwaysDirect(h_FmPcd, relativeSchemeId)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("For this action scheme has to be direct.")); + err = FmPcdKgCcGetSetParams(h_FmPcd, p_CcKeyAndNextEngineParamsTmp[i].nextEngineParams.params.kgParams.h_DirectScheme, requiredAction, 0); + if (err != E_OK) + RETURN_ERROR(MAJOR, err, NO_MSG); + p_CcKeyAndNextEngineParamsTmp[i].shadowAction |= requiredAction; + } + break; + + case (e_FM_PCD_PLCR): + if ((requiredAction & UPDATE_NIA_ENQ_WITHOUT_DMA) && !(p_CcKeyAndNextEngineParamsTmp[i].shadowAction & UPDATE_NIA_ENQ_WITHOUT_DMA)) + { + if (!p_CcKeyAndNextEngineParamsTmp[i].nextEngineParams.params.plcrParams.overrideParams) + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("In this initialization only overrideFqid can be initialized")); + if (!p_CcKeyAndNextEngineParamsTmp[i].nextEngineParams.params.plcrParams.sharedProfile) + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("In this initialization only overrideFqid can be initialized")); + err = FmPcdPlcrGetAbsoluteIdByProfileParams(h_FmPcd, e_FM_PCD_PLCR_SHARED, NULL, p_CcKeyAndNextEngineParamsTmp[i].nextEngineParams.params.plcrParams.newRelativeProfileId, &profileId); + if (err!= E_OK) + RETURN_ERROR(MAJOR, err, NO_MSG); + err = FmPcdPlcrCcGetSetParams(h_FmPcd, profileId, requiredAction); + if (err != E_OK) + RETURN_ERROR(MAJOR, err, NO_MSG); + p_CcKeyAndNextEngineParamsTmp[i].shadowAction |= requiredAction; + } + break; + + case 
(e_FM_PCD_DONE): + if ((requiredAction & UPDATE_NIA_ENQ_WITHOUT_DMA) && !(p_CcKeyAndNextEngineParamsTmp[i].shadowAction & UPDATE_NIA_ENQ_WITHOUT_DMA)) + { + tmpReg32 = GET_UINT32(p_AdTmp->nia); + if ((tmpReg32 & GET_NIA_BMI_AC_ENQ_FRAME(h_FmPcd)) != GET_NIA_BMI_AC_ENQ_FRAME(h_FmPcd)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Next engine was previously assigned not as PCD_DONE")); + tmpReg32 |= NIA_BMI_AC_ENQ_FRAME_WITHOUT_DMA; + WRITE_UINT32(p_AdTmp->nia, tmpReg32); + p_CcKeyAndNextEngineParamsTmp[i].shadowAction |= requiredAction; + } + break; + + default: + break; + } + } + + return E_OK; +} + +static t_Error ReleaseModifiedDataStructure(t_Handle h_FmPcd, + t_List *h_FmPcdOldPointersLst, + t_List *h_FmPcdNewPointersLst, + uint16_t numOfGoodChanges, + t_FmPcdModifyCcKeyAdditionalParams *p_AdditionalParams, + bool useShadowStructs) +{ + t_List *p_Pos; + t_Error err = E_OK; + t_CcNodeInformation ccNodeInfo, *p_CcNodeInformation; + t_Handle h_Muram; + t_FmPcdCcNode *p_FmPcdCcNextNode; + t_List *p_UpdateLst; + uint32_t intFlags; + + UNUSED(numOfGoodChanges); + + SANITY_CHECK_RETURN_ERROR(h_FmPcd,E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_AdditionalParams->h_CurrentNode,E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(h_FmPcdOldPointersLst,E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(h_FmPcdNewPointersLst,E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR((numOfGoodChanges == LIST_NumOfObjs(h_FmPcdOldPointersLst)),E_INVALID_STATE); + SANITY_CHECK_RETURN_ERROR((1 == LIST_NumOfObjs(h_FmPcdNewPointersLst)),E_INVALID_STATE); + + /* We don't update subtree of the new node with new tree because it was done in the previous stage */ + if (p_AdditionalParams->h_NodeForAdd) + { + p_FmPcdCcNextNode = (t_FmPcdCcNode*)p_AdditionalParams->h_NodeForAdd; + + if (!p_AdditionalParams->tree) + p_UpdateLst = &p_FmPcdCcNextNode->ccPrevNodesLst; + else + p_UpdateLst = &p_FmPcdCcNextNode->ccTreeIdLst; + + p_CcNodeInformation = FindNodeInfoInReleventLst(p_UpdateLst, + 
p_AdditionalParams->h_CurrentNode, + p_FmPcdCcNextNode->h_Spinlock); + + if (p_CcNodeInformation) + p_CcNodeInformation->index++; + else + { + memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation)); + ccNodeInfo.h_CcNode = (t_Handle)p_AdditionalParams->h_CurrentNode; + ccNodeInfo.index = 1; + EnqueueNodeInfoToRelevantLst(p_UpdateLst, + &ccNodeInfo, + p_FmPcdCcNextNode->h_Spinlock); + } + if (p_AdditionalParams->h_ManipForAdd) + { + p_CcNodeInformation = FindNodeInfoInReleventLst(FmPcdManipGetNodeLstPointedOnThisManip(p_AdditionalParams->h_ManipForAdd), + p_AdditionalParams->h_CurrentNode, + FmPcdManipGetSpinlock(p_AdditionalParams->h_ManipForAdd)); + + if (p_CcNodeInformation) + p_CcNodeInformation->index++; + else + { + memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation)); + ccNodeInfo.h_CcNode = (t_Handle)p_AdditionalParams->h_CurrentNode; + ccNodeInfo.index = 1; + EnqueueNodeInfoToRelevantLst(FmPcdManipGetNodeLstPointedOnThisManip(p_AdditionalParams->h_ManipForAdd), + &ccNodeInfo, + FmPcdManipGetSpinlock(p_AdditionalParams->h_ManipForAdd)); + } + } + } + + if (p_AdditionalParams->h_NodeForRmv) + { + p_FmPcdCcNextNode = (t_FmPcdCcNode*)p_AdditionalParams->h_NodeForRmv; + + if (!p_AdditionalParams->tree) + { + p_UpdateLst = &p_FmPcdCcNextNode->ccPrevNodesLst; + + while (!LIST_IsEmpty(&p_FmPcdCcNextNode->ccTreesLst)) + { + p_Pos = LIST_NEXT(&p_FmPcdCcNextNode->ccTreesLst); + p_CcNodeInformation = CC_NODE_F_OBJECT(p_Pos); + + ASSERT_COND(p_CcNodeInformation->h_CcNode); + + err = SetRequiredAction(h_FmPcd, + UPDATE_CC_WITH_DELETE_TREE, + &((t_FmPcdCcNode *)(p_AdditionalParams->h_CurrentNode))->keyAndNextEngineParams[p_AdditionalParams->savedKeyIndex], + PTR_MOVE(((t_FmPcdCcNode *)(p_AdditionalParams->h_CurrentNode))->h_AdTable, p_AdditionalParams->savedKeyIndex*FM_PCD_CC_AD_ENTRY_SIZE), + 1, + p_CcNodeInformation->h_CcNode); + } + } + else + { + p_UpdateLst = &p_FmPcdCcNextNode->ccTreeIdLst; + + err = SetRequiredAction(h_FmPcd, + UPDATE_CC_WITH_DELETE_TREE, + 
&((t_FmPcdCcTree *)(p_AdditionalParams->h_CurrentNode))->keyAndNextEngineParams[p_AdditionalParams->savedKeyIndex], + UINT_TO_PTR(((t_FmPcdCcTree *)(p_AdditionalParams->h_CurrentNode))->ccTreeBaseAddr + p_AdditionalParams->savedKeyIndex*FM_PCD_CC_AD_ENTRY_SIZE), + 1, + p_AdditionalParams->h_CurrentNode); + } + if (err) + return err; + + /* We remove from the subtree of the removed node tree because it wasn't done in the previous stage + Update ccPrevNodesLst or ccTreeIdLst of the removed node + Update of the node owner */ + p_CcNodeInformation = FindNodeInfoInReleventLst(p_UpdateLst, + p_AdditionalParams->h_CurrentNode, + p_FmPcdCcNextNode->h_Spinlock); + + ASSERT_COND(p_CcNodeInformation); + ASSERT_COND(p_CcNodeInformation->index); + + p_CcNodeInformation->index--; + + if (p_CcNodeInformation->index == 0) + DequeueNodeInfoFromRelevantLst(p_UpdateLst, + p_AdditionalParams->h_CurrentNode, + p_FmPcdCcNextNode->h_Spinlock); + + UpdateNodeOwner(p_FmPcdCcNextNode, FALSE); + + if (p_AdditionalParams->h_ManipForRmv) + { + p_CcNodeInformation = FindNodeInfoInReleventLst(FmPcdManipGetNodeLstPointedOnThisManip(p_AdditionalParams->h_ManipForRmv), + p_AdditionalParams->h_CurrentNode, + FmPcdManipGetSpinlock(p_AdditionalParams->h_ManipForRmv)); + + ASSERT_COND(p_CcNodeInformation); + ASSERT_COND(p_CcNodeInformation->index); + + p_CcNodeInformation->index--; + + if (p_CcNodeInformation->index == 0) + DequeueNodeInfoFromRelevantLst(FmPcdManipGetNodeLstPointedOnThisManip(p_AdditionalParams->h_ManipForRmv), + p_AdditionalParams->h_CurrentNode, + FmPcdManipGetSpinlock(p_AdditionalParams->h_ManipForRmv)); + } + } + + if (p_AdditionalParams->h_ManipForRmv) + FmPcdManipUpdateOwner(p_AdditionalParams->h_ManipForRmv, FALSE); + + if (p_AdditionalParams->p_StatsObjForRmv) + PutStatsObj((t_FmPcdCcNode *)(p_AdditionalParams->h_CurrentNode), + p_AdditionalParams->p_StatsObjForRmv); + +#if (DPAA_VERSION >= 11) + if (p_AdditionalParams->h_FrmReplicForRmv) + 
FrmReplicGroupUpdateOwner(p_AdditionalParams->h_FrmReplicForRmv, + FALSE/* remove */); +#endif /* (DPAA_VERSION >= 11) */ + + if (!useShadowStructs) + { + h_Muram = FmPcdGetMuramHandle(h_FmPcd); + ASSERT_COND(h_Muram); + + if ((p_AdditionalParams->tree && + !((t_FmPcd *)h_FmPcd)->p_CcShadow) || + (!p_AdditionalParams->tree && + !((t_FmPcdCcNode *)(p_AdditionalParams->h_CurrentNode))->maxNumOfKeys)) + { + /* We release new AD which was allocated and updated for copy from to actual AD */ + p_Pos = LIST_FIRST(h_FmPcdNewPointersLst); + p_CcNodeInformation = CC_NODE_F_OBJECT(p_Pos); + ASSERT_COND(p_CcNodeInformation->h_CcNode); + FM_MURAM_FreeMem(h_Muram, p_CcNodeInformation->h_CcNode); + } + + /* Free Old data structure if it has to be freed - new data structure was allocated*/ + if (p_AdditionalParams->p_AdTableOld) + FM_MURAM_FreeMem(h_Muram,p_AdditionalParams->p_AdTableOld); + + if (p_AdditionalParams->p_KeysMatchTableOld) + FM_MURAM_FreeMem(h_Muram,p_AdditionalParams->p_KeysMatchTableOld); + } + + /* Update current modified node with changed fields if it's required*/ + if (!p_AdditionalParams->tree) + { + if (p_AdditionalParams->p_AdTableNew) + ((t_FmPcdCcNode *)(p_AdditionalParams->h_CurrentNode))->h_AdTable = p_AdditionalParams->p_AdTableNew; + + if (p_AdditionalParams->p_KeysMatchTableNew) + ((t_FmPcdCcNode *)(p_AdditionalParams->h_CurrentNode))->h_KeysMatchTable = p_AdditionalParams->p_KeysMatchTableNew; + + /* Locking node's spinlock before updating 'keys and next engine' structure, + as it maybe used to retrieve keys statistics */ + intFlags = XX_LockIntrSpinlock(((t_FmPcdCcNode *)(p_AdditionalParams->h_CurrentNode))->h_Spinlock); + + ((t_FmPcdCcNode *)(p_AdditionalParams->h_CurrentNode))->numOfKeys = p_AdditionalParams->numOfKeys; + + memcpy(((t_FmPcdCcNode *)(p_AdditionalParams->h_CurrentNode))->keyAndNextEngineParams, + &p_AdditionalParams->keyAndNextEngineParams, + sizeof(t_FmPcdCcKeyAndNextEngineParams) * (CC_MAX_NUM_OF_KEYS)); + + 
XX_UnlockIntrSpinlock(((t_FmPcdCcNode *)(p_AdditionalParams->h_CurrentNode))->h_Spinlock, intFlags); + } + else + { + uint8_t numEntries = ((t_FmPcdCcTree *)(p_AdditionalParams->h_CurrentNode))->numOfEntries; + ASSERT_COND(numEntries < FM_PCD_MAX_NUM_OF_CC_GROUPS); + memcpy(&((t_FmPcdCcTree *)(p_AdditionalParams->h_CurrentNode))->keyAndNextEngineParams, + &p_AdditionalParams->keyAndNextEngineParams, + sizeof(t_FmPcdCcKeyAndNextEngineParams) * numEntries); + } + + ReleaseLst(h_FmPcdOldPointersLst); + ReleaseLst(h_FmPcdNewPointersLst); + + XX_Free(p_AdditionalParams); + + return E_OK; +} + +static t_Handle BuildNewAd(t_Handle h_Ad, + t_FmPcdModifyCcKeyAdditionalParams *p_FmPcdModifyCcKeyAdditionalParams, + t_FmPcdCcNode *p_CcNode, + t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams) +{ + t_FmPcdCcNode *p_FmPcdCcNodeTmp; + + p_FmPcdCcNodeTmp = (t_FmPcdCcNode*)XX_Malloc(sizeof(t_FmPcdCcNode)); + if (!p_FmPcdCcNodeTmp) + { + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("p_FmPcdCcNodeTmp")); + return NULL; + } + memset(p_FmPcdCcNodeTmp, 0, sizeof(t_FmPcdCcNode)); + + p_FmPcdCcNodeTmp->numOfKeys = p_FmPcdModifyCcKeyAdditionalParams->numOfKeys; + p_FmPcdCcNodeTmp->h_KeysMatchTable = p_FmPcdModifyCcKeyAdditionalParams->p_KeysMatchTableNew; + p_FmPcdCcNodeTmp->h_AdTable = p_FmPcdModifyCcKeyAdditionalParams->p_AdTableNew; + + p_FmPcdCcNodeTmp->lclMask = p_CcNode->lclMask; + p_FmPcdCcNodeTmp->parseCode = p_CcNode->parseCode; + p_FmPcdCcNodeTmp->offset = p_CcNode->offset; + p_FmPcdCcNodeTmp->prsArrayOffset = p_CcNode->prsArrayOffset; + p_FmPcdCcNodeTmp->ctrlFlow = p_CcNode->ctrlFlow; + p_FmPcdCcNodeTmp->ccKeySizeAccExtraction = p_CcNode->ccKeySizeAccExtraction; + p_FmPcdCcNodeTmp->sizeOfExtraction = p_CcNode->sizeOfExtraction; + p_FmPcdCcNodeTmp->glblMaskSize = p_CcNode->glblMaskSize; + p_FmPcdCcNodeTmp->p_GlblMask = p_CcNode->p_GlblMask; + + if (p_FmPcdCcNextEngineParams->nextEngine == e_FM_PCD_CC) + { + if (p_FmPcdCcNextEngineParams->h_Manip) + { + if 
(AllocAndFillAdForContLookupManip(p_FmPcdCcNextEngineParams->params.ccParams.h_CcNode)!= E_OK) + { + REPORT_ERROR(MAJOR, E_INVALID_STATE, NO_MSG); + return NULL; + } + } + FillAdOfTypeContLookup(h_Ad, + NULL, + p_CcNode->h_FmPcd, + p_FmPcdCcNodeTmp, + p_FmPcdCcNextEngineParams->h_Manip, + NULL); + } + +#if (DPAA_VERSION >= 11) + if ((p_FmPcdCcNextEngineParams->nextEngine == e_FM_PCD_FR) && + (p_FmPcdCcNextEngineParams->params.frParams.h_FrmReplic)) + { + FillAdOfTypeContLookup(h_Ad, + NULL, + p_CcNode->h_FmPcd, + p_FmPcdCcNodeTmp, + p_FmPcdCcNextEngineParams->h_Manip, + p_FmPcdCcNextEngineParams->params.frParams.h_FrmReplic); + } +#endif /* (DPAA_VERSION >= 11) */ + + XX_Free(p_FmPcdCcNodeTmp); + + return E_OK; +} + +static t_Error DynamicChangeHc(t_Handle h_FmPcd, + t_List *h_OldPointersLst, + t_List *h_NewPointersLst, + t_FmPcdModifyCcKeyAdditionalParams *p_AdditionalParams, + bool useShadowStructs) +{ + t_List *p_PosOld, *p_PosNew; + uint32_t oldAdAddrOffset, newAdAddrOffset; + uint16_t i = 0; + t_Error err = E_OK; + uint8_t numOfModifiedPtr; + + ASSERT_COND(h_FmPcd); + ASSERT_COND(h_OldPointersLst); + ASSERT_COND(h_NewPointersLst); + + numOfModifiedPtr = (uint8_t)LIST_NumOfObjs(h_OldPointersLst); + + p_PosNew = LIST_FIRST(h_NewPointersLst); + p_PosOld = LIST_FIRST(h_OldPointersLst); + + /* Retrieve address of new AD */ + newAdAddrOffset = FmPcdCcGetNodeAddrOffsetFromNodeInfo(h_FmPcd, p_PosNew); + if (newAdAddrOffset == (uint32_t)ILLEGAL_BASE) + { + ReleaseModifiedDataStructure(h_FmPcd, + h_OldPointersLst, + h_NewPointersLst, + 0, + p_AdditionalParams, + useShadowStructs); + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("New AD address")); + } + + for (i=0; i<numOfModifiedPtr; i++) + { + /* Retrieve address of current AD */ + oldAdAddrOffset = FmPcdCcGetNodeAddrOffsetFromNodeInfo(h_FmPcd, p_PosOld); + if (oldAdAddrOffset == (uint32_t)ILLEGAL_BASE) + { + ReleaseModifiedDataStructure(h_FmPcd, + h_OldPointersLst, + h_NewPointersLst, + i, + p_AdditionalParams, + 
useShadowStructs); + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Old AD address")); + } + + /* Invoke host command to copy from new AD to old AD */ + err = FmHcPcdCcDoDynamicChange(((t_FmPcd *)h_FmPcd)->h_Hc, oldAdAddrOffset, newAdAddrOffset); + if (err) + { + ReleaseModifiedDataStructure(h_FmPcd, + h_OldPointersLst, + h_NewPointersLst, + i, + p_AdditionalParams, + useShadowStructs); + RETURN_ERROR(MAJOR, err, ("For part of nodes changes are done - situation is danger")); + } + + p_PosOld = LIST_NEXT(p_PosOld); + } + + return E_OK; +} + +static t_Error DoDynamicChange(t_Handle h_FmPcd, + t_List *h_OldPointersLst, + t_List *h_NewPointersLst, + t_FmPcdModifyCcKeyAdditionalParams *p_AdditionalParams, + bool useShadowStructs) +{ + t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)(p_AdditionalParams->h_CurrentNode); + t_List *p_PosNew; + t_CcNodeInformation *p_CcNodeInfo; + t_FmPcdCcNextEngineParams nextEngineParams; + t_Handle h_Ad; + uint32_t keySize; + t_Error err = E_OK; + uint8_t numOfModifiedPtr; + + ASSERT_COND(h_FmPcd); + + SANITY_CHECK_RETURN_ERROR((LIST_NumOfObjs(h_OldPointersLst) >= 1),E_INVALID_STATE); + SANITY_CHECK_RETURN_ERROR((LIST_NumOfObjs(h_NewPointersLst) == 1),E_INVALID_STATE); + + memset(&nextEngineParams, 0, sizeof(t_FmPcdCcNextEngineParams)); + + numOfModifiedPtr = (uint8_t)LIST_NumOfObjs(h_OldPointersLst); + + p_PosNew = LIST_FIRST(h_NewPointersLst); + + /* Invoke host-command to copy from the new Ad to existing Ads */ + err = DynamicChangeHc(h_FmPcd, h_OldPointersLst, h_NewPointersLst, p_AdditionalParams, useShadowStructs); + if (err) + RETURN_ERROR(MAJOR, err, NO_MSG); + + if (useShadowStructs) + { + /* When the host-command above has ended, the old structures are 'free'and we can update + them by copying from the new shadow structures. 
*/ + if (p_CcNode->lclMask) + keySize = (uint32_t)(2 * p_CcNode->ccKeySizeAccExtraction); + else + keySize = p_CcNode->ccKeySizeAccExtraction; + + IO2IOCpy32(p_AdditionalParams->p_KeysMatchTableOld, + p_AdditionalParams->p_KeysMatchTableNew, + p_CcNode->maxNumOfKeys * keySize * sizeof (uint8_t)); + + IO2IOCpy32(p_AdditionalParams->p_AdTableOld, + p_AdditionalParams->p_AdTableNew, + (uint32_t)((p_CcNode->maxNumOfKeys + 1) * FM_PCD_CC_AD_ENTRY_SIZE)); + + /* Retrieve the address of the allocated Ad */ + p_CcNodeInfo = CC_NODE_F_OBJECT(p_PosNew); + h_Ad = p_CcNodeInfo->h_CcNode; + + /* Build a new Ad that holds the old (now updated) structures */ + p_AdditionalParams->p_KeysMatchTableNew = p_AdditionalParams->p_KeysMatchTableOld; + p_AdditionalParams->p_AdTableNew = p_AdditionalParams->p_AdTableOld; + + nextEngineParams.nextEngine = e_FM_PCD_CC; + nextEngineParams.params.ccParams.h_CcNode = (t_Handle)p_CcNode; + + BuildNewAd(h_Ad, p_AdditionalParams, p_CcNode, &nextEngineParams); + + /* HC to copy from the new Ad (old updated structures) to current Ad (uses shadow structures) */ + err = DynamicChangeHc(h_FmPcd, h_OldPointersLst, h_NewPointersLst, p_AdditionalParams, useShadowStructs); + if (err) + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + err = ReleaseModifiedDataStructure(h_FmPcd, + h_OldPointersLst, + h_NewPointersLst, + numOfModifiedPtr, + p_AdditionalParams, + useShadowStructs); + if (err) + RETURN_ERROR(MAJOR, err, NO_MSG); + + return E_OK; +} + +#ifdef FM_CAPWAP_SUPPORT +static bool IsCapwapApplSpecific(t_Handle h_Node) +{ + t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_Node; + bool isManipForCapwapApplSpecificBuild = FALSE; + int i = 0; + + ASSERT_COND(h_Node); + /* assumption that this function called only for INDEXED_FLOW_ID - so no miss*/ + for (i = 0; i < p_CcNode->numOfKeys; i++) + { + if ( p_CcNode->keyAndNextEngineParams[i].nextEngineParams.h_Manip && + FmPcdManipIsCapwapApplSpecific(p_CcNode->keyAndNextEngineParams[i].nextEngineParams.h_Manip)) + { + 
isManipForCapwapApplSpecificBuild = TRUE; + break; + } + } + return isManipForCapwapApplSpecificBuild; + +} +#endif /* FM_CAPWAP_SUPPORT */ + +static t_Error CcUpdateParam(t_Handle h_FmPcd, + t_Handle h_PcdParams, + t_Handle h_FmPort, + t_FmPcdCcKeyAndNextEngineParams *p_CcKeyAndNextEngineParams, + uint16_t numOfEntries, + t_Handle h_Ad, + bool validate, + uint16_t level, + t_Handle h_FmTree, + bool modify) +{ + t_FmPcdCcNode *p_CcNode; + t_Error err; + uint16_t tmp = 0; + int i = 0; + t_FmPcdCcTree *p_CcTree = (t_FmPcdCcTree *) h_FmTree; + + level++; + + if (p_CcTree->h_IpReassemblyManip) + { + err = FmPcdManipUpdate(h_FmPcd, + h_PcdParams, + h_FmPort, + p_CcTree->h_IpReassemblyManip, + NULL, + validate, + level, + h_FmTree, + modify); + if (err) + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + if (numOfEntries) + { + for (i=0; i<numOfEntries; i++) + { + if (i == 0) + h_Ad = PTR_MOVE(h_Ad, i*FM_PCD_CC_AD_ENTRY_SIZE); + else + h_Ad = PTR_MOVE(h_Ad, FM_PCD_CC_AD_ENTRY_SIZE); + + if (p_CcKeyAndNextEngineParams[i].nextEngineParams.nextEngine == e_FM_PCD_CC) + { + p_CcNode = p_CcKeyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode; + ASSERT_COND(p_CcNode); + + if (p_CcKeyAndNextEngineParams[i].nextEngineParams.h_Manip) + { + err = FmPcdManipUpdate(h_FmPcd, + NULL, + h_FmPort, + p_CcKeyAndNextEngineParams[i].nextEngineParams.h_Manip, + h_Ad, + validate, + level, + h_FmTree, + modify); + if (err) + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + if (p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].nextEngineParams.nextEngine != e_FM_PCD_INVALID) + tmp = (uint8_t)(p_CcNode->numOfKeys + 1); + else + tmp = p_CcNode->numOfKeys; + + err = CcUpdateParam(h_FmPcd, + h_PcdParams, + h_FmPort, + p_CcNode->keyAndNextEngineParams, + tmp, + p_CcNode->h_AdTable, + validate, + level, + h_FmTree, + modify); + if (err) + RETURN_ERROR(MAJOR, err, NO_MSG); + } + else + { + if (p_CcKeyAndNextEngineParams[i].nextEngineParams.h_Manip) + { + err = FmPcdManipUpdate(h_FmPcd, + NULL, + 
h_FmPort, + p_CcKeyAndNextEngineParams[i].nextEngineParams.h_Manip, + h_Ad, + validate, + level, + h_FmTree, + modify); + if (err) + RETURN_ERROR(MAJOR, err, NO_MSG); + } + } + } + } + + return E_OK; +} + +static ccPrivateInfo_t IcDefineCode(t_FmPcdCcNodeParams *p_CcNodeParam) +{ + switch (p_CcNodeParam->extractCcParams.extractNonHdr.action) + { + case (e_FM_PCD_ACTION_EXACT_MATCH): + switch (p_CcNodeParam->extractCcParams.extractNonHdr.src) + { + case (e_FM_PCD_EXTRACT_FROM_KEY): + return CC_PRIVATE_INFO_IC_KEY_EXACT_MATCH; + case (e_FM_PCD_EXTRACT_FROM_HASH): + return CC_PRIVATE_INFO_IC_HASH_EXACT_MATCH; + default: + return CC_PRIVATE_INFO_NONE; + } + + case (e_FM_PCD_ACTION_INDEXED_LOOKUP): + switch (p_CcNodeParam->extractCcParams.extractNonHdr.src) + { + case (e_FM_PCD_EXTRACT_FROM_HASH): + return CC_PRIVATE_INFO_IC_HASH_INDEX_LOOKUP; + case (e_FM_PCD_EXTRACT_FROM_FLOW_ID): + return CC_PRIVATE_INFO_IC_DEQ_FQID_INDEX_LOOKUP; + default: + return CC_PRIVATE_INFO_NONE; + } + + default: + break; + } + + return CC_PRIVATE_INFO_NONE; +} + +static t_CcNodeInformation * DequeueAdditionalInfoFromRelevantLst(t_List *p_List) +{ + t_CcNodeInformation *p_CcNodeInfo = NULL; + + if (!LIST_IsEmpty(p_List)) + { + p_CcNodeInfo = CC_NODE_F_OBJECT(p_List->p_Next); + LIST_DelAndInit(&p_CcNodeInfo->node); + } + + return p_CcNodeInfo; +} + +void ReleaseLst(t_List *p_List) +{ + t_CcNodeInformation *p_CcNodeInfo = NULL; + + if (!LIST_IsEmpty(p_List)) + { + p_CcNodeInfo = DequeueAdditionalInfoFromRelevantLst(p_List); + while (p_CcNodeInfo) + { + XX_Free(p_CcNodeInfo); + p_CcNodeInfo = DequeueAdditionalInfoFromRelevantLst(p_List); + } + } + + LIST_Del(p_List); +} + +static void DeleteNode(t_FmPcdCcNode *p_CcNode) +{ + uint32_t i; + + if (!p_CcNode) + return; + + if (p_CcNode->p_GlblMask) + { + XX_Free(p_CcNode->p_GlblMask); + p_CcNode->p_GlblMask = NULL; + } + + if (p_CcNode->h_KeysMatchTable) + { + FM_MURAM_FreeMem(FmPcdGetMuramHandle(p_CcNode->h_FmPcd), p_CcNode->h_KeysMatchTable); + 
p_CcNode->h_KeysMatchTable = NULL; + } + + if (p_CcNode->h_AdTable) + { + FM_MURAM_FreeMem(FmPcdGetMuramHandle(p_CcNode->h_FmPcd), p_CcNode->h_AdTable); + p_CcNode->h_AdTable = NULL; + } + + if (p_CcNode->h_Ad) + { + FM_MURAM_FreeMem(FmPcdGetMuramHandle(p_CcNode->h_FmPcd), p_CcNode->h_Ad); + p_CcNode->h_Ad = NULL; + } + + if (p_CcNode->h_StatsFLRs) + { + FM_MURAM_FreeMem(FmPcdGetMuramHandle(p_CcNode->h_FmPcd), p_CcNode->h_StatsFLRs); + p_CcNode->h_StatsFLRs = NULL; + } + + if (p_CcNode->h_Spinlock) + { + XX_FreeSpinlock(p_CcNode->h_Spinlock); + p_CcNode->h_Spinlock = NULL; + } + + /* Releasing all currently used statistics objects, including 'miss' entry */ + for (i = 0; i < p_CcNode->numOfKeys + 1; i++) + if (p_CcNode->keyAndNextEngineParams[i].p_StatsObj) + PutStatsObj(p_CcNode, p_CcNode->keyAndNextEngineParams[i].p_StatsObj); + + if (!LIST_IsEmpty(&p_CcNode->availableStatsLst)) + { + t_Handle h_FmMuram = FmPcdGetMuramHandle(p_CcNode->h_FmPcd); + + ASSERT_COND(h_FmMuram); + + FreeStatObjects(&p_CcNode->availableStatsLst, h_FmMuram); + } + + LIST_Del(&p_CcNode->availableStatsLst); + + ReleaseLst(&p_CcNode->ccPrevNodesLst); + ReleaseLst(&p_CcNode->ccTreeIdLst); + ReleaseLst(&p_CcNode->ccTreesLst); + + XX_Free(p_CcNode); +} + +static void DeleteTree(t_FmPcdCcTree *p_FmPcdTree, t_FmPcd *p_FmPcd) +{ + if (p_FmPcdTree) + { + if (p_FmPcdTree->ccTreeBaseAddr) + { + FM_MURAM_FreeMem(FmPcdGetMuramHandle(p_FmPcd), UINT_TO_PTR(p_FmPcdTree->ccTreeBaseAddr)); + p_FmPcdTree->ccTreeBaseAddr = 0; + } + + ReleaseLst(&p_FmPcdTree->fmPortsLst); + + XX_Free(p_FmPcdTree); + } +} + +static void GetCcExtractKeySize(uint8_t parseCodeRealSize, uint8_t *parseCodeCcSize) +{ + if ((parseCodeRealSize > 0) && (parseCodeRealSize < 2)) + *parseCodeCcSize = 1; + else if (parseCodeRealSize == 2) + *parseCodeCcSize = 2; + else if ((parseCodeRealSize > 2) && (parseCodeRealSize <= 4)) + *parseCodeCcSize = 4; + else if ((parseCodeRealSize > 4) && (parseCodeRealSize <= 8)) + *parseCodeCcSize = 8; + 
else if ((parseCodeRealSize > 8) && (parseCodeRealSize <= 16)) + *parseCodeCcSize = 16; + else if ((parseCodeRealSize > 16) && (parseCodeRealSize <= 24)) + *parseCodeCcSize = 24; + else if ((parseCodeRealSize > 24) && (parseCodeRealSize <= 32)) + *parseCodeCcSize = 32; + else if ((parseCodeRealSize > 32) && (parseCodeRealSize <= 40)) + *parseCodeCcSize = 40; + else if ((parseCodeRealSize > 40) && (parseCodeRealSize <= 48)) + *parseCodeCcSize = 48; + else if ((parseCodeRealSize > 48) && (parseCodeRealSize <= 56)) + *parseCodeCcSize = 56; + else + *parseCodeCcSize = 0; +} + +static void GetSizeHeaderField(e_NetHeaderType hdr, + e_FmPcdHdrIndex index, + t_FmPcdFields field, + uint8_t *parseCodeRealSize) +{ + UNUSED(index); + switch (hdr) + { + case (HEADER_TYPE_ETH): + switch (field.eth) + { + case (NET_HEADER_FIELD_ETH_DA): + *parseCodeRealSize = 6; + break; + + case (NET_HEADER_FIELD_ETH_SA): + *parseCodeRealSize = 6; + break; + + case (NET_HEADER_FIELD_ETH_TYPE): + *parseCodeRealSize = 2; + break; + + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported1")); + *parseCodeRealSize = CC_SIZE_ILLEGAL; + break; + } + break; + + case (HEADER_TYPE_PPPoE): + switch (field.pppoe) + { + case (NET_HEADER_FIELD_PPPoE_PID): + *parseCodeRealSize = 2; + break; + + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported1")); + *parseCodeRealSize = CC_SIZE_ILLEGAL; + break; + } + break; + + case (HEADER_TYPE_VLAN): + switch (field.vlan) + { + case (NET_HEADER_FIELD_VLAN_TCI): + *parseCodeRealSize = 2; + break; + + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported2")); + *parseCodeRealSize = CC_SIZE_ILLEGAL; + break; + } + break; + + case (HEADER_TYPE_MPLS): + switch (field.mpls) + { + case (NET_HEADER_FIELD_MPLS_LABEL_STACK): + *parseCodeRealSize = 4; + break; + + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported3")); + *parseCodeRealSize = CC_SIZE_ILLEGAL; + break; + } + break; + + case 
(HEADER_TYPE_IPv4): + switch (field.ipv4) + { + case (NET_HEADER_FIELD_IPv4_DST_IP): + case (NET_HEADER_FIELD_IPv4_SRC_IP): + *parseCodeRealSize = 4; + break; + + case (NET_HEADER_FIELD_IPv4_TOS): + case (NET_HEADER_FIELD_IPv4_PROTO): + *parseCodeRealSize = 1; + break; + + case (NET_HEADER_FIELD_IPv4_DST_IP | NET_HEADER_FIELD_IPv4_SRC_IP): + *parseCodeRealSize = 8; + break; + + case (NET_HEADER_FIELD_IPv4_TTL): + *parseCodeRealSize = 1; + break; + + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported4")); + *parseCodeRealSize = CC_SIZE_ILLEGAL; + break; + } + break; + + case (HEADER_TYPE_IPv6): + switch (field.ipv6) + { + case (NET_HEADER_FIELD_IPv6_VER | NET_HEADER_FIELD_IPv6_FL | NET_HEADER_FIELD_IPv6_TC): + *parseCodeRealSize = 4; + break; + + case (NET_HEADER_FIELD_IPv6_NEXT_HDR): + case (NET_HEADER_FIELD_IPv6_HOP_LIMIT): + *parseCodeRealSize = 1; + break; + + case (NET_HEADER_FIELD_IPv6_DST_IP): + case (NET_HEADER_FIELD_IPv6_SRC_IP): + *parseCodeRealSize = 16; + break; + + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported5")); + *parseCodeRealSize = CC_SIZE_ILLEGAL; + break; + } + break; + + case (HEADER_TYPE_IP): + switch (field.ip) + { + case (NET_HEADER_FIELD_IP_DSCP): + case (NET_HEADER_FIELD_IP_PROTO): + *parseCodeRealSize = 1; + break; + + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported5")); + *parseCodeRealSize = CC_SIZE_ILLEGAL; + break; + } + break; + + case (HEADER_TYPE_GRE): + switch (field.gre) + { + case ( NET_HEADER_FIELD_GRE_TYPE): + *parseCodeRealSize = 2; + break; + + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported6")); + *parseCodeRealSize = CC_SIZE_ILLEGAL; + break; + } + break; + + case (HEADER_TYPE_MINENCAP): + switch (field.minencap) + { + case (NET_HEADER_FIELD_MINENCAP_TYPE): + *parseCodeRealSize = 1; + break; + + case (NET_HEADER_FIELD_MINENCAP_DST_IP): + case (NET_HEADER_FIELD_MINENCAP_SRC_IP): + *parseCodeRealSize = 4; + break; + + 
case (NET_HEADER_FIELD_MINENCAP_SRC_IP | NET_HEADER_FIELD_MINENCAP_DST_IP): + *parseCodeRealSize = 8; + break; + + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported7")); + *parseCodeRealSize = CC_SIZE_ILLEGAL; + break; + } + break; + + case (HEADER_TYPE_TCP): + switch (field.tcp) + { + case (NET_HEADER_FIELD_TCP_PORT_SRC): + case (NET_HEADER_FIELD_TCP_PORT_DST): + *parseCodeRealSize = 2; + break; + + case (NET_HEADER_FIELD_TCP_PORT_SRC | NET_HEADER_FIELD_TCP_PORT_DST): + *parseCodeRealSize = 4; + break; + + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported8")); + *parseCodeRealSize = CC_SIZE_ILLEGAL; + break; + } + break; + + case (HEADER_TYPE_UDP): + switch (field.udp) + { + case (NET_HEADER_FIELD_UDP_PORT_SRC): + case (NET_HEADER_FIELD_UDP_PORT_DST): + *parseCodeRealSize = 2; + break; + + case (NET_HEADER_FIELD_UDP_PORT_SRC | NET_HEADER_FIELD_UDP_PORT_DST): + *parseCodeRealSize = 4; + break; + + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported9")); + *parseCodeRealSize = CC_SIZE_ILLEGAL; + break; + } + break; + + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported10")); + *parseCodeRealSize = CC_SIZE_ILLEGAL; + break; + } +} + +t_Error ValidateNextEngineParams(t_Handle h_FmPcd, + t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams, + e_FmPcdCcStatsMode statsMode) +{ + uint16_t absoluteProfileId; + t_Error err = E_OK; + uint8_t relativeSchemeId; + + if ((statsMode == e_FM_PCD_CC_STATS_MODE_NONE) && + (p_FmPcdCcNextEngineParams->statisticsEn)) + RETURN_ERROR(MAJOR, E_CONFLICT, + ("Statistics are requested for a key, but statistics mode was set" + "to 'NONE' upon initialization of this match table")); + + switch (p_FmPcdCcNextEngineParams->nextEngine) + { + case (e_FM_PCD_INVALID): + err = E_NOT_SUPPORTED; + break; + + case (e_FM_PCD_DONE): + if ((p_FmPcdCcNextEngineParams->params.enqueueParams.action == e_FM_PCD_ENQ_FRAME) && + 
p_FmPcdCcNextEngineParams->params.enqueueParams.overrideFqid) + { + if (!p_FmPcdCcNextEngineParams->params.enqueueParams.newFqid) + RETURN_ERROR(MAJOR, E_CONFLICT, ("When overrideFqid is set, newFqid must not be zero")); + if (p_FmPcdCcNextEngineParams->params.enqueueParams.newFqid & ~0x00FFFFFF) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fqidForCtrlFlow must be between 1 and 2^24-1")); + } + break; + + case (e_FM_PCD_KG): + relativeSchemeId = FmPcdKgGetRelativeSchemeId(h_FmPcd, + FmPcdKgGetSchemeId(p_FmPcdCcNextEngineParams->params.kgParams.h_DirectScheme)); + if (relativeSchemeId == FM_PCD_KG_NUM_OF_SCHEMES) + RETURN_ERROR(MAJOR, E_NOT_IN_RANGE, NO_MSG); + if (!FmPcdKgIsSchemeValidSw(p_FmPcdCcNextEngineParams->params.kgParams.h_DirectScheme)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("not valid schemeIndex in KG next engine param")); + if (!KgIsSchemeAlwaysDirect(h_FmPcd, relativeSchemeId)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("CC Node may point only to a scheme that is always direct.")); + break; + + case (e_FM_PCD_PLCR): + if (p_FmPcdCcNextEngineParams->params.plcrParams.overrideParams) + { + /* if private policer profile, it may be uninitialized yet, therefore no checks are done at this stage */ + if (p_FmPcdCcNextEngineParams->params.plcrParams.sharedProfile) + { + err = FmPcdPlcrGetAbsoluteIdByProfileParams(h_FmPcd, + e_FM_PCD_PLCR_SHARED, + NULL, + p_FmPcdCcNextEngineParams->params.plcrParams.newRelativeProfileId, + &absoluteProfileId); + if (err) + RETURN_ERROR(MAJOR, err, ("Shared profile offset is out of range")); + if (!FmPcdPlcrIsProfileValid(h_FmPcd, absoluteProfileId)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid profile")); + } + } + break; + + case (e_FM_PCD_HASH): + p_FmPcdCcNextEngineParams->nextEngine = e_FM_PCD_CC; + case (e_FM_PCD_CC): + if (!p_FmPcdCcNextEngineParams->params.ccParams.h_CcNode) + RETURN_ERROR(MAJOR, E_NULL_POINTER, ("handler to next Node is NULL")); + break; + +#if (DPAA_VERSION >= 11) + case (e_FM_PCD_FR): + if 
(!p_FmPcdCcNextEngineParams->params.frParams.h_FrmReplic) + err = E_NOT_SUPPORTED; + break; +#endif /* (DPAA_VERSION >= 11) */ + + default: + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Next engine is not correct")); + } + + + return err; +} + +static uint8_t GetGenParseCode(t_Handle h_FmPcd, + e_FmPcdExtractFrom src, + uint32_t offset, + bool glblMask, + uint8_t *parseArrayOffset, + bool fromIc, + ccPrivateInfo_t icCode) +{ + UNUSED(h_FmPcd); + + if (!fromIc) + { + switch (src) + { + case (e_FM_PCD_EXTRACT_FROM_FRAME_START): + if (glblMask) + return CC_PC_GENERIC_WITH_MASK ; + else + return CC_PC_GENERIC_WITHOUT_MASK; + + case (e_FM_PCD_EXTRACT_FROM_CURR_END_OF_PARSE): + *parseArrayOffset = CC_PC_PR_NEXT_HEADER_OFFSET; + if (offset) + return CC_PR_OFFSET; + else + return CC_PR_WITHOUT_OFFSET; + + default: + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal 'extract from' src")); + return CC_PC_ILLEGAL; + } + } + else + { + switch (icCode) + { + case (CC_PRIVATE_INFO_IC_KEY_EXACT_MATCH): + *parseArrayOffset = 0x50; + return CC_PC_GENERIC_IC_GMASK; + + case (CC_PRIVATE_INFO_IC_HASH_EXACT_MATCH): + *parseArrayOffset = 0x48; + return CC_PC_GENERIC_IC_GMASK; + + case (CC_PRIVATE_INFO_IC_HASH_INDEX_LOOKUP): + *parseArrayOffset = 0x48; + return CC_PC_GENERIC_IC_HASH_INDEXED; + + case (CC_PRIVATE_INFO_IC_DEQ_FQID_INDEX_LOOKUP): + *parseArrayOffset = 0x16; + return CC_PC_GENERIC_IC_HASH_INDEXED; + + default: + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal 'extract from' src")); + break; + } + } + + return CC_PC_ILLEGAL; +} + +static uint8_t GetFullFieldParseCode(e_NetHeaderType hdr, + e_FmPcdHdrIndex index, + t_FmPcdFields field) +{ + switch (hdr) + { + case (HEADER_TYPE_NONE): + ASSERT_COND(FALSE); + return CC_PC_ILLEGAL; + + case (HEADER_TYPE_ETH): + switch (field.eth) + { + case (NET_HEADER_FIELD_ETH_DA): + return CC_PC_FF_MACDST; + case (NET_HEADER_FIELD_ETH_SA): + return CC_PC_FF_MACSRC; + case (NET_HEADER_FIELD_ETH_TYPE): + return CC_PC_FF_ETYPE; + default: + 
REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); + return CC_PC_ILLEGAL; + } + + case (HEADER_TYPE_VLAN): + switch (field.vlan) + { + case (NET_HEADER_FIELD_VLAN_TCI): + if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1)) + return CC_PC_FF_TCI1; + if (index == e_FM_PCD_HDR_INDEX_LAST) + return CC_PC_FF_TCI2; + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); + return CC_PC_ILLEGAL; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); + return CC_PC_ILLEGAL; + } + + case (HEADER_TYPE_MPLS): + switch (field.mpls) + { + case (NET_HEADER_FIELD_MPLS_LABEL_STACK): + if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1)) + return CC_PC_FF_MPLS1; + if (index == e_FM_PCD_HDR_INDEX_LAST) + return CC_PC_FF_MPLS_LAST; + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal MPLS index")); + return CC_PC_ILLEGAL; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); + return CC_PC_ILLEGAL; + } + + case (HEADER_TYPE_IPv4): + switch (field.ipv4) + { + case (NET_HEADER_FIELD_IPv4_DST_IP): + if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1)) + return CC_PC_FF_IPV4DST1; + if (index == e_FM_PCD_HDR_INDEX_2) + return CC_PC_FF_IPV4DST2; + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv4 index")); + return CC_PC_ILLEGAL; + case (NET_HEADER_FIELD_IPv4_TOS): + if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1)) + return CC_PC_FF_IPV4IPTOS_TC1; + if (index == e_FM_PCD_HDR_INDEX_2) + return CC_PC_FF_IPV4IPTOS_TC2; + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv4 index")); + return CC_PC_ILLEGAL; + case (NET_HEADER_FIELD_IPv4_PROTO): + if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1)) + return CC_PC_FF_IPV4PTYPE1; + if(index == e_FM_PCD_HDR_INDEX_2) + return CC_PC_FF_IPV4PTYPE2; + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv4 index")); + return CC_PC_ILLEGAL; + case 
(NET_HEADER_FIELD_IPv4_SRC_IP): + if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1)) + return CC_PC_FF_IPV4SRC1; + if (index == e_FM_PCD_HDR_INDEX_2) + return CC_PC_FF_IPV4SRC2; + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv4 index")); + return CC_PC_ILLEGAL; + case (NET_HEADER_FIELD_IPv4_SRC_IP | NET_HEADER_FIELD_IPv4_DST_IP): + if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1)) + return CC_PC_FF_IPV4SRC1_IPV4DST1; + if (index == e_FM_PCD_HDR_INDEX_2) + return CC_PC_FF_IPV4SRC2_IPV4DST2; + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv4 index")); + return CC_PC_ILLEGAL; + case (NET_HEADER_FIELD_IPv4_TTL): + return CC_PC_FF_IPV4TTL; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); + return CC_PC_ILLEGAL; + } + + case (HEADER_TYPE_IPv6): + switch (field.ipv6) + { + case (NET_HEADER_FIELD_IPv6_VER | NET_HEADER_FIELD_IPv6_FL | NET_HEADER_FIELD_IPv6_TC): + if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1)) + return CC_PC_FF_IPTOS_IPV6TC1_IPV6FLOW1; + if (index == e_FM_PCD_HDR_INDEX_2) + return CC_PC_FF_IPTOS_IPV6TC2_IPV6FLOW2; + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv6 index")); + return CC_PC_ILLEGAL; + + case (NET_HEADER_FIELD_IPv6_NEXT_HDR): + if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1)) + return CC_PC_FF_IPV6PTYPE1; + if (index == e_FM_PCD_HDR_INDEX_2) + return CC_PC_FF_IPV6PTYPE2; + if (index == e_FM_PCD_HDR_INDEX_LAST) + return CC_PC_FF_IPPID; + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv6 index")); + return CC_PC_ILLEGAL; + + case (NET_HEADER_FIELD_IPv6_DST_IP): + if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1)) + return CC_PC_FF_IPV6DST1; + if (index == e_FM_PCD_HDR_INDEX_2) + return CC_PC_FF_IPV6DST2; + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv6 index")); + return CC_PC_ILLEGAL; + + case (NET_HEADER_FIELD_IPv6_SRC_IP): + if ((index == e_FM_PCD_HDR_INDEX_NONE) || 
(index == e_FM_PCD_HDR_INDEX_1)) + return CC_PC_FF_IPV6SRC1; + if (index == e_FM_PCD_HDR_INDEX_2) + return CC_PC_FF_IPV6SRC2; + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv6 index")); + return CC_PC_ILLEGAL; + + case (NET_HEADER_FIELD_IPv6_HOP_LIMIT): + return CC_PC_FF_IPV6HOP_LIMIT; + + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); + return CC_PC_ILLEGAL; + } + + case (HEADER_TYPE_IP): + switch (field.ip) + { + case (NET_HEADER_FIELD_IP_DSCP): + if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1)) + return CC_PC_FF_IPDSCP; + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IP index")); + return CC_PC_ILLEGAL; + + case (NET_HEADER_FIELD_IP_PROTO): + if (index == e_FM_PCD_HDR_INDEX_LAST) + return CC_PC_FF_IPPID; + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IP index")); + return CC_PC_ILLEGAL; + + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); + return CC_PC_ILLEGAL; + } + + case (HEADER_TYPE_GRE): + switch (field.gre) + { + case (NET_HEADER_FIELD_GRE_TYPE): + return CC_PC_FF_GREPTYPE; + + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); + return CC_PC_ILLEGAL; + } + + case (HEADER_TYPE_MINENCAP): + switch (field.minencap) + { + case (NET_HEADER_FIELD_MINENCAP_TYPE): + return CC_PC_FF_MINENCAP_PTYPE; + + case (NET_HEADER_FIELD_MINENCAP_DST_IP): + return CC_PC_FF_MINENCAP_IPDST; + + case (NET_HEADER_FIELD_MINENCAP_SRC_IP): + return CC_PC_FF_MINENCAP_IPSRC; + + case (NET_HEADER_FIELD_MINENCAP_SRC_IP | NET_HEADER_FIELD_MINENCAP_DST_IP): + return CC_PC_FF_MINENCAP_IPSRC_IPDST; + + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); + return CC_PC_ILLEGAL; + } + + case (HEADER_TYPE_TCP): + switch (field.tcp) + { + case (NET_HEADER_FIELD_TCP_PORT_SRC): + return CC_PC_FF_L4PSRC; + + case (NET_HEADER_FIELD_TCP_PORT_DST): + return CC_PC_FF_L4PDST; + + case (NET_HEADER_FIELD_TCP_PORT_DST | NET_HEADER_FIELD_TCP_PORT_SRC): + 
return CC_PC_FF_L4PSRC_L4PDST;

            default:
                REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
                return CC_PC_ILLEGAL;
        }

        case (HEADER_TYPE_PPPoE):
            switch (field.pppoe)
            {
                case (NET_HEADER_FIELD_PPPoE_PID):
                    return CC_PC_FF_PPPPID;

                default:
                    REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
                    return CC_PC_ILLEGAL;
            }

        case (HEADER_TYPE_UDP):
            switch (field.udp)
            {
                case (NET_HEADER_FIELD_UDP_PORT_SRC):
                    return CC_PC_FF_L4PSRC;

                case (NET_HEADER_FIELD_UDP_PORT_DST):
                    return CC_PC_FF_L4PDST;

                case (NET_HEADER_FIELD_UDP_PORT_DST | NET_HEADER_FIELD_UDP_PORT_SRC):
                    return CC_PC_FF_L4PSRC_L4PDST;

                default:
                    REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
                    return CC_PC_ILLEGAL;
            }

        default:
            REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
            return CC_PC_ILLEGAL;
    }
}

/* Maps a header type (plus optional header index) to the parse code used by a
 * CC-node "extract from parse result" extraction; on success also returns, via
 * parseArrayOffset, the offset of that header's entry in the parse-results
 * array. Returns CC_PC_ILLEGAL for unsupported header/index combinations. */
static uint8_t GetPrParseCode(e_NetHeaderType hdr,
                              e_FmPcdHdrIndex hdrIndex,
                              uint32_t offset,
                              bool glblMask,
                              uint8_t *parseArrayOffset)
{
    bool offsetRelevant = FALSE;

    /* A non-zero extraction offset selects the "with offset" parse code below */
    if (offset)
        offsetRelevant = TRUE;

    switch (hdr)
    {
        case (HEADER_TYPE_NONE):
            ASSERT_COND(FALSE);
            return CC_PC_ILLEGAL;

        case (HEADER_TYPE_ETH):
            *parseArrayOffset = (uint8_t)CC_PC_PR_ETH_OFFSET;
            break;

        case (HEADER_TYPE_USER_DEFINED_SHIM1):
            /* Shim headers have a dedicated parse code unless an offset or a
               global mask forces the generic "parse-array offset" form */
            if (offset || glblMask)
                *parseArrayOffset = (uint8_t)CC_PC_PR_USER_DEFINED_SHIM1_OFFSET;
            else
                return CC_PC_PR_SHIM1;
            break;

        case (HEADER_TYPE_USER_DEFINED_SHIM2):
            if (offset || glblMask)
                *parseArrayOffset = (uint8_t)CC_PC_PR_USER_DEFINED_SHIM2_OFFSET;
            else
                return CC_PC_PR_SHIM2;
            break;

        case (HEADER_TYPE_LLC_SNAP):
            *parseArrayOffset = CC_PC_PR_USER_LLC_SNAP_OFFSET;
            break;

        case (HEADER_TYPE_PPPoE):
            *parseArrayOffset = CC_PC_PR_PPPOE_OFFSET;
            break;

        case (HEADER_TYPE_MPLS):
            if ((hdrIndex == e_FM_PCD_HDR_INDEX_NONE) || (hdrIndex == e_FM_PCD_HDR_INDEX_1))
                *parseArrayOffset = CC_PC_PR_MPLS1_OFFSET;
            else if (hdrIndex == e_FM_PCD_HDR_INDEX_LAST)
                *parseArrayOffset = CC_PC_PR_MPLS_LAST_OFFSET;
            else
            {
                REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal MPLS header index"));
                return CC_PC_ILLEGAL;
            }
            break;

        case (HEADER_TYPE_IPv4):
        case (HEADER_TYPE_IPv6):
            /* NOTE(review): the second/last IP header slot is selected by
               e_FM_PCD_HDR_INDEX_2 here, whereas MPLS above uses
               e_FM_PCD_HDR_INDEX_LAST - confirm this asymmetry is intended. */
            if ((hdrIndex == e_FM_PCD_HDR_INDEX_NONE) || (hdrIndex == e_FM_PCD_HDR_INDEX_1))
                *parseArrayOffset = CC_PC_PR_IP1_OFFSET;
            else if (hdrIndex == e_FM_PCD_HDR_INDEX_2)
                *parseArrayOffset = CC_PC_PR_IP_LAST_OFFSET;
            else
            {
                REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IP header index"));
                return CC_PC_ILLEGAL;
            }
            break;

        case (HEADER_TYPE_MINENCAP):
            *parseArrayOffset = CC_PC_PR_MINENC_OFFSET;
            break;

        case (HEADER_TYPE_GRE):
            *parseArrayOffset = CC_PC_PR_GRE_OFFSET;
            break;

        case (HEADER_TYPE_TCP):
        case (HEADER_TYPE_UDP):
        case (HEADER_TYPE_IPSEC_AH):
        case (HEADER_TYPE_IPSEC_ESP):
        case (HEADER_TYPE_DCCP):
        case (HEADER_TYPE_SCTP):
            /* All L4 protocols share a single parse-results slot */
            *parseArrayOffset = CC_PC_PR_L4_OFFSET;
            break;

        default:
            REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IP header for this type of operation"));
            return CC_PC_ILLEGAL;
    }

    if (offsetRelevant)
        return CC_PR_OFFSET;
    else
        return CC_PR_WITHOUT_OFFSET;
}

/* Maps a (header, field) pair to the parse code used by a CC-node "extract
 * field from parse result" extraction; returns the field's offset in the
 * parse-results array via parseArrayOffset. Only ETH type and VLAN TCI are
 * supported here. */
static uint8_t GetFieldParseCode(e_NetHeaderType hdr,
                                 t_FmPcdFields field,
                                 uint32_t offset,
                                 uint8_t *parseArrayOffset,
                                 e_FmPcdHdrIndex hdrIndex)
{
    bool offsetRelevant = FALSE;

    if (offset)
        offsetRelevant = TRUE;

    switch (hdr)
    {
        case (HEADER_TYPE_NONE):
            ASSERT_COND(FALSE);
            /* no break: deliberately falls through when assertions compile out */
        case (HEADER_TYPE_ETH):
            switch (field.eth)
            {
                case (NET_HEADER_FIELD_ETH_TYPE):
                    *parseArrayOffset = CC_PC_PR_ETYPE_LAST_OFFSET;
                    break;

                default:
                    REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
                    return CC_PC_ILLEGAL;
            }
            break;

        case (HEADER_TYPE_VLAN):
            switch (field.vlan)
            {
                case (NET_HEADER_FIELD_VLAN_TCI):
                    if ((hdrIndex == e_FM_PCD_HDR_INDEX_NONE) || (hdrIndex == e_FM_PCD_HDR_INDEX_1))
                        *parseArrayOffset = CC_PC_PR_VLAN1_OFFSET;
                    else
if (hdrIndex == e_FM_PCD_HDR_INDEX_LAST)
                        *parseArrayOffset = CC_PC_PR_VLAN2_OFFSET;
                    break;

                default:
                    REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported"));
                    return CC_PC_ILLEGAL;
            }
            break;

        default:
            REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal header "));
            return CC_PC_ILLEGAL;
    }

    if (offsetRelevant)
        return CC_PR_OFFSET;
    else
        return CC_PR_WITHOUT_OFFSET;
}

/* Builds a "result"-type action descriptor for a CC next-engine decision
 * (enqueue/done, KeyGen, or Policer), optionally chaining a statistics AD in
 * front of it and/or a manip AD behind it. Writes the AD fields through
 * WRITE_UINT32 (MURAM-resident descriptor). */
static void FillAdOfTypeResult(t_Handle h_Ad,
                               t_FmPcdCcStatsParams *p_FmPcdCcStatsParams,
                               t_FmPcd *p_FmPcd,
                               t_FmPcdCcNextEngineParams *p_CcNextEngineParams)
{
    t_AdOfTypeResult *p_AdResult = (t_AdOfTypeResult *)h_Ad;
    t_Handle h_TmpAd;
    uint32_t tmp = 0, tmpNia = 0;
    uint16_t profileId;
    t_Handle p_AdNewPtr = NULL;

    /* There are 3 cases handled in this routine of building a "result" type AD.
     * Case 1: No Manip. The action descriptor is built within the match table.
     * Case 2: Manip exists. A new AD is created - p_AdNewPtr. It is initialized
     *         either in the FmPcdManipUpdateAdResultForCc routine or it was already
     *         initialized and returned here.
     *         p_AdResult (within the match table) will be initialized after
     *         this routine returns and point to the existing AD.
     * Case 3: Manip exists. The action descriptor is built within the match table.
     *         FmPcdManipUpdateAdResultForCc returns a NULL p_AdNewPtr.
     *
     * If statistics were enabled and the statistics mode of this node requires
     * a statistics Ad, it will be placed after the result Ad and before the
     * manip Ad, if manip Ad exists here.
     */

    /* As default, the "new" ptr is the current one. i.e. the content of the result
     * AD will be written into the match table itself (case (1))*/
    p_AdNewPtr = p_AdResult;

    /* Initialize an action descriptor, if current statistics mode requires an Ad */
    if (p_FmPcdCcStatsParams)
    {
        ASSERT_COND(p_FmPcdCcStatsParams->h_StatsAd);
        ASSERT_COND(p_FmPcdCcStatsParams->h_StatsCounters);

        /* Swapping addresses between statistics Ad and the current lookup AD addresses */
        h_TmpAd = p_FmPcdCcStatsParams->h_StatsAd;
        p_FmPcdCcStatsParams->h_StatsAd = h_Ad;
        h_Ad = h_TmpAd;

        p_AdNewPtr = h_Ad;
        p_AdResult = h_Ad;

        /* Init statistics Ad and connect current lookup AD as 'next action' from statistics Ad */
        UpdateStatsAd(p_FmPcdCcStatsParams,
                      h_Ad,
                      p_FmPcd->physicalMuramBase);
    }

    /* Create manip and return p_AdNewPtr to either a new descriptor or NULL */
    if (p_CcNextEngineParams->h_Manip)
        FmPcdManipUpdateAdResultForCc(p_CcNextEngineParams->h_Manip,
                                      p_CcNextEngineParams,
                                      h_Ad,
                                      &p_AdNewPtr);

    /* if (p_AdNewPtr = NULL) --> Done. (case (3)) */
    if (p_AdNewPtr)
    {
        /* case (1) and (2) */
        switch (p_CcNextEngineParams->nextEngine)
        {
            case (e_FM_PCD_DONE):
                /* Enqueue: choose control-flow type (explicit FQID) or
                   data-flow type (FQID taken from the frame's context) */
                if (p_CcNextEngineParams->params.enqueueParams.action == e_FM_PCD_ENQ_FRAME)
                {
                    if (p_CcNextEngineParams->params.enqueueParams.overrideFqid)
                    {
                        tmp = FM_PCD_AD_RESULT_CONTRL_FLOW_TYPE;
                        tmp |= p_CcNextEngineParams->params.enqueueParams.newFqid;
#if (DPAA_VERSION >= 11)
                        tmp |= (p_CcNextEngineParams->params.enqueueParams.newRelativeStorageProfileId & FM_PCD_AD_RESULT_VSP_MASK) << FM_PCD_AD_RESULT_VSP_SHIFT;
#endif /* (DPAA_VERSION >= 11) */
                    }
                    else
                    {
                        tmp = FM_PCD_AD_RESULT_DATA_FLOW_TYPE;
                        tmp |= FM_PCD_AD_RESULT_PLCR_DIS;
                    }
                }

                if (p_CcNextEngineParams->params.enqueueParams.action == e_FM_PCD_DROP_FRAME)
                    tmpNia |= GET_NIA_BMI_AC_DISCARD_FRAME(p_FmPcd);
                else
                    tmpNia |= GET_NIA_BMI_AC_ENQ_FRAME(p_FmPcd);
                break;

            case (e_FM_PCD_KG):
                if (p_CcNextEngineParams->params.kgParams.overrideFqid)
                {
                    tmp = FM_PCD_AD_RESULT_CONTRL_FLOW_TYPE;
                    tmp |= p_CcNextEngineParams->params.kgParams.newFqid;
#if (DPAA_VERSION >= 11)
                    tmp |= (p_CcNextEngineParams->params.kgParams.newRelativeStorageProfileId & FM_PCD_AD_RESULT_VSP_MASK) << FM_PCD_AD_RESULT_VSP_SHIFT;
#endif /* (DPAA_VERSION >= 11) */
                }
                else
                {
                    tmp = FM_PCD_AD_RESULT_DATA_FLOW_TYPE;
                    tmp |= FM_PCD_AD_RESULT_PLCR_DIS;
                }
                /* Next invoked action: KeyGen direct scheme, with CC enabled */
                tmpNia = NIA_KG_DIRECT;
                tmpNia |= NIA_ENG_KG;
                tmpNia |= NIA_KG_CC_EN;
                tmpNia |= FmPcdKgGetSchemeId(p_CcNextEngineParams->params.kgParams.h_DirectScheme);
                break;

            case (e_FM_PCD_PLCR):
                tmp = 0;
                if (p_CcNextEngineParams->params.plcrParams.overrideParams)
                {
                    tmp = FM_PCD_AD_RESULT_CONTRL_FLOW_TYPE;

                    /* if private policer profile, it may be uninitialized yet, therefore no checks are done at this stage */
                    if (p_CcNextEngineParams->params.plcrParams.sharedProfile)
                    {
                        tmpNia |= NIA_PLCR_ABSOLUTE;
                        FmPcdPlcrGetAbsoluteIdByProfileParams((t_Handle)p_FmPcd,
                                                              e_FM_PCD_PLCR_SHARED,
                                                              NULL,
p_CcNextEngineParams->params.plcrParams.newRelativeProfileId,
                                                              &profileId);
                    }
                    else
                        profileId = p_CcNextEngineParams->params.plcrParams.newRelativeProfileId;

                    tmp |= p_CcNextEngineParams->params.plcrParams.newFqid;
#if (DPAA_VERSION >= 11)
                    tmp |= (p_CcNextEngineParams->params.plcrParams.newRelativeStorageProfileId & FM_PCD_AD_RESULT_VSP_MASK)<< FM_PCD_AD_RESULT_VSP_SHIFT;
#endif /* (DPAA_VERSION >= 11) */
                    WRITE_UINT32(p_AdResult->plcrProfile,(uint32_t)((uint32_t)profileId << FM_PCD_AD_PROFILEID_FOR_CNTRL_SHIFT));
                }
                else
                    tmp = FM_PCD_AD_RESULT_DATA_FLOW_TYPE;

                tmpNia |= NIA_ENG_PLCR | p_CcNextEngineParams->params.plcrParams.newRelativeProfileId;
                break;

            default:
                /* Unknown engine: leave the AD untouched */
                return;
        }
        WRITE_UINT32(p_AdResult->fqid, tmp);

        if (p_CcNextEngineParams->h_Manip)
        {
            /* Chain the manip AD: store its MURAM offset (16-byte units) and
               mark the result AD as extended with a next-AD */
            tmp = GET_UINT32(p_AdResult->plcrProfile);
            tmp |= (uint32_t)(XX_VirtToPhys(p_AdNewPtr) - (p_FmPcd->physicalMuramBase)) >> 4;
            WRITE_UINT32(p_AdResult->plcrProfile, tmp);

            tmpNia |= FM_PCD_AD_RESULT_EXTENDED_MODE;
            tmpNia |= FM_PCD_AD_RESULT_NADEN;
        }

#if (DPAA_VERSION >= 11)
        tmpNia |= FM_PCD_AD_RESULT_NO_OM_VSPE;
#endif /* (DPAA_VERSION >= 11) */
        WRITE_UINT32(p_AdResult->nia, tmpNia);
    }
}

/* Thin wrapper: propagates PCD/port parameters to all entries of a CC tree by
 * delegating to CcUpdateParam with the tree's key/next-engine array. */
static t_Error CcUpdateParams(t_Handle h_FmPcd,
                              t_Handle h_PcdParams,
                              t_Handle h_FmPort,
                              t_Handle h_FmTree,
                              bool validate)
{
    t_FmPcdCcTree *p_CcTree = (t_FmPcdCcTree *) h_FmTree;

    return CcUpdateParam(h_FmPcd,
                         h_PcdParams,
                         h_FmPort,
                         p_CcTree->keyAndNextEngineParams,
                         p_CcTree->numOfEntries,
                         UINT_TO_PTR(p_CcTree->ccTreeBaseAddr),
                         validate,
                         0,
                         h_FmTree,
                         FALSE);
}


/* Frees the MURAM tables that were allocated for a pending node modification.
 * In static (shadow) mode these pointers reference the shared shadow and are
 * expected to be NULL/not set by the caller - only non-NULL tables are freed. */
static void ReleaseNewNodeCommonPart(t_FmPcdModifyCcKeyAdditionalParams *p_AdditionalInfo)
{
    if (p_AdditionalInfo->p_AdTableNew)
        FM_MURAM_FreeMem(FmPcdGetMuramHandle(((t_FmPcdCcNode *)(p_AdditionalInfo->h_CurrentNode))->h_FmPcd),
                         p_AdditionalInfo->p_AdTableNew);

    if (p_AdditionalInfo->p_KeysMatchTableNew)
        FM_MURAM_FreeMem(FmPcdGetMuramHandle(((t_FmPcdCcNode *)(p_AdditionalInfo->h_CurrentNode))->h_FmPcd),
                         p_AdditionalInfo->p_KeysMatchTableNew);
}

/* Decides, for a new key mask, whether the node can keep using its single
 * 4-byte global mask or must switch to per-key local masks. On switching, the
 * node's lclMask flag is raised and glblMaskSize cleared. In static mode
 * (maxNumOfKeys set) a local mask is only allowed if maskSupport was enabled
 * at init; otherwise the previous state is restored and E_NOT_SUPPORTED is
 * returned. */
static t_Error UpdateGblMask(t_FmPcdCcNode *p_CcNode,
                             uint8_t keySize,
                             uint8_t *p_Mask)
{
    uint8_t prvGlblMaskSize = p_CcNode->glblMaskSize;

    if (p_Mask &&
        !p_CcNode->glblMaskUpdated &&
        (keySize <= 4) &&
        !p_CcNode->lclMask)
    {
        /* First mask seen on this node: adopt it as the global mask */
        memcpy(p_CcNode->p_GlblMask, p_Mask, (sizeof(uint8_t))*keySize);
        p_CcNode->glblMaskUpdated = TRUE;
        p_CcNode->glblMaskSize = 4;
    }
    else if (p_Mask &&
             (keySize <= 4) &&
             !p_CcNode->lclMask)
    {
        /* A different mask than the adopted global one forces local masks */
        if (memcmp(p_CcNode->p_GlblMask, p_Mask, keySize) != 0)
        {
            p_CcNode->lclMask = TRUE;
            p_CcNode->glblMaskSize = 0;
        }
    }
    else if (!p_Mask && p_CcNode->glblMaskUpdated && (keySize <= 4))
    {
        /* No mask given means "all ones"; if the global mask isn't all ones,
           the keys no longer share one mask */
        uint32_t tmpMask = 0xffffffff;
        if (memcmp(p_CcNode->p_GlblMask, &tmpMask, 4) != 0)
        {
            p_CcNode->lclMask = TRUE;
            p_CcNode->glblMaskSize = 0;
        }
    }
    else if (p_Mask)
    {
        p_CcNode->lclMask = TRUE;
        p_CcNode->glblMaskSize = 0;
    }

    /* In static mode (maxNumOfKeys > 0), local mask is supported
       only is mask support was enabled at initialization */
    if (p_CcNode->maxNumOfKeys && (!p_CcNode->maskSupport) && p_CcNode->lclMask)
    {
        p_CcNode->lclMask = FALSE;
        p_CcNode->glblMaskSize = prvGlblMaskSize;
        return ERROR_CODE(E_NOT_SUPPORTED);
    }

    return E_OK;
}

/* Returns an AD buffer to build into: the preallocated CC shadow when one is
 * required/available (tree, or node in static mode), otherwise a freshly
 * MURAM-allocated single AD entry. Returns NULL on allocation failure. */
static __inline__ t_Handle GetNewAd(t_Handle h_FmPcdCcNodeOrTree, bool isTree)
{
    t_FmPcd *p_FmPcd;
    t_Handle h_Ad;

    if (isTree)
        p_FmPcd = (t_FmPcd *)(((t_FmPcdCcTree *)h_FmPcdCcNodeOrTree)->h_FmPcd);
    else
        p_FmPcd = (t_FmPcd *)(((t_FmPcdCcNode *)h_FmPcdCcNodeOrTree)->h_FmPcd);

    if ((isTree && p_FmPcd->p_CcShadow) ||
        (!isTree && ((t_FmPcdCcNode *)h_FmPcdCcNodeOrTree)->maxNumOfKeys))
    {
        /* The allocated shadow is divided as follows:
           0 . . . 16 . . .
           ---------------------------------------------------
           |   Shadow   |  Shadow Keys   |   Shadow Next     |
           |     Ad     |  Match Table   |   Engine Table    |
           | (16 bytes) | (maximal size) |  (maximal size)   |
           ---------------------------------------------------
        */
        if (!p_FmPcd->p_CcShadow)
        {
            REPORT_ERROR(MAJOR, E_NO_MEMORY, ("CC Shadow not allocated"));
            return NULL;
        }

        h_Ad = p_FmPcd->p_CcShadow;
    }
    else
    {
        h_Ad = (t_Handle)FM_MURAM_AllocMem(FmPcdGetMuramHandle(p_FmPcd),
                                           FM_PCD_CC_AD_ENTRY_SIZE,
                                           FM_PCD_CC_AD_TABLE_ALIGN);
        if (!h_Ad)
        {
            REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC node action descriptor"));
            return NULL;
        }
    }

    return h_Ad;
}

/* Common preparation for all node-modification flows: computes the match-entry
 * size (doubled when local masks are in use), obtains the new keys-match and
 * AD tables (fresh MURAM allocations in dynamic mode, slices of the shared
 * shadow in static mode), zeroes them, and records the node's current tables
 * as the "old" tables in p_AdditionalInfo. */
static t_Error BuildNewNodeCommonPart(t_FmPcdCcNode *p_CcNode,
                                      int *size,
                                      t_FmPcdModifyCcKeyAdditionalParams *p_AdditionalInfo)
{
    t_FmPcd *p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;

    /* With local masks each entry holds key followed by its mask */
    if (p_CcNode->lclMask)
        *size = 2 * p_CcNode->ccKeySizeAccExtraction;
    else
        *size = p_CcNode->ccKeySizeAccExtraction;

    if (p_CcNode->maxNumOfKeys == 0)
    {
        /* Dynamic mode: allocate tables sized for numOfKeys plus the miss entry */
        p_AdditionalInfo->p_AdTableNew =
            (t_Handle)FM_MURAM_AllocMem(FmPcdGetMuramHandle(p_FmPcd),
                                        (uint32_t)( (p_AdditionalInfo->numOfKeys+1) * FM_PCD_CC_AD_ENTRY_SIZE),
                                        FM_PCD_CC_AD_TABLE_ALIGN);
        if (!p_AdditionalInfo->p_AdTableNew)
            RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC node action descriptors table"));

        p_AdditionalInfo->p_KeysMatchTableNew =
            (t_Handle)FM_MURAM_AllocMem(FmPcdGetMuramHandle(p_FmPcd),
                                        (uint32_t)(*size * sizeof(uint8_t) * (p_AdditionalInfo->numOfKeys + 1)),
                                        FM_PCD_CC_KEYS_MATCH_TABLE_ALIGN);
        if (!p_AdditionalInfo->p_KeysMatchTableNew)
        {
            FM_MURAM_FreeMem(FmPcdGetMuramHandle(p_CcNode->h_FmPcd), p_AdditionalInfo->p_AdTableNew);
            p_AdditionalInfo->p_AdTableNew = NULL;
            RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC node key match table"));
        }

        IOMemSet32((uint8_t*)p_AdditionalInfo->p_AdTableNew, 0, (uint32_t)((p_AdditionalInfo->numOfKeys+1) * FM_PCD_CC_AD_ENTRY_SIZE));
IOMemSet32((uint8_t*)p_AdditionalInfo->p_KeysMatchTableNew, 0, *size * sizeof(uint8_t) * (p_AdditionalInfo->numOfKeys + 1));
    }
    else
    {
        /* The allocated shadow is divided as follows:
           0 . . . 16 . . .
           ---------------------------------------------------
           |   Shadow   |  Shadow Keys   |   Shadow Next     |
           |     Ad     |  Match Table   |   Engine Table    |
           | (16 bytes) | (maximal size) |  (maximal size)   |
           ---------------------------------------------------
        */

        if (!p_FmPcd->p_CcShadow)
            RETURN_ERROR(MAJOR, E_NO_MEMORY, ("CC Shadow not allocated"));

        /* Static mode: carve the new tables out of the shared shadow area */
        p_AdditionalInfo->p_KeysMatchTableNew = PTR_MOVE(p_FmPcd->p_CcShadow, FM_PCD_CC_AD_ENTRY_SIZE);
        p_AdditionalInfo->p_AdTableNew = PTR_MOVE(p_AdditionalInfo->p_KeysMatchTableNew, p_CcNode->keysMatchTableMaxSize);

        IOMemSet32((uint8_t*)p_AdditionalInfo->p_AdTableNew, 0, (uint32_t)((p_CcNode->maxNumOfKeys + 1) * FM_PCD_CC_AD_ENTRY_SIZE));
        IOMemSet32((uint8_t*)p_AdditionalInfo->p_KeysMatchTableNew, 0, (*size) * sizeof(uint8_t) * (p_CcNode->maxNumOfKeys));
    }

    p_AdditionalInfo->p_AdTableOld = p_CcNode->h_AdTable;
    p_AdditionalInfo->p_KeysMatchTableOld = p_CcNode->h_KeysMatchTable;

    return E_OK;
}

/* Builds the new keys-match and AD tables for adding a key (add=TRUE) or
 * replacing the key at keyIndex (add=FALSE). Validates the new next-engine
 * parameters, updates the global/local mask state, copies unaffected entries
 * from the old tables, builds the new entry (with optional statistics AD),
 * and records per-tree required actions plus the handles to add/remove once
 * the modification is committed. */
static t_Error BuildNewNodeAddOrMdfyKeyAndNextEngine(t_Handle h_FmPcd,
                                                     t_FmPcdCcNode *p_CcNode,
                                                     uint16_t keyIndex,
                                                     t_FmPcdCcKeyParams *p_KeyParams,
                                                     t_FmPcdModifyCcKeyAdditionalParams *p_AdditionalInfo,
                                                     bool add)
{
    t_Error err = E_OK;
    t_Handle p_AdTableNewTmp, p_KeysMatchTableNewTmp;
    t_Handle p_KeysMatchTableOldTmp, p_AdTableOldTmp;
    int size;
    int i = 0, j = 0;
    t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
    uint32_t requiredAction = 0;
    bool prvLclMask;
    t_CcNodeInformation *p_CcNodeInformation;
    t_FmPcdCcStatsParams statsParams = {0};
    t_List *p_Pos;
    t_FmPcdStatsObj *p_StatsObj;

    /* Check that new NIA is legal */
    err = ValidateNextEngineParams(h_FmPcd,
                                   &p_KeyParams->ccNextEngineParams,
                                   p_CcNode->statisticsMode);
    if (err)
        RETURN_ERROR(MAJOR, err, NO_MSG);

    prvLclMask = p_CcNode->lclMask;

    /* Check that new key is not require update of localMask */
    err = UpdateGblMask(p_CcNode,
                        p_CcNode->ccKeySizeAccExtraction,
                        p_KeyParams->p_Mask);
    if (err)
        RETURN_ERROR(MAJOR, err, (NO_MSG));

    /* Update internal data structure with new next engine for the given index */
    memcpy(&p_AdditionalInfo->keyAndNextEngineParams[keyIndex].nextEngineParams,
           &p_KeyParams->ccNextEngineParams,
           sizeof(t_FmPcdCcNextEngineParams));

    memcpy(p_AdditionalInfo->keyAndNextEngineParams[keyIndex].key,
           p_KeyParams->p_Key,
           p_CcNode->userSizeOfExtraction);

    /* Next engine is another CC node with manip: prepare its lookup AD now */
    if ((p_AdditionalInfo->keyAndNextEngineParams[keyIndex].nextEngineParams.nextEngine == e_FM_PCD_CC)
        && p_AdditionalInfo->keyAndNextEngineParams[keyIndex].nextEngineParams.h_Manip)
    {
        err = AllocAndFillAdForContLookupManip(p_AdditionalInfo->keyAndNextEngineParams[keyIndex].nextEngineParams.params.ccParams.h_CcNode);
        if (err)
            RETURN_ERROR(MAJOR, err, (NO_MSG));
    }

    /* Absent mask means "match all bits" */
    if (p_KeyParams->p_Mask)
        memcpy(p_AdditionalInfo->keyAndNextEngineParams[keyIndex].mask,
               p_KeyParams->p_Mask,
               p_CcNode->userSizeOfExtraction);
    else
        memset(p_AdditionalInfo->keyAndNextEngineParams[keyIndex].mask,
               0xFF,
               p_CcNode->userSizeOfExtraction);

    /* Update numOfKeys */
    if (add)
        p_AdditionalInfo->numOfKeys = (uint8_t)(p_CcNode->numOfKeys + 1);
    else
        p_AdditionalInfo->numOfKeys = (uint8_t)p_CcNode->numOfKeys;

    /* Allocate new tables in MURAM: keys match table and action descriptors table */
    err = BuildNewNodeCommonPart(p_CcNode, &size, p_AdditionalInfo);
    if (err)
        RETURN_ERROR(MAJOR, err, NO_MSG);

    /* Check that manip is legal and what requiredAction is necessary for this manip */
    if (p_KeyParams->ccNextEngineParams.h_Manip)
    {
        err = FmPcdManipCheckParamsForCcNextEngine(&p_KeyParams->ccNextEngineParams,&requiredAction);
        if (err)
            RETURN_ERROR(MAJOR, err, (NO_MSG));
    }

    p_AdditionalInfo->keyAndNextEngineParams[keyIndex].requiredAction = requiredAction;
    p_AdditionalInfo->keyAndNextEngineParams[keyIndex].requiredAction |= UPDATE_CC_WITH_TREE;

    /* Update new Ad and new Key Table according to new requirement */
    /* j walks the new tables; i walks the old ones (they differ by one slot
       around keyIndex when adding) */
    i = 0;
    for (j = 0; j < p_AdditionalInfo->numOfKeys; j++)
    {
        p_AdTableNewTmp = PTR_MOVE(p_AdditionalInfo->p_AdTableNew, j*FM_PCD_CC_AD_ENTRY_SIZE);

        if (j == keyIndex)
        {
            if (p_KeyParams->ccNextEngineParams.statisticsEn)
            {
                /* Allocate a statistics object that holds statistics AD and counters.
                   - For added key - New statistics AD and counters pointer need to be allocated
                   new statistics object. If statistics were enabled, we need to replace the
                   existing descriptor with a new descriptor with nullified counters.
                 */
                p_StatsObj = GetStatsObj(p_CcNode);
                ASSERT_COND(p_StatsObj);

                /* Store allocated statistics object */
                ASSERT_COND(keyIndex < CC_MAX_NUM_OF_KEYS);
                p_AdditionalInfo->keyAndNextEngineParams[keyIndex].p_StatsObj = p_StatsObj;

                statsParams.h_StatsAd = p_StatsObj->h_StatsAd;
                statsParams.h_StatsCounters = p_StatsObj->h_StatsCounters;
#if (DPAA_VERSION >= 11)
                statsParams.h_StatsFLRs = p_CcNode->h_StatsFLRs;

#endif /* (DPAA_VERSION >= 11) */

                /* Building action descriptor for the received new key */
                NextStepAd(p_AdTableNewTmp,
                           &statsParams,
                           &p_KeyParams->ccNextEngineParams,
                           p_FmPcd);
            }
            else
            {
                /* Building action descriptor for the received new key */
                NextStepAd(p_AdTableNewTmp,
                           NULL,
                           &p_KeyParams->ccNextEngineParams,
                           p_FmPcd);
            }

            /* Copy the received new key into keys match table */
            p_KeysMatchTableNewTmp = PTR_MOVE(p_AdditionalInfo->p_KeysMatchTableNew, j*size*sizeof(uint8_t));

            Mem2IOCpy32((void*)p_KeysMatchTableNewTmp, p_KeyParams->p_Key, p_CcNode->userSizeOfExtraction);

            /* Update mask for the received new key */
            if (p_CcNode->lclMask)
            {
                if (p_KeyParams->p_Mask)
                {
                    Mem2IOCpy32(PTR_MOVE(p_KeysMatchTableNewTmp,
                                p_CcNode->ccKeySizeAccExtraction),
                                p_KeyParams->p_Mask,
                                p_CcNode->userSizeOfExtraction);
                }
                else if (p_CcNode->ccKeySizeAccExtraction > 4)
                {
                    IOMemSet32(PTR_MOVE(p_KeysMatchTableNewTmp,
                               p_CcNode->ccKeySizeAccExtraction),
                               0xff,
                               p_CcNode->userSizeOfExtraction);
                }
                else
                {
                    Mem2IOCpy32(PTR_MOVE(p_KeysMatchTableNewTmp,
                                p_CcNode->ccKeySizeAccExtraction),
                                p_CcNode->p_GlblMask,
                                p_CcNode->userSizeOfExtraction);
                }
            }

            /* If key modification requested, the old entry is omitted and replaced by the new parameters */
            if (!add)
                i++;
        }
        else
        {
            /* Copy existing action descriptors to the newly allocated Ad table */
            p_AdTableOldTmp = PTR_MOVE(p_AdditionalInfo->p_AdTableOld, i*FM_PCD_CC_AD_ENTRY_SIZE);
            IO2IOCpy32(p_AdTableNewTmp, p_AdTableOldTmp, FM_PCD_CC_AD_ENTRY_SIZE);

            /* Copy existing keys and their masks to the newly allocated keys match table */
            p_KeysMatchTableNewTmp = PTR_MOVE(p_AdditionalInfo->p_KeysMatchTableNew, j * size * sizeof(uint8_t));
            p_KeysMatchTableOldTmp = PTR_MOVE(p_AdditionalInfo->p_KeysMatchTableOld, i * size * sizeof(uint8_t));

            if (p_CcNode->lclMask)
            {
                if (prvLclMask)
                {
                    /* Old table already had per-key masks: copy them over */
                    IO2IOCpy32(PTR_MOVE(p_KeysMatchTableNewTmp, p_CcNode->ccKeySizeAccExtraction),
                               PTR_MOVE(p_KeysMatchTableOldTmp, p_CcNode->ccKeySizeAccExtraction),
                               p_CcNode->ccKeySizeAccExtraction);
                }
                else
                {
                    /* Old table had no masks: re-base the old-key pointer and
                       synthesize a mask (all ones, or the former global mask) */
                    p_KeysMatchTableOldTmp = PTR_MOVE(p_CcNode->h_KeysMatchTable,
                                                      i * p_CcNode->ccKeySizeAccExtraction*sizeof(uint8_t));

                    if (p_CcNode->ccKeySizeAccExtraction > 4)
                    {
                        IOMemSet32(PTR_MOVE(p_KeysMatchTableNewTmp,
                                   p_CcNode->ccKeySizeAccExtraction),
                                   0xff,
                                   p_CcNode->userSizeOfExtraction);
                    }
                    else
                    {
                        IO2IOCpy32(PTR_MOVE(p_KeysMatchTableNewTmp,
                                   p_CcNode->ccKeySizeAccExtraction),
                                   p_CcNode->p_GlblMask,
                                   p_CcNode->userSizeOfExtraction);
                    }
                }
            }

            IO2IOCpy32(p_KeysMatchTableNewTmp, p_KeysMatchTableOldTmp, p_CcNode->ccKeySizeAccExtraction);

            i++;
        }
    }

    /* Miss action descriptor */
    p_AdTableNewTmp = PTR_MOVE(p_AdditionalInfo->p_AdTableNew, j * FM_PCD_CC_AD_ENTRY_SIZE);
    p_AdTableOldTmp = PTR_MOVE(p_AdditionalInfo->p_AdTableOld, i * FM_PCD_CC_AD_ENTRY_SIZE);
    IO2IOCpy32(p_AdTableNewTmp, p_AdTableOldTmp, FM_PCD_CC_AD_ENTRY_SIZE);

    if (!LIST_IsEmpty(&p_CcNode->ccTreesLst))
    {
        LIST_FOR_EACH(p_Pos, &p_CcNode->ccTreesLst)
        {
            p_CcNodeInformation = CC_NODE_F_OBJECT(p_Pos);
            ASSERT_COND(p_CcNodeInformation->h_CcNode);
            /* Update the manipulation which has to be updated from parameters of the port */
            /* It's has to be updated with restrictions defined in the function */
            err = SetRequiredAction(p_CcNode->h_FmPcd,
                                    p_CcNode->shadowAction | p_AdditionalInfo->keyAndNextEngineParams[keyIndex].requiredAction,
                                    &p_AdditionalInfo->keyAndNextEngineParams[keyIndex],
                                    PTR_MOVE(p_AdditionalInfo->p_AdTableNew, keyIndex*FM_PCD_CC_AD_ENTRY_SIZE),
                                    1,
                                    p_CcNodeInformation->h_CcNode);
            if (err)
                RETURN_ERROR(MAJOR, err, (NO_MSG));

            err = CcUpdateParam(p_CcNode->h_FmPcd,
                                NULL,
                                NULL,
                                &p_AdditionalInfo->keyAndNextEngineParams[keyIndex],
                                1,
                                PTR_MOVE(p_AdditionalInfo->p_AdTableNew, keyIndex*FM_PCD_CC_AD_ENTRY_SIZE),
                                TRUE,
                                p_CcNodeInformation->index,
                                p_CcNodeInformation->h_CcNode,
                                TRUE);
            if (err)
                RETURN_ERROR(MAJOR, err, (NO_MSG));
        }
    }

    if (p_CcNode->lclMask)
        memset(p_CcNode->p_GlblMask, 0xff, CC_GLBL_MASK_SIZE * sizeof(uint8_t));

    /* Record handles whose reference counts must be updated on commit */
    if (p_KeyParams->ccNextEngineParams.nextEngine == e_FM_PCD_CC)
        p_AdditionalInfo->h_NodeForAdd = p_KeyParams->ccNextEngineParams.params.ccParams.h_CcNode;
    if (p_KeyParams->ccNextEngineParams.h_Manip)
        p_AdditionalInfo->h_ManipForAdd = p_KeyParams->ccNextEngineParams.h_Manip;

#if (DPAA_VERSION >= 11)
    if ((p_KeyParams->ccNextEngineParams.nextEngine == e_FM_PCD_FR) &&
        (p_KeyParams->ccNextEngineParams.params.frParams.h_FrmReplic))
        p_AdditionalInfo->h_FrmReplicForAdd = p_KeyParams->ccNextEngineParams.params.frParams.h_FrmReplic;
#endif /* (DPAA_VERSION >= 11) */

    if (!add)
    {
        /* Modification: the replaced entry's resources must be released */
        if (p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.nextEngine == e_FM_PCD_CC)
p_AdditionalInfo->h_NodeForRmv = p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.params.ccParams.h_CcNode;

        if (p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.h_Manip)
            p_AdditionalInfo->h_ManipForRmv = p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.h_Manip;

        /* If statistics were previously enabled, store the old statistics object to be released */
        if (p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj)
        {
            p_AdditionalInfo->p_StatsObjForRmv = p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj;
        }

#if (DPAA_VERSION >= 11)
        if ((p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.nextEngine == e_FM_PCD_FR) &&
            (p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.params.frParams.h_FrmReplic))
            p_AdditionalInfo->h_FrmReplicForRmv = p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.params.frParams.h_FrmReplic;
#endif /* (DPAA_VERSION >= 11) */
    }

    return E_OK;
}

/* Builds the new keys-match and AD tables with the entry at keyIndex removed:
 * all other entries (and the trailing miss AD) are copied across compacted by
 * one slot, and the removed entry's node/manip/stats/replicator handles are
 * recorded in p_AdditionalInfo for release on commit. */
static t_Error BuildNewNodeRemoveKey(t_FmPcdCcNode *p_CcNode,
                                     uint16_t keyIndex,
                                     t_FmPcdModifyCcKeyAdditionalParams *p_AdditionalInfo)
{
    int i = 0, j = 0;
    t_Handle p_AdTableNewTmp,p_KeysMatchTableNewTmp;
    t_Handle p_KeysMatchTableOldTmp, p_AdTableOldTmp;
    int size;
    t_Error err = E_OK;

    /*save new numOfKeys*/
    p_AdditionalInfo->numOfKeys = (uint16_t)(p_CcNode->numOfKeys - 1);

    /*function which allocates in the memory new KeyTbl, AdTbl*/
    err = BuildNewNodeCommonPart(p_CcNode, &size, p_AdditionalInfo);
    if (err)
        RETURN_ERROR(MAJOR, err, NO_MSG);

    /*update new Ad and new Key Table according to new requirement*/
    /* i indexes the new (compacted) tables, j the old ones; j skips keyIndex */
    for (i=0, j=0; j<p_CcNode->numOfKeys; i++, j++)
    {
        if (j == keyIndex)
        {
            /* NOTE(review): this assignment is overwritten below before any
               use - appears to be a dead store; confirm before removing. */
            p_AdTableOldTmp = PTR_MOVE(p_AdditionalInfo->p_AdTableOld, j * FM_PCD_CC_AD_ENTRY_SIZE);
            j++;
        }
        if (j == p_CcNode->numOfKeys)
            break;
        p_AdTableNewTmp = PTR_MOVE(p_AdditionalInfo->p_AdTableNew, i * FM_PCD_CC_AD_ENTRY_SIZE);
        p_AdTableOldTmp = PTR_MOVE(p_AdditionalInfo->p_AdTableOld, j * FM_PCD_CC_AD_ENTRY_SIZE);
        IO2IOCpy32(p_AdTableNewTmp, p_AdTableOldTmp, FM_PCD_CC_AD_ENTRY_SIZE);

        p_KeysMatchTableOldTmp = PTR_MOVE(p_AdditionalInfo->p_KeysMatchTableOld, j * size * sizeof(uint8_t));
        p_KeysMatchTableNewTmp = PTR_MOVE(p_AdditionalInfo->p_KeysMatchTableNew, i * size * sizeof(uint8_t));
        IO2IOCpy32(p_KeysMatchTableNewTmp, p_KeysMatchTableOldTmp, size * sizeof(uint8_t));
    }

    /* Copy the miss action descriptor into the last slot of the new table */
    p_AdTableNewTmp = PTR_MOVE(p_AdditionalInfo->p_AdTableNew, i * FM_PCD_CC_AD_ENTRY_SIZE);
    p_AdTableOldTmp = PTR_MOVE(p_AdditionalInfo->p_AdTableOld, j * FM_PCD_CC_AD_ENTRY_SIZE);
    IO2IOCpy32(p_AdTableNewTmp, p_AdTableOldTmp, FM_PCD_CC_AD_ENTRY_SIZE);

    /* Record the removed entry's resources for release on commit */
    if (p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.nextEngine == e_FM_PCD_CC)
        p_AdditionalInfo->h_NodeForRmv =
            p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.params.ccParams.h_CcNode;

    if (p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.h_Manip)
        p_AdditionalInfo->h_ManipForRmv =
            p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.h_Manip;

    /* If statistics were previously enabled, store the old statistics object to be released */
    if (p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj)
    {
        p_AdditionalInfo->p_StatsObjForRmv = p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj;
    }

#if (DPAA_VERSION >= 11)
    if ((p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.nextEngine == e_FM_PCD_FR) &&
        (p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.params.frParams.h_FrmReplic))
        p_AdditionalInfo->h_FrmReplicForRmv =
            p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.params.frParams.h_FrmReplic;
#endif /* (DPAA_VERSION >= 11) */

    return E_OK;
}

/* Builds the new keys-match and AD tables with only the key (and mask) at
 * keyIndex replaced; the next-engine decision for that entry is preserved.
 * If statistics were enabled on the entry, its counters are swapped for a
 * freshly nullified set while the original statistics AD is kept. */
static t_Error BuildNewNodeModifyKey(t_FmPcdCcNode *p_CcNode,
                                     uint16_t keyIndex,
                                     uint8_t *p_Key,
                                     uint8_t *p_Mask,
                                     t_FmPcdModifyCcKeyAdditionalParams *p_AdditionalInfo)
{
    t_FmPcd *p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;
    t_Error err = E_OK;
    t_Handle
p_AdTableNewTmp, p_KeysMatchTableNewTmp;
    t_Handle p_KeysMatchTableOldTmp, p_AdTableOldTmp;
    int size;
    int i = 0, j = 0;
    bool prvLclMask;
    t_FmPcdStatsObj *p_StatsObj, tmpStatsObj;
    p_AdditionalInfo->numOfKeys = p_CcNode->numOfKeys;

    prvLclMask = p_CcNode->lclMask;

    /* Check that new key is not require update of localMask */
    err = UpdateGblMask(p_CcNode,
                        p_CcNode->ccKeySizeAccExtraction,
                        p_Mask);
    if (err)
        RETURN_ERROR(MAJOR, err, (NO_MSG));

    /* Update internal data structure with new next engine for the given index */
    memcpy(p_AdditionalInfo->keyAndNextEngineParams[keyIndex].key,
           p_Key,
           p_CcNode->userSizeOfExtraction);

    /* Absent mask means "match all bits" */
    if (p_Mask)
        memcpy(p_AdditionalInfo->keyAndNextEngineParams[keyIndex].mask,
               p_Mask,
               p_CcNode->userSizeOfExtraction);
    else
        memset(p_AdditionalInfo->keyAndNextEngineParams[keyIndex].mask,
               0xFF,
               p_CcNode->userSizeOfExtraction);

    /*function which build in the memory new KeyTbl, AdTbl*/
    err = BuildNewNodeCommonPart(p_CcNode, &size, p_AdditionalInfo);
    if (err)
        RETURN_ERROR(MAJOR, err, NO_MSG);

    /*fill the New AdTable and New KeyTable*/
    for (j=0, i=0; j<p_AdditionalInfo->numOfKeys; j++, i++)
    {
        /* Same number of entries: ADs are copied one-to-one */
        p_AdTableNewTmp = PTR_MOVE(p_AdditionalInfo->p_AdTableNew, j*FM_PCD_CC_AD_ENTRY_SIZE);
        p_AdTableOldTmp = PTR_MOVE(p_AdditionalInfo->p_AdTableOld, i*FM_PCD_CC_AD_ENTRY_SIZE);

        IO2IOCpy32(p_AdTableNewTmp, p_AdTableOldTmp, FM_PCD_CC_AD_ENTRY_SIZE);

        if (j == keyIndex)
        {
            ASSERT_COND(keyIndex < CC_MAX_NUM_OF_KEYS);
            if (p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj)
            {
                /* As statistics were enabled, we need to update the existing
                   statistics descriptor with a new nullified counters. */
                p_StatsObj = GetStatsObj(p_CcNode);
                ASSERT_COND(p_StatsObj);

                SetStatsCounters(p_AdTableNewTmp,
                                 (uint32_t)((XX_VirtToPhys(p_StatsObj->h_StatsCounters) - p_FmPcd->physicalMuramBase)));

                tmpStatsObj.h_StatsAd = p_StatsObj->h_StatsAd;
                tmpStatsObj.h_StatsCounters = p_StatsObj->h_StatsCounters;

                /* As we need to replace only the counters, we build a new statistics
                   object that holds the old AD and the new counters - this will be the
                   currently used statistics object.
                   The newly allocated AD is not required and may be released back to
                   the available objects with the previous counters pointer. */
                p_StatsObj->h_StatsAd = p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj->h_StatsAd;

                p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj->h_StatsAd = tmpStatsObj.h_StatsAd;

                /* Store allocated statistics object */
                p_AdditionalInfo->keyAndNextEngineParams[keyIndex].p_StatsObj = p_StatsObj;

                /* As statistics were previously enabled, store the old statistics object to be released */
                p_AdditionalInfo->p_StatsObjForRmv = p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj;
            }

            /* Write the new key (and its mask, when local masks are in use) */
            p_KeysMatchTableNewTmp = PTR_MOVE(p_AdditionalInfo->p_KeysMatchTableNew, j * size * sizeof(uint8_t));

            Mem2IOCpy32(p_KeysMatchTableNewTmp, p_Key, p_CcNode->userSizeOfExtraction);

            if (p_CcNode->lclMask)
            {
                if (p_Mask)
                    Mem2IOCpy32(PTR_MOVE(p_KeysMatchTableNewTmp,
                                p_CcNode->ccKeySizeAccExtraction),
                                p_Mask,
                                p_CcNode->userSizeOfExtraction);
                else if (p_CcNode->ccKeySizeAccExtraction > 4)
                    IOMemSet32(PTR_MOVE(p_KeysMatchTableNewTmp,
                               p_CcNode->ccKeySizeAccExtraction),
                               0xff,
                               p_CcNode->userSizeOfExtraction);
                else
                    Mem2IOCpy32(PTR_MOVE(p_KeysMatchTableNewTmp,
                                p_CcNode->ccKeySizeAccExtraction),
                                p_CcNode->p_GlblMask,
                                p_CcNode->userSizeOfExtraction);
            }
        }
        else
        {
            /* NOTE(review): old keys are read from p_CcNode->h_KeysMatchTable
               here, while BuildNewNodeAddOrMdfyKeyAndNextEngine reads from
               p_AdditionalInfo->p_KeysMatchTableOld - confirm these are the
               same table at this point. */
            p_KeysMatchTableNewTmp = PTR_MOVE(p_AdditionalInfo->p_KeysMatchTableNew, j * size * sizeof(uint8_t));
            p_KeysMatchTableOldTmp = PTR_MOVE(p_CcNode->h_KeysMatchTable, i * size * sizeof(uint8_t));

            if (p_CcNode->lclMask)
            {
                if (prvLclMask)
                    IO2IOCpy32(PTR_MOVE(p_KeysMatchTableNewTmp, p_CcNode->ccKeySizeAccExtraction),
                               PTR_MOVE(p_KeysMatchTableOldTmp, p_CcNode->ccKeySizeAccExtraction),
                               p_CcNode->userSizeOfExtraction);
                else
                {
                    /* Old table had no masks: re-base the old-key pointer and
                       synthesize a mask (all ones, or the former global mask) */
                    p_KeysMatchTableOldTmp = PTR_MOVE(p_CcNode->h_KeysMatchTable, i * p_CcNode->ccKeySizeAccExtraction * sizeof(uint8_t));

                    if (p_CcNode->ccKeySizeAccExtraction > 4)
                        IOMemSet32(PTR_MOVE(p_KeysMatchTableNewTmp,
                                   p_CcNode->ccKeySizeAccExtraction),
                                   0xff,
                                   p_CcNode->userSizeOfExtraction);
                    else
                        IO2IOCpy32(PTR_MOVE(p_KeysMatchTableNewTmp, p_CcNode->ccKeySizeAccExtraction),
                                   p_CcNode->p_GlblMask,
                                   p_CcNode->userSizeOfExtraction);
                }
            }
            IO2IOCpy32((void*)p_KeysMatchTableNewTmp,
                       p_KeysMatchTableOldTmp,
                       p_CcNode->ccKeySizeAccExtraction);
        }
    }

    /* Copy the miss action descriptor into the last slot of the new table */
    p_AdTableNewTmp = PTR_MOVE(p_AdditionalInfo->p_AdTableNew, j * FM_PCD_CC_AD_ENTRY_SIZE);
    p_AdTableOldTmp = PTR_MOVE(p_CcNode->h_AdTable, i * FM_PCD_CC_AD_ENTRY_SIZE);

    IO2IOCpy32(p_AdTableNewTmp, p_AdTableOldTmp, FM_PCD_CC_AD_ENTRY_SIZE);

    return E_OK;
}

/* Replaces only the next-engine decision (not the key) of the entry at
 * keyIndex in a CC node or CC tree; definition continues past this chunk. */
static t_Error BuildNewNodeModifyNextEngine(t_Handle h_FmPcd,
                                            t_Handle h_FmPcdCcNodeOrTree,
                                            uint16_t keyIndex,
                                            t_FmPcdCcNextEngineParams *p_CcNextEngineParams,
                                            t_List *h_OldLst,
                                            t_List *h_NewLst,
                                            t_FmPcdModifyCcKeyAdditionalParams *p_AdditionalInfo)
{
    t_Error err = E_OK;
    uint32_t requiredAction = 0;
    t_List *p_Pos;
    t_CcNodeInformation *p_CcNodeInformation, ccNodeInfo;
    t_Handle p_Ad;
    t_FmPcdCcNode *p_FmPcdCcNode1 = NULL;
    t_FmPcdCcTree *p_FmPcdCcTree = NULL;
    t_FmPcdStatsObj *p_StatsObj;
    t_FmPcdCcStatsParams statsParams = {0};

    ASSERT_COND(p_CcNextEngineParams);

    /* check that new NIA is legal */
    if (!p_AdditionalInfo->tree)
        err = ValidateNextEngineParams(h_FmPcd,
                                       p_CcNextEngineParams,
                                       ((t_FmPcdCcNode *)h_FmPcdCcNodeOrTree)->statisticsMode);
    else
        /* Statistics are not supported for CC root */
        err = ValidateNextEngineParams(h_FmPcd,
p_CcNextEngineParams, + e_FM_PCD_CC_STATS_MODE_NONE); + if (err) + RETURN_ERROR(MAJOR, err, NO_MSG); + + /* Update internal data structure for next engine per index (index - key) */ + memcpy(&p_AdditionalInfo->keyAndNextEngineParams[keyIndex].nextEngineParams, + p_CcNextEngineParams, + sizeof(t_FmPcdCcNextEngineParams)); + + /* Check that manip is legal and what requiredAction is necessary for this manip */ + if (p_CcNextEngineParams->h_Manip) + { + err = FmPcdManipCheckParamsForCcNextEngine(p_CcNextEngineParams, &requiredAction); + if (err) + RETURN_ERROR(MAJOR, err, (NO_MSG)); + } + + if (!p_AdditionalInfo->tree) + { + p_FmPcdCcNode1 = (t_FmPcdCcNode *)h_FmPcdCcNodeOrTree; + p_AdditionalInfo->numOfKeys = p_FmPcdCcNode1->numOfKeys; + p_Ad = p_FmPcdCcNode1->h_AdTable; + + if (p_FmPcdCcNode1->keyAndNextEngineParams[keyIndex].nextEngineParams.nextEngine == e_FM_PCD_CC) + p_AdditionalInfo->h_NodeForRmv = p_FmPcdCcNode1->keyAndNextEngineParams[keyIndex].nextEngineParams.params.ccParams.h_CcNode; + + if (p_FmPcdCcNode1->keyAndNextEngineParams[keyIndex].nextEngineParams.h_Manip) + p_AdditionalInfo->h_ManipForRmv = p_FmPcdCcNode1->keyAndNextEngineParams[keyIndex].nextEngineParams.h_Manip; + +#if (DPAA_VERSION >= 11) + if ((p_FmPcdCcNode1->keyAndNextEngineParams[keyIndex].nextEngineParams.nextEngine == e_FM_PCD_FR) && + (p_FmPcdCcNode1->keyAndNextEngineParams[keyIndex].nextEngineParams.params.frParams.h_FrmReplic)) + p_AdditionalInfo->h_FrmReplicForRmv = p_FmPcdCcNode1->keyAndNextEngineParams[keyIndex].nextEngineParams.params.frParams.h_FrmReplic; +#endif /* (DPAA_VERSION >= 11) */ + } + else + { + p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmPcdCcNodeOrTree; + p_Ad = UINT_TO_PTR(p_FmPcdCcTree->ccTreeBaseAddr); + + if (p_FmPcdCcTree->keyAndNextEngineParams[keyIndex].nextEngineParams.nextEngine == e_FM_PCD_CC) + p_AdditionalInfo->h_NodeForRmv = p_FmPcdCcTree->keyAndNextEngineParams[keyIndex].nextEngineParams.params.ccParams.h_CcNode; + + if 
(p_FmPcdCcTree->keyAndNextEngineParams[keyIndex].nextEngineParams.h_Manip) + p_AdditionalInfo->h_ManipForRmv = p_FmPcdCcTree->keyAndNextEngineParams[keyIndex].nextEngineParams.h_Manip; + +#if (DPAA_VERSION >= 11) + if ((p_FmPcdCcTree->keyAndNextEngineParams[keyIndex].nextEngineParams.nextEngine == e_FM_PCD_FR) && + (p_FmPcdCcTree->keyAndNextEngineParams[keyIndex].nextEngineParams.params.frParams.h_FrmReplic)) + p_AdditionalInfo->h_FrmReplicForRmv = p_FmPcdCcTree->keyAndNextEngineParams[keyIndex].nextEngineParams.params.frParams.h_FrmReplic; +#endif /* (DPAA_VERSION >= 11) */ + } + + if ((p_CcNextEngineParams->nextEngine == e_FM_PCD_CC) + && p_CcNextEngineParams->h_Manip) + { + err = AllocAndFillAdForContLookupManip(p_CcNextEngineParams->params.ccParams.h_CcNode); + if (err) + RETURN_ERROR(MAJOR, err, (NO_MSG)); + } + + ASSERT_COND(p_Ad); + + memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation)); + ccNodeInfo.h_CcNode = PTR_MOVE(p_Ad, keyIndex * FM_PCD_CC_AD_ENTRY_SIZE); + + /* If statistics were enabled, this Ad is the statistics Ad. Need to follow its + nextAction to retrieve the actual Nia-Ad. If statistics should remain enabled, + only the actual Nia-Ad should be modified. */ + if ((!p_AdditionalInfo->tree) && + (((t_FmPcdCcNode *)h_FmPcdCcNodeOrTree)->keyAndNextEngineParams[keyIndex].p_StatsObj) && + (p_CcNextEngineParams->statisticsEn)) + ccNodeInfo.h_CcNode = ((t_FmPcdCcNode *)h_FmPcdCcNodeOrTree)->keyAndNextEngineParams[keyIndex].p_StatsObj->h_StatsAd; + + EnqueueNodeInfoToRelevantLst(h_OldLst, &ccNodeInfo, NULL); + + memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation)); + p_Ad = GetNewAd(h_FmPcdCcNodeOrTree, p_AdditionalInfo->tree); + if (!p_Ad) + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC node action descriptor")); + IOMemSet32((uint8_t *)p_Ad, 0, FM_PCD_CC_AD_ENTRY_SIZE); + + /* If statistics were not enabled before, but requested now - Allocate a statistics + object that holds statistics AD and counters. 
*/ + if ((!p_AdditionalInfo->tree) && + (!((t_FmPcdCcNode *)h_FmPcdCcNodeOrTree)->keyAndNextEngineParams[keyIndex].p_StatsObj) && + (p_CcNextEngineParams->statisticsEn)) + { + p_StatsObj = GetStatsObj((t_FmPcdCcNode *)h_FmPcdCcNodeOrTree); + ASSERT_COND(p_StatsObj); + + /* Store allocated statistics object */ + p_AdditionalInfo->keyAndNextEngineParams[keyIndex].p_StatsObj = p_StatsObj; + + statsParams.h_StatsAd = p_StatsObj->h_StatsAd; + statsParams.h_StatsCounters = p_StatsObj->h_StatsCounters; + +#if (DPAA_VERSION >= 11) + statsParams.h_StatsFLRs = ((t_FmPcdCcNode *)h_FmPcdCcNodeOrTree)->h_StatsFLRs; + +#endif /* (DPAA_VERSION >= 11) */ + + NextStepAd(p_Ad, + &statsParams, + p_CcNextEngineParams, + h_FmPcd); + } + else + NextStepAd(p_Ad, + NULL, + p_CcNextEngineParams, + h_FmPcd); + + ccNodeInfo.h_CcNode = p_Ad; + EnqueueNodeInfoToRelevantLst(h_NewLst, &ccNodeInfo, NULL); + + p_AdditionalInfo->keyAndNextEngineParams[keyIndex].requiredAction = requiredAction; + p_AdditionalInfo->keyAndNextEngineParams[keyIndex].requiredAction |= UPDATE_CC_WITH_TREE; + + if (!p_AdditionalInfo->tree) + { + ASSERT_COND(p_FmPcdCcNode1); + if (!LIST_IsEmpty(&p_FmPcdCcNode1->ccTreesLst)) + { + LIST_FOR_EACH(p_Pos, &p_FmPcdCcNode1->ccTreesLst) + { + p_CcNodeInformation = CC_NODE_F_OBJECT(p_Pos); + + ASSERT_COND(p_CcNodeInformation->h_CcNode); + /* Update the manipulation which has to be updated from parameters of the port + it's has to be updated with restrictions defined in the function */ + + err = SetRequiredAction(p_FmPcdCcNode1->h_FmPcd, + p_FmPcdCcNode1->shadowAction | p_AdditionalInfo->keyAndNextEngineParams[keyIndex].requiredAction, + &p_AdditionalInfo->keyAndNextEngineParams[keyIndex], + p_Ad, + 1, + p_CcNodeInformation->h_CcNode); + if (err) + RETURN_ERROR(MAJOR, err, (NO_MSG)); + + err = CcUpdateParam(p_FmPcdCcNode1->h_FmPcd, + NULL, + NULL, + &p_AdditionalInfo->keyAndNextEngineParams[keyIndex], + 1, + p_Ad, + TRUE, + p_CcNodeInformation->index, + 
p_CcNodeInformation->h_CcNode, + TRUE); + if (err) + RETURN_ERROR(MAJOR, err, (NO_MSG)); + } + } + } + else + { + ASSERT_COND(p_FmPcdCcTree); + + err = SetRequiredAction(h_FmPcd, + p_FmPcdCcTree->requiredAction | p_AdditionalInfo->keyAndNextEngineParams[keyIndex].requiredAction, + &p_AdditionalInfo->keyAndNextEngineParams[keyIndex], + p_Ad, + 1, + (t_Handle)p_FmPcdCcTree); + if (err) + RETURN_ERROR(MAJOR, err, (NO_MSG)); + + err = CcUpdateParam(h_FmPcd, + NULL, + NULL, + &p_AdditionalInfo->keyAndNextEngineParams[keyIndex], + 1, + p_Ad, + TRUE, + 0, + (t_Handle)p_FmPcdCcTree, TRUE); + if (err) + RETURN_ERROR(MAJOR, err, (NO_MSG)); + } + + if (p_CcNextEngineParams->nextEngine == e_FM_PCD_CC) + p_AdditionalInfo->h_NodeForAdd = p_CcNextEngineParams->params.ccParams.h_CcNode; + if (p_CcNextEngineParams->h_Manip) + p_AdditionalInfo->h_ManipForAdd = p_CcNextEngineParams->h_Manip; + + /* If statistics were previously enabled, but now are disabled, + store the old statistics object to be released */ + if ((!p_AdditionalInfo->tree) && + (((t_FmPcdCcNode *)h_FmPcdCcNodeOrTree)->keyAndNextEngineParams[keyIndex].p_StatsObj) && + (!p_CcNextEngineParams->statisticsEn)) + { + p_AdditionalInfo->p_StatsObjForRmv = + ((t_FmPcdCcNode *)h_FmPcdCcNodeOrTree)->keyAndNextEngineParams[keyIndex].p_StatsObj; + + + p_AdditionalInfo->keyAndNextEngineParams[keyIndex].p_StatsObj = NULL; + } +#if (DPAA_VERSION >= 11) + if ((p_CcNextEngineParams->nextEngine == e_FM_PCD_FR) && + (p_CcNextEngineParams->params.frParams.h_FrmReplic)) + p_AdditionalInfo->h_FrmReplicForAdd = p_CcNextEngineParams->params.frParams.h_FrmReplic; +#endif /* (DPAA_VERSION >= 11) */ + + return E_OK; +} + +static void UpdateAdPtrOfNodesWhichPointsOnCrntMdfNode(t_FmPcdCcNode *p_CrntMdfNode, + t_List *h_OldLst, + t_FmPcdCcNextEngineParams **p_NextEngineParams) +{ + t_CcNodeInformation *p_CcNodeInformation; + t_FmPcdCcNode *p_NodePtrOnCurrentMdfNode = NULL; + t_List *p_Pos; + int i = 0; + t_Handle 
p_AdTablePtOnCrntCurrentMdfNode/*, p_AdTableNewModified*/; + t_CcNodeInformation ccNodeInfo; + + LIST_FOR_EACH(p_Pos, &p_CrntMdfNode->ccPrevNodesLst) + { + p_CcNodeInformation = CC_NODE_F_OBJECT(p_Pos); + p_NodePtrOnCurrentMdfNode = (t_FmPcdCcNode *)p_CcNodeInformation->h_CcNode; + + ASSERT_COND(p_NodePtrOnCurrentMdfNode); + + /* Search in the previous node which exact index points on this current modified node for getting AD */ + for (i = 0; i < p_NodePtrOnCurrentMdfNode->numOfKeys + 1; i++) + { + if (p_NodePtrOnCurrentMdfNode->keyAndNextEngineParams[i].nextEngineParams.nextEngine == e_FM_PCD_CC) + { + if (p_NodePtrOnCurrentMdfNode->keyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode == (t_Handle)p_CrntMdfNode) + { + if (p_NodePtrOnCurrentMdfNode->keyAndNextEngineParams[i].nextEngineParams.h_Manip) + p_AdTablePtOnCrntCurrentMdfNode = p_CrntMdfNode->h_Ad; + else if (p_NodePtrOnCurrentMdfNode->keyAndNextEngineParams[i].p_StatsObj) + p_AdTablePtOnCrntCurrentMdfNode = + p_NodePtrOnCurrentMdfNode->keyAndNextEngineParams[i].p_StatsObj->h_StatsAd; + else + p_AdTablePtOnCrntCurrentMdfNode = + PTR_MOVE(p_NodePtrOnCurrentMdfNode->h_AdTable, i*FM_PCD_CC_AD_ENTRY_SIZE); + + memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation)); + ccNodeInfo.h_CcNode = p_AdTablePtOnCrntCurrentMdfNode; + EnqueueNodeInfoToRelevantLst(h_OldLst, &ccNodeInfo, NULL); + + if (!(*p_NextEngineParams)) + *p_NextEngineParams = &p_NodePtrOnCurrentMdfNode->keyAndNextEngineParams[i].nextEngineParams; + } + } + } + + ASSERT_COND(i != p_NodePtrOnCurrentMdfNode->numOfKeys); + } +} + +static void UpdateAdPtrOfTreesWhichPointsOnCrntMdfNode(t_FmPcdCcNode *p_CrntMdfNode, + t_List *h_OldLst, + t_FmPcdCcNextEngineParams **p_NextEngineParams) +{ + t_CcNodeInformation *p_CcNodeInformation; + t_FmPcdCcTree *p_TreePtrOnCurrentMdfNode = NULL; + t_List *p_Pos; + int i = 0; + t_Handle p_AdTableTmp; + t_CcNodeInformation ccNodeInfo; + + LIST_FOR_EACH(p_Pos, &p_CrntMdfNode->ccTreeIdLst) + { + 
p_CcNodeInformation = CC_NODE_F_OBJECT(p_Pos); + p_TreePtrOnCurrentMdfNode = (t_FmPcdCcTree *)p_CcNodeInformation->h_CcNode; + + ASSERT_COND(p_TreePtrOnCurrentMdfNode); + + /*search in the trees which exact index points on this current modified node for getting AD */ + for (i = 0; i < p_TreePtrOnCurrentMdfNode->numOfEntries; i++) + { + if (p_TreePtrOnCurrentMdfNode->keyAndNextEngineParams[i].nextEngineParams.nextEngine == e_FM_PCD_CC) + { + if (p_TreePtrOnCurrentMdfNode->keyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode == (t_Handle)p_CrntMdfNode) + { + p_AdTableTmp = UINT_TO_PTR(p_TreePtrOnCurrentMdfNode->ccTreeBaseAddr + i*FM_PCD_CC_AD_ENTRY_SIZE); + memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation)); + ccNodeInfo.h_CcNode = p_AdTableTmp; + EnqueueNodeInfoToRelevantLst(h_OldLst, &ccNodeInfo, NULL); + + if (!(*p_NextEngineParams)) + *p_NextEngineParams = &p_TreePtrOnCurrentMdfNode->keyAndNextEngineParams[i].nextEngineParams; + } + } + } + + ASSERT_COND(i == p_TreePtrOnCurrentMdfNode->numOfEntries); + } +} + +static t_FmPcdModifyCcKeyAdditionalParams* ModifyKeyCommonPart1(t_Handle h_FmPcdCcNodeOrTree, + uint16_t keyIndex, + e_ModifyState modifyState, + bool ttlCheck, + bool hashCheck, + bool tree) +{ + t_FmPcdModifyCcKeyAdditionalParams *p_FmPcdModifyCcKeyAdditionalParams; + int i = 0, j = 0; + bool wasUpdate = FALSE; + t_FmPcdCcNode *p_CcNode = NULL; + t_FmPcdCcTree *p_FmPcdCcTree; + uint16_t numOfKeys; + t_FmPcdCcKeyAndNextEngineParams *p_KeyAndNextEngineParams; + + SANITY_CHECK_RETURN_VALUE(h_FmPcdCcNodeOrTree, E_INVALID_HANDLE, NULL); + + p_KeyAndNextEngineParams = (t_FmPcdCcKeyAndNextEngineParams *)XX_Malloc(sizeof(t_FmPcdCcKeyAndNextEngineParams)*CC_MAX_NUM_OF_KEYS); + if (!p_KeyAndNextEngineParams) + { + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("Next engine and required action structure")); + return NULL; + } + memset(p_KeyAndNextEngineParams, 0, sizeof(t_FmPcdCcKeyAndNextEngineParams)*CC_MAX_NUM_OF_KEYS); + + if (!tree) + { + p_CcNode = 
(t_FmPcdCcNode *)h_FmPcdCcNodeOrTree; + numOfKeys = p_CcNode->numOfKeys; + + /* node has to be pointed by another node or tree */ + if (!LIST_NumOfObjs(&p_CcNode->ccPrevNodesLst) && + !LIST_NumOfObjs(&p_CcNode->ccTreeIdLst)) + { + XX_Free(p_KeyAndNextEngineParams); + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("node has to be pointed by node or tree")); + return NULL; + } + + if (!LIST_NumOfObjs(&p_CcNode->ccTreesLst) || + (LIST_NumOfObjs(&p_CcNode->ccTreesLst) != 1)) + { + XX_Free(p_KeyAndNextEngineParams); + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("node has to be belonging to some tree and only to one tree")); + return NULL; + } + + memcpy(p_KeyAndNextEngineParams, + p_CcNode->keyAndNextEngineParams, + CC_MAX_NUM_OF_KEYS * sizeof(t_FmPcdCcKeyAndNextEngineParams)); + + if (ttlCheck) + { + if ((p_CcNode->parseCode == CC_PC_FF_IPV4TTL) || + (p_CcNode->parseCode == CC_PC_FF_IPV6HOP_LIMIT)) + { + XX_Free(p_KeyAndNextEngineParams); + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("nodeId of CC_PC_FF_IPV4TTL or CC_PC_FF_IPV6HOP_LIMIT can not be used for this operation")); + return NULL; + } + } + + if (hashCheck) + { + if (p_CcNode->parseCode == CC_PC_GENERIC_IC_HASH_INDEXED) + { + XX_Free(p_KeyAndNextEngineParams); + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("nodeId of CC_PC_GENERIC_IC_HASH_INDEXED can not be used for this operation")); + return NULL; + } + } + } + else + { + p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmPcdCcNodeOrTree; + numOfKeys = p_FmPcdCcTree->numOfEntries; + memcpy(p_KeyAndNextEngineParams, + p_FmPcdCcTree->keyAndNextEngineParams, + FM_PCD_MAX_NUM_OF_CC_GROUPS * sizeof(t_FmPcdCcKeyAndNextEngineParams)); + } + + p_FmPcdModifyCcKeyAdditionalParams = + (t_FmPcdModifyCcKeyAdditionalParams *)XX_Malloc(sizeof(t_FmPcdModifyCcKeyAdditionalParams)); + if (!p_FmPcdModifyCcKeyAdditionalParams) + { + XX_Free(p_KeyAndNextEngineParams); + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("Allocation of internal data structure FAILED")); + return NULL; + } + memset(p_FmPcdModifyCcKeyAdditionalParams, 0, 
sizeof(t_FmPcdModifyCcKeyAdditionalParams)); + + p_FmPcdModifyCcKeyAdditionalParams->h_CurrentNode = h_FmPcdCcNodeOrTree; + p_FmPcdModifyCcKeyAdditionalParams->savedKeyIndex = keyIndex; + + while (i < numOfKeys) + { + if ((j == keyIndex) && !wasUpdate) + { + if (modifyState == e_MODIFY_STATE_ADD) + j++; + else if (modifyState == e_MODIFY_STATE_REMOVE) + i++; + wasUpdate = TRUE; + } + else + { + memcpy(&p_FmPcdModifyCcKeyAdditionalParams->keyAndNextEngineParams[j], + p_KeyAndNextEngineParams + i, + sizeof(t_FmPcdCcKeyAndNextEngineParams)); + i++; + j++; + } + } + + if (keyIndex == numOfKeys) + { + if (modifyState == e_MODIFY_STATE_ADD) + j++; + else if (modifyState == e_MODIFY_STATE_REMOVE) + i++; + } + + memcpy(&p_FmPcdModifyCcKeyAdditionalParams->keyAndNextEngineParams[j], + p_KeyAndNextEngineParams + numOfKeys, + sizeof(t_FmPcdCcKeyAndNextEngineParams)); + + XX_Free(p_KeyAndNextEngineParams); + + return p_FmPcdModifyCcKeyAdditionalParams; +} + +static t_Error UpdatePtrWhichPointOnCrntMdfNode(t_FmPcdCcNode *p_CcNode, + t_FmPcdModifyCcKeyAdditionalParams *p_FmPcdModifyCcKeyAdditionalParams, + t_List *h_OldLst, + t_List *h_NewLst) +{ + t_FmPcdCcNextEngineParams *p_NextEngineParams = NULL; + t_CcNodeInformation ccNodeInfo = {0}; + t_Handle h_NewAd; + + /* Building a list of all action descriptors that point to the previous node */ + if (!LIST_IsEmpty(&p_CcNode->ccPrevNodesLst)) + UpdateAdPtrOfNodesWhichPointsOnCrntMdfNode(p_CcNode, h_OldLst, &p_NextEngineParams); + + if (!LIST_IsEmpty(&p_CcNode->ccTreeIdLst)) + UpdateAdPtrOfTreesWhichPointsOnCrntMdfNode(p_CcNode, h_OldLst, &p_NextEngineParams); + + /* This node must be found as next engine of one of its previous nodes or trees*/ + ASSERT_COND(p_NextEngineParams); + + /* Building a new action descriptor that points to the modified node */ + h_NewAd = GetNewAd(p_CcNode, FALSE); + if (!h_NewAd) + RETURN_ERROR(MAJOR, E_NO_MEMORY, NO_MSG); + IOMemSet32(h_NewAd, 0, FM_PCD_CC_AD_ENTRY_SIZE); + + BuildNewAd(h_NewAd, + 
p_FmPcdModifyCcKeyAdditionalParams, + p_CcNode, + p_NextEngineParams); + + ccNodeInfo.h_CcNode = h_NewAd; + EnqueueNodeInfoToRelevantLst(h_NewLst, &ccNodeInfo, NULL); + + return E_OK; +} + +static void UpdateCcRootOwner(t_FmPcdCcTree *p_FmPcdCcTree, bool add) +{ + ASSERT_COND(p_FmPcdCcTree); + + /* this routine must be protected by the calling routine! */ + + if (add) + p_FmPcdCcTree->owners++; + else + { + ASSERT_COND(p_FmPcdCcTree->owners); + p_FmPcdCcTree->owners--; + } +} + +static t_Error CheckAndSetManipParamsWithCcNodeParams(t_FmPcdCcNode *p_CcNode) +{ + t_Error err = E_OK; + int i = 0; + + for (i = 0; i < p_CcNode->numOfKeys; i++) + { + if (p_CcNode->keyAndNextEngineParams[i].nextEngineParams.h_Manip) + { + err = FmPcdManipCheckParamsWithCcNodeParams(p_CcNode->keyAndNextEngineParams[i].nextEngineParams.h_Manip, + (t_Handle)p_CcNode); + if (err) + return err; + } + } + + return err; +} +static t_Error ValidateAndCalcStatsParams(t_FmPcdCcNode *p_CcNode, + t_FmPcdCcNodeParams *p_CcNodeParam, + uint32_t *p_NumOfRanges, + uint32_t *p_CountersArraySize) +{ + e_FmPcdCcStatsMode statisticsMode = p_CcNode->statisticsMode; + + UNUSED(p_CcNodeParam); + + switch (statisticsMode) + { + case e_FM_PCD_CC_STATS_MODE_NONE: + return E_OK; + + case e_FM_PCD_CC_STATS_MODE_FRAME: + case e_FM_PCD_CC_STATS_MODE_BYTE_AND_FRAME: + *p_NumOfRanges = 1; + *p_CountersArraySize = 2 * FM_PCD_CC_STATS_COUNTER_SIZE; + return E_OK; + +#if (DPAA_VERSION >= 11) + case e_FM_PCD_CC_STATS_MODE_RMON: + { + uint16_t *p_FrameLengthRanges = p_CcNodeParam->keysParams.frameLengthRanges; + uint32_t i; + + if (p_FrameLengthRanges[0] <= 0) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Statistics mode")); + + if (p_FrameLengthRanges[0] == 0xFFFF) + { + *p_NumOfRanges = 1; + *p_CountersArraySize = 2 * FM_PCD_CC_STATS_COUNTER_SIZE; + return E_OK; + } + + for (i = 1; i < FM_PCD_CC_STATS_MAX_NUM_OF_FLR; i++) + { + if (p_FrameLengthRanges[i-1] >= p_FrameLengthRanges[i]) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, + 
("Frame length range must be larger at least by 1 from preceding range")); + + /* Stop when last range is reached */ + if (p_FrameLengthRanges[i] == 0xFFFF) + break; + } + + if ((i >= FM_PCD_CC_STATS_MAX_NUM_OF_FLR) || + (p_FrameLengthRanges[i] != 0xFFFF)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Last Frame length range must be 0xFFFF")); + + *p_NumOfRanges = i+1; + + /* Allocate an extra counter for byte count, as counters + array always begins with byte count */ + *p_CountersArraySize = (*p_NumOfRanges + 1) * FM_PCD_CC_STATS_COUNTER_SIZE; + + } + return E_OK; +#endif /* (DPAA_VERSION >= 11) */ + + default: + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Statistics mode")); + } +} + +static t_Error CheckParams(t_Handle h_FmPcd, + t_FmPcdCcNodeParams *p_CcNodeParam, + t_FmPcdCcNode *p_CcNode, + bool *isKeyTblAlloc) +{ + int tmp = 0; + t_FmPcdCcKeyParams *p_KeyParams; + t_Error err; + uint32_t requiredAction = 0; + + /* Validate statistics parameters */ + err = ValidateAndCalcStatsParams(p_CcNode, + p_CcNodeParam, + &(p_CcNode->numOfStatsFLRs), + &(p_CcNode->countersArraySize)); + if (err) + RETURN_ERROR(MAJOR, err, ("Invalid statistics parameters")); + + /* Validate next engine parameters on Miss */ + err = ValidateNextEngineParams(h_FmPcd, + &p_CcNodeParam->keysParams.ccNextEngineParamsForMiss, + p_CcNode->statisticsMode); + if (err) + RETURN_ERROR(MAJOR, err, ("For this node MissNextEngineParams are not valid")); + + if (p_CcNodeParam->keysParams.ccNextEngineParamsForMiss.h_Manip) + { + err = FmPcdManipCheckParamsForCcNextEngine(&p_CcNodeParam->keysParams.ccNextEngineParamsForMiss, &requiredAction); + if (err) + RETURN_ERROR(MAJOR, err, (NO_MSG)); + } + + memcpy(&p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].nextEngineParams, + &p_CcNodeParam->keysParams.ccNextEngineParamsForMiss, + sizeof(t_FmPcdCcNextEngineParams)); + + p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].requiredAction = requiredAction; + + if 
((p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].nextEngineParams.nextEngine == e_FM_PCD_CC) + && p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].nextEngineParams.h_Manip) + { + err = AllocAndFillAdForContLookupManip(p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].nextEngineParams.params.ccParams.h_CcNode); + if (err) + RETURN_ERROR(MAJOR, err, (NO_MSG)); + } + + for (tmp = 0; tmp < p_CcNode->numOfKeys; tmp++) + { + p_KeyParams = &p_CcNodeParam->keysParams.keyParams[tmp]; + + if (!p_KeyParams->p_Key) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("p_Key is not initialized")); + + err = ValidateNextEngineParams(h_FmPcd, + &p_KeyParams->ccNextEngineParams, + p_CcNode->statisticsMode); + if (err) + RETURN_ERROR(MAJOR, err, (NO_MSG)); + + err = UpdateGblMask(p_CcNode, + p_CcNodeParam->keysParams.keySize, + p_KeyParams->p_Mask); + if (err) + RETURN_ERROR(MAJOR, err, (NO_MSG)); + + if (p_KeyParams->ccNextEngineParams.h_Manip) + { + err = FmPcdManipCheckParamsForCcNextEngine(&p_KeyParams->ccNextEngineParams, &requiredAction); + if (err) + RETURN_ERROR(MAJOR, err, (NO_MSG)); + } + + /* Store 'key' parameters - key, mask (if passed by the user) */ + memcpy(p_CcNode->keyAndNextEngineParams[tmp].key, p_KeyParams->p_Key, p_CcNodeParam->keysParams.keySize); + + if (p_KeyParams->p_Mask) + memcpy(p_CcNode->keyAndNextEngineParams[tmp].mask, + p_KeyParams->p_Mask, + p_CcNodeParam->keysParams.keySize); + else + memset((void *)(p_CcNode->keyAndNextEngineParams[tmp].mask), + 0xFF, + p_CcNodeParam->keysParams.keySize); + + /* Store next engine parameters */ + memcpy(&p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams, + &p_KeyParams->ccNextEngineParams, + sizeof(t_FmPcdCcNextEngineParams)); + + p_CcNode->keyAndNextEngineParams[tmp].requiredAction = requiredAction; + + if ((p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.nextEngine == e_FM_PCD_CC) + && p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.h_Manip) + { + err = 
AllocAndFillAdForContLookupManip(p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.params.ccParams.h_CcNode); + if (err) + RETURN_ERROR(MAJOR, err, (NO_MSG)); + } + } + + if (p_CcNode->maxNumOfKeys) + { + if (p_CcNode->maxNumOfKeys < p_CcNode->numOfKeys) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Number of keys exceed the provided maximal number of keys")); + } + + *isKeyTblAlloc = TRUE; + + return E_OK; +} + +static t_Error Ipv4TtlOrIpv6HopLimitCheckParams(t_Handle h_FmPcd, + t_FmPcdCcNodeParams *p_CcNodeParam, + t_FmPcdCcNode *p_CcNode, + bool *isKeyTblAlloc) +{ + int tmp = 0; + t_FmPcdCcKeyParams *p_KeyParams; + t_Error err; + uint8_t key = 0x01; + uint32_t requiredAction = 0; + + if (p_CcNode->numOfKeys != 1) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("For node of the type IPV4_TTL or IPV6_HOP_LIMIT the maximal supported 'numOfKeys' is 1")); + + if ((p_CcNodeParam->keysParams.maxNumOfKeys) && (p_CcNodeParam->keysParams.maxNumOfKeys != 1)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("For node of the type IPV4_TTL or IPV6_HOP_LIMIT the maximal supported 'maxNumOfKeys' is 1")); + + /* Validate statistics parameters */ + err = ValidateAndCalcStatsParams(p_CcNode, + p_CcNodeParam, + &(p_CcNode->numOfStatsFLRs), + &(p_CcNode->countersArraySize)); + if (err) + RETURN_ERROR(MAJOR, err, ("Invalid statistics parameters")); + + err = ValidateNextEngineParams(h_FmPcd, + &p_CcNodeParam->keysParams.ccNextEngineParamsForMiss, + p_CcNodeParam->keysParams.statisticsMode); + if (err) + RETURN_ERROR(MAJOR, err, ("For this node MissNextEngineParams are not valid")); + + if (p_CcNodeParam->keysParams.ccNextEngineParamsForMiss.h_Manip) + { + err = FmPcdManipCheckParamsForCcNextEngine(&p_CcNodeParam->keysParams.ccNextEngineParamsForMiss, &requiredAction); + if (err) + RETURN_ERROR(MAJOR, err, (NO_MSG)); + } + + memcpy(&p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].nextEngineParams, + &p_CcNodeParam->keysParams.ccNextEngineParamsForMiss, + sizeof(t_FmPcdCcNextEngineParams)); + + 
p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].requiredAction = requiredAction; + + if ((p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].nextEngineParams.nextEngine == e_FM_PCD_CC) + && p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].nextEngineParams.h_Manip) + { + err = AllocAndFillAdForContLookupManip(p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].nextEngineParams.params.ccParams.h_CcNode); + if (err) + RETURN_ERROR(MAJOR, err, (NO_MSG)); + } + + for (tmp = 0; tmp < p_CcNode->numOfKeys; tmp++) + { + p_KeyParams = &p_CcNodeParam->keysParams.keyParams[tmp]; + + if (p_KeyParams->p_Mask) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("For node of the type IPV4_TTL or IPV6_HOP_LIMIT p_Mask can not be initialized")); + + if (memcmp(p_KeyParams->p_Key, &key, 1) != 0) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("For node of the type IPV4_TTL or IPV6_HOP_LIMIT p_Key has to be 1")); + + err = ValidateNextEngineParams(h_FmPcd, + &p_KeyParams->ccNextEngineParams, + p_CcNode->statisticsMode); + if (err) + RETURN_ERROR(MAJOR, err, (NO_MSG)); + + if (p_KeyParams->ccNextEngineParams.h_Manip) + { + err = FmPcdManipCheckParamsForCcNextEngine(&p_KeyParams->ccNextEngineParams, &requiredAction); + if (err) + RETURN_ERROR(MAJOR, err, (NO_MSG)); + } + + /* Store 'key' parameters - key (fixed to 0x01), key size of 1 byte and full mask */ + p_CcNode->keyAndNextEngineParams[tmp].key[0] = key; + p_CcNode->keyAndNextEngineParams[tmp].mask[0] = 0xFF; + + /* Store NextEngine parameters */ + memcpy(&p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams, + &p_KeyParams->ccNextEngineParams, + sizeof(t_FmPcdCcNextEngineParams)); + + if ((p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.nextEngine == e_FM_PCD_CC) + && p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.h_Manip) + { + err = AllocAndFillAdForContLookupManip(p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.params.ccParams.h_CcNode); + if (err) + RETURN_ERROR(MAJOR, err, (NO_MSG)); + } + 
p_CcNode->keyAndNextEngineParams[tmp].requiredAction = requiredAction; + } + + *isKeyTblAlloc = FALSE; + + return E_OK; +} + +static t_Error IcHashIndexedCheckParams(t_Handle h_FmPcd, + t_FmPcdCcNodeParams *p_CcNodeParam, + t_FmPcdCcNode *p_CcNode, + bool *isKeyTblAlloc) +{ + int tmp = 0, countOnes = 0; + t_FmPcdCcKeyParams *p_KeyParams; + t_Error err; + uint16_t glblMask = p_CcNodeParam->extractCcParams.extractNonHdr.icIndxMask; + uint16_t countMask = (uint16_t)(glblMask >> 4); + uint32_t requiredAction = 0; + + if (glblMask & 0x000f) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("icIndxMask has to be with last nibble 0")); + + while (countMask) + { + countOnes++; + countMask = (uint16_t)(countMask >> 1); + } + + if (!POWER_OF_2(p_CcNode->numOfKeys)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("For Node of the type INDEXED numOfKeys has to be powerOfTwo")); + + if (p_CcNode->numOfKeys != ((uint32_t)1 << countOnes)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("For Node of the type IC_HASH_INDEXED numOfKeys has to be powerOfTwo")); + + if (p_CcNodeParam->keysParams.maxNumOfKeys && + (p_CcNodeParam->keysParams.maxNumOfKeys != p_CcNode->numOfKeys)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("For Node of the type INDEXED 'maxNumOfKeys' should be 0 or equal 'numOfKeys'")); + + /* Validate statistics parameters */ + err = ValidateAndCalcStatsParams(p_CcNode, + p_CcNodeParam, + &(p_CcNode->numOfStatsFLRs), + &(p_CcNode->countersArraySize)); + if (err) + RETURN_ERROR(MAJOR, err, ("Invalid statistics parameters")); + + err = ValidateNextEngineParams(h_FmPcd, + &p_CcNodeParam->keysParams.ccNextEngineParamsForMiss, + p_CcNode->statisticsMode); + if (GET_ERROR_TYPE(err)!= E_NOT_SUPPORTED) + RETURN_ERROR(MAJOR, err, ("MissNextEngineParams for the node of the type IC_INDEX_HASH has to be UnInitialized")); + + for (tmp = 0; tmp < p_CcNode->numOfKeys; tmp++) + { + p_KeyParams = &p_CcNodeParam->keysParams.keyParams[tmp]; + + if (p_KeyParams->p_Mask || p_KeyParams->p_Key) + RETURN_ERROR(MAJOR, 
E_INVALID_VALUE, ("For Node of the type IC_HASH_INDEXED p_Key or p_Mask has to be NULL")); + + if ((glblMask & (tmp * 16)) == (tmp * 16)) + { + err = ValidateNextEngineParams(h_FmPcd, + &p_KeyParams->ccNextEngineParams, + p_CcNode->statisticsMode); + if (err) + RETURN_ERROR(MAJOR, err, ("This index has to be initialized for the node of the type IC_INDEX_HASH according to settings of GlobalMask ")); + + if (p_KeyParams->ccNextEngineParams.h_Manip) + { + err = FmPcdManipCheckParamsForCcNextEngine(&p_KeyParams->ccNextEngineParams, &requiredAction); + if (err) + RETURN_ERROR(MAJOR, err, (NO_MSG)); + p_CcNode->keyAndNextEngineParams[tmp].requiredAction = requiredAction; + } + + memcpy(&p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams, + &p_KeyParams->ccNextEngineParams, + sizeof(t_FmPcdCcNextEngineParams)); + + if ((p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.nextEngine == e_FM_PCD_CC) + && p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.h_Manip) + { + err = AllocAndFillAdForContLookupManip(p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.params.ccParams.h_CcNode); + if (err) + RETURN_ERROR(MAJOR, err, (NO_MSG)); + } + } + else + { + err = ValidateNextEngineParams(h_FmPcd, + &p_KeyParams->ccNextEngineParams, + p_CcNode->statisticsMode); + if (GET_ERROR_TYPE(err)!= E_NOT_SUPPORTED) + RETURN_ERROR(MAJOR, err, ("This index has to be UnInitialized for the node of the type IC_INDEX_HASH according to settings of GlobalMask")); + } + } + + *isKeyTblAlloc = FALSE; + memcpy(PTR_MOVE(p_CcNode->p_GlblMask, 2), &glblMask, 2); + + return E_OK; +} + +static t_Error ModifyNextEngineParamNode(t_Handle h_FmPcd, + t_Handle h_FmPcdCcNode, + uint16_t keyIndex, + t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams) +{ + t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_FmPcdCcNode; + t_FmPcd *p_FmPcd; + t_List h_OldPointersLst, h_NewPointersLst; + t_FmPcdModifyCcKeyAdditionalParams *p_ModifyKeyParams; + t_Error err = E_OK; + + 
SANITY_CHECK_RETURN_ERROR(h_FmPcd,E_INVALID_VALUE); + SANITY_CHECK_RETURN_ERROR(p_CcNode,E_INVALID_HANDLE); + + if (keyIndex >= p_CcNode->numOfKeys) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("keyIndex > previously cleared last index + 1")); + + p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd; + + INIT_LIST(&h_OldPointersLst); + INIT_LIST(&h_NewPointersLst); + + p_ModifyKeyParams = ModifyKeyCommonPart1(p_CcNode, keyIndex, e_MODIFY_STATE_CHANGE, FALSE, FALSE, FALSE); + if (!p_ModifyKeyParams) + RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG); + + if (p_CcNode->maxNumOfKeys) + { + if (!TRY_LOCK(p_FmPcd->h_ShadowSpinlock, &p_FmPcd->shadowLock)) + { + XX_Free(p_ModifyKeyParams); + return ERROR_CODE(E_BUSY); + } + } + + err = BuildNewNodeModifyNextEngine(h_FmPcd, + p_CcNode, + keyIndex, + p_FmPcdCcNextEngineParams, + &h_OldPointersLst, + &h_NewPointersLst, + p_ModifyKeyParams); + if (err) + { + XX_Free(p_ModifyKeyParams); + if (p_CcNode->maxNumOfKeys) + RELEASE_LOCK(p_FmPcd->shadowLock); + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + err = DoDynamicChange(p_FmPcd, &h_OldPointersLst, &h_NewPointersLst, p_ModifyKeyParams, FALSE); + + if (p_CcNode->maxNumOfKeys) + RELEASE_LOCK(p_FmPcd->shadowLock); + + return err; +} + +static t_Error FindKeyIndex(t_Handle h_CcNode, + uint8_t keySize, + uint8_t *p_Key, + uint8_t *p_Mask, + uint16_t *p_KeyIndex) +{ + t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode; + uint8_t tmpMask[FM_PCD_MAX_SIZE_OF_KEY]; + uint16_t i; + + ASSERT_COND(p_Key); + ASSERT_COND(p_KeyIndex); + ASSERT_COND(keySize < FM_PCD_MAX_SIZE_OF_KEY); + + if (keySize != p_CcNode->userSizeOfExtraction) + RETURN_ERROR(MINOR, E_INVALID_VALUE, ("Key size doesn't match the extraction size of the node")); + + /* If user didn't pass a mask for this key, we'll look for full extraction mask */ + if (!p_Mask) + memset(tmpMask, 0xFF, keySize); + + for (i = 0 ; i < p_CcNode->numOfKeys; i++) + { + /* Comparing received key */ + if (memcmp(p_Key, p_CcNode->keyAndNextEngineParams[i].key, keySize) == 
0) + { + if (p_Mask) + { + /* If a user passed a mask for this key, it must match to the existing key's mask for a correct match */ + if (memcmp(p_Mask, p_CcNode->keyAndNextEngineParams[i].mask, keySize) == 0) + { + *p_KeyIndex = i; + return E_OK; + } + } + else + { + /* If user didn't pass a mask for this key, check if the existing key mask is full extraction */ + if (memcmp(tmpMask, p_CcNode->keyAndNextEngineParams[i].mask, keySize) == 0) + { + *p_KeyIndex = i; + return E_OK; + } + } + } + } + + return ERROR_CODE(E_NOT_FOUND); +} + +static t_Error CalcAndUpdateCcShadow(t_FmPcdCcNode *p_CcNode, + bool isKeyTblAlloc, + uint32_t *p_MatchTableSize, + uint32_t *p_AdTableSize) +{ + uint32_t shadowSize; + t_Error err; + + /* Calculate keys table maximal size - each entry consists of a key and a mask, + (if local mask support is requested) */ + *p_MatchTableSize = p_CcNode->ccKeySizeAccExtraction * sizeof(uint8_t) * p_CcNode->maxNumOfKeys; + + if (p_CcNode->maskSupport) + *p_MatchTableSize *= 2; + + /* Calculate next action descriptors table, including one more entry for miss */ + *p_AdTableSize = (uint32_t)((p_CcNode->maxNumOfKeys + 1) * FM_PCD_CC_AD_ENTRY_SIZE); + + /* Calculate maximal shadow size of this node. + All shadow structures will be used for runtime modifications host command. If + keys table was allocated for this node, the keys table and next engines table may + be modified in run time (entries added or removed), so shadow tables are requires. 
+ Otherwise, the only supported runtime modification is a specific next engine update + and this requires shadow memory of a single AD */ + + /* Shadow size should be enough to hold the following 3 structures: + * 1 - an action descriptor */ + shadowSize = FM_PCD_CC_AD_ENTRY_SIZE; + + /* 2 - keys match table, if was allocated for the current node */ + if (isKeyTblAlloc) + shadowSize += *p_MatchTableSize; + + /* 3 - next action descriptors table */ + shadowSize += *p_AdTableSize; + + /* Update shadow to the calculated size */ + err = FmPcdUpdateCcShadow (p_CcNode->h_FmPcd, (uint32_t)shadowSize, FM_PCD_CC_AD_TABLE_ALIGN); + if (err != E_OK) + { + DeleteNode(p_CcNode); + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC node shadow")); + } + + return E_OK; +} + +static t_Error AllocStatsObjs(t_FmPcdCcNode *p_CcNode) +{ + t_FmPcdStatsObj *p_StatsObj; + t_Handle h_FmMuram, h_StatsAd, h_StatsCounters; + uint32_t i; + + h_FmMuram = FmPcdGetMuramHandle(p_CcNode->h_FmPcd); + if (!h_FmMuram) + RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("FM MURAM")); + + /* Allocate statistics ADs and statistics counter. 
An extra pair (AD + counters) + will be allocated to support runtime modifications */ + for (i = 0; i < p_CcNode->maxNumOfKeys + 2; i++) + { + /* Allocate list object structure */ + p_StatsObj = XX_Malloc(sizeof(t_FmPcdStatsObj)); + if (!p_StatsObj) + { + FreeStatObjects(&p_CcNode->availableStatsLst, h_FmMuram); + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Statistics object")); + } + memset(p_StatsObj, 0, sizeof(t_FmPcdStatsObj)); + + /* Allocate statistics AD from MURAM */ + h_StatsAd = (t_Handle)FM_MURAM_AllocMem(h_FmMuram, + FM_PCD_CC_AD_ENTRY_SIZE, + FM_PCD_CC_AD_TABLE_ALIGN); + if (!h_StatsAd) + { + FreeStatObjects(&p_CcNode->availableStatsLst, h_FmMuram); + XX_Free(p_StatsObj); + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for statistics ADs")); + } + IOMemSet32(h_StatsAd, 0, FM_PCD_CC_AD_ENTRY_SIZE); + + /* Allocate statistics counters from MURAM */ + h_StatsCounters = (t_Handle)FM_MURAM_AllocMem(h_FmMuram, + p_CcNode->countersArraySize, + FM_PCD_CC_AD_TABLE_ALIGN); + if (!h_StatsCounters) + { + FreeStatObjects(&p_CcNode->availableStatsLst, h_FmMuram); + FM_MURAM_FreeMem(h_FmMuram, h_StatsAd); + XX_Free(p_StatsObj); + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for statistics counters")); + } + IOMemSet32(h_StatsCounters, 0, p_CcNode->countersArraySize); + + p_StatsObj->h_StatsAd = h_StatsAd; + p_StatsObj->h_StatsCounters = h_StatsCounters; + + EnqueueStatsObj(&p_CcNode->availableStatsLst, p_StatsObj); + } + + return E_OK; +} + +static t_Error MatchTableGetKeyStatistics(t_FmPcdCcNode *p_CcNode, + uint16_t keyIndex, + t_FmPcdCcKeyStatistics *p_KeyStatistics) +{ + uint32_t *p_StatsCounters, i; + + if (p_CcNode->statisticsMode == e_FM_PCD_CC_STATS_MODE_NONE) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Statistics were not enabled for this match table")); + + if (keyIndex >= p_CcNode->numOfKeys) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("The provided keyIndex exceeds the number of keys in this match table")); + + if 
(!p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Statistics were not enabled for this key")); + + memset(p_KeyStatistics, 0, sizeof (t_FmPcdCcKeyStatistics)); + + p_StatsCounters = p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj->h_StatsCounters; + ASSERT_COND(p_StatsCounters); + + p_KeyStatistics->byteCount = GET_UINT32(*p_StatsCounters); + + for (i = 1; i <= p_CcNode->numOfStatsFLRs; i++) + { + p_StatsCounters = PTR_MOVE(p_StatsCounters, FM_PCD_CC_STATS_COUNTER_SIZE); + + p_KeyStatistics->frameCount += GET_UINT32(*p_StatsCounters); + +#if (DPAA_VERSION >= 11) + p_KeyStatistics->frameLengthRangeCount[i-1] = GET_UINT32(*p_StatsCounters); +#endif /* (DPAA_VERSION >= 11) */ + } + + return E_OK; +} + + +/*****************************************************************************/ +/* Inter-module API routines */ +/*****************************************************************************/ + +t_CcNodeInformation* FindNodeInfoInReleventLst(t_List *p_List, t_Handle h_Info, t_Handle h_Spinlock) +{ + t_CcNodeInformation *p_CcInformation; + t_List *p_Pos; + uint32_t intFlags; + + intFlags = XX_LockIntrSpinlock(h_Spinlock); + + for (p_Pos = LIST_FIRST(p_List); p_Pos != (p_List); p_Pos = LIST_NEXT(p_Pos)) + { + p_CcInformation = CC_NODE_F_OBJECT(p_Pos); + + ASSERT_COND(p_CcInformation->h_CcNode); + + if (p_CcInformation->h_CcNode == h_Info) + { + XX_UnlockIntrSpinlock(h_Spinlock, intFlags); + return p_CcInformation; + } + } + + XX_UnlockIntrSpinlock(h_Spinlock, intFlags); + + return NULL; +} + +void EnqueueNodeInfoToRelevantLst(t_List *p_List, t_CcNodeInformation *p_CcInfo, t_Handle h_Spinlock) +{ + t_CcNodeInformation *p_CcInformation; + uint32_t intFlags = 0; + + p_CcInformation = (t_CcNodeInformation *)XX_Malloc(sizeof(t_CcNodeInformation)); + + if (p_CcInformation) + { + memset(p_CcInformation, 0, sizeof(t_CcNodeInformation)); + memcpy(p_CcInformation, p_CcInfo, sizeof(t_CcNodeInformation)); + 
INIT_LIST(&p_CcInformation->node); + + if (h_Spinlock) + intFlags = XX_LockIntrSpinlock(h_Spinlock); + + LIST_AddToTail(&p_CcInformation->node, p_List); + + if (h_Spinlock) + XX_UnlockIntrSpinlock(h_Spinlock, intFlags); + } + else + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("CC Node Information")); +} + +void DequeueNodeInfoFromRelevantLst(t_List *p_List, t_Handle h_Info, t_Handle h_Spinlock) +{ + t_CcNodeInformation *p_CcInformation = NULL; + uint32_t intFlags = 0; + t_List *p_Pos; + + if (h_Spinlock) + intFlags = XX_LockIntrSpinlock(h_Spinlock); + + if (LIST_IsEmpty(p_List)) + { + XX_RestoreAllIntr(intFlags); + return; + } + + for (p_Pos = LIST_FIRST(p_List); p_Pos != (p_List); p_Pos = LIST_NEXT(p_Pos)) + { + p_CcInformation = CC_NODE_F_OBJECT(p_Pos); + ASSERT_COND(p_CcInformation); + ASSERT_COND(p_CcInformation->h_CcNode); + if (p_CcInformation->h_CcNode == h_Info) + break; + } + + if (p_CcInformation) + { + LIST_DelAndInit(&p_CcInformation->node); + XX_Free(p_CcInformation); + } + + if (h_Spinlock) + XX_UnlockIntrSpinlock(h_Spinlock, intFlags); +} + +void NextStepAd(t_Handle h_Ad, + t_FmPcdCcStatsParams *p_FmPcdCcStatsParams, + t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams, + t_FmPcd *p_FmPcd) +{ + switch (p_FmPcdCcNextEngineParams->nextEngine) + { + case (e_FM_PCD_KG): + case (e_FM_PCD_PLCR): + case (e_FM_PCD_DONE): + /* if NIA is not CC, create a "result" type AD */ + FillAdOfTypeResult(h_Ad, + p_FmPcdCcStatsParams, + p_FmPcd, + p_FmPcdCcNextEngineParams); + break; +#if (DPAA_VERSION >= 11) + case (e_FM_PCD_FR): + if (p_FmPcdCcNextEngineParams->params.frParams.h_FrmReplic) + { + FillAdOfTypeContLookup(h_Ad, + p_FmPcdCcStatsParams, + p_FmPcd, + p_FmPcdCcNextEngineParams->params.ccParams.h_CcNode, + p_FmPcdCcNextEngineParams->h_Manip, + p_FmPcdCcNextEngineParams->params.frParams.h_FrmReplic); + FrmReplicGroupUpdateOwner(p_FmPcdCcNextEngineParams->params.frParams.h_FrmReplic, + TRUE/* add */); + } + break; +#endif /* (DPAA_VERSION >= 11) */ + + case 
(e_FM_PCD_CC): + /* if NIA is not CC, create a TD to continue the CC lookup */ + FillAdOfTypeContLookup(h_Ad, + p_FmPcdCcStatsParams, + p_FmPcd, + p_FmPcdCcNextEngineParams->params.ccParams.h_CcNode, + p_FmPcdCcNextEngineParams->h_Manip, + NULL); + + UpdateNodeOwner(p_FmPcdCcNextEngineParams->params.ccParams.h_CcNode, TRUE); + break; + + default: + return; + } +} + +t_Error FmPcdCcTreeAddIPR(t_Handle h_FmPcd, + t_Handle h_FmTree, + t_Handle h_NetEnv, + t_Handle h_IpReassemblyManip, + bool createSchemes) +{ + t_FmPcdCcTree *p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmTree; + t_FmPcdCcNextEngineParams nextEngineParams; + t_NetEnvParams netEnvParams; + t_Handle h_Ad; + bool isIpv6Present; + uint8_t ipv4GroupId, ipv6GroupId; + t_Error err; + + ASSERT_COND(p_FmPcdCcTree); + + /* this routine must be protected by the calling routine! */ + + memset(&nextEngineParams, 0, sizeof(t_FmPcdCcNextEngineParams)); + memset(&netEnvParams, 0, sizeof(t_NetEnvParams)); + + h_Ad = UINT_TO_PTR(p_FmPcdCcTree->ccTreeBaseAddr); + + isIpv6Present = FmPcdManipIpReassmIsIpv6Hdr(h_IpReassemblyManip); + + if (isIpv6Present && (p_FmPcdCcTree->numOfEntries > (FM_PCD_MAX_NUM_OF_CC_GROUPS-2))) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("need two free entries for IPR")); + + if (p_FmPcdCcTree->numOfEntries > (FM_PCD_MAX_NUM_OF_CC_GROUPS-1)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("need two free entries for IPR")); + + nextEngineParams.nextEngine = e_FM_PCD_DONE; + nextEngineParams.h_Manip = h_IpReassemblyManip; + + /* Lock tree */ + err = CcRootTryLock(p_FmPcdCcTree); + if (err) + return ERROR_CODE(E_BUSY); + + if (p_FmPcdCcTree->h_IpReassemblyManip == h_IpReassemblyManip) + { + CcRootReleaseLock(p_FmPcdCcTree); + return E_OK; + } + + if ((p_FmPcdCcTree->h_IpReassemblyManip) && + (p_FmPcdCcTree->h_IpReassemblyManip != h_IpReassemblyManip)) + { + CcRootReleaseLock(p_FmPcdCcTree); + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("This tree was previously updated with different IPR")); + } + + /* Initialize IPR for the 
first time for this tree */ + if (isIpv6Present) + { + ipv6GroupId = p_FmPcdCcTree->numOfGrps++; + p_FmPcdCcTree->fmPcdGroupParam[ipv6GroupId].baseGroupEntry = (FM_PCD_MAX_NUM_OF_CC_GROUPS-2); + + if (createSchemes) + { + err = FmPcdManipBuildIpReassmScheme(h_FmPcd, h_NetEnv, p_FmPcdCcTree, h_IpReassemblyManip, FALSE, ipv6GroupId); + if (err) + { + p_FmPcdCcTree->numOfGrps--; + CcRootReleaseLock(p_FmPcdCcTree); + RETURN_ERROR(MAJOR, err, NO_MSG); + } + } + + NextStepAd(PTR_MOVE(h_Ad, (FM_PCD_MAX_NUM_OF_CC_GROUPS-2) * FM_PCD_CC_AD_ENTRY_SIZE), + NULL, + &nextEngineParams, + h_FmPcd); + } + + ipv4GroupId = p_FmPcdCcTree->numOfGrps++; + p_FmPcdCcTree->fmPcdGroupParam[ipv4GroupId].totalBitsMask = 0; + p_FmPcdCcTree->fmPcdGroupParam[ipv4GroupId].baseGroupEntry = (FM_PCD_MAX_NUM_OF_CC_GROUPS-1); + + if (createSchemes) + { + err = FmPcdManipBuildIpReassmScheme(h_FmPcd, h_NetEnv, p_FmPcdCcTree, h_IpReassemblyManip, TRUE, ipv4GroupId); + if (err) + { + p_FmPcdCcTree->numOfGrps--; + if (isIpv6Present) + { + p_FmPcdCcTree->numOfGrps--; + FmPcdManipDeleteIpReassmSchemes(h_IpReassemblyManip); + } + CcRootReleaseLock(p_FmPcdCcTree); + RETURN_ERROR(MAJOR, err, NO_MSG); + } + } + + NextStepAd(PTR_MOVE(h_Ad, (FM_PCD_MAX_NUM_OF_CC_GROUPS-1) * FM_PCD_CC_AD_ENTRY_SIZE), + NULL, + &nextEngineParams, + h_FmPcd); + + p_FmPcdCcTree->h_IpReassemblyManip = h_IpReassemblyManip; + + CcRootReleaseLock(p_FmPcdCcTree); + + return E_OK; +} + + +t_Handle FmPcdCcTreeGetSavedManipParams(t_Handle h_FmTree) +{ + t_FmPcdCcTree *p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmTree; + + ASSERT_COND(p_FmPcdCcTree); + + return p_FmPcdCcTree->h_FmPcdCcSavedManipParams; +} + +void FmPcdCcTreeSetSavedManipParams(t_Handle h_FmTree, t_Handle h_SavedManipParams) +{ + t_FmPcdCcTree *p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmTree; + + ASSERT_COND(p_FmPcdCcTree); + + p_FmPcdCcTree->h_FmPcdCcSavedManipParams = h_SavedManipParams; +} + +uint8_t FmPcdCcGetParseCode(t_Handle h_CcNode) +{ + t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode 
*)h_CcNode; + + ASSERT_COND(p_CcNode); + + return p_CcNode->parseCode; +} + +uint8_t FmPcdCcGetOffset(t_Handle h_CcNode) +{ + t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode; + + ASSERT_COND(p_CcNode); + + return p_CcNode->offset; +} + +uint16_t FmPcdCcGetNumOfKeys(t_Handle h_CcNode) +{ + t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode; + + ASSERT_COND(p_CcNode); + + return p_CcNode->numOfKeys; +} + +t_Error FmPcdCcModifyNextEngineParamTree(t_Handle h_FmPcd, + t_Handle h_FmPcdCcTree, + uint8_t grpId, + uint8_t index, + t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams) +{ + t_FmPcdCcTree *p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmPcdCcTree; + t_FmPcd *p_FmPcd; + t_List h_OldPointersLst, h_NewPointersLst; + uint16_t keyIndex; + t_FmPcdModifyCcKeyAdditionalParams *p_ModifyKeyParams; + t_Error err = E_OK; + + SANITY_CHECK_RETURN_ERROR(h_FmPcd,E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(h_FmPcdCcTree,E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR((grpId <= 7),E_INVALID_VALUE); + + if (grpId >= p_FmPcdCcTree->numOfGrps) + RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("grpId you asked > numOfGroup of relevant tree")); + + if (index >= p_FmPcdCcTree->fmPcdGroupParam[grpId].numOfEntriesInGroup) + RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("index > numOfEntriesInGroup")); + + p_FmPcd = (t_FmPcd *)h_FmPcd; + + INIT_LIST(&h_OldPointersLst); + INIT_LIST(&h_NewPointersLst); + + keyIndex = (uint16_t)(p_FmPcdCcTree->fmPcdGroupParam[grpId].baseGroupEntry + index); + + p_ModifyKeyParams = ModifyKeyCommonPart1(p_FmPcdCcTree, keyIndex, e_MODIFY_STATE_CHANGE, FALSE, FALSE, TRUE); + if (!p_ModifyKeyParams) + RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG); + + p_ModifyKeyParams->tree = TRUE; + + if (p_FmPcd->p_CcShadow) + if (!TRY_LOCK(p_FmPcd->h_ShadowSpinlock, &p_FmPcd->shadowLock)) + { + XX_Free(p_ModifyKeyParams); + return ERROR_CODE(E_BUSY); + } + + err = BuildNewNodeModifyNextEngine(p_FmPcd, + p_FmPcdCcTree, + keyIndex, + p_FmPcdCcNextEngineParams, + &h_OldPointersLst, + 
&h_NewPointersLst, + p_ModifyKeyParams); + if (err) + { + XX_Free(p_ModifyKeyParams); + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + err = DoDynamicChange(p_FmPcd, &h_OldPointersLst, &h_NewPointersLst, p_ModifyKeyParams, FALSE); + + if (p_FmPcd->p_CcShadow) + RELEASE_LOCK(p_FmPcd->shadowLock); + + return err; + +} + +t_Error FmPcdCcRemoveKey(t_Handle h_FmPcd, + t_Handle h_FmPcdCcNode, + uint16_t keyIndex) +{ + + t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *) h_FmPcdCcNode; + t_FmPcd *p_FmPcd; + t_FmPcdModifyCcKeyAdditionalParams *p_ModifyKeyParams; + t_List h_OldPointersLst, h_NewPointersLst; + bool useShadowStructs = FALSE; + t_Error err = E_OK; + + if (keyIndex >= p_CcNode->numOfKeys) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("impossible to remove key when numOfKeys <= keyIndex")); + + if (!p_CcNode->numOfKeys) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("keyIndex you asked > numOfKeys of relevant node that was initialized")); + + if (p_CcNode->h_FmPcd != h_FmPcd) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("handler to FmPcd is different from the handle provided at node initialization time")); + + p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd; + + INIT_LIST(&h_OldPointersLst); + INIT_LIST(&h_NewPointersLst); + + p_ModifyKeyParams = ModifyKeyCommonPart1(p_CcNode, keyIndex, e_MODIFY_STATE_REMOVE, TRUE, TRUE, FALSE); + if (!p_ModifyKeyParams) + RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG); + + if (p_CcNode->maxNumOfKeys) + { + if (!TRY_LOCK(p_FmPcd->h_ShadowSpinlock, &p_FmPcd->shadowLock)) + { + XX_Free(p_ModifyKeyParams); + return ERROR_CODE(E_BUSY); + } + + useShadowStructs = TRUE; + } + + err = BuildNewNodeRemoveKey(p_CcNode, keyIndex, p_ModifyKeyParams); + if (err) + { + XX_Free(p_ModifyKeyParams); + if (p_CcNode->maxNumOfKeys) + RELEASE_LOCK(p_FmPcd->shadowLock); + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + err = UpdatePtrWhichPointOnCrntMdfNode(p_CcNode, + p_ModifyKeyParams, + &h_OldPointersLst, + &h_NewPointersLst); + if (err) + { + ReleaseNewNodeCommonPart(p_ModifyKeyParams); + 
XX_Free(p_ModifyKeyParams); + if (p_CcNode->maxNumOfKeys) + RELEASE_LOCK(p_FmPcd->shadowLock); + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + err = DoDynamicChange(p_FmPcd, + &h_OldPointersLst, + &h_NewPointersLst, + p_ModifyKeyParams, + useShadowStructs); + + if (p_CcNode->maxNumOfKeys) + RELEASE_LOCK(p_FmPcd->shadowLock); + + return err; +} + +t_Error FmPcdCcModifyKey(t_Handle h_FmPcd, + t_Handle h_FmPcdCcNode, + uint16_t keyIndex, + uint8_t keySize, + uint8_t *p_Key, + uint8_t *p_Mask) +{ + t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_FmPcdCcNode; + t_FmPcd *p_FmPcd; + t_List h_OldPointersLst, h_NewPointersLst; + t_FmPcdModifyCcKeyAdditionalParams *p_ModifyKeyParams; + uint16_t tmpKeyIndex; + bool useShadowStructs = FALSE; + t_Error err = E_OK; + + if (keyIndex >= p_CcNode->numOfKeys) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("keyIndex > previously cleared last index + 1")); + + if (keySize != p_CcNode->userSizeOfExtraction) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("size for ModifyKey has to be the same as defined in SetNode")); + + if (p_CcNode->h_FmPcd != h_FmPcd) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("handler to FmPcd is different from the handle provided at node initialization time")); + + err = FindKeyIndex(h_FmPcdCcNode, + keySize, + p_Key, + p_Mask, + &tmpKeyIndex); + if (GET_ERROR_TYPE(err) != E_NOT_FOUND) + RETURN_ERROR(MINOR, E_ALREADY_EXISTS, + ("The received key and mask pair was already found in the match table of the provided node")); + + p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd; + + INIT_LIST(&h_OldPointersLst); + INIT_LIST(&h_NewPointersLst); + + p_ModifyKeyParams = ModifyKeyCommonPart1(p_CcNode, keyIndex, e_MODIFY_STATE_CHANGE, TRUE, TRUE, FALSE); + if (!p_ModifyKeyParams) + RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG); + + if (p_CcNode->maxNumOfKeys) + { + if (!TRY_LOCK(p_FmPcd->h_ShadowSpinlock, &p_FmPcd->shadowLock)) + { + XX_Free(p_ModifyKeyParams); + return ERROR_CODE(E_BUSY); + } + + useShadowStructs = TRUE; + } + + err = 
BuildNewNodeModifyKey(p_CcNode, + keyIndex, + p_Key, + p_Mask, + p_ModifyKeyParams); + if (err) + { + XX_Free(p_ModifyKeyParams); + if (p_CcNode->maxNumOfKeys) + RELEASE_LOCK(p_FmPcd->shadowLock); + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + err = UpdatePtrWhichPointOnCrntMdfNode(p_CcNode, + p_ModifyKeyParams, + &h_OldPointersLst, + &h_NewPointersLst); + if (err) + { + ReleaseNewNodeCommonPart(p_ModifyKeyParams); + XX_Free(p_ModifyKeyParams); + if (p_CcNode->maxNumOfKeys) + RELEASE_LOCK(p_FmPcd->shadowLock); + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + err = DoDynamicChange(p_FmPcd, + &h_OldPointersLst, + &h_NewPointersLst, + p_ModifyKeyParams, + useShadowStructs); + + if (p_CcNode->maxNumOfKeys) + RELEASE_LOCK(p_FmPcd->shadowLock); + + return err; +} + +t_Error FmPcdCcModifyMissNextEngineParamNode(t_Handle h_FmPcd, + t_Handle h_FmPcdCcNode, + t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams) +{ + t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_FmPcdCcNode; + t_FmPcd *p_FmPcd; + t_List h_OldPointersLst, h_NewPointersLst; + uint16_t keyIndex; + t_FmPcdModifyCcKeyAdditionalParams *p_ModifyKeyParams; + t_Error err = E_OK; + + SANITY_CHECK_RETURN_ERROR(p_CcNode,E_INVALID_VALUE); + + keyIndex = p_CcNode->numOfKeys; + + p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd; + + INIT_LIST(&h_OldPointersLst); + INIT_LIST(&h_NewPointersLst); + + p_ModifyKeyParams = ModifyKeyCommonPart1(p_CcNode, keyIndex, e_MODIFY_STATE_CHANGE, FALSE, TRUE, FALSE); + if (!p_ModifyKeyParams) + RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG); + + if (p_CcNode->maxNumOfKeys) + { + if (!TRY_LOCK(p_FmPcd->h_ShadowSpinlock, &p_FmPcd->shadowLock)) + { + XX_Free(p_ModifyKeyParams); + return ERROR_CODE(E_BUSY); + } + } + + err = BuildNewNodeModifyNextEngine(h_FmPcd, + p_CcNode, + keyIndex, + p_FmPcdCcNextEngineParams, + &h_OldPointersLst, + &h_NewPointersLst, + p_ModifyKeyParams); + if (err) + { + XX_Free(p_ModifyKeyParams); + if (p_CcNode->maxNumOfKeys) + RELEASE_LOCK(p_FmPcd->shadowLock); + RETURN_ERROR(MAJOR, err, 
NO_MSG); + } + + err = DoDynamicChange(p_FmPcd, &h_OldPointersLst, &h_NewPointersLst, p_ModifyKeyParams, FALSE); + + if (p_CcNode->maxNumOfKeys) + RELEASE_LOCK(p_FmPcd->shadowLock); + + return err; +} + +t_Error FmPcdCcAddKey(t_Handle h_FmPcd, + t_Handle h_FmPcdCcNode, + uint16_t keyIndex, + uint8_t keySize, + t_FmPcdCcKeyParams *p_FmPcdCcKeyParams) +{ + t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_FmPcdCcNode; + t_FmPcd *p_FmPcd; + t_FmPcdModifyCcKeyAdditionalParams *p_ModifyKeyParams; + t_List h_OldPointersLst, h_NewPointersLst; + bool useShadowStructs = FALSE; + uint16_t tmpKeyIndex; + t_Error err = E_OK; + + if (keyIndex > p_CcNode->numOfKeys) + RETURN_ERROR(MAJOR, E_NOT_IN_RANGE, ("keyIndex > previously cleared last index + 1")); + + if (keySize != p_CcNode->userSizeOfExtraction) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("keySize has to be defined as it was defined in initialization step")); + + if (p_CcNode->h_FmPcd != h_FmPcd) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("handler to FmPcd is different from the handle provided at node initialization time")); + + if (p_CcNode->maxNumOfKeys) + { + if (p_CcNode->numOfKeys == p_CcNode->maxNumOfKeys) + RETURN_ERROR(MAJOR, E_FULL, ("number of keys exceeds the maximal number of keys provided at node initialization time")); + } + else if (p_CcNode->numOfKeys == FM_PCD_MAX_NUM_OF_KEYS) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("number of keys can not be larger than %d", FM_PCD_MAX_NUM_OF_KEYS)); + + err = FindKeyIndex(h_FmPcdCcNode, + keySize, + p_FmPcdCcKeyParams->p_Key, + p_FmPcdCcKeyParams->p_Mask, + &tmpKeyIndex); + if (GET_ERROR_TYPE(err) != E_NOT_FOUND) + RETURN_ERROR(MINOR, E_ALREADY_EXISTS, + ("The received key and mask pair was already found in the match table of the provided node")); + + p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd; + + INIT_LIST(&h_OldPointersLst); + INIT_LIST(&h_NewPointersLst); + + p_ModifyKeyParams = ModifyKeyCommonPart1(p_CcNode, + keyIndex, + e_MODIFY_STATE_ADD, + TRUE, + TRUE, + FALSE); + if 
(!p_ModifyKeyParams) + RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG); + + if (p_CcNode->maxNumOfKeys) + { + if (!TRY_LOCK(p_FmPcd->h_ShadowSpinlock, &p_FmPcd->shadowLock)) + { + XX_Free(p_ModifyKeyParams); + return ERROR_CODE(E_BUSY); + } + + useShadowStructs = TRUE; + } + + err = BuildNewNodeAddOrMdfyKeyAndNextEngine (h_FmPcd, + p_CcNode, + keyIndex, + p_FmPcdCcKeyParams, + p_ModifyKeyParams, + TRUE); + if (err) + { + ReleaseNewNodeCommonPart(p_ModifyKeyParams); + XX_Free(p_ModifyKeyParams); + if (p_CcNode->maxNumOfKeys) + RELEASE_LOCK(p_FmPcd->shadowLock); + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + err = UpdatePtrWhichPointOnCrntMdfNode(p_CcNode, + p_ModifyKeyParams, + &h_OldPointersLst, + &h_NewPointersLst); + if (err) + { + ReleaseNewNodeCommonPart(p_ModifyKeyParams); + XX_Free(p_ModifyKeyParams); + if (p_CcNode->maxNumOfKeys) + RELEASE_LOCK(p_FmPcd->shadowLock); + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + err = DoDynamicChange(p_FmPcd, + &h_OldPointersLst, + &h_NewPointersLst, + p_ModifyKeyParams, + useShadowStructs); + if (p_CcNode->maxNumOfKeys) + RELEASE_LOCK(p_FmPcd->shadowLock); + + return err; +} + +t_Error FmPcdCcModifyKeyAndNextEngine(t_Handle h_FmPcd, + t_Handle h_FmPcdCcNode, + uint16_t keyIndex, + uint8_t keySize, + t_FmPcdCcKeyParams *p_FmPcdCcKeyParams) +{ + t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_FmPcdCcNode; + t_FmPcd *p_FmPcd; + t_List h_OldPointersLst, h_NewPointersLst; + t_FmPcdModifyCcKeyAdditionalParams *p_ModifyKeyParams; + uint16_t tmpKeyIndex; + bool useShadowStructs = FALSE; + t_Error err = E_OK; + + if (keyIndex > p_CcNode->numOfKeys) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("keyIndex > previously cleared last index + 1")); + + if (keySize != p_CcNode->userSizeOfExtraction) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("keySize has to be defined as it was defined in initialization step")); + + if (p_CcNode->h_FmPcd != h_FmPcd) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("handler to FmPcd is different from the handle provided at node 
initialization time")); + + err = FindKeyIndex(h_FmPcdCcNode, + keySize, + p_FmPcdCcKeyParams->p_Key, + p_FmPcdCcKeyParams->p_Mask, + &tmpKeyIndex); + if (GET_ERROR_TYPE(err) != E_NOT_FOUND) + RETURN_ERROR(MINOR, E_ALREADY_EXISTS, + ("The received key and mask pair was already found in the match table of the provided node")); + + p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd; + + INIT_LIST(&h_OldPointersLst); + INIT_LIST(&h_NewPointersLst); + + p_ModifyKeyParams = ModifyKeyCommonPart1(p_CcNode, keyIndex, e_MODIFY_STATE_CHANGE, TRUE, TRUE, FALSE); + if (!p_ModifyKeyParams) + RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG); + + if (p_CcNode->maxNumOfKeys) + { + if (!TRY_LOCK(p_FmPcd->h_ShadowSpinlock, &p_FmPcd->shadowLock)) + { + XX_Free(p_ModifyKeyParams); + return ERROR_CODE(E_BUSY); + } + + useShadowStructs = TRUE; + } + + err = BuildNewNodeAddOrMdfyKeyAndNextEngine (h_FmPcd, + p_CcNode, + keyIndex, + p_FmPcdCcKeyParams, + p_ModifyKeyParams, + FALSE); + if (err) + { + ReleaseNewNodeCommonPart(p_ModifyKeyParams); + XX_Free(p_ModifyKeyParams); + if (p_CcNode->maxNumOfKeys) + RELEASE_LOCK(p_FmPcd->shadowLock); + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + err = UpdatePtrWhichPointOnCrntMdfNode(p_CcNode, + p_ModifyKeyParams, + &h_OldPointersLst, + &h_NewPointersLst); + if (err) + { + ReleaseNewNodeCommonPart(p_ModifyKeyParams); + XX_Free(p_ModifyKeyParams); + if (p_CcNode->maxNumOfKeys) + RELEASE_LOCK(p_FmPcd->shadowLock); + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + err = DoDynamicChange(p_FmPcd, + &h_OldPointersLst, + &h_NewPointersLst, + p_ModifyKeyParams, + useShadowStructs); + + if (p_CcNode->maxNumOfKeys) + RELEASE_LOCK(p_FmPcd->shadowLock); + + return err; +} + +uint32_t FmPcdCcGetNodeAddrOffsetFromNodeInfo(t_Handle h_FmPcd, t_Handle h_Pointer) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; + t_CcNodeInformation *p_CcNodeInfo; + + SANITY_CHECK_RETURN_VALUE(h_FmPcd,E_INVALID_HANDLE, (uint32_t)ILLEGAL_BASE); + + p_CcNodeInfo = CC_NODE_F_OBJECT(h_Pointer); + + return 
(uint32_t)(XX_VirtToPhys(p_CcNodeInfo->h_CcNode) - p_FmPcd->physicalMuramBase); +} + +t_Error FmPcdCcGetGrpParams(t_Handle h_FmPcdCcTree, uint8_t grpId, uint32_t *p_GrpBits, uint8_t *p_GrpBase) +{ + t_FmPcdCcTree *p_FmPcdCcTree = (t_FmPcdCcTree *) h_FmPcdCcTree; + + SANITY_CHECK_RETURN_ERROR(h_FmPcdCcTree, E_INVALID_HANDLE); + + if (grpId >= p_FmPcdCcTree->numOfGrps) + RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("grpId you asked > numOfGroup of relevant tree")); + + *p_GrpBits = p_FmPcdCcTree->fmPcdGroupParam[grpId].totalBitsMask; + *p_GrpBase = p_FmPcdCcTree->fmPcdGroupParam[grpId].baseGroupEntry; + + return E_OK; +} + +t_Error FmPcdCcBindTree(t_Handle h_FmPcd, + t_Handle h_PcdParams, + t_Handle h_FmPcdCcTree, + uint32_t *p_Offset, + t_Handle h_FmPort) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + t_FmPcdCcTree *p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmPcdCcTree; + t_Error err = E_OK; + + SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(h_FmPcdCcTree, E_INVALID_HANDLE); + + /* this routine must be protected by the calling routine by locking all PCD modules! */ + + err = CcUpdateParams(h_FmPcd, h_PcdParams, h_FmPort, h_FmPcdCcTree, TRUE); + + if (err == E_OK) + UpdateCcRootOwner(p_FmPcdCcTree, TRUE); + + *p_Offset = (uint32_t)(XX_VirtToPhys(UINT_TO_PTR(p_FmPcdCcTree->ccTreeBaseAddr)) - + p_FmPcd->physicalMuramBase); + + return err; +} + +t_Error FmPcdCcUnbindTree(t_Handle h_FmPcd, t_Handle h_FmPcdCcTree) +{ + t_FmPcdCcTree *p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmPcdCcTree; + + /* this routine must be protected by the calling routine by locking all PCD modules! 
*/ + + UNUSED(h_FmPcd); + + SANITY_CHECK_RETURN_ERROR(h_FmPcdCcTree,E_INVALID_HANDLE); + + UpdateCcRootOwner(p_FmPcdCcTree, FALSE); + + return E_OK; +} + +t_Error FmPcdCcNodeTreeTryLock(t_Handle h_FmPcd,t_Handle h_FmPcdCcNode, t_List *p_List) +{ + t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_FmPcdCcNode; + t_List *p_Pos, *p_Tmp; + t_CcNodeInformation *p_CcNodeInfo, nodeInfo; + uint32_t intFlags; + t_Error err = E_OK; + + intFlags = FmPcdLock(h_FmPcd); + + if (LIST_IsEmpty(&p_CcNode->ccTreesLst)) + RETURN_ERROR(MAJOR, E_NOT_AVAILABLE, ("asked for more nodes in CC than MAX")); + + LIST_FOR_EACH(p_Pos, &p_CcNode->ccTreesLst) + { + p_CcNodeInfo = CC_NODE_F_OBJECT(p_Pos); + ASSERT_COND(p_CcNodeInfo->h_CcNode); + + err = CcRootTryLock(p_CcNodeInfo->h_CcNode); + + if (err) + { + LIST_FOR_EACH(p_Tmp, &p_CcNode->ccTreesLst) + { + if (p_Tmp == p_Pos) + break; + + CcRootReleaseLock(p_CcNodeInfo->h_CcNode); + } + break; + } + + memset(&nodeInfo, 0, sizeof(t_CcNodeInformation)); + nodeInfo.h_CcNode = p_CcNodeInfo->h_CcNode; + EnqueueNodeInfoToRelevantLst(p_List, &nodeInfo, NULL); + } + + FmPcdUnlock(h_FmPcd, intFlags); + CORE_MemoryBarrier(); + + return err; +} + +void FmPcdCcNodeTreeReleaseLock(t_Handle h_FmPcd, t_List *p_List) +{ + t_List *p_Pos; + t_CcNodeInformation *p_CcNodeInfo; + t_Handle h_FmPcdCcTree; + uint32_t intFlags; + + intFlags = FmPcdLock(h_FmPcd); + + LIST_FOR_EACH(p_Pos, p_List) + { + p_CcNodeInfo = CC_NODE_F_OBJECT(p_Pos); + h_FmPcdCcTree = p_CcNodeInfo->h_CcNode; + CcRootReleaseLock(h_FmPcdCcTree); + } + + ReleaseLst(p_List); + + FmPcdUnlock(h_FmPcd, intFlags); + CORE_MemoryBarrier(); +} + + +t_Error FmPcdUpdateCcShadow (t_FmPcd *p_FmPcd, uint32_t size, uint32_t align) +{ + uint32_t intFlags; + uint32_t newSize = 0, newAlign = 0; + bool allocFail = FALSE; + + ASSERT_COND(p_FmPcd); + + if (!size) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("size must be larger then 0")); + + if (!POWER_OF_2(align)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("alignment must be power 
of 2")); + + newSize = p_FmPcd->ccShadowSize; + newAlign = p_FmPcd->ccShadowAlign; + + /* Check if current shadow is large enough to hold the requested size */ + if (size > p_FmPcd->ccShadowSize) + newSize = size; + + /* Check if current shadow matches the requested alignment */ + if (align > p_FmPcd->ccShadowAlign) + newAlign = align; + + /* If a bigger shadow size or bigger shadow alignment are required, + a new shadow will be allocated */ + if ((newSize != p_FmPcd->ccShadowSize) || (newAlign != p_FmPcd->ccShadowAlign)) + { + intFlags = FmPcdLock(p_FmPcd); + + if (p_FmPcd->p_CcShadow) + { + FM_MURAM_FreeMem(FmPcdGetMuramHandle(p_FmPcd), p_FmPcd->p_CcShadow); + p_FmPcd->ccShadowSize = 0; + p_FmPcd->ccShadowAlign = 0; + } + + p_FmPcd->p_CcShadow = FM_MURAM_AllocMem(FmPcdGetMuramHandle(p_FmPcd), + newSize, + newAlign); + if (!p_FmPcd->p_CcShadow) + { + allocFail = TRUE; + + /* If new shadow size allocation failed, + re-allocate with previous parameters */ + p_FmPcd->p_CcShadow = FM_MURAM_AllocMem(FmPcdGetMuramHandle(p_FmPcd), + p_FmPcd->ccShadowSize, + p_FmPcd->ccShadowAlign); + } + + FmPcdUnlock(p_FmPcd, intFlags); + + if (allocFail) + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC Shadow memory")); + + p_FmPcd->ccShadowSize = newSize; + p_FmPcd->ccShadowAlign = newAlign; + } + + return E_OK; +} + +#if (DPAA_VERSION >= 11) +void FmPcdCcGetAdTablesThatPointOnReplicGroup(t_Handle h_Node, + t_Handle h_ReplicGroup, + t_List *p_AdTables, + uint32_t *p_NumOfAdTables) +{ + t_FmPcdCcNode *p_CurrentNode = (t_FmPcdCcNode *)h_Node; + int i = 0; + void * p_AdTable; + t_CcNodeInformation ccNodeInfo; + + ASSERT_COND(h_Node); + *p_NumOfAdTables = 0; + + /* search in the current node which exact index points on this current replicator group for getting AD */ + for (i = 0; i < p_CurrentNode->numOfKeys + 1; i++) + { + if ((p_CurrentNode->keyAndNextEngineParams[i].nextEngineParams.nextEngine == e_FM_PCD_FR) && + 
((p_CurrentNode->keyAndNextEngineParams[i].nextEngineParams.params.frParams.h_FrmReplic == (t_Handle)h_ReplicGroup))) + { + /* save the current ad table in the list */ + /* this entry uses the input replicator group */ + p_AdTable = PTR_MOVE(p_CurrentNode->h_AdTable, i*FM_PCD_CC_AD_ENTRY_SIZE); + memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation)); + ccNodeInfo.h_CcNode = p_AdTable; + EnqueueNodeInfoToRelevantLst(p_AdTables, &ccNodeInfo, NULL); + (*p_NumOfAdTables)++; + } + } + + ASSERT_COND(i != p_CurrentNode->numOfKeys); +} +#endif /* (DPAA_VERSION >= 11) */ +/*********************** End of inter-module routines ************************/ + + +/****************************************/ +/* API Init unit functions */ +/****************************************/ + +t_Handle FM_PCD_CcRootBuild(t_Handle h_FmPcd, t_FmPcdCcTreeParams *p_PcdGroupsParam) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; + t_Error err = E_OK; + int i = 0, j = 0, k = 0; + t_FmPcdCcTree *p_FmPcdCcTree; + uint8_t numOfEntries; + t_Handle p_CcTreeTmp; + t_FmPcdCcGrpParams *p_FmPcdCcGroupParams; + t_FmPcdCcKeyAndNextEngineParams *p_Params, *p_KeyAndNextEngineParams; + t_NetEnvParams netEnvParams; + uint8_t lastOne = 0; + uint32_t requiredAction = 0; + t_FmPcdCcNode *p_FmPcdCcNextNode; + t_CcNodeInformation ccNodeInfo, *p_CcInformation; + + SANITY_CHECK_RETURN_VALUE(h_FmPcd,E_INVALID_HANDLE, NULL); + SANITY_CHECK_RETURN_VALUE(p_PcdGroupsParam,E_INVALID_HANDLE, NULL); + + if (p_PcdGroupsParam->numOfGrps > FM_PCD_MAX_NUM_OF_CC_GROUPS) + { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("numOfGrps should not exceed %d", FM_PCD_MAX_NUM_OF_CC_GROUPS)); + return NULL; + } + + p_FmPcdCcTree = (t_FmPcdCcTree*)XX_Malloc(sizeof(t_FmPcdCcTree)); + if (!p_FmPcdCcTree) + { + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("PCD tree structure")); + return NULL; + } + memset(p_FmPcdCcTree, 0, sizeof(t_FmPcdCcTree)); + p_FmPcdCcTree->h_FmPcd = h_FmPcd; + + p_Params = (t_FmPcdCcKeyAndNextEngineParams*)XX_Malloc(FM_PCD_MAX_NUM_OF_CC_GROUPS * 
sizeof(t_FmPcdCcKeyAndNextEngineParams)); + memset(p_Params, 0, FM_PCD_MAX_NUM_OF_CC_GROUPS * sizeof(t_FmPcdCcKeyAndNextEngineParams)); + + INIT_LIST(&p_FmPcdCcTree->fmPortsLst); + +#ifdef FM_CAPWAP_SUPPORT + if ((p_PcdGroupsParam->numOfGrps == 1) && + (p_PcdGroupsParam->ccGrpParams[0].numOfDistinctionUnits == 0) && + (p_PcdGroupsParam->ccGrpParams[0].nextEnginePerEntriesInGrp[0].nextEngine == e_FM_PCD_CC) && + p_PcdGroupsParam->ccGrpParams[0].nextEnginePerEntriesInGrp[0].params.ccParams.h_CcNode && + IsCapwapApplSpecific(p_PcdGroupsParam->ccGrpParams[0].nextEnginePerEntriesInGrp[0].params.ccParams.h_CcNode)) + { + p_PcdGroupsParam->ccGrpParams[0].nextEnginePerEntriesInGrp[0].h_Manip = FmPcdManipApplSpecificBuild(); + if (!p_PcdGroupsParam->ccGrpParams[0].nextEnginePerEntriesInGrp[0].h_Manip) + { + DeleteTree(p_FmPcdCcTree,p_FmPcd); + XX_Free(p_Params); + REPORT_ERROR(MAJOR, E_INVALID_STATE, NO_MSG); + return NULL; + } + } +#endif /* FM_CAPWAP_SUPPORT */ + + numOfEntries = 0; + p_FmPcdCcTree->netEnvId = FmPcdGetNetEnvId(p_PcdGroupsParam->h_NetEnv); + + for (i = 0; i < p_PcdGroupsParam->numOfGrps; i++) + { + p_FmPcdCcGroupParams = &p_PcdGroupsParam->ccGrpParams[i]; + + if (p_FmPcdCcGroupParams->numOfDistinctionUnits > FM_PCD_MAX_NUM_OF_CC_UNITS) + { + DeleteTree(p_FmPcdCcTree,p_FmPcd); + XX_Free(p_Params); + REPORT_ERROR(MAJOR, E_INVALID_VALUE, + ("numOfDistinctionUnits (group %d) should not exceed %d", i, FM_PCD_MAX_NUM_OF_CC_UNITS)); + return NULL; + } + + p_FmPcdCcTree->fmPcdGroupParam[i].baseGroupEntry = numOfEntries; + p_FmPcdCcTree->fmPcdGroupParam[i].numOfEntriesInGroup =(uint8_t)( 0x01 << p_FmPcdCcGroupParams->numOfDistinctionUnits); + numOfEntries += p_FmPcdCcTree->fmPcdGroupParam[i].numOfEntriesInGroup; + if (numOfEntries > FM_PCD_MAX_NUM_OF_CC_GROUPS) + { + DeleteTree(p_FmPcdCcTree,p_FmPcd); + XX_Free(p_Params); + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("numOfEntries can not be larger than %d", FM_PCD_MAX_NUM_OF_CC_GROUPS)); + return NULL; + } + + if 
(lastOne) + { + if (p_FmPcdCcTree->fmPcdGroupParam[i].numOfEntriesInGroup > lastOne) + { + DeleteTree(p_FmPcdCcTree,p_FmPcd); + XX_Free(p_Params); + REPORT_ERROR(MAJOR, E_CONFLICT, ("numOfEntries per group must be set in descending order")); + return NULL; + } + } + + lastOne = p_FmPcdCcTree->fmPcdGroupParam[i].numOfEntriesInGroup; + + netEnvParams.netEnvId = p_FmPcdCcTree->netEnvId; + netEnvParams.numOfDistinctionUnits = p_FmPcdCcGroupParams->numOfDistinctionUnits; + + memcpy(netEnvParams.unitIds, + &p_FmPcdCcGroupParams->unitIds, + (sizeof(uint8_t)) * p_FmPcdCcGroupParams->numOfDistinctionUnits); + + err = PcdGetUnitsVector(p_FmPcd, &netEnvParams); + if (err) + { + DeleteTree(p_FmPcdCcTree,p_FmPcd); + XX_Free(p_Params); + REPORT_ERROR(MAJOR, err, NO_MSG); + return NULL; + } + + p_FmPcdCcTree->fmPcdGroupParam[i].totalBitsMask = netEnvParams.vector; + for (j = 0; j < p_FmPcdCcTree->fmPcdGroupParam[i].numOfEntriesInGroup; j++) + { + err = ValidateNextEngineParams(h_FmPcd, + &p_FmPcdCcGroupParams->nextEnginePerEntriesInGrp[j], + e_FM_PCD_CC_STATS_MODE_NONE); + if (err) + { + DeleteTree(p_FmPcdCcTree,p_FmPcd); + XX_Free(p_Params); + REPORT_ERROR(MAJOR, err, (NO_MSG)); + return NULL; + } + + if (p_FmPcdCcGroupParams->nextEnginePerEntriesInGrp[j].h_Manip) + { + err = FmPcdManipCheckParamsForCcNextEngine(&p_FmPcdCcGroupParams->nextEnginePerEntriesInGrp[j], &requiredAction); + if (err) + { + DeleteTree(p_FmPcdCcTree,p_FmPcd); + XX_Free(p_Params); + REPORT_ERROR(MAJOR, E_INVALID_STATE, NO_MSG); + return NULL; + } + } + p_KeyAndNextEngineParams = p_Params+k; + + memcpy(&p_KeyAndNextEngineParams->nextEngineParams, + &p_FmPcdCcGroupParams->nextEnginePerEntriesInGrp[j], + sizeof(t_FmPcdCcNextEngineParams)); + + if ((p_KeyAndNextEngineParams->nextEngineParams.nextEngine == e_FM_PCD_CC) + && p_KeyAndNextEngineParams->nextEngineParams.h_Manip) + { + err = AllocAndFillAdForContLookupManip(p_KeyAndNextEngineParams->nextEngineParams.params.ccParams.h_CcNode); + if (err) + { + 
DeleteTree(p_FmPcdCcTree,p_FmPcd); + XX_Free(p_Params); + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC Tree")); + return NULL; + } + } + + requiredAction |= UPDATE_CC_WITH_TREE; + p_KeyAndNextEngineParams->requiredAction = requiredAction; + + k++; + } + } + + p_FmPcdCcTree->numOfEntries = (uint8_t)k; + p_FmPcdCcTree->numOfGrps = p_PcdGroupsParam->numOfGrps; + + p_FmPcdCcTree->ccTreeBaseAddr = + PTR_TO_UINT(FM_MURAM_AllocMem(FmPcdGetMuramHandle(h_FmPcd), + (uint32_t)( FM_PCD_MAX_NUM_OF_CC_GROUPS * FM_PCD_CC_AD_ENTRY_SIZE), + FM_PCD_CC_TREE_ADDR_ALIGN)); + if (!p_FmPcdCcTree->ccTreeBaseAddr) + { + DeleteTree(p_FmPcdCcTree,p_FmPcd); + XX_Free(p_Params); + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC Tree")); + return NULL; + } + IOMemSet32(UINT_TO_PTR(p_FmPcdCcTree->ccTreeBaseAddr), 0, (uint32_t)(FM_PCD_MAX_NUM_OF_CC_GROUPS * FM_PCD_CC_AD_ENTRY_SIZE)); + + p_CcTreeTmp = UINT_TO_PTR(p_FmPcdCcTree->ccTreeBaseAddr); + + j = 0; + for (i = 0; i < numOfEntries; i++) + { + p_KeyAndNextEngineParams = p_Params + i; + + NextStepAd(p_CcTreeTmp, + NULL, + &p_KeyAndNextEngineParams->nextEngineParams, + p_FmPcd); + + p_CcTreeTmp = PTR_MOVE(p_CcTreeTmp, FM_PCD_CC_AD_ENTRY_SIZE); + + memcpy(&p_FmPcdCcTree->keyAndNextEngineParams[i], + p_KeyAndNextEngineParams, + sizeof(t_FmPcdCcKeyAndNextEngineParams)); + + if (p_FmPcdCcTree->keyAndNextEngineParams[i].nextEngineParams.nextEngine== e_FM_PCD_CC) + { + p_FmPcdCcNextNode = (t_FmPcdCcNode*)p_FmPcdCcTree->keyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode; + p_CcInformation = FindNodeInfoInReleventLst(&p_FmPcdCcNextNode->ccTreeIdLst, + (t_Handle)p_FmPcdCcTree, + p_FmPcdCcNextNode->h_Spinlock); + + if (!p_CcInformation) + { + memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation)); + ccNodeInfo.h_CcNode = (t_Handle)p_FmPcdCcTree; + ccNodeInfo.index = 1; + EnqueueNodeInfoToRelevantLst(&p_FmPcdCcNextNode->ccTreeIdLst, + &ccNodeInfo, + p_FmPcdCcNextNode->h_Spinlock); + } + else + 
p_CcInformation->index++; + } + } + + FmPcdIncNetEnvOwners(h_FmPcd, p_FmPcdCcTree->netEnvId); + p_CcTreeTmp = UINT_TO_PTR(p_FmPcdCcTree->ccTreeBaseAddr); + + if (!FmPcdLockTryLockAll(p_FmPcd)) + { + FM_PCD_CcRootDelete(p_FmPcdCcTree); + XX_Free(p_Params); + DBG(TRACE, ("FmPcdLockTryLockAll failed")); + return NULL; + } + + for (i = 0; i < numOfEntries; i++) + { + if (p_FmPcdCcTree->keyAndNextEngineParams[i].requiredAction) + { + err = SetRequiredAction(h_FmPcd, + p_FmPcdCcTree->keyAndNextEngineParams[i].requiredAction, + &p_FmPcdCcTree->keyAndNextEngineParams[i], + p_CcTreeTmp, + 1, + p_FmPcdCcTree); + if (err) + { + FmPcdLockUnlockAll(p_FmPcd); + FM_PCD_CcRootDelete(p_FmPcdCcTree); + XX_Free(p_Params); + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("No memory")); + return NULL; + } + p_CcTreeTmp = PTR_MOVE(p_CcTreeTmp, FM_PCD_CC_AD_ENTRY_SIZE); + } + } + + FmPcdLockUnlockAll(p_FmPcd); + p_FmPcdCcTree->p_Lock = FmPcdAcquireLock(p_FmPcd); + if (!p_FmPcdCcTree->p_Lock) + { + FM_PCD_CcRootDelete(p_FmPcdCcTree); + XX_Free(p_Params); + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM CC lock")); + return NULL; + } + + XX_Free(p_Params); + + return p_FmPcdCcTree; +} + +t_Error FM_PCD_CcRootDelete(t_Handle h_CcTree) +{ + t_FmPcd *p_FmPcd; + t_FmPcdCcTree *p_CcTree = (t_FmPcdCcTree *)h_CcTree; + int i= 0; + + SANITY_CHECK_RETURN_ERROR(p_CcTree,E_INVALID_STATE); + p_FmPcd = (t_FmPcd *)p_CcTree->h_FmPcd; + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + + FmPcdDecNetEnvOwners(p_FmPcd, p_CcTree->netEnvId); + + if (p_CcTree->owners) + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("the tree with this ID can not be removed because this tree is occupied, first - unbind this tree")); + + /* Delete reassembly schemes if exist */ + if (p_CcTree->h_IpReassemblyManip) + { + FmPcdManipDeleteIpReassmSchemes(p_CcTree->h_IpReassemblyManip); + FmPcdManipUpdateOwner(p_CcTree->h_IpReassemblyManip, FALSE); + } + + for (i = 0; i <p_CcTree->numOfEntries; i++) + { + if 
(p_CcTree->keyAndNextEngineParams[i].nextEngineParams.nextEngine == e_FM_PCD_CC) + UpdateNodeOwner(p_CcTree->keyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode, FALSE); + + if (p_CcTree->keyAndNextEngineParams[i].nextEngineParams.h_Manip) + FmPcdManipUpdateOwner(p_CcTree->keyAndNextEngineParams[i].nextEngineParams.h_Manip, FALSE); + +#ifdef FM_CAPWAP_SUPPORT + if ((p_CcTree->numOfGrps == 1) && + (p_CcTree->fmPcdGroupParam[0].numOfEntriesInGroup == 1) && + (p_CcTree->keyAndNextEngineParams[0].nextEngineParams.nextEngine == e_FM_PCD_CC) && + p_CcTree->keyAndNextEngineParams[0].nextEngineParams.params.ccParams.h_CcNode && + IsCapwapApplSpecific(p_CcTree->keyAndNextEngineParams[0].nextEngineParams.params.ccParams.h_CcNode)) + { + if (FM_PCD_ManipNodeDelete(p_CcTree->keyAndNextEngineParams[0].nextEngineParams.h_Manip) != E_OK) + return E_INVALID_STATE; + } +#endif /* FM_CAPWAP_SUPPORT */ + +#if (DPAA_VERSION >= 11) + if ((p_CcTree->keyAndNextEngineParams[i].nextEngineParams.nextEngine == e_FM_PCD_FR) && + (p_CcTree->keyAndNextEngineParams[i].nextEngineParams.params.frParams.h_FrmReplic)) + FrmReplicGroupUpdateOwner(p_CcTree->keyAndNextEngineParams[i].nextEngineParams.params.frParams.h_FrmReplic, + FALSE); +#endif /* (DPAA_VERSION >= 11) */ + } + + if (p_CcTree->p_Lock) + FmPcdReleaseLock(p_CcTree->h_FmPcd, p_CcTree->p_Lock); + + DeleteTree(p_CcTree, p_FmPcd); + + return E_OK; +} + +t_Error FM_PCD_CcRootModifyNextEngine(t_Handle h_CcTree, + uint8_t grpId, + uint8_t index, + t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams) +{ + t_FmPcd *p_FmPcd; + t_FmPcdCcTree *p_CcTree = (t_FmPcdCcTree *)h_CcTree; + t_Error err = E_OK; + + SANITY_CHECK_RETURN_ERROR(p_FmPcdCcNextEngineParams, E_NULL_POINTER); + SANITY_CHECK_RETURN_ERROR(p_CcTree,E_INVALID_STATE); + p_FmPcd = (t_FmPcd *)p_CcTree->h_FmPcd; + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + + if (!FmPcdLockTryLockAll(p_FmPcd)) + { + DBG(TRACE, ("FmPcdLockTryLockAll failed")); + return 
ERROR_CODE(E_BUSY); + } + + err = FmPcdCcModifyNextEngineParamTree(p_FmPcd, + p_CcTree, + grpId, + index, + p_FmPcdCcNextEngineParams); + FmPcdLockUnlockAll(p_FmPcd); + + if (err) + { + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + return E_OK; +} + +t_Handle FM_PCD_MatchTableSet(t_Handle h_FmPcd, t_FmPcdCcNodeParams *p_CcNodeParam) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *) h_FmPcd; + t_FmPcdCcNode *p_CcNode, *p_FmPcdCcNextNode; + t_Error err = E_OK; + uint32_t tmp, keySize; + bool glblMask = FALSE; + t_FmPcdCcKeyParams *p_KeyParams; + t_Handle h_FmMuram, p_KeysMatchTblTmp, p_AdTableTmp; +#if (DPAA_VERSION >= 11) + t_Handle h_StatsFLRs; +#endif /* (DPAA_VERSION >= 11) */ + bool fullField = FALSE; + ccPrivateInfo_t icCode = CC_PRIVATE_INFO_NONE; + bool isKeyTblAlloc, fromIc = FALSE; + uint32_t matchTableSize, adTableSize; + t_CcNodeInformation ccNodeInfo, *p_CcInformation; + t_FmPcdStatsObj *p_StatsObj; + t_FmPcdCcStatsParams statsParams = {0}; + t_Handle h_Manip; + + SANITY_CHECK_RETURN_VALUE(h_FmPcd,E_INVALID_HANDLE,NULL); + + p_CcNode = (t_FmPcdCcNode*)XX_Malloc(sizeof(t_FmPcdCcNode)); + if (!p_CcNode) + { + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("No memory")); + return NULL; + } + memset(p_CcNode, 0, sizeof(t_FmPcdCcNode)); + + p_CcNode->p_GlblMask = (t_Handle)XX_Malloc(CC_GLBL_MASK_SIZE * sizeof(uint8_t)); + memset(p_CcNode->p_GlblMask, 0, CC_GLBL_MASK_SIZE * sizeof(uint8_t)); + + p_CcNode->h_FmPcd = h_FmPcd; + p_CcNode->numOfKeys = p_CcNodeParam->keysParams.numOfKeys; + p_CcNode->maxNumOfKeys = p_CcNodeParam->keysParams.maxNumOfKeys; + p_CcNode->maskSupport = p_CcNodeParam->keysParams.maskSupport; + p_CcNode->statisticsMode = p_CcNodeParam->keysParams.statisticsMode; + + /* For backward compatibility - even if statistics mode is nullified, + we'll fix it to frame mode so we can support per-key request for + statistics using 'statisticsEn' in next engine parameters */ + if (!p_CcNode->maxNumOfKeys && + (p_CcNode->statisticsMode == e_FM_PCD_CC_STATS_MODE_NONE)) + 
p_CcNode->statisticsMode = e_FM_PCD_CC_STATS_MODE_FRAME; + + h_FmMuram = FmPcdGetMuramHandle(h_FmPcd); + if (!h_FmMuram) + { + REPORT_ERROR(MAJOR, E_INVALID_HANDLE, ("FM MURAM")); + return NULL; + } + + INIT_LIST(&p_CcNode->ccPrevNodesLst); + INIT_LIST(&p_CcNode->ccTreeIdLst); + INIT_LIST(&p_CcNode->ccTreesLst); + INIT_LIST(&p_CcNode->availableStatsLst); + + p_CcNode->h_Spinlock = XX_InitSpinlock(); + if (!p_CcNode->h_Spinlock) + { + DeleteNode(p_CcNode); + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("CC node spinlock")); + return NULL; + } + + if ((p_CcNodeParam->extractCcParams.type == e_FM_PCD_EXTRACT_BY_HDR) && + ((p_CcNodeParam->extractCcParams.extractByHdr.hdr == HEADER_TYPE_IPv4) || + (p_CcNodeParam->extractCcParams.extractByHdr.hdr == HEADER_TYPE_IPv6)) && + (p_CcNodeParam->extractCcParams.extractByHdr.type == e_FM_PCD_EXTRACT_FULL_FIELD) && + ((p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fullField.ipv6 == NET_HEADER_FIELD_IPv6_HOP_LIMIT) || + (p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fullField.ipv4 == NET_HEADER_FIELD_IPv4_TTL))) + { + err = Ipv4TtlOrIpv6HopLimitCheckParams(h_FmPcd, p_CcNodeParam, p_CcNode, &isKeyTblAlloc); + glblMask = FALSE; + } + else if ((p_CcNodeParam->extractCcParams.type == e_FM_PCD_EXTRACT_NON_HDR) && + ((p_CcNodeParam->extractCcParams.extractNonHdr.src == e_FM_PCD_EXTRACT_FROM_KEY) || + (p_CcNodeParam->extractCcParams.extractNonHdr.src == e_FM_PCD_EXTRACT_FROM_HASH) || + (p_CcNodeParam->extractCcParams.extractNonHdr.src == e_FM_PCD_EXTRACT_FROM_FLOW_ID))) + { + if ((p_CcNodeParam->extractCcParams.extractNonHdr.src == e_FM_PCD_EXTRACT_FROM_FLOW_ID) && + (p_CcNodeParam->extractCcParams.extractNonHdr.offset != 0)) + { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, + ("In the case of the extraction from e_FM_PCD_EXTRACT_FROM_FLOW_ID offset has to be 0")); + return NULL; + } + + icCode = IcDefineCode(p_CcNodeParam); + fromIc = TRUE; + if (icCode == CC_PRIVATE_INFO_NONE) + { + REPORT_ERROR(MAJOR, E_INVALID_STATE, + 
("user asked extraction from IC and field in internal context or action wasn't initialized in the right way")); + return NULL; + } + + if ((icCode == CC_PRIVATE_INFO_IC_DEQ_FQID_INDEX_LOOKUP) || + (icCode == CC_PRIVATE_INFO_IC_HASH_INDEX_LOOKUP)) + { + err = IcHashIndexedCheckParams(h_FmPcd, p_CcNodeParam, p_CcNode, &isKeyTblAlloc); + glblMask = TRUE; + } + else + { + err = CheckParams(h_FmPcd, p_CcNodeParam, p_CcNode, &isKeyTblAlloc); + if (p_CcNode->glblMaskSize) + glblMask = TRUE; + } + } + else + { + err = CheckParams(h_FmPcd, p_CcNodeParam, p_CcNode, &isKeyTblAlloc); + if (p_CcNode->glblMaskSize) + glblMask = TRUE; + } + + if (err) + { + DeleteNode(p_CcNode); + REPORT_ERROR(MAJOR, err, NO_MSG); + return NULL; + } + + switch (p_CcNodeParam->extractCcParams.type) + { + case (e_FM_PCD_EXTRACT_BY_HDR): + switch (p_CcNodeParam->extractCcParams.extractByHdr.type) + { + case (e_FM_PCD_EXTRACT_FULL_FIELD): + p_CcNode->parseCode = + GetFullFieldParseCode(p_CcNodeParam->extractCcParams.extractByHdr.hdr, + p_CcNodeParam->extractCcParams.extractByHdr.hdrIndex, + p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fullField); + GetSizeHeaderField(p_CcNodeParam->extractCcParams.extractByHdr.hdr, + p_CcNodeParam->extractCcParams.extractByHdr.hdrIndex, + p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fullField, + &p_CcNode->sizeOfExtraction); + fullField = TRUE; + if ((p_CcNode->parseCode != CC_PC_FF_TCI1) && + (p_CcNode->parseCode != CC_PC_FF_TCI2) && + (p_CcNode->parseCode != CC_PC_FF_MPLS1) && + (p_CcNode->parseCode != CC_PC_FF_MPLS1) && + (p_CcNode->parseCode != CC_PC_FF_IPV4IPTOS_TC1) && + (p_CcNode->parseCode != CC_PC_FF_IPV4IPTOS_TC2) && + (p_CcNode->parseCode != CC_PC_FF_IPTOS_IPV6TC1_IPV6FLOW1) && + (p_CcNode->parseCode != CC_PC_FF_IPDSCP) && + (p_CcNode->parseCode != CC_PC_FF_IPTOS_IPV6TC2_IPV6FLOW2) && + glblMask) + { + glblMask = FALSE; + p_CcNode->glblMaskSize = 4; + p_CcNode->lclMask = TRUE; + } + break; + + case 
(e_FM_PCD_EXTRACT_FROM_HDR): + p_CcNode->sizeOfExtraction = p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fromHdr.size; + p_CcNode->offset = p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fromHdr.offset; + p_CcNode->userOffset = p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fromHdr.offset; + p_CcNode->parseCode = + GetPrParseCode(p_CcNodeParam->extractCcParams.extractByHdr.hdr, + p_CcNodeParam->extractCcParams.extractByHdr.hdrIndex, + p_CcNode->offset,glblMask, + &p_CcNode->prsArrayOffset); + break; + + case (e_FM_PCD_EXTRACT_FROM_FIELD): + p_CcNode->offset = p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fromField.offset; + p_CcNode->userOffset = p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fromField.offset; + p_CcNode->sizeOfExtraction = p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fromField.size; + p_CcNode->parseCode = + GetFieldParseCode(p_CcNodeParam->extractCcParams.extractByHdr.hdr, + p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fromField.field, + p_CcNode->offset, + &p_CcNode->prsArrayOffset, + p_CcNodeParam->extractCcParams.extractByHdr.hdrIndex); + break; + + default: + DeleteNode(p_CcNode); + REPORT_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG); + return NULL; + } + break; + + case (e_FM_PCD_EXTRACT_NON_HDR): + /* get the field code for the generic extract */ + p_CcNode->sizeOfExtraction = p_CcNodeParam->extractCcParams.extractNonHdr.size; + p_CcNode->offset = p_CcNodeParam->extractCcParams.extractNonHdr.offset; + p_CcNode->userOffset = p_CcNodeParam->extractCcParams.extractNonHdr.offset; + p_CcNode->parseCode = + GetGenParseCode(h_FmPcd, + p_CcNodeParam->extractCcParams.extractNonHdr.src, + p_CcNode->offset, + glblMask, + &p_CcNode->prsArrayOffset, + fromIc,icCode); + + if (p_CcNode->parseCode == CC_PC_GENERIC_IC_HASH_INDEXED) + { + if ((p_CcNode->offset + p_CcNode->sizeOfExtraction) > 8) + { + DeleteNode(p_CcNode); + REPORT_ERROR(MAJOR, 
E_INVALID_SELECTION,("when node of the type CC_PC_GENERIC_IC_HASH_INDEXED offset + size can not be bigger then size of HASH 64 bits (8 bytes)")); + return NULL; + } + } + if ((p_CcNode->parseCode == CC_PC_GENERIC_IC_GMASK) || + (p_CcNode->parseCode == CC_PC_GENERIC_IC_HASH_INDEXED)) + { + p_CcNode->offset += p_CcNode->prsArrayOffset; + p_CcNode->prsArrayOffset = 0; + } + break; + + default: + DeleteNode(p_CcNode); + REPORT_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG); + return NULL; + } + + if (p_CcNode->parseCode == CC_PC_ILLEGAL) + { + DeleteNode(p_CcNode); + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("illegal extraction type")); + return NULL; + } + + if ((p_CcNode->sizeOfExtraction > FM_PCD_MAX_SIZE_OF_KEY) || + !p_CcNode->sizeOfExtraction) + { + DeleteNode(p_CcNode); + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("sizeOfExatrction can not be greater than 56 and not 0")); + return NULL; + } + + if (p_CcNodeParam->keysParams.keySize != p_CcNode->sizeOfExtraction) + { + DeleteNode(p_CcNode); + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("keySize has to be equal to sizeOfExtraction")); + return NULL; + } + + p_CcNode->userSizeOfExtraction = p_CcNode->sizeOfExtraction; + + if (!glblMask) + memset(p_CcNode->p_GlblMask, 0xff, CC_GLBL_MASK_SIZE * sizeof(uint8_t)); + + err = CheckAndSetManipParamsWithCcNodeParams(p_CcNode); + if (err != E_OK) + { + DeleteNode(p_CcNode); + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("keySize has to be equal to sizeOfExtraction")); + return NULL; + } + + /* Calculating matching table entry size by rounding up the user-defined size of extraction to valid entry size */ + GetCcExtractKeySize(p_CcNode->sizeOfExtraction, &p_CcNode->ccKeySizeAccExtraction); + + /* If local mask is used, it is stored next to each key in the keys match table */ + if (p_CcNode->lclMask) + keySize = (uint32_t)(2 * p_CcNode->ccKeySizeAccExtraction); + else + keySize = p_CcNode->ccKeySizeAccExtraction; + + /* Update CC shadow with maximal size required by this node */ + if 
(p_CcNode->maxNumOfKeys) + { + err = CalcAndUpdateCcShadow(p_CcNode, + isKeyTblAlloc, + &matchTableSize, + &adTableSize); + if (err != E_OK) + { + DeleteNode(p_CcNode); + REPORT_ERROR(MAJOR, err, NO_MSG); + return NULL; + } + + p_CcNode->keysMatchTableMaxSize = matchTableSize; + + if (p_CcNode->statisticsMode != e_FM_PCD_CC_STATS_MODE_NONE) + { + err = AllocStatsObjs(p_CcNode); + if (err != E_OK) + { + DeleteNode(p_CcNode); + REPORT_ERROR(MAJOR, err, NO_MSG); + return NULL; + } + } + + /* If manipulation will be initialized before this node, it will use the table + descriptor in the AD table of previous node and this node will need an extra + AD as his table descriptor. */ + p_CcNode->h_Ad = (t_Handle)FM_MURAM_AllocMem(h_FmMuram, + FM_PCD_CC_AD_ENTRY_SIZE, + FM_PCD_CC_AD_TABLE_ALIGN); + if (!p_CcNode->h_Ad) + { + DeleteNode(p_CcNode); + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC action descriptor")); + return NULL; + } + } + else + { + matchTableSize = (uint32_t)(keySize * sizeof(uint8_t) * (p_CcNode->numOfKeys + 1)); + adTableSize = (uint32_t)(FM_PCD_CC_AD_ENTRY_SIZE * (p_CcNode->numOfKeys + 1)); + } + +#if (DPAA_VERSION >= 11) + switch (p_CcNode->statisticsMode) + { + + case e_FM_PCD_CC_STATS_MODE_RMON: + /* If RMON statistics or RMON conditional statistics modes are requested, + allocate frame length ranges array */ + p_CcNode->h_StatsFLRs = + FM_MURAM_AllocMem(h_FmMuram, + (uint32_t)(p_CcNode->numOfStatsFLRs) * FM_PCD_CC_STATS_FLR_SIZE, + FM_PCD_CC_AD_TABLE_ALIGN); + + if (!p_CcNode->h_StatsFLRs) + { + DeleteNode(p_CcNode); + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC frame length ranges array")); + return NULL; + } + + /* Initialize using value received from the user */ + for (tmp = 0; tmp < p_CcNode->numOfStatsFLRs; tmp++) + { + h_StatsFLRs = PTR_MOVE(p_CcNode->h_StatsFLRs, tmp * FM_PCD_CC_STATS_FLR_SIZE); + + Mem2IOCpy32(h_StatsFLRs, + &(p_CcNodeParam->keysParams.frameLengthRanges[tmp]), + FM_PCD_CC_STATS_FLR_SIZE); + } + 
break; + + default: + break; + } +#endif /* (DPAA_VERSION >= 11) */ + + + /* Allocate keys match table. Not required for some CC nodes, for example for IPv4 TTL + identification, IPv6 hop count identification, etc. */ + if (isKeyTblAlloc) + { + p_CcNode->h_KeysMatchTable = + (t_Handle)FM_MURAM_AllocMem(h_FmMuram, + matchTableSize, + FM_PCD_CC_KEYS_MATCH_TABLE_ALIGN); + if (!p_CcNode->h_KeysMatchTable) + { + DeleteNode(p_CcNode); + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC node key match table")); + return NULL; + } + IOMemSet32((uint8_t *)p_CcNode->h_KeysMatchTable, + 0, + matchTableSize); + } + + /* Allocate action descriptors table */ + p_CcNode->h_AdTable = + (t_Handle)FM_MURAM_AllocMem(h_FmMuram, + adTableSize, + FM_PCD_CC_AD_TABLE_ALIGN); + if (!p_CcNode->h_AdTable) + { + DeleteNode(p_CcNode); + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC node action descriptors table")); + return NULL; + } + IOMemSet32((uint8_t *)p_CcNode->h_AdTable, 0, adTableSize); + + p_KeysMatchTblTmp = p_CcNode->h_KeysMatchTable; + p_AdTableTmp = p_CcNode->h_AdTable; + + /* For each key, create the key and the next step AD */ + for (tmp = 0; tmp < p_CcNode->numOfKeys; tmp++) + { + p_KeyParams = &p_CcNodeParam->keysParams.keyParams[tmp]; + + if (p_KeysMatchTblTmp) + { + /* Copy the key */ + Mem2IOCpy32((void*)p_KeysMatchTblTmp, p_KeyParams->p_Key, p_CcNode->sizeOfExtraction); + + /* Copy the key mask or initialize it to 0xFF..F */ + if (p_CcNode->lclMask && p_KeyParams->p_Mask) + { + Mem2IOCpy32(PTR_MOVE(p_KeysMatchTblTmp, + p_CcNode->ccKeySizeAccExtraction), /* User's size of extraction rounded up to a valid matching table entry size */ + p_KeyParams->p_Mask, + p_CcNode->sizeOfExtraction); /* Exact size of extraction as received from the user */ + } + else if (p_CcNode->lclMask) + { + IOMemSet32(PTR_MOVE(p_KeysMatchTblTmp, + p_CcNode->ccKeySizeAccExtraction), /* User's size of extraction rounded up to a valid matching table entry size */ + 0xff, + 
p_CcNode->sizeOfExtraction); /* Exact size of extraction as received from the user */ + } + + p_KeysMatchTblTmp = PTR_MOVE(p_KeysMatchTblTmp, keySize * sizeof(uint8_t)); + } + + /* Create the next action descriptor in the match table */ + if (p_KeyParams->ccNextEngineParams.statisticsEn) + { + p_StatsObj = GetStatsObj(p_CcNode); + ASSERT_COND(p_StatsObj); + + statsParams.h_StatsAd = p_StatsObj->h_StatsAd; + statsParams.h_StatsCounters = p_StatsObj->h_StatsCounters; +#if (DPAA_VERSION >= 11) + statsParams.h_StatsFLRs = p_CcNode->h_StatsFLRs; + +#endif /* (DPAA_VERSION >= 11) */ + NextStepAd(p_AdTableTmp, + &statsParams, + &p_KeyParams->ccNextEngineParams, + p_FmPcd); + + p_CcNode->keyAndNextEngineParams[tmp].p_StatsObj = p_StatsObj; + } + else + { + NextStepAd(p_AdTableTmp, + NULL, + &p_KeyParams->ccNextEngineParams, + p_FmPcd); + + p_CcNode->keyAndNextEngineParams[tmp].p_StatsObj = NULL; + } + + p_AdTableTmp = PTR_MOVE(p_AdTableTmp, FM_PCD_CC_AD_ENTRY_SIZE); + } + + /* Update next engine for the 'miss' entry */ + if (p_CcNodeParam->keysParams.ccNextEngineParamsForMiss.statisticsEn) + { + p_StatsObj = GetStatsObj(p_CcNode); + ASSERT_COND(p_StatsObj); + + statsParams.h_StatsAd = p_StatsObj->h_StatsAd; + statsParams.h_StatsCounters = p_StatsObj->h_StatsCounters; +#if (DPAA_VERSION >= 11) + statsParams.h_StatsFLRs = p_CcNode->h_StatsFLRs; + +#endif /* (DPAA_VERSION >= 11) */ + + NextStepAd(p_AdTableTmp, + &statsParams, + &p_CcNodeParam->keysParams.ccNextEngineParamsForMiss, + p_FmPcd); + + p_CcNode->keyAndNextEngineParams[tmp].p_StatsObj = p_StatsObj; + } + else + { + NextStepAd(p_AdTableTmp, + NULL, + &p_CcNodeParam->keysParams.ccNextEngineParamsForMiss, + p_FmPcd); + + p_CcNode->keyAndNextEngineParams[tmp].p_StatsObj = NULL; + } + + /* This parameter will be used to initialize the "key length" field in the action descriptor + that points to this node and it should be 0 for full field extraction */ + if (fullField == TRUE) + p_CcNode->sizeOfExtraction = 0; + + for 
(tmp = 0; tmp < MIN(p_CcNode->numOfKeys + 1, CC_MAX_NUM_OF_KEYS); tmp++) + { + if (p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.nextEngine == e_FM_PCD_CC) + { + p_FmPcdCcNextNode = (t_FmPcdCcNode*)p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.params.ccParams.h_CcNode; + p_CcInformation = FindNodeInfoInReleventLst(&p_FmPcdCcNextNode->ccPrevNodesLst, + (t_Handle)p_CcNode, + p_FmPcdCcNextNode->h_Spinlock); + if (!p_CcInformation) + { + memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation)); + ccNodeInfo.h_CcNode = (t_Handle)p_CcNode; + ccNodeInfo.index = 1; + EnqueueNodeInfoToRelevantLst(&p_FmPcdCcNextNode->ccPrevNodesLst, + &ccNodeInfo, + p_FmPcdCcNextNode->h_Spinlock); + } + else + p_CcInformation->index++; + + if (p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.h_Manip) + { + h_Manip = p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.h_Manip; + p_CcInformation = FindNodeInfoInReleventLst(FmPcdManipGetNodeLstPointedOnThisManip(h_Manip), + (t_Handle)p_CcNode, + FmPcdManipGetSpinlock(h_Manip)); + if (!p_CcInformation) + { + memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation)); + ccNodeInfo.h_CcNode = (t_Handle)p_CcNode; + ccNodeInfo.index = 1; + EnqueueNodeInfoToRelevantLst(FmPcdManipGetNodeLstPointedOnThisManip(h_Manip), + &ccNodeInfo, + FmPcdManipGetSpinlock(h_Manip)); + } + else + p_CcInformation->index++; + } + } + } + + p_AdTableTmp = p_CcNode->h_AdTable; + + if (!FmPcdLockTryLockAll(h_FmPcd)) + { + FM_PCD_MatchTableDelete((t_Handle)p_CcNode); + DBG(TRACE, ("FmPcdLockTryLockAll failed")); + return NULL; + } + + /* Required action for each next engine */ + for (tmp = 0; tmp < MIN(p_CcNode->numOfKeys + 1, CC_MAX_NUM_OF_KEYS); tmp++) + { + if (p_CcNode->keyAndNextEngineParams[tmp].requiredAction) + { + err = SetRequiredAction(h_FmPcd, + p_CcNode->keyAndNextEngineParams[tmp].requiredAction, + &p_CcNode->keyAndNextEngineParams[tmp], + p_AdTableTmp, + 1, + NULL); + if (err) + { + FmPcdLockUnlockAll(h_FmPcd); + 
FM_PCD_MatchTableDelete((t_Handle)p_CcNode); + REPORT_ERROR(MAJOR, err, NO_MSG); + return NULL; + } + p_AdTableTmp = PTR_MOVE(p_AdTableTmp, FM_PCD_CC_AD_ENTRY_SIZE); + } + } + + FmPcdLockUnlockAll(h_FmPcd); + return p_CcNode; +} + +t_Error FM_PCD_MatchTableDelete(t_Handle h_CcNode) +{ + t_FmPcd *p_FmPcd; + t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode; + int i = 0; + + SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE); + p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd; + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + + if (p_CcNode->owners) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("This node cannot be removed because it is occupied; first unbind this node")); + + for (i = 0; i < p_CcNode->numOfKeys; i++) + if (p_CcNode->keyAndNextEngineParams[i].nextEngineParams.nextEngine == e_FM_PCD_CC) + UpdateNodeOwner(p_CcNode->keyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode, FALSE); + + if (p_CcNode->keyAndNextEngineParams[i].nextEngineParams.nextEngine == e_FM_PCD_CC) + UpdateNodeOwner(p_CcNode->keyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode, FALSE); + + /* Handle also Miss entry */ + for (i = 0; i < p_CcNode->numOfKeys + 1; i++) + { + if (p_CcNode->keyAndNextEngineParams[i].nextEngineParams.h_Manip) + FmPcdManipUpdateOwner(p_CcNode->keyAndNextEngineParams[i].nextEngineParams.h_Manip, FALSE); + +#if (DPAA_VERSION >= 11) + if ((p_CcNode->keyAndNextEngineParams[i].nextEngineParams.nextEngine == e_FM_PCD_FR) && + (p_CcNode->keyAndNextEngineParams[i].nextEngineParams.params.frParams.h_FrmReplic)) + { + FrmReplicGroupUpdateOwner(p_CcNode->keyAndNextEngineParams[i].nextEngineParams.params.frParams.h_FrmReplic, + FALSE); + } +#endif /* (DPAA_VERSION >= 11) */ + } + + DeleteNode(p_CcNode); + + return E_OK; +} + +t_Error FM_PCD_MatchTableAddKey(t_Handle h_CcNode, + uint16_t keyIndex, + uint8_t keySize, + t_FmPcdCcKeyParams *p_KeyParams) +{ + t_FmPcd *p_FmPcd; + t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode; + t_Error err = E_OK; 
+ + SANITY_CHECK_RETURN_ERROR(p_KeyParams, E_NULL_POINTER); + SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE); + p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd; + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE); + + if (keyIndex == FM_PCD_LAST_KEY_INDEX) + keyIndex = p_CcNode->numOfKeys; + + if (!FmPcdLockTryLockAll(p_FmPcd)) + { + DBG(TRACE, ("FmPcdLockTryLockAll failed")); + return ERROR_CODE(E_BUSY); + } + + err = FmPcdCcAddKey(p_FmPcd, + p_CcNode, + keyIndex, + keySize, + p_KeyParams); + + FmPcdLockUnlockAll(p_FmPcd); + + switch (GET_ERROR_TYPE(err)) + { + case E_OK: + return E_OK; + + case E_BUSY: + DBG(TRACE, ("E_BUSY error")); + return ERROR_CODE(E_BUSY); + + default: + RETURN_ERROR(MAJOR, err, NO_MSG); + } +} + +t_Error FM_PCD_MatchTableRemoveKey(t_Handle h_CcNode, uint16_t keyIndex) +{ + t_FmPcd *p_FmPcd; + t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode; + t_Error err = E_OK; + + SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE); + p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd; + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE); + + if (!FmPcdLockTryLockAll(p_FmPcd)) + { + DBG(TRACE, ("FmPcdLockTryLockAll failed")); + return ERROR_CODE(E_BUSY); + } + + err = FmPcdCcRemoveKey(p_FmPcd, p_CcNode, keyIndex); + + FmPcdLockUnlockAll(p_FmPcd); + + switch (GET_ERROR_TYPE(err)) + { + case E_OK: + return E_OK; + + case E_BUSY: + DBG(TRACE, ("E_BUSY error")); + return ERROR_CODE(E_BUSY); + + default: + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + return E_OK; +} + +t_Error FM_PCD_MatchTableModifyKey(t_Handle h_CcNode, + uint16_t keyIndex, + uint8_t keySize, + uint8_t *p_Key, + uint8_t *p_Mask) +{ + t_FmPcd *p_FmPcd; + t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode; + t_List h_List; + t_Error err = E_OK; + + SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Key, E_NULL_POINTER); + p_FmPcd = (t_FmPcd 
*)p_CcNode->h_FmPcd; + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE); + + INIT_LIST(&h_List); + + err = FmPcdCcNodeTreeTryLock(p_FmPcd, p_CcNode, &h_List); + if (err) + { + DBG(TRACE, ("Node's trees lock failed")); + return ERROR_CODE(E_BUSY); + } + + err = FmPcdCcModifyKey(p_FmPcd, + p_CcNode, + keyIndex, + keySize, + p_Key, + p_Mask); + + FmPcdCcNodeTreeReleaseLock(p_FmPcd, &h_List); + + switch (GET_ERROR_TYPE(err)) + { + case E_OK: + return E_OK; + + case E_BUSY: + DBG(TRACE, ("E_BUSY error")); + return ERROR_CODE(E_BUSY); + + default: + RETURN_ERROR(MAJOR, err, NO_MSG); + } +} + +t_Error FM_PCD_MatchTableModifyNextEngine(t_Handle h_CcNode, + uint16_t keyIndex, + t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams) +{ + t_FmPcd *p_FmPcd; + t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode; + t_Error err = E_OK; + + SANITY_CHECK_RETURN_ERROR(p_FmPcdCcNextEngineParams, E_NULL_POINTER); + SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE); + p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd; + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE); + + if (!FmPcdLockTryLockAll(p_FmPcd)) + { + DBG(TRACE, ("FmPcdLockTryLockAll failed")); + return ERROR_CODE(E_BUSY); + } + + err = ModifyNextEngineParamNode(p_FmPcd, + p_CcNode, + keyIndex, + p_FmPcdCcNextEngineParams); + + FmPcdLockUnlockAll(p_FmPcd); + + switch (GET_ERROR_TYPE(err)) + { + case E_OK: + return E_OK; + + case E_BUSY: + DBG(TRACE, ("E_BUSY error")); + return ERROR_CODE(E_BUSY); + + default: + RETURN_ERROR(MAJOR, err, NO_MSG); + } +} + +t_Error FM_PCD_MatchTableModifyMissNextEngine(t_Handle h_CcNode, + t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams) +{ + t_FmPcd *p_FmPcd; + t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode; + t_Error err = E_OK; + + SANITY_CHECK_RETURN_ERROR(p_FmPcdCcNextEngineParams, E_NULL_POINTER); + SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE); + 
p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd; + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE); + + if (!FmPcdLockTryLockAll(p_FmPcd)) + { + DBG(TRACE, ("FmPcdLockTryLockAll failed")); + return ERROR_CODE(E_BUSY); + } + + err = FmPcdCcModifyMissNextEngineParamNode(p_FmPcd, + p_CcNode, + p_FmPcdCcNextEngineParams); + + FmPcdLockUnlockAll(p_FmPcd); + + switch (GET_ERROR_TYPE(err)) + { + case E_OK: + return E_OK; + + case E_BUSY: + DBG(TRACE, ("E_BUSY error")); + return ERROR_CODE(E_BUSY); + + default: + RETURN_ERROR(MAJOR, err, NO_MSG); + } +} + +t_Error FM_PCD_MatchTableModifyKeyAndNextEngine(t_Handle h_CcNode, + uint16_t keyIndex, + uint8_t keySize, + t_FmPcdCcKeyParams *p_KeyParams) +{ + t_FmPcd *p_FmPcd; + t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode; + t_Error err = E_OK; + + SANITY_CHECK_RETURN_ERROR(p_KeyParams, E_NULL_POINTER); + SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE); + p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd; + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE); + + if (!FmPcdLockTryLockAll(p_FmPcd)) + { + DBG(TRACE, ("FmPcdLockTryLockAll failed")); + return ERROR_CODE(E_BUSY); + } + + err = FmPcdCcModifyKeyAndNextEngine(p_FmPcd, + p_CcNode, + keyIndex, + keySize, + p_KeyParams); + + FmPcdLockUnlockAll(p_FmPcd); + + switch (GET_ERROR_TYPE(err)) + { + case E_OK: + return E_OK; + + case E_BUSY: + DBG(TRACE, ("E_BUSY error")); + return ERROR_CODE(E_BUSY); + + default: + RETURN_ERROR(MAJOR, err, NO_MSG); + } +} + + +t_Error FM_PCD_MatchTableFindNRemoveKey(t_Handle h_CcNode, + uint8_t keySize, + uint8_t *p_Key, + uint8_t *p_Mask) +{ + t_FmPcd *p_FmPcd; + t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode; + uint16_t keyIndex; + t_Error err; + + SANITY_CHECK_RETURN_ERROR(p_Key, E_NULL_POINTER); + SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE); + p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd; + 
SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE); + + if (!FmPcdLockTryLockAll(p_FmPcd)) + { + DBG(TRACE, ("FmPcdLockTryLockAll failed")); + return ERROR_CODE(E_BUSY); + } + + err = FindKeyIndex(p_CcNode, keySize, p_Key, p_Mask, &keyIndex); + if (GET_ERROR_TYPE(err) != E_OK) + { + FmPcdLockUnlockAll(p_FmPcd); + RETURN_ERROR(MAJOR, err, ("The received key and mask pair was not found in the match table of the provided node")); + } + + err = FmPcdCcRemoveKey(p_FmPcd, p_CcNode, keyIndex); + + FmPcdLockUnlockAll(p_FmPcd); + + switch (GET_ERROR_TYPE(err)) + { + case E_OK: + return E_OK; + + case E_BUSY: + DBG(TRACE, ("E_BUSY error")); + return ERROR_CODE(E_BUSY); + + default: + RETURN_ERROR(MAJOR, err, NO_MSG); + } +} + + +t_Error FM_PCD_MatchTableFindNModifyNextEngine(t_Handle h_CcNode, + uint8_t keySize, + uint8_t *p_Key, + uint8_t *p_Mask, + t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams) +{ + t_FmPcd *p_FmPcd; + t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode; + uint16_t keyIndex; + t_Error err; + + SANITY_CHECK_RETURN_ERROR(p_Key, E_NULL_POINTER); + SANITY_CHECK_RETURN_ERROR(p_FmPcdCcNextEngineParams, E_NULL_POINTER); + SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE); + p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd; + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE); + + if (!FmPcdLockTryLockAll(p_FmPcd)) + { + DBG(TRACE, ("FmPcdLockTryLockAll failed")); + return ERROR_CODE(E_BUSY); + } + + err = FindKeyIndex(p_CcNode, keySize, p_Key, p_Mask, &keyIndex); + if (GET_ERROR_TYPE(err) != E_OK) + { + FmPcdLockUnlockAll(p_FmPcd); + RETURN_ERROR(MAJOR, err, ("The received key and mask pair was not found in the match table of the provided node")); + } + + err = ModifyNextEngineParamNode(p_FmPcd, + p_CcNode, + keyIndex, + p_FmPcdCcNextEngineParams); + + FmPcdLockUnlockAll(p_FmPcd); + + switch (GET_ERROR_TYPE(err)) + { + case E_OK: + 
return E_OK; + + case E_BUSY: + DBG(TRACE, ("E_BUSY error")); + return ERROR_CODE(E_BUSY); + + default: + RETURN_ERROR(MAJOR, err, NO_MSG); + } +} + +t_Error FM_PCD_MatchTableFindNModifyKeyAndNextEngine(t_Handle h_CcNode, + uint8_t keySize, + uint8_t *p_Key, + uint8_t *p_Mask, + t_FmPcdCcKeyParams *p_KeyParams) +{ + t_FmPcd *p_FmPcd; + t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode; + uint16_t keyIndex; + t_Error err; + + SANITY_CHECK_RETURN_ERROR(p_Key, E_NULL_POINTER); + SANITY_CHECK_RETURN_ERROR(p_KeyParams, E_NULL_POINTER); + SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE); + p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd; + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE); + + if (!FmPcdLockTryLockAll(p_FmPcd)) + { + DBG(TRACE, ("FmPcdLockTryLockAll failed")); + return ERROR_CODE(E_BUSY); + } + + err = FindKeyIndex(p_CcNode, keySize, p_Key, p_Mask, &keyIndex); + if (GET_ERROR_TYPE(err) != E_OK) + { + FmPcdLockUnlockAll(p_FmPcd); + RETURN_ERROR(MAJOR, err, ("The received key and mask pair was not found in the match table of the provided node")); + } + + err = FmPcdCcModifyKeyAndNextEngine(p_FmPcd, + h_CcNode, + keyIndex, + keySize, + p_KeyParams); + + FmPcdLockUnlockAll(p_FmPcd); + + switch (GET_ERROR_TYPE(err)) + { + case E_OK: + return E_OK; + + case E_BUSY: + DBG(TRACE, ("E_BUSY error")); + return ERROR_CODE(E_BUSY); + + default: + RETURN_ERROR(MAJOR, err, NO_MSG); + } +} + +t_Error FM_PCD_MatchTableFindNModifyKey(t_Handle h_CcNode, + uint8_t keySize, + uint8_t *p_Key, + uint8_t *p_Mask, + uint8_t *p_NewKey, + uint8_t *p_NewMask) +{ + t_FmPcd *p_FmPcd; + t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode; + t_List h_List; + uint16_t keyIndex; + t_Error err; + + SANITY_CHECK_RETURN_ERROR(p_Key, E_NULL_POINTER); + SANITY_CHECK_RETURN_ERROR(p_NewKey, E_NULL_POINTER); + SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE); + p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd; + 
SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE); + + INIT_LIST(&h_List); + + err = FmPcdCcNodeTreeTryLock(p_FmPcd, p_CcNode, &h_List); + if (err) + { + DBG(TRACE, ("Node's trees lock failed")); + return ERROR_CODE(E_BUSY); + } + + err = FindKeyIndex(p_CcNode, keySize, p_Key, p_Mask, &keyIndex); + if (GET_ERROR_TYPE(err) != E_OK) + { + FmPcdCcNodeTreeReleaseLock(p_FmPcd, &h_List); + RETURN_ERROR(MAJOR, err, ("The received key and mask pair was not found in the " + "match table of the provided node")); + } + + err = FmPcdCcModifyKey(p_FmPcd, + p_CcNode, + keyIndex, + keySize, + p_NewKey, + p_NewMask); + + FmPcdCcNodeTreeReleaseLock(p_FmPcd, &h_List); + + switch (GET_ERROR_TYPE(err)) + { + case E_OK: + return E_OK; + + case E_BUSY: + DBG(TRACE, ("E_BUSY error")); + return ERROR_CODE(E_BUSY); + + default: + RETURN_ERROR(MAJOR, err, NO_MSG); + } +} + +t_Error FM_PCD_MatchTableGetNextEngine(t_Handle h_CcNode, + uint16_t keyIndex, + t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams) +{ + t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode; + + SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcdCcNextEngineParams, E_NULL_POINTER); + + if (keyIndex >= p_CcNode->numOfKeys) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("keyIndex exceeds current number of keys")); + + if (keyIndex > (FM_PCD_MAX_NUM_OF_KEYS - 1)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("keyIndex can not be larger than %d", (FM_PCD_MAX_NUM_OF_KEYS - 1))); + + memcpy(p_FmPcdCcNextEngineParams, + &p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams, + sizeof(t_FmPcdCcNextEngineParams)); + + return E_OK; +} + + +uint32_t FM_PCD_MatchTableGetKeyCounter(t_Handle h_CcNode, uint16_t keyIndex) +{ + t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode; + uint32_t *p_StatsCounters, frameCount; + uint32_t intFlags; + + SANITY_CHECK_RETURN_VALUE(p_CcNode, E_INVALID_HANDLE, 0); + + if (p_CcNode->statisticsMode == 
e_FM_PCD_CC_STATS_MODE_NONE) + { + REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Statistics were not enabled for this match table")); + return 0; + } + + if ((p_CcNode->statisticsMode != e_FM_PCD_CC_STATS_MODE_FRAME) && + (p_CcNode->statisticsMode != e_FM_PCD_CC_STATS_MODE_BYTE_AND_FRAME)) + { + REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Frame count is not supported in the statistics mode of this match table")); + return 0; + } + + intFlags = XX_LockIntrSpinlock(p_CcNode->h_Spinlock); + + if (keyIndex >= p_CcNode->numOfKeys) + { + XX_UnlockIntrSpinlock(p_CcNode->h_Spinlock, intFlags); + REPORT_ERROR(MAJOR, E_INVALID_STATE, ("The provided keyIndex exceeds the number of keys in this match table")); + return 0; + } + + if (!p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj) + { + XX_UnlockIntrSpinlock(p_CcNode->h_Spinlock, intFlags); + REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Statistics were not enabled for this key")); + return 0; + } + + p_StatsCounters = p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj->h_StatsCounters; + ASSERT_COND(p_StatsCounters); + + /* The first counter is byte counter, so we need to advance to the next counter */ + frameCount = GET_UINT32(*(uint32_t *)(PTR_MOVE(p_StatsCounters, + FM_PCD_CC_STATS_COUNTER_SIZE))); + + XX_UnlockIntrSpinlock(p_CcNode->h_Spinlock, intFlags); + + return frameCount; +} + +t_Error FM_PCD_MatchTableGetKeyStatistics(t_Handle h_CcNode, + uint16_t keyIndex, + t_FmPcdCcKeyStatistics *p_KeyStatistics) +{ + t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode; + uint32_t intFlags; + t_Error err; + + SANITY_CHECK_RETURN_ERROR(h_CcNode, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_KeyStatistics, E_NULL_POINTER); + + intFlags = XX_LockIntrSpinlock(p_CcNode->h_Spinlock); + + err = MatchTableGetKeyStatistics(p_CcNode, + keyIndex, + p_KeyStatistics); + + XX_UnlockIntrSpinlock(p_CcNode->h_Spinlock, intFlags); + + if (err != E_OK) + RETURN_ERROR(MAJOR, err, NO_MSG); + + return E_OK; +} + +t_Error 
FM_PCD_MatchTableFindNGetKeyStatistics(t_Handle h_CcNode, + uint8_t keySize, + uint8_t *p_Key, + uint8_t *p_Mask, + t_FmPcdCcKeyStatistics *p_KeyStatistics) +{ + t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode; + uint16_t keyIndex; + uint32_t intFlags; + t_Error err; + + SANITY_CHECK_RETURN_ERROR(p_Key, E_NULL_POINTER); + SANITY_CHECK_RETURN_ERROR(p_KeyStatistics, E_NULL_POINTER); + + intFlags = XX_LockIntrSpinlock(p_CcNode->h_Spinlock); + + err = FindKeyIndex(p_CcNode, keySize, p_Key, p_Mask, &keyIndex); + if (GET_ERROR_TYPE(err) != E_OK) + { + XX_UnlockIntrSpinlock(p_CcNode->h_Spinlock, intFlags); + RETURN_ERROR(MAJOR, err, ("The received key and mask pair was not found in the " + "match table of the provided node")); + } + + err = MatchTableGetKeyStatistics(p_CcNode, + keyIndex, + p_KeyStatistics); + + XX_UnlockIntrSpinlock(p_CcNode->h_Spinlock, intFlags); + + if (err != E_OK) + RETURN_ERROR(MAJOR, err, NO_MSG); + + return E_OK; +} + +t_Error FM_PCD_MatchTableGetIndexedHashBucket(t_Handle h_CcNode, + uint8_t keySize, + uint8_t *p_Key, + uint8_t hashShift, + t_Handle *p_CcNodeBucketHandle, + uint8_t *p_BucketIndex, + uint16_t *p_LastIndex) +{ + t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode; + uint16_t glblMask; + uint64_t crc64 = 0; + + SANITY_CHECK_RETURN_ERROR(h_CcNode, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_CcNode->parseCode == CC_PC_GENERIC_IC_HASH_INDEXED, E_INVALID_STATE); + SANITY_CHECK_RETURN_ERROR(p_Key, E_NULL_POINTER); + SANITY_CHECK_RETURN_ERROR(p_CcNodeBucketHandle, E_NULL_POINTER); + + memcpy(&glblMask, PTR_MOVE(p_CcNode->p_GlblMask, 2), 2); + + crc64 = crc64_init(); + crc64 = crc64_compute(p_Key, keySize, crc64); + crc64 >>= hashShift; + + *p_BucketIndex = (uint8_t)(((crc64 >> (8 * (6 - p_CcNode->userOffset))) & glblMask) >> 4); + if (*p_BucketIndex >= p_CcNode->numOfKeys) + RETURN_ERROR(MINOR, E_NOT_IN_RANGE, ("bucket index!")); + + *p_CcNodeBucketHandle = 
p_CcNode->keyAndNextEngineParams[*p_BucketIndex].nextEngineParams.params.ccParams.h_CcNode; + if (!*p_CcNodeBucketHandle) + RETURN_ERROR(MINOR, E_NOT_FOUND, ("bucket!")); + + *p_LastIndex = ((t_FmPcdCcNode *)*p_CcNodeBucketHandle)->numOfKeys; + + return E_OK; +} + +t_Handle FM_PCD_HashTableSet(t_Handle h_FmPcd, t_FmPcdHashTableParams *p_Param) +{ + t_FmPcdCcNode *p_CcNodeHashTbl; + t_FmPcdCcNodeParams *p_IndxHashCcNodeParam, *p_ExactMatchCcNodeParam; + t_Handle h_CcNode; + t_FmPcdCcKeyParams *p_HashKeyParams; + int i; + uint16_t numOfSets, numOfWays, countMask, onesCount = 0; + + SANITY_CHECK_RETURN_VALUE(h_FmPcd, E_INVALID_HANDLE, NULL); + SANITY_CHECK_RETURN_VALUE(p_Param, E_NULL_POINTER, NULL); + + if (p_Param->maxNumOfKeys == 0) + { + REPORT_ERROR(MINOR, E_INVALID_VALUE, ("Max number of keys must be higher then 0")); + return NULL; + } + + if (p_Param->hashResMask == 0) + { + REPORT_ERROR(MINOR, E_INVALID_VALUE, ("Hash result mask must differ from 0")); + return NULL; + } + +#if (DPAA_VERSION >= 11) + if (p_Param->statisticsMode == e_FM_PCD_CC_STATS_MODE_RMON) + { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, + ("RMON statistics mode is not supported for hash table")); + return NULL; + } +#endif /* (DPAA_VERSION >= 11) */ + + p_ExactMatchCcNodeParam = (t_FmPcdCcNodeParams*)XX_Malloc(sizeof(t_FmPcdCcNodeParams)); + if (!p_ExactMatchCcNodeParam) + { + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("p_ExactMatchCcNodeParam")); + return NULL; + } + memset(p_ExactMatchCcNodeParam, 0, sizeof(t_FmPcdCcNodeParams)); + + p_IndxHashCcNodeParam = (t_FmPcdCcNodeParams*)XX_Malloc(sizeof(t_FmPcdCcNodeParams)); + if (!p_IndxHashCcNodeParam) + { + XX_Free(p_ExactMatchCcNodeParam); + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("p_IndxHashCcNodeParam")); + return NULL; + } + memset(p_IndxHashCcNodeParam, 0, sizeof(t_FmPcdCcNodeParams)); + + /* Calculate number of sets and number of ways of the hash table */ + countMask = (uint16_t)(p_Param->hashResMask >> 4); + while (countMask) + { + onesCount++; + 
countMask = (uint16_t)(countMask >> 1); + } + + numOfSets = (uint16_t)(1 << onesCount); + numOfWays = (uint16_t)DIV_CEIL(p_Param->maxNumOfKeys, numOfSets); + + if (p_Param->maxNumOfKeys % numOfSets) + DBG(INFO, ("'maxNumOfKeys' is not a multiple of hash number of ways, so number of ways will be rounded up")); + + /* Building exact-match node params, will be used to create the hash buckets */ + p_ExactMatchCcNodeParam->extractCcParams.type = e_FM_PCD_EXTRACT_NON_HDR; + + p_ExactMatchCcNodeParam->extractCcParams.extractNonHdr.src = e_FM_PCD_EXTRACT_FROM_KEY; + p_ExactMatchCcNodeParam->extractCcParams.extractNonHdr.action = e_FM_PCD_ACTION_EXACT_MATCH; + p_ExactMatchCcNodeParam->extractCcParams.extractNonHdr.offset = 0; + p_ExactMatchCcNodeParam->extractCcParams.extractNonHdr.size = p_Param->matchKeySize; + + p_ExactMatchCcNodeParam->keysParams.maxNumOfKeys = numOfWays; + p_ExactMatchCcNodeParam->keysParams.maskSupport = FALSE; + p_ExactMatchCcNodeParam->keysParams.statisticsMode = p_Param->statisticsMode; + p_ExactMatchCcNodeParam->keysParams.numOfKeys = 0; + p_ExactMatchCcNodeParam->keysParams.keySize = p_Param->matchKeySize; + p_ExactMatchCcNodeParam->keysParams.ccNextEngineParamsForMiss = p_Param->ccNextEngineParamsForMiss; + + p_HashKeyParams = p_IndxHashCcNodeParam->keysParams.keyParams; + + for (i = 0; i < numOfSets; i++) + { + h_CcNode = FM_PCD_MatchTableSet(h_FmPcd, p_ExactMatchCcNodeParam); + if (!h_CcNode) + break; + + p_HashKeyParams[i].ccNextEngineParams.nextEngine = e_FM_PCD_CC; + p_HashKeyParams[i].ccNextEngineParams.statisticsEn = FALSE; + p_HashKeyParams[i].ccNextEngineParams.params.ccParams.h_CcNode = h_CcNode; + } + + if (i < numOfSets) + { + for (i = i-1; i >=0; i--) + FM_PCD_MatchTableDelete(p_HashKeyParams[i].ccNextEngineParams.params.ccParams.h_CcNode); + + REPORT_ERROR(MAJOR, E_NULL_POINTER, NO_MSG); + XX_Free(p_IndxHashCcNodeParam); + XX_Free(p_ExactMatchCcNodeParam); + return NULL; + } + + /* Creating indexed-hash CC node */ + 
p_IndxHashCcNodeParam->extractCcParams.type = e_FM_PCD_EXTRACT_NON_HDR; + p_IndxHashCcNodeParam->extractCcParams.extractNonHdr.src = e_FM_PCD_EXTRACT_FROM_HASH; + p_IndxHashCcNodeParam->extractCcParams.extractNonHdr.action = e_FM_PCD_ACTION_INDEXED_LOOKUP; + p_IndxHashCcNodeParam->extractCcParams.extractNonHdr.icIndxMask = p_Param->hashResMask; + p_IndxHashCcNodeParam->extractCcParams.extractNonHdr.offset = p_Param->hashShift; + p_IndxHashCcNodeParam->extractCcParams.extractNonHdr.size = 2; + + p_IndxHashCcNodeParam->keysParams.maxNumOfKeys = numOfSets; + p_IndxHashCcNodeParam->keysParams.maskSupport = FALSE; + p_IndxHashCcNodeParam->keysParams.statisticsMode = e_FM_PCD_CC_STATS_MODE_NONE; + p_IndxHashCcNodeParam->keysParams.numOfKeys = numOfSets; /* Number of keys of this node is number of sets of the hash */ + p_IndxHashCcNodeParam->keysParams.keySize = 2; + + p_CcNodeHashTbl = FM_PCD_MatchTableSet(h_FmPcd, p_IndxHashCcNodeParam); + + XX_Free(p_IndxHashCcNodeParam); + XX_Free(p_ExactMatchCcNodeParam); + + return p_CcNodeHashTbl; +} + +t_Error FM_PCD_HashTableDelete(t_Handle h_HashTbl) +{ + t_FmPcdCcNode *p_HashTbl = (t_FmPcdCcNode *)h_HashTbl; + t_Handle *p_HashBuckets; + uint16_t i, numOfBuckets; + t_Error err; + + SANITY_CHECK_RETURN_ERROR(p_HashTbl, E_INVALID_HANDLE); + + numOfBuckets = p_HashTbl->numOfKeys; + + p_HashBuckets = (t_Handle *)XX_Malloc(numOfBuckets * sizeof(t_Handle)); + if (!p_HashBuckets) + RETURN_ERROR(MAJOR, E_NO_MEMORY, NO_MSG); + + for (i = 0; i < numOfBuckets; i++) + p_HashBuckets[i] = p_HashTbl->keyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode; + + err = FM_PCD_MatchTableDelete(p_HashTbl); + + for (i = 0; i < numOfBuckets; i++) + err |= FM_PCD_MatchTableDelete(p_HashBuckets[i]); + + if (err) + RETURN_ERROR(MAJOR, err, NO_MSG); + + XX_Free(p_HashBuckets); + + return E_OK; +} + +t_Error FM_PCD_HashTableAddKey(t_Handle h_HashTbl, + uint8_t keySize, + t_FmPcdCcKeyParams *p_KeyParams) +{ + t_FmPcdCcNode *p_HashTbl = 
(t_FmPcdCcNode *)h_HashTbl;
    t_Handle h_HashBucket;
    uint8_t bucketIndex;
    uint16_t lastIndex;
    t_Error err;

    SANITY_CHECK_RETURN_ERROR(p_HashTbl, E_INVALID_HANDLE);
    SANITY_CHECK_RETURN_ERROR(p_KeyParams, E_NULL_POINTER);
    SANITY_CHECK_RETURN_ERROR(p_KeyParams->p_Key, E_NULL_POINTER);

    /* Buckets are exact-match nodes created with maskSupport = FALSE */
    if (p_KeyParams->p_Mask)
        RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Keys masks not supported for hash table"));

    /* Locate the bucket this key hashes into */
    err = FM_PCD_MatchTableGetIndexedHashBucket(p_HashTbl,
                                                keySize,
                                                p_KeyParams->p_Key,
                                                p_HashTbl->userOffset,
                                                &h_HashBucket,
                                                &bucketIndex,
                                                &lastIndex);
    if (err)
        RETURN_ERROR(MAJOR, err, NO_MSG);

    /* Append at the end of the bucket's key list */
    return FM_PCD_MatchTableAddKey(h_HashBucket,
                                   FM_PCD_LAST_KEY_INDEX,
                                   keySize,
                                   p_KeyParams);
}

/* Remove a key from the hash table: hash the key to its bucket and remove
   the matching entry from that bucket (NULL mask — exact match only). */
t_Error FM_PCD_HashTableRemoveKey(t_Handle h_HashTbl,
                                  uint8_t keySize,
                                  uint8_t *p_Key)
{
    t_FmPcdCcNode *p_HashTbl = (t_FmPcdCcNode *)h_HashTbl;
    t_Handle h_HashBucket;
    uint8_t bucketIndex;
    uint16_t lastIndex;
    t_Error err;

    SANITY_CHECK_RETURN_ERROR(p_HashTbl, E_INVALID_HANDLE);
    SANITY_CHECK_RETURN_ERROR(p_Key, E_NULL_POINTER);

    /* Locate the bucket this key hashes into */
    err = FM_PCD_MatchTableGetIndexedHashBucket(p_HashTbl,
                                                keySize,
                                                p_Key,
                                                p_HashTbl->userOffset,
                                                &h_HashBucket,
                                                &bucketIndex,
                                                &lastIndex);
    if (err)
        RETURN_ERROR(MAJOR, err, NO_MSG);

    return FM_PCD_MatchTableFindNRemoveKey(h_HashBucket,
                                           keySize,
                                           p_Key,
                                           NULL);
}

/* Change the next-engine parameters of an existing hash-table key: hash the
   key to its bucket, then modify the matching entry in that bucket. */
t_Error FM_PCD_HashTableModifyNextEngine(t_Handle h_HashTbl,
                                         uint8_t keySize,
                                         uint8_t *p_Key,
                                         t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams)
{
    t_FmPcdCcNode *p_HashTbl = (t_FmPcdCcNode *)h_HashTbl;
    t_Handle h_HashBucket;
    uint8_t bucketIndex;
    uint16_t lastIndex;
    t_Error err;

    SANITY_CHECK_RETURN_ERROR(p_HashTbl, E_INVALID_HANDLE);
    SANITY_CHECK_RETURN_ERROR(p_Key, E_NULL_POINTER);
    SANITY_CHECK_RETURN_ERROR(p_FmPcdCcNextEngineParams, E_NULL_POINTER);

    /* Locate the bucket this key hashes into */
    err = FM_PCD_MatchTableGetIndexedHashBucket(p_HashTbl,
                                                keySize,
                                                p_Key,
                                                p_HashTbl->userOffset,
                                                &h_HashBucket,
                                                &bucketIndex,
&lastIndex); + if (err) + RETURN_ERROR(MAJOR, err, NO_MSG); + + return FM_PCD_MatchTableFindNModifyNextEngine(h_HashBucket, + keySize, + p_Key, + NULL, + p_FmPcdCcNextEngineParams); +} + +t_Error FM_PCD_HashTableModifyMissNextEngine(t_Handle h_HashTbl, + t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams) +{ + t_FmPcdCcNode *p_HashTbl = (t_FmPcdCcNode *)h_HashTbl; + t_Handle h_HashBucket; + uint8_t i; + t_Error err; + + SANITY_CHECK_RETURN_ERROR(h_HashTbl, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcdCcNextEngineParams, E_NULL_POINTER); + + for (i = 0; i < p_HashTbl->numOfKeys; i++) + { + h_HashBucket = p_HashTbl->keyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode; + + err = FM_PCD_MatchTableModifyMissNextEngine(h_HashBucket, + p_FmPcdCcNextEngineParams); + if (err) + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + return E_OK; +} + + +t_Error FM_PCD_HashTableGetMissNextEngine(t_Handle h_HashTbl, + t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams) +{ + t_FmPcdCcNode *p_HashTbl = (t_FmPcdCcNode *)h_HashTbl; + t_FmPcdCcNode *p_HashBucket; + + SANITY_CHECK_RETURN_ERROR(p_HashTbl, E_INVALID_HANDLE); + + /* Miss next engine of each bucket was initialized with the next engine of the hash table */ + p_HashBucket = p_HashTbl->keyAndNextEngineParams[0].nextEngineParams.params.ccParams.h_CcNode; + + memcpy(p_FmPcdCcNextEngineParams, + &p_HashBucket->keyAndNextEngineParams[p_HashBucket->numOfKeys].nextEngineParams, + sizeof(t_FmPcdCcNextEngineParams)); + + return E_OK; +} + +t_Error FM_PCD_HashTableFindNGetKeyStatistics(t_Handle h_HashTbl, + uint8_t keySize, + uint8_t *p_Key, + t_FmPcdCcKeyStatistics *p_KeyStatistics) +{ + t_FmPcdCcNode *p_HashTbl = (t_FmPcdCcNode *)h_HashTbl; + t_Handle h_HashBucket; + uint8_t bucketIndex; + uint16_t lastIndex; + t_Error err; + + SANITY_CHECK_RETURN_ERROR(p_HashTbl, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Key, E_NULL_POINTER); + SANITY_CHECK_RETURN_ERROR(p_KeyStatistics, E_NULL_POINTER); + + err = 
FM_PCD_MatchTableGetIndexedHashBucket(p_HashTbl, + keySize, + p_Key, + p_HashTbl->userOffset, + &h_HashBucket, + &bucketIndex, + &lastIndex); + if (err) + RETURN_ERROR(MAJOR, err, NO_MSG); + + return FM_PCD_MatchTableFindNGetKeyStatistics(h_HashBucket, + keySize, + p_Key, + NULL, + p_KeyStatistics); +} diff --git a/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_cc.h b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_cc.h new file mode 100644 index 0000000..9efe721 --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_cc.h @@ -0,0 +1,390 @@ +/* + * Copyright 2008-2012 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + +/****************************************************************************** + @File fm_cc.h + + @Description FM PCD CC ... +*//***************************************************************************/ +#ifndef __FM_CC_H +#define __FM_CC_H + +#include "std_ext.h" +#include "error_ext.h" +#include "list_ext.h" + +#include "fm_pcd.h" + + +/***********************************************************************/ +/* Coarse classification defines */ +/***********************************************************************/ + +#define CC_MAX_NUM_OF_KEYS MAX(FM_PCD_MAX_NUM_OF_KEYS + 1, FM_PCD_MAX_NUM_OF_FLOWS) + +#define CC_PC_FF_MACDST 0x00 +#define CC_PC_FF_MACSRC 0x01 +#define CC_PC_FF_ETYPE 0x02 + +#define CC_PC_FF_TCI1 0x03 +#define CC_PC_FF_TCI2 0x04 + +#define CC_PC_FF_MPLS1 0x06 +#define CC_PC_FF_MPLS_LAST 0x07 + +#define CC_PC_FF_IPV4DST1 0x08 +#define CC_PC_FF_IPV4DST2 0x16 +#define CC_PC_FF_IPV4IPTOS_TC1 0x09 +#define CC_PC_FF_IPV4IPTOS_TC2 0x17 +#define CC_PC_FF_IPV4PTYPE1 0x0A +#define CC_PC_FF_IPV4PTYPE2 0x18 +#define CC_PC_FF_IPV4SRC1 0x0b +#define CC_PC_FF_IPV4SRC2 0x19 +#define CC_PC_FF_IPV4SRC1_IPV4DST1 0x0c +#define CC_PC_FF_IPV4SRC2_IPV4DST2 0x1a +#define CC_PC_FF_IPV4TTL 0x29 + + +#define CC_PC_FF_IPTOS_IPV6TC1_IPV6FLOW1 0x0d /*TODO - CLASS - what is it? 
TOS*/ +#define CC_PC_FF_IPTOS_IPV6TC2_IPV6FLOW2 0x1b +#define CC_PC_FF_IPV6PTYPE1 0x0e +#define CC_PC_FF_IPV6PTYPE2 0x1c +#define CC_PC_FF_IPV6DST1 0x0f +#define CC_PC_FF_IPV6DST2 0x1d +#define CC_PC_FF_IPV6SRC1 0x10 +#define CC_PC_FF_IPV6SRC2 0x1e +#define CC_PC_FF_IPV6HOP_LIMIT 0x2a +#define CC_PC_FF_IPPID 0x24 +#define CC_PC_FF_IPDSCP 0x76 + +#define CC_PC_FF_GREPTYPE 0x11 + +#define CC_PC_FF_MINENCAP_PTYPE 0x12 +#define CC_PC_FF_MINENCAP_IPDST 0x13 +#define CC_PC_FF_MINENCAP_IPSRC 0x14 +#define CC_PC_FF_MINENCAP_IPSRC_IPDST 0x15 + +#define CC_PC_FF_L4PSRC 0x1f +#define CC_PC_FF_L4PDST 0x20 +#define CC_PC_FF_L4PSRC_L4PDST 0x21 + +#define CC_PC_FF_PPPPID 0x05 + +#define CC_PC_PR_SHIM1 0x22 +#define CC_PC_PR_SHIM2 0x23 + +#define CC_PC_GENERIC_WITHOUT_MASK 0x27 +#define CC_PC_GENERIC_WITH_MASK 0x28 +#define CC_PC_GENERIC_IC_GMASK 0x2B +#define CC_PC_GENERIC_IC_HASH_INDEXED 0x2C + +#define CC_PR_OFFSET 0x25 +#define CC_PR_WITHOUT_OFFSET 0x26 + +#define CC_PC_PR_ETH_OFFSET 19 +#define CC_PC_PR_USER_DEFINED_SHIM1_OFFSET 16 +#define CC_PC_PR_USER_DEFINED_SHIM2_OFFSET 17 +#define CC_PC_PR_USER_LLC_SNAP_OFFSET 20 +#define CC_PC_PR_VLAN1_OFFSET 21 +#define CC_PC_PR_VLAN2_OFFSET 22 +#define CC_PC_PR_PPPOE_OFFSET 24 +#define CC_PC_PR_MPLS1_OFFSET 25 +#define CC_PC_PR_MPLS_LAST_OFFSET 26 +#define CC_PC_PR_IP1_OFFSET 27 +#define CC_PC_PR_IP_LAST_OFFSET 28 +#define CC_PC_PR_MINENC_OFFSET 28 +#define CC_PC_PR_L4_OFFSET 30 +#define CC_PC_PR_GRE_OFFSET 29 +#define CC_PC_PR_ETYPE_LAST_OFFSET 23 +#define CC_PC_PR_NEXT_HEADER_OFFSET 31 + +#define CC_PC_ILLEGAL 0xff +#define CC_SIZE_ILLEGAL 0 + +#define FM_PCD_CC_KEYS_MATCH_TABLE_ALIGN 16 +#define FM_PCD_CC_AD_TABLE_ALIGN 16 +#define FM_PCD_CC_AD_ENTRY_SIZE 16 +#define FM_PCD_CC_NUM_OF_KEYS 255 +#define FM_PCD_CC_TREE_ADDR_ALIGN 256 + +#define FM_PCD_AD_RESULT_CONTRL_FLOW_TYPE 0x00000000 +#define FM_PCD_AD_RESULT_DATA_FLOW_TYPE 0x80000000 +#define FM_PCD_AD_RESULT_PLCR_DIS 0x20000000 +#define FM_PCD_AD_RESULT_EXTENDED_MODE 
0x80000000 +#define FM_PCD_AD_RESULT_NADEN 0x20000000 +#define FM_PCD_AD_RESULT_STATISTICS_EN 0x40000000 + +#define FM_PCD_AD_CONT_LOOKUP_TYPE 0x40000000 +#define FM_PCD_AD_CONT_LOOKUP_LCL_MASK 0x00800000 + +#define FM_PCD_AD_STATS_TYPE 0x40000000 +#define FM_PCD_AD_STATS_FLR_ADDR_MASK 0x00FFFFFF +#define FM_PCD_AD_STATS_COUNTERS_ADDR_MASK 0x00FFFFFF +#define FM_PCD_AD_STATS_NEXT_ACTION_MASK 0xFFFF0000 +#define FM_PCD_AD_STATS_NEXT_ACTION_SHIFT 12 +#define FM_PCD_AD_STATS_NAD_EN 0x00008000 +#define FM_PCD_AD_STATS_OP_CODE 0x00000036 +#define FM_PCD_AD_STATS_FLR_EN 0x00004000 +#define FM_PCD_AD_STATS_COND_EN 0x00002000 + + + +#define FM_PCD_AD_BYPASS_TYPE 0xc0000000 + +#define FM_PCD_AD_TYPE_MASK 0xc0000000 +#define FM_PCD_AD_OPCODE_MASK 0x0000000f + +#define FM_PCD_AD_PROFILEID_FOR_CNTRL_SHIFT 16 +#if (DPAA_VERSION >= 11) +#define FM_PCD_AD_RESULT_VSP_SHIFT 24 +#define FM_PCD_AD_RESULT_NO_OM_VSPE 0x02000000 +#define FM_PCD_AD_RESULT_VSP_MASK 0x3f +#define FM_PCD_AD_NCSPFQIDM_MASK 0x80000000 +#endif /* (DPAA_VERSION >= 11) */ + +#define GLBL_MASK_FOR_HASH_INDEXED 0xfff00000 +#define CC_GLBL_MASK_SIZE 4 + +typedef uint32_t ccPrivateInfo_t; /**< private info of CC: */ + +#define CC_PRIVATE_INFO_NONE 0 +#define CC_PRIVATE_INFO_IC_HASH_INDEX_LOOKUP 0x80000000 +#define CC_PRIVATE_INFO_IC_HASH_EXACT_MATCH 0x40000000 +#define CC_PRIVATE_INFO_IC_KEY_EXACT_MATCH 0x20000000 +#define CC_PRIVATE_INFO_IC_DEQ_FQID_INDEX_LOOKUP 0x10000000 + +/***********************************************************************/ +/* Memory map */ +/***********************************************************************/ +#if defined(__MWERKS__) && !defined(__GNUC__) +#pragma pack(push,1) +#endif /* defined(__MWERKS__) && ... 
*/ + +typedef _Packed struct +{ + volatile uint32_t fqid; + volatile uint32_t plcrProfile; + volatile uint32_t nia; + volatile uint32_t res; +} _PackedType t_AdOfTypeResult; + +typedef _Packed struct +{ + volatile uint32_t ccAdBase; + volatile uint32_t matchTblPtr; + volatile uint32_t pcAndOffsets; + volatile uint32_t gmask; +} _PackedType t_AdOfTypeContLookup; + +typedef _Packed struct +{ + volatile uint32_t profileTableAddr; + volatile uint32_t reserved; + volatile uint32_t nextActionIndx; + volatile uint32_t statsTableAddr; +} _PackedType t_AdOfTypeStats; + +typedef _Packed union +{ + volatile t_AdOfTypeResult adResult; + volatile t_AdOfTypeContLookup adContLookup; +} _PackedType t_Ad; + +#if defined(__MWERKS__) && !defined(__GNUC__) +#pragma pack(pop) +#endif /* defined(__MWERKS__) && ... */ + + +/***********************************************************************/ +/* Driver's internal structures */ +/***********************************************************************/ + +typedef enum e_ModifyState +{ + e_MODIFY_STATE_ADD = 0, + e_MODIFY_STATE_REMOVE, + e_MODIFY_STATE_CHANGE +} e_ModifyState; + +typedef struct t_FmPcdStatsObj +{ + t_Handle h_StatsAd; + t_Handle h_StatsCounters; + t_List node; +} t_FmPcdStatsObj; + +typedef struct +{ + uint8_t key[FM_PCD_MAX_SIZE_OF_KEY]; + uint8_t mask[FM_PCD_MAX_SIZE_OF_KEY]; + + t_FmPcdCcNextEngineParams nextEngineParams; + uint32_t requiredAction; + uint32_t shadowAction; + + t_FmPcdStatsObj *p_StatsObj; + +} t_FmPcdCcKeyAndNextEngineParams; + +typedef struct +{ + t_Handle p_Ad; + e_FmPcdEngine fmPcdEngine; + bool adAllocated; + bool isTree; + + uint32_t myInfo; + t_List *h_CcNextNodesLst; + t_Handle h_AdditionalInfo; + t_Handle h_Node; +} t_FmPcdModifyCcAdditionalParams; + +typedef struct +{ + t_Handle p_AdTableNew; + t_Handle p_KeysMatchTableNew; + t_Handle p_AdTableOld; + t_Handle p_KeysMatchTableOld; + uint16_t numOfKeys; + t_Handle h_CurrentNode; + uint16_t savedKeyIndex; + t_Handle h_NodeForAdd; + t_Handle 
h_NodeForRmv; + t_Handle h_ManipForRmv; + t_Handle h_ManipForAdd; + t_FmPcdStatsObj *p_StatsObjForRmv; +#if (DPAA_VERSION >= 11) + t_Handle h_FrmReplicForAdd; + t_Handle h_FrmReplicForRmv; +#endif /* (DPAA_VERSION >= 11) */ + bool tree; + + t_FmPcdCcKeyAndNextEngineParams keyAndNextEngineParams[CC_MAX_NUM_OF_KEYS]; +} t_FmPcdModifyCcKeyAdditionalParams; + +typedef struct +{ + t_Handle h_Manip; + t_Handle h_CcNode; +} t_CcNextEngineInfo; + +typedef struct +{ + uint16_t numOfKeys; + uint16_t maxNumOfKeys; + + bool maskSupport; + uint32_t keysMatchTableMaxSize; + + e_FmPcdCcStatsMode statisticsMode; + uint32_t numOfStatsFLRs; + uint32_t countersArraySize; + + bool glblMaskUpdated; + t_Handle p_GlblMask; + bool lclMask; + uint8_t parseCode; + uint8_t offset; + uint8_t prsArrayOffset; + bool ctrlFlow; + uint8_t owners; + + uint8_t ccKeySizeAccExtraction; + uint8_t sizeOfExtraction; + uint8_t glblMaskSize; + + t_Handle h_KeysMatchTable; + t_Handle h_AdTable; + t_Handle h_StatsAds; + t_Handle h_Ad; + t_Handle h_StatsFLRs; + + t_List availableStatsLst; + + t_List ccPrevNodesLst; + + t_List ccTreeIdLst; + t_List ccTreesLst; + + t_Handle h_FmPcd; + uint32_t shadowAction; + uint8_t userSizeOfExtraction; + uint8_t userOffset; + + t_Handle h_Spinlock; + + t_FmPcdCcKeyAndNextEngineParams keyAndNextEngineParams[CC_MAX_NUM_OF_KEYS]; +} t_FmPcdCcNode; + +typedef struct +{ + t_FmPcdCcNode *p_FmPcdCcNode; + bool occupied; + uint8_t owners; + volatile bool lock; +} t_FmPcdCcNodeArray; + +typedef struct +{ + uint8_t numOfEntriesInGroup; + uint32_t totalBitsMask; + uint8_t baseGroupEntry; +} t_FmPcdCcGroupParam; + +typedef struct +{ + t_Handle h_FmPcd; + uint8_t netEnvId; + uintptr_t ccTreeBaseAddr; + uint8_t numOfGrps; + t_FmPcdCcGroupParam fmPcdGroupParam[FM_PCD_MAX_NUM_OF_CC_GROUPS]; + t_List fmPortsLst; + t_FmPcdLock *p_Lock; + uint8_t numOfEntries; + uint8_t owners; + t_Handle h_FmPcdCcSavedManipParams; + bool modifiedState; + uint32_t requiredAction; + t_Handle 
h_IpReassemblyManip; + + t_FmPcdCcKeyAndNextEngineParams keyAndNextEngineParams[FM_PCD_MAX_NUM_OF_CC_GROUPS]; +} t_FmPcdCcTree; + + +bool FmPcdManipIsManipNode(t_Handle h_Ad); +t_Error FmPcdCcNodeTreeTryLock(t_Handle h_FmPcd,t_Handle h_FmPcdCcNode, t_List *p_List); +void FmPcdCcNodeTreeReleaseLock(t_Handle h_FmPcd, t_List *p_List); +t_Error FmPcdUpdateCcShadow (t_FmPcd *p_FmPcd, uint32_t size, uint32_t align); + + +#endif /* __FM_CC_H */ diff --git a/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_kg.c b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_kg.c new file mode 100644 index 0000000..bdbc8ae --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_kg.c @@ -0,0 +1,3263 @@ +/* + * Copyright 2008-2012 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + +/****************************************************************************** + @File fm_kg.c + + @Description FM PCD ... +*//***************************************************************************/ +#include "std_ext.h" +#include "error_ext.h" +#include "string_ext.h" +#include "debug_ext.h" +#include "net_ext.h" +#include "fm_port_ext.h" + +#include "fm_common.h" +#include "fm_pcd.h" +#include "fm_hc.h" +#include "fm_pcd_ipc.h" +#include "fm_kg.h" +#include "fsl_fman_kg.h" + + +/****************************************/ +/* static functions */ +/****************************************/ + +static uint32_t KgHwLock(t_Handle h_FmPcdKg) +{ + ASSERT_COND(h_FmPcdKg); + return XX_LockIntrSpinlock(((t_FmPcdKg *)h_FmPcdKg)->h_HwSpinlock); +} + +static void KgHwUnlock(t_Handle h_FmPcdKg, uint32_t intFlags) +{ + ASSERT_COND(h_FmPcdKg); + XX_UnlockIntrSpinlock(((t_FmPcdKg *)h_FmPcdKg)->h_HwSpinlock, intFlags); +} + +static uint32_t KgSchemeLock(t_Handle h_Scheme) +{ + ASSERT_COND(h_Scheme); + return FmPcdLockSpinlock(((t_FmPcdKgScheme *)h_Scheme)->p_Lock); +} + +static void KgSchemeUnlock(t_Handle h_Scheme, uint32_t intFlags) +{ + ASSERT_COND(h_Scheme); + FmPcdUnlockSpinlock(((t_FmPcdKgScheme *)h_Scheme)->p_Lock, intFlags); +} + +static bool KgSchemeFlagTryLock(t_Handle h_Scheme) +{ + ASSERT_COND(h_Scheme); + return FmPcdLockTryLock(((t_FmPcdKgScheme *)h_Scheme)->p_Lock); +} + +static void KgSchemeFlagUnlock(t_Handle h_Scheme) +{ + 
ASSERT_COND(h_Scheme); + FmPcdLockUnlock(((t_FmPcdKgScheme *)h_Scheme)->p_Lock); +} + +static t_Error WriteKgarWait(t_FmPcd *p_FmPcd, uint32_t fmkg_ar) +{ + + struct fman_kg_regs *regs = p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs; + + if (fman_kg_write_ar_wait(regs, fmkg_ar)) + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Keygen scheme access violation")); + + return E_OK; +} + +static e_FmPcdKgExtractDfltSelect GetGenericSwDefault(t_FmPcdKgExtractDflt swDefaults[], uint8_t numOfSwDefaults, uint8_t code) +{ + int i; + + switch (code) + { + case (KG_SCH_GEN_PARSE_RESULT_N_FQID): + case (KG_SCH_GEN_DEFAULT): + case (KG_SCH_GEN_NEXTHDR): + for (i=0 ; i<numOfSwDefaults ; i++) + if (swDefaults[i].type == e_FM_PCD_KG_GENERIC_NOT_FROM_DATA) + return swDefaults[i].dfltSelect; + ASSERT_COND(FALSE); + case (KG_SCH_GEN_SHIM1): + case (KG_SCH_GEN_SHIM2): + case (KG_SCH_GEN_IP_PID_NO_V): + case (KG_SCH_GEN_ETH_NO_V): + case (KG_SCH_GEN_SNAP_NO_V): + case (KG_SCH_GEN_VLAN1_NO_V): + case (KG_SCH_GEN_VLAN2_NO_V): + case (KG_SCH_GEN_ETH_TYPE_NO_V): + case (KG_SCH_GEN_PPP_NO_V): + case (KG_SCH_GEN_MPLS1_NO_V): + case (KG_SCH_GEN_MPLS_LAST_NO_V): + case (KG_SCH_GEN_L3_NO_V): + case (KG_SCH_GEN_IP2_NO_V): + case (KG_SCH_GEN_GRE_NO_V): + case (KG_SCH_GEN_L4_NO_V): + for (i=0 ; i<numOfSwDefaults ; i++) + if (swDefaults[i].type == e_FM_PCD_KG_GENERIC_FROM_DATA_NO_V) + return swDefaults[i].dfltSelect; + + case (KG_SCH_GEN_START_OF_FRM): + case (KG_SCH_GEN_ETH): + case (KG_SCH_GEN_SNAP): + case (KG_SCH_GEN_VLAN1): + case (KG_SCH_GEN_VLAN2): + case (KG_SCH_GEN_ETH_TYPE): + case (KG_SCH_GEN_PPP): + case (KG_SCH_GEN_MPLS1): + case (KG_SCH_GEN_MPLS2): + case (KG_SCH_GEN_MPLS3): + case (KG_SCH_GEN_MPLS_LAST): + case (KG_SCH_GEN_IPV4): + case (KG_SCH_GEN_IPV6): + case (KG_SCH_GEN_IPV4_TUNNELED): + case (KG_SCH_GEN_IPV6_TUNNELED): + case (KG_SCH_GEN_MIN_ENCAP): + case (KG_SCH_GEN_GRE): + case (KG_SCH_GEN_TCP): + case (KG_SCH_GEN_UDP): + case (KG_SCH_GEN_IPSEC_AH): + case (KG_SCH_GEN_SCTP): + case 
(KG_SCH_GEN_DCCP): + case (KG_SCH_GEN_IPSEC_ESP): + for (i=0 ; i<numOfSwDefaults ; i++) + if (swDefaults[i].type == e_FM_PCD_KG_GENERIC_FROM_DATA) + return swDefaults[i].dfltSelect; + default: + return e_FM_PCD_KG_DFLT_ILLEGAL; + } +} + +static uint8_t GetGenCode(e_FmPcdExtractFrom src, uint8_t *p_Offset) +{ + *p_Offset = 0; + + switch (src) + { + case (e_FM_PCD_EXTRACT_FROM_FRAME_START): + return KG_SCH_GEN_START_OF_FRM; + case (e_FM_PCD_EXTRACT_FROM_DFLT_VALUE): + return KG_SCH_GEN_DEFAULT; + case (e_FM_PCD_EXTRACT_FROM_PARSE_RESULT): + return KG_SCH_GEN_PARSE_RESULT_N_FQID; + case (e_FM_PCD_EXTRACT_FROM_ENQ_FQID): + *p_Offset = 32; + return KG_SCH_GEN_PARSE_RESULT_N_FQID; + case (e_FM_PCD_EXTRACT_FROM_CURR_END_OF_PARSE): + return KG_SCH_GEN_NEXTHDR; + default: + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal 'extract from' src")); + return 0; + } +} + +static uint8_t GetGenHdrCode(e_NetHeaderType hdr, e_FmPcdHdrIndex hdrIndex, bool ignoreProtocolValidation) +{ + if (!ignoreProtocolValidation) + switch (hdr) + { + case (HEADER_TYPE_NONE): + ASSERT_COND(FALSE); + case (HEADER_TYPE_ETH): + return KG_SCH_GEN_ETH; + case (HEADER_TYPE_LLC_SNAP): + return KG_SCH_GEN_SNAP; + case (HEADER_TYPE_PPPoE): + return KG_SCH_GEN_PPP; + case (HEADER_TYPE_MPLS): + if ((hdrIndex == e_FM_PCD_HDR_INDEX_NONE) || (hdrIndex == e_FM_PCD_HDR_INDEX_1)) + return KG_SCH_GEN_MPLS1; + if (hdrIndex == e_FM_PCD_HDR_INDEX_2) + return KG_SCH_GEN_MPLS2; + if (hdrIndex == e_FM_PCD_HDR_INDEX_3) + return KG_SCH_GEN_MPLS3; + if (hdrIndex == e_FM_PCD_HDR_INDEX_LAST) + return KG_SCH_GEN_MPLS_LAST; + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal MPLS header index")); + return 0; + case (HEADER_TYPE_IPv4): + if ((hdrIndex == e_FM_PCD_HDR_INDEX_NONE) || (hdrIndex == e_FM_PCD_HDR_INDEX_1)) + return KG_SCH_GEN_IPV4; + if ((hdrIndex == e_FM_PCD_HDR_INDEX_2) || (hdrIndex == e_FM_PCD_HDR_INDEX_LAST)) + return KG_SCH_GEN_IPV4_TUNNELED; + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv4 header index")); + 
return 0; + case (HEADER_TYPE_IPv6): + if ((hdrIndex == e_FM_PCD_HDR_INDEX_NONE) || (hdrIndex == e_FM_PCD_HDR_INDEX_1)) + return KG_SCH_GEN_IPV6; + if ((hdrIndex == e_FM_PCD_HDR_INDEX_2) || (hdrIndex == e_FM_PCD_HDR_INDEX_LAST)) + return KG_SCH_GEN_IPV6_TUNNELED; + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv6 header index")); + return 0; + case (HEADER_TYPE_GRE): + return KG_SCH_GEN_GRE; + case (HEADER_TYPE_TCP): + return KG_SCH_GEN_TCP; + case (HEADER_TYPE_UDP): + return KG_SCH_GEN_UDP; + case (HEADER_TYPE_IPSEC_AH): + return KG_SCH_GEN_IPSEC_AH; + case (HEADER_TYPE_IPSEC_ESP): + return KG_SCH_GEN_IPSEC_ESP; + case (HEADER_TYPE_SCTP): + return KG_SCH_GEN_SCTP; + case (HEADER_TYPE_DCCP): + return KG_SCH_GEN_DCCP; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); + return 0; + } + else + switch (hdr) + { + case (HEADER_TYPE_NONE): + ASSERT_COND(FALSE); + case (HEADER_TYPE_ETH): + return KG_SCH_GEN_ETH_NO_V; + case (HEADER_TYPE_LLC_SNAP): + return KG_SCH_GEN_SNAP_NO_V; + case (HEADER_TYPE_PPPoE): + return KG_SCH_GEN_PPP_NO_V; + case (HEADER_TYPE_MPLS): + if ((hdrIndex == e_FM_PCD_HDR_INDEX_NONE) || (hdrIndex == e_FM_PCD_HDR_INDEX_1)) + return KG_SCH_GEN_MPLS1_NO_V; + if (hdrIndex == e_FM_PCD_HDR_INDEX_LAST) + return KG_SCH_GEN_MPLS_LAST_NO_V; + if ((hdrIndex == e_FM_PCD_HDR_INDEX_2) || (hdrIndex == e_FM_PCD_HDR_INDEX_3) ) + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Indexed MPLS Extraction not supported")); + else + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal MPLS header index")); + return 0; + case (HEADER_TYPE_IPv4): + case (HEADER_TYPE_IPv6): + if ((hdrIndex == e_FM_PCD_HDR_INDEX_NONE) || (hdrIndex == e_FM_PCD_HDR_INDEX_1)) + return KG_SCH_GEN_L3_NO_V; + if ((hdrIndex == e_FM_PCD_HDR_INDEX_2) || (hdrIndex == e_FM_PCD_HDR_INDEX_LAST)) + return KG_SCH_GEN_IP2_NO_V; + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IP header index")); + case (HEADER_TYPE_MINENCAP): + return KG_SCH_GEN_IP2_NO_V; + case (HEADER_TYPE_USER_DEFINED_L3): 
+ return KG_SCH_GEN_L3_NO_V; + case (HEADER_TYPE_GRE): + return KG_SCH_GEN_GRE_NO_V; + case (HEADER_TYPE_TCP): + case (HEADER_TYPE_UDP): + case (HEADER_TYPE_IPSEC_AH): + case (HEADER_TYPE_IPSEC_ESP): + case (HEADER_TYPE_SCTP): + case (HEADER_TYPE_DCCP): + return KG_SCH_GEN_L4_NO_V; + case (HEADER_TYPE_USER_DEFINED_SHIM1): + return KG_SCH_GEN_SHIM1; + case (HEADER_TYPE_USER_DEFINED_SHIM2): + return KG_SCH_GEN_SHIM2; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); + return 0; + } +} +static t_GenericCodes GetGenFieldCode(e_NetHeaderType hdr, t_FmPcdFields field, bool ignoreProtocolValidation, e_FmPcdHdrIndex hdrIndex) +{ + if (!ignoreProtocolValidation) + switch (hdr) + { + case (HEADER_TYPE_NONE): + ASSERT_COND(FALSE); + case (HEADER_TYPE_ETH): + switch (field.eth) + { + case (NET_HEADER_FIELD_ETH_TYPE): + return KG_SCH_GEN_ETH_TYPE; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); + return 0; + } + case (HEADER_TYPE_VLAN): + switch (field.vlan) + { + case (NET_HEADER_FIELD_VLAN_TCI): + if ((hdrIndex == e_FM_PCD_HDR_INDEX_NONE) || (hdrIndex == e_FM_PCD_HDR_INDEX_1)) + return KG_SCH_GEN_VLAN1; + if (hdrIndex == e_FM_PCD_HDR_INDEX_LAST) + return KG_SCH_GEN_VLAN2; + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal VLAN header index")); + return 0; + } + case (HEADER_TYPE_MPLS): + case (HEADER_TYPE_IPSEC_AH): + case (HEADER_TYPE_IPSEC_ESP): + case (HEADER_TYPE_LLC_SNAP): + case (HEADER_TYPE_PPPoE): + case (HEADER_TYPE_IPv4): + case (HEADER_TYPE_IPv6): + case (HEADER_TYPE_GRE): + case (HEADER_TYPE_MINENCAP): + case (HEADER_TYPE_USER_DEFINED_L3): + case (HEADER_TYPE_TCP): + case (HEADER_TYPE_UDP): + case (HEADER_TYPE_SCTP): + case (HEADER_TYPE_DCCP): + case (HEADER_TYPE_USER_DEFINED_L4): + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Header not supported")); + return 0; + } + else + switch (hdr) + { + case (HEADER_TYPE_NONE): + 
ASSERT_COND(FALSE); + case (HEADER_TYPE_ETH): + switch (field.eth) + { + case (NET_HEADER_FIELD_ETH_TYPE): + return KG_SCH_GEN_ETH_TYPE_NO_V; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); + return 0; + } + case (HEADER_TYPE_VLAN): + switch (field.vlan) + { + case (NET_HEADER_FIELD_VLAN_TCI) : + if ((hdrIndex == e_FM_PCD_HDR_INDEX_NONE) || (hdrIndex == e_FM_PCD_HDR_INDEX_1)) + return KG_SCH_GEN_VLAN1_NO_V; + if (hdrIndex == e_FM_PCD_HDR_INDEX_LAST) + return KG_SCH_GEN_VLAN2_NO_V; + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal VLAN header index")); + return 0; + } + case (HEADER_TYPE_IPv4): + switch (field.ipv4) + { + case (NET_HEADER_FIELD_IPv4_PROTO): + return KG_SCH_GEN_IP_PID_NO_V; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); + return 0; + } + case (HEADER_TYPE_IPv6): + switch (field.ipv6) + { + case (NET_HEADER_FIELD_IPv6_NEXT_HDR): + return KG_SCH_GEN_IP_PID_NO_V; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); + return 0; + } + case (HEADER_TYPE_MPLS): + case (HEADER_TYPE_LLC_SNAP): + case (HEADER_TYPE_PPPoE): + case (HEADER_TYPE_GRE): + case (HEADER_TYPE_MINENCAP): + case (HEADER_TYPE_USER_DEFINED_L3): + case (HEADER_TYPE_TCP): + case (HEADER_TYPE_UDP): + case (HEADER_TYPE_IPSEC_AH): + case (HEADER_TYPE_IPSEC_ESP): + case (HEADER_TYPE_SCTP): + case (HEADER_TYPE_DCCP): + case (HEADER_TYPE_USER_DEFINED_L4): + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); + return 0; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Header not supported")); + return 0; + } +} + +static t_KnownFieldsMasks GetKnownProtMask(t_FmPcd *p_FmPcd, e_NetHeaderType hdr, e_FmPcdHdrIndex index, t_FmPcdFields field) +{ + UNUSED(p_FmPcd); + + switch (hdr) + { + case (HEADER_TYPE_NONE): + ASSERT_COND(FALSE); + case (HEADER_TYPE_ETH): + switch (field.eth) + { + case (NET_HEADER_FIELD_ETH_DA): + return KG_SCH_KN_MACDST; + case (NET_HEADER_FIELD_ETH_SA): + return 
KG_SCH_KN_MACSRC; + case (NET_HEADER_FIELD_ETH_TYPE): + return KG_SCH_KN_ETYPE; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); + return 0; + } + case (HEADER_TYPE_LLC_SNAP): + switch (field.llcSnap) + { + case (NET_HEADER_FIELD_LLC_SNAP_TYPE): + return KG_SCH_KN_ETYPE; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); + return 0; + } + case (HEADER_TYPE_VLAN): + switch (field.vlan) + { + case (NET_HEADER_FIELD_VLAN_TCI): + if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1)) + return KG_SCH_KN_TCI1; + if (index == e_FM_PCD_HDR_INDEX_LAST) + return KG_SCH_KN_TCI2; + else + { + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); + return 0; + } + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); + return 0; + } + case (HEADER_TYPE_MPLS): + switch (field.mpls) + { + case (NET_HEADER_FIELD_MPLS_LABEL_STACK): + if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1)) + return KG_SCH_KN_MPLS1; + if (index == e_FM_PCD_HDR_INDEX_2) + return KG_SCH_KN_MPLS2; + if (index == e_FM_PCD_HDR_INDEX_LAST) + return KG_SCH_KN_MPLS_LAST; + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal MPLS index")); + return 0; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); + return 0; + } + case (HEADER_TYPE_IPv4): + switch (field.ipv4) + { + case (NET_HEADER_FIELD_IPv4_SRC_IP): + if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1)) + return KG_SCH_KN_IPSRC1; + if ((index == e_FM_PCD_HDR_INDEX_2) || (index == e_FM_PCD_HDR_INDEX_LAST)) + return KG_SCH_KN_IPSRC2; + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv4 index")); + return 0; + case (NET_HEADER_FIELD_IPv4_DST_IP): + if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1)) + return KG_SCH_KN_IPDST1; + if ((index == e_FM_PCD_HDR_INDEX_2) || (index == e_FM_PCD_HDR_INDEX_LAST)) + return KG_SCH_KN_IPDST2; + 
REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv4 index")); + return 0; + case (NET_HEADER_FIELD_IPv4_PROTO): + if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1)) + return KG_SCH_KN_PTYPE1; + if ((index == e_FM_PCD_HDR_INDEX_2) || (index == e_FM_PCD_HDR_INDEX_LAST)) + return KG_SCH_KN_PTYPE2; + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv4 index")); + return 0; + case (NET_HEADER_FIELD_IPv4_TOS): + if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1)) + return KG_SCH_KN_IPTOS_TC1; + if ((index == e_FM_PCD_HDR_INDEX_2) || (index == e_FM_PCD_HDR_INDEX_LAST)) + return KG_SCH_KN_IPTOS_TC2; + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv4 index")); + return 0; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); + return 0; + } + case (HEADER_TYPE_IPv6): + switch (field.ipv6) + { + case (NET_HEADER_FIELD_IPv6_SRC_IP): + if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1)) + return KG_SCH_KN_IPSRC1; + if ((index == e_FM_PCD_HDR_INDEX_2) || (index == e_FM_PCD_HDR_INDEX_LAST)) + return KG_SCH_KN_IPSRC2; + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv6 index")); + return 0; + case (NET_HEADER_FIELD_IPv6_DST_IP): + if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1)) + return KG_SCH_KN_IPDST1; + if ((index == e_FM_PCD_HDR_INDEX_2) || (index == e_FM_PCD_HDR_INDEX_LAST)) + return KG_SCH_KN_IPDST2; + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv6 index")); + return 0; + case (NET_HEADER_FIELD_IPv6_NEXT_HDR): + if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1)) + return KG_SCH_KN_PTYPE1; + if (index == e_FM_PCD_HDR_INDEX_2) + return KG_SCH_KN_PTYPE2; + if (index == e_FM_PCD_HDR_INDEX_LAST) +#ifdef FM_KG_NO_IPPID_SUPPORT + if (p_FmPcd->fmRevInfo.majorRev < 6) + return KG_SCH_KN_PTYPE2; +#endif /* FM_KG_NO_IPPID_SUPPORT */ + return KG_SCH_KN_IPPID; + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv6 index")); + return 
0; + case (NET_HEADER_FIELD_IPv6_VER | NET_HEADER_FIELD_IPv6_FL | NET_HEADER_FIELD_IPv6_TC): + if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1)) + return (KG_SCH_KN_IPV6FL1 | KG_SCH_KN_IPTOS_TC1); + if ((index == e_FM_PCD_HDR_INDEX_2) || (index == e_FM_PCD_HDR_INDEX_LAST)) + return (KG_SCH_KN_IPV6FL2 | KG_SCH_KN_IPTOS_TC2); + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv6 index")); + return 0; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); + return 0; + } + case (HEADER_TYPE_GRE): + switch (field.gre) + { + case (NET_HEADER_FIELD_GRE_TYPE): + return KG_SCH_KN_GREPTYPE; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); + return 0; + } + case (HEADER_TYPE_MINENCAP): + switch (field.minencap) + { + case (NET_HEADER_FIELD_MINENCAP_SRC_IP): + return KG_SCH_KN_IPSRC2; + case (NET_HEADER_FIELD_MINENCAP_DST_IP): + return KG_SCH_KN_IPDST2; + case (NET_HEADER_FIELD_MINENCAP_TYPE): + return KG_SCH_KN_PTYPE2; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); + return 0; + } + case (HEADER_TYPE_TCP): + switch (field.tcp) + { + case (NET_HEADER_FIELD_TCP_PORT_SRC): + return KG_SCH_KN_L4PSRC; + case (NET_HEADER_FIELD_TCP_PORT_DST): + return KG_SCH_KN_L4PDST; + case (NET_HEADER_FIELD_TCP_FLAGS): + return KG_SCH_KN_TFLG; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); + return 0; + } + case (HEADER_TYPE_UDP): + switch (field.udp) + { + case (NET_HEADER_FIELD_UDP_PORT_SRC): + return KG_SCH_KN_L4PSRC; + case (NET_HEADER_FIELD_UDP_PORT_DST): + return KG_SCH_KN_L4PDST; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); + return 0; + } + case (HEADER_TYPE_IPSEC_AH): + switch (field.ipsecAh) + { + case (NET_HEADER_FIELD_IPSEC_AH_SPI): + return KG_SCH_KN_IPSEC_SPI; + case (NET_HEADER_FIELD_IPSEC_AH_NH): + return KG_SCH_KN_IPSEC_NH; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not 
supported")); + return 0; + } + case (HEADER_TYPE_IPSEC_ESP): + switch (field.ipsecEsp) + { + case (NET_HEADER_FIELD_IPSEC_ESP_SPI): + return KG_SCH_KN_IPSEC_SPI; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); + return 0; + } + case (HEADER_TYPE_SCTP): + switch (field.sctp) + { + case (NET_HEADER_FIELD_SCTP_PORT_SRC): + return KG_SCH_KN_L4PSRC; + case (NET_HEADER_FIELD_SCTP_PORT_DST): + return KG_SCH_KN_L4PDST; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); + return 0; + } + case (HEADER_TYPE_DCCP): + switch (field.dccp) + { + case (NET_HEADER_FIELD_DCCP_PORT_SRC): + return KG_SCH_KN_L4PSRC; + case (NET_HEADER_FIELD_DCCP_PORT_DST): + return KG_SCH_KN_L4PDST; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); + return 0; + } + case (HEADER_TYPE_PPPoE): + switch (field.pppoe) + { + case (NET_HEADER_FIELD_PPPoE_PID): + return KG_SCH_KN_PPPID; + case (NET_HEADER_FIELD_PPPoE_SID): + return KG_SCH_KN_PPPSID; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); + return 0; + } + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); + return 0; + } +} + + +static uint8_t GetKnownFieldId(uint32_t bitMask) +{ + uint8_t cnt = 0; + + while (bitMask) + if (bitMask & 0x80000000) + break; + else + { + cnt++; + bitMask <<= 1; + } + return cnt; + +} + +static uint8_t GetExtractedOrMask(uint8_t bitOffset, bool fqid) +{ + uint8_t i, mask, numOfOnesToClear, walking1Mask = 1; + + /* bitOffset 1-7 --> mask 0x1-0x7F */ + if (bitOffset<8) + { + mask = 0; + for (i = 0 ; i < bitOffset ; i++, walking1Mask <<= 1) + mask |= walking1Mask; + } + else + { + mask = 0xFF; + numOfOnesToClear = 0; + if (fqid && bitOffset>24) + /* bitOffset 25-31 --> mask 0xFE-0x80 */ + numOfOnesToClear = (uint8_t)(bitOffset-24); + else + /* bitOffset 9-15 --> mask 0xFE-0x80 */ + if (!fqid && bitOffset>8) + numOfOnesToClear = (uint8_t)(bitOffset-8); + for (i = 0 ; i < 
numOfOnesToClear ; i++, walking1Mask <<= 1) + mask &= ~walking1Mask; + /* bitOffset 8-24 for FQID, 8 for PP --> no mask (0xFF)*/ + } + return mask; +} + +static void IncSchemeOwners(t_FmPcd *p_FmPcd, t_FmPcdKgInterModuleBindPortToSchemes *p_BindPort) +{ + t_FmPcdKg *p_FmPcdKg; + t_FmPcdKgScheme *p_Scheme; + uint32_t intFlags; + uint8_t relativeSchemeId; + int i; + + p_FmPcdKg = p_FmPcd->p_FmPcdKg; + + /* for each scheme - update owners counters */ + for (i = 0; i < p_BindPort->numOfSchemes; i++) + { + relativeSchemeId = FmPcdKgGetRelativeSchemeId(p_FmPcd, p_BindPort->schemesIds[i]); + ASSERT_COND(relativeSchemeId < FM_PCD_KG_NUM_OF_SCHEMES); + + p_Scheme = &p_FmPcdKg->schemes[relativeSchemeId]; + + /* increment owners number */ + intFlags = KgSchemeLock(p_Scheme); + p_Scheme->owners++; + KgSchemeUnlock(p_Scheme, intFlags); + } +} + +static void DecSchemeOwners(t_FmPcd *p_FmPcd, t_FmPcdKgInterModuleBindPortToSchemes *p_BindPort) +{ + t_FmPcdKg *p_FmPcdKg; + t_FmPcdKgScheme *p_Scheme; + uint32_t intFlags; + uint8_t relativeSchemeId; + int i; + + p_FmPcdKg = p_FmPcd->p_FmPcdKg; + + /* for each scheme - update owners counters */ + for (i = 0; i < p_BindPort->numOfSchemes; i++) + { + relativeSchemeId = FmPcdKgGetRelativeSchemeId(p_FmPcd, p_BindPort->schemesIds[i]); + ASSERT_COND(relativeSchemeId < FM_PCD_KG_NUM_OF_SCHEMES); + + p_Scheme = &p_FmPcdKg->schemes[relativeSchemeId]; + + /* increment owners number */ + ASSERT_COND(p_Scheme->owners); + intFlags = KgSchemeLock(p_Scheme); + p_Scheme->owners--; + KgSchemeUnlock(p_Scheme, intFlags); + } +} + +static void UpateSchemePointedOwner(t_FmPcdKgScheme *p_Scheme, bool add) +{ + /* this routine is locked by the calling routine */ + ASSERT_COND(p_Scheme); + ASSERT_COND(p_Scheme->valid); + + if (add) + p_Scheme->pointedOwners++; + else + p_Scheme->pointedOwners--; +} + +static t_Error KgWriteSp(t_FmPcd *p_FmPcd, uint8_t hardwarePortId, uint32_t spReg, bool add) +{ + struct fman_kg_regs *p_KgRegs; + + uint32_t tmpKgarReg = 0, 
intFlags; + t_Error err = E_OK; + + /* The calling routine had locked the port, so for each port only one core can access + * (so we don't need a lock here) */ + + if (p_FmPcd->h_Hc) + return FmHcKgWriteSp(p_FmPcd->h_Hc, hardwarePortId, spReg, add); + + p_KgRegs = p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs; + + tmpKgarReg = FmPcdKgBuildReadPortSchemeBindActionReg(hardwarePortId); + /* lock a common KG reg */ + intFlags = KgHwLock(p_FmPcd->p_FmPcdKg); + err = WriteKgarWait(p_FmPcd, tmpKgarReg); + if (err) + { + KgHwUnlock(p_FmPcd->p_FmPcdKg, intFlags); + RETURN_ERROR(MINOR, err, NO_MSG); + } + + fman_kg_write_sp(p_KgRegs, spReg, add); + + tmpKgarReg = FmPcdKgBuildWritePortSchemeBindActionReg(hardwarePortId); + + err = WriteKgarWait(p_FmPcd, tmpKgarReg); + KgHwUnlock(p_FmPcd->p_FmPcdKg, intFlags); + return err; +} + +static t_Error KgWriteCpp(t_FmPcd *p_FmPcd, uint8_t hardwarePortId, uint32_t cppReg) +{ + struct fman_kg_regs *p_KgRegs; + uint32_t tmpKgarReg, intFlags; + t_Error err; + + p_KgRegs = p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs; + + if (p_FmPcd->h_Hc) + { + err = FmHcKgWriteCpp(p_FmPcd->h_Hc, hardwarePortId, cppReg); + return err; + } + + intFlags = KgHwLock(p_FmPcd->p_FmPcdKg); + fman_kg_write_cpp(p_KgRegs, cppReg); + tmpKgarReg = FmPcdKgBuildWritePortClsPlanBindActionReg(hardwarePortId); + err = WriteKgarWait(p_FmPcd, tmpKgarReg); + KgHwUnlock(p_FmPcd->p_FmPcdKg, intFlags); + + return err; +} + +static uint32_t BuildCppReg(t_FmPcd *p_FmPcd, uint8_t clsPlanGrpId) +{ + uint32_t tmpKgpeCpp; + + tmpKgpeCpp = (uint32_t)(p_FmPcd->p_FmPcdKg->clsPlanGrps[clsPlanGrpId].baseEntry / 8); + tmpKgpeCpp |= (uint32_t)(((p_FmPcd->p_FmPcdKg->clsPlanGrps[clsPlanGrpId].sizeOfGrp / 8) - 1) << FM_KG_PE_CPP_MASK_SHIFT); + + return tmpKgpeCpp; +} + +static t_Error BindPortToClsPlanGrp(t_FmPcd *p_FmPcd, uint8_t hardwarePortId, uint8_t clsPlanGrpId) +{ + uint32_t tmpKgpeCpp = 0; + + tmpKgpeCpp = BuildCppReg(p_FmPcd, clsPlanGrpId); + return KgWriteCpp(p_FmPcd, hardwarePortId, tmpKgpeCpp); +} + 
+static void UnbindPortToClsPlanGrp(t_FmPcd *p_FmPcd, uint8_t hardwarePortId) +{ + KgWriteCpp(p_FmPcd, hardwarePortId, 0); +} + +static uint32_t ReadClsPlanBlockActionReg(uint8_t grpId) +{ + return (uint32_t)(FM_KG_KGAR_GO | + FM_KG_KGAR_READ | + FM_PCD_KG_KGAR_SEL_CLS_PLAN_ENTRY | + DUMMY_PORT_ID | + ((uint32_t)grpId << FM_PCD_KG_KGAR_NUM_SHIFT) | + FM_PCD_KG_KGAR_WSEL_MASK); + + /* if we ever want to write 1 by 1, use: + sel = (uint8_t)(0x01 << (7- (entryId % CLS_PLAN_NUM_PER_GRP))); + */ +} + +static void PcdKgErrorException(t_Handle h_FmPcd) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; + uint32_t event,schemeIndexes = 0, index = 0; + struct fman_kg_regs *p_KgRegs; + + ASSERT_COND(FmIsMaster(p_FmPcd->h_Fm)); + p_KgRegs = p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs; + fman_kg_get_event(p_KgRegs, &event, &schemeIndexes); + + if (event & FM_EX_KG_DOUBLE_ECC) + p_FmPcd->f_Exception(p_FmPcd->h_App,e_FM_PCD_KG_EXCEPTION_DOUBLE_ECC); + if (event & FM_EX_KG_KEYSIZE_OVERFLOW) + { + if (schemeIndexes) + { + while (schemeIndexes) + { + if (schemeIndexes & 0x1) + p_FmPcd->f_FmPcdIndexedException(p_FmPcd->h_App,e_FM_PCD_KG_EXCEPTION_KEYSIZE_OVERFLOW, (uint16_t)(31 - index)); + schemeIndexes >>= 1; + index+=1; + } + } + else /* this should happen only when interrupt is forced. 
*/ + p_FmPcd->f_Exception(p_FmPcd->h_App,e_FM_PCD_KG_EXCEPTION_KEYSIZE_OVERFLOW); + } +} + +static t_Error KgInitGuest(t_FmPcd *p_FmPcd) +{ + t_Error err = E_OK; + t_FmPcdIpcKgSchemesParams kgAlloc; + uint32_t replyLength; + t_FmPcdIpcReply reply; + t_FmPcdIpcMsg msg; + + ASSERT_COND(p_FmPcd->guestId != NCSW_MASTER_ID); + + /* in GUEST_PARTITION, we use the IPC */ + memset(&reply, 0, sizeof(reply)); + memset(&msg, 0, sizeof(msg)); + memset(&kgAlloc, 0, sizeof(t_FmPcdIpcKgSchemesParams)); + kgAlloc.numOfSchemes = p_FmPcd->p_FmPcdKg->numOfSchemes; + kgAlloc.guestId = p_FmPcd->guestId; + msg.msgId = FM_PCD_ALLOC_KG_SCHEMES; + memcpy(msg.msgBody, &kgAlloc, sizeof(kgAlloc)); + replyLength = sizeof(uint32_t) + p_FmPcd->p_FmPcdKg->numOfSchemes*sizeof(uint8_t); + if ((err = XX_IpcSendMessage(p_FmPcd->h_IpcSession, + (uint8_t*)&msg, + sizeof(msg.msgId) + sizeof(kgAlloc), + (uint8_t*)&reply, + &replyLength, + NULL, + NULL)) != E_OK) + RETURN_ERROR(MAJOR, err, NO_MSG); + if (replyLength != (sizeof(uint32_t) + p_FmPcd->p_FmPcdKg->numOfSchemes*sizeof(uint8_t))) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch")); + memcpy(p_FmPcd->p_FmPcdKg->schemesIds, (uint8_t*)(reply.replyBody),p_FmPcd->p_FmPcdKg->numOfSchemes*sizeof(uint8_t)); + + return (t_Error)reply.error; +} + +static t_Error KgInitMaster(t_FmPcd *p_FmPcd) +{ + t_Error err = E_OK; + struct fman_kg_regs *p_Regs = p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs; + + ASSERT_COND(p_FmPcd->guestId == NCSW_MASTER_ID); + + if (p_FmPcd->exceptions & FM_EX_KG_DOUBLE_ECC) + FmEnableRamsEcc(p_FmPcd->h_Fm); + + fman_kg_init(p_Regs, p_FmPcd->exceptions, GET_NIA_BMI_AC_ENQ_FRAME(p_FmPcd)); + + /* register even if no interrupts enabled, to allow future enablement */ + FmRegisterIntr(p_FmPcd->h_Fm, + e_FM_MOD_KG, + 0, + e_FM_INTR_TYPE_ERR, + PcdKgErrorException, + p_FmPcd); + + fman_kg_enable_scheme_interrupts(p_Regs); + + if (p_FmPcd->p_FmPcdKg->numOfSchemes) + { + err = FmPcdKgAllocSchemes(p_FmPcd, + 
p_FmPcd->p_FmPcdKg->numOfSchemes, + p_FmPcd->guestId, + p_FmPcd->p_FmPcdKg->schemesIds); + if (err) + RETURN_ERROR(MINOR, err, NO_MSG); + } + + return E_OK; +} + +static void ValidateSchemeSw(t_FmPcdKgScheme *p_Scheme) +{ + ASSERT_COND(!p_Scheme->valid); + if (p_Scheme->netEnvId != ILLEGAL_NETENV) + FmPcdIncNetEnvOwners(p_Scheme->h_FmPcd, p_Scheme->netEnvId); + p_Scheme->valid = TRUE; +} + +static t_Error InvalidateSchemeSw(t_FmPcdKgScheme *p_Scheme) +{ + if (p_Scheme->owners) + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Trying to delete a scheme that has ports bound to")); + + if (p_Scheme->netEnvId != ILLEGAL_NETENV) + FmPcdDecNetEnvOwners(p_Scheme->h_FmPcd, p_Scheme->netEnvId); + p_Scheme->valid = FALSE; + + return E_OK; +} + +static t_Error BuildSchemeRegs(t_FmPcdKgScheme *p_Scheme, + t_FmPcdKgSchemeParams *p_SchemeParams, + struct fman_kg_scheme_regs *p_SchemeRegs) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *)(p_Scheme->h_FmPcd); + uint32_t grpBits = 0; + uint8_t grpBase; + bool direct=TRUE, absolute=FALSE; + uint16_t profileId=0, numOfProfiles=0, relativeProfileId; + t_Error err = E_OK; + int i = 0; + t_NetEnvParams netEnvParams; + uint32_t tmpReg, fqbTmp = 0, ppcTmp = 0, selectTmp, maskTmp, knownTmp, genTmp; + t_FmPcdKgKeyExtractAndHashParams *p_KeyAndHash = NULL; + uint8_t j, curr, idx; + uint8_t id, shift=0, code=0, offset=0, size=0; + t_FmPcdExtractEntry *p_Extract = NULL; + t_FmPcdKgExtractedOrParams *p_ExtractOr; + bool generic = FALSE; + t_KnownFieldsMasks bitMask; + e_FmPcdKgExtractDfltSelect swDefault = (e_FmPcdKgExtractDfltSelect)0; + t_FmPcdKgSchemesExtracts *p_LocalExtractsArray; + uint8_t numOfSwDefaults = 0; + t_FmPcdKgExtractDflt swDefaults[NUM_OF_SW_DEFAULTS]; + uint8_t currGenId = 0; + + memset(swDefaults, 0, NUM_OF_SW_DEFAULTS*sizeof(t_FmPcdKgExtractDflt)); + memset(p_SchemeRegs, 0, sizeof(struct fman_kg_scheme_regs)); + + if (p_SchemeParams->netEnvParams.numOfDistinctionUnits > FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, + 
("numOfDistinctionUnits should not exceed %d", FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS)); + + /* by netEnv parameters, get match vector */ + if (!p_SchemeParams->alwaysDirect) + { + p_Scheme->netEnvId = FmPcdGetNetEnvId(p_SchemeParams->netEnvParams.h_NetEnv); + netEnvParams.netEnvId = p_Scheme->netEnvId; + netEnvParams.numOfDistinctionUnits = p_SchemeParams->netEnvParams.numOfDistinctionUnits; + memcpy(netEnvParams.unitIds, p_SchemeParams->netEnvParams.unitIds, (sizeof(uint8_t))*p_SchemeParams->netEnvParams.numOfDistinctionUnits); + err = PcdGetUnitsVector(p_FmPcd, &netEnvParams); + if (err) + RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG); + p_Scheme->matchVector = netEnvParams.vector; + } + else + { + p_Scheme->matchVector = SCHEME_ALWAYS_DIRECT; + p_Scheme->netEnvId = ILLEGAL_NETENV; + } + + if (p_SchemeParams->nextEngine == e_FM_PCD_INVALID) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Next Engine of the scheme is not Valid")); + + if (p_SchemeParams->bypassFqidGeneration) + { +#ifdef FM_KG_NO_BYPASS_FQID_GEN + if ((p_FmPcd->fmRevInfo.majorRev != 4) && (p_FmPcd->fmRevInfo.majorRev < 6)) + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("bypassFqidGeneration.")); +#endif /* FM_KG_NO_BYPASS_FQID_GEN */ + if (p_SchemeParams->baseFqid) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("baseFqid set for a scheme that does not generate an FQID")); + } + else + if (!p_SchemeParams->baseFqid) + DBG(WARNING, ("baseFqid is 0.")); + + if (p_SchemeParams->nextEngine == e_FM_PCD_PLCR) + { + direct = p_SchemeParams->kgNextEngineParams.plcrProfile.direct; + p_Scheme->directPlcr = direct; + absolute = (bool)(p_SchemeParams->kgNextEngineParams.plcrProfile.sharedProfile ? 
TRUE : FALSE); + if (!direct && absolute) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Indirect policing is not available when profile is shared.")); + + if (direct) + { + profileId = p_SchemeParams->kgNextEngineParams.plcrProfile.profileSelect.directRelativeProfileId; + numOfProfiles = 1; + } + else + { + profileId = p_SchemeParams->kgNextEngineParams.plcrProfile.profileSelect.indirectProfile.fqidOffsetRelativeProfileIdBase; + shift = p_SchemeParams->kgNextEngineParams.plcrProfile.profileSelect.indirectProfile.fqidOffsetShift; + numOfProfiles = p_SchemeParams->kgNextEngineParams.plcrProfile.profileSelect.indirectProfile.numOfProfiles; + } + } + + if (p_SchemeParams->nextEngine == e_FM_PCD_CC) + { +#ifdef FM_KG_NO_BYPASS_PLCR_PROFILE_GEN + if ((p_SchemeParams->kgNextEngineParams.cc.plcrNext) && (p_SchemeParams->kgNextEngineParams.cc.bypassPlcrProfileGeneration)) + { + if ((p_FmPcd->fmRevInfo.majorRev != 4) && (p_FmPcd->fmRevInfo.majorRev < 6)) + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("bypassPlcrProfileGeneration.")); + } +#endif /* FM_KG_NO_BYPASS_PLCR_PROFILE_GEN */ + + err = FmPcdCcGetGrpParams(p_SchemeParams->kgNextEngineParams.cc.h_CcTree, + p_SchemeParams->kgNextEngineParams.cc.grpId, + &grpBits, + &grpBase); + if (err) + RETURN_ERROR(MAJOR, err, NO_MSG); + p_Scheme->ccUnits = grpBits; + + if ((p_SchemeParams->kgNextEngineParams.cc.plcrNext) && + (!p_SchemeParams->kgNextEngineParams.cc.bypassPlcrProfileGeneration)) + { + if (p_SchemeParams->kgNextEngineParams.cc.plcrProfile.sharedProfile) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Shared profile may not be used after Coarse classification.")); + absolute = FALSE; + direct = p_SchemeParams->kgNextEngineParams.cc.plcrProfile.direct; + if (direct) + { + profileId = p_SchemeParams->kgNextEngineParams.cc.plcrProfile.profileSelect.directRelativeProfileId; + numOfProfiles = 1; + } + else + { + profileId = p_SchemeParams->kgNextEngineParams.cc.plcrProfile.profileSelect.indirectProfile.fqidOffsetRelativeProfileIdBase; + shift 
= p_SchemeParams->kgNextEngineParams.cc.plcrProfile.profileSelect.indirectProfile.fqidOffsetShift; + numOfProfiles = p_SchemeParams->kgNextEngineParams.cc.plcrProfile.profileSelect.indirectProfile.numOfProfiles; + } + } + } + + /* if policer is used directly after KG, or after CC */ + if ((p_SchemeParams->nextEngine == e_FM_PCD_PLCR) || + ((p_SchemeParams->nextEngine == e_FM_PCD_CC) && + (p_SchemeParams->kgNextEngineParams.cc.plcrNext) && + (!p_SchemeParams->kgNextEngineParams.cc.bypassPlcrProfileGeneration))) + { + /* if private policer profile, it may be uninitialized yet, therefore no checks are done at this stage */ + if (absolute) + { + /* for absolute direct policy only, */ + relativeProfileId = profileId; + err = FmPcdPlcrGetAbsoluteIdByProfileParams((t_Handle)p_FmPcd,e_FM_PCD_PLCR_SHARED,NULL, relativeProfileId, &profileId); + if (err) + RETURN_ERROR(MAJOR, err, ("Shared profile not valid offset")); + if (!FmPcdPlcrIsProfileValid(p_FmPcd, profileId)) + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Shared profile not valid.")); + p_Scheme->relativeProfileId = profileId; + } + else + { + /* save relative profile id's for later check */ + p_Scheme->nextRelativePlcrProfile = TRUE; + p_Scheme->relativeProfileId = profileId; + p_Scheme->numOfProfiles = numOfProfiles; + } + } + else + { + /* if policer is NOT going to be used after KG at all than if bypassFqidGeneration + is set, we do not need numOfUsedExtractedOrs and hashDistributionNumOfFqids */ + if (p_SchemeParams->bypassFqidGeneration && p_SchemeParams->numOfUsedExtractedOrs) + RETURN_ERROR(MAJOR, E_INVALID_STATE, + ("numOfUsedExtractedOrs is set in a scheme that does not generate FQID or policer profile ID")); + if (p_SchemeParams->bypassFqidGeneration && + p_SchemeParams->useHash && + p_SchemeParams->keyExtractAndHashParams.hashDistributionNumOfFqids) + RETURN_ERROR(MAJOR, E_INVALID_STATE, + ("hashDistributionNumOfFqids is set in a scheme that does not generate FQID or policer profile ID")); + } + + /* configure 
all 21 scheme registers */ + tmpReg = KG_SCH_MODE_EN; + switch (p_SchemeParams->nextEngine) + { + case (e_FM_PCD_PLCR): + /* add to mode register - NIA */ + tmpReg |= KG_SCH_MODE_NIA_PLCR; + tmpReg |= NIA_ENG_PLCR; + tmpReg |= (uint32_t)(p_SchemeParams->kgNextEngineParams.plcrProfile.sharedProfile ? NIA_PLCR_ABSOLUTE:0); + /* initialize policer profile command - */ + /* configure kgse_ppc */ + if (direct) + /* use profileId as base, other fields are 0 */ + p_SchemeRegs->kgse_ppc = (uint32_t)profileId; + else + { + if (shift > MAX_PP_SHIFT) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fqidOffsetShift may not be larger than %d", MAX_PP_SHIFT)); + + if (!numOfProfiles || !POWER_OF_2(numOfProfiles)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("numOfProfiles must not be 0 and must be a power of 2")); + + ppcTmp = ((uint32_t)shift << KG_SCH_PP_SHIFT_HIGH_SHIFT) & KG_SCH_PP_SHIFT_HIGH; + ppcTmp |= ((uint32_t)shift << KG_SCH_PP_SHIFT_LOW_SHIFT) & KG_SCH_PP_SHIFT_LOW; + ppcTmp |= ((uint32_t)(numOfProfiles-1) << KG_SCH_PP_MASK_SHIFT); + ppcTmp |= (uint32_t)profileId; + + p_SchemeRegs->kgse_ppc = ppcTmp; + } + break; + case (e_FM_PCD_CC): + /* mode reg - define NIA */ + tmpReg |= (NIA_ENG_FM_CTL | NIA_FM_CTL_AC_CC); + + p_SchemeRegs->kgse_ccbs = grpBits; + tmpReg |= (uint32_t)(grpBase << KG_SCH_MODE_CCOBASE_SHIFT); + + if (p_SchemeParams->kgNextEngineParams.cc.plcrNext) + { + if (!p_SchemeParams->kgNextEngineParams.cc.bypassPlcrProfileGeneration) + { + /* find out if absolute or relative */ + if (absolute) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("It is illegal to request a shared profile in a scheme that is in a KG->CC->PLCR flow")); + if (direct) + { + /* mask = 0, base = directProfileId */ + p_SchemeRegs->kgse_ppc = (uint32_t)profileId; + } + else + { + if (shift > MAX_PP_SHIFT) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fqidOffsetShift may not be larger than %d", MAX_PP_SHIFT)); + if (!numOfProfiles || !POWER_OF_2(numOfProfiles)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, 
("numOfProfiles must not be 0 and must be a power of 2")); + + ppcTmp = ((uint32_t)shift << KG_SCH_PP_SHIFT_HIGH_SHIFT) & KG_SCH_PP_SHIFT_HIGH; + ppcTmp |= ((uint32_t)shift << KG_SCH_PP_SHIFT_LOW_SHIFT) & KG_SCH_PP_SHIFT_LOW; + ppcTmp |= ((uint32_t)(numOfProfiles-1) << KG_SCH_PP_MASK_SHIFT); + ppcTmp |= (uint32_t)profileId; + + p_SchemeRegs->kgse_ppc = ppcTmp; + } + } + else + ppcTmp = KG_SCH_PP_NO_GEN; + } + break; + case (e_FM_PCD_DONE): + if (p_SchemeParams->kgNextEngineParams.doneAction == e_FM_PCD_DROP_FRAME) + tmpReg |= GET_NIA_BMI_AC_DISCARD_FRAME(p_FmPcd); + else + tmpReg |= GET_NIA_BMI_AC_ENQ_FRAME(p_FmPcd); + break; + default: + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("Next engine not supported")); + } + p_SchemeRegs->kgse_mode = tmpReg; + + p_SchemeRegs->kgse_mv = p_Scheme->matchVector; + +#if (DPAA_VERSION >= 11) + if (p_SchemeParams->overrideStorageProfile) + { + p_SchemeRegs->kgse_om |= KG_SCH_OM_VSPE; + + tmpReg = 0; + if (p_SchemeParams->storageProfile.direct) + { + profileId = p_SchemeParams->storageProfile.profileSelect.directRelativeProfileId; + shift = 0; + numOfProfiles = 1; + } + else + { + profileId = p_SchemeParams->storageProfile.profileSelect.indirectProfile.fqidOffsetRelativeProfileIdBase; + shift = p_SchemeParams->storageProfile.profileSelect.indirectProfile.fqidOffsetShift; + numOfProfiles = p_SchemeParams->storageProfile.profileSelect.indirectProfile.numOfProfiles; + } + if (shift > MAX_SP_SHIFT) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fqidOffsetShift may not be larger than %d", MAX_SP_SHIFT)); + + if (!numOfProfiles || !POWER_OF_2(numOfProfiles)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("numOfProfiles must not be 0 and must be a power of 2")); + + tmpReg = (uint32_t)shift << KG_SCH_VSP_SHIFT; + tmpReg |= ((uint32_t)(numOfProfiles-1) << KG_SCH_VSP_MASK_SHIFT); + tmpReg |= (uint32_t)profileId; + + + p_SchemeRegs->kgse_vsp = tmpReg; + + p_Scheme->vspe = TRUE; + + } + else + p_SchemeRegs->kgse_vsp = KG_SCH_VSP_NO_KSP_EN; +#endif /* 
(DPAA_VERSION >= 11) */ + + if (p_SchemeParams->useHash) + { + p_KeyAndHash = &p_SchemeParams->keyExtractAndHashParams; + + if (p_KeyAndHash->numOfUsedExtracts >= FM_PCD_KG_MAX_NUM_OF_EXTRACTS_PER_KEY) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("numOfUsedExtracts out of range")); + + /* configure kgse_dv0 */ + p_SchemeRegs->kgse_dv0 = p_KeyAndHash->privateDflt0; + + /* configure kgse_dv1 */ + p_SchemeRegs->kgse_dv1 = p_KeyAndHash->privateDflt1; + + if (!p_SchemeParams->bypassFqidGeneration) + { + if (!p_KeyAndHash->hashDistributionNumOfFqids || !POWER_OF_2(p_KeyAndHash->hashDistributionNumOfFqids)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("hashDistributionNumOfFqids must not be 0 and must be a power of 2")); + if ((p_KeyAndHash->hashDistributionNumOfFqids-1) & p_SchemeParams->baseFqid) + DBG(WARNING, ("baseFqid unaligned. Distribution may result in less than hashDistributionNumOfFqids queues.")); + } + + /* configure kgse_ekdv */ + tmpReg = 0; + for ( i=0 ;i<p_KeyAndHash->numOfUsedDflts ; i++) + { + switch (p_KeyAndHash->dflts[i].type) + { + case (e_FM_PCD_KG_MAC_ADDR): + tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_MAC_ADDR_SHIFT); + break; + case (e_FM_PCD_KG_TCI): + tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_TCI_SHIFT); + break; + case (e_FM_PCD_KG_ENET_TYPE): + tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_ENET_TYPE_SHIFT); + break; + case (e_FM_PCD_KG_PPP_SESSION_ID): + tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_PPP_SESSION_ID_SHIFT); + break; + case (e_FM_PCD_KG_PPP_PROTOCOL_ID): + tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_PPP_PROTOCOL_ID_SHIFT); + break; + case (e_FM_PCD_KG_MPLS_LABEL): + tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_MPLS_LABEL_SHIFT); + break; + case (e_FM_PCD_KG_IP_ADDR): + tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_IP_ADDR_SHIFT); + break; + case (e_FM_PCD_KG_PROTOCOL_TYPE): + tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << 
KG_SCH_DEF_PROTOCOL_TYPE_SHIFT); + break; + case (e_FM_PCD_KG_IP_TOS_TC): + tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_IP_TOS_TC_SHIFT); + break; + case (e_FM_PCD_KG_IPV6_FLOW_LABEL): + tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_L4_PORT_SHIFT); + break; + case (e_FM_PCD_KG_IPSEC_SPI): + tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_IPSEC_SPI_SHIFT); + break; + case (e_FM_PCD_KG_L4_PORT): + tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_L4_PORT_SHIFT); + break; + case (e_FM_PCD_KG_TCP_FLAG): + tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_TCP_FLAG_SHIFT); + break; + case (e_FM_PCD_KG_GENERIC_FROM_DATA): + swDefaults[numOfSwDefaults].type = e_FM_PCD_KG_GENERIC_FROM_DATA; + swDefaults[numOfSwDefaults].dfltSelect = p_KeyAndHash->dflts[i].dfltSelect; + numOfSwDefaults ++; + break; + case (e_FM_PCD_KG_GENERIC_FROM_DATA_NO_V): + swDefaults[numOfSwDefaults].type = e_FM_PCD_KG_GENERIC_FROM_DATA_NO_V; + swDefaults[numOfSwDefaults].dfltSelect = p_KeyAndHash->dflts[i].dfltSelect; + numOfSwDefaults ++; + break; + case (e_FM_PCD_KG_GENERIC_NOT_FROM_DATA): + swDefaults[numOfSwDefaults].type = e_FM_PCD_KG_GENERIC_NOT_FROM_DATA; + swDefaults[numOfSwDefaults].dfltSelect = p_KeyAndHash->dflts[i].dfltSelect; + numOfSwDefaults ++; + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG); + } + } + p_SchemeRegs->kgse_ekdv = tmpReg; + + p_LocalExtractsArray = (t_FmPcdKgSchemesExtracts *)XX_Malloc(sizeof(t_FmPcdKgSchemesExtracts)); + if (!p_LocalExtractsArray) + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("No memory")); + + /* configure kgse_ekfc and kgse_gec */ + knownTmp = 0; + for ( i=0 ;i<p_KeyAndHash->numOfUsedExtracts ; i++) + { + p_Extract = &p_KeyAndHash->extractArray[i]; + switch (p_Extract->type) + { + case (e_FM_PCD_KG_EXTRACT_PORT_PRIVATE_INFO): + knownTmp |= KG_SCH_KN_PORT_ID; + /* save in driver structure */ + p_LocalExtractsArray->extractsArray[i].id = GetKnownFieldId(KG_SCH_KN_PORT_ID); + 
p_LocalExtractsArray->extractsArray[i].known = TRUE; + break; + case (e_FM_PCD_EXTRACT_BY_HDR): + switch (p_Extract->extractByHdr.hdr) + { + +#ifdef FM_CAPWAP_SUPPORT + case (HEADER_TYPE_UDP_LITE): + p_Extract->extractByHdr.hdr = HEADER_TYPE_UDP; + break; +#endif + case (HEADER_TYPE_UDP_ENCAP_ESP): + switch (p_Extract->extractByHdr.type) + { + case (e_FM_PCD_EXTRACT_FROM_HDR): + /* case where extraction from ESP only */ + if (p_Extract->extractByHdr.extractByHdrType.fromHdr.offset >= UDP_HEADER_SIZE) + { + p_Extract->extractByHdr.hdr = FmPcdGetAliasHdr(p_FmPcd, p_Scheme->netEnvId, HEADER_TYPE_UDP_ENCAP_ESP); + p_Extract->extractByHdr.extractByHdrType.fromHdr.offset -= UDP_HEADER_SIZE; + p_Extract->extractByHdr.ignoreProtocolValidation = TRUE; + } + else + { + p_Extract->extractByHdr.hdr = HEADER_TYPE_UDP; + p_Extract->extractByHdr.ignoreProtocolValidation = FALSE; + } + break; + case (e_FM_PCD_EXTRACT_FROM_FIELD): + switch (p_Extract->extractByHdr.extractByHdrType.fromField.field.udpEncapEsp) + { + case (NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_SRC): + case (NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_DST): + case (NET_HEADER_FIELD_UDP_ENCAP_ESP_LEN): + case (NET_HEADER_FIELD_UDP_ENCAP_ESP_CKSUM): + p_Extract->extractByHdr.hdr = HEADER_TYPE_UDP; + break; + case (NET_HEADER_FIELD_UDP_ENCAP_ESP_SPI): + p_Extract->extractByHdr.type = e_FM_PCD_EXTRACT_FROM_HDR; + p_Extract->extractByHdr.hdr = FmPcdGetAliasHdr(p_FmPcd, p_Scheme->netEnvId, HEADER_TYPE_UDP_ENCAP_ESP); + p_Extract->extractByHdr.extractByHdrType.fromField.size = p_Extract->extractByHdr.extractByHdrType.fromField.size; + /*p_Extract->extractByHdr.extractByHdrType.fromField.offset += ESP_SPI_OFFSET;*/ + p_Extract->extractByHdr.ignoreProtocolValidation = TRUE; + break; + case (NET_HEADER_FIELD_UDP_ENCAP_ESP_SEQUENCE_NUM): + p_Extract->extractByHdr.type = e_FM_PCD_EXTRACT_FROM_HDR; + p_Extract->extractByHdr.hdr = FmPcdGetAliasHdr(p_FmPcd, p_Scheme->netEnvId, HEADER_TYPE_UDP_ENCAP_ESP); + 
p_Extract->extractByHdr.extractByHdrType.fromField.size = p_Extract->extractByHdr.extractByHdrType.fromField.size; + p_Extract->extractByHdr.extractByHdrType.fromField.offset += ESP_SEQ_NUM_OFFSET; + p_Extract->extractByHdr.ignoreProtocolValidation = TRUE; + break; + } + break; + case (e_FM_PCD_EXTRACT_FULL_FIELD): + switch (p_Extract->extractByHdr.extractByHdrType.fullField.udpEncapEsp) + { + case (NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_SRC): + case (NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_DST): + case (NET_HEADER_FIELD_UDP_ENCAP_ESP_LEN): + case (NET_HEADER_FIELD_UDP_ENCAP_ESP_CKSUM): + p_Extract->extractByHdr.hdr = HEADER_TYPE_UDP; + break; + case (NET_HEADER_FIELD_UDP_ENCAP_ESP_SPI): + p_Extract->extractByHdr.type = e_FM_PCD_EXTRACT_FROM_HDR; + p_Extract->extractByHdr.hdr = FmPcdGetAliasHdr(p_FmPcd, p_Scheme->netEnvId, HEADER_TYPE_UDP_ENCAP_ESP); + p_Extract->extractByHdr.extractByHdrType.fromHdr.size = ESP_SPI_SIZE; + p_Extract->extractByHdr.extractByHdrType.fromHdr.offset = ESP_SPI_OFFSET; + p_Extract->extractByHdr.ignoreProtocolValidation = TRUE; + break; + case (NET_HEADER_FIELD_UDP_ENCAP_ESP_SEQUENCE_NUM): + p_Extract->extractByHdr.type = e_FM_PCD_EXTRACT_FROM_HDR; + p_Extract->extractByHdr.hdr = FmPcdGetAliasHdr(p_FmPcd, p_Scheme->netEnvId, HEADER_TYPE_UDP_ENCAP_ESP); + p_Extract->extractByHdr.extractByHdrType.fromHdr.size = ESP_SEQ_NUM_SIZE; + p_Extract->extractByHdr.extractByHdrType.fromHdr.offset = ESP_SEQ_NUM_OFFSET; + p_Extract->extractByHdr.ignoreProtocolValidation = TRUE; + break; + } + break; + } + break; + default: + break; + } + switch (p_Extract->extractByHdr.type) + { + case (e_FM_PCD_EXTRACT_FROM_HDR): + generic = TRUE; + /* get the header code for the generic extract */ + code = GetGenHdrCode(p_Extract->extractByHdr.hdr, p_Extract->extractByHdr.hdrIndex, p_Extract->extractByHdr.ignoreProtocolValidation); + /* set generic register fields */ + offset = p_Extract->extractByHdr.extractByHdrType.fromHdr.offset; + size = 
p_Extract->extractByHdr.extractByHdrType.fromHdr.size; + break; + case (e_FM_PCD_EXTRACT_FROM_FIELD): + generic = TRUE; + /* get the field code for the generic extract */ + code = GetGenFieldCode(p_Extract->extractByHdr.hdr, + p_Extract->extractByHdr.extractByHdrType.fromField.field, p_Extract->extractByHdr.ignoreProtocolValidation,p_Extract->extractByHdr.hdrIndex); + offset = p_Extract->extractByHdr.extractByHdrType.fromField.offset; + size = p_Extract->extractByHdr.extractByHdrType.fromField.size; + break; + case (e_FM_PCD_EXTRACT_FULL_FIELD): + if (!p_Extract->extractByHdr.ignoreProtocolValidation) + { + /* if we have a known field for it - use it, otherwise use generic */ + bitMask = GetKnownProtMask(p_FmPcd, p_Extract->extractByHdr.hdr, p_Extract->extractByHdr.hdrIndex, + p_Extract->extractByHdr.extractByHdrType.fullField); + if (bitMask) + { + knownTmp |= bitMask; + /* save in driver structure */ + p_LocalExtractsArray->extractsArray[i].id = GetKnownFieldId(bitMask); + p_LocalExtractsArray->extractsArray[i].known = TRUE; + } + else + generic = TRUE; + + } + else + generic = TRUE; + if (generic) + { + /* tmp - till we cover more headers under generic */ + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("Full header selection not supported")); + } + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG); + } + break; + case (e_FM_PCD_EXTRACT_NON_HDR): + /* use generic */ + generic = TRUE; + offset = 0; + /* get the field code for the generic extract */ + code = GetGenCode(p_Extract->extractNonHdr.src, &offset); + offset += p_Extract->extractNonHdr.offset; + size = p_Extract->extractNonHdr.size; + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG); + } + + if (generic) + { + /* set generic register fields */ + if (currGenId >= FM_KG_NUM_OF_GENERIC_REGS) + RETURN_ERROR(MAJOR, E_FULL, ("Generic registers are fully used")); + if (!code) + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, NO_MSG); + + genTmp = KG_SCH_GEN_VALID; + genTmp |= (uint32_t)(code 
<< KG_SCH_GEN_HT_SHIFT); + genTmp |= offset; + if ((size > MAX_KG_SCH_SIZE) || (size < 1)) + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal extraction (size out of range)")); + genTmp |= (uint32_t)((size - 1) << KG_SCH_GEN_SIZE_SHIFT); + swDefault = GetGenericSwDefault(swDefaults, numOfSwDefaults, code); + if (swDefault == e_FM_PCD_KG_DFLT_ILLEGAL) + DBG(WARNING, ("No sw default configured")); + + genTmp |= swDefault << KG_SCH_GEN_DEF_SHIFT; + genTmp |= KG_SCH_GEN_MASK; + p_SchemeRegs->kgse_gec[currGenId] = genTmp; + /* save in driver structure */ + p_LocalExtractsArray->extractsArray[i].id = currGenId++; + p_LocalExtractsArray->extractsArray[i].known = FALSE; + generic = FALSE; + } + } + p_SchemeRegs->kgse_ekfc = knownTmp; + + selectTmp = 0; + maskTmp = 0xFFFFFFFF; + /* configure kgse_bmch, kgse_bmcl and kgse_fqb */ + + if (p_KeyAndHash->numOfUsedMasks >= FM_PCD_KG_NUM_OF_EXTRACT_MASKS) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Only %d masks supported", FM_PCD_KG_NUM_OF_EXTRACT_MASKS)); + for ( i=0 ;i<p_KeyAndHash->numOfUsedMasks ; i++) + { + /* Get the relative id of the extract (for known 0-0x1f, for generic 0-7) */ + id = p_LocalExtractsArray->extractsArray[p_KeyAndHash->masks[i].extractArrayIndex].id; + /* Get the shift of the select field (depending on i) */ + GET_MASK_SEL_SHIFT(shift,i); + if (p_LocalExtractsArray->extractsArray[p_KeyAndHash->masks[i].extractArrayIndex].known) + selectTmp |= id << shift; + else + selectTmp |= (id + MASK_FOR_GENERIC_BASE_ID) << shift; + + /* Get the shift of the offset field (depending on i) - may + be in kgse_bmch or in kgse_fqb (depending on i) */ + GET_MASK_OFFSET_SHIFT(shift,i); + if (i<=1) + selectTmp |= p_KeyAndHash->masks[i].offset << shift; + else + fqbTmp |= p_KeyAndHash->masks[i].offset << shift; + + /* Get the shift of the mask field (depending on i) */ + GET_MASK_SHIFT(shift,i); + /* pass all bits */ + maskTmp |= KG_SCH_BITMASK_MASK << shift; + /* clear bits that need masking */ + maskTmp &= ~(0xFF << shift) ; + /* 
set mask bits */ + maskTmp |= (p_KeyAndHash->masks[i].mask << shift) ; + } + p_SchemeRegs->kgse_bmch = selectTmp; + p_SchemeRegs->kgse_bmcl = maskTmp; + /* kgse_fqb will be written t the end of the routine */ + + /* configure kgse_hc */ + if (p_KeyAndHash->hashShift > MAX_HASH_SHIFT) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("hashShift must not be larger than %d", MAX_HASH_SHIFT)); + if (p_KeyAndHash->hashDistributionFqidsShift > MAX_DIST_FQID_SHIFT) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("hashDistributionFqidsShift must not be larger than %d", MAX_DIST_FQID_SHIFT)); + + tmpReg = 0; + + tmpReg |= ((p_KeyAndHash->hashDistributionNumOfFqids - 1) << p_KeyAndHash->hashDistributionFqidsShift); + tmpReg |= p_KeyAndHash->hashShift << KG_SCH_HASH_CONFIG_SHIFT_SHIFT; + + if (p_KeyAndHash->symmetricHash) + { + if ((!!(p_SchemeRegs->kgse_ekfc & KG_SCH_KN_MACSRC) != !!(p_SchemeRegs->kgse_ekfc & KG_SCH_KN_MACDST)) || + (!!(p_SchemeRegs->kgse_ekfc & KG_SCH_KN_IPSRC1) != !!(p_SchemeRegs->kgse_ekfc & KG_SCH_KN_IPDST1)) || + (!!(p_SchemeRegs->kgse_ekfc & KG_SCH_KN_IPSRC2) != !!(p_SchemeRegs->kgse_ekfc & KG_SCH_KN_IPDST2)) || + (!!(p_SchemeRegs->kgse_ekfc & KG_SCH_KN_L4PSRC) != !!(p_SchemeRegs->kgse_ekfc & KG_SCH_KN_L4PDST))) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("symmetricHash set but src/dest extractions missing")); + tmpReg |= KG_SCH_HASH_CONFIG_SYM; + } + p_SchemeRegs->kgse_hc = tmpReg; + + /* build the return array describing the order of the extractions */ + + /* the last currGenId places of the array + are for generic extracts that are always last. + We now sort for the calculation of the order of the known + extractions we sort the known extracts between orderedArray[0] and + orderedArray[p_KeyAndHash->numOfUsedExtracts - currGenId - 1]. 
+ for the calculation of the order of the generic extractions we use: + num_of_generic - currGenId + num_of_known - p_KeyAndHash->numOfUsedExtracts - currGenId + first_generic_index = num_of_known */ + curr = 0; + for (i=0;i<p_KeyAndHash->numOfUsedExtracts ; i++) + { + if (p_LocalExtractsArray->extractsArray[i].known) + { + ASSERT_COND(curr<(p_KeyAndHash->numOfUsedExtracts - currGenId)); + j = curr; + /* id is the extract id (port id = 0, mac src = 1 etc.). the value in the array is the original + index in the user's extractions array */ + /* we compare the id of the current extract with the id of the extract in the orderedArray[j-1] + location */ + while ((j > 0) && (p_LocalExtractsArray->extractsArray[i].id < + p_LocalExtractsArray->extractsArray[p_Scheme->orderedArray[j-1]].id)) + { + p_Scheme->orderedArray[j] = + p_Scheme->orderedArray[j-1]; + j--; + } + p_Scheme->orderedArray[j] = (uint8_t)i; + curr++; + } + else + { + /* index is first_generic_index + generic index (id) */ + idx = (uint8_t)(p_KeyAndHash->numOfUsedExtracts - currGenId + p_LocalExtractsArray->extractsArray[i].id); + ASSERT_COND(idx < FM_PCD_KG_MAX_NUM_OF_EXTRACTS_PER_KEY); + p_Scheme->orderedArray[idx]= (uint8_t)i; + } + } + XX_Free(p_LocalExtractsArray); + p_LocalExtractsArray = NULL; + + } + else + { + /* clear all unused registers: */ + p_SchemeRegs->kgse_ekfc = 0; + p_SchemeRegs->kgse_ekdv = 0; + p_SchemeRegs->kgse_bmch = 0; + p_SchemeRegs->kgse_bmcl = 0; + p_SchemeRegs->kgse_hc = 0; + p_SchemeRegs->kgse_dv0 = 0; + p_SchemeRegs->kgse_dv1 = 0; + } + + if (p_SchemeParams->bypassFqidGeneration) + p_SchemeRegs->kgse_hc |= KG_SCH_HASH_CONFIG_NO_FQID; + + /* configure kgse_spc */ + if ( p_SchemeParams->schemeCounter.update) + p_SchemeRegs->kgse_spc = p_SchemeParams->schemeCounter.value; + + + /* check that are enough generic registers */ + if (p_SchemeParams->numOfUsedExtractedOrs + currGenId > FM_KG_NUM_OF_GENERIC_REGS) + RETURN_ERROR(MAJOR, E_FULL, ("Generic registers are fully used")); + + /* 
extracted OR mask on Qid */ + for ( i=0 ;i<p_SchemeParams->numOfUsedExtractedOrs ; i++) + { + + p_Scheme->extractedOrs = TRUE; + /* configure kgse_gec[i] */ + p_ExtractOr = &p_SchemeParams->extractedOrs[i]; + switch (p_ExtractOr->type) + { + case (e_FM_PCD_KG_EXTRACT_PORT_PRIVATE_INFO): + code = KG_SCH_GEN_PARSE_RESULT_N_FQID; + offset = 0; + break; + case (e_FM_PCD_EXTRACT_BY_HDR): + /* get the header code for the generic extract */ + code = GetGenHdrCode(p_ExtractOr->extractByHdr.hdr, p_ExtractOr->extractByHdr.hdrIndex, p_ExtractOr->extractByHdr.ignoreProtocolValidation); + /* set generic register fields */ + offset = p_ExtractOr->extractionOffset; + break; + case (e_FM_PCD_EXTRACT_NON_HDR): + /* get the field code for the generic extract */ + offset = 0; + code = GetGenCode(p_ExtractOr->src, &offset); + offset += p_ExtractOr->extractionOffset; + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG); + } + + /* set generic register fields */ + if (!code) + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, NO_MSG); + genTmp = KG_SCH_GEN_EXTRACT_TYPE | KG_SCH_GEN_VALID; + genTmp |= (uint32_t)(code << KG_SCH_GEN_HT_SHIFT); + genTmp |= offset; + if (!!p_ExtractOr->bitOffsetInFqid == !!p_ExtractOr->bitOffsetInPlcrProfile) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, (" extracted byte must effect either FQID or Policer profile")); + + /************************************************************************************ + bitOffsetInFqid and bitOffsetInPolicerProfile are translated to rotate parameter + in the following way: + + Driver API and implementation: + ============================== + FQID: extracted OR byte may be shifted right 1-31 bits to effect parts of the FQID. + if shifted less than 8 bits, or more than 24 bits a mask is set on the bits that + are not overlapping FQID. 
+ ------------------------ + | FQID (24) | + ------------------------ + -------- + | | extracted OR byte + -------- + + Policer Profile: extracted OR byte may be shifted right 1-15 bits to effect parts of the + PP id. Unless shifted exactly 8 bits to overlap the PP id, a mask is set on the bits that + are not overlapping PP id. + + -------- + | PP (8) | + -------- + -------- + | | extracted OR byte + -------- + + HW implementation + ================= + FQID and PP construct a 32 bit word in the way describe below. Extracted byte is located + as the highest byte of that word and may be rotated to effect any part os the FQID or + the PP. + ------------------------ -------- + | FQID (24) || PP (8) | + ------------------------ -------- + -------- + | | extracted OR byte + -------- + + ************************************************************************************/ + + if (p_ExtractOr->bitOffsetInFqid) + { + if (p_ExtractOr->bitOffsetInFqid > MAX_KG_SCH_FQID_BIT_OFFSET ) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal extraction (bitOffsetInFqid out of range)")); + if (p_ExtractOr->bitOffsetInFqid<8) + genTmp |= (uint32_t)((p_ExtractOr->bitOffsetInFqid+24) << KG_SCH_GEN_SIZE_SHIFT); + else + genTmp |= (uint32_t)((p_ExtractOr->bitOffsetInFqid-8) << KG_SCH_GEN_SIZE_SHIFT); + p_ExtractOr->mask &= GetExtractedOrMask(p_ExtractOr->bitOffsetInFqid, TRUE); + } + else /* effect policer profile */ + { + if (p_ExtractOr->bitOffsetInPlcrProfile > MAX_KG_SCH_PP_BIT_OFFSET ) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal extraction (bitOffsetInPlcrProfile out of range)")); + p_Scheme->bitOffsetInPlcrProfile = p_ExtractOr->bitOffsetInPlcrProfile; + genTmp |= (uint32_t)((p_ExtractOr->bitOffsetInPlcrProfile+16) << KG_SCH_GEN_SIZE_SHIFT); + p_ExtractOr->mask &= GetExtractedOrMask(p_ExtractOr->bitOffsetInPlcrProfile, FALSE); + } + + genTmp |= (uint32_t)(p_ExtractOr->extractionOffset << KG_SCH_GEN_DEF_SHIFT); + /* clear bits that need masking */ + genTmp &= ~KG_SCH_GEN_MASK ; + /* 
set mask bits */ + genTmp |= (uint32_t)(p_ExtractOr->mask << KG_SCH_GEN_MASK_SHIFT); + p_SchemeRegs->kgse_gec[currGenId++] = genTmp; + + } + /* clear all unused GEC registers */ + for ( i=currGenId ;i<FM_KG_NUM_OF_GENERIC_REGS ; i++) + p_SchemeRegs->kgse_gec[i] = 0; + + /* add base Qid for this scheme */ + /* add configuration for kgse_fqb */ + if (p_SchemeParams->baseFqid & ~0x00FFFFFF) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("baseFqid must be between 1 and 2^24-1")); + + fqbTmp |= p_SchemeParams->baseFqid; + p_SchemeRegs->kgse_fqb = fqbTmp; + + p_Scheme->nextEngine = p_SchemeParams->nextEngine; + p_Scheme->doneAction = p_SchemeParams->kgNextEngineParams.doneAction; + + return E_OK; +} + + +/*****************************************************************************/ +/* Inter-module API routines */ +/*****************************************************************************/ + +t_Error FmPcdKgBuildClsPlanGrp(t_Handle h_FmPcd, t_FmPcdKgInterModuleClsPlanGrpParams *p_Grp, t_FmPcdKgInterModuleClsPlanSet *p_ClsPlanSet) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + t_FmPcdKgClsPlanGrp *p_ClsPlanGrp; + t_FmPcdIpcKgClsPlanParams kgAlloc; + t_Error err = E_OK; + uint32_t oredVectors = 0; + int i, j; + + /* this routine is protected by the calling routine ! 
*/ + if (p_Grp->numOfOptions >= FM_PCD_MAX_NUM_OF_OPTIONS(FM_PCD_MAX_NUM_OF_CLS_PLANS)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE,("Too many classification plan basic options selected.")); + + /* find a new clsPlan group */ + for (i = 0; i < FM_MAX_NUM_OF_PORTS; i++) + if (!p_FmPcd->p_FmPcdKg->clsPlanGrps[i].used) + break; + if (i == FM_MAX_NUM_OF_PORTS) + RETURN_ERROR(MAJOR, E_FULL,("No classification plan groups available.")); + + p_FmPcd->p_FmPcdKg->clsPlanGrps[i].used = TRUE; + + p_Grp->clsPlanGrpId = (uint8_t)i; + + if (p_Grp->numOfOptions == 0) + p_FmPcd->p_FmPcdKg->emptyClsPlanGrpId = (uint8_t)i; + + p_ClsPlanGrp = &p_FmPcd->p_FmPcdKg->clsPlanGrps[i]; + p_ClsPlanGrp->netEnvId = p_Grp->netEnvId; + p_ClsPlanGrp->owners = 0; + FmPcdSetClsPlanGrpId(p_FmPcd, p_Grp->netEnvId, p_Grp->clsPlanGrpId); + if (p_Grp->numOfOptions != 0) + FmPcdIncNetEnvOwners(p_FmPcd, p_Grp->netEnvId); + + p_ClsPlanGrp->sizeOfGrp = (uint16_t)(1 << p_Grp->numOfOptions); + /* a minimal group of 8 is required */ + if (p_ClsPlanGrp->sizeOfGrp < CLS_PLAN_NUM_PER_GRP) + p_ClsPlanGrp->sizeOfGrp = CLS_PLAN_NUM_PER_GRP; + if (p_FmPcd->guestId == NCSW_MASTER_ID) + { + err = KgAllocClsPlanEntries(h_FmPcd, p_ClsPlanGrp->sizeOfGrp, p_FmPcd->guestId, &p_ClsPlanGrp->baseEntry); + + if (err) + RETURN_ERROR(MINOR, E_INVALID_STATE, NO_MSG); + } + else + { + t_FmPcdIpcMsg msg; + uint32_t replyLength; + t_FmPcdIpcReply reply; + + /* in GUEST_PARTITION, we use the IPC, to also set a private driver group if required */ + memset(&reply, 0, sizeof(reply)); + memset(&msg, 0, sizeof(msg)); + memset(&kgAlloc, 0, sizeof(kgAlloc)); + kgAlloc.guestId = p_FmPcd->guestId; + kgAlloc.numOfClsPlanEntries = p_ClsPlanGrp->sizeOfGrp; + msg.msgId = FM_PCD_ALLOC_KG_CLSPLAN; + memcpy(msg.msgBody, &kgAlloc, sizeof(kgAlloc)); + replyLength = (sizeof(uint32_t) + sizeof(p_ClsPlanGrp->baseEntry)); + if ((err = XX_IpcSendMessage(p_FmPcd->h_IpcSession, + (uint8_t*)&msg, + sizeof(msg.msgId) + sizeof(kgAlloc), + (uint8_t*)&reply, + 
&replyLength, + NULL, + NULL)) != E_OK) + RETURN_ERROR(MAJOR, err, NO_MSG); + + if (replyLength != (sizeof(uint32_t) + sizeof(p_ClsPlanGrp->baseEntry))) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch")); + if ((t_Error)reply.error != E_OK) + RETURN_ERROR(MINOR, (t_Error)reply.error, NO_MSG); + + p_ClsPlanGrp->baseEntry = *(uint8_t*)(reply.replyBody); + } + + /* build classification plan entries parameters */ + p_ClsPlanSet->baseEntry = p_ClsPlanGrp->baseEntry; + p_ClsPlanSet->numOfClsPlanEntries = p_ClsPlanGrp->sizeOfGrp; + + oredVectors = 0; + for (i = 0; i<p_Grp->numOfOptions; i++) + { + oredVectors |= p_Grp->optVectors[i]; + /* save an array of used options - the indexes represent the power of 2 index */ + p_ClsPlanGrp->optArray[i] = p_Grp->options[i]; + } + /* set the classification plan relevant entries so that all bits + * relevant to the list of options is cleared + */ + for (j = 0; j<p_ClsPlanGrp->sizeOfGrp; j++) + p_ClsPlanSet->vectors[j] = ~oredVectors; + + for (i = 0; i<p_Grp->numOfOptions; i++) + { + /* option i got the place 2^i in the clsPlan array. all entries that + * have bit i set, should have the vector bit cleared. So each option + * has one location that it is exclusive (1,2,4,8...) and represent the + * presence of that option only, and other locations that represent a + * combination of options. + * e.g: + * If ethernet-BC is option 1 it gets entry 2 in the table. Entry 2 + * now represents a frame with ethernet-BC header - so the bit + * representing ethernet-BC should be set and all other option bits + * should be cleared. + * Entries 2,3,6,7,10... also have ethernet-BC and therefore have bit + * vector[1] set, but they also have other bits set: + * 3=1+2, options 0 and 1 + * 6=2+4, options 1 and 2 + * 7=1+2+4, options 0,1,and 2 + * 10=2+8, options 1 and 3 + * etc. + * */ + + /* now for each option (i), we set their bits in all entries (j) + * that contain bit 2^i. 
+ */ + for (j = 0; j<p_ClsPlanGrp->sizeOfGrp; j++) + { + if (j & (1<<i)) + p_ClsPlanSet->vectors[j] |= p_Grp->optVectors[i]; + } + } + + return E_OK; +} + +void FmPcdKgDestroyClsPlanGrp(t_Handle h_FmPcd, uint8_t grpId) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + t_FmPcdIpcKgClsPlanParams kgAlloc; + t_Error err; + t_FmPcdIpcMsg msg; + uint32_t replyLength; + t_FmPcdIpcReply reply; + + /* check that no port is bound to this clsPlan */ + if (p_FmPcd->p_FmPcdKg->clsPlanGrps[grpId].owners) + { + REPORT_ERROR(MINOR, E_INVALID_STATE, ("Trying to delete a clsPlan grp that has ports bound to")); + return; + } + + FmPcdSetClsPlanGrpId(p_FmPcd, p_FmPcd->p_FmPcdKg->clsPlanGrps[grpId].netEnvId, ILLEGAL_CLS_PLAN); + + if (grpId == p_FmPcd->p_FmPcdKg->emptyClsPlanGrpId) + p_FmPcd->p_FmPcdKg->emptyClsPlanGrpId = ILLEGAL_CLS_PLAN; + else + FmPcdDecNetEnvOwners(p_FmPcd, p_FmPcd->p_FmPcdKg->clsPlanGrps[grpId].netEnvId); + + /* free blocks */ + if (p_FmPcd->guestId == NCSW_MASTER_ID) + KgFreeClsPlanEntries(h_FmPcd, + p_FmPcd->p_FmPcdKg->clsPlanGrps[grpId].sizeOfGrp, + p_FmPcd->guestId, + p_FmPcd->p_FmPcdKg->clsPlanGrps[grpId].baseEntry); + else /* in GUEST_PARTITION, we use the IPC, to also set a private driver group if required */ + { + memset(&reply, 0, sizeof(reply)); + memset(&msg, 0, sizeof(msg)); + kgAlloc.guestId = p_FmPcd->guestId; + kgAlloc.numOfClsPlanEntries = p_FmPcd->p_FmPcdKg->clsPlanGrps[grpId].sizeOfGrp; + kgAlloc.clsPlanBase = p_FmPcd->p_FmPcdKg->clsPlanGrps[grpId].baseEntry; + msg.msgId = FM_PCD_FREE_KG_CLSPLAN; + memcpy(msg.msgBody, &kgAlloc, sizeof(kgAlloc)); + replyLength = sizeof(uint32_t); + err = XX_IpcSendMessage(p_FmPcd->h_IpcSession, + (uint8_t*)&msg, + sizeof(msg.msgId) + sizeof(kgAlloc), + (uint8_t*)&reply, + &replyLength, + NULL, + NULL); + if (err != E_OK) + { + REPORT_ERROR(MINOR, err, NO_MSG); + return; + } + if (replyLength != sizeof(uint32_t)) + { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch")); + return; + } + if 
((t_Error)reply.error != E_OK) + { + REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Free KG clsPlan failed")); + return; + } + } + + /* clear clsPlan driver structure */ + memset(&p_FmPcd->p_FmPcdKg->clsPlanGrps[grpId], 0, sizeof(t_FmPcdKgClsPlanGrp)); +} + +t_Error FmPcdKgBuildBindPortToSchemes(t_Handle h_FmPcd , t_FmPcdKgInterModuleBindPortToSchemes *p_BindPort, uint32_t *p_SpReg, bool add) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + uint32_t j, schemesPerPortVector = 0; + t_FmPcdKgScheme *p_Scheme; + uint8_t i, relativeSchemeId; + uint32_t tmp, walking1Mask; + uint8_t swPortIndex = 0; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdKg, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE); + + /* for each scheme */ + for (i = 0; i<p_BindPort->numOfSchemes; i++) + { + relativeSchemeId = FmPcdKgGetRelativeSchemeId(p_FmPcd, p_BindPort->schemesIds[i]); + if (relativeSchemeId >= FM_PCD_KG_NUM_OF_SCHEMES) + RETURN_ERROR(MAJOR, E_NOT_IN_RANGE, NO_MSG); + + if (add) + { + p_Scheme = &p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId]; + if (!FmPcdKgIsSchemeValidSw(p_Scheme)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Requested scheme is invalid.")); + /* check netEnvId of the port against the scheme netEnvId */ + if ((p_Scheme->netEnvId != p_BindPort->netEnvId) && (p_Scheme->netEnvId != ILLEGAL_NETENV)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Port may not be bound to requested scheme - differ in netEnvId")); + + /* if next engine is private port policer profile, we need to check that it is valid */ + HW_PORT_ID_TO_SW_PORT_INDX(swPortIndex, p_BindPort->hardwarePortId); + if (p_Scheme->nextRelativePlcrProfile) + { + for (j = 0;j<p_Scheme->numOfProfiles;j++) + { + ASSERT_COND(p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].h_FmPort); + if (p_Scheme->relativeProfileId+j >= p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].numOfProfiles) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Relative profile 
not in range")); + if (!FmPcdPlcrIsProfileValid(p_FmPcd, (uint16_t)(p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].profilesBase + p_Scheme->relativeProfileId + j))) + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Relative profile not valid.")); + } + } + if (!p_BindPort->useClsPlan) + { + /* This check may be redundant as port is a assigned to the whole NetEnv */ + + /* if this port does not use clsPlan, it may not be bound to schemes with units that contain + cls plan options. Schemes that are used only directly, should not be checked. + it also may not be bound to schemes that go to CC with units that are options - so we OR + the match vector and the grpBits (= ccUnits) */ + if ((p_Scheme->matchVector != SCHEME_ALWAYS_DIRECT) || p_Scheme->ccUnits) + { + walking1Mask = 0x80000000; + tmp = (p_Scheme->matchVector == SCHEME_ALWAYS_DIRECT)? 0:p_Scheme->matchVector; + tmp |= p_Scheme->ccUnits; + while (tmp) + { + if (tmp & walking1Mask) + { + tmp &= ~walking1Mask; + if (!PcdNetEnvIsUnitWithoutOpts(p_FmPcd, p_Scheme->netEnvId, walking1Mask)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Port (without clsPlan) may not be bound to requested scheme - uses clsPlan options")); + } + walking1Mask >>= 1; + } + } + } + } + /* build vector */ + schemesPerPortVector |= 1 << (31 - p_BindPort->schemesIds[i]); + } + + *p_SpReg = schemesPerPortVector; + + return E_OK; +} + +t_Error FmPcdKgBindPortToSchemes(t_Handle h_FmPcd , t_FmPcdKgInterModuleBindPortToSchemes *p_SchemeBind) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + uint32_t spReg; + t_Error err = E_OK; + + err = FmPcdKgBuildBindPortToSchemes(h_FmPcd, p_SchemeBind, &spReg, TRUE); + if (err) + RETURN_ERROR(MAJOR, err, NO_MSG); + + err = KgWriteSp(p_FmPcd, p_SchemeBind->hardwarePortId, spReg, TRUE); + if (err) + RETURN_ERROR(MAJOR, err, NO_MSG); + + IncSchemeOwners(p_FmPcd, p_SchemeBind); + + return E_OK; +} + +t_Error FmPcdKgUnbindPortToSchemes(t_Handle h_FmPcd, t_FmPcdKgInterModuleBindPortToSchemes *p_SchemeBind) +{ + t_FmPcd *p_FmPcd = 
(t_FmPcd*)h_FmPcd; + uint32_t spReg; + t_Error err = E_OK; + + err = FmPcdKgBuildBindPortToSchemes(p_FmPcd, p_SchemeBind, &spReg, FALSE); + if (err) + RETURN_ERROR(MAJOR, err, NO_MSG); + + err = KgWriteSp(p_FmPcd, p_SchemeBind->hardwarePortId, spReg, FALSE); + if (err) + RETURN_ERROR(MAJOR, err, NO_MSG); + + DecSchemeOwners(p_FmPcd, p_SchemeBind); + + return E_OK; +} + +bool FmPcdKgIsSchemeValidSw(t_Handle h_Scheme) +{ + t_FmPcdKgScheme *p_Scheme = (t_FmPcdKgScheme*)h_Scheme; + + return p_Scheme->valid; +} + +bool KgIsSchemeAlwaysDirect(t_Handle h_FmPcd, uint8_t schemeId) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + + if (p_FmPcd->p_FmPcdKg->schemes[schemeId].matchVector == SCHEME_ALWAYS_DIRECT) + return TRUE; + else + return FALSE; +} + +t_Error FmPcdKgAllocSchemes(t_Handle h_FmPcd, uint8_t numOfSchemes, uint8_t guestId, uint8_t *p_SchemesIds) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; + uint8_t i, j; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdKg, E_INVALID_HANDLE); + + /* This routine is issued only on master core of master partition - + either directly or through IPC, so no need for lock */ + + for (j = 0, i = 0; i < FM_PCD_KG_NUM_OF_SCHEMES && j < numOfSchemes; i++) + { + if (!p_FmPcd->p_FmPcdKg->schemesMng[i].allocated) + { + p_FmPcd->p_FmPcdKg->schemesMng[i].allocated = TRUE; + p_FmPcd->p_FmPcdKg->schemesMng[i].ownerId = guestId; + p_SchemesIds[j] = i; + j++; + } + } + + if (j != numOfSchemes) + { + /* roll back */ + for (j--; j; j--) + { + p_FmPcd->p_FmPcdKg->schemesMng[p_SchemesIds[j]].allocated = FALSE; + p_FmPcd->p_FmPcdKg->schemesMng[p_SchemesIds[j]].ownerId = 0; + p_SchemesIds[j] = 0; + } + + RETURN_ERROR(MAJOR, E_NOT_AVAILABLE, ("No schemes found")); + } + + return E_OK; +} + +t_Error FmPcdKgFreeSchemes(t_Handle h_FmPcd, uint8_t numOfSchemes, uint8_t guestId, uint8_t *p_SchemesIds) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; + uint8_t i; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, 
E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdKg, E_INVALID_HANDLE); + + /* This routine is issued only on master core of master partition - + either directly or through IPC */ + + for (i = 0; i < numOfSchemes; i++) + { + if (!p_FmPcd->p_FmPcdKg->schemesMng[p_SchemesIds[i]].allocated) + { + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Scheme was not previously allocated")); + } + if (p_FmPcd->p_FmPcdKg->schemesMng[p_SchemesIds[i]].ownerId != guestId) + { + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Scheme is not owned by caller. ")); + } + p_FmPcd->p_FmPcdKg->schemesMng[p_SchemesIds[i]].allocated = FALSE; + p_FmPcd->p_FmPcdKg->schemesMng[p_SchemesIds[i]].ownerId = 0; + } + + return E_OK; +} + +t_Error KgAllocClsPlanEntries(t_Handle h_FmPcd, uint16_t numOfClsPlanEntries, uint8_t guestId, uint8_t *p_First) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; + uint8_t numOfBlocks, blocksFound=0, first=0; + uint8_t i, j; + + /* This routine is issued only on master core of master partition - + either directly or through IPC, so no need for lock */ + + if (!numOfClsPlanEntries) + return E_OK; + + if ((numOfClsPlanEntries % CLS_PLAN_NUM_PER_GRP) || (!POWER_OF_2(numOfClsPlanEntries))) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("numOfClsPlanEntries must be a power of 2 and divisible by 8")); + + numOfBlocks = (uint8_t)(numOfClsPlanEntries/CLS_PLAN_NUM_PER_GRP); + + /* try to find consequent blocks */ + first = 0; + for (i = 0; i < FM_PCD_MAX_NUM_OF_CLS_PLANS/CLS_PLAN_NUM_PER_GRP;) + { + if (!p_FmPcd->p_FmPcdKg->clsPlanBlocksMng[i].allocated) + { + blocksFound++; + i++; + if (blocksFound == numOfBlocks) + break; + } + else + { + blocksFound = 0; + /* advance i to the next aligned address */ + first = i = (uint8_t)(first + numOfBlocks); + } + } + + if (blocksFound == numOfBlocks) + { + *p_First = (uint8_t)(first * CLS_PLAN_NUM_PER_GRP); + for (j = first; j < (first + numOfBlocks); j++) + { + p_FmPcd->p_FmPcdKg->clsPlanBlocksMng[j].allocated = TRUE; + 
p_FmPcd->p_FmPcdKg->clsPlanBlocksMng[j].ownerId = guestId; + } + return E_OK; + } + else + RETURN_ERROR(MINOR, E_FULL, ("No resources for clsPlan")); +} + +void KgFreeClsPlanEntries(t_Handle h_FmPcd, uint16_t numOfClsPlanEntries, uint8_t guestId, uint8_t base) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + uint8_t numOfBlocks; + uint8_t i, baseBlock; + +#ifdef DISABLE_ASSERTIONS +UNUSED(guestId); +#endif /* DISABLE_ASSERTIONS */ + + /* This routine is issued only on master core of master partition - + either directly or through IPC, so no need for lock */ + + numOfBlocks = (uint8_t)(numOfClsPlanEntries/CLS_PLAN_NUM_PER_GRP); + ASSERT_COND(!(base%CLS_PLAN_NUM_PER_GRP)); + + baseBlock = (uint8_t)(base/CLS_PLAN_NUM_PER_GRP); + for (i=baseBlock;i<baseBlock+numOfBlocks;i++) + { + ASSERT_COND(p_FmPcd->p_FmPcdKg->clsPlanBlocksMng[i].allocated); + ASSERT_COND(guestId == p_FmPcd->p_FmPcdKg->clsPlanBlocksMng[i].ownerId); + p_FmPcd->p_FmPcdKg->clsPlanBlocksMng[i].allocated = FALSE; + p_FmPcd->p_FmPcdKg->clsPlanBlocksMng[i].ownerId = 0; + } +} + +void KgEnable(t_FmPcd *p_FmPcd) +{ + struct fman_kg_regs *p_Regs = p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs; + + ASSERT_COND(FmIsMaster(p_FmPcd->h_Fm)); + fman_kg_enable(p_Regs); +} + +void KgDisable(t_FmPcd *p_FmPcd) +{ + struct fman_kg_regs *p_Regs = p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs; + + ASSERT_COND(FmIsMaster(p_FmPcd->h_Fm)); + fman_kg_disable(p_Regs); +} + +void KgSetClsPlan(t_Handle h_FmPcd, t_FmPcdKgInterModuleClsPlanSet *p_Set) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; + struct fman_kg_cp_regs *p_FmPcdKgPortRegs; + uint32_t tmpKgarReg = 0, intFlags; + uint16_t i, j; + + /* This routine is protected by the calling routine ! 
*/ + ASSERT_COND(FmIsMaster(p_FmPcd->h_Fm)); + p_FmPcdKgPortRegs = &p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->clsPlanRegs; + + intFlags = KgHwLock(p_FmPcd->p_FmPcdKg); + for (i=p_Set->baseEntry;i<p_Set->baseEntry+p_Set->numOfClsPlanEntries;i+=8) + { + tmpKgarReg = FmPcdKgBuildWriteClsPlanBlockActionReg((uint8_t)(i / CLS_PLAN_NUM_PER_GRP)); + + for (j = i; j < i+8; j++) + { + ASSERT_COND(IN_RANGE(0, (j - p_Set->baseEntry), FM_PCD_MAX_NUM_OF_CLS_PLANS-1)); + WRITE_UINT32(p_FmPcdKgPortRegs->kgcpe[j % CLS_PLAN_NUM_PER_GRP],p_Set->vectors[j - p_Set->baseEntry]); + } + + if (WriteKgarWait(p_FmPcd, tmpKgarReg) != E_OK) + { + REPORT_ERROR(MAJOR, E_INVALID_STATE, ("WriteKgarWait FAILED")); + KgHwUnlock(p_FmPcd->p_FmPcdKg, intFlags); + return; + } + } + KgHwUnlock(p_FmPcd->p_FmPcdKg, intFlags); +} + +t_Handle KgConfig( t_FmPcd *p_FmPcd, t_FmPcdParams *p_FmPcdParams) +{ + t_FmPcdKg *p_FmPcdKg; + + UNUSED(p_FmPcd); + + if (p_FmPcdParams->numOfSchemes > FM_PCD_KG_NUM_OF_SCHEMES) + { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, + ("numOfSchemes should not exceed %d", FM_PCD_KG_NUM_OF_SCHEMES)); + return NULL; + } + + p_FmPcdKg = (t_FmPcdKg *)XX_Malloc(sizeof(t_FmPcdKg)); + if (!p_FmPcdKg) + { + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM Keygen allocation FAILED")); + return NULL; + } + memset(p_FmPcdKg, 0, sizeof(t_FmPcdKg)); + + + if (FmIsMaster(p_FmPcd->h_Fm)) + { + p_FmPcdKg->p_FmPcdKgRegs = (struct fman_kg_regs *)UINT_TO_PTR(FmGetPcdKgBaseAddr(p_FmPcdParams->h_Fm)); + p_FmPcd->exceptions |= DEFAULT_fmPcdKgErrorExceptions; + p_FmPcdKg->p_IndirectAccessRegs = (u_FmPcdKgIndirectAccessRegs *)&p_FmPcdKg->p_FmPcdKgRegs->fmkg_indirect[0]; + } + + p_FmPcdKg->numOfSchemes = p_FmPcdParams->numOfSchemes; + if ((p_FmPcd->guestId == NCSW_MASTER_ID) && !p_FmPcdKg->numOfSchemes) + { + p_FmPcdKg->numOfSchemes = FM_PCD_KG_NUM_OF_SCHEMES; + DBG(WARNING, ("numOfSchemes was defined 0 by user, re-defined by driver to FM_PCD_KG_NUM_OF_SCHEMES")); + } + + p_FmPcdKg->emptyClsPlanGrpId = ILLEGAL_CLS_PLAN; + + 
return p_FmPcdKg; +} + +t_Error KgInit(t_FmPcd *p_FmPcd) +{ + t_Error err = E_OK; + + p_FmPcd->p_FmPcdKg->h_HwSpinlock = XX_InitSpinlock(); + if (!p_FmPcd->p_FmPcdKg->h_HwSpinlock) + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("FM KG HW spinlock")); + + if (p_FmPcd->guestId == NCSW_MASTER_ID) + err = KgInitMaster(p_FmPcd); + else + err = KgInitGuest(p_FmPcd); + + if (err != E_OK) + { + if (p_FmPcd->p_FmPcdKg->h_HwSpinlock) + XX_FreeSpinlock(p_FmPcd->p_FmPcdKg->h_HwSpinlock); + } + + return err; +} + +t_Error KgFree(t_FmPcd *p_FmPcd) +{ + t_FmPcdIpcKgSchemesParams kgAlloc; + t_Error err = E_OK; + t_FmPcdIpcMsg msg; + uint32_t replyLength; + t_FmPcdIpcReply reply; + + FmUnregisterIntr(p_FmPcd->h_Fm, e_FM_MOD_KG, 0, e_FM_INTR_TYPE_ERR); + + if (p_FmPcd->guestId == NCSW_MASTER_ID) + { + err = FmPcdKgFreeSchemes(p_FmPcd, + p_FmPcd->p_FmPcdKg->numOfSchemes, + p_FmPcd->guestId, + p_FmPcd->p_FmPcdKg->schemesIds); + if (err) + RETURN_ERROR(MAJOR, err, NO_MSG); + + if (p_FmPcd->p_FmPcdKg->h_HwSpinlock) + XX_FreeSpinlock(p_FmPcd->p_FmPcdKg->h_HwSpinlock); + + return E_OK; + } + + /* guest */ + memset(&reply, 0, sizeof(reply)); + memset(&msg, 0, sizeof(msg)); + kgAlloc.numOfSchemes = p_FmPcd->p_FmPcdKg->numOfSchemes; + kgAlloc.guestId = p_FmPcd->guestId; + ASSERT_COND(kgAlloc.numOfSchemes < FM_PCD_KG_NUM_OF_SCHEMES); + memcpy(kgAlloc.schemesIds, p_FmPcd->p_FmPcdKg->schemesIds, (sizeof(uint8_t))*kgAlloc.numOfSchemes); + msg.msgId = FM_PCD_FREE_KG_SCHEMES; + memcpy(msg.msgBody, &kgAlloc, sizeof(kgAlloc)); + replyLength = sizeof(uint32_t); + if ((err = XX_IpcSendMessage(p_FmPcd->h_IpcSession, + (uint8_t*)&msg, + sizeof(msg.msgId) + sizeof(kgAlloc), + (uint8_t*)&reply, + &replyLength, + NULL, + NULL)) != E_OK) + RETURN_ERROR(MAJOR, err, NO_MSG); + if (replyLength != sizeof(uint32_t)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch")); + + if (p_FmPcd->p_FmPcdKg->h_HwSpinlock) + XX_FreeSpinlock(p_FmPcd->p_FmPcdKg->h_HwSpinlock); + + return (t_Error)reply.error; +} + 
/* Bind a port to the classification-plan group of its NetEnv, creating the
 * group (SW build + HW write, via HC when available) on first use. On
 * success *p_ClsPlanGrpId, *p_IsEmptyClsPlanGrp and p_OptArray describe the
 * group the port was bound to. */
t_Error FmPcdKgSetOrBindToClsPlanGrp(t_Handle h_FmPcd, uint8_t hardwarePortId, uint8_t netEnvId, protocolOpt_t *p_OptArray, uint8_t *p_ClsPlanGrpId, bool *p_IsEmptyClsPlanGrp)
{
    t_FmPcd                                 *p_FmPcd = (t_FmPcd *)h_FmPcd;
    t_FmPcdKgInterModuleClsPlanGrpParams    grpParams, *p_GrpParams;
    t_FmPcdKgClsPlanGrp                     *p_ClsPlanGrp;
    t_FmPcdKgInterModuleClsPlanSet          *p_ClsPlanSet;
    t_Error                                 err;

    /* This function is issued only from FM_PORT_SetPcd which locked all PCD modules,
       so no need for lock here */

    memset(&grpParams, 0, sizeof(grpParams));
    grpParams.clsPlanGrpId = ILLEGAL_CLS_PLAN;
    p_GrpParams = &grpParams;

    p_GrpParams->netEnvId = netEnvId;

    /* Get from the NetEnv the information of the clsPlan (can be already created,
     * or needs to build) */
    err = PcdGetClsPlanGrpParams(h_FmPcd, p_GrpParams);
    if (err)
        RETURN_ERROR(MINOR,err,NO_MSG);

    if (p_GrpParams->grpExists)
    {
        /* this group was already updated (at least) in SW */
        *p_ClsPlanGrpId = p_GrpParams->clsPlanGrpId;
    }
    else
    {
        p_ClsPlanSet = (t_FmPcdKgInterModuleClsPlanSet *)XX_Malloc(sizeof(t_FmPcdKgInterModuleClsPlanSet));
        if (!p_ClsPlanSet)
            RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Classification plan set"));
        memset(p_ClsPlanSet, 0, sizeof(t_FmPcdKgInterModuleClsPlanSet));
        /* Build (in SW) the clsPlan parameters, including the vectors to be written to HW */
        err = FmPcdKgBuildClsPlanGrp(h_FmPcd, p_GrpParams, p_ClsPlanSet);
        if (err)
        {
            XX_Free(p_ClsPlanSet);
            RETURN_ERROR(MINOR, err, NO_MSG);
        }
        *p_ClsPlanGrpId = p_GrpParams->clsPlanGrpId;

        if (p_FmPcd->h_Hc)
        {
            /* write clsPlan entries to memory through the host command */
            err = FmHcPcdKgSetClsPlan(p_FmPcd->h_Hc, p_ClsPlanSet);
            if (err)
            {
                XX_Free(p_ClsPlanSet);
                RETURN_ERROR(MAJOR, err, NO_MSG);
            }
        }
        else
            /* write clsPlan entries to memory */
            KgSetClsPlan(p_FmPcd, p_ClsPlanSet);

        XX_Free(p_ClsPlanSet);
    }

    /* Set caller parameters */

    /* mark if this is an empty classification group */
    if (*p_ClsPlanGrpId == p_FmPcd->p_FmPcdKg->emptyClsPlanGrpId)
        *p_IsEmptyClsPlanGrp = TRUE;
    else
        *p_IsEmptyClsPlanGrp = FALSE;

    p_ClsPlanGrp = &p_FmPcd->p_FmPcdKg->clsPlanGrps[*p_ClsPlanGrpId];

    /* increment owners number */
    p_ClsPlanGrp->owners++;

    /* copy options array for port */
    memcpy(p_OptArray, &p_FmPcd->p_FmPcdKg->clsPlanGrps[*p_ClsPlanGrpId].optArray, FM_PCD_MAX_NUM_OF_OPTIONS(FM_PCD_MAX_NUM_OF_CLS_PLANS)*sizeof(protocolOpt_t));

    /* bind port to the new or existing group */
    err = BindPortToClsPlanGrp(p_FmPcd, hardwarePortId, p_GrpParams->clsPlanGrpId);
    if (err)
        RETURN_ERROR(MINOR, err, NO_MSG);

    return E_OK;
}

/* Unbind a port from its classification-plan group; when the last owner
 * leaves, the HW entries are cleared (via HC when available) and the group
 * is destroyed. */
t_Error FmPcdKgDeleteOrUnbindPortToClsPlanGrp(t_Handle h_FmPcd, uint8_t hardwarePortId, uint8_t clsPlanGrpId)
{
    t_FmPcd                             *p_FmPcd = (t_FmPcd *)h_FmPcd;
    t_FmPcdKgClsPlanGrp                 *p_ClsPlanGrp = &p_FmPcd->p_FmPcdKg->clsPlanGrps[clsPlanGrpId];
    t_FmPcdKgInterModuleClsPlanSet      *p_ClsPlanSet;
    t_Error                             err;

    /* This function is issued only from FM_PORT_DeletePcd which locked all PCD modules,
       so no need for lock here */

    UnbindPortToClsPlanGrp(p_FmPcd, hardwarePortId);

    /* decrement owners number */
    ASSERT_COND(p_ClsPlanGrp->owners);
    p_ClsPlanGrp->owners--;

    if (!p_ClsPlanGrp->owners)
    {
        if (p_FmPcd->h_Hc)
        {
            err = FmHcPcdKgDeleteClsPlan(p_FmPcd->h_Hc, clsPlanGrpId);
            return err;
        }
        else
        {
            /* clear clsPlan entries in memory: write an all-zero set over
               the group's range, then tear down the SW group */
            p_ClsPlanSet = (t_FmPcdKgInterModuleClsPlanSet *)XX_Malloc(sizeof(t_FmPcdKgInterModuleClsPlanSet));
            if (!p_ClsPlanSet)
            {
                RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Classification plan set"));
            }
            memset(p_ClsPlanSet, 0, sizeof(t_FmPcdKgInterModuleClsPlanSet));

            p_ClsPlanSet->baseEntry = p_FmPcd->p_FmPcdKg->clsPlanGrps[clsPlanGrpId].baseEntry;
            p_ClsPlanSet->numOfClsPlanEntries = p_FmPcd->p_FmPcdKg->clsPlanGrps[clsPlanGrpId].sizeOfGrp;
            KgSetClsPlan(p_FmPcd, p_ClsPlanSet);
            XX_Free(p_ClsPlanSet);

            FmPcdKgDestroyClsPlanGrp(h_FmPcd, clsPlanGrpId);
        }
    }
    return E_OK;
}

/* Accessor: required-action mask of a (valid) scheme. */
uint32_t FmPcdKgGetRequiredAction(t_Handle h_FmPcd, uint8_t schemeId)
{
    t_FmPcd     *p_FmPcd = (t_FmPcd*)h_FmPcd;
    ASSERT_COND(p_FmPcd->p_FmPcdKg->schemes[schemeId].valid);

    return p_FmPcd->p_FmPcdKg->schemes[schemeId].requiredAction;
}

/* Accessor: number of entities pointing at a (valid) scheme. */
uint32_t FmPcdKgGetPointedOwners(t_Handle h_FmPcd, uint8_t schemeId)
{
    t_FmPcd     *p_FmPcd = (t_FmPcd*)h_FmPcd;

    ASSERT_COND(p_FmPcd->p_FmPcdKg->schemes[schemeId].valid);

    return p_FmPcd->p_FmPcdKg->schemes[schemeId].pointedOwners;
}

/* Accessor: TRUE when the scheme goes directly to a policer profile. */
bool FmPcdKgIsDirectPlcr(t_Handle h_FmPcd, uint8_t schemeId)
{
    t_FmPcd     *p_FmPcd = (t_FmPcd*)h_FmPcd;

    ASSERT_COND(p_FmPcd->p_FmPcdKg->schemes[schemeId].valid);

    return p_FmPcd->p_FmPcdKg->schemes[schemeId].directPlcr;
}


/* Accessor: relative policer-profile id of a (valid) scheme. */
uint16_t FmPcdKgGetRelativeProfileId(t_Handle h_FmPcd, uint8_t schemeId)
{
    t_FmPcd     *p_FmPcd = (t_FmPcd*)h_FmPcd;

    ASSERT_COND(p_FmPcd->p_FmPcdKg->schemes[schemeId].valid);

    return p_FmPcd->p_FmPcdKg->schemes[schemeId].relativeProfileId;
}

/* TRUE when the scheme distributes over a range of policer profiles
 * (hash/OR-extract into the profile id, or a relative profile base). */
bool FmPcdKgIsDistrOnPlcrProfile(t_Handle h_FmPcd, uint8_t schemeId)
{
    t_FmPcd     *p_FmPcd = (t_FmPcd*)h_FmPcd;

    ASSERT_COND(p_FmPcd->p_FmPcdKg->schemes[schemeId].valid);

    if ((p_FmPcd->p_FmPcdKg->schemes[schemeId].extractedOrs &&
         p_FmPcd->p_FmPcdKg->schemes[schemeId].bitOffsetInPlcrProfile) ||
        p_FmPcd->p_FmPcdKg->schemes[schemeId].nextRelativePlcrProfile)
        return TRUE;
    else
        return FALSE;

}

/* Accessor: next PCD engine after a (valid) scheme. */
e_FmPcdEngine FmPcdKgGetNextEngine(t_Handle h_FmPcd, uint8_t relativeSchemeId)
{
    t_FmPcd     *p_FmPcd = (t_FmPcd*)h_FmPcd;

    ASSERT_COND(p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].valid);

    return p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].nextEngine;
}

/* Accessor: done-action (e.g. enqueue/drop) of a (valid) scheme. */
e_FmPcdDoneAction FmPcdKgGetDoneAction(t_Handle h_FmPcd, uint8_t schemeId)
{
    t_FmPcd     *p_FmPcd = (t_FmPcd*)h_FmPcd;

    ASSERT_COND(p_FmPcd->p_FmPcdKg->schemes[schemeId].valid);

    return p_FmPcd->p_FmPcdKg->schemes[schemeId].doneAction;
}

/* OR additional required-action bits into a (valid) scheme. */
void FmPcdKgUpdateRequiredAction(t_Handle h_Scheme, uint32_t requiredAction)
{
    t_FmPcdKgScheme *p_Scheme = (t_FmPcdKgScheme *)h_Scheme;

    /* this routine is protected by calling routine */

    ASSERT_COND(p_Scheme->valid);

    p_Scheme->requiredAction |= requiredAction;
}

/* TRUE when the HW scheme-mode register has its enable bit set. */
bool FmPcdKgHwSchemeIsValid(uint32_t schemeModeReg)
{
    return (bool)!!(schemeModeReg & KG_SCH_MODE_EN);
}

/* Build a KGAR value for an indirect scheme-entry write, optionally also
 * writing the scheme counter. */
uint32_t FmPcdKgBuildWriteSchemeActionReg(uint8_t schemeId, bool updateCounter)
{
    return (uint32_t)(((uint32_t)schemeId << FM_PCD_KG_KGAR_NUM_SHIFT) |
                      FM_KG_KGAR_GO |
                      FM_KG_KGAR_WRITE |
                      FM_KG_KGAR_SEL_SCHEME_ENTRY |
                      DUMMY_PORT_ID |
                      (updateCounter ? FM_KG_KGAR_SCM_WSEL_UPDATE_CNT:0));
}

/* Build a KGAR value for an indirect scheme-entry read (counter included). */
uint32_t FmPcdKgBuildReadSchemeActionReg(uint8_t schemeId)
{
    return (uint32_t)(((uint32_t)schemeId << FM_PCD_KG_KGAR_NUM_SHIFT) |
                      FM_KG_KGAR_GO |
                      FM_KG_KGAR_READ |
                      FM_KG_KGAR_SEL_SCHEME_ENTRY |
                      DUMMY_PORT_ID |
                      FM_KG_KGAR_SCM_WSEL_UPDATE_CNT);

}

/* Build a KGAR value writing a whole classification-plan block (8 entries). */
uint32_t FmPcdKgBuildWriteClsPlanBlockActionReg(uint8_t grpId)
{
    return (uint32_t)(FM_KG_KGAR_GO |
                      FM_KG_KGAR_WRITE |
                      FM_PCD_KG_KGAR_SEL_CLS_PLAN_ENTRY |
                      DUMMY_PORT_ID |
                      ((uint32_t)grpId << FM_PCD_KG_KGAR_NUM_SHIFT) |
                      FM_PCD_KG_KGAR_WSEL_MASK);

    /* if we ever want to write 1 by 1, use:
       sel = (uint8_t)(0x01 << (7- (entryId % CLS_PLAN_NUM_PER_GRP)));
     */
}

/* Build a KGAR value writing a port's scheme-bind (SP) entry. */
uint32_t FmPcdKgBuildWritePortSchemeBindActionReg(uint8_t hardwarePortId)
{

    return (uint32_t)(FM_KG_KGAR_GO |
                      FM_KG_KGAR_WRITE |
                      FM_PCD_KG_KGAR_SEL_PORT_ENTRY |
                      hardwarePortId |
                      FM_PCD_KG_KGAR_SEL_PORT_WSEL_SP);
}

/* Build a KGAR value reading a port's scheme-bind (SP) entry. */
uint32_t FmPcdKgBuildReadPortSchemeBindActionReg(uint8_t hardwarePortId)
{

    return (uint32_t)(FM_KG_KGAR_GO |
                      FM_KG_KGAR_READ |
                      FM_PCD_KG_KGAR_SEL_PORT_ENTRY |
                      hardwarePortId |
                      FM_PCD_KG_KGAR_SEL_PORT_WSEL_SP);
}

/* Build a KGAR value writing a port's classification-plan (CPP) entry. */
uint32_t FmPcdKgBuildWritePortClsPlanBindActionReg(uint8_t hardwarePortId)
{

    return (uint32_t)(FM_KG_KGAR_GO |
                      FM_KG_KGAR_WRITE |
                      FM_PCD_KG_KGAR_SEL_PORT_ENTRY |
                      hardwarePortId |
                      FM_PCD_KG_KGAR_SEL_PORT_WSEL_CPP);
}

/* Accessor: first HW entry of a classification-plan group. */
uint8_t FmPcdKgGetClsPlanGrpBase(t_Handle h_FmPcd, uint8_t clsPlanGrp)
{
    t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;

    return p_FmPcd->p_FmPcdKg->clsPlanGrps[clsPlanGrp].baseEntry;
}

/* Accessor: number of entries in a classification-plan group. */
uint16_t FmPcdKgGetClsPlanGrpSize(t_Handle h_FmPcd, uint8_t clsPlanGrp)
{
    t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;

    return p_FmPcd->p_FmPcdKg->clsPlanGrps[clsPlanGrp].sizeOfGrp;
}


/* Accessor: physical scheme id of a scheme handle. */
uint8_t FmPcdKgGetSchemeId(t_Handle h_Scheme)
{
    return ((t_FmPcdKgScheme*)h_Scheme)->schemeId;

}

#if (DPAA_VERSION >= 11)
/* Accessor (DPAA >= 1.1): virtual-storage-profile-enable flag of a scheme. */
bool FmPcdKgGetVspe(t_Handle h_Scheme)
{
    return ((t_FmPcdKgScheme*)h_Scheme)->vspe;

}
#endif /* (DPAA_VERSION >= 11) */

/* Map a physical scheme id to this partition's relative index; returns
 * FM_PCD_KG_NUM_OF_SCHEMES (with an error report) when the scheme does not
 * belong to the partition. */
uint8_t FmPcdKgGetRelativeSchemeId(t_Handle h_FmPcd, uint8_t schemeId)
{
    t_FmPcd     *p_FmPcd = (t_FmPcd*)h_FmPcd;
    uint8_t     i;

    for (i = 0;i<p_FmPcd->p_FmPcdKg->numOfSchemes;i++)
        if (p_FmPcd->p_FmPcdKg->schemesIds[i] == schemeId)
            return i;

    if (i == p_FmPcd->p_FmPcdKg->numOfSchemes)
        REPORT_ERROR(MAJOR, E_NOT_IN_RANGE, ("Scheme is out of partition range"));

    return FM_PCD_KG_NUM_OF_SCHEMES;
}

/* Apply CC-related required actions to a scheme: patch the scheme's NIA /
 * option-mode registers in HW (read-modify-write under the KG HW lock, or
 * delegated to HC when present), then record the action and the new pointed
 * owner. 'value' carries the register payload for UPDATE_KG_OPT_MODE /
 * UPDATE_KG_NIA. */
t_Error FmPcdKgCcGetSetParams(t_Handle h_FmPcd, t_Handle  h_Scheme, uint32_t requiredAction, uint32_t value)
{
    t_FmPcd             *p_FmPcd = (t_FmPcd*)h_FmPcd;
    uint8_t             relativeSchemeId, physicalSchemeId;
    uint32_t            tmpKgarReg, tmpReg32 = 0, intFlags;
    t_Error             err;
    t_FmPcdKgScheme     *p_Scheme = (t_FmPcdKgScheme*)h_Scheme;

    SANITY_CHECK_RETURN_VALUE(h_FmPcd, E_INVALID_HANDLE, 0);
    SANITY_CHECK_RETURN_VALUE(p_FmPcd->p_FmPcdKg, E_INVALID_HANDLE, 0);
    SANITY_CHECK_RETURN_VALUE(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE, 0);

    /* Calling function locked all PCD modules, so no need to lock here */

    if (!FmPcdKgIsSchemeValidSw(h_Scheme))
        RETURN_ERROR(MAJOR, E_ALREADY_EXISTS, ("Scheme is Invalid"));

    if (p_FmPcd->h_Hc)
    {
        err = FmHcPcdKgCcGetSetParams(p_FmPcd->h_Hc, h_Scheme, requiredAction, value);

        UpateSchemePointedOwner(h_Scheme,TRUE);
        FmPcdKgUpdateRequiredAction(h_Scheme,requiredAction);
        return err;
    }

    physicalSchemeId = p_Scheme->schemeId;

    relativeSchemeId = FmPcdKgGetRelativeSchemeId(p_FmPcd, physicalSchemeId);
    if (relativeSchemeId >= FM_PCD_KG_NUM_OF_SCHEMES)
        RETURN_ERROR(MAJOR, E_NOT_IN_RANGE, NO_MSG);

    /* HW is touched only the first time an action is applied to a scheme */
    if (!p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].pointedOwners ||
        !(p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].requiredAction & requiredAction))
    {
        if (requiredAction & UPDATE_NIA_ENQ_WITHOUT_DMA)
        {
            switch (p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].nextEngine)
            {
                case (e_FM_PCD_DONE):
                    if (p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].doneAction == e_FM_PCD_ENQ_FRAME)
                    {
                        /* read scheme, set "enqueue without DMA" in its NIA, write back */
                        tmpKgarReg = FmPcdKgBuildReadSchemeActionReg(physicalSchemeId);
                        intFlags = KgHwLock(p_FmPcd->p_FmPcdKg);
                        WriteKgarWait(p_FmPcd, tmpKgarReg);
                        tmpReg32 = GET_UINT32(p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs.kgse_mode);
                        ASSERT_COND(tmpReg32 & (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME));
                        WRITE_UINT32(p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs.kgse_mode, tmpReg32 | NIA_BMI_AC_ENQ_FRAME_WITHOUT_DMA);
                        /* call indirect command for scheme write */
                        tmpKgarReg = FmPcdKgBuildWriteSchemeActionReg(physicalSchemeId, FALSE);
                        WriteKgarWait(p_FmPcd, tmpKgarReg);
                        KgHwUnlock(p_FmPcd->p_FmPcdKg, intFlags);
                    }
                    break;
                case (e_FM_PCD_PLCR):
                    if (!p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].directPlcr ||
                        (p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].extractedOrs &&
                         p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].bitOffsetInPlcrProfile) ||
                        p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].nextRelativePlcrProfile)
                    {
                        RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("In this situation PP can not be with distribution and has to be shared"));
                    }
                    err = FmPcdPlcrCcGetSetParams(h_FmPcd, p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].relativeProfileId, requiredAction);
                    if (err)
                    {
                        RETURN_ERROR(MAJOR, err, NO_MSG);
                    }
                    break;
                default:
                    RETURN_ERROR(MAJOR, E_INVALID_VALUE,("in this situation the next engine after scheme can be or PLCR or ENQ_FRAME"));
            }
        }
        if (requiredAction & UPDATE_KG_NIA_CC_WA)
        {
            if (p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].nextEngine == e_FM_PCD_CC)
            {
                /* CC workaround: replace the AC_CC action with AC_PRE_CC */
                tmpKgarReg = FmPcdKgBuildReadSchemeActionReg(physicalSchemeId);
                intFlags = KgHwLock(p_FmPcd->p_FmPcdKg);
                WriteKgarWait(p_FmPcd, tmpKgarReg);
                tmpReg32 = GET_UINT32(p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs.kgse_mode);
                ASSERT_COND(tmpReg32 & (NIA_ENG_FM_CTL | NIA_FM_CTL_AC_CC));
                tmpReg32 &= ~NIA_FM_CTL_AC_CC;
                WRITE_UINT32(p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs.kgse_mode, tmpReg32 | NIA_FM_CTL_AC_PRE_CC);
                /* call indirect command for scheme write */
                tmpKgarReg = FmPcdKgBuildWriteSchemeActionReg(physicalSchemeId, FALSE);
                WriteKgarWait(p_FmPcd, tmpKgarReg);
                KgHwUnlock(p_FmPcd->p_FmPcdKg, intFlags);
            }
        }
        if (requiredAction & UPDATE_KG_OPT_MODE)
        {
            /* overwrite the scheme's option-mode register with 'value' */
            tmpKgarReg = FmPcdKgBuildReadSchemeActionReg(physicalSchemeId);
            intFlags = KgHwLock(p_FmPcd->p_FmPcdKg);
            WriteKgarWait(p_FmPcd, tmpKgarReg);
            WRITE_UINT32(p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs.kgse_om, value);
            /* call indirect command for scheme write */
            tmpKgarReg = FmPcdKgBuildWriteSchemeActionReg(physicalSchemeId, FALSE);
            WriteKgarWait(p_FmPcd, tmpKgarReg);
            KgHwUnlock(p_FmPcd->p_FmPcdKg, intFlags);
        }
        if (requiredAction & UPDATE_KG_NIA)
        {
            /* replace the scheme's NIA engine+action fields with 'value' */
            tmpKgarReg = FmPcdKgBuildReadSchemeActionReg(physicalSchemeId);
            intFlags = KgHwLock(p_FmPcd->p_FmPcdKg);
            WriteKgarWait(p_FmPcd, tmpKgarReg);
            tmpReg32 = GET_UINT32(p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs.kgse_mode);
            tmpReg32 &= ~(NIA_ENG_MASK | NIA_AC_MASK);
            tmpReg32 |= value;
            WRITE_UINT32(p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs.kgse_mode, tmpReg32);
            /* call indirect command for scheme write */
            tmpKgarReg = FmPcdKgBuildWriteSchemeActionReg(physicalSchemeId, FALSE);
            WriteKgarWait(p_FmPcd, tmpKgarReg);
            KgHwUnlock(p_FmPcd->p_FmPcdKg, intFlags);
        }
    }

    UpateSchemePointedOwner(h_Scheme, TRUE);
    FmPcdKgUpdateRequiredAction(h_Scheme, requiredAction);

    return E_OK;
}
/*********************** End of inter-module routines ************************/


/****************************************/
/*  API routines                        */
/****************************************/

/* Create a new KeyGen scheme or modify an existing one (p_SchemeParams->modify).
 * Returns the scheme handle, or NULL on error; on a busy modify-lock,
 * p_SchemeParams->id.h_Scheme is also NULLed to signal BUSY to the caller.
 * The 21 scheme registers are written through HC when available, otherwise
 * via the indirect-access window under the KG HW lock. */
t_Handle FM_PCD_KgSchemeSet( t_Handle h_FmPcd,  t_FmPcdKgSchemeParams *p_SchemeParams)
{
    t_FmPcd                             *p_FmPcd;
    struct fman_kg_scheme_regs          schemeRegs;
    struct fman_kg_scheme_regs          *p_MemRegs;
    uint8_t                             i;
    t_Error                             err = E_OK;
    uint32_t                            tmpKgarReg;
    uint32_t                            intFlags;
    uint8_t                             physicalSchemeId, relativeSchemeId = 0;
    t_FmPcdKgScheme                     *p_Scheme;

    if (p_SchemeParams->modify)
    {
        p_Scheme = (t_FmPcdKgScheme *)p_SchemeParams->id.h_Scheme;
        p_FmPcd = p_Scheme->h_FmPcd;

        SANITY_CHECK_RETURN_VALUE(p_FmPcd, E_INVALID_HANDLE, NULL);
        SANITY_CHECK_RETURN_VALUE(p_FmPcd->p_FmPcdKg, E_INVALID_HANDLE, NULL);

        if (!FmPcdKgIsSchemeValidSw(p_Scheme))
        {
            REPORT_ERROR(MAJOR, E_ALREADY_EXISTS,
                         ("Scheme is invalid"));
            return NULL;
        }

        if (!KgSchemeFlagTryLock(p_Scheme))
        {
            DBG(TRACE, ("Scheme Try Lock - BUSY"));
            /* Signal to caller BUSY condition */
            p_SchemeParams->id.h_Scheme = NULL;
            return NULL;
        }
    }
    else
    {
        p_FmPcd = (t_FmPcd*)h_FmPcd;

        SANITY_CHECK_RETURN_VALUE(p_FmPcd, E_INVALID_HANDLE, NULL);
        SANITY_CHECK_RETURN_VALUE(p_FmPcd->p_FmPcdKg, E_INVALID_HANDLE, NULL);

        relativeSchemeId = p_SchemeParams->id.relativeSchemeId;
        /* check that schemeId is in range */
        if (relativeSchemeId >= p_FmPcd->p_FmPcdKg->numOfSchemes)
        {
            REPORT_ERROR(MAJOR, E_NOT_IN_RANGE, ("relative-scheme-id %d!", relativeSchemeId));
            return NULL;
        }

        p_Scheme = &p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId];
        if (FmPcdKgIsSchemeValidSw(p_Scheme))
        {
            REPORT_ERROR(MAJOR, E_ALREADY_EXISTS,
                         ("Scheme id (%d)!", relativeSchemeId));
            return NULL;
        }

        /* new scheme: record its physical id and owner PCD */
        p_Scheme->schemeId = p_FmPcd->p_FmPcdKg->schemesIds[relativeSchemeId];
        p_Scheme->h_FmPcd = p_FmPcd;

        p_Scheme->p_Lock = FmPcdAcquireLock(p_FmPcd);
        if (!p_Scheme->p_Lock)
            REPORT_ERROR(MAJOR, E_NOT_AVAILABLE, ("FM KG Scheme lock obj!"));
    }

    err = BuildSchemeRegs((t_Handle)p_Scheme, p_SchemeParams, &schemeRegs);
    if (err)
    {
        REPORT_ERROR(MAJOR, err, NO_MSG);
        if (p_SchemeParams->modify)
            KgSchemeFlagUnlock(p_Scheme);
        if (!p_SchemeParams->modify &&
            p_Scheme->p_Lock)
            FmPcdReleaseLock(p_FmPcd, p_Scheme->p_Lock);
        return NULL;
    }

    if (p_FmPcd->h_Hc)
    {
        /* write the scheme through the host command interface */
        err = FmHcPcdKgSetScheme(p_FmPcd->h_Hc,
                                 (t_Handle)p_Scheme,
                                 &schemeRegs,
                                 p_SchemeParams->schemeCounter.update);
        if (p_SchemeParams->modify)
            KgSchemeFlagUnlock(p_Scheme);
        if (err)
        {
            if (!p_SchemeParams->modify &&
                p_Scheme->p_Lock)
                FmPcdReleaseLock(p_FmPcd, p_Scheme->p_Lock);
            return NULL;
        }
        if (!p_SchemeParams->modify)
            ValidateSchemeSw(p_Scheme);
        return (t_Handle)p_Scheme;
    }

    physicalSchemeId = p_Scheme->schemeId;

    /* configure all 21 scheme registers */
    p_MemRegs = &p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs;
    intFlags = KgHwLock(p_FmPcd->p_FmPcdKg);
    WRITE_UINT32(p_MemRegs->kgse_ppc, schemeRegs.kgse_ppc);
    WRITE_UINT32(p_MemRegs->kgse_ccbs, schemeRegs.kgse_ccbs);
    WRITE_UINT32(p_MemRegs->kgse_mode, schemeRegs.kgse_mode);
    WRITE_UINT32(p_MemRegs->kgse_mv, schemeRegs.kgse_mv);
    WRITE_UINT32(p_MemRegs->kgse_dv0, schemeRegs.kgse_dv0);
    WRITE_UINT32(p_MemRegs->kgse_dv1, schemeRegs.kgse_dv1);
    WRITE_UINT32(p_MemRegs->kgse_ekdv, schemeRegs.kgse_ekdv);
    WRITE_UINT32(p_MemRegs->kgse_ekfc, schemeRegs.kgse_ekfc);
    WRITE_UINT32(p_MemRegs->kgse_bmch, schemeRegs.kgse_bmch);
    WRITE_UINT32(p_MemRegs->kgse_bmcl, schemeRegs.kgse_bmcl);
    WRITE_UINT32(p_MemRegs->kgse_hc, schemeRegs.kgse_hc);
    WRITE_UINT32(p_MemRegs->kgse_spc, schemeRegs.kgse_spc);
    WRITE_UINT32(p_MemRegs->kgse_fqb, schemeRegs.kgse_fqb);
    WRITE_UINT32(p_MemRegs->kgse_om, schemeRegs.kgse_om);
    WRITE_UINT32(p_MemRegs->kgse_vsp, schemeRegs.kgse_vsp);
    for (i=0 ; i<FM_KG_NUM_OF_GENERIC_REGS ; i++)
        WRITE_UINT32(p_MemRegs->kgse_gec[i], schemeRegs.kgse_gec[i]);

    /* call indirect command for scheme write */
    tmpKgarReg = FmPcdKgBuildWriteSchemeActionReg(physicalSchemeId, p_SchemeParams->schemeCounter.update);

    WriteKgarWait(p_FmPcd, tmpKgarReg);
    KgHwUnlock(p_FmPcd->p_FmPcdKg, intFlags);

    if (!p_SchemeParams->modify)
        ValidateSchemeSw(p_Scheme);
    else
        KgSchemeFlagUnlock(p_Scheme);

    return (t_Handle)p_Scheme;
}

/* Delete a scheme: invalidate it in SW (fails if ports are still bound),
 * then clear its mode register in HW (or through HC) and release its lock
 * object. */
t_Error  FM_PCD_KgSchemeDelete(t_Handle h_Scheme)
{
    t_FmPcd             *p_FmPcd;
    uint8_t             physicalSchemeId;
    uint32_t            tmpKgarReg, intFlags;
    t_Error             err = E_OK;
    t_FmPcdKgScheme     *p_Scheme = (t_FmPcdKgScheme *)h_Scheme;

    SANITY_CHECK_RETURN_ERROR(h_Scheme, E_INVALID_HANDLE);

    p_FmPcd = (t_FmPcd*)(p_Scheme->h_FmPcd);

    /* check that no port is bound to this scheme */
    err = InvalidateSchemeSw(h_Scheme);
    if (err)
        RETURN_ERROR(MINOR, err, NO_MSG);

    if (p_FmPcd->h_Hc)
    {
        err = FmHcPcdKgDeleteScheme(p_FmPcd->h_Hc, h_Scheme);
        if (p_Scheme->p_Lock)
            FmPcdReleaseLock(p_FmPcd, p_Scheme->p_Lock);
        return err;
    }

    physicalSchemeId = ((t_FmPcdKgScheme *)h_Scheme)->schemeId;

    intFlags = KgHwLock(p_FmPcd->p_FmPcdKg);
    /* clear mode register, including enable bit */
    WRITE_UINT32(p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs.kgse_mode, 0);

    /* call indirect command for scheme write */
    tmpKgarReg = FmPcdKgBuildWriteSchemeActionReg(physicalSchemeId, FALSE);

    WriteKgarWait(p_FmPcd, tmpKgarReg);
    KgHwUnlock(p_FmPcd->p_FmPcdKg, intFlags);

    if (p_Scheme->p_Lock)
        FmPcdReleaseLock(p_FmPcd, p_Scheme->p_Lock);

    return E_OK;
}

/* Read a scheme's statistics counter (continues past this chunk). */
uint32_t  FM_PCD_KgSchemeGetCounter(t_Handle h_Scheme)
{
    t_FmPcd             *p_FmPcd;
    uint32_t            tmpKgarReg, spc, intFlags;
    uint8_t             physicalSchemeId;

    SANITY_CHECK_RETURN_VALUE(h_Scheme, E_INVALID_HANDLE, 0);

    p_FmPcd = (t_FmPcd*)(((t_FmPcdKgScheme *)h_Scheme)->h_FmPcd);
    if (p_FmPcd->h_Hc)
        return FmHcPcdKgGetSchemeCounter(p_FmPcd->h_Hc, h_Scheme);

    physicalSchemeId = ((t_FmPcdKgScheme *)h_Scheme)->schemeId;

    if (FmPcdKgGetRelativeSchemeId(p_FmPcd,
physicalSchemeId) == FM_PCD_KG_NUM_OF_SCHEMES) + REPORT_ERROR(MAJOR, E_NOT_IN_RANGE, NO_MSG); + + tmpKgarReg = FmPcdKgBuildReadSchemeActionReg(physicalSchemeId); + intFlags = KgHwLock(p_FmPcd->p_FmPcdKg); + WriteKgarWait(p_FmPcd, tmpKgarReg); + if (!(GET_UINT32(p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs.kgse_mode) & KG_SCH_MODE_EN)) + REPORT_ERROR(MAJOR, E_ALREADY_EXISTS, ("Scheme is Invalid")); + spc = GET_UINT32(p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs.kgse_spc); + KgHwUnlock(p_FmPcd->p_FmPcdKg, intFlags); + + return spc; +} + +t_Error FM_PCD_KgSchemeSetCounter(t_Handle h_Scheme, uint32_t value) +{ + t_FmPcd *p_FmPcd; + uint32_t tmpKgarReg, intFlags; + uint8_t physicalSchemeId; + + SANITY_CHECK_RETURN_VALUE(h_Scheme, E_INVALID_HANDLE, 0); + + p_FmPcd = (t_FmPcd*)(((t_FmPcdKgScheme *)h_Scheme)->h_FmPcd); + + if (!FmPcdKgIsSchemeValidSw(h_Scheme)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Requested scheme is invalid.")); + + if (p_FmPcd->h_Hc) + return FmHcPcdKgSetSchemeCounter(p_FmPcd->h_Hc, h_Scheme, value); + + physicalSchemeId = ((t_FmPcdKgScheme *)h_Scheme)->schemeId; + /* check that schemeId is in range */ + if (FmPcdKgGetRelativeSchemeId(p_FmPcd, physicalSchemeId) == FM_PCD_KG_NUM_OF_SCHEMES) + REPORT_ERROR(MAJOR, E_NOT_IN_RANGE, NO_MSG); + + /* read specified scheme into scheme registers */ + tmpKgarReg = FmPcdKgBuildReadSchemeActionReg(physicalSchemeId); + intFlags = KgHwLock(p_FmPcd->p_FmPcdKg); + WriteKgarWait(p_FmPcd, tmpKgarReg); + if (!(GET_UINT32(p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs.kgse_mode) & KG_SCH_MODE_EN)) + { + KgHwUnlock(p_FmPcd->p_FmPcdKg, intFlags); + RETURN_ERROR(MAJOR, E_ALREADY_EXISTS, ("Scheme is Invalid")); + } + + /* change counter value */ + WRITE_UINT32(p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs.kgse_spc, value); + + /* call indirect command for scheme write */ + tmpKgarReg = FmPcdKgBuildWriteSchemeActionReg(physicalSchemeId, TRUE); + + WriteKgarWait(p_FmPcd, tmpKgarReg); + 
KgHwUnlock(p_FmPcd->p_FmPcdKg, intFlags); + + return E_OK; +} + +t_Error FM_PCD_KgSetAdditionalDataAfterParsing(t_Handle h_FmPcd, uint8_t payloadOffset) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + struct fman_kg_regs *p_Regs; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_NULL_POINTER); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdKg, E_NULL_POINTER); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs, E_NULL_POINTER); + + p_Regs = p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs; + if (!FmIsMaster(p_FmPcd->h_Fm)) + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM_PCD_KgSetAdditionalDataAfterParsing - guest mode!")); + + WRITE_UINT32(p_Regs->fmkg_fdor,payloadOffset); + + return E_OK; +} + +t_Error FM_PCD_KgSetDfltValue(t_Handle h_FmPcd, uint8_t valueId, uint32_t value) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + struct fman_kg_regs *p_Regs; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(((valueId == 0) || (valueId == 1)), E_INVALID_VALUE); + SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_NULL_POINTER); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdKg, E_NULL_POINTER); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs, E_NULL_POINTER); + + p_Regs = p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs; + + if (!FmIsMaster(p_FmPcd->h_Fm)) + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM_PCD_KgSetDfltValue - guest mode!")); + + if (valueId == 0) + WRITE_UINT32(p_Regs->fmkg_gdv0r,value); + else + WRITE_UINT32(p_Regs->fmkg_gdv1r,value); + return E_OK; +} + +#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) +t_Error FM_PCD_KgDumpRegs(t_Handle h_FmPcd) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + int i = 0, j = 0; + uint8_t hardwarePortId = 0; + uint32_t tmpKgarReg, intFlags; + t_Error err = E_OK; + + DECLARE_DUMP; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdKg, E_INVALID_HANDLE); + 
SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE); + SANITY_CHECK_RETURN_ERROR(((p_FmPcd->guestId == NCSW_MASTER_ID) || + p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs), E_INVALID_OPERATION); + + DUMP_SUBTITLE(("\n")); + DUMP_TITLE(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs, ("FmPcdKgRegs Regs")); + + DUMP_VAR(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs,fmkg_gcr); + DUMP_VAR(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs,fmkg_eer); + DUMP_VAR(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs,fmkg_eeer); + DUMP_VAR(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs,fmkg_seer); + DUMP_VAR(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs,fmkg_seeer); + DUMP_VAR(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs,fmkg_gsr); + DUMP_VAR(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs,fmkg_tpc); + DUMP_VAR(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs,fmkg_serc); + DUMP_VAR(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs,fmkg_fdor); + DUMP_VAR(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs,fmkg_gdv0r); + DUMP_VAR(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs,fmkg_gdv1r); + DUMP_VAR(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs,fmkg_feer); + DUMP_VAR(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs,fmkg_ar); + + DUMP_SUBTITLE(("\n")); + intFlags = KgHwLock(p_FmPcd->p_FmPcdKg); + for (j = 0;j<FM_PCD_KG_NUM_OF_SCHEMES;j++) + { + tmpKgarReg = FmPcdKgBuildReadSchemeActionReg((uint8_t)j); + if (WriteKgarWait(p_FmPcd, tmpKgarReg) != E_OK) + RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG); + + DUMP_TITLE(&p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs, ("FmPcdKgIndirectAccessSchemeRegs Scheme %d Regs", j)); + + DUMP_VAR(&p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs,kgse_mode); + DUMP_VAR(&p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs,kgse_ekfc); + DUMP_VAR(&p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs,kgse_ekdv); + DUMP_VAR(&p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs,kgse_bmch); + DUMP_VAR(&p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs,kgse_bmcl); + DUMP_VAR(&p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs,kgse_fqb); + DUMP_VAR(&p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs,kgse_hc); + 
DUMP_VAR(&p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs,kgse_ppc); + + DUMP_TITLE(&p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs.kgse_gec, ("kgse_gec")); + DUMP_SUBSTRUCT_ARRAY(i, FM_KG_NUM_OF_GENERIC_REGS) + { + DUMP_MEMORY(&p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs.kgse_gec[i], sizeof(uint32_t)); + } + + DUMP_VAR(&p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs,kgse_spc); + DUMP_VAR(&p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs,kgse_dv0); + DUMP_VAR(&p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs,kgse_dv1); + DUMP_VAR(&p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs,kgse_ccbs); + DUMP_VAR(&p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->schemeRegs,kgse_mv); + } + DUMP_SUBTITLE(("\n")); + + for (i=0;i<FM_MAX_NUM_OF_PORTS;i++) + { + SW_PORT_INDX_TO_HW_PORT_ID(hardwarePortId, i); + + tmpKgarReg = FmPcdKgBuildReadPortSchemeBindActionReg(hardwarePortId); + + err = WriteKgarWait(p_FmPcd, tmpKgarReg); + if (err) + RETURN_ERROR(MINOR, err, NO_MSG); + + DUMP_TITLE(&p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->portRegs, ("FmPcdKgIndirectAccessPortRegs PCD Port %d regs", hardwarePortId)); + + DUMP_VAR(&p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->portRegs, fmkg_pe_sp); + DUMP_VAR(&p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->portRegs, fmkg_pe_cpp); + } + + DUMP_SUBTITLE(("\n")); + for (j=0;j<FM_PCD_MAX_NUM_OF_CLS_PLANS/CLS_PLAN_NUM_PER_GRP;j++) + { + DUMP_TITLE(&p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->clsPlanRegs, ("FmPcdKgIndirectAccessClsPlanRegs Regs group %d", j)); + DUMP_TITLE(&p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->clsPlanRegs.kgcpe, ("kgcpe")); + + tmpKgarReg = ReadClsPlanBlockActionReg((uint8_t)j); + err = WriteKgarWait(p_FmPcd, tmpKgarReg); + if (err) + RETURN_ERROR(MINOR, err, NO_MSG); + DUMP_SUBSTRUCT_ARRAY(i, 8) + DUMP_MEMORY(&p_FmPcd->p_FmPcdKg->p_IndirectAccessRegs->clsPlanRegs.kgcpe[i], sizeof(uint32_t)); + } + KgHwUnlock(p_FmPcd->p_FmPcdKg, intFlags); + + return E_OK; +} +#endif /* (defined(DEBUG_ERRORS) && ... 
*/ diff --git a/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_kg.h b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_kg.h new file mode 100644 index 0000000..cb7521a --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_kg.h @@ -0,0 +1,206 @@ +/* + * Copyright 2008-2012 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + +/****************************************************************************** + @File fm_kg.h + + @Description FM KG private header +*//***************************************************************************/ +#ifndef __FM_KG_H +#define __FM_KG_H + +#include "std_ext.h" + +/***********************************************************************/ +/* Keygen defines */ +/***********************************************************************/ +/* maskes */ +#if (DPAA_VERSION >= 11) +#define KG_SCH_VSP_SHIFT_MASK 0x0003f000 +#define KG_SCH_OM_VSPE 0x00000001 +#define KG_SCH_VSP_NO_KSP_EN 0x80000000 + +#define MAX_SP_SHIFT 23 +#define KG_SCH_VSP_MASK_SHIFT 12 +#define KG_SCH_VSP_SHIFT 24 +#endif /* (DPAA_VERSION >= 11) */ + +typedef uint32_t t_KnownFieldsMasks; +#define KG_SCH_KN_PORT_ID 0x80000000 +#define KG_SCH_KN_MACDST 0x40000000 +#define KG_SCH_KN_MACSRC 0x20000000 +#define KG_SCH_KN_TCI1 0x10000000 +#define KG_SCH_KN_TCI2 0x08000000 +#define KG_SCH_KN_ETYPE 0x04000000 +#define KG_SCH_KN_PPPSID 0x02000000 +#define KG_SCH_KN_PPPID 0x01000000 +#define KG_SCH_KN_MPLS1 0x00800000 +#define KG_SCH_KN_MPLS2 0x00400000 +#define KG_SCH_KN_MPLS_LAST 0x00200000 +#define KG_SCH_KN_IPSRC1 0x00100000 +#define KG_SCH_KN_IPDST1 0x00080000 +#define KG_SCH_KN_PTYPE1 0x00040000 +#define KG_SCH_KN_IPTOS_TC1 0x00020000 +#define KG_SCH_KN_IPV6FL1 0x00010000 +#define KG_SCH_KN_IPSRC2 0x00008000 +#define KG_SCH_KN_IPDST2 0x00004000 +#define 
KG_SCH_KN_PTYPE2 0x00002000 +#define KG_SCH_KN_IPTOS_TC2 0x00001000 +#define KG_SCH_KN_IPV6FL2 0x00000800 +#define KG_SCH_KN_GREPTYPE 0x00000400 +#define KG_SCH_KN_IPSEC_SPI 0x00000200 +#define KG_SCH_KN_IPSEC_NH 0x00000100 +#define KG_SCH_KN_IPPID 0x00000080 +#define KG_SCH_KN_L4PSRC 0x00000004 +#define KG_SCH_KN_L4PDST 0x00000002 +#define KG_SCH_KN_TFLG 0x00000001 + +typedef uint8_t t_GenericCodes; +#define KG_SCH_GEN_SHIM1 0x70 +#define KG_SCH_GEN_DEFAULT 0x10 +#define KG_SCH_GEN_PARSE_RESULT_N_FQID 0x20 +#define KG_SCH_GEN_START_OF_FRM 0x40 +#define KG_SCH_GEN_SHIM2 0x71 +#define KG_SCH_GEN_IP_PID_NO_V 0x72 +#define KG_SCH_GEN_ETH 0x03 +#define KG_SCH_GEN_ETH_NO_V 0x73 +#define KG_SCH_GEN_SNAP 0x04 +#define KG_SCH_GEN_SNAP_NO_V 0x74 +#define KG_SCH_GEN_VLAN1 0x05 +#define KG_SCH_GEN_VLAN1_NO_V 0x75 +#define KG_SCH_GEN_VLAN2 0x06 +#define KG_SCH_GEN_VLAN2_NO_V 0x76 +#define KG_SCH_GEN_ETH_TYPE 0x07 +#define KG_SCH_GEN_ETH_TYPE_NO_V 0x77 +#define KG_SCH_GEN_PPP 0x08 +#define KG_SCH_GEN_PPP_NO_V 0x78 +#define KG_SCH_GEN_MPLS1 0x09 +#define KG_SCH_GEN_MPLS2 0x19 +#define KG_SCH_GEN_MPLS3 0x29 +#define KG_SCH_GEN_MPLS1_NO_V 0x79 +#define KG_SCH_GEN_MPLS_LAST 0x0a +#define KG_SCH_GEN_MPLS_LAST_NO_V 0x7a +#define KG_SCH_GEN_IPV4 0x0b +#define KG_SCH_GEN_IPV6 0x1b +#define KG_SCH_GEN_L3_NO_V 0x7b +#define KG_SCH_GEN_IPV4_TUNNELED 0x0c +#define KG_SCH_GEN_IPV6_TUNNELED 0x1c +#define KG_SCH_GEN_MIN_ENCAP 0x2c +#define KG_SCH_GEN_IP2_NO_V 0x7c +#define KG_SCH_GEN_GRE 0x0d +#define KG_SCH_GEN_GRE_NO_V 0x7d +#define KG_SCH_GEN_TCP 0x0e +#define KG_SCH_GEN_UDP 0x1e +#define KG_SCH_GEN_IPSEC_AH 0x2e +#define KG_SCH_GEN_SCTP 0x3e +#define KG_SCH_GEN_DCCP 0x4e +#define KG_SCH_GEN_IPSEC_ESP 0x6e +#define KG_SCH_GEN_L4_NO_V 0x7e +#define KG_SCH_GEN_NEXTHDR 0x7f +/* shifts */ +#define KG_SCH_PP_SHIFT_HIGH_SHIFT 27 +#define KG_SCH_PP_SHIFT_LOW_SHIFT 12 +#define KG_SCH_PP_MASK_SHIFT 16 +#define KG_SCH_MODE_CCOBASE_SHIFT 24 +#define KG_SCH_DEF_MAC_ADDR_SHIFT 30 +#define 
KG_SCH_DEF_TCI_SHIFT 28 +#define KG_SCH_DEF_ENET_TYPE_SHIFT 26 +#define KG_SCH_DEF_PPP_SESSION_ID_SHIFT 24 +#define KG_SCH_DEF_PPP_PROTOCOL_ID_SHIFT 22 +#define KG_SCH_DEF_MPLS_LABEL_SHIFT 20 +#define KG_SCH_DEF_IP_ADDR_SHIFT 18 +#define KG_SCH_DEF_PROTOCOL_TYPE_SHIFT 16 +#define KG_SCH_DEF_IP_TOS_TC_SHIFT 14 +#define KG_SCH_DEF_IPV6_FLOW_LABEL_SHIFT 12 +#define KG_SCH_DEF_IPSEC_SPI_SHIFT 10 +#define KG_SCH_DEF_L4_PORT_SHIFT 8 +#define KG_SCH_DEF_TCP_FLAG_SHIFT 6 +#define KG_SCH_HASH_CONFIG_SHIFT_SHIFT 24 +#define KG_SCH_GEN_MASK_SHIFT 16 +#define KG_SCH_GEN_HT_SHIFT 8 +#define KG_SCH_GEN_SIZE_SHIFT 24 +#define KG_SCH_GEN_DEF_SHIFT 29 +#define FM_PCD_KG_KGAR_NUM_SHIFT 16 + +/* others */ +#define NUM_OF_SW_DEFAULTS 3 +#define MAX_PP_SHIFT 23 +#define MAX_KG_SCH_SIZE 16 +#define MASK_FOR_GENERIC_BASE_ID 0x20 +#define MAX_HASH_SHIFT 40 +#define MAX_KG_SCH_FQID_BIT_OFFSET 31 +#define MAX_KG_SCH_PP_BIT_OFFSET 15 +#define MAX_DIST_FQID_SHIFT 23 + +#define GET_MASK_SEL_SHIFT(shift,i) \ +switch (i) { \ + case (0):shift = 26;break; \ + case (1):shift = 20;break; \ + case (2):shift = 10;break; \ + case (3):shift = 4;break; \ + default: \ + RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG); \ +} + +#define GET_MASK_OFFSET_SHIFT(shift,i) \ +switch (i) { \ + case (0):shift = 16;break; \ + case (1):shift = 0;break; \ + case (2):shift = 28;break; \ + case (3):shift = 24;break; \ + default: \ + RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG); \ +} + +#define GET_MASK_SHIFT(shift,i) \ +switch (i) { \ + case (0):shift = 24;break; \ + case (1):shift = 16;break; \ + case (2):shift = 8;break; \ + case (3):shift = 0;break; \ + default: \ + RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG); \ +} + +/***********************************************************************/ +/* Keygen defines */ +/***********************************************************************/ + +#define KG_DOUBLE_MEANING_REGS_OFFSET 0x100 +#define NO_VALIDATION 0x70 +#define KG_ACTION_REG_TO 1024 +#define KG_MAX_PROFILE 255 
+#define SCHEME_ALWAYS_DIRECT 0xFFFFFFFF + + +#endif /* __FM_KG_H */ diff --git a/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_manip.c b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_manip.c new file mode 100644 index 0000000..e6c8d2d --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_manip.c @@ -0,0 +1,4193 @@ +/* + * Copyright 2008-2012 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + +/****************************************************************************** + @File fm_manip.c + + @Description FM PCD manip ... +*//***************************************************************************/ +#include "std_ext.h" +#include "error_ext.h" +#include "string_ext.h" +#include "debug_ext.h" +#include "fm_pcd_ext.h" +#include "fm_port_ext.h" +#include "fm_muram_ext.h" +#include "memcpy_ext.h" + +#include "fm_common.h" +#include "fm_hc.h" +#include "fm_manip.h" + + +/****************************************/ +/* static functions */ +/****************************************/ +static t_Handle GetManipInfo(t_FmPcdManip *p_Manip, e_ManipInfo manipInfo) +{ + t_FmPcdManip *p_CurManip = p_Manip; + + if (!MANIP_IS_UNIFIED(p_Manip)) + p_CurManip = p_Manip; + else + { + /* go to first unified */ + while (MANIP_IS_UNIFIED_NON_FIRST(p_CurManip)) + p_CurManip = p_CurManip->h_PrevManip; + } + + switch (manipInfo) + { + case (e_MANIP_HMCT): + return p_CurManip->p_Hmct; + case (e_MANIP_HMTD): + return p_CurManip->h_Ad; + case (e_MANIP_HANDLER_TABLE_OWNER): + return (t_Handle)p_CurManip; + default: + return NULL; + } +} +static uint16_t GetHmctSize(t_FmPcdManip *p_Manip) +{ + uint16_t size = 0; + t_FmPcdManip *p_CurManip = p_Manip; + + if (!MANIP_IS_UNIFIED(p_Manip)) + return p_Manip->tableSize; + + /* accumulate sizes, starting with the first node */ + while (MANIP_IS_UNIFIED_NON_FIRST(p_CurManip)) + p_CurManip = p_CurManip->h_PrevManip; + 
+ while (MANIP_IS_UNIFIED_NON_LAST(p_CurManip)) + { + size += p_CurManip->tableSize; + p_CurManip = (t_FmPcdManip *)p_CurManip->h_NextManip; + } + size += p_CurManip->tableSize; /* add last size */ + + return(size); +} +static uint16_t GetDataSize(t_FmPcdManip *p_Manip) +{ + uint16_t size = 0; + t_FmPcdManip *p_CurManip = p_Manip; + + if (!MANIP_IS_UNIFIED(p_Manip)) + return p_Manip->dataSize; + + /* accumulate sizes, starting with the first node */ + while (MANIP_IS_UNIFIED_NON_FIRST(p_CurManip)) + p_CurManip = p_CurManip->h_PrevManip; + + while (MANIP_IS_UNIFIED_NON_LAST(p_CurManip)) + { + size += p_CurManip->dataSize; + p_CurManip = (t_FmPcdManip *)p_CurManip->h_NextManip; + } + size += p_CurManip->dataSize; /* add last size */ + + return(size); +} +static t_Error CalculateTableSize(t_FmPcdManipParams *p_FmPcdManipParams, uint16_t *p_TableSize, uint8_t *p_DataSize) +{ + uint8_t localDataSize, remain, tableSize = 0, dataSize = 0; + + if (p_FmPcdManipParams->u.hdr.rmv) + { + switch (p_FmPcdManipParams->u.hdr.rmvParams.type){ + case (e_FM_PCD_MANIP_RMV_GENERIC): + case (e_FM_PCD_MANIP_RMV_BY_HDR): + /* As long as the only rmv command is the L2, no check on type is required */ + tableSize += HMCD_BASIC_SIZE; + break; + default: + RETURN_ERROR(MINOR, E_INVALID_SELECTION, ("Unknown rmvParams.type")); + } + } + + if (p_FmPcdManipParams->u.hdr.insrt) + { + switch (p_FmPcdManipParams->u.hdr.insrtParams.type){ + case (e_FM_PCD_MANIP_INSRT_GENERIC): + remain = (uint8_t)(p_FmPcdManipParams->u.hdr.insrtParams.u.generic.size % 4); + if (remain) + localDataSize = (uint8_t)(p_FmPcdManipParams->u.hdr.insrtParams.u.generic.size + 4 - remain); + else + localDataSize = p_FmPcdManipParams->u.hdr.insrtParams.u.generic.size; + tableSize += (uint8_t)(HMCD_BASIC_SIZE + localDataSize); + break; + case (e_FM_PCD_MANIP_INSRT_BY_HDR): + /* As long as the only insert command is the internal L2, no check on type is required */ + tableSize += HMCD_BASIC_SIZE+HMCD_PTR_SIZE; + if 
(p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.type == e_FM_PCD_MANIP_INSRT_BY_HDR_SPECIFIC_L2) + switch (p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.specificL2Params.specificL2) + { + case (e_FM_PCD_MANIP_HDR_INSRT_MPLS): + dataSize += p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.specificL2Params.size; + break; + default: + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG); + } + break; + default: + RETURN_ERROR(MINOR, E_INVALID_SELECTION, ("Unknown insrtParams.type")); + } + } + if (p_FmPcdManipParams->u.hdr.fieldUpdate) + { + switch (p_FmPcdManipParams->u.hdr.fieldUpdateParams.type){ + case (e_FM_PCD_MANIP_HDR_FIELD_UPDATE_VLAN): + tableSize += HMCD_BASIC_SIZE; + if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.vlan.updateType == + e_FM_PCD_MANIP_HDR_FIELD_UPDATE_DSCP_TO_VLAN) + { + tableSize += HMCD_PTR_SIZE; + dataSize += DSCP_TO_VLAN_TABLE_SIZE; + } + break; + case (e_FM_PCD_MANIP_HDR_FIELD_UPDATE_IPV4): + tableSize += HMCD_BASIC_SIZE; + if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.validUpdates & HDR_MANIP_IPV4_ID) + { + tableSize += HMCD_PARAM_SIZE; + dataSize += 2; + } + if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.validUpdates & HDR_MANIP_IPV4_SRC) + tableSize += HMCD_IPV4_ADDR_SIZE; + if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.validUpdates & HDR_MANIP_IPV4_DST) + tableSize += HMCD_IPV4_ADDR_SIZE; + break; + case (e_FM_PCD_MANIP_HDR_FIELD_UPDATE_IPV6): + tableSize += HMCD_BASIC_SIZE; + if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.validUpdates & HDR_MANIP_IPV6_SRC) + tableSize += HMCD_IPV6_ADDR_SIZE; + if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.validUpdates & HDR_MANIP_IPV6_DST) + tableSize += HMCD_IPV6_ADDR_SIZE; + break; + case (e_FM_PCD_MANIP_HDR_FIELD_UPDATE_TCP_UDP): + if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.tcpUdp.validUpdates == HDR_MANIP_TCP_UDP_CHECKSUM) + /* we implement this case with the update-checksum descriptor */ + tableSize += HMCD_BASIC_SIZE; + else + /* we implement this 
case with the TCP/UDP-update descriptor */ + tableSize += HMCD_BASIC_SIZE + HMCD_PARAM_SIZE; + break; + default: + RETURN_ERROR(MINOR, E_INVALID_SELECTION, ("Unknown fieldUpdateParams.type")); + } + } + + if (p_FmPcdManipParams->u.hdr.custom) + { + switch (p_FmPcdManipParams->u.hdr.customParams.type){ + case (e_FM_PCD_MANIP_HDR_CUSTOM_IP_REPLACE): + { + tableSize += HMCD_BASIC_SIZE + HMCD_PARAM_SIZE + HMCD_PARAM_SIZE; + dataSize += p_FmPcdManipParams->u.hdr.customParams.u.ipHdrReplace.hdrSize; + if ((p_FmPcdManipParams->u.hdr.customParams.u.ipHdrReplace.replaceType == e_FM_PCD_MANIP_HDR_CUSTOM_REPLACE_IPV6_BY_IPV4) && + (p_FmPcdManipParams->u.hdr.customParams.u.ipHdrReplace.updateIpv4Id)) + dataSize += 2; + } + break; + default: + RETURN_ERROR(MINOR, E_INVALID_SELECTION, ("Unknown customParams.type")); + } + } + + *p_TableSize = tableSize; + *p_DataSize = dataSize; + + return E_OK; +} + +static t_Error BuildHmct(t_FmPcdManip *p_Manip, t_FmPcdManipParams *p_FmPcdManipParams, uint8_t *p_DestHmct, uint8_t *p_DestData, bool new) +{ + uint32_t *p_TmpHmct = (uint32_t*)p_DestHmct, *p_LocalData; + uint32_t tmpReg=0, *p_Last = NULL; + uint8_t remain, i, size=0, origSize, *p_UsrData = NULL, *p_TmpData = p_DestData; + t_Handle h_FmPcd = p_Manip->h_FmPcd; + uint8_t j=0; + + if (p_FmPcdManipParams->u.hdr.rmv) + { + if (p_FmPcdManipParams->u.hdr.rmvParams.type == e_FM_PCD_MANIP_RMV_GENERIC) + { + /* initialize HMCD */ + tmpReg = (uint32_t)(HMCD_OPCODE_GENERIC_RMV) << HMCD_OC_SHIFT; + /* tmp, should be conditional */ + tmpReg |= p_FmPcdManipParams->u.hdr.rmvParams.u.generic.offset << HMCD_RMV_OFFSET_SHIFT; + tmpReg |= p_FmPcdManipParams->u.hdr.rmvParams.u.generic.size << HMCD_RMV_SIZE_SHIFT; + } + else if (p_FmPcdManipParams->u.hdr.rmvParams.type == e_FM_PCD_MANIP_RMV_BY_HDR) + { + uint8_t hmcdOpt; + if (!p_FmPcdManipParams->u.hdr.rmvParams.u.byHdr.type == e_FM_PCD_MANIP_RMV_BY_HDR_SPECIFIC_L2) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG); + + /* initialize HMCD */ + tmpReg = 
(uint32_t)(HMCD_OPCODE_L2_RMV) << HMCD_OC_SHIFT; + + switch (p_FmPcdManipParams->u.hdr.rmvParams.u.byHdr.u.specificL2) + { + case (e_FM_PCD_MANIP_HDR_RMV_ETHERNET): + hmcdOpt = HMCD_RMV_L2_ETHERNET; + break; + case (e_FM_PCD_MANIP_HDR_RMV_STACKED_QTAGS): + hmcdOpt = HMCD_RMV_L2_STACKED_QTAGS; + break; + case (e_FM_PCD_MANIP_HDR_RMV_ETHERNET_AND_MPLS): + hmcdOpt = HMCD_RMV_L2_ETHERNET_AND_MPLS; + break; + case (e_FM_PCD_MANIP_HDR_RMV_MPLS): + hmcdOpt = HMCD_RMV_L2_MPLS; + break; + default: + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG); + } + tmpReg |= hmcdOpt << HMCD_L2_MODE_SHIFT; + } + else + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("manip header remove type!")); + + WRITE_UINT32(*p_TmpHmct, tmpReg); + /* save a pointer to the "last" indication word */ + p_Last = p_TmpHmct; + /* advance to next command */ + p_TmpHmct += HMCD_BASIC_SIZE/4; + } + + if (p_FmPcdManipParams->u.hdr.insrt) + { + if (p_FmPcdManipParams->u.hdr.insrtParams.type == e_FM_PCD_MANIP_INSRT_GENERIC) + { + /* initialize HMCD */ + if (p_FmPcdManipParams->u.hdr.insrtParams.u.generic.replace) + tmpReg = (uint32_t)(HMCD_OPCODE_GENERIC_REPLACE) << HMCD_OC_SHIFT; + else + tmpReg = (uint32_t)(HMCD_OPCODE_GENERIC_INSRT) << HMCD_OC_SHIFT; + + tmpReg |= p_FmPcdManipParams->u.hdr.insrtParams.u.generic.offset << HMCD_INSRT_OFFSET_SHIFT; + tmpReg |= p_FmPcdManipParams->u.hdr.insrtParams.u.generic.size << HMCD_INSRT_SIZE_SHIFT; + + size = p_FmPcdManipParams->u.hdr.insrtParams.u.generic.size; + p_UsrData = p_FmPcdManipParams->u.hdr.insrtParams.u.generic.p_Data; + + WRITE_UINT32(*p_TmpHmct, tmpReg); + /* save a pointer to the "last" indication word */ + p_Last = p_TmpHmct; + + p_TmpHmct += HMCD_BASIC_SIZE/4; + + /* initialize data to be inserted */ + /* if size is not a multiple of 4, padd with 0's */ + origSize = size; + remain = (uint8_t)(size % 4); + if (remain) + { + size += (uint8_t)(4 - remain); + p_LocalData = (uint32_t *)XX_Malloc(size); + memset((uint8_t *)p_LocalData, 0, size); + memcpy((uint8_t 
*)p_LocalData, p_UsrData, origSize); + } + else + p_LocalData = (uint32_t*)p_UsrData; + + /* initialize data and advance pointer to next command */ + for (i = 0; i<size/4 ; i++, p_TmpHmct += HMCD_BASIC_SIZE/4) + WRITE_UINT32(*p_TmpHmct, *(p_LocalData+i)); + + if (remain) + XX_Free(p_LocalData); + } + + else if (p_FmPcdManipParams->u.hdr.insrtParams.type == e_FM_PCD_MANIP_INSRT_BY_HDR) + { + uint8_t hmcdOpt; + if (!p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.type == e_FM_PCD_MANIP_INSRT_BY_HDR_SPECIFIC_L2) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG); + + /* initialize HMCD */ + tmpReg = (uint32_t)(HMCD_OPCODE_L2_INSRT) << HMCD_OC_SHIFT; + + switch (p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.specificL2Params.specificL2) + { + case (e_FM_PCD_MANIP_HDR_INSRT_MPLS): + if (p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.specificL2Params.update) + hmcdOpt = HMCD_INSRT_N_UPDATE_L2_MPLS; + else + hmcdOpt = HMCD_INSRT_L2_MPLS; + break; + default: + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG); + } + tmpReg |= hmcdOpt << HMCD_L2_MODE_SHIFT; + + WRITE_UINT32(*p_TmpHmct, tmpReg); + /* save a pointer to the "last" indication word */ + p_Last = p_TmpHmct; + + p_TmpHmct += HMCD_BASIC_SIZE/4; + + /* set size and pointer of user's data */ + size = (uint8_t)p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.specificL2Params.size; + + ASSERT_COND(p_TmpData); + Mem2IOCpy32(p_TmpData, p_FmPcdManipParams->u.hdr.insrtParams.u.byHdr.u.specificL2Params.p_Data, size); + tmpReg = (size << HMCD_INSRT_L2_SIZE_SHIFT) | (uint32_t)(XX_VirtToPhys(p_TmpData) - (((t_FmPcd*)h_FmPcd)->physicalMuramBase)); + WRITE_UINT32(*p_TmpHmct, tmpReg); + p_TmpHmct += HMCD_PTR_SIZE/4; + p_TmpData += size; + } + else + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("manip header insert type!")); + } + + if (p_FmPcdManipParams->u.hdr.fieldUpdate) + { + switch (p_FmPcdManipParams->u.hdr.fieldUpdateParams.type){ + case (e_FM_PCD_MANIP_HDR_FIELD_UPDATE_VLAN): + /* set opcode */ + tmpReg = 
(uint32_t)(HMCD_OPCODE_VLAN_PRI_UPDATE) << HMCD_OC_SHIFT; + + /* set mode & table pointer */ + if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.vlan.updateType == + e_FM_PCD_MANIP_HDR_FIELD_UPDATE_DSCP_TO_VLAN) + { + /* set Mode */ + tmpReg |= (uint32_t)(HMCD_VLAN_PRI_UPDATE_DSCP_TO_VPRI) << HMCD_VLAN_PRI_REP_MODE_SHIFT; + /* set VPRI default */ + tmpReg |= p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.vlan.u.dscpToVpri.vpriDefVal; + WRITE_UINT32(*p_TmpHmct, tmpReg); + /* save a pointer to the "last" indication word */ + p_Last = p_TmpHmct; + /* write the table pointer into the Manip descriptor */ + p_TmpHmct += HMCD_BASIC_SIZE/4; + + tmpReg = 0; + ASSERT_COND(p_TmpData); + for (i=0; i<FM_PCD_MANIP_DSCP_VALUES; i++) + { + /* first we build from each 8 values a 32bit register */ + tmpReg |= (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.vlan.u.dscpToVpri.dscpToVpriTable[i]) << (32-4*(j+1)); + j++; + /* Than we write this register to the next table word + * (i=7-->word 0, i=15-->word 1,... i=63-->word 7) */ + if ((i%8) == 7) + { + WRITE_UINT32(*((uint32_t*)p_TmpData + (i+1)/8-1), tmpReg); + tmpReg = 0; + j = 0; + } + } + WRITE_UINT32(*p_TmpHmct, (uint32_t)(XX_VirtToPhys(p_TmpData) - (((t_FmPcd*)h_FmPcd)->physicalMuramBase))); + p_TmpHmct += HMCD_PTR_SIZE/4; + + p_TmpData += DSCP_TO_VLAN_TABLE_SIZE; + } + else if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.vlan.updateType == + e_FM_PCD_MANIP_HDR_FIELD_UPDATE_VLAN_VPRI) + { + /* set Mode */ + /* line commented out as it has no-side-effect ('0' value). 
*/ + /*tmpReg |= HMCD_VLAN_PRI_UPDATE << HMCD_VLAN_PRI_REP_MODE_SHIFT*/; + /* set VPRI parameter */ + tmpReg |= p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.vlan.u.vpri; + WRITE_UINT32(*p_TmpHmct, tmpReg); + /* save a pointer to the "last" indication word */ + p_Last = p_TmpHmct; + p_TmpHmct += HMCD_BASIC_SIZE/4; + } + break; + + case (e_FM_PCD_MANIP_HDR_FIELD_UPDATE_IPV4): + /* set opcode */ + tmpReg = (uint32_t)(HMCD_OPCODE_IPV4_UPDATE) << HMCD_OC_SHIFT; + if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.validUpdates & HDR_MANIP_IPV4_TTL) + tmpReg |= HMCD_IPV4_UPDATE_TTL; + if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.validUpdates & HDR_MANIP_IPV4_TOS) + { + tmpReg |= HMCD_IPV4_UPDATE_TOS; + tmpReg |= p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.tos << HMCD_IPV4_UPDATE_TOS_SHIFT; + } + if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.validUpdates & HDR_MANIP_IPV4_ID) + tmpReg |= HMCD_IPV4_UPDATE_ID; + if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.validUpdates & HDR_MANIP_IPV4_SRC) + tmpReg |= HMCD_IPV4_UPDATE_SRC; + if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.validUpdates & HDR_MANIP_IPV4_DST) + tmpReg |= HMCD_IPV4_UPDATE_DST; + /* write the first 4 bytes of the descriptor */ + WRITE_UINT32(*p_TmpHmct, tmpReg); + /* save a pointer to the "last" indication word */ + p_Last = p_TmpHmct; + + p_TmpHmct += HMCD_BASIC_SIZE/4; + + if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.validUpdates & HDR_MANIP_IPV4_ID) + { + ASSERT_COND(p_TmpData); + WRITE_UINT16(*(uint16_t*)p_TmpData, p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.id); + WRITE_UINT32(*p_TmpHmct, (uint32_t)(XX_VirtToPhys(p_TmpData) - (((t_FmPcd*)p_Manip->h_FmPcd)->physicalMuramBase))); + p_TmpData += 2; + p_TmpHmct += HMCD_PTR_SIZE/4; + } + + if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.validUpdates & HDR_MANIP_IPV4_SRC) + { + WRITE_UINT32(*p_TmpHmct, p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.src); + p_TmpHmct += 
HMCD_IPV4_ADDR_SIZE/4; + } + + if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.validUpdates & HDR_MANIP_IPV4_DST) + { + WRITE_UINT32(*p_TmpHmct, p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv4.dst); + p_TmpHmct += HMCD_IPV4_ADDR_SIZE/4; + } + break; + + case (e_FM_PCD_MANIP_HDR_FIELD_UPDATE_IPV6): + /* set opcode */ + tmpReg = (uint32_t)(HMCD_OPCODE_IPV6_UPDATE) << HMCD_OC_SHIFT; + if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv6.validUpdates & HDR_MANIP_IPV6_HL) + tmpReg |= HMCD_IPV6_UPDATE_HL; + if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv6.validUpdates & HDR_MANIP_IPV6_TC) + { + tmpReg |= HMCD_IPV6_UPDATE_TC; + tmpReg |= p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv6.trafficClass << HMCD_IPV6_UPDATE_TC_SHIFT; + } + if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv6.validUpdates & HDR_MANIP_IPV6_SRC) + tmpReg |= HMCD_IPV6_UPDATE_SRC; + if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv6.validUpdates & HDR_MANIP_IPV6_DST) + tmpReg |= HMCD_IPV6_UPDATE_DST; + /* write the first 4 bytes of the descriptor */ + WRITE_UINT32(*p_TmpHmct, tmpReg); + /* save a pointer to the "last" indication word */ + p_Last = p_TmpHmct; + + p_TmpHmct += HMCD_BASIC_SIZE/4; + if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv6.validUpdates & HDR_MANIP_IPV6_SRC) + for (i = 0 ; i < NET_HEADER_FIELD_IPv6_ADDR_SIZE ; i+=4) + { + WRITE_UINT32(*p_TmpHmct, *(uint32_t*)&p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv6.src[i]); + p_TmpHmct += HMCD_PTR_SIZE/4; + } + if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv6.validUpdates & HDR_MANIP_IPV6_DST) + for (i = 0 ; i < NET_HEADER_FIELD_IPv6_ADDR_SIZE ; i+=4) + { + WRITE_UINT32(*p_TmpHmct, *(uint32_t*)&p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.ipv6.dst[i]); + p_TmpHmct += HMCD_PTR_SIZE/4; + } + break; + + case (e_FM_PCD_MANIP_HDR_FIELD_UPDATE_TCP_UDP): + if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.tcpUdp.validUpdates == HDR_MANIP_TCP_UDP_CHECKSUM) + { + /* we implement this case with the 
update-checksum descriptor */ + /* set opcode */ + tmpReg = (uint32_t)(HMCD_OPCODE_TCP_UDP_CHECKSUM) << HMCD_OC_SHIFT; + /* write the first 4 bytes of the descriptor */ + WRITE_UINT32(*p_TmpHmct, tmpReg); + /* save a pointer to the "last" indication word */ + p_Last = p_TmpHmct; + + p_TmpHmct += HMCD_BASIC_SIZE/4; + } + else + { + /* we implement this case with the TCP/UDP update descriptor */ + /* set opcode */ + tmpReg = (uint32_t)(HMCD_OPCODE_TCP_UDP_UPDATE) << HMCD_OC_SHIFT; + if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.tcpUdp.validUpdates & HDR_MANIP_TCP_UDP_DST) + tmpReg |= HMCD_TCP_UDP_UPDATE_DST; + if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.tcpUdp.validUpdates & HDR_MANIP_TCP_UDP_SRC) + tmpReg |= HMCD_TCP_UDP_UPDATE_SRC; + /* write the first 4 bytes of the descriptor */ + WRITE_UINT32(*p_TmpHmct, tmpReg); + /* save a pointer to the "last" indication word */ + p_Last = p_TmpHmct; + + p_TmpHmct += HMCD_BASIC_SIZE/4; + + tmpReg = 0; + if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.tcpUdp.validUpdates & HDR_MANIP_TCP_UDP_SRC) + tmpReg |= ((uint32_t)p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.tcpUdp.src) << HMCD_TCP_UDP_UPDATE_SRC_SHIFT; + if (p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.tcpUdp.validUpdates & HDR_MANIP_TCP_UDP_DST) + tmpReg |= ((uint32_t)p_FmPcdManipParams->u.hdr.fieldUpdateParams.u.tcpUdp.dst); + WRITE_UINT32(*p_TmpHmct, tmpReg); + p_TmpHmct += HMCD_PTR_SIZE/4; + } + break; + + default: + RETURN_ERROR(MINOR, E_INVALID_SELECTION, ("Unknown fieldUpdateParams.type")); + } + } + + if (p_FmPcdManipParams->u.hdr.custom) + { + switch (p_FmPcdManipParams->u.hdr.customParams.type) + { + case (e_FM_PCD_MANIP_HDR_CUSTOM_IP_REPLACE): + /* set opcode */ + tmpReg = (uint32_t)(HMCD_OPCODE_REPLACE_IP) << HMCD_OC_SHIFT; + + if (p_FmPcdManipParams->u.hdr.customParams.u.ipHdrReplace.decTtlHl) + tmpReg |= HMCD_IP_REPLACE_TTL_HL; + if (p_FmPcdManipParams->u.hdr.customParams.u.ipHdrReplace.replaceType == 
e_FM_PCD_MANIP_HDR_CUSTOM_REPLACE_IPV4_BY_IPV6) + /* line commented out as it has no-side-effect ('0' value). */ + /*tmpReg |= HMCD_IP_REPLACE_REPLACE_IPV4*/; + else if (p_FmPcdManipParams->u.hdr.customParams.u.ipHdrReplace.replaceType == e_FM_PCD_MANIP_HDR_CUSTOM_REPLACE_IPV6_BY_IPV4) + { + tmpReg |= HMCD_IP_REPLACE_REPLACE_IPV6; + if (p_FmPcdManipParams->u.hdr.customParams.u.ipHdrReplace.updateIpv4Id) + tmpReg |= HMCD_IP_REPLACE_ID; + } + else + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, + ("One flag out of HDR_MANIP_IP_REPLACE_IPV4, HDR_MANIP_IP_REPLACE_IPV6 - must be set.")); + + /* write the first 4 bytes of the descriptor */ + WRITE_UINT32(*p_TmpHmct, tmpReg); + /* save a pointer to the "last" indication word */ + p_Last = p_TmpHmct; + + p_TmpHmct += HMCD_BASIC_SIZE/4; + + size = p_FmPcdManipParams->u.hdr.customParams.u.ipHdrReplace.hdrSize; + ASSERT_COND(p_TmpData); + Mem2IOCpy32(p_TmpData, p_FmPcdManipParams->u.hdr.customParams.u.ipHdrReplace.hdr, size); + tmpReg = (uint32_t)(size << HMCD_IP_REPLACE_L3HDRSIZE_SHIFT); + tmpReg |= (uint32_t)(XX_VirtToPhys(p_TmpData) - (((t_FmPcd*)h_FmPcd)->physicalMuramBase)); + WRITE_UINT32(*p_TmpHmct, tmpReg); + p_TmpHmct += HMCD_PTR_SIZE/4; + p_TmpData += size; + + if ((p_FmPcdManipParams->u.hdr.customParams.u.ipHdrReplace.replaceType == e_FM_PCD_MANIP_HDR_CUSTOM_REPLACE_IPV6_BY_IPV4) && + (p_FmPcdManipParams->u.hdr.customParams.u.ipHdrReplace.updateIpv4Id)) + { + WRITE_UINT16(*(uint16_t*)p_TmpData, p_FmPcdManipParams->u.hdr.customParams.u.ipHdrReplace.id); + WRITE_UINT32(*p_TmpHmct, (uint32_t)(XX_VirtToPhys(p_TmpData) - (((t_FmPcd*)h_FmPcd)->physicalMuramBase))); + p_TmpData += 2; + } + p_TmpHmct += HMCD_PTR_SIZE/4; + break; + default: + RETURN_ERROR(MINOR, E_INVALID_SELECTION, ("Unknown customParams.type")); + } + } + + + /* If this node has a nextManip, and no parsing is required after it, the old table must be copied to the new table + the old table and should be freed */ + if (p_FmPcdManipParams->h_NextManip && + 
(MANIP_DONT_REPARSE(p_FmPcdManipParams->h_NextManip))) + { + if (new) + { + /* If this is the first time this manip is created we need to free unused memory. If it + * is a dynamic changes case, the memory used is either the CC shadow or the existing + * table - no allocation, no free */ + MANIP_UPDATE_UNIFIED_POSITION(p_FmPcdManipParams->h_NextManip); + + p_Manip->unifiedPosition = e_MANIP_UNIFIED_FIRST; + + /* The HMTD of the next Manip is never going to be used */ + if (((t_FmPcdManip *)p_FmPcdManipParams->h_NextManip)->muramAllocate) + FM_MURAM_FreeMem(((t_FmPcd *)((t_FmPcdManip *)p_FmPcdManipParams->h_NextManip)->h_FmPcd)->h_FmMuram, ((t_FmPcdManip *)p_FmPcdManipParams->h_NextManip)->h_Ad); + else + XX_Free(((t_FmPcdManip *)p_FmPcdManipParams->h_NextManip)->h_Ad); + ((t_FmPcdManip *)p_FmPcdManipParams->h_NextManip)->h_Ad = NULL; + + /* advance pointer */ + p_TmpHmct += MANIP_GET_HMCT_SIZE(p_FmPcdManipParams->h_NextManip)/4; + } + } + else + { + ASSERT_COND(p_Last); + /* set the "last" indication on the last command of the current table */ + WRITE_UINT32(*p_Last, GET_UINT32(*p_Last) | HMCD_LAST); + } + + return E_OK; +} + +static t_Error CreateManipActionNew(t_FmPcdManip *p_Manip, t_FmPcdManipParams *p_FmPcdManipParams) +{ + t_FmPcdManip *p_CurManip; + t_Error err; + uint32_t nextSize = 0, totalSize; + uint16_t tmpReg; + uint8_t *p_OldHmct, *p_TmpHmctPtr, *p_TmpDataPtr; + + /* set Manip structure */ + if (p_FmPcdManipParams->h_NextManip) + { + if (MANIP_DONT_REPARSE(p_FmPcdManipParams->h_NextManip)) + nextSize = (uint32_t)(GetHmctSize(p_FmPcdManipParams->h_NextManip) + GetDataSize(p_FmPcdManipParams->h_NextManip)); + else + p_Manip->cascadedNext = TRUE; + } + p_Manip->dontParseAfterManip = p_FmPcdManipParams->u.hdr.dontParseAfterManip; + + /* Allocate new table */ + /* calculate table size according to manip parameters */ + err = CalculateTableSize(p_FmPcdManipParams, &p_Manip->tableSize, &p_Manip->dataSize); + if (err) + RETURN_ERROR(MINOR, err, NO_MSG); + + 
totalSize =(uint16_t)(p_Manip->tableSize + p_Manip->dataSize + nextSize); + + p_Manip->p_Hmct = (uint8_t*)FM_MURAM_AllocMem(((t_FmPcd *)p_Manip->h_FmPcd)->h_FmMuram, totalSize, 4); + if (!p_Manip->p_Hmct) + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM alloc failed")); + + if (p_Manip->dataSize) + p_Manip->p_Data = (uint8_t*)PTR_MOVE(p_Manip->p_Hmct, (p_Manip->tableSize + nextSize)); + + /* update shadow size to allow runtime replacement of Header manipulation */ + /* The allocated shadow is divided as follows: + 0 . . . 16 . . . + -------------------------------- + | Shadow | Shadow HMTD | + | HMTD | Match Table | + | (16 bytes) | (maximal size) | + -------------------------------- + */ + + err = FmPcdUpdateCcShadow (p_Manip->h_FmPcd, (uint32_t)(totalSize + 16), (uint16_t)FM_PCD_CC_AD_TABLE_ALIGN); + if (err != E_OK) + { + FM_MURAM_FreeMem(p_Manip->h_FmPcd, p_Manip->p_Hmct); + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for HdrManip node shadow")); + } + + + if (p_FmPcdManipParams->h_NextManip && + (MANIP_DONT_REPARSE(p_FmPcdManipParams->h_NextManip))) + { + p_OldHmct = (uint8_t *)GetManipInfo(p_FmPcdManipParams->h_NextManip, e_MANIP_HMCT); + p_CurManip = p_FmPcdManipParams->h_NextManip; + /* Run till the last Manip (which is the first to configure) */ + while (MANIP_IS_UNIFIED_NON_LAST(p_CurManip)) + p_CurManip = p_CurManip->h_NextManip; + + while (p_CurManip) + { + /* If this is a unified table, point to the part of the table + * which is the relative offset in HMCT. 
+ */ + p_TmpHmctPtr = (uint8_t*)PTR_MOVE(p_Manip->p_Hmct, + (p_Manip->tableSize + + (PTR_TO_UINT(p_CurManip->p_Hmct) - + PTR_TO_UINT(p_OldHmct)))); + if (p_CurManip->p_Data) + p_TmpDataPtr = (uint8_t*)PTR_MOVE(p_Manip->p_Hmct, + (p_Manip->tableSize + + (PTR_TO_UINT(p_CurManip->p_Data) - + PTR_TO_UINT(p_OldHmct)))); + else + p_TmpDataPtr = NULL; + + BuildHmct(p_CurManip, &p_CurManip->manipParams, p_TmpHmctPtr, p_TmpDataPtr, FALSE); + /* update old manip table pointer */ + MANIP_SET_HMCT_PTR(p_CurManip, p_TmpHmctPtr); + MANIP_SET_DATA_PTR(p_CurManip, p_TmpDataPtr); + + p_CurManip = p_CurManip->h_PrevManip; + } + /* We copied the HMCT to create a new large HMCT so we can free the old one */ + FM_MURAM_FreeMem(MANIP_GET_MURAM(p_FmPcdManipParams->h_NextManip), p_OldHmct); + } + + /* Fill table */ + err = BuildHmct(p_Manip, p_FmPcdManipParams, p_Manip->p_Hmct, p_Manip->p_Data, TRUE); + if (err) + { + FM_MURAM_FreeMem(p_Manip->h_FmPcd, p_Manip->p_Hmct); + RETURN_ERROR(MINOR, err, NO_MSG); + } + + /* Build HMTD (table descriptor) */ + tmpReg = HMTD_CFG_TYPE; /* NADEN = 0 */ + /* add parseAfterManip */ + if (!p_Manip->dontParseAfterManip) + tmpReg |= HMTD_CFG_PRS_AFTER_HM; + /* create cascade */ + if (p_FmPcdManipParams->h_NextManip && + !MANIP_DONT_REPARSE(p_FmPcdManipParams->h_NextManip)) + { + /* indicate that there's another HM table descriptor */ + tmpReg |= HMTD_CFG_NEXT_AD_EN; + WRITE_UINT16(((t_Hmtd *)p_Manip->h_Ad)->nextAdIdx, + (uint16_t)((uint32_t)(XX_VirtToPhys(MANIP_GET_HMTD_PTR(p_FmPcdManipParams->h_NextManip)) - + (((t_FmPcd*)p_Manip->h_FmPcd)->physicalMuramBase)) >> 4)); + } + + WRITE_UINT16(((t_Hmtd *)p_Manip->h_Ad)->cfg, tmpReg); + WRITE_UINT32(((t_Hmtd *)p_Manip->h_Ad)->hmcdBasePtr, + (uint32_t)(XX_VirtToPhys(p_Manip->p_Hmct) - (((t_FmPcd*)p_Manip->h_FmPcd)->physicalMuramBase))); + + WRITE_UINT8(((t_Hmtd *)p_Manip->h_Ad)->opCode, HMAN_OC); + + return E_OK; +} + +static t_Error CreateManipActionShadow(t_FmPcdManip *p_Manip, t_FmPcdManipParams 
*p_FmPcdManipParams)
{
    /* Rebuilds the HMCT for an already-initialized manip node inside the PCD CC
     * shadow area (p_FmPcd->p_CcShadow), validating that the new configuration
     * fits the sizes reserved by the original one. Returns E_OK or t_Error. */
    uint8_t                     *p_WholeHmct, *p_TmpHmctPtr, newDataSize, *p_TmpDataPtr = NULL;
    uint16_t                    newSize;
    t_FmPcd                     *p_FmPcd = (t_FmPcd *)p_Manip->h_FmPcd;
    t_Error                     err;
    t_FmPcdManip                *p_CurManip = p_Manip;

    err = CalculateTableSize(p_FmPcdManipParams, &newSize, &newDataSize);
    if (err)
        RETURN_ERROR(MINOR, err, NO_MSG);

    /* check coherency of new table parameters: the replacement must fit in the
     * space reserved when the manip was first created */
    if (newSize > p_Manip->tableSize)
        RETURN_ERROR(MINOR, E_INVALID_VALUE, ("New Hdr Manip configuration requires larger size than current one (command table)."));
    if (newDataSize > p_Manip->dataSize)
        RETURN_ERROR(MINOR, E_INVALID_VALUE, ("New Hdr Manip configuration requires larger size than current one (data)."));
    if (p_FmPcdManipParams->h_NextManip)
        RETURN_ERROR(MINOR, E_INVALID_VALUE, ("New Hdr Manip configuration can not contain h_NextManip."));
    if (MANIP_IS_UNIFIED(p_Manip) && (newSize != p_Manip->tableSize))
        RETURN_ERROR(MINOR, E_INVALID_VALUE, ("New Hdr Manip configuration in a chained manipulation requires different size than current one."));
    if (p_Manip->dontParseAfterManip != p_FmPcdManipParams->u.hdr.dontParseAfterManip)
        RETURN_ERROR(MINOR, E_INVALID_VALUE, ("New Hdr Manip configuration differs in dontParseAfterManip value."));

    p_Manip->tableSize = newSize;
    p_Manip->dataSize = newDataSize;


    /* Build the new table in the shadow */
    if (!MANIP_IS_UNIFIED(p_Manip))
    {
        /* commands go into the shadow right after the 16-byte HMTD slot */
        p_TmpHmctPtr = (uint8_t*)PTR_MOVE(p_FmPcd->p_CcShadow, 16);
        if (p_Manip->p_Data)
            p_TmpDataPtr = (uint8_t*)PTR_MOVE(p_TmpHmctPtr,
                                              (PTR_TO_UINT(p_Manip->p_Data) - PTR_TO_UINT(p_Manip->p_Hmct)));

        /* NOTE(review): p_TmpDataPtr (shadow data area) is computed above but
         * p_Manip->p_Data (the live data area) is what gets passed to BuildHmct,
         * and BuildHmct's return value is ignored here - verify both are intended. */
        BuildHmct(p_Manip, p_FmPcdManipParams, p_TmpHmctPtr, p_Manip->p_Data, FALSE);
    }
    else
    {
        p_WholeHmct = (uint8_t *)GetManipInfo(p_Manip, e_MANIP_HMCT);
        ASSERT_COND(p_WholeHmct);

        /* Run till the last Manip (which is the first to configure) */
        while (MANIP_IS_UNIFIED_NON_LAST(p_CurManip))
            p_CurManip = p_CurManip->h_NextManip;

        while (p_CurManip)
        {
            /* If this is a non-head node in a unified table, point to the part of the shadow
             * which is the relative offset in HMCT.
             * else, point to the beginning of the
             * shadow table (we save 16 for the HMTD.
             */
            p_TmpHmctPtr = (uint8_t*)PTR_MOVE(p_FmPcd->p_CcShadow,
                                              (16 + PTR_TO_UINT(p_CurManip->p_Hmct) - PTR_TO_UINT(p_WholeHmct)));
            if (p_CurManip->p_Data)
                p_TmpDataPtr = (uint8_t*)PTR_MOVE(p_FmPcd->p_CcShadow,
                                                  (16 + PTR_TO_UINT(p_CurManip->p_Data) - PTR_TO_UINT(p_WholeHmct)));

            BuildHmct(p_CurManip, &p_CurManip->manipParams, p_TmpHmctPtr, p_TmpDataPtr, FALSE);
            /* walk backwards through the chain: last manip was configured first */
            p_CurManip = p_CurManip->h_PrevManip;
        }
    }

    return E_OK;
}

/* Re-runs BuildHmct() over the manip's ORIGINAL (live) HMCT/data areas, i.e.
 * restores the pre-shadow contents after a shadow-based replacement attempt. */
static t_Error CreateManipActionBackToOrig(t_FmPcdManip *p_Manip, t_FmPcdManipParams *p_FmPcdManipParams)
{
    uint8_t     *p_WholeHmct, *p_TmpHmctPtr, *p_TmpDataPtr;
    t_FmPcdManip *p_CurManip = p_Manip;

    /* Build the new table in the shadow */
    if (!MANIP_IS_UNIFIED(p_Manip))
        BuildHmct(p_Manip, p_FmPcdManipParams, p_Manip->p_Hmct, p_Manip->p_Data, FALSE);
    else
    {
        p_WholeHmct = (uint8_t *)GetManipInfo(p_Manip, e_MANIP_HMCT);
        ASSERT_COND(p_WholeHmct);

        /* Run till the last Manip (which is the first to configure) */
        while (MANIP_IS_UNIFIED_NON_LAST(p_CurManip))
            p_CurManip = p_CurManip->h_NextManip;

        while (p_CurManip)
        {
            /* If this is a unified table, point to the part of the table
             * which is the relative offset in HMCT.
+ */ + p_TmpHmctPtr = p_CurManip->p_Hmct; /*- (uint32_t)p_WholeHmct*/ + p_TmpDataPtr = p_CurManip->p_Data; /*- (uint32_t)p_WholeHmct*/ + + BuildHmct(p_CurManip, &p_CurManip->manipParams, p_TmpHmctPtr, p_TmpDataPtr, FALSE); + + p_CurManip = p_CurManip->h_PrevManip; + } + } + + return E_OK; +} + +static t_Error UpdateManipIc(t_Handle h_Manip, uint8_t icOffset) +{ + t_FmPcdManip *p_Manip = (t_FmPcdManip *)h_Manip; + t_Handle p_Ad; + uint32_t tmpReg32 = 0; + SANITY_CHECK_RETURN_ERROR(h_Manip,E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Manip->h_Ad, E_INVALID_HANDLE); + + switch (p_Manip->opcode) + { + case (HMAN_OC_MV_INT_FRAME_HDR_FROM_FRM_TO_BUFFER_PREFFIX): + p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Ad; + if (p_Manip->updateParams & INTERNAL_CONTEXT_OFFSET) + { + tmpReg32 = *(uint32_t *)&((t_AdOfTypeContLookup *)p_Ad)->pcAndOffsets; + tmpReg32 |= (uint32_t)((uint32_t)icOffset << 16); + *(uint32_t *)&((t_AdOfTypeContLookup *)p_Ad)->pcAndOffsets = tmpReg32; + p_Manip->updateParams &= ~INTERNAL_CONTEXT_OFFSET; + p_Manip->icOffset = icOffset; + } + else + { + if (p_Manip->icOffset != icOffset) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("this manipulation was updated previously by different value");); + } + break; +#ifdef FM_CAPWAP_SUPPORT + case (HMAN_OC_CAPWAP_RMV_DTLS_IF_EXIST): + if (p_Manip->h_Frag) + { + if (p_Manip->updateParams & INTERNAL_CONTEXT_OFFSET) + { + p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Ad; + tmpReg32 |= GET_UINT32(((t_AdOfTypeContLookup *)p_Ad)->pcAndOffsets); + tmpReg32 |= (uint32_t)((uint32_t)icOffset << 16); + WRITE_UINT32(((t_AdOfTypeContLookup *)p_Ad)->pcAndOffsets, tmpReg32); + p_Manip->updateParams &= ~INTERNAL_CONTEXT_OFFSET; + p_Manip->icOffset = icOffset; + } + else + { + if (p_Manip->icOffset != icOffset) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("this manipulation was updated previousely by different value");); + } + } + break; +#endif /* FM_CAPWAP_SUPPORT */ + } + + return E_OK; +} + +static t_Error 
UpdateInitMvIntFrameHeaderFromFrameToBufferPrefix(t_Handle h_FmPort, t_FmPcdManip *p_Manip, t_Handle h_Ad, bool validate) +{ + + t_AdOfTypeContLookup *p_Ad = (t_AdOfTypeContLookup *)h_Ad; + t_FmPortGetSetCcParams fmPortGetSetCcParams; + t_Error err; + uint32_t tmpReg32; + + memset(&fmPortGetSetCcParams, 0, sizeof(t_FmPortGetSetCcParams)); + + SANITY_CHECK_RETURN_ERROR(p_Manip,E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR((p_Manip->opcode & HMAN_OC_MV_INT_FRAME_HDR_FROM_FRM_TO_BUFFER_PREFFIX), E_INVALID_STATE); + SANITY_CHECK_RETURN_ERROR(!p_Manip->muramAllocate, E_INVALID_STATE); + + if (p_Manip->updateParams) + { + if ((!(p_Manip->updateParams & OFFSET_OF_PR)) || + (p_Manip->shadowUpdateParams & OFFSET_OF_PR)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("in this stage parameters from Port has not be updated")); + + fmPortGetSetCcParams.getCcParams.type = p_Manip->updateParams; + fmPortGetSetCcParams.setCcParams.type = UPDATE_PSO; + fmPortGetSetCcParams.setCcParams.psoSize = 16; + + err = FmPortGetSetCcParams(h_FmPort, &fmPortGetSetCcParams); + if (err) + RETURN_ERROR(MAJOR, err, NO_MSG); + if (fmPortGetSetCcParams.getCcParams.type & OFFSET_OF_PR) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Parser result offset wasn't configured previousely")); +#ifdef FM_LOCKUP_ALIGNMENT_ERRATA_FMAN_SW004 + ASSERT_COND(!(fmPortGetSetCcParams.getCcParams.prOffset % 16)); +#endif + } + else if (validate) + { + if ((!(p_Manip->shadowUpdateParams & OFFSET_OF_PR)) || + (p_Manip->updateParams & OFFSET_OF_PR)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("in this stage parameters from Port has be updated")); + fmPortGetSetCcParams.getCcParams.type = p_Manip->shadowUpdateParams; + fmPortGetSetCcParams.setCcParams.type = UPDATE_PSO; + fmPortGetSetCcParams.setCcParams.psoSize = 16; + + err = FmPortGetSetCcParams(h_FmPort, &fmPortGetSetCcParams); + if (err) + RETURN_ERROR(MAJOR, err, NO_MSG); + if (fmPortGetSetCcParams.getCcParams.type & OFFSET_OF_PR) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Parser 
result offset wasn't configured previousely")); + + } + + ASSERT_COND(p_Ad); + + if (p_Manip->updateParams & OFFSET_OF_PR) + { + tmpReg32 = 0; + tmpReg32 |= fmPortGetSetCcParams.getCcParams.prOffset; + WRITE_UINT32(p_Ad->matchTblPtr, (GET_UINT32(p_Ad->matchTblPtr) | tmpReg32)); + p_Manip->updateParams &= ~OFFSET_OF_PR; + p_Manip->shadowUpdateParams |= OFFSET_OF_PR; + } + else if (validate) + { + tmpReg32 = GET_UINT32(p_Ad->matchTblPtr); + if ((uint8_t)tmpReg32 != fmPortGetSetCcParams.getCcParams.prOffset) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("this manipulation was updated previousely by different value");); + } + + return E_OK; +} + +#ifdef FM_CAPWAP_SUPPORT +static t_Error UpdateModifyCapwapFragmenation(t_FmPcdManip *p_Manip, t_Handle h_Ad, bool validate,t_Handle h_FmTree) +{ + t_AdOfTypeContLookup *p_Ad = (t_AdOfTypeContLookup *)h_Ad; + t_FmPcdCcSavedManipParams *p_SavedManipParams = NULL; + uint32_t tmpReg32 = 0; + + SANITY_CHECK_RETURN_ERROR(p_Manip,E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Manip->h_Frag,E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Manip->frag,E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(((p_Manip->opcode == HMAN_OC_CAPWAP_FRAGMENTATION) || (p_Manip->opcode == HMAN_OC_INSRT_HDR_BY_TEMPL_N_OR_FRAG_AFTER)), E_INVALID_STATE); + + p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Frag; + + if (p_Manip->updateParams) + { + + if ((!(p_Manip->updateParams & OFFSET_OF_DATA)) || + ((p_Manip->shadowUpdateParams & OFFSET_OF_DATA))) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("in this stage parameters from Port has not be updated")); + p_SavedManipParams = FmPcdCcTreeGetSavedManipParams(h_FmTree); + if (!p_SavedManipParams) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("for this manipulation tree has to be configured previosely with this type")); + p_Manip->fragParams.dataOffset = p_SavedManipParams->capwapParams.dataOffset; + + tmpReg32 = GET_UINT32(p_Ad->pcAndOffsets); + tmpReg32 |= ((uint32_t)p_Manip->fragParams.dataOffset<< 16); + 
WRITE_UINT32(p_Ad->pcAndOffsets,tmpReg32); + + p_Manip->updateParams &= ~OFFSET_OF_DATA; + p_Manip->shadowUpdateParams |= OFFSET_OF_DATA; + } + else if (validate) + { + + p_SavedManipParams = FmPcdCcTreeGetSavedManipParams(h_FmTree); + if (!p_SavedManipParams) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("for this manipulation tree has to be configured previosely with this type")); + if (p_Manip->fragParams.dataOffset != p_SavedManipParams->capwapParams.dataOffset) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("this manipulation was updated previousely by different value")); + } + + return E_OK; +} + +static t_Error UpdateInitCapwapFragmentation(t_Handle h_FmPort, + t_FmPcdManip *p_Manip, + t_Handle h_Ad, + bool validate, + t_Handle h_FmTree) +{ + t_AdOfTypeContLookup *p_Ad; + t_FmPortGetSetCcParams fmPortGetSetCcParams; + t_Error err; + uint32_t tmpReg32 = 0; + t_FmPcdCcSavedManipParams *p_SavedManipParams; + + UNUSED(h_Ad); + + SANITY_CHECK_RETURN_ERROR(p_Manip,E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Manip->h_Frag,E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Manip->frag,E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(((p_Manip->opcode == HMAN_OC_CAPWAP_FRAGMENTATION) || + (p_Manip->opcode == HMAN_OC_INSRT_HDR_BY_TEMPL_N_OR_FRAG_AFTER)), E_INVALID_STATE); + + p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Frag; + + if (p_Manip->updateParams) + { + if ((!(p_Manip->updateParams & OFFSET_OF_DATA)) || + ((p_Manip->shadowUpdateParams & OFFSET_OF_DATA))) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("in this stage parameters from Port has not be updated")); + fmPortGetSetCcParams.getCcParams.type = p_Manip->updateParams; + fmPortGetSetCcParams.setCcParams.type = UPDATE_NIA_PNEN | UPDATE_FMFP_PRC_WITH_ONE_RISC_ONLY; + fmPortGetSetCcParams.setCcParams.nia = NIA_FM_CTL_AC_FRAG | NIA_ENG_FM_CTL; + /* For CAPWAP Rassembly used FMAN_CTRL2 hardcoded - so for fragmentation its better to use FMAN_CTRL1 */ + fmPortGetSetCcParams.setCcParams.orFmanCtrl = FPM_PORT_FM_CTL1; + + err = 
FmPortGetSetCcParams(h_FmPort, &fmPortGetSetCcParams); + if (err) + RETURN_ERROR(MAJOR, err, NO_MSG); + + if (fmPortGetSetCcParams.getCcParams.type & OFFSET_OF_DATA) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Data offset wasn't configured previousely")); + + p_SavedManipParams = (t_FmPcdCcSavedManipParams *)XX_Malloc(sizeof(t_FmPcdCcSavedManipParams)); + p_SavedManipParams->capwapParams.dataOffset = fmPortGetSetCcParams.getCcParams.dataOffset; + +#ifdef FM_LOCKUP_ALIGNMENT_ERRATA_FMAN_SW004 + ASSERT_COND(!(p_SavedManipParams->capwapParams.dataOffset % 16)); +#endif /* FM_LOCKUP_ALIGNMENT_ERRATA_FMAN_SW004 */ + + FmPcdCcTreeSetSavedManipParams(h_FmTree, (t_Handle)p_SavedManipParams); + } + else if (validate) + { + if ((!(p_Manip->shadowUpdateParams & OFFSET_OF_DATA)) || + ((p_Manip->updateParams & OFFSET_OF_DATA))) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("in this stage parameters from Port has be updated")); + fmPortGetSetCcParams.getCcParams.type = p_Manip->shadowUpdateParams; + fmPortGetSetCcParams.setCcParams.type = UPDATE_NIA_PNEN | UPDATE_FMFP_PRC_WITH_ONE_RISC_ONLY; + fmPortGetSetCcParams.setCcParams.nia = NIA_FM_CTL_AC_FRAG | NIA_ENG_FM_CTL; + err = FmPortGetSetCcParams(h_FmPort, &fmPortGetSetCcParams); + if (err) + RETURN_ERROR(MAJOR, err, NO_MSG); + + if (fmPortGetSetCcParams.getCcParams.type & OFFSET_OF_DATA) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Data offset wasn't configured previousely")); + } + + if (p_Manip->updateParams) + { + tmpReg32 = GET_UINT32(p_Ad->pcAndOffsets); + tmpReg32 |= ((uint32_t)fmPortGetSetCcParams.getCcParams.dataOffset<< 16); + WRITE_UINT32(p_Ad->pcAndOffsets,tmpReg32); + + p_Manip->updateParams &= ~OFFSET_OF_DATA; + p_Manip->shadowUpdateParams |= OFFSET_OF_DATA; + p_Manip->fragParams.dataOffset = fmPortGetSetCcParams.getCcParams.dataOffset; + } + else if (validate) + { + if (p_Manip->fragParams.dataOffset != fmPortGetSetCcParams.getCcParams.dataOffset) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("this manipulation was updated 
previousely by different value")); + } + + return E_OK; +} + +static t_Error UpdateInitCapwapReasm(t_Handle h_FmPcd, + t_Handle h_FmPort, + t_FmPcdManip *p_Manip, + t_Handle h_Ad, + bool validate) +{ + t_CapwapReasmPram *p_ReassmTbl; + t_Error err; + t_FmPortGetSetCcParams fmPortGetSetCcParams; + uint8_t i = 0; + uint16_t size; + uint32_t tmpReg32; + t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; + t_FmPcdCcCapwapReassmTimeoutParams ccCapwapReassmTimeoutParams; + + SANITY_CHECK_RETURN_ERROR(p_Manip,E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Manip->h_Frag,E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_Manip->frag,E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR((p_Manip->opcode == HMAN_OC_CAPWAP_RMV_DTLS_IF_EXIST), E_INVALID_STATE); + SANITY_CHECK_RETURN_ERROR(h_FmPcd,E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc,E_INVALID_HANDLE); + + if (p_Manip->h_FmPcd != h_FmPcd) + RETURN_ERROR(MAJOR, E_INVALID_STATE, + ("handler of PCD previously was initiated by different value")); + + UNUSED(h_Ad); + + memset(&fmPortGetSetCcParams, 0, sizeof(t_FmPortGetSetCcParams)); + p_ReassmTbl = (t_CapwapReasmPram *)p_Manip->h_Frag; + + if (p_Manip->updateParams) + { + if ((!(p_Manip->updateParams & NUM_OF_TASKS) && + !(p_Manip->updateParams & OFFSET_OF_DATA) && + !(p_Manip->updateParams & OFFSET_OF_PR) && + !(p_Manip->updateParams & HW_PORT_ID)) || + ((p_Manip->shadowUpdateParams & NUM_OF_TASKS) || + (p_Manip->shadowUpdateParams & OFFSET_OF_DATA) || (p_Manip->shadowUpdateParams & OFFSET_OF_PR) || + (p_Manip->shadowUpdateParams & HW_PORT_ID))) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("in this stage parameters from Port has not be updated")); + + fmPortGetSetCcParams.getCcParams.type = p_Manip->updateParams; + fmPortGetSetCcParams.setCcParams.type = UPDATE_NIA_PNEN; + fmPortGetSetCcParams.setCcParams.nia = NIA_FM_CTL_AC_FRAG | NIA_ENG_FM_CTL; + + err = FmPortGetSetCcParams(h_FmPort, &fmPortGetSetCcParams); + if (err) + RETURN_ERROR(MAJOR, err, NO_MSG); + + if 
(fmPortGetSetCcParams.getCcParams.type & NUM_OF_TASKS)
            RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Num of tasks wasn't configured previousely"));
        if (fmPortGetSetCcParams.getCcParams.type & OFFSET_OF_DATA)
            RETURN_ERROR(MAJOR, E_INVALID_STATE, ("offset of the data wasn't configured previousely"));
        if (fmPortGetSetCcParams.getCcParams.type & HW_PORT_ID)
            RETURN_ERROR(MAJOR, E_INVALID_STATE, ("hwPortId wasn't updated"));
#ifdef FM_LOCKUP_ALIGNMENT_ERRATA_FMAN_SW004
        ASSERT_COND((fmPortGetSetCcParams.getCcParams.dataOffset % 16) == 0);
#endif /* FM_LOCKUP_ALIGNMENT_ERRATA_FMAN_SW004 */
    }
    else if (validate)
    {
        if ((!(p_Manip->shadowUpdateParams & NUM_OF_TASKS) &&
             !(p_Manip->shadowUpdateParams & OFFSET_OF_DATA) &&
             !(p_Manip->shadowUpdateParams & OFFSET_OF_PR) &&
             !(p_Manip->shadowUpdateParams & HW_PORT_ID)) &&
            ((p_Manip->updateParams & NUM_OF_TASKS) ||
             (p_Manip->updateParams & OFFSET_OF_DATA) || (p_Manip->updateParams & OFFSET_OF_PR) ||
             (p_Manip->updateParams & HW_PORT_ID)))
            RETURN_ERROR(MAJOR, E_INVALID_STATE, ("in this stage parameters from Port has be updated"));

        fmPortGetSetCcParams.getCcParams.type = p_Manip->shadowUpdateParams;
        fmPortGetSetCcParams.setCcParams.type = UPDATE_NIA_PNEN;
        fmPortGetSetCcParams.setCcParams.nia = NIA_FM_CTL_AC_FRAG | NIA_ENG_FM_CTL;

        err = FmPortGetSetCcParams(h_FmPort, &fmPortGetSetCcParams);
        if (err)
            RETURN_ERROR(MAJOR, err, NO_MSG);

        if (fmPortGetSetCcParams.getCcParams.type & NUM_OF_TASKS)
            RETURN_ERROR(MAJOR, E_INVALID_STATE, ("NumOfTasks wasn't configured previously"));
        if (fmPortGetSetCcParams.getCcParams.type & OFFSET_OF_DATA)
            RETURN_ERROR(MAJOR, E_INVALID_STATE, ("offset of the data wasn't configured previously"));
        if (fmPortGetSetCcParams.getCcParams.type & HW_PORT_ID)
            RETURN_ERROR(MAJOR, E_INVALID_STATE, ("hwPortId wasn't updated"));
    }

    if (p_Manip->updateParams)
    {
        if (p_Manip->updateParams & NUM_OF_TASKS)
        {
            /*recommendation of Microcode team - (maxNumFramesInProcess * 2) */
            size = (uint16_t)(p_Manip->fragParams.maxNumFramesInProcess*2 + fmPortGetSetCcParams.getCcParams.numOfTasks);
            if (size > 255)
                RETURN_ERROR(MAJOR,E_INVALID_VALUE, ("numOfOpenReassmEntries + numOfTasks per port can not be greater than 256"));

            p_Manip->fragParams.numOfTasks = fmPortGetSetCcParams.getCcParams.numOfTasks;

            /* NOTE(review): the three MURAM tables below are allocated in sequence;
             * a failure on a later allocation returns without freeing the earlier
             * ones - potential MURAM leak on this error path; verify cleanup is
             * handled by the caller. */

            /*p_ReassmFrmDescrIndxPoolTbl*/
            p_Manip->fragParams.p_ReassmFrmDescrIndxPoolTbl =
                (t_Handle)FM_MURAM_AllocMem(p_FmPcd->h_FmMuram,
                                            (uint32_t)(size + 1),
                                            4);
            if (!p_Manip->fragParams.p_ReassmFrmDescrIndxPoolTbl)
                RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM alloc for CAPWAP Reassembly frame buffer index pool table"));

            IOMemSet32(p_Manip->fragParams.p_ReassmFrmDescrIndxPoolTbl, 0, (uint32_t)(size + 1));

            /* chain the free-index pool: entry i points to i+1 */
            for ( i = 0; i < size; i++)
                WRITE_UINT8(*(uint8_t *)PTR_MOVE(p_Manip->fragParams.p_ReassmFrmDescrIndxPoolTbl, i), (uint8_t)(i+1));

            tmpReg32 = (uint32_t)(XX_VirtToPhys(p_Manip->fragParams.p_ReassmFrmDescrIndxPoolTbl) - p_FmPcd->physicalMuramBase);

            WRITE_UINT32(p_ReassmTbl->reasmFrmDescIndexPoolTblPtr, tmpReg32);

            /*p_ReassmFrmDescrPoolTbl*/
            p_Manip->fragParams.p_ReassmFrmDescrPoolTbl =
                (t_Handle)FM_MURAM_AllocMem(p_FmPcd->h_FmMuram,
                                            (uint32_t)((size + 1) * FM_PCD_MANIP_CAPWAP_REASM_RFD_SIZE),
                                            4);

            if (!p_Manip->fragParams.p_ReassmFrmDescrPoolTbl)
                RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM alloc for CAPWAP Reassembly frame buffer pool table"));

            IOMemSet32(p_Manip->fragParams.p_ReassmFrmDescrPoolTbl, 0, (uint32_t)((size +1)* FM_PCD_MANIP_CAPWAP_REASM_RFD_SIZE));

            tmpReg32 = (uint32_t)(XX_VirtToPhys(p_Manip->fragParams.p_ReassmFrmDescrPoolTbl) - p_FmPcd->physicalMuramBase);

            WRITE_UINT32(p_ReassmTbl->reasmFrmDescPoolTblPtr, tmpReg32);

            /*p_TimeOutTbl*/

            p_Manip->fragParams.p_TimeOutTbl =
                (t_Handle)FM_MURAM_AllocMem(p_FmPcd->h_FmMuram,
                                            (uint32_t)((size + 1)* FM_PCD_MANIP_CAPWAP_REASM_TIME_OUT_ENTRY_SIZE),
                                            4);

            if (!p_Manip->fragParams.p_TimeOutTbl)
                RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM alloc for CAPWAP Reassembly timeout table"));

            IOMemSet32(p_Manip->fragParams.p_TimeOutTbl, 0, (uint16_t)((size + 1)*FM_PCD_MANIP_CAPWAP_REASM_TIME_OUT_ENTRY_SIZE));

            tmpReg32 = (uint32_t)(XX_VirtToPhys(p_Manip->fragParams.p_TimeOutTbl) - p_FmPcd->physicalMuramBase);
            WRITE_UINT32(p_ReassmTbl->timeOutTblPtr, tmpReg32);

            p_Manip->updateParams &= ~NUM_OF_TASKS;
            p_Manip->shadowUpdateParams |= NUM_OF_TASKS;
        }

        if (p_Manip->updateParams & OFFSET_OF_DATA)
        {
            p_Manip->fragParams.dataOffset = fmPortGetSetCcParams.getCcParams.dataOffset;
            tmpReg32 = GET_UINT32(p_ReassmTbl->mode);
            tmpReg32|= p_Manip->fragParams.dataOffset;
            WRITE_UINT32(p_ReassmTbl->mode, tmpReg32);
            p_Manip->updateParams &= ~OFFSET_OF_DATA;
            p_Manip->shadowUpdateParams |= OFFSET_OF_DATA;
        }

        /* a cleared OFFSET_OF_PR type bit means the port supplied a parse-result offset */
        if (!(fmPortGetSetCcParams.getCcParams.type & OFFSET_OF_PR))
        {
            p_Manip->fragParams.prOffset = fmPortGetSetCcParams.getCcParams.prOffset;

            tmpReg32 = GET_UINT32(p_ReassmTbl->mode);
            tmpReg32|= FM_PCD_MANIP_CAPWAP_REASM_PR_COPY;
            WRITE_UINT32(p_ReassmTbl->mode, tmpReg32);

            tmpReg32 = GET_UINT32(p_ReassmTbl->intStatsTblPtr);
            tmpReg32 |= (uint32_t)p_Manip->fragParams.prOffset << 24;
            WRITE_UINT32(p_ReassmTbl->intStatsTblPtr, tmpReg32);
            p_Manip->updateParams &= ~OFFSET_OF_PR;
            p_Manip->shadowUpdateParams |= OFFSET_OF_PR;
        }
        else
        {
            /* no parse-result copy: 0xff marks "not configured" for later validation */
            p_Manip->fragParams.prOffset = 0xff;
            p_Manip->updateParams &= ~OFFSET_OF_PR;
            p_Manip->shadowUpdateParams |= OFFSET_OF_PR;
        }

        p_Manip->fragParams.hwPortId = fmPortGetSetCcParams.getCcParams.hardwarePortId;
        p_Manip->updateParams &= ~HW_PORT_ID;
        p_Manip->shadowUpdateParams |= HW_PORT_ID;

        /*timeout hc */
        ccCapwapReassmTimeoutParams.fqidForTimeOutFrames = p_Manip->fragParams.fqidForTimeOutFrames;
        ccCapwapReassmTimeoutParams.portIdAndCapwapReassmTbl = (uint32_t)p_Manip->fragParams.hwPortId << 24;
        ccCapwapReassmTimeoutParams.portIdAndCapwapReassmTbl |= (uint32_t)((XX_VirtToPhys(p_ReassmTbl) - p_FmPcd->physicalMuramBase));
        ccCapwapReassmTimeoutParams.timeoutRequestTime = (((uint32_t)1<<p_Manip->fragParams.bitFor1Micro) * p_Manip->fragParams.timeoutRoutineRequestTime)/2;
        return FmHcPcdCcCapwapTimeoutReassm(p_FmPcd->h_Hc,&ccCapwapReassmTimeoutParams);
    }

    else if (validate)
    {
        if (fmPortGetSetCcParams.getCcParams.hardwarePortId != p_Manip->fragParams.hwPortId)
            RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Reassembly manipulation previously was assigned to another port"));
        if (fmPortGetSetCcParams.getCcParams.numOfTasks != p_Manip->fragParams.numOfTasks)
            RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("numOfTasks for this manipulation previously was defined by another value "));


        if (!(fmPortGetSetCcParams.getCcParams.type & OFFSET_OF_PR))
        {
            if (p_Manip->fragParams.prOffset != fmPortGetSetCcParams.getCcParams.prOffset)
                RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Parse result offset previously was defined by another value "));
        }
        else
        {
            if (p_Manip->fragParams.prOffset != 0xff)
                RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Parse result offset previously was defined by another value "));
        }
        if (fmPortGetSetCcParams.getCcParams.dataOffset != p_Manip->fragParams.dataOffset)
            RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Data offset previously was defined by another value "));
    }

    return E_OK;
}
#endif /* FM_CAPWAP_SUPPORT */

t_Error FmPcdRegisterReassmPort(t_Handle h_FmPcd, t_Handle h_IpReasmCommonPramTbl)
{
    t_FmPcd         *p_FmPcd = (t_FmPcd*)h_FmPcd;
    t_FmPcdCcIpReassmTimeoutParams ccIpReassmTimeoutParams = {0};
    t_Error         err = E_OK;
    uint8_t         result;
    uint32_t        bitFor1Micro, tsbs, log2num;

    ASSERT_COND(p_FmPcd);
    ASSERT_COND(h_IpReasmCommonPramTbl);

    bitFor1Micro = FmGetTimeStampScale(p_FmPcd->h_Fm);
    bitFor1Micro = 32 - bitFor1Micro;
    LOG2(FM_PCD_MANIP_IP_REASSM_TIMEOUT_THREAD_THRESH, log2num);
    tsbs = bitFor1Micro - log2num;

    ccIpReassmTimeoutParams.iprcpt =
(uint32_t)(XX_VirtToPhys(h_IpReasmCommonPramTbl) - p_FmPcd->physicalMuramBase); + ccIpReassmTimeoutParams.tsbs = (uint8_t)tsbs; + ccIpReassmTimeoutParams.activate = TRUE; + if ((err = FmHcPcdCcIpTimeoutReassm(p_FmPcd->h_Hc, &ccIpReassmTimeoutParams, &result)) != E_OK) + RETURN_ERROR(MAJOR, err, NO_MSG); + + switch (result) + { + case (0): + return E_OK; + case (1): + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("failed to allocate TNUM")); + case (2): + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("failed to allocate internal buffer")); + case (3): + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("'Disable Timeout Task' with invalid IPRCPT")); + case (4): + RETURN_ERROR(MAJOR, E_FULL, ("too many timeout tasks")); + case (5): + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("invalid sub command")); + default: + RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG); + } + return E_OK; +} + +static t_Error CreateIpReassCommonTable(t_FmPcdManip *p_Manip) +{ + uint32_t tmpReg32 = 0, i; + uint64_t tmpReg64, size; + t_FmPcd *p_FmPcd = (t_FmPcd *)p_Manip->h_FmPcd; + t_Error err = E_OK; + + /* Allocation of the IP Reassembly Common Parameters table. This table is located in the + MURAM. Its size is 64 bytes and its base address should be 8-byte aligned. 
+ It contains parameters that are common to both the IPv4 reassembly function and IPv6 + reassembly function.*/ + p_Manip->ipReassmParams.p_IpReassCommonTbl = + (t_IpReassCommonTbl *)FM_MURAM_AllocMem(p_FmPcd->h_FmMuram, + FM_PCD_MANIP_IP_REASM_COMMON_PARAM_TABLE_SIZE, + FM_PCD_MANIP_IP_REASM_COMMON_PARAM_TABLE_ALIGN); + + if (!p_Manip->ipReassmParams.p_IpReassCommonTbl) + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM alloc for Reassembly common parameters table")); + + IOMemSet32(p_Manip->ipReassmParams.p_IpReassCommonTbl, 0, FM_PCD_MANIP_IP_REASM_COMMON_PARAM_TABLE_SIZE); + + /* Setting the TimeOut Mode.*/ + tmpReg32 = 0; + if (p_Manip->ipReassmParams.timeOutMode == e_FM_PCD_MANIP_TIME_OUT_BETWEEN_FRAMES) + tmpReg32 |= FM_PCD_MANIP_IP_REASM_TIME_OUT_BETWEEN_FRAMES; + + /* Setting TimeOut FQID - Frames that time out are enqueued to this FQID. + In order to cause TimeOut frames to be discarded, this queue should be configured accordingly*/ + tmpReg32 |= p_Manip->ipReassmParams.fqidForTimeOutFrames; + WRITE_UINT32(p_Manip->ipReassmParams.p_IpReassCommonTbl->timeoutModeAndFqid, tmpReg32); + + /* Calculation the size of IP Reassembly Frame Descriptor - number of frames that are allowed to be reassembled simultaneously + 129.*/ + size = p_Manip->ipReassmParams.maxNumFramesInProcess + 129; + + /*Allocation of IP Reassembly Frame Descriptor Indexes Pool - This pool resides in the MURAM */ + p_Manip->ipReassmParams.reassFrmDescrIndxPoolTblAddr = + PTR_TO_UINT(FM_MURAM_AllocMem(p_FmPcd->h_FmMuram, + (uint32_t)(size * 2), + 256)); + if (!p_Manip->ipReassmParams.reassFrmDescrIndxPoolTblAddr) + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM alloc for Reassembly frame descriptor indexes pool")); + + IOMemSet32(UINT_TO_PTR(p_Manip->ipReassmParams.reassFrmDescrIndxPoolTblAddr), 0, (uint32_t)(size * 2)); + + /* The entries in IP Reassembly Frame Descriptor Indexes Pool contains indexes starting with 1 up to + the maximum number of frames that are allowed to be reassembled simultaneously + 
128. + The last entry in this pool must contain the index zero*/ + for (i=0; i<(size-1); i++) + WRITE_UINT16(*(uint16_t *)PTR_MOVE(UINT_TO_PTR(p_Manip->ipReassmParams.reassFrmDescrIndxPoolTblAddr), (i<<1)), + (uint16_t)(i+1)); + + /* Sets the IP Reassembly Frame Descriptor Indexes Pool offset from MURAM */ + tmpReg32 = (uint32_t)(XX_VirtToPhys(UINT_TO_PTR(p_Manip->ipReassmParams.reassFrmDescrIndxPoolTblAddr)) - p_FmPcd->physicalMuramBase); + WRITE_UINT32(p_Manip->ipReassmParams.p_IpReassCommonTbl->reassFrmDescIndexPoolTblPtr, tmpReg32); + + /* Allocation of the Reassembly Frame Descriptors Pool - This pool resides in external memory. + The number of entries in this pool should be equal to the number of entries in IP Reassembly Frame Descriptor Indexes Pool.*/ + p_Manip->ipReassmParams.reassFrmDescrPoolTblAddr = + PTR_TO_UINT(XX_MallocSmart((uint32_t)(size * 64), p_Manip->ipReassmParams.dataMemId, 64)); + + if (!p_Manip->ipReassmParams.reassFrmDescrPoolTblAddr) + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Memory allocation FAILED")); + + IOMemSet32(UINT_TO_PTR(p_Manip->ipReassmParams.reassFrmDescrPoolTblAddr), 0, (uint32_t)(size * 64)); + + /* Sets the Reassembly Frame Descriptors Pool and liodn offset*/ + tmpReg64 = (uint64_t)(XX_VirtToPhys(UINT_TO_PTR(p_Manip->ipReassmParams.reassFrmDescrPoolTblAddr))); + tmpReg64 |= ((uint64_t)(p_Manip->ipReassmParams.dataLiodnOffset & FM_PCD_MANIP_IP_REASM_LIODN_MASK) << (uint64_t)FM_PCD_MANIP_IP_REASM_LIODN_SHIFT); + tmpReg64 |= ((uint64_t)(p_Manip->ipReassmParams.dataLiodnOffset & FM_PCD_MANIP_IP_REASM_ELIODN_MASK) << (uint64_t)FM_PCD_MANIP_IP_REASM_ELIODN_SHIFT); + WRITE_UINT32(p_Manip->ipReassmParams.p_IpReassCommonTbl->liodnAndReassFrmDescPoolPtrHi, (uint32_t)(tmpReg64 >> 32)); + WRITE_UINT32(p_Manip->ipReassmParams.p_IpReassCommonTbl->reassFrmDescPoolPtrLow, (uint32_t)tmpReg64); + + /*Allocation of the TimeOut table - This table resides in the MURAM. 
+ The number of entries in this table is identical to the number of entries in the Reassembly Frame Descriptors Pool*/ + p_Manip->ipReassmParams.timeOutTblAddr = + PTR_TO_UINT(FM_MURAM_AllocMem(p_FmPcd->h_FmMuram, (uint32_t)(size * 8),8)); + + if (!p_Manip->ipReassmParams.timeOutTblAddr) + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM alloc for Reassembly timeout table")); + + IOMemSet32(UINT_TO_PTR(p_Manip->ipReassmParams.timeOutTblAddr), 0, (uint16_t)(size * 8)); + + /* Sets the TimeOut table offset from MURAM */ + tmpReg32 = (uint32_t)(XX_VirtToPhys(UINT_TO_PTR(p_Manip->ipReassmParams.timeOutTblAddr)) - p_FmPcd->physicalMuramBase); + WRITE_UINT32(p_Manip->ipReassmParams.p_IpReassCommonTbl->timeOutTblPtr, tmpReg32); + + /* Sets the Expiration Delay */ + tmpReg32 = 0; + tmpReg32 |= (((uint32_t)(1 << FmGetTimeStampScale(p_FmPcd->h_Fm))) * p_Manip->ipReassmParams.timeoutThresholdForReassmProcess); + WRITE_UINT32(p_Manip->ipReassmParams.p_IpReassCommonTbl->expirationDelay, tmpReg32); + + err = FmPcdRegisterReassmPort(p_FmPcd, p_Manip->ipReassmParams.p_IpReassCommonTbl); + if (err != E_OK) + { + FM_MURAM_FreeMem(p_FmPcd->h_FmMuram, p_Manip->ipReassmParams.p_IpReassCommonTbl); + RETURN_ERROR(MAJOR, err, ("port registration")); + } + + return err; +} + +static t_Error CreateIpReassTable(t_FmPcdManip *p_Manip, bool ipv4) +{ + t_FmPcd *p_FmPcd = p_Manip->h_FmPcd; + uint32_t tmpReg32, autoLearnHashTblSize; + uint32_t numOfWays, setSize, setSizeCode, keySize; + uint32_t waySize, numOfSets, numOfEntries; + uint64_t tmpReg64; + uint16_t minFragSize; + uintptr_t *p_AutoLearnHashTblAddr, *p_AutoLearnSetLockTblAddr; + t_IpReassTbl **p_IpReassTbl; + + if (ipv4) + { + p_IpReassTbl = &p_Manip->ipReassmParams.p_Ipv4ReassTbl; + p_AutoLearnHashTblAddr = &p_Manip->ipReassmParams.ipv4AutoLearnHashTblAddr; + p_AutoLearnSetLockTblAddr = &p_Manip->ipReassmParams.ipv4AutoLearnSetLockTblAddr; + minFragSize = p_Manip->ipReassmParams.minFragSize[0]; + numOfWays = 
p_Manip->ipReassmParams.numOfFramesPerHashEntry[0]; + keySize = 4 + 4 + 1 + 2; /* 3-tuple + IP-Id */ + } + else + { + p_IpReassTbl = &p_Manip->ipReassmParams.p_Ipv6ReassTbl; + p_AutoLearnHashTblAddr = &p_Manip->ipReassmParams.ipv6AutoLearnHashTblAddr; + p_AutoLearnSetLockTblAddr = &p_Manip->ipReassmParams.ipv6AutoLearnSetLockTblAddr; + minFragSize = p_Manip->ipReassmParams.minFragSize[1]; + numOfWays = p_Manip->ipReassmParams.numOfFramesPerHashEntry[1]; + keySize = 16 + 16 + 4; /* 2-tuple + IP-Id */ + if (numOfWays > e_FM_PCD_MANIP_SIX_WAYS_HASH) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("num of ways")); + } + keySize += 2; /* 2 bytes reserved for RFDIndex */ +#if (DPAA_VERSION >= 11) + keySize += 2; /* 2 bytes reserved */ +#endif /* (DPAA_VERSION >= 11) */ + waySize = ROUND_UP(keySize, 8); + + /* Allocates the IP Reassembly Parameters Table - This table is located in the MURAM.*/ + *p_IpReassTbl = (t_IpReassTbl *)FM_MURAM_AllocMem(p_FmPcd->h_FmMuram, + FM_PCD_MANIP_IP_REASM_TABLE_SIZE, + FM_PCD_MANIP_IP_REASM_TABLE_ALIGN); + if (!*p_IpReassTbl) + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM alloc for Reassembly IPv4/IPv6 specific parameters table")); + memset(*p_IpReassTbl, 0, sizeof(t_IpReassTbl)); + + /* Sets the IP Reassembly common Parameters table offset from MURAM in the IP Reassembly Table descriptor*/ + tmpReg32 = (uint32_t)(XX_VirtToPhys(p_Manip->ipReassmParams.p_IpReassCommonTbl) - p_FmPcd->physicalMuramBase); + WRITE_UINT32((*p_IpReassTbl)->ipReassCommonPrmTblPtr, tmpReg32); + + /* Calculate set size (set size is rounded-up to next power of 2) */ + NEXT_POWER_OF_2(numOfWays * waySize, setSize); + + /* Get set size code */ + LOG2(setSize, setSizeCode); + + /* Sets ways number and set size code */ + WRITE_UINT16((*p_IpReassTbl)->waysNumAndSetSize, (uint16_t)((numOfWays << 8) | setSizeCode)); + + /* It is recommended that the total number of entries in this table + (number of sets * number of ways) will be twice the number of frames that + are expected to be 
reassembled simultaneously.*/ + numOfEntries = (uint32_t)(p_Manip->ipReassmParams.maxNumFramesInProcess * 2); + + /* sets number calculation - number of entries = number of sets * number of ways */ + numOfSets = numOfEntries / numOfWays; + + /* Sets AutoLearnHashKeyMask*/ + NEXT_POWER_OF_2(numOfSets, numOfSets); + + WRITE_UINT16((*p_IpReassTbl)->autoLearnHashKeyMask, (uint16_t)(numOfSets - 1)); + + /* Allocation of IP Reassembly Automatic Learning Hash Table - This table resides in external memory. + The size of this table is determined by the number of sets and the set size. + Table size = set size * number of sets + This table base address should be aligned to SetSize.*/ + autoLearnHashTblSize = numOfSets * setSize; + + *p_AutoLearnHashTblAddr = PTR_TO_UINT(XX_MallocSmart(autoLearnHashTblSize, p_Manip->ipReassmParams.dataMemId, setSize)); + if (!*p_AutoLearnHashTblAddr) + { + FM_MURAM_FreeMem(p_FmPcd->h_FmMuram, *p_IpReassTbl); + *p_IpReassTbl = NULL; + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Memory allocation FAILED")); + } + IOMemSet32(UINT_TO_PTR(*p_AutoLearnHashTblAddr), 0, autoLearnHashTblSize); + + /* Sets the IP Reassembly Automatic Learning Hash Table and liodn offset */ + tmpReg64 = ((uint64_t)(p_Manip->ipReassmParams.dataLiodnOffset & FM_PCD_MANIP_IP_REASM_LIODN_MASK) << (uint64_t)FM_PCD_MANIP_IP_REASM_LIODN_SHIFT); + tmpReg64 |= ((uint64_t)(p_Manip->ipReassmParams.dataLiodnOffset & FM_PCD_MANIP_IP_REASM_ELIODN_MASK) << (uint64_t)FM_PCD_MANIP_IP_REASM_ELIODN_SHIFT); + tmpReg64 |= XX_VirtToPhys(UINT_TO_PTR(*p_AutoLearnHashTblAddr)); + WRITE_UINT32((*p_IpReassTbl)->liodnAlAndAutoLearnHashTblPtrHi, (uint32_t)(tmpReg64 >> 32)); + WRITE_UINT32((*p_IpReassTbl)->autoLearnHashTblPtrLow, (uint32_t)tmpReg64); + + /* Allocation of the Set Lock table - This table resides in external memory + The size of this table is (number of sets in the IP Reassembly Automatic Learning Hash table)*4 bytes. 
+ This table resides in external memory and its base address should be 4-byte aligned */ + *p_AutoLearnSetLockTblAddr = PTR_TO_UINT(XX_MallocSmart((uint32_t)(numOfSets * 4), p_Manip->ipReassmParams.dataMemId, 4)); + if (!*p_AutoLearnSetLockTblAddr) + { + FM_MURAM_FreeMem(p_FmPcd->h_FmMuram, *p_IpReassTbl); + *p_IpReassTbl = NULL; + XX_FreeSmart(UINT_TO_PTR(*p_AutoLearnHashTblAddr)); + *p_AutoLearnHashTblAddr = 0; + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Memory allocation FAILED")); + } + IOMemSet32(UINT_TO_PTR(*p_AutoLearnSetLockTblAddr), 0, (numOfSets * 4)); + + /* sets Set Lock table pointer and liodn offset*/ + tmpReg64 = ((uint64_t)(p_Manip->ipReassmParams.dataLiodnOffset & FM_PCD_MANIP_IP_REASM_LIODN_MASK) << (uint64_t)FM_PCD_MANIP_IP_REASM_LIODN_SHIFT); + tmpReg64 |= ((uint64_t)(p_Manip->ipReassmParams.dataLiodnOffset & FM_PCD_MANIP_IP_REASM_ELIODN_MASK) << (uint64_t)FM_PCD_MANIP_IP_REASM_ELIODN_SHIFT); + tmpReg64 |= XX_VirtToPhys(UINT_TO_PTR(*p_AutoLearnSetLockTblAddr)); + WRITE_UINT32((*p_IpReassTbl)->liodnSlAndAutoLearnSetLockTblPtrHi, (uint32_t)(tmpReg64 >> 32)); + WRITE_UINT32((*p_IpReassTbl)->autoLearnSetLockTblPtrLow, (uint32_t)tmpReg64); + + /* Sets user's requested minimum fragment size (in Bytes) for First/Middle fragment */ + WRITE_UINT16((*p_IpReassTbl)->minFragSize, minFragSize); + + return E_OK; +} + +static t_Error UpdateInitIpReasm(t_Handle h_FmPcd, + t_Handle h_PcdParams, + t_Handle h_FmPort, + t_FmPcdManip *p_Manip, + t_Handle h_Ad, + bool validate) +{ + t_FmPortGetSetCcParams fmPortGetSetCcParams; + uint32_t tmpReg32; + t_Error err; + t_FmPortPcdParams *p_PcdParams = (t_FmPortPcdParams *)h_PcdParams; +#if (DPAA_VERSION >= 11) + uint8_t *p_Ptr; +#endif /* (DPAA_VERSION >= 11) */ + + SANITY_CHECK_RETURN_ERROR(p_Manip, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_Manip->frag, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR((p_Manip->opcode == HMAN_OC_IP_REASSEMBLY), E_INVALID_STATE); + SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_HANDLE); + 
SANITY_CHECK_RETURN_ERROR(!p_Manip->updateParams || h_PcdParams, E_INVALID_HANDLE); + + UNUSED(h_Ad); + + if (!p_Manip->updateParams) + return E_OK; + + if (p_Manip->h_FmPcd != h_FmPcd) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("handler of PCD previously was initiated by different value")); + + if (p_Manip->updateParams) + { + if ((!(p_Manip->updateParams & (NUM_OF_TASKS | NUM_OF_EXTRA_TASKS))) || + ((p_Manip->shadowUpdateParams & (NUM_OF_TASKS | NUM_OF_EXTRA_TASKS)))) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("in this stage parameters from Port has not be updated")); + + fmPortGetSetCcParams.setCcParams.type = 0; + fmPortGetSetCcParams.getCcParams.type = p_Manip->updateParams; + if ((err = FmPortGetSetCcParams(h_FmPort, &fmPortGetSetCcParams)) != E_OK) + RETURN_ERROR(MAJOR, err, NO_MSG); + if (fmPortGetSetCcParams.getCcParams.type & (NUM_OF_TASKS | NUM_OF_EXTRA_TASKS)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("offset of the data wasn't configured previously")); + } + else if (validate) + { + if ((!(p_Manip->shadowUpdateParams & (NUM_OF_TASKS | NUM_OF_EXTRA_TASKS))) || + ((p_Manip->updateParams & (NUM_OF_TASKS | NUM_OF_EXTRA_TASKS)))) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("in this stage parameters from Port has be updated")); + + fmPortGetSetCcParams.setCcParams.type = 0; + fmPortGetSetCcParams.getCcParams.type = p_Manip->shadowUpdateParams; + if ((err = FmPortGetSetCcParams(h_FmPort, &fmPortGetSetCcParams)) != E_OK) + RETURN_ERROR(MAJOR, err, NO_MSG); + if (fmPortGetSetCcParams.getCcParams.type & (NUM_OF_TASKS | NUM_OF_EXTRA_TASKS)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("offset of the data wasn't configured previously")); + } + + if (p_Manip->updateParams) + { + if (p_Manip->updateParams & (NUM_OF_TASKS | NUM_OF_EXTRA_TASKS)) + { + t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; + uint8_t *p_Ptr, i, totalNumOfTnums; + + totalNumOfTnums = (uint8_t)(fmPortGetSetCcParams.getCcParams.numOfTasks + + fmPortGetSetCcParams.getCcParams.numOfExtraTasks); + + 
p_Manip->ipReassmParams.internalBufferPoolAddr = + PTR_TO_UINT(FM_MURAM_AllocMem(p_FmPcd->h_FmMuram, + (uint32_t)(totalNumOfTnums * BMI_FIFO_UNITS), + BMI_FIFO_UNITS)); + if (!p_Manip->ipReassmParams.internalBufferPoolAddr) + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM alloc for Reassembly internal buffers pool")); + IOMemSet32(UINT_TO_PTR(p_Manip->ipReassmParams.internalBufferPoolAddr), + 0, + (uint32_t)(totalNumOfTnums * BMI_FIFO_UNITS)); + + p_Manip->ipReassmParams.internalBufferPoolManagementIndexAddr = + PTR_TO_UINT(FM_MURAM_AllocMem(p_FmPcd->h_FmMuram, + (uint32_t)(5 + totalNumOfTnums), + 4)); + if (!p_Manip->ipReassmParams.internalBufferPoolManagementIndexAddr) + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM alloc for Reassembly internal buffers management")); + + p_Ptr = (uint8_t*)UINT_TO_PTR(p_Manip->ipReassmParams.internalBufferPoolManagementIndexAddr); + WRITE_UINT32(*(uint32_t*)p_Ptr, (uint32_t)(XX_VirtToPhys(UINT_TO_PTR(p_Manip->ipReassmParams.internalBufferPoolAddr)) - p_FmPcd->physicalMuramBase)); + for (i=0, p_Ptr += 4; i < totalNumOfTnums; i++, p_Ptr++) + WRITE_UINT8(*p_Ptr, i); + WRITE_UINT8(*p_Ptr, 0xFF); + + tmpReg32 = (4 << FM_PCD_MANIP_IP_REASM_COMMON_INT_BUFFER_IDX_SHIFT) | + ((uint32_t)(XX_VirtToPhys(UINT_TO_PTR(p_Manip->ipReassmParams.internalBufferPoolManagementIndexAddr)) - p_FmPcd->physicalMuramBase)); + WRITE_UINT32(p_Manip->ipReassmParams.p_IpReassCommonTbl->internalBufferManagement, tmpReg32); + + p_Manip->updateParams &= ~(NUM_OF_TASKS | NUM_OF_EXTRA_TASKS); + p_Manip->shadowUpdateParams |= (NUM_OF_TASKS | NUM_OF_EXTRA_TASKS); + } + } + + if (p_Manip->ipReassmParams.h_Ipv4Scheme) + { + p_PcdParams->p_KgParams->h_Schemes[p_PcdParams->p_KgParams->numOfSchemes] = p_Manip->ipReassmParams.h_Ipv4Scheme; + p_PcdParams->p_KgParams->numOfSchemes++; + } + if (p_Manip->ipReassmParams.h_Ipv6Scheme) + { + p_PcdParams->p_KgParams->h_Schemes[p_PcdParams->p_KgParams->numOfSchemes] = p_Manip->ipReassmParams.h_Ipv6Scheme; + 
p_PcdParams->p_KgParams->numOfSchemes++; + } + +#if (DPAA_VERSION >= 11) + memset(&fmPortGetSetCcParams, 0, sizeof(t_FmPortGetSetCcParams)); + fmPortGetSetCcParams.setCcParams.type = 0; + fmPortGetSetCcParams.getCcParams.type = FM_REV; + if ((err = FmPortGetSetCcParams(h_FmPort, &fmPortGetSetCcParams)) != E_OK) + RETURN_ERROR(MAJOR, err, NO_MSG); + + if (fmPortGetSetCcParams.getCcParams.revInfo.majorRev >= 6) + { + if ((err = FmPortSetGprFunc(h_FmPort, e_FM_PORT_GPR_MURAM_PAGE, (void**)&p_Ptr)) != E_OK) + RETURN_ERROR(MAJOR, err, NO_MSG); + + tmpReg32 = NIA_ENG_KG; + if (p_Manip->ipReassmParams.h_Ipv4Scheme) + { + tmpReg32 |= NIA_KG_DIRECT; + tmpReg32 |= NIA_KG_CC_EN; + tmpReg32 |= FmPcdKgGetSchemeId(p_Manip->ipReassmParams.h_Ipv4Scheme); + WRITE_UINT32(*(uint32_t*)PTR_MOVE(p_Ptr, NIA_IPR_DIRECT_SCHEME_IPV4_OFFSET), tmpReg32); + } + if (p_Manip->ipReassmParams.h_Ipv6Scheme) + { + tmpReg32 &= ~NIA_AC_MASK; + tmpReg32 |= NIA_KG_DIRECT; + tmpReg32 |= NIA_KG_CC_EN; + tmpReg32 |= FmPcdKgGetSchemeId(p_Manip->ipReassmParams.h_Ipv6Scheme); + WRITE_UINT32(*(uint32_t*)PTR_MOVE(p_Ptr, NIA_IPR_DIRECT_SCHEME_IPV6_OFFSET), tmpReg32); + } + } +#endif /* (DPAA_VERSION >= 11) */ + + return E_OK; +} + +#if (DPAA_VERSION == 10) +static t_Error FmPcdFragHcScratchPoolFill(t_Handle h_FmPcd, uint8_t scratchBpid) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + t_FmPcdCcFragScratchPoolCmdParams fmPcdCcFragScratchPoolCmdParams; + t_Error err; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + + memset(&fmPcdCcFragScratchPoolCmdParams, 0, sizeof(t_FmPcdCcFragScratchPoolCmdParams)); + + fmPcdCcFragScratchPoolCmdParams.numOfBuffers = NUM_OF_SCRATCH_POOL_BUFFERS; + fmPcdCcFragScratchPoolCmdParams.bufferPoolId = scratchBpid; + if ((err = FmHcPcdCcIpFragScratchPollCmd(p_FmPcd->h_Hc, TRUE, &fmPcdCcFragScratchPoolCmdParams)) != E_OK) + RETURN_ERROR(MAJOR, err, NO_MSG); + + if (fmPcdCcFragScratchPoolCmdParams.numOfBuffers != 0) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Fill scratch pool 
failed," + "Failed to release %d buffers to the BM (missing FBPRs)", + fmPcdCcFragScratchPoolCmdParams.numOfBuffers)); + + return E_OK; +} + +static t_Error FmPcdFragHcScratchPoolEmpty(t_Handle h_FmPcd, uint8_t scratchBpid) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + t_FmPcdCcFragScratchPoolCmdParams fmPcdCcFragScratchPoolCmdParams; + t_Error err; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + + memset(&fmPcdCcFragScratchPoolCmdParams, 0, sizeof(t_FmPcdCcFragScratchPoolCmdParams)); + + fmPcdCcFragScratchPoolCmdParams.bufferPoolId = scratchBpid; + if ((err = FmHcPcdCcIpFragScratchPollCmd(p_FmPcd->h_Hc, FALSE, &fmPcdCcFragScratchPoolCmdParams)) != E_OK) + RETURN_ERROR(MAJOR, err, NO_MSG); + + return E_OK; +} +#endif /* (DPAA_VERSION == 10) */ + +static void ReleaseManipHandler(t_FmPcdManip *p_Manip, t_FmPcd *p_FmPcd) +{ + if (p_Manip->h_Ad) + { + if (p_Manip->muramAllocate) + FM_MURAM_FreeMem(p_FmPcd->h_FmMuram, p_Manip->h_Ad); + else + XX_Free(p_Manip->h_Ad); + p_Manip->h_Ad = NULL; + } + if (p_Manip->p_Template) + { + FM_MURAM_FreeMem(p_FmPcd->h_FmMuram, p_Manip->p_Template); + p_Manip->p_Template = NULL; + } + if (p_Manip->h_Frag) + { + if (p_Manip->fragParams.p_AutoLearnHashTbl) + FM_MURAM_FreeMem(p_FmPcd->h_FmMuram, p_Manip->fragParams.p_AutoLearnHashTbl); + if (p_Manip->fragParams.p_ReassmFrmDescrPoolTbl) + FM_MURAM_FreeMem(p_FmPcd->h_FmMuram, p_Manip->fragParams.p_ReassmFrmDescrPoolTbl); + if (p_Manip->fragParams.p_ReassmFrmDescrIndxPoolTbl) + FM_MURAM_FreeMem(p_FmPcd->h_FmMuram, p_Manip->fragParams.p_ReassmFrmDescrIndxPoolTbl); + if (p_Manip->fragParams.p_TimeOutTbl) + FM_MURAM_FreeMem(p_FmPcd->h_FmMuram, p_Manip->fragParams.p_TimeOutTbl); + FM_MURAM_FreeMem(p_FmPcd->h_FmMuram, p_Manip->h_Frag); + + } + if (p_Manip->frag) + { + if (p_Manip->ipFragParams.p_Frag) + { +#if (DPAA_VERSION == 10) + FmPcdFragHcScratchPoolEmpty((t_Handle)p_FmPcd, p_Manip->ipFragParams.scratchBpid); +#endif /* (DPAA_VERSION == 10) */ + + 
FM_MURAM_FreeMem(p_FmPcd->h_FmMuram, p_Manip->ipFragParams.p_Frag); + } + } + else if (p_Manip->reassm) + { + FmPcdUnregisterReassmPort(p_FmPcd, p_Manip->ipReassmParams.p_IpReassCommonTbl); + + if (p_Manip->ipReassmParams.timeOutTblAddr) + FM_MURAM_FreeMem(p_FmPcd->h_FmMuram, UINT_TO_PTR(p_Manip->ipReassmParams.timeOutTblAddr)); + if (p_Manip->ipReassmParams.reassFrmDescrPoolTblAddr) + XX_FreeSmart(UINT_TO_PTR(p_Manip->ipReassmParams.reassFrmDescrPoolTblAddr)); + + if (p_Manip->ipReassmParams.ipv4AutoLearnHashTblAddr) + XX_FreeSmart(UINT_TO_PTR(p_Manip->ipReassmParams.ipv4AutoLearnHashTblAddr)); + if (p_Manip->ipReassmParams.ipv6AutoLearnHashTblAddr) + XX_FreeSmart(UINT_TO_PTR(p_Manip->ipReassmParams.ipv6AutoLearnHashTblAddr)); + if (p_Manip->ipReassmParams.ipv4AutoLearnSetLockTblAddr) + XX_FreeSmart(UINT_TO_PTR(p_Manip->ipReassmParams.ipv4AutoLearnSetLockTblAddr)); + if (p_Manip->ipReassmParams.ipv6AutoLearnSetLockTblAddr) + XX_FreeSmart(UINT_TO_PTR(p_Manip->ipReassmParams.ipv6AutoLearnSetLockTblAddr)); + if (p_Manip->ipReassmParams.p_Ipv4ReassTbl) + FM_MURAM_FreeMem(p_FmPcd->h_FmMuram, p_Manip->ipReassmParams.p_Ipv4ReassTbl); + if (p_Manip->ipReassmParams.p_Ipv6ReassTbl) + FM_MURAM_FreeMem(p_FmPcd->h_FmMuram, p_Manip->ipReassmParams.p_Ipv6ReassTbl); + if (p_Manip->ipReassmParams.p_IpReassCommonTbl) + FM_MURAM_FreeMem(p_FmPcd->h_FmMuram, p_Manip->ipReassmParams.p_IpReassCommonTbl); + if (p_Manip->ipReassmParams.reassFrmDescrIndxPoolTblAddr) + FM_MURAM_FreeMem(p_FmPcd->h_FmMuram, UINT_TO_PTR(p_Manip->ipReassmParams.reassFrmDescrIndxPoolTblAddr)); + if (p_Manip->ipReassmParams.internalBufferPoolManagementIndexAddr) + FM_MURAM_FreeMem(p_FmPcd->h_FmMuram, UINT_TO_PTR(p_Manip->ipReassmParams.internalBufferPoolManagementIndexAddr)); + if (p_Manip->ipReassmParams.internalBufferPoolAddr) + FM_MURAM_FreeMem(p_FmPcd->h_FmMuram, UINT_TO_PTR(p_Manip->ipReassmParams.internalBufferPoolAddr)); + + if (p_Manip->ipReassmParams.h_Ipv6Ad) + 
XX_FreeSmart(p_Manip->ipReassmParams.h_Ipv6Ad); + if (p_Manip->ipReassmParams.h_Ipv4Ad) + XX_FreeSmart(p_Manip->ipReassmParams.h_Ipv4Ad); + } + + if (p_Manip->p_StatsTbl) + FM_MURAM_FreeMem(p_FmPcd->h_FmMuram, p_Manip->p_StatsTbl); +} + +#ifdef FM_CAPWAP_SUPPORT +static t_Error CheckManipParamsAndSetType(t_FmPcdManip *p_Manip, t_FmPcdManipParams *p_ManipParams) +{ + if (p_ManipParams->u.hdr.rmv) + { + switch (p_ManipParams->u.hdr.rmvParams.type) + { + case (e_FM_PCD_MANIP_RMV_BY_HDR): + switch (p_ManipParams->u.hdr.rmvParams.u.byHdr.type) + { + case (e_FM_PCD_MANIP_RMV_BY_HDR_FROM_START) : + if (p_ManipParams->u.hdr.rmvParams.u.byHdr.u.fromStartByHdr.include) + { + switch (p_ManipParams->u.hdr.rmvParams.u.byHdr.u.fromStartByHdr.hdrInfo.hdr) + { + case (HEADER_TYPE_CAPWAP_DTLS) : + p_Manip->opcode = HMAN_OC_CAPWAP_RMV_DTLS_IF_EXIST; + p_Manip->muramAllocate = TRUE; + if (p_ManipParams->u.hdr.insrt) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("for CAPWAP_DTLS_HDR remove can not be insrt manipualtion after")); + if (p_ManipParams->fragOrReasm) + { + if (!p_ManipParams->fragOrReasmParams.frag) + { + switch (p_ManipParams->fragOrReasmParams.hdr) + { + case (HEADER_TYPE_CAPWAP): + p_Manip->opcode = HMAN_OC_CAPWAP_REASSEMBLY; + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("unsupported header for Reassembly")); + } + } + else + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("for this type of manipulation frag can not be TRUE")); + } + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("non valid net header of remove location")); + } + } + else + { + switch (p_ManipParams->u.hdr.rmvParams.u.byHdr.u.fromStartByHdr.hdrInfo.hdr) + { + case (HEADER_TYPE_CAPWAP_DTLS) : + case (HEADER_TYPE_CAPWAP) : + if (p_ManipParams->fragOrReasm || p_ManipParams->u.hdr.insrt) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("for the type of remove e_FM_PCD_MANIP_RMV_FROM_START_OF_FRAME_TILL_CAPWAP can not be insert or fragOrReasm TRUE")); + p_Manip->opcode = HMAN_OC_RMV_N_OR_INSRT_INT_FRM_HDR; + 
p_Manip->muramAllocate = TRUE; + p_ManipParams->u.hdr.insrt = TRUE; //internal frame header + break; + default : + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("invalid type of remove manipulation")); + } + } + break; + default : + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("invalid type of remove manipulation")); + } + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("invalid type of remove manipulation")); + } + } + else if (p_ManipParams->u.hdr.insrt) + { + switch (p_ManipParams->u.hdr.insrtParams.type) + { + case (e_FM_PCD_MANIP_INSRT_BY_TEMPLATE) : + + p_Manip->opcode = HMAN_OC_INSRT_HDR_BY_TEMPL_N_OR_FRAG_AFTER; + p_Manip->muramAllocate = FALSE; + if (p_ManipParams->fragOrReasm) + { + if (p_ManipParams->fragOrReasmParams.frag) + { + switch (p_ManipParams->fragOrReasmParams.hdr) + { + case (HEADER_TYPE_CAPWAP): + p_Manip->opcode = HMAN_OC_CAPWAP_FRAGMENTATION; + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid header for fragmentation")); + } + } + else + RETURN_ERROR(MAJOR, E_INVALID_STATE,("can not reach this point")); + } + break; + + default: + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("for only isert manipulation unsupported type")); + } + } + else if (p_ManipParams->fragOrReasm) + { + if (p_ManipParams->fragOrReasmParams.frag) + { + switch (p_ManipParams->fragOrReasmParams.hdr) + { + case (HEADER_TYPE_CAPWAP): + p_Manip->opcode = HMAN_OC_CAPWAP_FRAGMENTATION; + p_Manip->muramAllocate = FALSE; + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Unsupported header for fragmentation")); + } + } + else + { + switch (p_ManipParams->fragOrReasmParams.hdr) + { + case (HEADER_TYPE_CAPWAP): + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Reassembly has to be with additional operation - rmv = TRUE, type of remove - e_FM_PCD_MANIP_RMV_FROM_START_OF_FRAME_INCLUDE_SPECIFIC_LOCATION,type = e_FM_PCD_MANIP_LOC_BY_HDR, hdr = HEADER_TYPE_CAPWAP_DTLS")); + default: + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Unsupported header for reassembly")); + } + } + + } + 
else + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("User didn't ask for any manipulation")); + + p_Manip->insrt = p_ManipParams->u.hdr.insrt; + p_Manip->rmv = p_ManipParams->u.hdr.rmv; + + return E_OK; +} + +#else /* not FM_CAPWAP_SUPPORT */ +static t_Error CheckManipParamsAndSetType(t_FmPcdManip *p_Manip, t_FmPcdManipParams *p_ManipParams) +{ + switch (p_ManipParams->type) + { + case e_FM_PCD_MANIP_HDR : + /* Check that next-manip is not already used */ + if (p_ManipParams->h_NextManip) + { + if (!MANIP_IS_FIRST(p_ManipParams->h_NextManip)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("h_NextManip is already a part of another chain")); + if (MANIP_GET_TYPE(p_ManipParams->h_NextManip) != e_FM_PCD_MANIP_HDR) + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("For a Header Manipulation node - no support of h_NextManip of type other than Header Manipulation.")); + } + + if (p_ManipParams->u.hdr.rmv) + { + switch (p_ManipParams->u.hdr.rmvParams.type) + { + case (e_FM_PCD_MANIP_RMV_BY_HDR): + switch (p_ManipParams->u.hdr.rmvParams.u.byHdr.type) + { + case (e_FM_PCD_MANIP_RMV_BY_HDR_SPECIFIC_L2) : + p_Manip->opcode = HMAN_OC; + p_Manip->muramAllocate = TRUE; + break; + default : + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("invalid type of remove manipulation")); + } + break; + case (e_FM_PCD_MANIP_RMV_GENERIC): + p_Manip->opcode = HMAN_OC; + p_Manip->muramAllocate = TRUE; + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("invalid type of remove manipulation")); + } + p_Manip->rmv = TRUE; + } + else if (p_ManipParams->u.hdr.insrt) + { + switch (p_ManipParams->u.hdr.insrtParams.type) + { + case (e_FM_PCD_MANIP_INSRT_BY_HDR) : + case (e_FM_PCD_MANIP_INSRT_GENERIC): + p_Manip->opcode = HMAN_OC; + p_Manip->muramAllocate = TRUE; + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("for only isert manipulation unsupported type")); + } + p_Manip->insrt = TRUE; + } + else if (p_ManipParams->u.hdr.fieldUpdate) + { + /* Check parameters */ + if (p_ManipParams->u.hdr.fieldUpdateParams.type 
== e_FM_PCD_MANIP_HDR_FIELD_UPDATE_VLAN) + { + if ((p_ManipParams->u.hdr.fieldUpdateParams.u.vlan.updateType == e_FM_PCD_MANIP_HDR_FIELD_UPDATE_VLAN_VPRI) + && (p_ManipParams->u.hdr.fieldUpdateParams.u.vlan.u.vpri > 7)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("vpri should get values of 0-7 ")); + if (p_ManipParams->u.hdr.fieldUpdateParams.u.vlan.updateType == e_FM_PCD_MANIP_HDR_FIELD_UPDATE_DSCP_TO_VLAN) + { + int i; + + if (p_ManipParams->u.hdr.fieldUpdateParams.u.vlan.u.dscpToVpri.vpriDefVal > 7) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("vpriDefVal should get values of 0-7 ")); + for (i = 0 ; i < FM_PCD_MANIP_DSCP_TO_VLAN_TRANS ; i++) + if (p_ManipParams->u.hdr.fieldUpdateParams.u.vlan.u.dscpToVpri.dscpToVpriTable[i] & 0xf0) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dscpToVpriTabl value out of range (0-15)")); + } + + } + + p_Manip->opcode = HMAN_OC; + p_Manip->muramAllocate = TRUE; + p_Manip->fieldUpdate = TRUE; + } + else if (p_ManipParams->u.hdr.custom) + { + p_Manip->opcode = HMAN_OC; + p_Manip->muramAllocate = TRUE; + p_Manip->custom = TRUE; + } + break; + case e_FM_PCD_MANIP_REASSEM : + if (p_ManipParams->h_NextManip) + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("next manip with reassembly")); + switch (p_ManipParams->u.reassem.hdr) + { + case (HEADER_TYPE_IPv4): + p_Manip->ipReassmParams.hdr = HEADER_TYPE_IPv4; + break; + case (HEADER_TYPE_IPv6): + p_Manip->ipReassmParams.hdr = HEADER_TYPE_IPv6; + break; + default: + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("header for reassembly")); + } + p_Manip->opcode = HMAN_OC_IP_REASSEMBLY; + break; + case e_FM_PCD_MANIP_FRAG : + if (p_ManipParams->h_NextManip) + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("next manip with fragmentation")); + switch (p_ManipParams->u.frag.hdr) + { + case (HEADER_TYPE_IPv4): + case (HEADER_TYPE_IPv6): + break; + default: + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("header for fragmentation")); + } + p_Manip->opcode = HMAN_OC_IP_FRAGMENTATION; + p_Manip->muramAllocate = TRUE; + break; + case 
e_FM_PCD_MANIP_SPECIAL_OFFLOAD : + switch (p_ManipParams->u.specialOffload.type) + { + case (e_FM_PCD_MANIP_SPECIAL_OFFLOAD_IPSEC): + p_Manip->opcode = HMAN_OC_IPSEC_MANIP; + p_Manip->muramAllocate = TRUE; + break; + default: + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("special offload type")); + } + break; + default : + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("manip type")); + } + + return E_OK; +} +#endif /* not FM_CAPWAP_SUPPORT */ + +#ifdef FM_CAPWAP_SUPPORT +static t_Error UpdateIndxStats(t_Handle h_FmPcd, + t_Handle h_FmPort, + t_FmPcdManip *p_Manip) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; + uint32_t tmpReg32 = 0; + t_AdOfTypeContLookup *p_Ad; + t_FmPortGetSetCcParams fmPortGetSetCcParams; + t_Error err; + + SANITY_CHECK_RETURN_ERROR(p_Manip,E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Manip->h_Ad,E_INVALID_HANDLE); + + p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Ad; + if (p_Manip->h_FmPcd != h_FmPcd) + RETURN_ERROR(MAJOR, E_INVALID_STATE, + ("handler of PCD previously was initiated by different value")); + + memset(&fmPortGetSetCcParams, 0, sizeof(t_FmPortGetSetCcParams)); + + if (!p_Manip->p_StatsTbl) + { + + fmPortGetSetCcParams.setCcParams.type = UPDATE_NIA_PNDN; + fmPortGetSetCcParams.setCcParams.nia = NIA_FM_CTL_AC_CC; + err = FmPortGetSetCcParams(h_FmPort, &fmPortGetSetCcParams); + if (err) + RETURN_ERROR(MAJOR, err, NO_MSG); + + tmpReg32 = GET_UINT32(p_Ad->ccAdBase); + + p_Manip->p_StatsTbl = + (t_Handle)FM_MURAM_AllocMem(p_FmPcd->h_FmMuram, + (uint32_t)p_Manip->owner * FM_PCD_MANIP_INDEXED_STATS_ENTRY_SIZE, + 4); + if (!p_Manip->p_StatsTbl) + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM alloc for Manipulation indexed statistics table")); + + IOMemSet32(p_Manip->p_StatsTbl, 0, (uint32_t)(p_Manip->owner * 4)); + + tmpReg32 |= (uint32_t)(XX_VirtToPhys(p_Manip->p_StatsTbl) - p_FmPcd->physicalMuramBase); + + if (p_Manip->cnia) + tmpReg32 |= FM_PCD_MANIP_INDEXED_STATS_CNIA; + + tmpReg32 |= FM_PCD_MANIP_INDEXED_STATS_DPD; + WRITE_UINT32(p_Ad->ccAdBase, 
tmpReg32); + } + else + { + fmPortGetSetCcParams.setCcParams.type = UPDATE_NIA_PNDN; + fmPortGetSetCcParams.setCcParams.nia = NIA_FM_CTL_AC_CC; + err = FmPortGetSetCcParams(h_FmPort, &fmPortGetSetCcParams); + if (err) + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + return E_OK; +} +#endif /* FM_CAPWAP_SUPPORT */ + +static t_Error FmPcdManipInitUpdate(t_Handle h_FmPcd, + t_Handle h_PcdParams, + t_Handle h_FmPort, + t_Handle h_Manip, + t_Handle h_Ad, + bool validate, + int level, + t_Handle h_FmTree) +{ + t_FmPcdManip *p_Manip = (t_FmPcdManip *)h_Manip; + t_Error err = E_OK; + + SANITY_CHECK_RETURN_ERROR(h_Manip,E_INVALID_HANDLE); + + UNUSED(level); + UNUSED(h_FmPcd); + UNUSED(h_FmTree); + + switch (p_Manip->opcode) + { + case (HMAN_OC_MV_INT_FRAME_HDR_FROM_FRM_TO_BUFFER_PREFFIX): + err = UpdateInitMvIntFrameHeaderFromFrameToBufferPrefix(h_FmPort, p_Manip, h_Ad, validate); + break; +#ifdef FM_CAPWAP_SUPPORT + case (HMAN_OC_INSRT_HDR_BY_TEMPL_N_OR_FRAG_AFTER): + if (!p_Manip->h_Frag) + break; + case (HMAN_OC_CAPWAP_FRAGMENTATION): + err = UpdateInitCapwapFragmentation(h_FmPort, p_Manip, h_Ad, validate, h_FmTree); + break; + case (HMAN_OC_CAPWAP_RMV_DTLS_IF_EXIST): + if (p_Manip->h_Frag) + err = UpdateInitCapwapReasm(h_FmPcd, h_FmPort, p_Manip, h_Ad, validate); + break; + case (HMAN_OC_CAPWAP_INDEXED_STATS): + err = UpdateIndxStats(h_FmPcd, h_FmPort, p_Manip); + break; +#endif /* FM_CAPWAP_SUPPORT */ + case (HMAN_OC_IP_REASSEMBLY): + err = UpdateInitIpReasm(h_FmPcd, h_PcdParams, h_FmPort, p_Manip, h_Ad, validate); + break; + default: + return E_OK; + } + + return err; +} + +static t_Error FmPcdManipModifyUpdate(t_Handle h_Manip, t_Handle h_Ad, bool validate, int level, t_Handle h_FmTree) +{ + + t_FmPcdManip *p_Manip = (t_FmPcdManip *)h_Manip; + t_Error err = E_OK; + + UNUSED(level); + + switch (p_Manip->opcode) + { + case (HMAN_OC_MV_INT_FRAME_HDR_FROM_FRM_TO_BUFFER_PREFFIX): + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("modify node with this type of manipulation is not 
suppported")); + case (HMAN_OC_CAPWAP_RMV_DTLS_IF_EXIST): + + if (p_Manip->h_Frag) + { + if (!(p_Manip->shadowUpdateParams & NUM_OF_TASKS) && + !(p_Manip->shadowUpdateParams & OFFSET_OF_DATA) && + !(p_Manip->shadowUpdateParams & OFFSET_OF_PR)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("modify node with this type of manipulation requires manipulation be updated previously in SetPcd function")); + } + break; +#ifdef FM_CAPWAP_SUPPORT + case (HMAN_OC_INSRT_HDR_BY_TEMPL_N_OR_FRAG_AFTER): + if (p_Manip->h_Frag) + err = UpdateModifyCapwapFragmenation(p_Manip, h_Ad, validate, h_FmTree); + break; +#endif /* FM_CAPWAP_SUPPORT */ + default: + return E_OK; + } + + return err; +} + +#ifdef FM_CAPWAP_SUPPORT +static t_Error GetPrOffsetByHeaderOrField(t_FmManipHdrInfo *p_HdrInfo, uint8_t *parseArrayOffset) +{ + e_NetHeaderType hdr = p_HdrInfo->hdr; + e_FmPcdHdrIndex hdrIndex = p_HdrInfo->hdrIndex; + bool byField = p_HdrInfo->byField; + t_FmPcdFields field; + + if (byField) + field = p_HdrInfo->fullField; + + if (byField) + { + switch (hdr) + { + case (HEADER_TYPE_ETH): + switch (field.eth) + { + case (NET_HEADER_FIELD_ETH_TYPE): + *parseArrayOffset = CC_PC_PR_ETYPE_LAST_OFFSET; + break; + default: + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("Header manipulation of the type Ethernet with this field not supported")); + } + break; + case (HEADER_TYPE_VLAN): + switch (field.vlan) + { + case (NET_HEADER_FIELD_VLAN_TCI) : + if ((hdrIndex == e_FM_PCD_HDR_INDEX_NONE) || (hdrIndex == e_FM_PCD_HDR_INDEX_1)) + *parseArrayOffset = CC_PC_PR_VLAN1_OFFSET; + else if (hdrIndex == e_FM_PCD_HDR_INDEX_LAST) + *parseArrayOffset = CC_PC_PR_VLAN2_OFFSET; + break; + default: + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("Header manipulation of the type VLAN with this field not supported")); + } + break; + default: + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("Header manipulation of this header by field not supported")); + } + } + else + { + switch (hdr){ + case (HEADER_TYPE_ETH): + *parseArrayOffset = 
(uint8_t)CC_PC_PR_ETH_OFFSET; + break; + case (HEADER_TYPE_USER_DEFINED_SHIM1): + *parseArrayOffset = (uint8_t)CC_PC_PR_USER_DEFINED_SHIM1_OFFSET; + break; + case (HEADER_TYPE_USER_DEFINED_SHIM2): + *parseArrayOffset = (uint8_t)CC_PC_PR_USER_DEFINED_SHIM2_OFFSET; + break; + case (HEADER_TYPE_LLC_SNAP): + *parseArrayOffset = CC_PC_PR_USER_LLC_SNAP_OFFSET; + break; + case (HEADER_TYPE_PPPoE): + *parseArrayOffset = CC_PC_PR_PPPOE_OFFSET; + break; + case (HEADER_TYPE_MPLS): + if ((hdrIndex == e_FM_PCD_HDR_INDEX_NONE) || (hdrIndex == e_FM_PCD_HDR_INDEX_1)) + *parseArrayOffset = CC_PC_PR_MPLS1_OFFSET; + else if (hdrIndex == e_FM_PCD_HDR_INDEX_LAST) + *parseArrayOffset = CC_PC_PR_MPLS_LAST_OFFSET; + break; + case (HEADER_TYPE_IPv4): + case (HEADER_TYPE_IPv6): + if ((hdrIndex == e_FM_PCD_HDR_INDEX_NONE) || (hdrIndex == e_FM_PCD_HDR_INDEX_1)) + *parseArrayOffset = CC_PC_PR_IP1_OFFSET; + else if (hdrIndex == e_FM_PCD_HDR_INDEX_2) + *parseArrayOffset = CC_PC_PR_IP_LAST_OFFSET; + break; + case (HEADER_TYPE_MINENCAP): + *parseArrayOffset = CC_PC_PR_MINENC_OFFSET; + break; + case (HEADER_TYPE_GRE): + *parseArrayOffset = CC_PC_PR_GRE_OFFSET; + break; + case (HEADER_TYPE_TCP): + case (HEADER_TYPE_UDP): + case (HEADER_TYPE_IPSEC_AH): + case (HEADER_TYPE_IPSEC_ESP): + case (HEADER_TYPE_DCCP): + case (HEADER_TYPE_SCTP): + *parseArrayOffset = CC_PC_PR_L4_OFFSET; + break; + case (HEADER_TYPE_CAPWAP): + case (HEADER_TYPE_CAPWAP_DTLS): + *parseArrayOffset = CC_PC_PR_NEXT_HEADER_OFFSET; + break; + default: + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("Header manipulation of this header is not supported")); + } + } + return E_OK; +} + +static t_Error RmvHdrTillSpecLocNOrInsrtIntFrmHdr(t_FmPcdManipHdrRmvParams *p_ManipParams, t_FmPcdManip *p_Manip) +{ + t_AdOfTypeContLookup *p_Ad; + uint32_t tmpReg32 = 0; + uint8_t prsArrayOffset = 0; + t_Error err; + + SANITY_CHECK_RETURN_ERROR(p_Manip,E_NULL_POINTER); + SANITY_CHECK_RETURN_ERROR(p_ManipParams,E_NULL_POINTER); + 
SANITY_CHECK_RETURN_ERROR(p_Manip->h_Ad,E_INVALID_HANDLE); + + p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Ad; + if (p_Manip->rmv) + { + err = GetPrOffsetByHeaderOrField(&p_ManipParams->u.byHdr.u.fromStartByHdr.hdrInfo, &prsArrayOffset); + if (err) + RETURN_ERROR(MAJOR, err, NO_MSG); + + tmpReg32 |= (uint32_t)prsArrayOffset << 24; + tmpReg32 |= HMAN_RMV_HDR; + } + + if (p_Manip->insrt) + tmpReg32 |= HMAN_INSRT_INT_FRM_HDR; + + tmpReg32 |= (uint32_t)HMAN_OC_RMV_N_OR_INSRT_INT_FRM_HDR; + + WRITE_UINT32(p_Ad->pcAndOffsets, tmpReg32); + + tmpReg32 = 0; + tmpReg32 |= FM_PCD_AD_CONT_LOOKUP_TYPE; + WRITE_UINT32(p_Ad->ccAdBase, tmpReg32); + + return E_OK; +} +#endif /* FM_CAPWAP_SUPPORT */ + +static t_Error MvIntFrameHeaderFromFrameToBufferPrefix(t_FmPcdManip *p_Manip, bool caamUsed) +{ + t_AdOfTypeContLookup *p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Ad; + uint32_t tmpReg32 = 0; + + SANITY_CHECK_RETURN_ERROR(p_Ad,E_INVALID_HANDLE); + + p_Manip->updateParams |= OFFSET_OF_PR | INTERNAL_CONTEXT_OFFSET; + + tmpReg32 = 0; + tmpReg32 |= FM_PCD_AD_CONT_LOOKUP_TYPE; + *(uint32_t *)&p_Ad->ccAdBase = tmpReg32; + + tmpReg32 = 0; + tmpReg32 |= HMAN_OC_MV_INT_FRAME_HDR_FROM_FRM_TO_BUFFER_PREFFIX; + tmpReg32 |= (uint32_t)0x16 << 16; + *(uint32_t *)&p_Ad->pcAndOffsets = tmpReg32; + + if (caamUsed) + *(uint32_t *)&p_Ad->gmask = 0xf0000000; + + return E_OK; +} + +#ifdef FM_CAPWAP_SUPPORT +static t_Error CapwapRmvDtlsHdr(t_FmPcd *p_FmPcd, t_FmPcdManip *p_Manip) +{ + t_AdOfTypeContLookup *p_Ad; + uint32_t tmpReg32 = 0; + t_Error err = E_OK; + + SANITY_CHECK_RETURN_ERROR(p_Manip->h_Ad,E_INVALID_HANDLE); + + p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Ad; + + tmpReg32 = 0; + tmpReg32 |= (uint32_t)HMAN_OC_CAPWAP_RMV_DTLS_IF_EXIST; + WRITE_UINT32(p_Ad->pcAndOffsets, tmpReg32); + + tmpReg32 = 0; + tmpReg32 |= FM_PCD_AD_CONT_LOOKUP_TYPE; + + + if (p_Manip->h_Frag) + { + p_Manip->updateParams |= INTERNAL_CONTEXT_OFFSET; + tmpReg32 |= (uint32_t)(XX_VirtToPhys(p_Manip->h_Frag) - 
(p_FmPcd->physicalMuramBase)); + } + + WRITE_UINT32(p_Ad->ccAdBase, tmpReg32); + + return err; +} + +static t_Error CapwapReassembly(t_CapwapReassemblyParams *p_ManipParams, + t_FmPcdManip *p_Manip, + t_FmPcd *p_FmPcd, + uint8_t poolId) +{ + t_Handle p_Table; + uint32_t tmpReg32 = 0; + int i = 0; + uint8_t log2Num; + uint8_t numOfSets; + uint32_t j = 0; + + SANITY_CHECK_RETURN_ERROR(p_Manip->h_Ad, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE); + + if (!p_FmPcd->h_Hc) + RETURN_ERROR(MAJOR, E_INVALID_VALUE,("hc port has to be initialized in this mode")); + if (!POWER_OF_2(p_ManipParams->timeoutRoutineRequestTime)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("timeoutRoutineRequestTime has to be power of 2")); + if (!POWER_OF_2(p_ManipParams->maxNumFramesInProcess)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE,("maxNumFramesInProcess has to be power of 2")); + if (!p_ManipParams->timeoutRoutineRequestTime && p_ManipParams->timeoutThresholdForReassmProcess) + DBG(WARNING, ("if timeoutRoutineRequestTime 0, timeoutThresholdForReassmProcess is uselessly")); + if (p_ManipParams->numOfFramesPerHashEntry == e_FM_PCD_MANIP_FOUR_WAYS_HASH) + { + if ((p_ManipParams->maxNumFramesInProcess < 4) || + (p_ManipParams->maxNumFramesInProcess > 512)) + RETURN_ERROR(MAJOR,E_INVALID_VALUE, ("In the case of numOfFramesPerHashEntry = e_FM_PCD_MANIP_EIGHT_WAYS_HASH maxNumFramesInProcess has to be in the range 4-512")); + } + else + { + if ((p_ManipParams->maxNumFramesInProcess < 8) || + (p_ManipParams->maxNumFramesInProcess > 2048)) + RETURN_ERROR(MAJOR,E_INVALID_VALUE, ("In the case of numOfFramesPerHashEntry = e_FM_PCD_MANIP_FOUR_WAYS_HASH maxNumFramesInProcess has to be in the range 8-2048")); + } + + p_Manip->updateParams |= (NUM_OF_TASKS | OFFSET_OF_PR | OFFSET_OF_DATA | HW_PORT_ID); + + p_Manip->h_Frag = (t_Handle)FM_MURAM_AllocMem(p_FmPcd->h_FmMuram, + FM_PCD_MANIP_CAPWAP_REASM_TABLE_SIZE, + FM_PCD_MANIP_CAPWAP_REASM_TABLE_ALIGN); + if (!p_Manip->h_Frag) + 
RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM alloc CAPWAP reassembly parameters table")); + + IOMemSet32(p_Manip->h_Frag, 0, FM_PCD_MANIP_CAPWAP_REASM_TABLE_SIZE); + + p_Table = (t_CapwapReasmPram *)p_Manip->h_Frag; + + p_Manip->fragParams.p_AutoLearnHashTbl = + (t_Handle)FM_MURAM_AllocMem(p_FmPcd->h_FmMuram, + (uint32_t)(p_ManipParams->maxNumFramesInProcess * 2 * FM_PCD_MANIP_CAPWAP_REASM_AUTO_LEARNING_HASH_ENTRY_SIZE), + FM_PCD_MANIP_CAPWAP_REASM_TABLE_ALIGN); + + if (!p_Manip->fragParams.p_AutoLearnHashTbl) + RETURN_ERROR(MAJOR, E_NO_MEMORY,("MURAM alloc for CAPWAP automatic learning hash table")); + + IOMemSet32(p_Manip->fragParams.p_AutoLearnHashTbl, 0, (uint32_t)(p_ManipParams->maxNumFramesInProcess * 2 * FM_PCD_MANIP_CAPWAP_REASM_AUTO_LEARNING_HASH_ENTRY_SIZE)); + + + tmpReg32 = (uint32_t)(XX_VirtToPhys(p_Manip->fragParams.p_AutoLearnHashTbl) - p_FmPcd->physicalMuramBase); + + WRITE_UINT32(((t_CapwapReasmPram *)p_Table)->autoLearnHashTblPtr, tmpReg32); + + tmpReg32 = 0; + if (p_ManipParams->timeOutMode == e_FM_PCD_MANIP_TIME_OUT_BETWEEN_FRAMES) + tmpReg32 |= FM_PCD_MANIP_CAPWAP_REASM_TIME_OUT_BETWEEN_FRAMES; + if (p_ManipParams->haltOnDuplicationFrag) + tmpReg32 |= FM_PCD_MANIP_CAPWAP_REASM_HALT_ON_DUPLICATE_FRAG; + if (p_ManipParams->numOfFramesPerHashEntry == e_FM_PCD_MANIP_EIGHT_WAYS_HASH) + { + i = 8; + tmpReg32 |= FM_PCD_MANIP_CAPWAP_REASM_AUTOMATIC_LEARNIN_HASH_8_WAYS; + } + else + i = 4; + + numOfSets = (uint8_t)((p_ManipParams->maxNumFramesInProcess * 2) / i); + LOG2(numOfSets, log2Num); + tmpReg32 |= (uint32_t)(log2Num - 1) << 24; + + WRITE_UINT32(((t_CapwapReasmPram *)p_Table)->mode, tmpReg32); + + for (j=0; j<p_ManipParams->maxNumFramesInProcess*2; j++) + if (((j / i) % 2)== 0) + WRITE_UINT32(*(uint32_t *)PTR_MOVE(p_Manip->fragParams.p_AutoLearnHashTbl, j * FM_PCD_MANIP_CAPWAP_REASM_AUTO_LEARNING_HASH_ENTRY_SIZE), 0x80000000); + + tmpReg32 = 0x00008000; + tmpReg32 |= (uint32_t)poolId << 16; + WRITE_UINT32(((t_CapwapReasmPram 
*)p_Table)->bufferPoolIdAndRisc1SetIndexes, tmpReg32); + WRITE_UINT32(((t_CapwapReasmPram *)p_Table)->risc23SetIndexes, 0x80008000); + WRITE_UINT32(((t_CapwapReasmPram *)p_Table)->risc4SetIndexesAndExtendedStatsTblPtr, 0x80000000); + + p_Manip->fragParams.maxNumFramesInProcess = p_ManipParams->maxNumFramesInProcess; + + p_Manip->fragParams.sgBpid = poolId; + + p_Manip->fragParams.fqidForTimeOutFrames = p_ManipParams->fqidForTimeOutFrames; + p_Manip->fragParams.timeoutRoutineRequestTime = p_ManipParams->timeoutRoutineRequestTime; + p_Manip->fragParams.bitFor1Micro = FmGetTimeStampScale(p_FmPcd->h_Fm); + + tmpReg32 = 0; + tmpReg32 |= (((uint32_t)1<<p_Manip->fragParams.bitFor1Micro) * p_ManipParams->timeoutThresholdForReassmProcess); + WRITE_UINT32(((t_CapwapReasmPram *)p_Table)->expirationDelay, tmpReg32); + + return E_OK; +} + +static t_Error CapwapFragmentation(t_CapwapFragmentationParams *p_ManipParams, + t_FmPcdManip *p_Manip, + t_FmPcd *p_FmPcd, + uint8_t poolId) +{ + t_AdOfTypeContLookup *p_Ad; + uint32_t tmpReg32 = 0; + + SANITY_CHECK_RETURN_ERROR(p_Manip->h_Ad,E_INVALID_HANDLE); + + p_Manip->updateParams |= OFFSET_OF_DATA; + + p_Manip->frag = TRUE; + + p_Manip->h_Frag = (t_Handle)FM_MURAM_AllocMem(p_FmPcd->h_FmMuram, + FM_PCD_CC_AD_ENTRY_SIZE, + FM_PCD_CC_AD_TABLE_ALIGN); + if (!p_Manip->h_Frag) + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM alloc for CAPWAP fragmentation table descriptor")); + + IOMemSet32(p_Manip->h_Frag, 0, FM_PCD_CC_AD_ENTRY_SIZE); + + p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Frag; + + tmpReg32 = 0; + tmpReg32 |= (uint32_t)HMAN_OC_CAPWAP_FRAGMENTATION; + + if (p_ManipParams->headerOptionsCompr) + tmpReg32 |= FM_PCD_MANIP_CAPWAP_FRAG_COMPR_OPTION_FIELD_EN; + tmpReg32 |= ((uint32_t)poolId << 8); + WRITE_UINT32(p_Ad->pcAndOffsets, tmpReg32); + + tmpReg32 = 0; + tmpReg32 |= FM_PCD_AD_CONT_LOOKUP_TYPE; + WRITE_UINT32(p_Ad->ccAdBase, tmpReg32); + + p_Manip->sizeForFragmentation = p_ManipParams->sizeForFragmentation; + p_Manip->fragParams.sgBpid = 
poolId; + + return E_OK; +} +#endif /* FM_CAPWAP_SUPPORT */ + +static t_Error FillReassmManipParams(t_FmPcdManip *p_Manip, bool ipv4) +{ + t_AdOfTypeContLookup *p_Ad; + t_FmPcd *p_FmPcd = (t_FmPcd *)p_Manip->h_FmPcd; + uint32_t tmpReg32; + t_Error err = E_OK; + + /* Creates the IP Reassembly Parameters table. It contains parameters that are specific to either the IPv4 reassembly + function or to the IPv6 reassembly function. If both IPv4 reassembly and IPv6 reassembly are required, then + two separate IP Reassembly Parameter tables are required.*/ + if ((err = CreateIpReassTable(p_Manip, ipv4)) != E_OK) + RETURN_ERROR(MAJOR, err, NO_MSG); + + /* Sets the first Ad register (ccAdBase) - Action Descriptor Type and Pointer to the IP Reassembly Parameters Table offset from MURAM*/ + tmpReg32 = 0; + tmpReg32 |= FM_PCD_AD_CONT_LOOKUP_TYPE; + + /* Gets the required Action descriptor table pointer */ + if (ipv4) + { + p_Ad = (t_AdOfTypeContLookup *)p_Manip->ipReassmParams.h_Ipv4Ad; + tmpReg32 |= (uint32_t)(XX_VirtToPhys(p_Manip->ipReassmParams.p_Ipv4ReassTbl) - (p_FmPcd->physicalMuramBase)); + } + else + { + p_Ad = (t_AdOfTypeContLookup *)p_Manip->ipReassmParams.h_Ipv6Ad; + tmpReg32 |= (uint32_t)(XX_VirtToPhys(p_Manip->ipReassmParams.p_Ipv6ReassTbl) - (p_FmPcd->physicalMuramBase)); + } + + WRITE_UINT32(p_Ad->ccAdBase, tmpReg32); + + /* Sets the second Ad register (matchTblPtr) - Buffer pool ID (BPID for V2) and Scatter/Gather table offset*/ + /* mark the Scatter/Gather table offset to be set later on when the port will be known */ + p_Manip->updateParams = (NUM_OF_TASKS | NUM_OF_EXTRA_TASKS); + +#if (DPAA_VERSION == 10) + tmpReg32 = (uint32_t)(p_Manip->ipReassmParams.sgBpid << 8); + WRITE_UINT32(p_Ad->matchTblPtr, tmpReg32); +#endif /* (DPAA_VERSION == 10) */ +#if (DPAA_VERSION >= 11) + if (p_Manip->ipReassmParams.nonConsistentSpFqid != 0) + { + tmpReg32 = FM_PCD_AD_NCSPFQIDM_MASK | (uint32_t)(p_Manip->ipReassmParams.nonConsistentSpFqid); + WRITE_UINT32(p_Ad->gmask, 
tmpReg32); + } +#endif /* (DPAA_VERSION >= 11) */ + + /* Sets the third Ad register (pcAndOffsets)- IP Reassemble Operation Code*/ + tmpReg32 = 0; + tmpReg32 |= (uint32_t)HMAN_OC_IP_REASSEMBLY; + WRITE_UINT32(p_Ad->pcAndOffsets, tmpReg32); + + p_Manip->reassm = TRUE; + + return E_OK; +} + +static t_Error SetIpv4ReassmManip(t_FmPcdManip *p_Manip) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *)p_Manip->h_FmPcd; + + /* Allocation if IPv4 Action descriptor */ + p_Manip->ipReassmParams.h_Ipv4Ad = + (t_Handle)XX_MallocSmart(FM_PCD_CC_AD_ENTRY_SIZE, + p_Manip->ipReassmParams.dataMemId, + FM_PCD_CC_AD_TABLE_ALIGN); + if (!p_Manip->ipReassmParams.h_Ipv4Ad) + { + ReleaseManipHandler(p_Manip, p_FmPcd); + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Allocation of IPv4 table descriptor")); + } + + memset(p_Manip->ipReassmParams.h_Ipv4Ad, 0, FM_PCD_CC_AD_ENTRY_SIZE); + + /* Fill reassembly manipulation parameter in the IP Reassembly Action Descriptor */ + return FillReassmManipParams(p_Manip, TRUE); +} + +static t_Error SetIpv6ReassmManip(t_FmPcdManip *p_Manip) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *)p_Manip->h_FmPcd; + + /* Allocation if IPv6 Action descriptor */ + p_Manip->ipReassmParams.h_Ipv6Ad = + (t_Handle)XX_MallocSmart(FM_PCD_CC_AD_ENTRY_SIZE, + p_Manip->ipReassmParams.dataMemId, + FM_PCD_CC_AD_TABLE_ALIGN); + if (!p_Manip->ipReassmParams.h_Ipv6Ad) + { + ReleaseManipHandler(p_Manip, p_FmPcd); + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Allocation of IPv6 table descriptor")); + } + + memset(p_Manip->ipReassmParams.h_Ipv6Ad, 0, FM_PCD_CC_AD_ENTRY_SIZE); + + /* Fill reassembly manipulation parameter in the IP Reassembly Action Descriptor */ + return FillReassmManipParams(p_Manip, FALSE); +} + +static t_Error IpReassembly(t_FmPcdManipReassemParams *p_ManipReassmParams, + t_FmPcdManip *p_Manip) +{ + uint32_t maxSetNumber = 10000; + t_FmPcdManipReassemIpParams reassmManipParams = p_ManipReassmParams->u.ipReassem; + t_Error res; + + SANITY_CHECK_RETURN_ERROR(p_Manip->h_FmPcd, E_INVALID_HANDLE); + 
SANITY_CHECK_RETURN_ERROR(((t_FmPcd *)p_Manip->h_FmPcd)->h_Hc, E_INVALID_HANDLE); + + /* Check validation of user's parameter.*/ + if ((reassmManipParams.timeoutThresholdForReassmProcess < 1000) || + (reassmManipParams.timeoutThresholdForReassmProcess > 8000000)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE,("timeoutThresholdForReassmProcess should be 1msec - 8sec")); + /* It is recommended that the total number of entries in this table (number of sets * number of ways) + will be twice the number of frames that are expected to be reassembled simultaneously.*/ + if (reassmManipParams.maxNumFramesInProcess > + (reassmManipParams.maxNumFramesInProcess * maxSetNumber / 2)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("maxNumFramesInProcess has to be less than (maximun set number * number of ways / 2)")); + + if ((p_ManipReassmParams->hdr == HEADER_TYPE_IPv6) && + (reassmManipParams.minFragSize[1] < 256)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("minFragSize[1] must be >= 256")); + + /* Saves user's reassembly manipulation parameters */ + p_Manip->ipReassmParams.relativeSchemeId[0] = reassmManipParams.relativeSchemeId[0]; + p_Manip->ipReassmParams.relativeSchemeId[1] = reassmManipParams.relativeSchemeId[1]; + p_Manip->ipReassmParams.numOfFramesPerHashEntry[0] = reassmManipParams.numOfFramesPerHashEntry[0]; + p_Manip->ipReassmParams.numOfFramesPerHashEntry[1] = reassmManipParams.numOfFramesPerHashEntry[1]; + p_Manip->ipReassmParams.minFragSize[0] = reassmManipParams.minFragSize[0]; + p_Manip->ipReassmParams.minFragSize[1] = reassmManipParams.minFragSize[1]; + p_Manip->ipReassmParams.maxNumFramesInProcess = reassmManipParams.maxNumFramesInProcess; + p_Manip->ipReassmParams.timeOutMode = reassmManipParams.timeOutMode; + p_Manip->ipReassmParams.fqidForTimeOutFrames = reassmManipParams.fqidForTimeOutFrames; + p_Manip->ipReassmParams.timeoutThresholdForReassmProcess = reassmManipParams.timeoutThresholdForReassmProcess; + p_Manip->ipReassmParams.dataMemId = reassmManipParams.dataMemId; + 
p_Manip->ipReassmParams.dataLiodnOffset = reassmManipParams.dataLiodnOffset; +#if (DPAA_VERSION == 10) + p_Manip->ipReassmParams.sgBpid = reassmManipParams.sgBpid; +#endif /* (DPAA_VERSION == 10) */ +#if (DPAA_VERSION >= 11) + if (reassmManipParams.nonConsistentSpFqid != 0) + { + p_Manip->ipReassmParams.nonConsistentSpFqid = reassmManipParams.nonConsistentSpFqid; + } +#endif /* (DPAA_VERSION >= 11) */ + + /* Creates and initializes the IP Reassembly common parameter table */ + CreateIpReassCommonTable(p_Manip); + + /* Creation of IPv4 reassembly manipulation */ + if ((p_Manip->ipReassmParams.hdr == HEADER_TYPE_IPv6) || (p_Manip->ipReassmParams.hdr == HEADER_TYPE_IPv4)) + { + res = SetIpv4ReassmManip(p_Manip); + if (res != E_OK) + return res; + } + + /* Creation of IPv6 reassembly manipulation */ + if (p_Manip->ipReassmParams.hdr == HEADER_TYPE_IPv6) + { + res = SetIpv6ReassmManip(p_Manip); + if (res != E_OK) + return res; + } + + return E_OK; +} + +static void setReassmSchemeParams(t_FmPcd* p_FmPcd, t_FmPcdKgSchemeParams *p_Scheme, t_Handle h_CcTree, bool ipv4, uint8_t groupId) +{ + uint32_t j; + uint8_t res; + + /* Configures scheme's network environment parameters */ + p_Scheme->netEnvParams.numOfDistinctionUnits = 2; + if (ipv4) + res = FmPcdNetEnvGetUnitId(p_FmPcd, FmPcdGetNetEnvId(p_Scheme->netEnvParams.h_NetEnv), HEADER_TYPE_IPv4, FALSE, 0); + else + res = FmPcdNetEnvGetUnitId(p_FmPcd, FmPcdGetNetEnvId(p_Scheme->netEnvParams.h_NetEnv), HEADER_TYPE_IPv6, FALSE, 0); + ASSERT_COND(res != FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS); + p_Scheme->netEnvParams.unitIds[0] = res; + + res = FmPcdNetEnvGetUnitId(p_FmPcd, FmPcdGetNetEnvId(p_Scheme->netEnvParams.h_NetEnv), HEADER_TYPE_USER_DEFINED_SHIM2, FALSE, 0); + ASSERT_COND(res != FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS); + p_Scheme->netEnvParams.unitIds[1] = res; + + /* Configures scheme's next engine parameters*/ + p_Scheme->nextEngine = e_FM_PCD_CC; + p_Scheme->kgNextEngineParams.cc.h_CcTree = h_CcTree; + 
p_Scheme->kgNextEngineParams.cc.grpId = groupId; + p_Scheme->useHash = TRUE; + + /* Configures scheme's key*/ + if (ipv4 == TRUE) + { + p_Scheme->keyExtractAndHashParams.numOfUsedExtracts = 4; + p_Scheme->keyExtractAndHashParams.extractArray[0].type = e_FM_PCD_EXTRACT_BY_HDR; + p_Scheme->keyExtractAndHashParams.extractArray[0].extractByHdr.type = e_FM_PCD_EXTRACT_FULL_FIELD; + p_Scheme->keyExtractAndHashParams.extractArray[0].extractByHdr.hdr = HEADER_TYPE_IPv4 ; + p_Scheme->keyExtractAndHashParams.extractArray[0].extractByHdr.extractByHdrType.fullField.ipv4 = NET_HEADER_FIELD_IPv4_DST_IP; + p_Scheme->keyExtractAndHashParams.extractArray[1].type = e_FM_PCD_EXTRACT_BY_HDR; + p_Scheme->keyExtractAndHashParams.extractArray[1].extractByHdr.type = e_FM_PCD_EXTRACT_FULL_FIELD; + p_Scheme->keyExtractAndHashParams.extractArray[1].extractByHdr.hdr = HEADER_TYPE_IPv4; + p_Scheme->keyExtractAndHashParams.extractArray[1].extractByHdr.extractByHdrType.fullField.ipv4 = NET_HEADER_FIELD_IPv4_SRC_IP; + p_Scheme->keyExtractAndHashParams.extractArray[2].type = e_FM_PCD_EXTRACT_BY_HDR; + p_Scheme->keyExtractAndHashParams.extractArray[2].extractByHdr.type = e_FM_PCD_EXTRACT_FULL_FIELD; + p_Scheme->keyExtractAndHashParams.extractArray[2].extractByHdr.hdr = HEADER_TYPE_IPv4; + p_Scheme->keyExtractAndHashParams.extractArray[2].extractByHdr.extractByHdrType.fullField.ipv4 = NET_HEADER_FIELD_IPv4_PROTO; + p_Scheme->keyExtractAndHashParams.extractArray[3].type = e_FM_PCD_EXTRACT_BY_HDR; + p_Scheme->keyExtractAndHashParams.extractArray[3].extractByHdr.hdr = HEADER_TYPE_IPv4; + p_Scheme->keyExtractAndHashParams.extractArray[3].extractByHdr.type = e_FM_PCD_EXTRACT_FROM_HDR; + p_Scheme->keyExtractAndHashParams.extractArray[3].extractByHdr.ignoreProtocolValidation = FALSE; + p_Scheme->keyExtractAndHashParams.extractArray[3].extractByHdr.extractByHdrType.fromHdr.size = 2; + p_Scheme->keyExtractAndHashParams.extractArray[3].extractByHdr.extractByHdrType.fromHdr.offset = 4; + } + else /* IPv6 */ + 
{ + p_Scheme->keyExtractAndHashParams.numOfUsedExtracts = 3; + p_Scheme->keyExtractAndHashParams.extractArray[0].type = e_FM_PCD_EXTRACT_BY_HDR; + p_Scheme->keyExtractAndHashParams.extractArray[0].extractByHdr.type = e_FM_PCD_EXTRACT_FULL_FIELD; + p_Scheme->keyExtractAndHashParams.extractArray[0].extractByHdr.hdr = HEADER_TYPE_IPv6 ; + p_Scheme->keyExtractAndHashParams.extractArray[0].extractByHdr.extractByHdrType.fullField.ipv6 = NET_HEADER_FIELD_IPv6_DST_IP; + p_Scheme->keyExtractAndHashParams.extractArray[1].type = e_FM_PCD_EXTRACT_BY_HDR; + p_Scheme->keyExtractAndHashParams.extractArray[1].extractByHdr.type = e_FM_PCD_EXTRACT_FULL_FIELD; + p_Scheme->keyExtractAndHashParams.extractArray[1].extractByHdr.hdr = HEADER_TYPE_IPv6; + p_Scheme->keyExtractAndHashParams.extractArray[1].extractByHdr.extractByHdrType.fullField.ipv6 = NET_HEADER_FIELD_IPv6_SRC_IP; + p_Scheme->keyExtractAndHashParams.extractArray[2].type = e_FM_PCD_EXTRACT_BY_HDR; + p_Scheme->keyExtractAndHashParams.extractArray[2].extractByHdr.hdr = HEADER_TYPE_USER_DEFINED_SHIM2; + p_Scheme->keyExtractAndHashParams.extractArray[2].extractByHdr.type = e_FM_PCD_EXTRACT_FROM_HDR; + p_Scheme->keyExtractAndHashParams.extractArray[2].extractByHdr.extractByHdrType.fromHdr.size = 4; + p_Scheme->keyExtractAndHashParams.extractArray[2].extractByHdr.extractByHdrType.fromHdr.offset = 4; + p_Scheme->keyExtractAndHashParams.extractArray[2].extractByHdr.ignoreProtocolValidation = TRUE; + } + + p_Scheme->keyExtractAndHashParams.privateDflt0 = 0x01020304; + p_Scheme->keyExtractAndHashParams.privateDflt1 = 0x11121314; + p_Scheme->keyExtractAndHashParams.numOfUsedDflts = FM_PCD_KG_NUM_OF_DEFAULT_GROUPS; + for (j=0; j<FM_PCD_KG_NUM_OF_DEFAULT_GROUPS; j++) + { + p_Scheme->keyExtractAndHashParams.dflts[j].type = (e_FmPcdKgKnownFieldsDfltTypes)j; /* all types */ + p_Scheme->keyExtractAndHashParams.dflts[j].dfltSelect = e_FM_PCD_KG_DFLT_GBL_0; + } +} + +static t_Error IpReassemblyStats(t_FmPcdManip *p_Manip, 
t_FmPcdManipReassemIpStats *p_Stats) +{ + ASSERT_COND(p_Manip); + ASSERT_COND(p_Stats); + ASSERT_COND(p_Manip->ipReassmParams.p_IpReassCommonTbl); + + p_Stats->timeout = GET_UINT32(p_Manip->ipReassmParams.p_IpReassCommonTbl->totalTimeOutCounter); + p_Stats->rfdPoolBusy = GET_UINT32(p_Manip->ipReassmParams.p_IpReassCommonTbl->totalRfdPoolBusyCounter); + p_Stats->internalBufferBusy = GET_UINT32(p_Manip->ipReassmParams.p_IpReassCommonTbl->totalInternalBufferBusy); + p_Stats->externalBufferBusy = GET_UINT32(p_Manip->ipReassmParams.p_IpReassCommonTbl->totalExternalBufferBusy); + p_Stats->sgFragments = GET_UINT32(p_Manip->ipReassmParams.p_IpReassCommonTbl->totalSgFragmentCounter); + p_Stats->dmaSemaphoreDepletion = GET_UINT32(p_Manip->ipReassmParams.p_IpReassCommonTbl->totalDmaSemaphoreDepletionCounter); + + if (p_Manip->ipReassmParams.p_Ipv4ReassTbl) + { + p_Stats->specificHdrStatistics[0].successfullyReassembled = GET_UINT32(p_Manip->ipReassmParams.p_Ipv4ReassTbl->totalSuccessfullyReasmFramesCounter); + p_Stats->specificHdrStatistics[0].validFragments = GET_UINT32(p_Manip->ipReassmParams.p_Ipv4ReassTbl->totalValidFragmentCounter); + p_Stats->specificHdrStatistics[0].processedFragments = GET_UINT32(p_Manip->ipReassmParams.p_Ipv4ReassTbl->totalProcessedFragCounter); + p_Stats->specificHdrStatistics[0].malformedFragments = GET_UINT32(p_Manip->ipReassmParams.p_Ipv4ReassTbl->totalMalformdFragCounter); + p_Stats->specificHdrStatistics[0].autoLearnBusy = GET_UINT32(p_Manip->ipReassmParams.p_Ipv4ReassTbl->totalSetBusyCounter); + p_Stats->specificHdrStatistics[0].discardedFragments = GET_UINT32(p_Manip->ipReassmParams.p_Ipv4ReassTbl->totalDiscardedFragsCounter); + p_Stats->specificHdrStatistics[0].moreThan16Fragments = GET_UINT32(p_Manip->ipReassmParams.p_Ipv4ReassTbl->totalMoreThan16FramesCounter); + } + if (p_Manip->ipReassmParams.p_Ipv6ReassTbl) + { + p_Stats->specificHdrStatistics[1].successfullyReassembled = 
GET_UINT32(p_Manip->ipReassmParams.p_Ipv6ReassTbl->totalSuccessfullyReasmFramesCounter); + p_Stats->specificHdrStatistics[1].validFragments = GET_UINT32(p_Manip->ipReassmParams.p_Ipv6ReassTbl->totalValidFragmentCounter); + p_Stats->specificHdrStatistics[1].processedFragments = GET_UINT32(p_Manip->ipReassmParams.p_Ipv6ReassTbl->totalProcessedFragCounter); + p_Stats->specificHdrStatistics[1].malformedFragments = GET_UINT32(p_Manip->ipReassmParams.p_Ipv6ReassTbl->totalMalformdFragCounter); + p_Stats->specificHdrStatistics[1].autoLearnBusy = GET_UINT32(p_Manip->ipReassmParams.p_Ipv6ReassTbl->totalSetBusyCounter); + p_Stats->specificHdrStatistics[1].discardedFragments = GET_UINT32(p_Manip->ipReassmParams.p_Ipv6ReassTbl->totalDiscardedFragsCounter); + p_Stats->specificHdrStatistics[1].moreThan16Fragments = GET_UINT32(p_Manip->ipReassmParams.p_Ipv6ReassTbl->totalMoreThan16FramesCounter); + } + return E_OK; +} + +#ifdef FM_CAPWAP_SUPPORT +static t_Error IndxStats(t_FmPcdStatsParams *p_StatsParams,t_FmPcdManip *p_Manip,t_FmPcd *p_FmPcd) +{ + t_AdOfTypeContLookup *p_Ad; + uint32_t tmpReg32 = 0; + + SANITY_CHECK_RETURN_ERROR(p_Manip->h_Ad,E_INVALID_HANDLE); + + UNUSED(p_FmPcd); + + p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Ad; + + tmpReg32 = 0; + tmpReg32 |= (uint32_t)HMAN_OC_CAPWAP_INDEXED_STATS; + if (p_StatsParams->type == e_FM_PCD_STATS_PER_FLOWID) + tmpReg32 |= (uint32_t)0x16 << 16; + WRITE_UINT32(p_Ad->pcAndOffsets, tmpReg32); + + tmpReg32 = 0; + tmpReg32 |= FM_PCD_AD_CONT_LOOKUP_TYPE; + WRITE_UINT32(p_Ad->ccAdBase, tmpReg32); + + return E_OK; +} + +static t_Error InsrtHdrByTempl(t_FmPcdManipHdrInsrtParams *p_ManipParams, t_FmPcdManip *p_Manip, t_FmPcd *p_FmPcd) +{ + t_FmPcdManipHdrInsrtByTemplateParams *p_InsrtByTemplate = &p_ManipParams->u.byTemplate; + uint8_t tmpReg8 = 0xff; + t_AdOfTypeContLookup *p_Ad; + bool ipModify = FALSE; + uint32_t tmpReg32 = 0, tmpRegNia = 0; + uint16_t tmpReg16 = 0; + t_Error err = E_OK; + uint8_t extraAddedBytes = 0, blockSize = 0, 
extraAddedBytesAlignedToBlockSize = 0, log2Num = 0; + uint8_t *p_Template = NULL; + + SANITY_CHECK_RETURN_ERROR(p_ManipParams,E_NULL_POINTER); + SANITY_CHECK_RETURN_ERROR(p_Manip,E_NULL_POINTER); + SANITY_CHECK_RETURN_ERROR(p_Manip->h_Ad,E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd,E_NULL_POINTER); + + p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Ad; + if (p_Manip->insrt) + { + if ((!p_InsrtByTemplate->size && p_InsrtByTemplate->modifyOuterIp) || + (!p_InsrtByTemplate->size && p_InsrtByTemplate->modifyOuterVlan)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Inconsistent parameters : asking for header template modifications with no template for insertion (template size)")); + + if (p_InsrtByTemplate->size && p_InsrtByTemplate->modifyOuterIp && (p_InsrtByTemplate->size <= p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Inconsistent parameters : size of template < ipOuterOffset")); + + if (p_InsrtByTemplate->size > 128) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Size of header template for insertion can not be more than 128")); + + if (p_InsrtByTemplate->size) + { + p_Manip->p_Template = (uint8_t *)FM_MURAM_AllocMem(p_FmPcd->h_FmMuram, + p_InsrtByTemplate->size, + FM_PCD_CC_AD_TABLE_ALIGN); + if(!p_Manip->p_Template) + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Memory allocation in MURAM FAILED")); + + tmpReg32 = (uint32_t)(XX_VirtToPhys(p_Manip->p_Template) - (p_FmPcd->physicalMuramBase)); + tmpReg32 |= (uint32_t)p_InsrtByTemplate->size << 24; + *(uint32_t *)&p_Ad->matchTblPtr = tmpReg32; + } + + tmpReg32 = 0; + + p_Template = (uint8_t *)XX_Malloc(p_InsrtByTemplate->size * sizeof(uint8_t)); + + if (!p_Template) + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("XX_Malloc allocation FAILED")); + + memcpy(p_Template, p_InsrtByTemplate->hdrTemplate, p_InsrtByTemplate->size * sizeof(uint8_t)); + + + if (p_InsrtByTemplate->modifyOuterIp) + { + ipModify = TRUE; + + tmpReg8 = 
(uint8_t)p_Template[p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset]; + + if((tmpReg8 & 0xf0) == 0x40) + tmpReg8 = 4; + else if((tmpReg8 & 0xf0) == 0x60) + tmpReg8 = 6; + else + tmpReg8 = 0xff; + + if (tmpReg8 != 0xff) + { + if(p_InsrtByTemplate->modifyOuterIpParams.dscpEcn & 0xff00) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Inconsistent parameters : IPV4 present in header template, dscpEcn has to be only 1 byte")); + if(p_InsrtByTemplate->modifyOuterIpParams.recalculateLength) + { + + if((p_InsrtByTemplate->modifyOuterIpParams.recalculateLengthParams.extraBytesAddedAlignedToBlockSize + p_InsrtByTemplate->modifyOuterIpParams.recalculateLengthParams.extraBytesAddedNotAlignedToBlockSize) > 255) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("extra Byte added can not be more than 256 bytes")); + extraAddedBytes = (uint8_t) (p_InsrtByTemplate->modifyOuterIpParams.recalculateLengthParams.extraBytesAddedAlignedToBlockSize + p_InsrtByTemplate->modifyOuterIpParams.recalculateLengthParams.extraBytesAddedNotAlignedToBlockSize); + blockSize = p_InsrtByTemplate->modifyOuterIpParams.recalculateLengthParams.blockSize; + extraAddedBytesAlignedToBlockSize = p_InsrtByTemplate->modifyOuterIpParams.recalculateLengthParams.extraBytesAddedAlignedToBlockSize; + /*IP header template - IP totalLength - + (1 byte) extraByteForIp = headerTemplateSize - ipOffset + insertedBytesAfterThisStage , + in the case of SEC insertedBytesAfterThisStage - SEC trailer (21/31) + header(13) + second byte - extraByteForIp = headerTemplate - ipOffset + insertedBytesAfterThisStage*/ + } + if (blockSize) + { + if (!POWER_OF_2(blockSize)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("inputFrmPaddingUpToBlockSize has to be power of 2")); + } + + } + if (tmpReg8 == 4) + { + if ((IPv4_HDRCHECKSUM_FIELD_OFFSET_FROM_IP + p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset) > p_InsrtByTemplate->size) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Inconsistent parameters : IP present in header template, user asked for IP 
modifications but ipOffset + ipTotalLengthFieldOffset in header template bigger than template size")); + + + p_Template[p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset + IPv4_DSCECN_FIELD_OFFSET_FROM_IP] = (uint8_t)p_InsrtByTemplate->modifyOuterIpParams.dscpEcn; + + if (blockSize) + blockSize -= 1; + + if ((p_InsrtByTemplate->size - p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset + extraAddedBytes) > 255) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("p_InsrtByTemplate->size - p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset + extraAddedBytes has to be less than 255")); + + p_Template[p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset + IPv4_TOTALLENGTH_FIELD_OFFSET_FROM_IP + 1] = blockSize;// IPV6 - in AD instead of SEQ IND + p_Template[p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset + IPv4_TOTALLENGTH_FIELD_OFFSET_FROM_IP] = (uint8_t)(p_InsrtByTemplate->size - p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset + extraAddedBytes);// for IPV6 decrement additional 40 bytes of IPV6 heade size + + p_Template[p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset + IPv4_ID_FIELD_OFFSET_FROM_IP] = 0x00; + p_Template[p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset + IPv4_ID_FIELD_OFFSET_FROM_IP + 1] = extraAddedBytesAlignedToBlockSize; + + + + /*IP header template - relevant only for ipv4 CheckSum = 0*/ + p_Template[p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset + IPv4_HDRCHECKSUM_FIELD_OFFSET_FROM_IP] = 0x00; + p_Template[p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset + IPv4_HDRCHECKSUM_FIELD_OFFSET_FROM_IP + 1] = 0x00; + + + /*UDP checksum has to be 0*/ + if (p_InsrtByTemplate->modifyOuterIpParams.udpPresent) + { + if ((p_InsrtByTemplate->modifyOuterIpParams.udpOffset + UDP_CHECKSUM_FIELD_OFFSET_FROM_UDP + UDP_CHECKSUM_FIELD_SIZE) > p_InsrtByTemplate->size) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Inconsistent parameters : UDP present according to user but (UDP offset + UDP header size) < size of header template")); + + 
p_Template[p_InsrtByTemplate->modifyOuterIpParams.udpOffset + UDP_CHECKSUM_FIELD_OFFSET_FROM_UDP ] = 0x00; + p_Template[p_InsrtByTemplate->modifyOuterIpParams.udpOffset + UDP_CHECKSUM_FIELD_OFFSET_FROM_UDP + 1] = 0x00; + + } + + if (p_InsrtByTemplate->modifyOuterIpParams.ipIdentGenId > 7) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("ipIdentGenId has to be one out of 8 sequence number generators (0 - 7) for IP identification field")); + + tmpRegNia |= (uint32_t)p_InsrtByTemplate->modifyOuterIpParams.ipIdentGenId<<24; + } + else if (tmpReg8 == 6) + { + /*TODO - add check for maximum value of blockSize;*/ + if (blockSize) + LOG2(blockSize, log2Num); + tmpRegNia |= (uint32_t)log2Num << 24; + + // for IPV6 decrement additional 40 bytes of IPV6 heade size - because IPV6 header size is not included in payloadLength + p_Template[p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset + IPv6_PAYLOAD_LENGTH_OFFSET_FROM_IP] = (uint8_t)(p_InsrtByTemplate->size - p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset + extraAddedBytes - 40); + p_Template[p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset + IPv6_PAYLOAD_LENGTH_OFFSET_FROM_IP + 1] = extraAddedBytesAlignedToBlockSize; + if (p_InsrtByTemplate->modifyOuterIpParams.udpPresent) + { + if ((p_InsrtByTemplate->modifyOuterIpParams.udpOffset + UDP_CHECKSUM_FIELD_OFFSET_FROM_UDP + UDP_CHECKSUM_FIELD_SIZE) > p_InsrtByTemplate->size) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Inconsistent parameters : UDP present according to user but (UDP offset + UDP header size) < size of header template")); + if (p_Template[p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset + IPv6_NEXT_HEADER_OFFSET_FROM_IP] != 0x88) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("OUr suppport is only IPv6/UDPLite")); + p_Template[p_InsrtByTemplate->modifyOuterIpParams.udpOffset + UDP_LENGTH_FIELD_OFFSET_FROM_UDP] = 0x00; + p_Template[p_InsrtByTemplate->modifyOuterIpParams.udpOffset + UDP_LENGTH_FIELD_OFFSET_FROM_UDP + 1] = 0x08; + 
p_Template[p_InsrtByTemplate->modifyOuterIpParams.udpOffset + UDP_CHECKSUM_FIELD_OFFSET_FROM_UDP] = 0x00; + p_Template[p_InsrtByTemplate->modifyOuterIpParams.udpOffset + UDP_CHECKSUM_FIELD_OFFSET_FROM_UDP + 1] = 0x00; + } + } + else + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("IP version supported only IPV4")); + } + + tmpReg32 = tmpReg16 = tmpReg8 = 0; + /*TODO - check it*/ + if (p_InsrtByTemplate->modifyOuterVlan) + { + if (p_InsrtByTemplate->modifyOuterVlanParams.vpri & ~0x07) + RETURN_ERROR(MAJOR, E_INVALID_STATE,("Inconsistent parameters : user asked for VLAN modifications but VPRI more than 3 bits")); + + memcpy(&tmpReg16, &p_Template[VLAN_TAG_FIELD_OFFSET_FROM_ETH], 2*(sizeof(uint8_t))); + if ((tmpReg16 != 0x9100) && (tmpReg16!= 0x9200) && (tmpReg16 != 0x8100)) + RETURN_ERROR(MAJOR, E_INVALID_STATE,("Inconsistent parameters : user asked for VLAN modifications but Tag Protocol identifier is not VLAN ")); + + memcpy(&tmpReg8, &p_Template[14],1*(sizeof(uint8_t))); + tmpReg8 &= 0x1f; + tmpReg8 |= (uint8_t)(p_InsrtByTemplate->modifyOuterVlanParams.vpri << 5); + + p_Template[14] = tmpReg8; + } + + Mem2IOCpy32(p_Manip->p_Template, p_Template, p_InsrtByTemplate->size); + + XX_Free(p_Template); + } + + tmpReg32 = 0; + if (p_Manip->h_Frag) + { + tmpRegNia |= (uint32_t)(XX_VirtToPhys(p_Manip->h_Frag) - (p_FmPcd->physicalMuramBase)); + tmpReg32 |= (uint32_t)p_Manip->sizeForFragmentation << 16; + } + else + tmpReg32 = 0xffff0000; + + if (ipModify) + tmpReg32 |= (uint32_t)p_InsrtByTemplate->modifyOuterIpParams.ipOuterOffset << 8; + else + tmpReg32 |= (uint32_t)0x0000ff00; + + tmpReg32 |= (uint32_t)HMAN_OC_INSRT_HDR_BY_TEMPL_N_OR_FRAG_AFTER; + *(uint32_t *)&p_Ad->pcAndOffsets = tmpReg32; + + tmpRegNia |= FM_PCD_AD_CONT_LOOKUP_TYPE; + *(uint32_t *)&p_Ad->ccAdBase = tmpRegNia; + + return err; +} +#endif /* FM_CAPWAP_SUPPORT */ + +static t_Error IpFragmentationStats(t_FmPcdManip *p_Manip, t_FmPcdManipFragIpStats *p_Stats) +{ + t_AdOfTypeContLookup *p_Ad; + + ASSERT_COND(p_Manip); 
+ ASSERT_COND(p_Stats);
+ ASSERT_COND(p_Manip->h_Ad);
+ ASSERT_COND(p_Manip->ipFragParams.p_Frag);
+
+ p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Ad;
+
/* gmask of the manip AD is used as the total-frame counter (IPManip below
 * initializes it to zero with exactly that comment); the fragmentation AD's
 * ccAdBase low 24 bits and matchTblPtr hold the per-fragmentation counters. */
+ p_Stats->totalFrames = GET_UINT32(p_Ad->gmask);
+ p_Stats->fragmentedFrames = GET_UINT32(p_Manip->ipFragParams.p_Frag->ccAdBase) & 0x00ffffff;
+ p_Stats->generatedFragments = GET_UINT32(p_Manip->ipFragParams.p_Frag->matchTblPtr);
+
+ return E_OK;
+}
+
/* Builds the IP-fragmentation action descriptor in MURAM and fills its four
 * 32-bit words (ccAdBase / matchTblPtr-unused / pcAndOffsets / gmask) from the
 * user's fragmentation parameters. On DPAA 1.0 it additionally seeds the
 * scratch buffer pool. Returns E_OK or an error code; on scratch-pool failure
 * the MURAM descriptor is freed again. */
+static t_Error IpFragmentation(t_FmPcdManipFragIpParams *p_ManipParams, t_FmPcdManip *p_Manip)
+{
+ uint32_t pcAndOffsetsReg = 0, ccAdBaseReg = 0, gmaskReg = 0;
+ t_FmPcd *p_FmPcd;
+#if (DPAA_VERSION == 10)
+ t_Error err = E_OK;
+#endif /* (DPAA_VERSION == 10) */
+
/* 0xFFFF is the "no fragmentation" sentinel used elsewhere for the MTU field,
 * so it is rejected here as a real fragmentation size. */
+ SANITY_CHECK_RETURN_ERROR(p_Manip->h_Ad,E_INVALID_HANDLE);
+ SANITY_CHECK_RETURN_ERROR(p_ManipParams->sizeForFragmentation != 0xFFFF, E_INVALID_VALUE);
+
+ p_FmPcd = p_Manip->h_FmPcd;
+ /* Allocation of fragmentation Action Descriptor */
+ p_Manip->ipFragParams.p_Frag = (t_AdOfTypeContLookup *)FM_MURAM_AllocMem(p_FmPcd->h_FmMuram,
+ FM_PCD_CC_AD_ENTRY_SIZE,
+ FM_PCD_CC_AD_TABLE_ALIGN);
+ if (!p_Manip->ipFragParams.p_Frag)
+ RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM alloc for Fragmentation table descriptor"));
+ IOMemSet32( p_Manip->ipFragParams.p_Frag, 0, FM_PCD_CC_AD_ENTRY_SIZE);
+
+ /* Prepare the third Ad register (pcAndOffsets)- OperationCode */
+ pcAndOffsetsReg = (uint32_t)HMAN_OC_IP_FRAGMENTATION;
+
+ /* Prepare the first Ad register (ccAdBase) - Don't frag action and Action descriptor type*/
+ ccAdBaseReg = FM_PCD_AD_CONT_LOOKUP_TYPE;
+ ccAdBaseReg |= (p_ManipParams->dontFragAction << FM_PCD_MANIP_IP_FRAG_DF_SHIFT);
+
+
+ /* Set Scatter/Gather BPid */
+ if (p_ManipParams->sgBpidEn)
+ {
+ ccAdBaseReg |= FM_PCD_MANIP_IP_FRAG_SG_BDID_EN;
+ pcAndOffsetsReg |= ((p_ManipParams->sgBpid << FM_PCD_MANIP_IP_FRAG_SG_BDID_SHIFT) & FM_PCD_MANIP_IP_FRAG_SG_BDID_MASK);
+ }
+
+ /* Prepare the fourth (last) Ad register (gmask) - scratch buffer pool id and Pointer to fragment ID */
+ gmaskReg = 
(uint32_t)(XX_VirtToPhys(UINT_TO_PTR(p_FmPcd->ipv6FrameIdAddr)) - p_FmPcd->physicalMuramBase); +#if (DPAA_VERSION == 10) + gmaskReg |= p_ManipParams->scratchBpid << FM_PCD_MANIP_IP_FRAG_SCRATCH_BPID; +#else + gmaskReg |= (0xFF) << FM_PCD_MANIP_IP_FRAG_SCRATCH_BPID; +#endif /* (DPAA_VERSION == 10) */ + + /* Set all Ad registers */ + WRITE_UINT32(p_Manip->ipFragParams.p_Frag->pcAndOffsets, pcAndOffsetsReg); + WRITE_UINT32(p_Manip->ipFragParams.p_Frag->ccAdBase, ccAdBaseReg); + WRITE_UINT32(p_Manip->ipFragParams.p_Frag->gmask, gmaskReg); + + /* Saves user's fragmentation manipulation parameters */ + p_Manip->frag = TRUE; + p_Manip->sizeForFragmentation = p_ManipParams->sizeForFragmentation; + +#if (DPAA_VERSION == 10) + p_Manip->ipFragParams.scratchBpid = p_ManipParams->scratchBpid; + + /* scratch buffer pool initialization */ + if ((err = FmPcdFragHcScratchPoolFill((t_Handle)p_FmPcd, p_ManipParams->scratchBpid)) != E_OK) + { + FM_MURAM_FreeMem(p_FmPcd->h_FmMuram, p_Manip->ipFragParams.p_Frag); + p_Manip->ipFragParams.p_Frag = NULL; + RETURN_ERROR(MAJOR, err, NO_MSG); + } +#endif /* (DPAA_VERSION == 10) */ + + return E_OK; +} + +static t_Error IPManip(t_FmPcdManip *p_Manip) +{ + t_Error err = E_OK; + t_FmPcd *p_FmPcd; + t_AdOfTypeContLookup *p_Ad; + uint32_t tmpReg32 = 0, tmpRegNia = 0; + + SANITY_CHECK_RETURN_ERROR(p_Manip,E_INVALID_HANDLE); + p_FmPcd = p_Manip->h_FmPcd; + SANITY_CHECK_RETURN_ERROR(p_FmPcd,E_INVALID_HANDLE); + + p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Ad; + + tmpReg32 = FM_PCD_MANIP_IP_NO_FRAGMENTATION; + if (p_Manip->frag == TRUE) + { + tmpRegNia = (uint32_t)(XX_VirtToPhys(p_Manip->ipFragParams.p_Frag) - (p_FmPcd->physicalMuramBase)); + tmpReg32 = (uint32_t)p_Manip->sizeForFragmentation << FM_PCD_MANIP_IP_MTU_SHIFT; + } + + tmpRegNia |= FM_PCD_AD_CONT_LOOKUP_TYPE; + tmpReg32 |= HMAN_OC_IP_MANIP; + +#if (DPAA_VERSION >= 11) + tmpRegNia |= FM_PCD_MANIP_IP_CNIA; +#endif /* (DPAA_VERSION >= 11) */ + + WRITE_UINT32(p_Ad->pcAndOffsets, tmpReg32); + 
WRITE_UINT32(p_Ad->ccAdBase, tmpRegNia); + WRITE_UINT32(p_Ad->gmask, 0); /* Total frame counter - MUST be initialized to zero.*/ + + return err; +} + +static t_Error IPSecManip(t_FmPcdManipParams *p_ManipParams, + t_FmPcdManip *p_Manip) +{ + t_AdOfTypeContLookup *p_Ad; + t_FmPcdManipSpecialOffloadIPSecParams *p_IPSecParams; + t_Error err = E_OK; + uint32_t tmpReg32 = 0; + + SANITY_CHECK_RETURN_ERROR(p_Manip,E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_ManipParams,E_INVALID_HANDLE); + + p_IPSecParams = &p_ManipParams->u.specialOffload.u.ipsec; + + SANITY_CHECK_RETURN_ERROR(!p_IPSecParams->variableIpHdrLen || + p_IPSecParams->decryption, E_INVALID_VALUE); + SANITY_CHECK_RETURN_ERROR(!p_IPSecParams->variableIpVersion || + !p_IPSecParams->decryption, E_INVALID_VALUE); + SANITY_CHECK_RETURN_ERROR(!p_IPSecParams->variableIpVersion || + p_IPSecParams->outerIPHdrLen, E_INVALID_VALUE); + + p_Ad = (t_AdOfTypeContLookup *)p_Manip->h_Ad; + + tmpReg32 |= FM_PCD_AD_CONT_LOOKUP_TYPE; + tmpReg32 |= (p_IPSecParams->decryption)?FM_PCD_MANIP_IPSEC_DEC:0; + tmpReg32 |= (p_IPSecParams->ecnCopy)?FM_PCD_MANIP_IPSEC_ECN_EN:0; + tmpReg32 |= (p_IPSecParams->dscpCopy)?FM_PCD_MANIP_IPSEC_DSCP_EN:0; + tmpReg32 |= (p_IPSecParams->variableIpHdrLen)?FM_PCD_MANIP_IPSEC_VIPL_EN:0; + tmpReg32 |= (p_IPSecParams->variableIpVersion)?FM_PCD_MANIP_IPSEC_VIPV_EN:0; + WRITE_UINT32(p_Ad->ccAdBase, tmpReg32); + + tmpReg32 = HMAN_OC_IPSEC_MANIP; + tmpReg32 |= p_IPSecParams->outerIPHdrLen << FM_PCD_MANIP_IPSEC_IP_HDR_LEN_SHIFT; + if (p_ManipParams->h_NextManip) + { + WRITE_UINT32(p_Ad->matchTblPtr, + (uint32_t)(XX_VirtToPhys(((t_FmPcdManip *)p_ManipParams->h_NextManip)->h_Ad)- + (((t_FmPcd *)p_Manip->h_FmPcd)->physicalMuramBase)) >> 4); + + tmpReg32 |= FM_PCD_MANIP_IPSEC_NADEN; + } + WRITE_UINT32(p_Ad->pcAndOffsets, tmpReg32); + + return err; +} + +#ifdef FM_CAPWAP_SUPPORT +static t_Error CheckStatsParamsAndSetType(t_FmPcdManip *p_Manip, t_FmPcdStatsParams *p_StatsParams) +{ + + switch 
(p_StatsParams->type) + { + case (e_FM_PCD_STATS_PER_FLOWID): + p_Manip->opcode = HMAN_OC_CAPWAP_INDEXED_STATS; + p_Manip->muramAllocate = TRUE; + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Unsupported statistics type")); + } + + return E_OK; +} +#endif /* FM_CAPWAP_SUPPORT */ + +static t_Handle ManipOrStatsSetNode(t_Handle h_FmPcd, t_Handle *p_Params, bool stats) +{ + t_FmPcdManip *p_Manip; + t_Error err; + t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; + + p_Manip = (t_FmPcdManip*)XX_Malloc(sizeof(t_FmPcdManip)); + if (!p_Manip) + { + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("No memory")); + return NULL; + } + memset(p_Manip, 0, sizeof(t_FmPcdManip)); + + p_Manip->type = ((t_FmPcdManipParams *)p_Params)->type; + memcpy((uint8_t*)&p_Manip->manipParams, p_Params, sizeof(p_Manip->manipParams)); + + if (!stats) + err = CheckManipParamsAndSetType(p_Manip, (t_FmPcdManipParams *)p_Params); +#ifdef FM_CAPWAP_SUPPORT + else + err = CheckStatsParamsAndSetType(p_Manip, (t_FmPcdStatsParams *)p_Params); +#else + else + { + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Statistics node!")); + return NULL; + } +#endif /* FM_CAPWAP_SUPPORT */ + if (err) + { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("INVALID HEADER MANIPULATION TYPE")); + ReleaseManipHandler(p_Manip, p_FmPcd); + XX_Free(p_Manip); + return NULL; + } + + if (p_Manip->opcode != HMAN_OC_IP_REASSEMBLY) + { + /* In Case of IP reassembly manipulation the IPv4/IPv6 reassembly action descriptor will + be defines later on */ + if (p_Manip->muramAllocate) + { + p_Manip->h_Ad = (t_Handle)FM_MURAM_AllocMem(p_FmPcd->h_FmMuram, + FM_PCD_CC_AD_ENTRY_SIZE, + FM_PCD_CC_AD_TABLE_ALIGN); + if (!p_Manip->h_Ad) + { + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM alloc for Manipulation action descriptor")); + ReleaseManipHandler(p_Manip, p_FmPcd); + XX_Free(p_Manip); + return NULL; + } + + IOMemSet32(p_Manip->h_Ad, 0, FM_PCD_CC_AD_ENTRY_SIZE); + } + else + { + p_Manip->h_Ad = (t_Handle)XX_Malloc(FM_PCD_CC_AD_ENTRY_SIZE * sizeof(uint8_t)); + if 
(!p_Manip->h_Ad) + { + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("Allocation of Manipulation action descriptor")); + ReleaseManipHandler(p_Manip, p_FmPcd); + XX_Free(p_Manip); + return NULL; + } + + memset(p_Manip->h_Ad, 0, FM_PCD_CC_AD_ENTRY_SIZE * sizeof(uint8_t)); + } + } + + p_Manip->h_FmPcd = h_FmPcd; + + return p_Manip; +} + +static void UpdateAdPtrOfNodesWhichPointsOnCrntMdfManip(t_FmPcdManip *p_CrntMdfManip, + t_List *h_NodesLst) +{ + t_CcNodeInformation *p_CcNodeInformation; + t_FmPcdCcNode *p_NodePtrOnCurrentMdfManip = NULL; + t_List *p_Pos; + int i = 0; + t_Handle p_AdTablePtOnCrntCurrentMdfNode/*, p_AdTableNewModified*/; + t_CcNodeInformation ccNodeInfo; + + LIST_FOR_EACH(p_Pos, &p_CrntMdfManip->nodesLst) + { + p_CcNodeInformation = CC_NODE_F_OBJECT(p_Pos); + p_NodePtrOnCurrentMdfManip = (t_FmPcdCcNode *)p_CcNodeInformation->h_CcNode; + + ASSERT_COND(p_NodePtrOnCurrentMdfManip); + + /* Search in the previous node which exact index points on this current modified node for getting AD */ + for (i = 0; i < p_NodePtrOnCurrentMdfManip->numOfKeys + 1; i++) + { + if (p_NodePtrOnCurrentMdfManip->keyAndNextEngineParams[i].nextEngineParams.nextEngine == e_FM_PCD_CC) + { + if (p_NodePtrOnCurrentMdfManip->keyAndNextEngineParams[i].nextEngineParams.h_Manip == (t_Handle)p_CrntMdfManip) + { + if (p_NodePtrOnCurrentMdfManip->keyAndNextEngineParams[i].p_StatsObj) + p_AdTablePtOnCrntCurrentMdfNode = + p_NodePtrOnCurrentMdfManip->keyAndNextEngineParams[i].p_StatsObj->h_StatsAd; + else + p_AdTablePtOnCrntCurrentMdfNode = + PTR_MOVE(p_NodePtrOnCurrentMdfManip->h_AdTable, i*FM_PCD_CC_AD_ENTRY_SIZE); + + memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation)); + ccNodeInfo.h_CcNode = p_AdTablePtOnCrntCurrentMdfNode; + EnqueueNodeInfoToRelevantLst(h_NodesLst, &ccNodeInfo, NULL); + } + } + } + + ASSERT_COND(i != p_NodePtrOnCurrentMdfManip->numOfKeys); + } +} + +static void BuildHmtd(uint8_t *p_Dest, uint8_t *p_Src, uint8_t *p_Hmcd, t_FmPcd *p_FmPcd) +{ + t_Error err; + + /* Copy the HMTD */ 
+ IO2IOCpy32(p_Dest, (uint8_t*)p_Src, 16); + /* Replace the HMCT table pointer */ + WRITE_UINT32(((t_Hmtd *)p_Dest)->hmcdBasePtr, + (uint32_t)(XX_VirtToPhys(p_Hmcd) - ((t_FmPcd*)p_FmPcd)->physicalMuramBase)); + /* Call Host Command to replace HMTD by a new HMTD */ + err = FmHcPcdCcDoDynamicChange(p_FmPcd->h_Hc, + (uint32_t)(XX_VirtToPhys(p_Src) - p_FmPcd->physicalMuramBase), + (uint32_t)(XX_VirtToPhys(p_Dest) - p_FmPcd->physicalMuramBase)); + if (err) + REPORT_ERROR(MINOR, err, ("Failed in dynamic manip change, continued to the rest of the owners.")); +} + +/*****************************************************************************/ +/* Inter-module API routines */ +/*****************************************************************************/ + +t_Error FmPcdManipUpdate(t_Handle h_FmPcd, + t_Handle h_PcdParams, + t_Handle h_FmPort, + t_Handle h_Manip, + t_Handle h_Ad, + bool validate, + int level, + t_Handle h_FmTree, + bool modify) +{ + t_Error err; + + if (!modify) + err = FmPcdManipInitUpdate(h_FmPcd, h_PcdParams, h_FmPort, h_Manip, h_Ad, validate, level, h_FmTree); + else + err = FmPcdManipModifyUpdate(h_Manip, h_Ad, validate, level, h_FmTree); + + return err; +} + +uint32_t FmPcdManipGetRequiredAction (t_Handle h_Manip) +{ + t_FmPcdManip *p_Manip = (t_FmPcdManip *)h_Manip; + + ASSERT_COND(h_Manip); + + switch (p_Manip->opcode) + { + case (HMAN_OC_CAPWAP_RMV_DTLS_IF_EXIST): + case (HMAN_OC_MV_INT_FRAME_HDR_FROM_FRM_TO_BUFFER_PREFFIX): + return UPDATE_NIA_ENQ_WITHOUT_DMA; + default: + return 0; + } +} + +void FmPcdManipUpdateOwner(t_Handle h_Manip, bool add) +{ + + uint32_t intFlags; + + intFlags = XX_LockIntrSpinlock(((t_FmPcdManip *)h_Manip)->h_Spinlock); + if (add) + ((t_FmPcdManip *)h_Manip)->owner++; + else + { + ASSERT_COND(((t_FmPcdManip *)h_Manip)->owner); + ((t_FmPcdManip *)h_Manip)->owner--; + } + XX_UnlockIntrSpinlock(((t_FmPcdManip *)h_Manip)->h_Spinlock, intFlags); +} + +t_List *FmPcdManipGetNodeLstPointedOnThisManip(t_Handle h_Manip) +{ + 
ASSERT_COND(h_Manip);
+ return &((t_FmPcdManip *)h_Manip)->nodesLst;
+}
+
/* Returns the manip node's spinlock handle so callers can serialize owner/list
 * updates.
 * NOTE(review): declared to return t_List * but actually returns h_Spinlock,
 * which is the handle created by XX_InitSpinlock() (see FM_PCD_ManipNodeSet
 * below), not a list. The return type looks wrong; presumably it should be
 * t_Handle -- confirm against the prototype in the header. */
+t_List *FmPcdManipGetSpinlock(t_Handle h_Manip)
+{
+ ASSERT_COND(h_Manip);
+ return ((t_FmPcdManip *)h_Manip)->h_Spinlock;
+}
+
/* Validates that a CC next-engine descriptor is compatible with every node in
 * the manip chain it points at, accumulates the follow-up action required by
 * the chain into *requiredAction, and bumps each node's temporary owner
 * counter where applicable. Walks h_NextManip links until the chain ends. */
+t_Error FmPcdManipCheckParamsForCcNextEngine(t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams, uint32_t *requiredAction)
+{
+ t_FmPcdManip *p_Manip;
+ t_Error err;
+ bool pointFromCc = TRUE;
+
+
+ SANITY_CHECK_RETURN_ERROR(p_FmPcdCcNextEngineParams, E_NULL_POINTER);
+ SANITY_CHECK_RETURN_ERROR(p_FmPcdCcNextEngineParams->h_Manip, E_NULL_POINTER);
+
+ p_Manip = (t_FmPcdManip *)(p_FmPcdCcNextEngineParams->h_Manip);
+ *requiredAction = 0;
+
+ while (p_Manip)
+ {
+ switch (p_Manip->opcode)
+ {
+ case (HMAN_OC_CAPWAP_INDEXED_STATS):
+ if (p_FmPcdCcNextEngineParams->nextEngine != e_FM_PCD_DONE)
+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("For this type of header manipulation has to be nextEngine e_FM_PCD_DONE"));
+ if (p_FmPcdCcNextEngineParams->params.enqueueParams.overrideFqid)
+ p_Manip->cnia = TRUE;
/* no break: falls through to the DTLS case (and then to ownerTmp++) --
 * looks intentional, but unannotated; confirm */
+ case (HMAN_OC_CAPWAP_RMV_DTLS_IF_EXIST):
+ *requiredAction = UPDATE_NIA_ENQ_WITHOUT_DMA;
/* no break: intentional(?) fallthrough to ownerTmp++ */
+ case (HMAN_OC_RMV_N_OR_INSRT_INT_FRM_HDR):
+ p_Manip->ownerTmp++;
+ break;
+ case (HMAN_OC_INSRT_HDR_BY_TEMPL_N_OR_FRAG_AFTER):
+ if ((p_FmPcdCcNextEngineParams->nextEngine != e_FM_PCD_DONE) &&
+ !p_FmPcdCcNextEngineParams->params.enqueueParams.overrideFqid)
+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("For this type of header manipulation has to be nextEngine e_FM_PCD_DONE with fqidForCtrlFlow FALSE"));
+ p_Manip->ownerTmp++;
+ break;
+ case (HMAN_OC_MV_INT_FRAME_HDR_FROM_FRM_TO_BUFFER_PREFFIX):
+ if ((p_FmPcdCcNextEngineParams->nextEngine != e_FM_PCD_CC) &&
+ (FmPcdCcGetParseCode(p_FmPcdCcNextEngineParams->params.ccParams.h_CcNode) != CC_PC_GENERIC_IC_HASH_INDEXED))
+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("For this type of header manipulation next engine has to be CC and action = e_FM_PCD_ACTION_INDEXED_LOOKUP"));
+ err = UpdateManipIc(p_FmPcdCcNextEngineParams->h_Manip, 
FmPcdCcGetOffset(p_FmPcdCcNextEngineParams->params.ccParams.h_CcNode)); + if (err) + RETURN_ERROR(MAJOR, err, NO_MSG); + *requiredAction = UPDATE_NIA_ENQ_WITHOUT_DMA; + break; + case (HMAN_OC_IP_FRAGMENTATION): +#if (DPAA_VERSION == 10) + if (!(p_FmPcdCcNextEngineParams->nextEngine == e_FM_PCD_DONE)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, + ("For this type of header manipulation has to be nextEngine e_FM_PCD_DONE")); +#else + if (!((p_FmPcdCcNextEngineParams->nextEngine == e_FM_PCD_DONE) || + (p_FmPcdCcNextEngineParams->nextEngine == e_FM_PCD_PLCR))) + RETURN_ERROR(MAJOR, E_INVALID_STATE, + ("For this type of header manipulation has to be nextEngine " + "e_FM_PCD_DONE or e_FM_PCD_PLCR")); +#endif /* (DPAA_VERSION == 10) */ + p_Manip->ownerTmp++; + break; + case (HMAN_OC_IP_REASSEMBLY): + if (p_FmPcdCcNextEngineParams->nextEngine != e_FM_PCD_DONE) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("For this type of header manipulation has to be nextEngine e_FM_PCD_DONE")); + p_Manip->ownerTmp++; + break; + case (HMAN_OC_IPSEC_MANIP): + p_Manip->ownerTmp++; + break; + case (HMAN_OC): + if (( p_FmPcdCcNextEngineParams->nextEngine == e_FM_PCD_CC) && MANIP_IS_CASCADE_NEXT(p_Manip)) + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Can't have a cascaded manipulation when and Next Engine is CC")); + if (!MANIP_IS_FIRST(p_Manip) && pointFromCc) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("h_Manip is already used and may not be shared (no sharing of non-head manip nodes)")); + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_STATE,("invalid type of header manipulation for this state")); + } + p_Manip = p_Manip->h_NextManip; + pointFromCc = FALSE; + } + return E_OK; +} + + +t_Error FmPcdManipCheckParamsWithCcNodeParams(t_Handle h_Manip, t_Handle h_FmPcdCcNode) +{ + t_FmPcdManip *p_Manip = (t_FmPcdManip *)h_Manip; + t_Error err = E_OK; + + SANITY_CHECK_RETURN_ERROR(h_Manip, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(h_FmPcdCcNode, E_INVALID_HANDLE); + + switch (p_Manip->opcode) + { + case 
(HMAN_OC_CAPWAP_INDEXED_STATS): + if (p_Manip->ownerTmp != FmPcdCcGetNumOfKeys(h_FmPcdCcNode)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("The manipulation of the type statistics flowId if exist has to be pointed by all numOfKeys")); + break; + case (HMAN_OC_CAPWAP_RMV_DTLS_IF_EXIST): + if (p_Manip->h_Frag) + { + if (p_Manip->ownerTmp != FmPcdCcGetNumOfKeys(h_FmPcdCcNode)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("The manipulation of the type remove DTLS if exist has to be pointed by all numOfKeys")); + err = UpdateManipIc(h_Manip, FmPcdCcGetOffset(h_FmPcdCcNode)); + if (err) + RETURN_ERROR(MAJOR, err, NO_MSG); + } + break; + default: + break; + } + + return err; +} + +void FmPcdManipUpdateAdResultForCc(t_Handle h_Manip, + t_FmPcdCcNextEngineParams *p_CcNextEngineParams, + t_Handle p_Ad, + t_Handle *p_AdNewPtr) +{ + t_FmPcdManip *p_Manip = (t_FmPcdManip *)h_Manip; + + + /* This routine creates a Manip AD and can return in "p_AdNewPtr" + * either the new descriptor or NULL if it writes the Manip AD into p_AD (into the match table) */ + + ASSERT_COND(p_Manip); + ASSERT_COND(p_CcNextEngineParams); + ASSERT_COND(p_Ad); + ASSERT_COND(p_AdNewPtr); + + FmPcdManipUpdateOwner(h_Manip, TRUE); + + /* According to "type", either build & initialize a new AD (p_AdNew) or initialize + * p_Ad ( the AD in the match table) and set p_AdNew = NULL. 
*/ + switch (p_Manip->opcode) + { + case (HMAN_OC_RMV_N_OR_INSRT_INT_FRM_HDR): + case (HMAN_OC_CAPWAP_RMV_DTLS_IF_EXIST): + case (HMAN_OC_CAPWAP_INDEXED_STATS): + *p_AdNewPtr = p_Manip->h_Ad; + break; + case (HMAN_OC_IPSEC_MANIP): + *p_AdNewPtr = p_Manip->h_Ad; + break; + case (HMAN_OC_IP_FRAGMENTATION): + if ((p_CcNextEngineParams->nextEngine == e_FM_PCD_DONE) && + (!p_CcNextEngineParams->params.enqueueParams.overrideFqid)) + { + memcpy((uint8_t *)p_Ad, (uint8_t *)p_Manip->h_Ad, sizeof(t_AdOfTypeContLookup)); +#if (DPAA_VERSION >= 11) + WRITE_UINT32(((t_AdOfTypeContLookup *)p_Ad)->ccAdBase, + GET_UINT32(((t_AdOfTypeContLookup *)p_Ad)->ccAdBase) & ~FM_PCD_MANIP_IP_CNIA); +#endif /* (DPAA_VERSION >= 11) */ + *p_AdNewPtr = NULL; + } + else + *p_AdNewPtr = p_Manip->h_Ad; + break; + case (HMAN_OC_IP_REASSEMBLY): + if (FmPcdManipIpReassmIsIpv6Hdr(p_Manip)) + { + if (!p_Manip->ipReassmParams.ipv6Assigned) + { + *p_AdNewPtr = p_Manip->ipReassmParams.h_Ipv6Ad; + p_Manip->ipReassmParams.ipv6Assigned = TRUE; + FmPcdManipUpdateOwner(h_Manip, FALSE); + } + else + { + *p_AdNewPtr = p_Manip->ipReassmParams.h_Ipv4Ad; + p_Manip->ipReassmParams.ipv6Assigned = FALSE; + } + } + else + *p_AdNewPtr = p_Manip->ipReassmParams.h_Ipv4Ad; + memcpy((uint8_t *)p_Ad, (uint8_t *)*p_AdNewPtr, sizeof(t_AdOfTypeContLookup)); + *p_AdNewPtr = NULL; + break; + case (HMAN_OC_INSRT_HDR_BY_TEMPL_N_OR_FRAG_AFTER): + case (HMAN_OC_CAPWAP_FRAGMENTATION): + WRITE_UINT32(((t_AdOfTypeResult *)p_Ad)->fqid, ((t_AdOfTypeResult *)(p_Manip->h_Ad))->fqid); + WRITE_UINT32(((t_AdOfTypeResult *)p_Ad)->plcrProfile, ((t_AdOfTypeResult *)(p_Manip->h_Ad))->plcrProfile); + WRITE_UINT32(((t_AdOfTypeResult *)p_Ad)->nia, ((t_AdOfTypeResult *)(p_Manip->h_Ad))->nia); + *p_AdNewPtr = NULL; + break; + case (HMAN_OC): + /* Allocate and initialize HMTD */ + *p_AdNewPtr = p_Manip->h_Ad; + break; + default: + break; + } +} + +void FmPcdManipUpdateAdContLookupForCc(t_Handle h_Manip, t_Handle p_Ad, t_Handle *p_AdNewPtr, uint32_t 
adTableOffset) +{ + t_FmPcdManip *p_Manip = (t_FmPcdManip *)h_Manip; + + /* This routine creates a Manip AD and can return in "p_AdNewPtr" + * either the new descriptor or NULL if it writes the Manip AD into p_AD (into the match table) */ + ASSERT_COND(p_Manip); + + FmPcdManipUpdateOwner(h_Manip, TRUE); + + switch (p_Manip->opcode) + { + case (HMAN_OC_MV_INT_FRAME_HDR_FROM_FRM_TO_BUFFER_PREFFIX): + WRITE_UINT32(((t_AdOfTypeContLookup *)p_Ad)->ccAdBase, ((t_AdOfTypeContLookup *)(p_Manip->h_Ad))->ccAdBase); + WRITE_UINT32(((t_AdOfTypeContLookup *)p_Ad)->matchTblPtr, ((t_AdOfTypeContLookup *)(p_Manip->h_Ad))->matchTblPtr); + WRITE_UINT32(((t_AdOfTypeContLookup *)p_Ad)->pcAndOffsets, ((t_AdOfTypeContLookup *)(p_Manip->h_Ad))->pcAndOffsets); + WRITE_UINT32(((t_AdOfTypeContLookup *)p_Ad)->gmask, ((t_AdOfTypeContLookup *)(p_Manip->h_Ad))->gmask); + WRITE_UINT32(((t_AdOfTypeContLookup *)p_Ad)->ccAdBase, (GET_UINT32(((t_AdOfTypeContLookup *)p_Ad)->ccAdBase) | adTableOffset)); + *p_AdNewPtr = NULL; + break; + + case (HMAN_OC): + /* Initialize HMTD within the match table*/ + IOMemSet32(p_Ad, 0, FM_PCD_CC_AD_ENTRY_SIZE); + /* copy the existing HMTD */ /* ask Alla - memcpy??? 
*/ + memcpy((uint8_t*)p_Ad, p_Manip->h_Ad, sizeof(t_Hmtd)); + /* update NADEN to be "1"*/ + WRITE_UINT16(((t_Hmtd *)p_Ad)->cfg, + (uint16_t)(GET_UINT16(((t_Hmtd *)p_Ad)->cfg) | HMTD_CFG_NEXT_AD_EN)); + /* update next action descriptor */ + WRITE_UINT16(((t_Hmtd *)p_Ad)->nextAdIdx, (uint16_t)(adTableOffset >> 4)); + /* mark that Manip's HMTD is not used */ + *p_AdNewPtr = NULL; + break; + + default: + break; + } +} + +t_Error FmPcdManipBuildIpReassmScheme(t_FmPcd *p_FmPcd, t_Handle h_NetEnv, t_Handle h_CcTree, t_Handle h_Manip, bool isIpv4, uint8_t groupId) +{ + t_FmPcdManip *p_Manip = (t_FmPcdManip *)h_Manip; + t_FmPcdKgSchemeParams *p_SchemeParams = NULL; + + ASSERT_COND(p_FmPcd); + ASSERT_COND(h_NetEnv); + ASSERT_COND(p_Manip); + + /* scheme was already build, no need to check for IPv6 */ + if (p_Manip->ipReassmParams.h_Ipv4Scheme) + return E_OK; + + p_SchemeParams = XX_Malloc(sizeof(t_FmPcdKgSchemeParams)); + if (!p_SchemeParams) + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Memory allocation failed for scheme")); + + /* Configures the IPv4 or IPv6 scheme*/ + memset(p_SchemeParams, 0, sizeof(t_FmPcdKgSchemeParams)); + p_SchemeParams->netEnvParams.h_NetEnv = h_NetEnv; + p_SchemeParams->id.relativeSchemeId = + (uint8_t)((isIpv4 == TRUE) ? 
+ p_Manip->ipReassmParams.relativeSchemeId[0] :
+ p_Manip->ipReassmParams.relativeSchemeId[1]);
+ p_SchemeParams->schemeCounter.update = TRUE;
+#if (DPAA_VERSION >= 11)
+ p_SchemeParams->alwaysDirect = TRUE;
+ p_SchemeParams->bypassFqidGeneration = TRUE;
+#else
+ p_SchemeParams->keyExtractAndHashParams.hashDistributionNumOfFqids = 1;
+ p_SchemeParams->baseFqid = 0xFFFFFF; /*TODO- baseFqid*/
+#endif /* (DPAA_VERSION >= 11) */
+
+ setReassmSchemeParams(p_FmPcd, p_SchemeParams, h_CcTree, isIpv4, groupId);
+
+ /* Sets the new scheme */
/* NOTE(review): FM_PCD_KgSchemeSet's return value is stored without a NULL
 * check; if scheme creation fails this silently records a NULL handle and
 * still returns E_OK -- TODO confirm and add error handling. */
+ if (isIpv4)
+ p_Manip->ipReassmParams.h_Ipv4Scheme = FM_PCD_KgSchemeSet(p_FmPcd, p_SchemeParams);
+ else
+ p_Manip->ipReassmParams.h_Ipv6Scheme = FM_PCD_KgSchemeSet(p_FmPcd, p_SchemeParams);
+
+ XX_Free(p_SchemeParams);
+
+ return E_OK;
+}
+
/* Deletes the KeyGen scheme(s) previously created for IP reassembly by
 * FmPcdManipBuildIpReassmScheme; safe to call when either/both are unset. */
+t_Error FmPcdManipDeleteIpReassmSchemes(t_Handle h_Manip)
+{
+ t_FmPcdManip *p_Manip = (t_FmPcdManip *)h_Manip;
+
+ ASSERT_COND(p_Manip);
+
+ if (p_Manip->ipReassmParams.h_Ipv4Scheme)
+ FM_PCD_KgSchemeDelete(p_Manip->ipReassmParams.h_Ipv4Scheme);
+
+ if (p_Manip->ipReassmParams.h_Ipv6Scheme)
+ FM_PCD_KgSchemeDelete(p_Manip->ipReassmParams.h_Ipv6Scheme);
+
+ return E_OK;
+}
+
/* TRUE when this reassembly manip was configured for the IPv6 header type. */
+bool FmPcdManipIpReassmIsIpv6Hdr(t_Handle h_Manip)
+{
+ t_FmPcdManip *p_Manip = (t_FmPcdManip *)h_Manip;
+
+ ASSERT_COND(p_Manip);
+
+ return (p_Manip->ipReassmParams.hdr == HEADER_TYPE_IPv6);
+}
+
+#ifdef FM_CAPWAP_SUPPORT
/* Allocates and initializes an application-specific (CAPWAP) manip node that
 * moves the internal frame header from the frame to the buffer prefix;
 * returns the new node handle, or NULL on allocation/init failure (all
 * partial allocations are freed on the failure paths). */
+t_Handle FmPcdManipApplSpecificBuild(void)
+{
+ t_FmPcdManip *p_Manip;
+
+ p_Manip = (t_FmPcdManip*)XX_Malloc(sizeof(t_FmPcdManip));
+ if (!p_Manip)
+ {
+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("No memory"));
+ return NULL;
+ }
+ memset(p_Manip, 0, sizeof(t_FmPcdManip));
+
+ p_Manip->opcode = HMAN_OC_MV_INT_FRAME_HDR_FROM_FRM_TO_BUFFER_PREFFIX;
+ p_Manip->muramAllocate = FALSE;
+
+ p_Manip->h_Ad = (t_Handle)XX_Malloc(FM_PCD_CC_AD_ENTRY_SIZE * sizeof(uint8_t));
+ if (!p_Manip->h_Ad)
+ {
+ REPORT_ERROR(MAJOR, E_NO_MEMORY, ("Allocation of Manipulation action descriptor"));
+ XX_Free(p_Manip);
+ return NULL;
+ }
+ 
+ memset(p_Manip->h_Ad, 0, FM_PCD_CC_AD_ENTRY_SIZE * sizeof(uint8_t)); + + /*treatFdStatusFieldsAsErrors = TRUE hardcoded - assumption its always come after CAAM*/ + /*Application specific = type of flowId index, move internal frame header from data to IC, + SEC errors check*/ + if (MvIntFrameHeaderFromFrameToBufferPrefix(p_Manip, TRUE)!= E_OK) + { + XX_Free(p_Manip->h_Ad); + XX_Free(p_Manip); + return NULL; + } + return p_Manip; +} + +bool FmPcdManipIsCapwapApplSpecific(t_Handle h_Manip) +{ + t_FmPcdManip *p_Manip = (t_FmPcdManip *)h_Manip; + ASSERT_COND(h_Manip); + + return (bool)((p_Manip->opcode == HMAN_OC_CAPWAP_RMV_DTLS_IF_EXIST) ? TRUE : FALSE); +} +#endif /* FM_CAPWAP_SUPPORT */ +/*********************** End of inter-module routines ************************/ + + +/****************************************/ +/* API Init unit functions */ +/****************************************/ + +t_Handle FM_PCD_ManipNodeSet(t_Handle h_FmPcd, t_FmPcdManipParams *p_ManipParams) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; + t_FmPcdManip *p_Manip; + t_Error err; + + SANITY_CHECK_RETURN_VALUE(h_FmPcd,E_INVALID_HANDLE,NULL); + SANITY_CHECK_RETURN_VALUE(p_ManipParams,E_INVALID_HANDLE,NULL); + + p_Manip = ManipOrStatsSetNode(h_FmPcd, (t_Handle)p_ManipParams, FALSE); + if (!p_Manip) + return NULL; + + if (((p_Manip->opcode == HMAN_OC_IP_REASSEMBLY) || + (p_Manip->opcode == HMAN_OC_IP_FRAGMENTATION) || + (p_Manip->opcode == HMAN_OC_MV_INT_FRAME_HDR_FROM_FRM_TO_BUFFER_PREFFIX) || + (p_Manip->opcode == HMAN_OC) || + (p_Manip->opcode == HMAN_OC_IPSEC_MANIP)) && + (!FmPcdIsAdvancedOffloadSupported(p_FmPcd))) + { + REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Advanced-offload must be enabled")); + XX_Free(p_Manip); + return NULL; + } + p_Manip->h_Spinlock = XX_InitSpinlock(); + if (!p_Manip->h_Spinlock) + { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("UNSUPPORTED HEADER MANIPULATION TYPE")); + ReleaseManipHandler(p_Manip, p_FmPcd); + XX_Free(p_Manip); + return NULL; + } + 
INIT_LIST(&p_Manip->nodesLst); + + switch (p_Manip->opcode) + { + case (HMAN_OC_IP_REASSEMBLY): + /* IpReassembly */ + err = IpReassembly(&p_ManipParams->u.reassem, p_Manip); + break; + case (HMAN_OC_IP_FRAGMENTATION): + /* IpFragmentation */ + err = IpFragmentation(&p_ManipParams->u.frag.u.ipFrag ,p_Manip); + if (err) + break; + err = IPManip(p_Manip); + break; + case (HMAN_OC_IPSEC_MANIP) : + err = IPSecManip(p_ManipParams, p_Manip); + break; +#ifdef FM_CAPWAP_SUPPORT + case (HMAN_OC_RMV_N_OR_INSRT_INT_FRM_HDR): + /* HmanType1 */ + err = RmvHdrTillSpecLocNOrInsrtIntFrmHdr(&p_ManipParams->u.hdr.rmvParams, p_Manip); + break; + case (HMAN_OC_CAPWAP_FRAGMENTATION): + err = CapwapFragmentation(&p_ManipParams->fragOrReasmParams.u.capwapFragParams, + p_Manip, + p_FmPcd, + p_ManipParams->fragOrReasmParams.sgBpid); + if (err) + { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("UNSUPPORTED HEADER MANIPULATION TYPE")); + ReleaseManipHandler(p_Manip, p_FmPcd); + XX_Free(p_Manip); + return NULL; + } + if (p_Manip->insrt) + p_Manip->opcode = HMAN_OC_INSRT_HDR_BY_TEMPL_N_OR_FRAG_AFTER; + case (HMAN_OC_INSRT_HDR_BY_TEMPL_N_OR_FRAG_AFTER): + /* HmanType2 + if user asked only for fragmentation still need to allocate HmanType2 */ + err = InsrtHdrByTempl(&p_ManipParams->u.hdr.insrtParams, p_Manip, p_FmPcd); + break; + case (HMAN_OC_CAPWAP_REASSEMBLY): + err = CapwapReassembly(&p_ManipParams->fragOrReasmParams.u.capwapReasmParams, + p_Manip, + p_FmPcd, + p_ManipParams->fragOrReasmParams.sgBpid); + if (err) + { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("UNSUPPORTED HEADER MANIPULATION TYPE")); + ReleaseManipHandler(p_Manip, p_FmPcd); + XX_Free(p_Manip); + return NULL; + } + if (p_Manip->rmv) + p_Manip->opcode = HMAN_OC_CAPWAP_RMV_DTLS_IF_EXIST; + case (HMAN_OC_CAPWAP_RMV_DTLS_IF_EXIST): + /*CAPWAP decapsulation + if user asked only for reassembly still need to allocate CAPWAP decapsulation*/ + err = CapwapRmvDtlsHdr(p_FmPcd, p_Manip); + break; +#endif /* FM_CAPWAP_SUPPORT */ + case 
(HMAN_OC_MV_INT_FRAME_HDR_FROM_FRM_TO_BUFFER_PREFFIX): + /*Application Specific type 1*/ + err = MvIntFrameHeaderFromFrameToBufferPrefix(p_Manip, TRUE); + break; + case (HMAN_OC): + /* New Manip */ + err = CreateManipActionNew(p_Manip, p_ManipParams); + break; + default: + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("UNSUPPORTED HEADER MANIPULATION TYPE")); + ReleaseManipHandler(p_Manip, p_FmPcd); + XX_Free(p_Manip); + return NULL; + } + + if (err) + { + REPORT_ERROR(MAJOR, err, NO_MSG); + ReleaseManipHandler(p_Manip, p_FmPcd); + XX_Free(p_Manip); + return NULL; + } + + if (p_ManipParams->h_NextManip) + { + /* in the check routine we've verified that h_NextManip has no owners + * and that only supported types are allowed. */ + p_Manip->h_NextManip = p_ManipParams->h_NextManip; + /* save a "prev" pointer in h_NextManip */ + MANIP_SET_PREV(p_Manip->h_NextManip, p_Manip); + FmPcdManipUpdateOwner(p_Manip->h_NextManip, TRUE); + } + + return p_Manip; +} + +t_Error FM_PCD_ManipNodeReplace(t_Handle h_Manip, t_FmPcdManipParams *p_ManipParams) +{ + t_FmPcdManip *p_Manip = (t_FmPcdManip *)h_Manip, *p_FirstManip; + t_FmPcd *p_FmPcd = (t_FmPcd *)(p_Manip->h_FmPcd); + t_Error err; + uint8_t *p_WholeHmct = NULL, *p_ShadowHmct = NULL, *p_Hmtd = NULL; + t_List lstOfNodeshichPointsOnCrntMdfManip, *p_Pos; + t_CcNodeInformation *p_CcNodeInfo; + SANITY_CHECK_RETURN_ERROR(h_Manip,E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_ManipParams,E_INVALID_HANDLE); + + INIT_LIST(&lstOfNodeshichPointsOnCrntMdfManip); + + if ((p_ManipParams->type != e_FM_PCD_MANIP_HDR) || + (p_Manip->type != e_FM_PCD_MANIP_HDR)) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, + ("FM_PCD_ManipNodeReplace Functionality supported only for Header Manipulation.")); + + ASSERT_COND(p_Manip->opcode == HMAN_OC); + ASSERT_COND(p_Manip->manipParams.h_NextManip == p_Manip->h_NextManip); + memcpy((uint8_t*)&p_Manip->manipParams, p_ManipParams, sizeof(p_Manip->manipParams)); + p_Manip->manipParams.h_NextManip = p_Manip->h_NextManip; + + /* 
The replacement of the HdrManip depends on the node type.*/ + /* + * (1) If this is an independent node, all its owners should be updated. + * + * (2) If it is the head of a cascaded chain (it does not have a "prev" but + * it has a "next" and it has a "cascaded-next" indication), the next + * node remains unchanged, and the behavior is as in (1). + * + * (3) If it is not the head, but a part of a cascaded chain, it can be + * also replaced as a regular node with just one owner. + * + * (4) If it is a part of a chain implemented as a unified table, the + * whole table is replaced and the owners of the head node must be updated. + * + */ + /* lock shadow */ + if (!p_FmPcd->p_CcShadow) + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("CC Shadow not allocated")); + + if (!TRY_LOCK(p_FmPcd->h_ShadowSpinlock, &p_FmPcd->shadowLock)) + return ERROR_CODE(E_BUSY); + + /* this routine creates a new manip action in the CC Shadow. */ + err = CreateManipActionShadow(p_Manip, p_ManipParams); + if (err) + RETURN_ERROR(MINOR, err, NO_MSG); + + /* If the owners list is empty (these are NOT the "owners" counter, but pointers from CC) + * replace only HMTD and no lock is required. Otherwise + * lock the whole PCD + * In case 4 MANIP_IS_UNIFIED_NON_FIRST(p_Manip) - Use the head node instead. 
*/ + if (!FmPcdLockTryLockAll(p_FmPcd)) + { + DBG(TRACE, ("FmPcdLockTryLockAll failed")); + return ERROR_CODE(E_BUSY); + } + + p_ShadowHmct = (uint8_t*)PTR_MOVE(p_FmPcd->p_CcShadow, 16); + + p_FirstManip = (t_FmPcdManip*)GetManipInfo(p_Manip, e_MANIP_HANDLER_TABLE_OWNER); + ASSERT_COND(p_FirstManip); + + if (!LIST_IsEmpty(&p_FirstManip->nodesLst)) + UpdateAdPtrOfNodesWhichPointsOnCrntMdfManip(p_FirstManip, &lstOfNodeshichPointsOnCrntMdfManip); + + p_Hmtd = (uint8_t *)GetManipInfo(p_Manip, e_MANIP_HMTD); + ASSERT_COND(p_Hmtd); + BuildHmtd(p_FmPcd->p_CcShadow, (uint8_t *)p_Hmtd, p_ShadowHmct, ((t_FmPcd*)(p_Manip->h_FmPcd))); + + LIST_FOR_EACH(p_Pos, &lstOfNodeshichPointsOnCrntMdfManip) + { + p_CcNodeInfo = CC_NODE_F_OBJECT(p_Pos); + BuildHmtd(p_FmPcd->p_CcShadow, (uint8_t *)p_CcNodeInfo->h_CcNode, p_ShadowHmct, ((t_FmPcd*)(p_Manip->h_FmPcd))); + } + + p_WholeHmct = (uint8_t *)GetManipInfo(p_Manip, e_MANIP_HMCT); + ASSERT_COND(p_WholeHmct); + + /* re-build the HMCT in the original location */ + err = CreateManipActionBackToOrig(p_Manip, p_ManipParams); + if (err) + { + RELEASE_LOCK(p_FmPcd->shadowLock); + RETURN_ERROR(MINOR, err, NO_MSG); + } + + p_Hmtd = (uint8_t *)GetManipInfo(p_Manip, e_MANIP_HMTD); + ASSERT_COND(p_Hmtd); + BuildHmtd(p_FmPcd->p_CcShadow, (uint8_t *)p_Hmtd, p_WholeHmct,((t_FmPcd*)p_Manip->h_FmPcd)); + + /* If LIST > 0, create a list of p_Ad's that point to the HMCT. Join also t_HMTD to this list. 
+ * For each p_Hmct (from list+fixed): + * call Host Command to replace HMTD by a new one */ + LIST_FOR_EACH(p_Pos, &lstOfNodeshichPointsOnCrntMdfManip) + { + p_CcNodeInfo = CC_NODE_F_OBJECT(p_Pos); + BuildHmtd(p_FmPcd->p_CcShadow, (uint8_t *)p_CcNodeInfo->h_CcNode, p_WholeHmct, ((t_FmPcd*)(p_Manip->h_FmPcd))); + } + + + ReleaseLst(&lstOfNodeshichPointsOnCrntMdfManip); + + FmPcdLockUnlockAll(p_FmPcd); + + /* unlock shadow */ + RELEASE_LOCK(p_FmPcd->shadowLock); + + return E_OK; +} + +t_Error FM_PCD_ManipNodeDelete(t_Handle h_ManipNode) +{ + t_FmPcdManip *p_Manip = (t_FmPcdManip *)h_ManipNode; + + SANITY_CHECK_RETURN_ERROR(p_Manip,E_INVALID_HANDLE); + + if (p_Manip->owner) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("This manipulation node not be removed because this node is occupied, first - unbind this node ")); + + if (p_Manip->h_NextManip) + { + MANIP_SET_PREV(p_Manip->h_NextManip, NULL); + FmPcdManipUpdateOwner(p_Manip->h_NextManip, FALSE); + } + + if (p_Manip->p_Hmct && MANIP_IS_UNIFIED_FIRST(p_Manip)) + FM_MURAM_FreeMem(((t_FmPcd *)p_Manip->h_FmPcd)->h_FmMuram, p_Manip->p_Hmct); + + if (p_Manip->h_Spinlock) + { + XX_FreeSpinlock(p_Manip->h_Spinlock); + p_Manip->h_Spinlock = NULL; + } + + ReleaseManipHandler(p_Manip, p_Manip->h_FmPcd); + + XX_Free(h_ManipNode); + + return E_OK; +} + +t_Error FM_PCD_ManipGetStatistics(t_Handle h_ManipNode, t_FmPcdManipStats *p_FmPcdManipStats) +{ + t_FmPcdManip *p_Manip = (t_FmPcdManip *)h_ManipNode; + + SANITY_CHECK_RETURN_ERROR(p_Manip, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcdManipStats, E_NULL_POINTER); + + switch (p_Manip->opcode) + { + case (HMAN_OC_IP_REASSEMBLY): + return IpReassemblyStats(p_Manip, &p_FmPcdManipStats->u.reassem.u.ipReassem); + case (HMAN_OC_IP_FRAGMENTATION): + return IpFragmentationStats(p_Manip, &p_FmPcdManipStats->u.frag.u.ipFrag); + default: + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("no statistics to this type of manip")); + } + + return E_OK; +} + +#ifdef FM_CAPWAP_SUPPORT +t_Handle 
FM_PCD_StatisticsSetNode(t_Handle h_FmPcd, t_FmPcdStatsParams *p_StatsParams) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; + t_FmPcdManip *p_Manip; + t_Error err; + + SANITY_CHECK_RETURN_VALUE(h_FmPcd,E_INVALID_HANDLE,NULL); + SANITY_CHECK_RETURN_VALUE(p_StatsParams,E_INVALID_HANDLE,NULL); + + p_Manip = ManipOrStatsSetNode(h_FmPcd, (t_Handle)p_StatsParams, TRUE); + if (!p_Manip) + return NULL; + + switch (p_Manip->opcode) + { + case (HMAN_OC_CAPWAP_INDEXED_STATS): + /* Indexed statistics */ + err = IndxStats(p_StatsParams, p_Manip, p_FmPcd); + break; + default: + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("UNSUPPORTED Statistics type")); + ReleaseManipHandler(p_Manip, p_FmPcd); + XX_Free(p_Manip); + return NULL; + } + + if (err) + { + REPORT_ERROR(MAJOR, err, NO_MSG); + ReleaseManipHandler(p_Manip, p_FmPcd); + XX_Free(p_Manip); + return NULL; + } + + return p_Manip; +} +#endif /* FM_CAPWAP_SUPPORT */ diff --git a/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_manip.h b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_manip.h new file mode 100644 index 0000000..390ca6e --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_manip.h @@ -0,0 +1,480 @@ +/* + * Copyright 2008-2012 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + +/****************************************************************************** + @File fm_manip.h + + @Description FM PCD manip... 
+*//***************************************************************************/ +#ifndef __FM_MANIP_H +#define __FM_MANIP_H + +#include "std_ext.h" +#include "error_ext.h" +#include "list_ext.h" + +#include "fm_cc.h" + + +/***********************************************************************/ +/* Header manipulations defines */ +/***********************************************************************/ + +#define NUM_OF_SCRATCH_POOL_BUFFERS 1000 /*TODO - Change it!!*/ + +#define HMAN_OC_RMV_N_OR_INSRT_INT_FRM_HDR 0x2e +#define HMAN_OC_INSRT_HDR_BY_TEMPL_N_OR_FRAG_AFTER 0x31 +#define HMAN_OC_CAPWAP_FRAGMENTATION 0x33 +#define HMAN_OC_IP_MANIP 0x34 +#define HMAN_OC_IP_FRAGMENTATION 0x74 +#define HMAN_OC_IP_REASSEMBLY 0xB4 +#define HMAN_OC_IPSEC_MANIP 0xF4 +#define HMAN_OC_MV_INT_FRAME_HDR_FROM_FRM_TO_BUFFER_PREFFIX 0x2f +#define HMAN_OC_CAPWAP_RMV_DTLS_IF_EXIST 0x30 +#define HMAN_OC_CAPWAP_REASSEMBLY 0x11 /* dummy */ +#define HMAN_OC_CAPWAP_INDEXED_STATS 0x32 /* dummy */ +#define HMAN_OC 0x35 + +#define HMAN_RMV_HDR 0x80000000 +#define HMAN_INSRT_INT_FRM_HDR 0x40000000 + +#define UDP_CHECKSUM_FIELD_OFFSET_FROM_UDP 6 +#define UDP_CHECKSUM_FIELD_SIZE 2 +#define UDP_LENGTH_FIELD_OFFSET_FROM_UDP 4 + +#define IPv4_DSCECN_FIELD_OFFSET_FROM_IP 1 +#define IPv4_TOTALLENGTH_FIELD_OFFSET_FROM_IP 2 +#define IPv4_HDRCHECKSUM_FIELD_OFFSET_FROM_IP 10 +#define VLAN_TAG_FIELD_OFFSET_FROM_ETH 12 +#define IPv4_ID_FIELD_OFFSET_FROM_IP 4 + +#define IPv6_PAYLOAD_LENGTH_OFFSET_FROM_IP 4 +#define IPv6_NEXT_HEADER_OFFSET_FROM_IP 6 + +#define FM_PCD_MANIP_CAPWAP_REASM_TABLE_SIZE 0x80 +#define FM_PCD_MANIP_CAPWAP_REASM_TABLE_ALIGN 8 +#define FM_PCD_MANIP_CAPWAP_REASM_RFD_SIZE 32 +#define FM_PCD_MANIP_CAPWAP_REASM_AUTO_LEARNING_HASH_ENTRY_SIZE 4 +#define FM_PCD_MANIP_CAPWAP_REASM_TIME_OUT_ENTRY_SIZE 8 + + +#define FM_PCD_MANIP_CAPWAP_REASM_TIME_OUT_BETWEEN_FRAMES 0x40000000 +#define FM_PCD_MANIP_CAPWAP_REASM_HALT_ON_DUPLICATE_FRAG 0x10000000 +#define 
FM_PCD_MANIP_CAPWAP_REASM_AUTOMATIC_LEARNIN_HASH_8_WAYS 0x08000000 +#define FM_PCD_MANIP_CAPWAP_REASM_PR_COPY 0x00800000 + +#define FM_PCD_MANIP_CAPWAP_FRAG_COMPR_OPTION_FIELD_EN 0x80000000 + +#define FM_PCD_MANIP_INDEXED_STATS_ENTRY_SIZE 4 +#define FM_PCD_MANIP_INDEXED_STATS_CNIA 0x20000000 +#define FM_PCD_MANIP_INDEXED_STATS_DPD 0x10000000 + +#define FM_PCD_MANIP_IP_REASM_TABLE_SIZE 0x40 +#define FM_PCD_MANIP_IP_REASM_TABLE_ALIGN 8 + +#define FM_PCD_MANIP_IP_REASM_COMMON_PARAM_TABLE_SIZE 64 +#define FM_PCD_MANIP_IP_REASM_COMMON_PARAM_TABLE_ALIGN 8 +#define FM_PCD_MANIP_IP_REASM_TIME_OUT_BETWEEN_FRAMES 0x80000000 +#define FM_PCD_MANIP_IP_REASM_COUPLING_ENABLE 0x40000000 +#define FM_PCD_MANIP_IP_REASM_COUPLING_MASK 0xFF000000 +#define FM_PCD_MANIP_IP_REASM_COUPLING_SHIFT 24 +#define FM_PCD_MANIP_IP_REASM_LIODN_MASK 0x0000003F +#define FM_PCD_MANIP_IP_REASM_LIODN_SHIFT 56 +#define FM_PCD_MANIP_IP_REASM_ELIODN_MASK 0x000003c0 +#define FM_PCD_MANIP_IP_REASM_ELIODN_SHIFT 38 +#define FM_PCD_MANIP_IP_REASM_COMMON_INT_BUFFER_IDX_MASK 0x000000FF +#define FM_PCD_MANIP_IP_REASM_COMMON_INT_BUFFER_IDX_SHIFT 24 + +#define FM_PCD_MANIP_IP_MTU_SHIFT 16 +#define FM_PCD_MANIP_IP_NO_FRAGMENTATION 0xFFFF0000 +#define FM_PCD_MANIP_IP_CNIA 0x20000000 + +#define FM_PCD_MANIP_IP_REASSM_TIMEOUT_THREAD_THRESH 1024 +#define FM_PCD_MANIP_IP_FRAG_DF_SHIFT 28 +#define FM_PCD_MANIP_IP_FRAG_SCRATCH_BPID 24 +#define FM_PCD_MANIP_IP_FRAG_SG_BDID_EN 0x08000000 +#define FM_PCD_MANIP_IP_FRAG_SG_BDID_MASK 0xFF000000 +#define FM_PCD_MANIP_IP_FRAG_SG_BDID_SHIFT 24 + +#define FM_PCD_MANIP_IPSEC_DEC 0x10000000 +#define FM_PCD_MANIP_IPSEC_VIPV_EN 0x08000000 +#define FM_PCD_MANIP_IPSEC_ECN_EN 0x04000000 +#define FM_PCD_MANIP_IPSEC_DSCP_EN 0x02000000 +#define FM_PCD_MANIP_IPSEC_VIPL_EN 0x01000000 +#define FM_PCD_MANIP_IPSEC_NADEN 0x20000000 + +#define FM_PCD_MANIP_IPSEC_IP_HDR_LEN_MASK 0x00FF0000 +#define FM_PCD_MANIP_IPSEC_IP_HDR_LEN_SHIFT 16 + +#define e_FM_MANIP_IP_INDX 1 + +#define 
HMCD_OPCODE_GENERIC_RMV 0x01 +#define HMCD_OPCODE_GENERIC_INSRT 0x02 +#define HMCD_OPCODE_GENERIC_REPLACE 0x05 +#define HMCD_OPCODE_L2_RMV 0x08 +#define HMCD_OPCODE_L2_INSRT 0x09 +#define HMCD_OPCODE_VLAN_PRI_UPDATE 0x0B +#define HMCD_OPCODE_IPV4_UPDATE 0x0C +#define HMCD_OPCODE_IPV6_UPDATE 0x10 +#define HMCD_OPCODE_TCP_UDP_UPDATE 0x0E +#define HMCD_OPCODE_TCP_UDP_CHECKSUM 0x14 +#define HMCD_OPCODE_REPLACE_IP 0x12 + +#define HMCD_DSCP_VALUES 64 + +#define HMCD_BASIC_SIZE 4 +#define HMCD_PTR_SIZE 4 +#define HMCD_PARAM_SIZE 4 +#define HMCD_IPV4_ADDR_SIZE 4 +#define HMCD_IPV6_ADDR_SIZE 0x10 + +#define HMCD_LAST 0x00800000 + +#define HMCD_OC_SHIFT 24 + +#define HMCD_RMV_OFFSET_SHIFT 0 +#define HMCD_RMV_SIZE_SHIFT 8 + +#define HMCD_INSRT_OFFSET_SHIFT 0 +#define HMCD_INSRT_SIZE_SHIFT 8 + +#define HMTD_CFG_TYPE 0x4000 +#define HMTD_CFG_EXT_HMCT 0x0080 +#define HMTD_CFG_PRS_AFTER_HM 0x0040 +#define HMTD_CFG_NEXT_AD_EN 0x0020 + +#define HMCD_RMV_L2_ETHERNET 0 +#define HMCD_RMV_L2_STACKED_QTAGS 1 +#define HMCD_RMV_L2_ETHERNET_AND_MPLS 2 +#define HMCD_RMV_L2_MPLS 3 + +#define HMCD_INSRT_L2_MPLS 0 +#define HMCD_INSRT_N_UPDATE_L2_MPLS 1 +#define HMCD_INSRT_L2_SIZE_SHIFT 24 + +#define HMCD_L2_MODE_SHIFT 16 + +#define HMCD_VLAN_PRI_REP_MODE_SHIFT 16 +#define HMCD_VLAN_PRI_UPDATE 0 +#define HMCD_VLAN_PRI_UPDATE_DSCP_TO_VPRI 1 + +#define HMCD_IPV4_UPDATE_TTL 0x00000001 +#define HMCD_IPV4_UPDATE_TOS 0x00000002 +#define HMCD_IPV4_UPDATE_DST 0x00000020 +#define HMCD_IPV4_UPDATE_SRC 0x00000040 +#define HMCD_IPV4_UPDATE_ID 0x00000080 +#define HMCD_IPV4_UPDATE_TOS_SHIFT 8 + +#define HMCD_IPV6_UPDATE_HL 0x00000001 +#define HMCD_IPV6_UPDATE_TC 0x00000002 +#define HMCD_IPV6_UPDATE_DST 0x00000040 +#define HMCD_IPV6_UPDATE_SRC 0x00000080 +#define HMCD_IPV6_UPDATE_TC_SHIFT 8 + +#define HMCD_TCP_UDP_UPDATE_DST 0x00004000 +#define HMCD_TCP_UDP_UPDATE_SRC 0x00008000 +#define HMCD_TCP_UDP_UPDATE_SRC_SHIFT 16 + +#define HMCD_IP_REPLACE_REPLACE_IPV4 0x00000000 +#define HMCD_IP_REPLACE_REPLACE_IPV6 
0x00010000 +#define HMCD_IP_REPLACE_TTL_HL 0x00200000 +#define HMCD_IP_REPLACE_ID 0x00400000 + +#define HMCD_IP_REPLACE_L3HDRSIZE_SHIFT 24 + +#define DSCP_TO_VLAN_TABLE_SIZE 32 + +#define MANIP_GET_HMCT_SIZE(h_Manip) (((t_FmPcdManip *)h_Manip)->tableSize) +#define MANIP_GET_DATA_SIZE(h_Manip) (((t_FmPcdManip *)h_Manip)->dataSize) + +#define MANIP_GET_HMCT_PTR(h_Manip) (((t_FmPcdManip *)h_Manip)->p_Hmct) +#define MANIP_GET_DATA_PTR(h_Manip) (((t_FmPcdManip *)h_Manip)->p_Data) + +#define MANIP_SET_HMCT_PTR(h_Manip, h_NewPtr) (((t_FmPcdManip *)h_Manip)->p_Hmct = h_NewPtr) +#define MANIP_SET_DATA_PTR(h_Manip, h_NewPtr) (((t_FmPcdManip *)h_Manip)->p_Data = h_NewPtr) + +#define MANIP_GET_HMTD_PTR(h_Manip) (((t_FmPcdManip *)h_Manip)->h_Ad) +#define MANIP_DONT_REPARSE(h_Manip) (((t_FmPcdManip *)h_Manip)->dontParseAfterManip) +#define MANIP_SET_PREV(h_Manip, h_Prev) (((t_FmPcdManip *)h_Manip)->h_PrevManip = h_Prev) +#define MANIP_GET_OWNERS(h_Manip) (((t_FmPcdManip *)h_Manip)->owner) +#define MANIP_GET_TYPE(h_Manip) (((t_FmPcdManip *)h_Manip)->type) +#define MANIP_SET_UNIFIED_TBL_PTR_INDICATION(h_Manip) (((t_FmPcdManip *)h_Manip)->unifiedTablePtr = TRUE) +#define MANIP_GET_MURAM(h_Manip) (((t_FmPcd *)((t_FmPcdManip *)h_Manip)->h_FmPcd)->h_FmMuram) +#define MANIP_FREE_HMTD(h_Manip) \ + {if (((t_FmPcdManip *)h_Manip)->muramAllocate) \ + FM_MURAM_FreeMem(((t_FmPcd *)((t_FmPcdManip *)h_Manip)->h_FmPcd)->h_FmMuram, ((t_FmPcdManip *)h_Manip)->h_Ad);\ + else \ + XX_Free(((t_FmPcdManip *)h_Manip)->h_Ad); \ + ((t_FmPcdManip *)h_Manip)->h_Ad = NULL; \ + } +/* position regarding Manip SW structure */ +#define MANIP_IS_FIRST(h_Manip) (!(((t_FmPcdManip *)h_Manip)->h_PrevManip)) +#define MANIP_IS_CASCADE_NEXT(h_Manip) (((t_FmPcdManip *)h_Manip)->cascadedNext) +#define MANIP_IS_UNIFIED(h_Manip) (!(((t_FmPcdManip *)h_Manip)->unifiedPosition == e_MANIP_UNIFIED_NONE)) +#define MANIP_IS_UNIFIED_NON_FIRST(h_Manip) ((((t_FmPcdManip *)h_Manip)->unifiedPosition == e_MANIP_UNIFIED_MID) || \ + 
(((t_FmPcdManip *)h_Manip)->unifiedPosition == e_MANIP_UNIFIED_LAST)) +#define MANIP_IS_UNIFIED_NON_LAST(h_Manip) ((((t_FmPcdManip *)h_Manip)->unifiedPosition == e_MANIP_UNIFIED_FIRST) ||\ + (((t_FmPcdManip *)h_Manip)->unifiedPosition == e_MANIP_UNIFIED_MID)) +#define MANIP_IS_UNIFIED_FIRST(h_Manip) (((t_FmPcdManip *)h_Manip)->unifiedPosition == e_MANIP_UNIFIED_FIRST) +#define MANIP_IS_UNIFIED_LAST(h_Manip) (((t_FmPcdManip *)h_Manip)->unifiedPosition == e_MANIP_UNIFIED_LAST) + +#define MANIP_UPDATE_UNIFIED_POSITION(h_Manip) (((t_FmPcdManip *)h_Manip)->unifiedPosition = \ + (((t_FmPcdManip *)h_Manip)->unifiedPosition == e_MANIP_UNIFIED_NONE)? \ + e_MANIP_UNIFIED_LAST : e_MANIP_UNIFIED_MID) + +typedef enum e_ManipUnifiedPosition { + e_MANIP_UNIFIED_NONE = 0, + e_MANIP_UNIFIED_FIRST, + e_MANIP_UNIFIED_MID, + e_MANIP_UNIFIED_LAST +} e_ManipUnifiedPosition; + +typedef enum e_ManipInfo { + e_MANIP_HMTD, + e_MANIP_HMCT, + e_MANIP_HANDLER_TABLE_OWNER +}e_ManipInfo; +/***********************************************************************/ +/* Memory map */ +/***********************************************************************/ +#if defined(__MWERKS__) && !defined(__GNUC__) +#pragma pack(push,1) +#endif /* defined(__MWERKS__) && ... 
*/ + +typedef _Packed struct t_CapwapReasmPram { + volatile uint32_t mode; + volatile uint32_t autoLearnHashTblPtr; + volatile uint32_t intStatsTblPtr; + volatile uint32_t reasmFrmDescPoolTblPtr; + volatile uint32_t reasmFrmDescIndexPoolTblPtr; + volatile uint32_t timeOutTblPtr; + volatile uint32_t bufferPoolIdAndRisc1SetIndexes; + volatile uint32_t risc23SetIndexes; + volatile uint32_t risc4SetIndexesAndExtendedStatsTblPtr; + volatile uint32_t extendedStatsTblPtr; + volatile uint32_t expirationDelay; + volatile uint32_t totalProcessedFragCounter; + volatile uint32_t totalUnsuccessfulReasmFramesCounter; + volatile uint32_t totalDuplicatedFragCounter; + volatile uint32_t totalMalformdFragCounter; + volatile uint32_t totalTimeOutCounter; + volatile uint32_t totalSetBusyCounter; + volatile uint32_t totalRfdPoolBusyCounter; + volatile uint32_t totalDiscardedFragsCounter; + volatile uint32_t totalMoreThan16FramesCounter; + volatile uint32_t internalBufferBusy; + volatile uint32_t externalBufferBusy; + volatile uint32_t reserved1[4]; +} _PackedType t_CapwapReasmPram; + +typedef _Packed struct t_IpReassTbl { + volatile uint16_t waysNumAndSetSize; + volatile uint16_t autoLearnHashKeyMask; + volatile uint32_t ipReassCommonPrmTblPtr; + volatile uint32_t liodnAlAndAutoLearnHashTblPtrHi; + volatile uint32_t autoLearnHashTblPtrLow; + volatile uint32_t liodnSlAndAutoLearnSetLockTblPtrHi; + volatile uint32_t autoLearnSetLockTblPtrLow; + volatile uint16_t minFragSize; + volatile uint16_t reserved1; + volatile uint32_t totalSuccessfullyReasmFramesCounter; + volatile uint32_t totalValidFragmentCounter; + volatile uint32_t totalProcessedFragCounter; + volatile uint32_t totalMalformdFragCounter; + volatile uint32_t totalSetBusyCounter; + volatile uint32_t totalDiscardedFragsCounter; + volatile uint32_t totalMoreThan16FramesCounter; + volatile uint32_t reserved2[2]; +} _PackedType t_IpReassTbl; + +typedef _Packed struct t_IpReassCommonTbl { + volatile uint32_t timeoutModeAndFqid; + 
volatile uint32_t reassFrmDescIndexPoolTblPtr; + volatile uint32_t liodnAndReassFrmDescPoolPtrHi; + volatile uint32_t reassFrmDescPoolPtrLow; + volatile uint32_t timeOutTblPtr; + volatile uint32_t expirationDelay; + volatile uint32_t internalBufferManagement; + volatile uint32_t reserved2; + volatile uint32_t totalTimeOutCounter; + volatile uint32_t totalRfdPoolBusyCounter; + volatile uint32_t totalInternalBufferBusy; + volatile uint32_t totalExternalBufferBusy; + volatile uint32_t totalSgFragmentCounter; + volatile uint32_t totalDmaSemaphoreDepletionCounter; + volatile uint32_t reserved3[2]; +} _PackedType t_IpReassCommonTbl; + +typedef _Packed struct t_Hmtd { + volatile uint16_t cfg; + volatile uint8_t eliodnOffset; + volatile uint8_t extHmcdBasePtrHi; + volatile uint32_t hmcdBasePtr; + volatile uint16_t nextAdIdx; + volatile uint8_t res1; + volatile uint8_t opCode; + volatile uint32_t res2; +} _PackedType t_Hmtd; + +#if defined(__MWERKS__) && !defined(__GNUC__) +#pragma pack(pop) +#endif /* defined(__MWERKS__) && ... 
*/ + + +/***********************************************************************/ +/* Driver's internal structures */ +/***********************************************************************/ +typedef struct +{ + t_Handle p_AutoLearnHashTbl; + t_Handle p_ReassmFrmDescrPoolTbl; + t_Handle p_ReassmFrmDescrIndxPoolTbl; + t_Handle p_TimeOutTbl; + uint16_t maxNumFramesInProcess; + uint8_t numOfTasks; + //uint8_t poolId; + uint8_t prOffset; + uint16_t dataOffset; + uint8_t sgBpid; + uint8_t hwPortId; + uint32_t fqidForTimeOutFrames; + uint32_t timeoutRoutineRequestTime; + uint32_t bitFor1Micro; +} t_FragParams; + +typedef struct +{ + t_AdOfTypeContLookup *p_Frag; +#if (DPAA_VERSION == 10) + uint8_t scratchBpid; +#endif /* (DPAA_VERSION == 10) */ +} t_IpFragParams; + +typedef struct t_IpReassmParams +{ + t_Handle h_Ipv4Ad; + t_Handle h_Ipv6Ad; + bool ipv6Assigned; + e_NetHeaderType hdr; /* Header selection */ + t_IpReassCommonTbl *p_IpReassCommonTbl; + t_IpReassTbl *p_Ipv4ReassTbl; + t_IpReassTbl *p_Ipv6ReassTbl; + uintptr_t ipv4AutoLearnHashTblAddr; + uintptr_t ipv6AutoLearnHashTblAddr; + uintptr_t ipv4AutoLearnSetLockTblAddr; + uintptr_t ipv6AutoLearnSetLockTblAddr; + uintptr_t reassFrmDescrIndxPoolTblAddr; + uintptr_t reassFrmDescrPoolTblAddr; + uintptr_t timeOutTblAddr; + uintptr_t internalBufferPoolManagementIndexAddr; + uintptr_t internalBufferPoolAddr; + uint32_t maxNumFramesInProcess; + uint8_t sgBpid; + uint8_t dataMemId; + uint16_t dataLiodnOffset; + uint32_t fqidForTimeOutFrames; + e_FmPcdManipReassemTimeOutMode timeOutMode; + uint32_t timeoutThresholdForReassmProcess; + uint16_t minFragSize[2]; + e_FmPcdManipReassemWaysNumber numOfFramesPerHashEntry[2]; + uint8_t relativeSchemeId[2]; + t_Handle h_Ipv4Scheme; + t_Handle h_Ipv6Scheme; + uint32_t nonConsistentSpFqid; +} t_IpReassmParams; + + +typedef struct{ + e_FmPcdManipType type; + t_FmPcdManipParams manipParams; + bool muramAllocate; + t_Handle h_Ad; + uint32_t opcode; + bool rmv; + bool insrt; + t_Handle 
h_NextManip; + t_Handle h_PrevManip; + /* HdrManip parameters*/ + uint8_t *p_Hmct; + uint8_t *p_Data; + bool dontParseAfterManip; + bool fieldUpdate; + bool custom; + uint16_t tableSize; + uint8_t dataSize; + bool cascadedNext; + e_ManipUnifiedPosition unifiedPosition; + /* end HdrManip */ + uint8_t *p_Template; + t_Handle h_Frag; + bool frag; + bool reassm; + uint16_t sizeForFragmentation; + uint8_t owner; + uint32_t updateParams; + uint32_t shadowUpdateParams; + t_FragParams fragParams; + union { + t_IpReassmParams ipReassmParams; + t_IpFragParams ipFragParams; + }; + uint8_t icOffset; + uint16_t ownerTmp; + bool cnia; + t_Handle p_StatsTbl; + t_Handle h_FmPcd; + t_List nodesLst; + t_Handle h_Spinlock; + +} t_FmPcdManip; + +typedef struct t_FmPcdCcSavedManipParams +{ + union + { + struct + { + uint16_t dataOffset; + //uint8_t poolId; + }capwapParams; + struct + { + uint16_t dataOffset; + uint8_t poolId; + }ipParams; + }; + +} t_FmPcdCcSavedManipParams; + + +#endif /* __FM_MANIP_H */ diff --git a/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_pcd.c b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_pcd.c new file mode 100644 index 0000000..747347c --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_pcd.c @@ -0,0 +1,2114 @@ +/* + * Copyright 2008-2012 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + +/****************************************************************************** + @File fm_pcd.c + + @Description FM PCD ... 
+*//***************************************************************************/ +#include "std_ext.h" +#include "error_ext.h" +#include "string_ext.h" +#include "xx_ext.h" +#include "sprint_ext.h" +#include "debug_ext.h" +#include "net_ext.h" +#include "fm_ext.h" +#include "fm_pcd_ext.h" + +#include "fm_common.h" +#include "fm_pcd.h" +#include "fm_pcd_ipc.h" +#include "fm_hc.h" +#include "fm_muram_ext.h" + + +/****************************************/ +/* static functions */ +/****************************************/ + +static t_Error CheckFmPcdParameters(t_FmPcd *p_FmPcd) +{ + if (!p_FmPcd->h_Fm) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("h_Fm has to be initialized")); + + if (p_FmPcd->guestId == NCSW_MASTER_ID) + { + if (p_FmPcd->p_FmPcdKg && !p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Something WRONG")); + + if (p_FmPcd->p_FmPcdPlcr && !p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Something WRONG")); + + if (!p_FmPcd->f_Exception) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("f_FmPcdExceptions has to be initialized")); + + if ((!p_FmPcd->f_FmPcdIndexedException) && (p_FmPcd->p_FmPcdPlcr || p_FmPcd->p_FmPcdKg)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("f_FmPcdIndexedException has to be initialized")); + + if (p_FmPcd->p_FmPcdDriverParam->prsMaxParseCycleLimit > PRS_MAX_CYCLE_LIMIT) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("prsMaxParseCycleLimit has to be less than 8191")); + } + + return E_OK; +} + +static volatile bool blockingFlag = FALSE; +static void IpcMsgCompletionCB(t_Handle h_FmPcd, + uint8_t *p_Msg, + uint8_t *p_Reply, + uint32_t replyLength, + t_Error status) +{ + UNUSED(h_FmPcd);UNUSED(p_Msg);UNUSED(p_Reply);UNUSED(replyLength);UNUSED(status); + blockingFlag = FALSE; +} + +static t_Error IpcMsgHandlerCB(t_Handle h_FmPcd, + uint8_t *p_Msg, + uint32_t msgLength, + uint8_t *p_Reply, + uint32_t *p_ReplyLength) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + t_Error err = E_OK; + t_FmPcdIpcMsg 
*p_IpcMsg = (t_FmPcdIpcMsg*)p_Msg; + t_FmPcdIpcReply *p_IpcReply = (t_FmPcdIpcReply*)p_Reply; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR((msgLength >= sizeof(uint32_t)), E_INVALID_VALUE); + +#ifdef DISABLE_SANITY_CHECKS + UNUSED(msgLength); +#endif /* DISABLE_SANITY_CHECKS */ + + ASSERT_COND(p_Msg); + + memset(p_IpcReply, 0, (sizeof(uint8_t) * FM_PCD_MAX_REPLY_SIZE)); + *p_ReplyLength = 0; + + switch (p_IpcMsg->msgId) + { + case (FM_PCD_MASTER_IS_ALIVE): + *(uint8_t*)(p_IpcReply->replyBody) = 1; + p_IpcReply->error = E_OK; + *p_ReplyLength = sizeof(uint32_t) + sizeof(uint8_t); + break; + case (FM_PCD_MASTER_IS_ENABLED): + /* count partitions registrations */ + if (p_FmPcd->enabled) + p_FmPcd->numOfEnabledGuestPartitionsPcds++; + *(uint8_t*)(p_IpcReply->replyBody) = (uint8_t)p_FmPcd->enabled; + p_IpcReply->error = E_OK; + *p_ReplyLength = sizeof(uint32_t) + sizeof(uint8_t); + break; + case (FM_PCD_GUEST_DISABLE): + if (p_FmPcd->numOfEnabledGuestPartitionsPcds) + { + p_FmPcd->numOfEnabledGuestPartitionsPcds--; + p_IpcReply->error = E_OK; + } + else + { + REPORT_ERROR(MINOR, E_INVALID_STATE,("Trying to disable an unregistered partition")); + p_IpcReply->error = E_INVALID_STATE; + } + *p_ReplyLength = sizeof(uint32_t); + break; + case (FM_PCD_GET_COUNTER): + { + e_FmPcdCounters inCounter; + uint32_t outCounter; + + memcpy((uint8_t*)&inCounter, p_IpcMsg->msgBody, sizeof(uint32_t)); + outCounter = FM_PCD_GetCounter(h_FmPcd, inCounter); + memcpy(p_IpcReply->replyBody, (uint8_t*)&outCounter, sizeof(uint32_t)); + p_IpcReply->error = E_OK; + *p_ReplyLength = sizeof(uint32_t) + sizeof(uint32_t); + break; + } + case (FM_PCD_ALLOC_KG_SCHEMES): + { + t_FmPcdIpcKgSchemesParams ipcSchemesParams; + + memcpy((uint8_t*)&ipcSchemesParams, p_IpcMsg->msgBody, sizeof(t_FmPcdIpcKgSchemesParams)); + err = FmPcdKgAllocSchemes(h_FmPcd, + ipcSchemesParams.numOfSchemes, + ipcSchemesParams.guestId, + p_IpcReply->replyBody); + p_IpcReply->error = err; + 
*p_ReplyLength = sizeof(uint32_t) + ipcSchemesParams.numOfSchemes*sizeof(uint8_t); + break; + } + case (FM_PCD_FREE_KG_SCHEMES): + { + t_FmPcdIpcKgSchemesParams ipcSchemesParams; + + memcpy((uint8_t*)&ipcSchemesParams, p_IpcMsg->msgBody, sizeof(t_FmPcdIpcKgSchemesParams)); + err = FmPcdKgFreeSchemes(h_FmPcd, + ipcSchemesParams.numOfSchemes, + ipcSchemesParams.guestId, + ipcSchemesParams.schemesIds); + p_IpcReply->error = err; + *p_ReplyLength = sizeof(uint32_t); + break; + } + case (FM_PCD_ALLOC_KG_CLSPLAN): + { + t_FmPcdIpcKgClsPlanParams ipcKgClsPlanParams; + + memcpy((uint8_t*)&ipcKgClsPlanParams, p_IpcMsg->msgBody, sizeof(t_FmPcdIpcKgClsPlanParams)); + err = KgAllocClsPlanEntries(h_FmPcd, + ipcKgClsPlanParams.numOfClsPlanEntries, + ipcKgClsPlanParams.guestId, + p_IpcReply->replyBody); + p_IpcReply->error = err; + *p_ReplyLength = sizeof(uint32_t) + sizeof(uint8_t); + break; + } + case (FM_PCD_FREE_KG_CLSPLAN): + { + t_FmPcdIpcKgClsPlanParams ipcKgClsPlanParams; + + memcpy((uint8_t*)&ipcKgClsPlanParams, p_IpcMsg->msgBody, sizeof(t_FmPcdIpcKgClsPlanParams)); + KgFreeClsPlanEntries(h_FmPcd, + ipcKgClsPlanParams.numOfClsPlanEntries, + ipcKgClsPlanParams.guestId, + ipcKgClsPlanParams.clsPlanBase); + *p_ReplyLength = sizeof(uint32_t); + break; + } + case (FM_PCD_ALLOC_PROFILES): + { + t_FmIpcResourceAllocParams ipcAllocParams; + uint16_t base; + memcpy(&ipcAllocParams, p_IpcMsg->msgBody, sizeof(t_FmIpcResourceAllocParams)); + base = PlcrAllocProfilesForPartition(h_FmPcd, + ipcAllocParams.base, + ipcAllocParams.num, + ipcAllocParams.guestId); + memcpy(p_IpcReply->replyBody, (uint16_t*)&base, sizeof(uint16_t)); + *p_ReplyLength = sizeof(uint32_t) + sizeof(uint16_t); + break; + } + case (FM_PCD_FREE_PROFILES): + { + t_FmIpcResourceAllocParams ipcAllocParams; + memcpy(&ipcAllocParams, p_IpcMsg->msgBody, sizeof(t_FmIpcResourceAllocParams)); + PlcrFreeProfilesForPartition(h_FmPcd, + ipcAllocParams.base, + ipcAllocParams.num, + ipcAllocParams.guestId); + break; + } + case 
(FM_PCD_SET_PORT_PROFILES): + { + t_FmIpcResourceAllocParams ipcAllocParams; + memcpy(&ipcAllocParams, p_IpcMsg->msgBody, sizeof(t_FmIpcResourceAllocParams)); + PlcrSetPortProfiles(h_FmPcd, + ipcAllocParams.guestId, + ipcAllocParams.num, + ipcAllocParams.base); + break; + } + case (FM_PCD_CLEAR_PORT_PROFILES): + { + t_FmIpcResourceAllocParams ipcAllocParams; + memcpy(&ipcAllocParams, p_IpcMsg->msgBody, sizeof(t_FmIpcResourceAllocParams)); + PlcrClearPortProfiles(h_FmPcd, + ipcAllocParams.guestId); + break; + } + case (FM_PCD_GET_SW_PRS_OFFSET): + { + t_FmPcdIpcSwPrsLable ipcSwPrsLable; + uint32_t swPrsOffset; + + memcpy((uint8_t*)&ipcSwPrsLable, p_IpcMsg->msgBody, sizeof(t_FmPcdIpcSwPrsLable)); + swPrsOffset = + FmPcdGetSwPrsOffset(h_FmPcd, + (e_NetHeaderType)ipcSwPrsLable.enumHdr, + ipcSwPrsLable.indexPerHdr); + memcpy(p_IpcReply->replyBody, (uint8_t*)&swPrsOffset, sizeof(uint32_t)); + *p_ReplyLength = sizeof(uint32_t) + sizeof(uint32_t); + break; + } + case (FM_PCD_PRS_INC_PORT_STATS): + { + t_FmPcdIpcPrsIncludePort ipcPrsIncludePort; + + memcpy((uint8_t*)&ipcPrsIncludePort, p_IpcMsg->msgBody, sizeof(t_FmPcdIpcPrsIncludePort)); + PrsIncludePortInStatistics(h_FmPcd, + ipcPrsIncludePort.hardwarePortId, + ipcPrsIncludePort.include); + break; + } + default: + *p_ReplyLength = 0; + RETURN_ERROR(MINOR, E_INVALID_SELECTION, ("command not found!!!")); + } + return E_OK; +} + +static uint32_t NetEnvLock(t_Handle h_NetEnv) +{ + ASSERT_COND(h_NetEnv); + return XX_LockIntrSpinlock(((t_FmPcdNetEnv*)h_NetEnv)->h_Spinlock); +} + +static void NetEnvUnlock(t_Handle h_NetEnv, uint32_t intFlags) +{ + ASSERT_COND(h_NetEnv); + XX_UnlockIntrSpinlock(((t_FmPcdNetEnv*)h_NetEnv)->h_Spinlock, intFlags); +} + +static void EnqueueLockToFreeLst(t_FmPcd *p_FmPcd, t_FmPcdLock *p_Lock) +{ + uint32_t intFlags; + + intFlags = XX_LockIntrSpinlock(p_FmPcd->h_Spinlock); + LIST_AddToTail(&p_Lock->node, &p_FmPcd->freeLocksLst); + XX_UnlockIntrSpinlock(p_FmPcd->h_Spinlock, intFlags); +} + +static 
t_FmPcdLock * DequeueLockFromFreeLst(t_FmPcd *p_FmPcd) +{ + t_FmPcdLock *p_Lock = NULL; + uint32_t intFlags; + + intFlags = XX_LockIntrSpinlock(p_FmPcd->h_Spinlock); + if (!LIST_IsEmpty(&p_FmPcd->freeLocksLst)) + { + p_Lock = FM_PCD_LOCK_OBJ(p_FmPcd->freeLocksLst.p_Next); + LIST_DelAndInit(&p_Lock->node); + } + XX_UnlockIntrSpinlock(p_FmPcd->h_Spinlock, intFlags); + + return p_Lock; +} + +static void EnqueueLockToAcquiredLst(t_FmPcd *p_FmPcd, t_FmPcdLock *p_Lock) +{ + uint32_t intFlags; + + intFlags = XX_LockIntrSpinlock(p_FmPcd->h_Spinlock); + LIST_AddToTail(&p_Lock->node, &p_FmPcd->acquiredLocksLst); + XX_UnlockIntrSpinlock(p_FmPcd->h_Spinlock, intFlags); +} + +static t_Error FillFreeLocksLst(t_FmPcd *p_FmPcd) +{ + t_FmPcdLock *p_Lock; + int i; + + for (i=0; i<10; i++) + { + p_Lock = (t_FmPcdLock *)XX_Malloc(sizeof(t_FmPcdLock)); + if (!p_Lock) + RETURN_ERROR(MINOR, E_NO_MEMORY, ("FM-PCD lock obj!")); + memset(p_Lock, 0, sizeof(t_FmPcdLock)); + INIT_LIST(&p_Lock->node); + p_Lock->h_Spinlock = XX_InitSpinlock(); + if (!p_Lock->h_Spinlock) + { + XX_Free(p_Lock); + RETURN_ERROR(MINOR, E_INVALID_STATE, ("FM-PCD spinlock obj!")); + } + EnqueueLockToFreeLst(p_FmPcd, p_Lock); + } + + return E_OK; +} + +static void ReleaseFreeLocksLst(t_FmPcd *p_FmPcd) +{ + t_FmPcdLock *p_Lock; + + p_Lock = DequeueLockFromFreeLst(p_FmPcd); + while (p_Lock) + { + XX_FreeSpinlock(p_Lock->h_Spinlock); + XX_Free(p_Lock); + p_Lock = DequeueLockFromFreeLst(p_FmPcd); + } +} + + + +/*****************************************************************************/ +/* Inter-module API routines */ +/*****************************************************************************/ + +void FmPcdSetClsPlanGrpId(t_FmPcd *p_FmPcd, uint8_t netEnvId, uint8_t clsPlanGrpId) +{ + ASSERT_COND(p_FmPcd); + p_FmPcd->netEnvs[netEnvId].clsPlanGrpId = clsPlanGrpId; +} + +t_Error PcdGetClsPlanGrpParams(t_FmPcd *p_FmPcd, t_FmPcdKgInterModuleClsPlanGrpParams *p_GrpParams) +{ + uint8_t netEnvId = p_GrpParams->netEnvId; + 
int i, k, j; + + ASSERT_COND(p_FmPcd); + if (p_FmPcd->netEnvs[netEnvId].clsPlanGrpId != ILLEGAL_CLS_PLAN) + { + p_GrpParams->grpExists = TRUE; + p_GrpParams->clsPlanGrpId = p_FmPcd->netEnvs[netEnvId].clsPlanGrpId; + return E_OK; + } + + for (i=0; ((i < FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS) && + (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[0].hdr != HEADER_TYPE_NONE)); i++) + { + for (k=0; ((k < FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS) && + (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[k].hdr != HEADER_TYPE_NONE)); k++) + { + /* if an option exists, add it to the opts list */ + if (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[k].opt) + { + /* check if this option already exists, add if it doesn't */ + for (j = 0;j<p_GrpParams->numOfOptions;j++) + { + if (p_GrpParams->options[j] == p_FmPcd->netEnvs[netEnvId].units[i].hdrs[k].opt) + break; + } + p_GrpParams->optVectors[j] |= p_FmPcd->netEnvs[netEnvId].unitsVectors[i]; + if (j == p_GrpParams->numOfOptions) + { + p_GrpParams->options[p_GrpParams->numOfOptions] = p_FmPcd->netEnvs[netEnvId].units[i].hdrs[k].opt; + p_GrpParams->numOfOptions++; + } + } + } + } + + if (p_GrpParams->numOfOptions == 0) + { + if (p_FmPcd->p_FmPcdKg->emptyClsPlanGrpId != ILLEGAL_CLS_PLAN) + { + p_GrpParams->grpExists = TRUE; + p_GrpParams->clsPlanGrpId = p_FmPcd->p_FmPcdKg->emptyClsPlanGrpId; + } + } + + return E_OK; + +} + +t_Error PcdGetVectorForOpt(t_FmPcd *p_FmPcd, uint8_t netEnvId, protocolOpt_t opt, uint32_t *p_Vector) +{ + uint8_t j,k; + + *p_Vector = 0; + + ASSERT_COND(p_FmPcd); + for (j=0; ((j < FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS) && + (p_FmPcd->netEnvs[netEnvId].units[j].hdrs[0].hdr != HEADER_TYPE_NONE)); j++) + { + for (k=0; ((k < FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS) && + (p_FmPcd->netEnvs[netEnvId].units[j].hdrs[k].hdr != HEADER_TYPE_NONE)); k++) + { + if (p_FmPcd->netEnvs[netEnvId].units[j].hdrs[k].opt == opt) + *p_Vector |= p_FmPcd->netEnvs[netEnvId].unitsVectors[j]; + } + } + + if (!*p_Vector) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, 
("Requested option was not defined for this Network Environment Characteristics module")); + else + return E_OK; +} + +t_Error PcdGetUnitsVector(t_FmPcd *p_FmPcd, t_NetEnvParams *p_Params) +{ + int i; + + ASSERT_COND(p_FmPcd); + ASSERT_COND(p_Params->netEnvId < FM_MAX_NUM_OF_PORTS); + + p_Params->vector = 0; + for (i=0; i<p_Params->numOfDistinctionUnits ;i++) + { + if (p_FmPcd->netEnvs[p_Params->netEnvId].units[p_Params->unitIds[i]].hdrs[0].hdr == HEADER_TYPE_NONE) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Requested unit was not defined for this Network Environment Characteristics module")); + ASSERT_COND(p_FmPcd->netEnvs[p_Params->netEnvId].unitsVectors[p_Params->unitIds[i]]); + p_Params->vector |= p_FmPcd->netEnvs[p_Params->netEnvId].unitsVectors[p_Params->unitIds[i]]; + } + + return E_OK; +} + +bool PcdNetEnvIsUnitWithoutOpts(t_FmPcd *p_FmPcd, uint8_t netEnvId, uint32_t unitVector) +{ + int i=0, k; + + ASSERT_COND(p_FmPcd); + /* check whether a given unit may be used by non-clsPlan users. 
*/ + /* first, recognize the unit by its vector */ + while (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[0].hdr != HEADER_TYPE_NONE) + { + if (p_FmPcd->netEnvs[netEnvId].unitsVectors[i] == unitVector) + { + for (k=0; + ((k < FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS) && + (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[k].hdr != HEADER_TYPE_NONE)); + k++) + /* check that no option exists */ + if ((protocolOpt_t)p_FmPcd->netEnvs[netEnvId].units[i].hdrs[k].opt) + return FALSE; + break; + } + i++; + } + /* assert that a unit was found to mach the vector */ + ASSERT_COND(p_FmPcd->netEnvs[netEnvId].units[i].hdrs[0].hdr != HEADER_TYPE_NONE); + + return TRUE; +} +bool FmPcdNetEnvIsHdrExist(t_Handle h_FmPcd, uint8_t netEnvId, e_NetHeaderType hdr) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + int i, k; + + ASSERT_COND(p_FmPcd); + + for (i=0; ((i < FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS) && + (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[0].hdr != HEADER_TYPE_NONE)); i++) + { + for (k=0; ((k < FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS) && + (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[k].hdr != HEADER_TYPE_NONE)); k++) + if (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[k].hdr == hdr) + return TRUE; + } + for (i=0; ((i < FM_PCD_MAX_NUM_OF_ALIAS_HDRS) && + (p_FmPcd->netEnvs[netEnvId].aliasHdrs[i].hdr != HEADER_TYPE_NONE)); i++) + { + if (p_FmPcd->netEnvs[netEnvId].aliasHdrs[i].hdr == hdr) + return TRUE; + } + + return FALSE; +} + +uint8_t FmPcdNetEnvGetUnitId(t_FmPcd *p_FmPcd, uint8_t netEnvId, e_NetHeaderType hdr, bool interchangable, protocolOpt_t opt) +{ + uint8_t i, k; + + ASSERT_COND(p_FmPcd); + + if (interchangable) + { + for (i=0; (i < FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS) && + (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[0].hdr != HEADER_TYPE_NONE); i++) + { + for (k=0; (k < FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS) && + (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[k].hdr != HEADER_TYPE_NONE); k++) + { + if ((p_FmPcd->netEnvs[netEnvId].units[i].hdrs[k].hdr == hdr) && + 
(p_FmPcd->netEnvs[netEnvId].units[i].hdrs[k].opt == opt)) + + return i; + } + } + } + else + { + for (i=0; (i < FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS) && + (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[0].hdr != HEADER_TYPE_NONE); i++) + if ((p_FmPcd->netEnvs[netEnvId].units[i].hdrs[0].hdr == hdr) && + (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[0].opt == opt) && + (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[1].hdr == HEADER_TYPE_NONE)) + return i; + + for (i=0; (i < FM_PCD_MAX_NUM_OF_ALIAS_HDRS) && + (p_FmPcd->netEnvs[netEnvId].aliasHdrs[i].hdr != HEADER_TYPE_NONE); i++) + if ((p_FmPcd->netEnvs[netEnvId].aliasHdrs[i].hdr == hdr) && + (p_FmPcd->netEnvs[netEnvId].aliasHdrs[i].opt == opt)) + return p_FmPcd->netEnvs[netEnvId].aliasHdrs[i].aliasHdr; + } + + return FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS; +} + +t_Error FmPcdUnregisterReassmPort(t_Handle h_FmPcd, t_Handle h_IpReasmCommonPramTbl) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + t_FmPcdCcIpReassmTimeoutParams ccIpReassmTimeoutParams = {0}; + uint8_t result; + t_Error err = E_OK; + + ASSERT_COND(p_FmPcd); + ASSERT_COND(h_IpReasmCommonPramTbl); + + ccIpReassmTimeoutParams.iprcpt = (uint32_t)(XX_VirtToPhys(h_IpReasmCommonPramTbl) - p_FmPcd->physicalMuramBase); + ccIpReassmTimeoutParams.activate = FALSE; /*Disable Timeout Task*/ + + if ((err = FmHcPcdCcIpTimeoutReassm(p_FmPcd->h_Hc, &ccIpReassmTimeoutParams, &result)) != E_OK) + RETURN_ERROR(MAJOR, err, NO_MSG); + + switch (result) + { + case (0): + return E_OK; + case (1): + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("")); + case (2): + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("")); + case (3): + RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("Disable Timeout Task with invalid IPRCPT")); + default: + RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG); + } + + return E_OK; +} + +e_NetHeaderType FmPcdGetAliasHdr(t_FmPcd *p_FmPcd, uint8_t netEnvId, e_NetHeaderType hdr) +{ + int i; + + ASSERT_COND(p_FmPcd); + ASSERT_COND(netEnvId < FM_MAX_NUM_OF_PORTS); + + for (i=0; (i < FM_PCD_MAX_NUM_OF_ALIAS_HDRS) 
+ && (p_FmPcd->netEnvs[netEnvId].aliasHdrs[i].hdr != HEADER_TYPE_NONE); i++) + { + if (p_FmPcd->netEnvs[netEnvId].aliasHdrs[i].hdr == hdr) + return p_FmPcd->netEnvs[netEnvId].aliasHdrs[i].aliasHdr; + } + + return HEADER_TYPE_NONE; +} + +void FmPcdPortRegister(t_Handle h_FmPcd, t_Handle h_FmPort, uint8_t hardwarePortId) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + uint16_t swPortIndex = 0; + + ASSERT_COND(h_FmPcd); + HW_PORT_ID_TO_SW_PORT_INDX(swPortIndex, hardwarePortId); + p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].h_FmPort = h_FmPort; +} + +uint32_t FmPcdGetLcv(t_Handle h_FmPcd, uint32_t netEnvId, uint8_t hdrNum) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + + ASSERT_COND(h_FmPcd); + return p_FmPcd->netEnvs[netEnvId].lcvs[hdrNum]; +} + +uint32_t FmPcdGetMacsecLcv(t_Handle h_FmPcd, uint32_t netEnvId) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + + ASSERT_COND(h_FmPcd); + return p_FmPcd->netEnvs[netEnvId].macsecVector; +} + +uint8_t FmPcdGetNetEnvId(t_Handle h_NetEnv) +{ + return ((t_FmPcdNetEnv*)h_NetEnv)->netEnvId; +} + +void FmPcdIncNetEnvOwners(t_Handle h_FmPcd, uint8_t netEnvId) +{ + uint32_t intFlags; + + ASSERT_COND(h_FmPcd); + + intFlags = NetEnvLock(&((t_FmPcd*)h_FmPcd)->netEnvs[netEnvId]); + ((t_FmPcd*)h_FmPcd)->netEnvs[netEnvId].owners++; + NetEnvUnlock(&((t_FmPcd*)h_FmPcd)->netEnvs[netEnvId], intFlags); +} + +void FmPcdDecNetEnvOwners(t_Handle h_FmPcd, uint8_t netEnvId) +{ + uint32_t intFlags; + + ASSERT_COND(h_FmPcd); + ASSERT_COND(((t_FmPcd*)h_FmPcd)->netEnvs[netEnvId].owners); + + intFlags = NetEnvLock(&((t_FmPcd*)h_FmPcd)->netEnvs[netEnvId]); + ((t_FmPcd*)h_FmPcd)->netEnvs[netEnvId].owners--; + NetEnvUnlock(&((t_FmPcd*)h_FmPcd)->netEnvs[netEnvId], intFlags); +} + +uint32_t FmPcdLock(t_Handle h_FmPcd) +{ + ASSERT_COND(h_FmPcd); + return XX_LockIntrSpinlock(((t_FmPcd*)h_FmPcd)->h_Spinlock); +} + +void FmPcdUnlock(t_Handle h_FmPcd, uint32_t intFlags) +{ + ASSERT_COND(h_FmPcd); + XX_UnlockIntrSpinlock(((t_FmPcd*)h_FmPcd)->h_Spinlock, intFlags); +} + 
+t_FmPcdLock * FmPcdAcquireLock(t_Handle h_FmPcd) +{ + t_FmPcdLock *p_Lock; + ASSERT_COND(h_FmPcd); + p_Lock = DequeueLockFromFreeLst((t_FmPcd*)h_FmPcd); + if (!p_Lock) + { + FillFreeLocksLst(h_FmPcd); + p_Lock = DequeueLockFromFreeLst((t_FmPcd*)h_FmPcd); + } + + if (p_Lock) + EnqueueLockToAcquiredLst((t_FmPcd*)h_FmPcd, p_Lock); + return p_Lock; +} + +void FmPcdReleaseLock(t_Handle h_FmPcd, t_FmPcdLock *p_Lock) +{ + uint32_t intFlags; + ASSERT_COND(h_FmPcd); + intFlags = FmPcdLock(h_FmPcd); + LIST_DelAndInit(&p_Lock->node); + FmPcdUnlock(h_FmPcd, intFlags); + EnqueueLockToFreeLst((t_FmPcd*)h_FmPcd, p_Lock); +} + +bool FmPcdLockTryLockAll(t_Handle h_FmPcd) +{ + uint32_t intFlags; + t_List *p_Pos, *p_SavedPos=NULL; + + ASSERT_COND(h_FmPcd); + intFlags = FmPcdLock(h_FmPcd); + LIST_FOR_EACH(p_Pos, &((t_FmPcd*)h_FmPcd)->acquiredLocksLst) + { + t_FmPcdLock *p_Lock = FM_PCD_LOCK_OBJ(p_Pos); + if (!FmPcdLockTryLock(p_Lock)) + { + p_SavedPos = p_Pos; + break; + } + } + if (p_SavedPos) + { + LIST_FOR_EACH(p_Pos, &((t_FmPcd*)h_FmPcd)->acquiredLocksLst) + { + t_FmPcdLock *p_Lock = FM_PCD_LOCK_OBJ(p_Pos); + if (p_Pos == p_SavedPos) + break; + FmPcdLockUnlock(p_Lock); + } + } + FmPcdUnlock(h_FmPcd, intFlags); + + CORE_MemoryBarrier(); + + if (p_SavedPos) + return FALSE; + + return TRUE; +} + +void FmPcdLockUnlockAll(t_Handle h_FmPcd) +{ + uint32_t intFlags; + t_List *p_Pos; + + ASSERT_COND(h_FmPcd); + intFlags = FmPcdLock(h_FmPcd); + LIST_FOR_EACH(p_Pos, &((t_FmPcd*)h_FmPcd)->acquiredLocksLst) + { + t_FmPcdLock *p_Lock = FM_PCD_LOCK_OBJ(p_Pos); + p_Lock->flag = FALSE; + } + FmPcdUnlock(h_FmPcd, intFlags); + + CORE_MemoryBarrier(); +} + +t_Handle FmPcdGetHcHandle(t_Handle h_FmPcd) +{ + ASSERT_COND(h_FmPcd); + SANITY_CHECK_RETURN_VALUE(((t_FmPcd*)h_FmPcd)->h_Hc, E_INVALID_HANDLE, NULL); + return ((t_FmPcd*)h_FmPcd)->h_Hc; +} + +bool FmPcdIsAdvancedOffloadSupported(t_Handle h_FmPcd) +{ + ASSERT_COND(h_FmPcd); + return ((t_FmPcd*)h_FmPcd)->advancedOffloadSupport; +} 
+/*********************** End of inter-module routines ************************/ + + +/****************************************/ +/* API Init unit functions */ +/****************************************/ + +t_Handle FM_PCD_Config(t_FmPcdParams *p_FmPcdParams) +{ + t_FmPcd *p_FmPcd = NULL; + t_FmPhysAddr physicalMuramBase; + uint8_t i; + + SANITY_CHECK_RETURN_VALUE(p_FmPcdParams, E_INVALID_HANDLE,NULL); + + p_FmPcd = (t_FmPcd *) XX_Malloc(sizeof(t_FmPcd)); + if (!p_FmPcd) + { + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM PCD")); + return NULL; + } + memset(p_FmPcd, 0, sizeof(t_FmPcd)); + + p_FmPcd->p_FmPcdDriverParam = (t_FmPcdDriverParam *) XX_Malloc(sizeof(t_FmPcdDriverParam)); + if (!p_FmPcd->p_FmPcdDriverParam) + { + XX_Free(p_FmPcd); + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM PCD Driver Param")); + return NULL; + } + memset(p_FmPcd->p_FmPcdDriverParam, 0, sizeof(t_FmPcdDriverParam)); + + p_FmPcd->h_Fm = p_FmPcdParams->h_Fm; + p_FmPcd->guestId = FmGetGuestId(p_FmPcd->h_Fm); + p_FmPcd->h_FmMuram = FmGetMuramHandle(p_FmPcd->h_Fm); + if (p_FmPcd->h_FmMuram) + { + FmGetPhysicalMuramBase(p_FmPcdParams->h_Fm, &physicalMuramBase); + p_FmPcd->physicalMuramBase = (uint64_t)((uint64_t)(&physicalMuramBase)->low | ((uint64_t)(&physicalMuramBase)->high << 32)); + } + + for (i = 0; i<FM_MAX_NUM_OF_PORTS; i++) + p_FmPcd->netEnvs[i].clsPlanGrpId = ILLEGAL_CLS_PLAN; + + if (p_FmPcdParams->useHostCommand) + { + t_FmHcParams hcParams; + + memset(&hcParams, 0, sizeof(hcParams)); + hcParams.h_Fm = p_FmPcd->h_Fm; + hcParams.h_FmPcd = (t_Handle)p_FmPcd; + memcpy((uint8_t*)&hcParams.params, (uint8_t*)&p_FmPcdParams->hc, sizeof(t_FmPcdHcParams)); + p_FmPcd->h_Hc = FmHcConfigAndInit(&hcParams); + if (!p_FmPcd->h_Hc) + { + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM PCD HC")); + FM_PCD_Free(p_FmPcd); + return NULL; + } + } + else if (p_FmPcd->guestId != NCSW_MASTER_ID) + REPORT_ERROR(MAJOR, E_INVALID_STATE, ("No Host Command defined for a guest partition.")); + + if (p_FmPcdParams->kgSupport) + { + 
p_FmPcd->p_FmPcdKg = (t_FmPcdKg *)KgConfig(p_FmPcd, p_FmPcdParams); + if (!p_FmPcd->p_FmPcdKg) + { + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM PCD Keygen")); + FM_PCD_Free(p_FmPcd); + return NULL; + } + } + + if (p_FmPcdParams->plcrSupport) + { + p_FmPcd->p_FmPcdPlcr = (t_FmPcdPlcr *)PlcrConfig(p_FmPcd, p_FmPcdParams); + if (!p_FmPcd->p_FmPcdPlcr) + { + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM PCD Policer")); + FM_PCD_Free(p_FmPcd); + return NULL; + } + } + + if (p_FmPcdParams->prsSupport) + { + p_FmPcd->p_FmPcdPrs = (t_FmPcdPrs *)PrsConfig(p_FmPcd, p_FmPcdParams); + if (!p_FmPcd->p_FmPcdPrs) + { + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM PCD Parser")); + FM_PCD_Free(p_FmPcd); + return NULL; + } + } + + p_FmPcd->h_Spinlock = XX_InitSpinlock(); + if (!p_FmPcd->h_Spinlock) + { + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM PCD spinlock")); + FM_PCD_Free(p_FmPcd); + return NULL; + } + INIT_LIST(&p_FmPcd->freeLocksLst); + INIT_LIST(&p_FmPcd->acquiredLocksLst); + + p_FmPcd->numOfEnabledGuestPartitionsPcds = 0; + + p_FmPcd->f_Exception = p_FmPcdParams->f_Exception; + p_FmPcd->f_FmPcdIndexedException = p_FmPcdParams->f_ExceptionId; + p_FmPcd->h_App = p_FmPcdParams->h_App; + + p_FmPcd->p_CcShadow = NULL; + p_FmPcd->ccShadowSize = 0; + p_FmPcd->ccShadowAlign = 0; + + p_FmPcd->h_ShadowSpinlock = XX_InitSpinlock(); + if (!p_FmPcd->h_ShadowSpinlock) + { + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM PCD shadow spinlock")); + FM_PCD_Free(p_FmPcd); + return NULL; + } + + return p_FmPcd; +} + +t_Handle FM_PCD_GetHcDevH(t_Handle h_FmPcd) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *) h_FmPcd; + + return (p_FmPcd) ? 
FmGcGetHcPortDevH(p_FmPcd->h_Hc) : NULL; +} + +t_Error FM_PCD_Init(t_Handle h_FmPcd) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + t_Error err = E_OK; + t_FmPcdIpcMsg msg; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdDriverParam, E_INVALID_HANDLE); + + FM_GetRevision(p_FmPcd->h_Fm, &p_FmPcd->fmRevInfo); + + if (p_FmPcd->guestId != NCSW_MASTER_ID) + { + memset(p_FmPcd->fmPcdIpcHandlerModuleName, 0, (sizeof(char)) * MODULE_NAME_SIZE); + if (Sprint (p_FmPcd->fmPcdIpcHandlerModuleName, "FM_PCD_%d_%d", FmGetId(p_FmPcd->h_Fm), NCSW_MASTER_ID) != 10) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Sprint failed")); + memset(p_FmPcd->fmPcdModuleName, 0, (sizeof(char)) * MODULE_NAME_SIZE); + if (Sprint (p_FmPcd->fmPcdModuleName, "FM_PCD_%d_%d",FmGetId(p_FmPcd->h_Fm), p_FmPcd->guestId) != (p_FmPcd->guestId<10 ? 10:11)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Sprint failed")); + + p_FmPcd->h_IpcSession = XX_IpcInitSession(p_FmPcd->fmPcdIpcHandlerModuleName, p_FmPcd->fmPcdModuleName); + if (p_FmPcd->h_IpcSession) + { + t_FmPcdIpcReply reply; + uint32_t replyLength; + uint8_t isMasterAlive = 0; + + memset(&msg, 0, sizeof(msg)); + memset(&reply, 0, sizeof(reply)); + msg.msgId = FM_PCD_MASTER_IS_ALIVE; + msg.msgBody[0] = p_FmPcd->guestId; + blockingFlag = TRUE; + + do + { + replyLength = sizeof(uint32_t) + sizeof(isMasterAlive); + if ((err = XX_IpcSendMessage(p_FmPcd->h_IpcSession, + (uint8_t*)&msg, + sizeof(msg.msgId)+sizeof(p_FmPcd->guestId), + (uint8_t*)&reply, + &replyLength, + IpcMsgCompletionCB, + h_FmPcd)) != E_OK) + REPORT_ERROR(MAJOR, err, NO_MSG); + while (blockingFlag) ; + if (replyLength != (sizeof(uint32_t) + sizeof(isMasterAlive))) + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch")); + isMasterAlive = *(uint8_t*)(reply.replyBody); + } while (!isMasterAlive); + } + } + + CHECK_INIT_PARAMETERS(p_FmPcd, CheckFmPcdParameters); + + if (p_FmPcd->p_FmPcdKg) + { + err = KgInit(p_FmPcd); + if (err) + 
RETURN_ERROR(MAJOR, err, NO_MSG); + } + + if (p_FmPcd->p_FmPcdPlcr) + { + err = PlcrInit(p_FmPcd); + if (err) + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + if (p_FmPcd->p_FmPcdPrs) + { + err = PrsInit(p_FmPcd); + if (err) + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + if (p_FmPcd->guestId == NCSW_MASTER_ID) + { + /* register to inter-core messaging mechanism */ + memset(p_FmPcd->fmPcdModuleName, 0, (sizeof(char)) * MODULE_NAME_SIZE); + if (Sprint (p_FmPcd->fmPcdModuleName, "FM_PCD_%d_%d",FmGetId(p_FmPcd->h_Fm),NCSW_MASTER_ID) != 10) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Sprint failed")); + err = XX_IpcRegisterMsgHandler(p_FmPcd->fmPcdModuleName, IpcMsgHandlerCB, p_FmPcd, FM_PCD_MAX_REPLY_SIZE); + if (err) + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + /* IPv6 Frame-Id used for fragmentation */ + p_FmPcd->ipv6FrameIdAddr = PTR_TO_UINT(FM_MURAM_AllocMem(p_FmPcd->h_FmMuram, 4, 4)); + if (!p_FmPcd->ipv6FrameIdAddr) + { + FM_PCD_Free(p_FmPcd); + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for IPv6 Frame-Id")); + } + IOMemSet32(UINT_TO_PTR(p_FmPcd->ipv6FrameIdAddr), 0, 4); + + XX_Free(p_FmPcd->p_FmPcdDriverParam); + p_FmPcd->p_FmPcdDriverParam = NULL; + + FmRegisterPcd(p_FmPcd->h_Fm, p_FmPcd); + + return E_OK; +} + +t_Error FM_PCD_Free(t_Handle h_FmPcd) +{ + t_FmPcd *p_FmPcd =(t_FmPcd *)h_FmPcd; + t_Error err = E_OK; + + if (p_FmPcd->ipv6FrameIdAddr) + FM_MURAM_FreeMem(p_FmPcd->h_FmMuram, UINT_TO_PTR(p_FmPcd->ipv6FrameIdAddr)); + + if (p_FmPcd->enabled) + FM_PCD_Disable(p_FmPcd); + + if (p_FmPcd->p_FmPcdDriverParam) + { + XX_Free(p_FmPcd->p_FmPcdDriverParam); + p_FmPcd->p_FmPcdDriverParam = NULL; + } + + if (p_FmPcd->p_FmPcdKg) + { + if ((err = KgFree(p_FmPcd)) != E_OK) + RETURN_ERROR(MINOR, err, NO_MSG); + XX_Free(p_FmPcd->p_FmPcdKg); + p_FmPcd->p_FmPcdKg = NULL; + } + + if (p_FmPcd->p_FmPcdPlcr) + { + if ((err = PlcrFree(p_FmPcd)) != E_OK) + RETURN_ERROR(MINOR, err, NO_MSG); + XX_Free(p_FmPcd->p_FmPcdPlcr); + p_FmPcd->p_FmPcdPlcr = NULL; + } + + if 
(p_FmPcd->p_FmPcdPrs) + { + if (p_FmPcd->guestId == NCSW_MASTER_ID) + PrsFree(p_FmPcd); + XX_Free(p_FmPcd->p_FmPcdPrs); + p_FmPcd->p_FmPcdPrs = NULL; + } + + if (p_FmPcd->h_Hc) + { + FmHcFree(p_FmPcd->h_Hc); + p_FmPcd->h_Hc = NULL; + } + + XX_IpcUnregisterMsgHandler(p_FmPcd->fmPcdModuleName); + + FmUnregisterPcd(p_FmPcd->h_Fm); + + ReleaseFreeLocksLst(p_FmPcd); + + if (p_FmPcd->h_Spinlock) + XX_FreeSpinlock(p_FmPcd->h_Spinlock); + + if (p_FmPcd->h_ShadowSpinlock) + XX_FreeSpinlock(p_FmPcd->h_ShadowSpinlock); + + XX_Free(p_FmPcd); + + return E_OK; +} + +t_Error FM_PCD_ConfigException(t_Handle h_FmPcd, e_FmPcdExceptions exception, bool enable) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + uint32_t bitMask = 0; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + + if (p_FmPcd->guestId != NCSW_MASTER_ID) + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM_PCD_ConfigException - guest mode!")); + + GET_FM_PCD_EXCEPTION_FLAG(bitMask, exception); + if (bitMask) + { + if (enable) + p_FmPcd->exceptions |= bitMask; + else + p_FmPcd->exceptions &= ~bitMask; + } + else + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Undefined exception")); + + return E_OK; +} + +t_Error FM_PCD_ConfigHcFramesDataMemory(t_Handle h_FmPcd, uint8_t memId) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE); + + return FmHcSetFramesDataMemory(p_FmPcd->h_Hc, memId); +} + +t_Error FM_PCD_Enable(t_Handle h_FmPcd) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + t_Error err = E_OK; + + SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_HANDLE); + + if (p_FmPcd->enabled) + return E_OK; + + if ((p_FmPcd->guestId != NCSW_MASTER_ID) && + p_FmPcd->h_IpcSession) + { + uint8_t enabled; + t_FmPcdIpcMsg msg; + t_FmPcdIpcReply reply; + uint32_t replyLength; + + memset(&reply, 0, sizeof(reply)); + memset(&msg, 0, sizeof(msg)); + msg.msgId = FM_PCD_MASTER_IS_ENABLED; + replyLength = sizeof(uint32_t) + sizeof(enabled); + if 
((err = XX_IpcSendMessage(p_FmPcd->h_IpcSession, + (uint8_t*)&msg, + sizeof(msg.msgId), + (uint8_t*)&reply, + &replyLength, + NULL, + NULL)) != E_OK) + RETURN_ERROR(MAJOR, err, NO_MSG); + if (replyLength != sizeof(uint32_t) + sizeof(enabled)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch")); + p_FmPcd->enabled = (bool)!!(*(uint8_t*)(reply.replyBody)); + if (!p_FmPcd->enabled) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("FM-PCD master should be enabled first!")); + + return E_OK; + } + else if (p_FmPcd->guestId != NCSW_MASTER_ID) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, + ("running in guest-mode without IPC!")); + + if (p_FmPcd->p_FmPcdKg) + KgEnable(p_FmPcd); + + if (p_FmPcd->p_FmPcdPlcr) + PlcrEnable(p_FmPcd); + + if (p_FmPcd->p_FmPcdPrs) + PrsEnable(p_FmPcd); + + p_FmPcd->enabled = TRUE; + + return E_OK; +} + +t_Error FM_PCD_Disable(t_Handle h_FmPcd) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + t_Error err = E_OK; + + SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_HANDLE); + + if (!p_FmPcd->enabled) + return E_OK; + + if ((p_FmPcd->guestId != NCSW_MASTER_ID) && + p_FmPcd->h_IpcSession) + { + t_FmPcdIpcMsg msg; + t_FmPcdIpcReply reply; + uint32_t replyLength; + + memset(&reply, 0, sizeof(reply)); + memset(&msg, 0, sizeof(msg)); + msg.msgId = FM_PCD_GUEST_DISABLE; + replyLength = sizeof(uint32_t); + if ((err = XX_IpcSendMessage(p_FmPcd->h_IpcSession, + (uint8_t*)&msg, + sizeof(msg.msgId), + (uint8_t*)&reply, + &replyLength, + NULL, + NULL)) != E_OK) + RETURN_ERROR(MAJOR, err, NO_MSG); + if (replyLength != sizeof(uint32_t)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch")); + if (reply.error == E_OK) + p_FmPcd->enabled = FALSE; + + return (t_Error)(reply.error); + } + else if (p_FmPcd->guestId != NCSW_MASTER_ID) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, + ("running in guest-mode without IPC!")); + + if (p_FmPcd->numOfEnabledGuestPartitionsPcds != 0) + RETURN_ERROR(MAJOR, E_INVALID_STATE, + ("Trying to disable a master partition PCD 
while" + "guest partitions are still enabled!")); + + if (p_FmPcd->p_FmPcdKg) + KgDisable(p_FmPcd); + + if (p_FmPcd->p_FmPcdPlcr) + PlcrDisable(p_FmPcd); + + if (p_FmPcd->p_FmPcdPrs) + PrsDisable(p_FmPcd); + + p_FmPcd->enabled = FALSE; + + return E_OK; +} + +t_Handle FM_PCD_NetEnvCharacteristicsSet(t_Handle h_FmPcd, t_FmPcdNetEnvParams *p_NetEnvParams) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + uint32_t intFlags, specialUnits = 0; + uint8_t bitId = 0; + uint8_t i, j, k; + uint8_t netEnvCurrId; + uint8_t ipsecAhUnit = 0,ipsecEspUnit = 0; + bool ipsecAhExists = FALSE, ipsecEspExists = FALSE, shim1Selected = FALSE; + uint8_t hdrNum; + t_FmPcdNetEnvParams *p_modifiedNetEnvParams; + + SANITY_CHECK_RETURN_VALUE(h_FmPcd, E_INVALID_STATE, NULL); + SANITY_CHECK_RETURN_VALUE(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE, NULL); + SANITY_CHECK_RETURN_VALUE(p_NetEnvParams, E_NULL_POINTER, NULL); + + intFlags = FmPcdLock(p_FmPcd); + + /* find a new netEnv */ + for (i = 0; i < FM_MAX_NUM_OF_PORTS; i++) + if (!p_FmPcd->netEnvs[i].used) + break; + + if (i== FM_MAX_NUM_OF_PORTS) + { + REPORT_ERROR(MAJOR, E_FULL,("No more than %d netEnv's allowed.", FM_MAX_NUM_OF_PORTS)); + FmPcdUnlock(p_FmPcd, intFlags); + return NULL; + } + + p_FmPcd->netEnvs[i].used = TRUE; + FmPcdUnlock(p_FmPcd, intFlags); + + /* As anyone doesn't have handle of this netEnv yet, no need + to protect it with spinlocks */ + + p_modifiedNetEnvParams = (t_FmPcdNetEnvParams *) XX_Malloc(sizeof(t_FmPcdNetEnvParams)); + if (!p_modifiedNetEnvParams) + { + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FmPcdNetEnvParams")); + return NULL; + } + + memcpy(p_modifiedNetEnvParams, p_NetEnvParams, sizeof(t_FmPcdNetEnvParams)); + p_NetEnvParams = p_modifiedNetEnvParams; + + netEnvCurrId = (uint8_t)i; + + /* clear from previous use */ + memset(&p_FmPcd->netEnvs[netEnvCurrId].units, 0, FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS * sizeof(t_FmPcdIntDistinctionUnit)); + memset(&p_FmPcd->netEnvs[netEnvCurrId].aliasHdrs, 0, 
FM_PCD_MAX_NUM_OF_ALIAS_HDRS * sizeof(t_FmPcdNetEnvAliases)); + memcpy(&p_FmPcd->netEnvs[netEnvCurrId].units, p_NetEnvParams->units, p_NetEnvParams->numOfDistinctionUnits*sizeof(t_FmPcdIntDistinctionUnit)); + + p_FmPcd->netEnvs[netEnvCurrId].netEnvId = netEnvCurrId; + p_FmPcd->netEnvs[netEnvCurrId].h_FmPcd = p_FmPcd; + + p_FmPcd->netEnvs[netEnvCurrId].clsPlanGrpId = ILLEGAL_CLS_PLAN; + + /* check that header with opt is not interchanged with the same header */ + for (i = 0; (i < FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS) + && (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[0].hdr != HEADER_TYPE_NONE); i++) + { + for (k = 0; (k < FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS) + && (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr != HEADER_TYPE_NONE); k++) + { + /* if an option exists, check that other headers are not the same header + without option */ + if (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].opt) + { + for (j = 0; (j < FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS) + && (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[j].hdr != HEADER_TYPE_NONE); j++) + { + if ((p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[j].hdr == p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr) && + !p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[j].opt) + { + REPORT_ERROR(MINOR, E_FULL, + ("Illegal unit - header with opt may not be interchangeable with the same header without opt")); + XX_Free(p_modifiedNetEnvParams); + return NULL; + } + } + } + } + } + + /* Specific headers checking */ + for (i = 0; (i < FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS) + && (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[0].hdr != HEADER_TYPE_NONE); i++) + { + for (k = 0; (k < FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS) + && (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr != HEADER_TYPE_NONE); k++) + { + /* Some headers pairs may not be defined on different units as the parser + doesn't distinguish */ + /* IPSEC_AH and IPSEC_SPI can't be 2 units, */ + /* check that header with opt is not interchanged with the same header */ + 
if (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr == HEADER_TYPE_IPSEC_AH) + { + if (ipsecEspExists && (ipsecEspUnit != i)) + { + REPORT_ERROR(MINOR, E_INVALID_STATE, ("HEADER_TYPE_IPSEC_AH and HEADER_TYPE_IPSEC_ESP may not be defined in separate units")); + XX_Free(p_modifiedNetEnvParams); + return NULL; + } + else + { + ipsecAhUnit = i; + ipsecAhExists = TRUE; + } + } + if (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr == HEADER_TYPE_IPSEC_ESP) + { + if (ipsecAhExists && (ipsecAhUnit != i)) + { + REPORT_ERROR(MINOR, E_INVALID_STATE, ("HEADER_TYPE_IPSEC_AH and HEADER_TYPE_IPSEC_ESP may not be defined in separate units")); + XX_Free(p_modifiedNetEnvParams); + return NULL; + } + else + { + ipsecEspUnit = i; + ipsecEspExists = TRUE; + } + } + /* ENCAP_ESP */ + if (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr == HEADER_TYPE_UDP_ENCAP_ESP) + { + /* IPSec UDP encapsulation is currently set to use SHIM1 */ + p_FmPcd->netEnvs[netEnvCurrId].aliasHdrs[specialUnits].hdr = HEADER_TYPE_UDP_ENCAP_ESP; + p_FmPcd->netEnvs[netEnvCurrId].aliasHdrs[specialUnits++].aliasHdr = HEADER_TYPE_USER_DEFINED_SHIM1; + p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr = HEADER_TYPE_USER_DEFINED_SHIM1; + p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].opt = 0; + } +#ifdef FM_CAPWAP_SUPPORT + /* UDP_LITE */ + if (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr == HEADER_TYPE_UDP_LITE) + { + /* IPSec UDP encapsulation is currently set to use SHIM1 */ + p_FmPcd->netEnvs[netEnvCurrId].aliasHdrs[specialUnits].hdr = HEADER_TYPE_UDP_LITE; + p_FmPcd->netEnvs[netEnvCurrId].aliasHdrs[specialUnits++].aliasHdr = HEADER_TYPE_UDP; + p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr = HEADER_TYPE_UDP; + p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].opt = 0; + } +#endif /* FM_CAPWAP_SUPPORT */ + + /* IP FRAG */ + if ((p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr == HEADER_TYPE_IPv4) && + (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].opt == IPV4_FRAG_1)) + { + /* If 
IPv4+Frag, we need to set 2 units - SHIM 2 and IPv4. We first set SHIM2, and than check if + * IPv4 exists. If so we don't need to set an extra unit + * We consider as "having IPv4" any IPv4 without interchangable headers + * but including any options. */ + p_FmPcd->netEnvs[netEnvCurrId].aliasHdrs[specialUnits].hdr = HEADER_TYPE_IPv4; + p_FmPcd->netEnvs[netEnvCurrId].aliasHdrs[specialUnits].opt = IPV4_FRAG_1; + p_FmPcd->netEnvs[netEnvCurrId].aliasHdrs[specialUnits++].aliasHdr = HEADER_TYPE_USER_DEFINED_SHIM2; + p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr = HEADER_TYPE_USER_DEFINED_SHIM2; + p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].opt = 0; + + /* check if IPv4 header exists by itself */ + if (FmPcdNetEnvGetUnitId(p_FmPcd, netEnvCurrId, HEADER_TYPE_IPv4, FALSE, 0) == FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS) + { + p_FmPcd->netEnvs[netEnvCurrId].units[p_NetEnvParams->numOfDistinctionUnits].hdrs[0].hdr = HEADER_TYPE_IPv4; + p_FmPcd->netEnvs[netEnvCurrId].units[p_NetEnvParams->numOfDistinctionUnits++].hdrs[0].opt = 0; + } + } + if ((p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr == HEADER_TYPE_IPv6) && + (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].opt == IPV6_FRAG_1)) + { + /* If IPv6+Frag, we need to set 2 units - SHIM 2 and IPv6. We first set SHIM2, and than check if + * IPv4 exists. If so we don't need to set an extra unit + * We consider as "having IPv6" any IPv6 without interchangable headers + * but including any options. 
*/ + p_FmPcd->netEnvs[netEnvCurrId].aliasHdrs[specialUnits].hdr = HEADER_TYPE_IPv6; + p_FmPcd->netEnvs[netEnvCurrId].aliasHdrs[specialUnits].opt = IPV6_FRAG_1; + p_FmPcd->netEnvs[netEnvCurrId].aliasHdrs[specialUnits++].aliasHdr = HEADER_TYPE_USER_DEFINED_SHIM2; + p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr = HEADER_TYPE_USER_DEFINED_SHIM2; + p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].opt = 0; + + /* check if IPv6 header exists by itself */ + if (FmPcdNetEnvGetUnitId(p_FmPcd, netEnvCurrId, HEADER_TYPE_IPv6, FALSE, 0) == FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS) + { + p_FmPcd->netEnvs[netEnvCurrId].units[p_NetEnvParams->numOfDistinctionUnits].hdrs[0].hdr = HEADER_TYPE_IPv6; + p_FmPcd->netEnvs[netEnvCurrId].units[p_NetEnvParams->numOfDistinctionUnits++].hdrs[0].opt = 0; + } + } + } + } + + /* if private header (shim), check that no other headers specified */ + for (i = 0; (i < FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS) + && (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[0].hdr != HEADER_TYPE_NONE); i++) + { + if (IS_PRIVATE_HEADER(p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[0].hdr)) + if (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[1].hdr != HEADER_TYPE_NONE) + { + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("SHIM header may not be interchanged with other headers")); + XX_Free(p_modifiedNetEnvParams); + return NULL; + } + } + + for (i = 0; i < p_NetEnvParams->numOfDistinctionUnits; i++) + { + if (IS_PRIVATE_HEADER(p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[0].hdr)) + switch (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[0].hdr) + { + case (HEADER_TYPE_USER_DEFINED_SHIM1): + if (shim1Selected) + { + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("SHIM header cannot be selected with UDP_IPSEC_ESP")); + XX_Free(p_modifiedNetEnvParams); + return NULL; + } + shim1Selected = TRUE; + p_FmPcd->netEnvs[netEnvCurrId].unitsVectors[i] = 0x00000001; + break; + case (HEADER_TYPE_USER_DEFINED_SHIM2): + p_FmPcd->netEnvs[netEnvCurrId].unitsVectors[i] = 0x00000002; + break; + default: + 
REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Requested SHIM not supported")); + } + else + { + p_FmPcd->netEnvs[netEnvCurrId].unitsVectors[i] = (uint32_t)(0x80000000 >> bitId++); + + if (IS_SPECIAL_HEADER(p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[0].hdr)) + p_FmPcd->netEnvs[netEnvCurrId].macsecVector = p_FmPcd->netEnvs[netEnvCurrId].unitsVectors[i]; + } + } + + /* define a set of hardware parser LCV's according to the defined netenv */ + + /* set an array of LCV's for each header in the netEnv */ + for (i = 0; (i < FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS) + && (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[0].hdr != HEADER_TYPE_NONE); i++) + { + /* private headers have no LCV in the hard parser */ + if (!IS_PRIVATE_HEADER(p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[0].hdr)) + { + for (k = 0; (k < FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS) + && (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr != HEADER_TYPE_NONE); k++) + { + GET_PRS_HDR_NUM(hdrNum, p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr); + if ((hdrNum == ILLEGAL_HDR_NUM) || (hdrNum == NO_HDR_NUM)) + { + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, NO_MSG); + XX_Free(p_modifiedNetEnvParams); + return NULL; + } + p_FmPcd->netEnvs[netEnvCurrId].lcvs[hdrNum] |= p_FmPcd->netEnvs[netEnvCurrId].unitsVectors[i]; + } + } + } + XX_Free(p_modifiedNetEnvParams); + + p_FmPcd->netEnvs[netEnvCurrId].h_Spinlock = XX_InitSpinlock(); + if (!p_FmPcd->netEnvs[netEnvCurrId].h_Spinlock) + { + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM Pcd NetEnv spinlock")); + return NULL; + } + return &p_FmPcd->netEnvs[netEnvCurrId]; +} + +t_Error FM_PCD_NetEnvCharacteristicsDelete(t_Handle h_NetEnv) +{ + t_FmPcdNetEnv *p_NetEnv = (t_FmPcdNetEnv*)h_NetEnv; + t_FmPcd *p_FmPcd = p_NetEnv->h_FmPcd; + uint32_t intFlags; + uint8_t netEnvId = p_NetEnv->netEnvId; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_STATE); + SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE); + + /* check that no port is bound to this netEnv */ + if 
(p_FmPcd->netEnvs[netEnvId].owners) + { + RETURN_ERROR(MINOR, E_INVALID_STATE, + ("Trying to delete a netEnv that has ports/schemes/trees/clsPlanGrps bound to")); + } + + intFlags = FmPcdLock(p_FmPcd); + + p_FmPcd->netEnvs[netEnvId].used = FALSE; + p_FmPcd->netEnvs[netEnvId].clsPlanGrpId = ILLEGAL_CLS_PLAN; + + memset(p_FmPcd->netEnvs[netEnvId].units, 0, sizeof(t_FmPcdIntDistinctionUnit)*FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS); + memset(p_FmPcd->netEnvs[netEnvId].unitsVectors, 0, sizeof(uint32_t)*FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS); + memset(p_FmPcd->netEnvs[netEnvId].lcvs, 0, sizeof(uint32_t)*FM_PCD_PRS_NUM_OF_HDRS); + + if (p_FmPcd->netEnvs[netEnvId].h_Spinlock) + XX_FreeSpinlock(p_FmPcd->netEnvs[netEnvId].h_Spinlock); + + FmPcdUnlock(p_FmPcd, intFlags); + return E_OK; +} + +void FM_PCD_HcTxConf(t_Handle h_FmPcd, t_DpaaFD *p_Fd) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + + SANITY_CHECK_RETURN(h_FmPcd, E_INVALID_STATE); + + FmHcTxConf(p_FmPcd->h_Hc, p_Fd); +} + +t_Error FM_PCD_SetAdvancedOffloadSupport(t_Handle h_FmPcd) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + t_FmCtrlCodeRevisionInfo revInfo; + t_Error err; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE); + SANITY_CHECK_RETURN_ERROR(!p_FmPcd->enabled, E_INVALID_STATE); + + if ((err = FM_GetFmanCtrlCodeRevision(p_FmPcd->h_Fm, &revInfo)) != E_OK) + { + DBG(WARNING, ("FM in guest-mode without IPC, can't validate firmware revision.")); + revInfo.packageRev = IP_OFFLOAD_PACKAGE_NUMBER; + } + if (revInfo.packageRev != IP_OFFLOAD_PACKAGE_NUMBER) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("Fman ctrl code package")); + + p_FmPcd->advancedOffloadSupport = TRUE; + + return E_OK; +} + +uint32_t FM_PCD_GetCounter(t_Handle h_FmPcd, e_FmPcdCounters counter) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + uint32_t outCounter = 0; + t_Error err; + + SANITY_CHECK_RETURN_VALUE(h_FmPcd, E_INVALID_HANDLE, 0); + 
SANITY_CHECK_RETURN_VALUE(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE, 0); + + switch (counter) + { + case (e_FM_PCD_KG_COUNTERS_TOTAL): + if (!p_FmPcd->p_FmPcdKg) + { + REPORT_ERROR(MAJOR, E_INVALID_STATE, ("KeyGen is not activated")); + return 0; + } + if ((p_FmPcd->guestId != NCSW_MASTER_ID) && + !p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs && + !p_FmPcd->h_IpcSession) + { + REPORT_ERROR(MINOR, E_NOT_SUPPORTED, + ("running in guest-mode without neither IPC nor mapped register!")); + return 0; + } + break; + + case (e_FM_PCD_PLCR_COUNTERS_YELLOW): + case (e_FM_PCD_PLCR_COUNTERS_RED): + case (e_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_RED): + case (e_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_YELLOW): + case (e_FM_PCD_PLCR_COUNTERS_TOTAL): + case (e_FM_PCD_PLCR_COUNTERS_LENGTH_MISMATCH): + if (!p_FmPcd->p_FmPcdPlcr) + { + REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Policer is not activated")); + return 0; + } + if ((p_FmPcd->guestId != NCSW_MASTER_ID) && + !p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs && + !p_FmPcd->h_IpcSession) + { + REPORT_ERROR(MINOR, E_NOT_SUPPORTED, + ("running in \"guest-mode\" without neither IPC nor mapped register!")); + return 0; + } + + /* check that counters are enabled */ + if (p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs && + !(GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_gcr) & FM_PCD_PLCR_GCR_STEN)) + { + REPORT_ERROR(MINOR, E_INVALID_STATE, ("Requested counter was not enabled")); + return 0; + } + ASSERT_COND(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs || + ((p_FmPcd->guestId != NCSW_MASTER_ID) && p_FmPcd->h_IpcSession)); + break; + + case (e_FM_PCD_PRS_COUNTERS_PARSE_DISPATCH): + case (e_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED): + case (e_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED): + case (e_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED): + case (e_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED): + case (e_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED_WITH_ERR): + case (e_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED_WITH_ERR): + case 
(e_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED_WITH_ERR): + case (e_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED_WITH_ERR): + case (e_FM_PCD_PRS_COUNTERS_SOFT_PRS_CYCLES): + case (e_FM_PCD_PRS_COUNTERS_SOFT_PRS_STALL_CYCLES): + case (e_FM_PCD_PRS_COUNTERS_HARD_PRS_CYCLE_INCL_STALL_CYCLES): + case (e_FM_PCD_PRS_COUNTERS_MURAM_READ_CYCLES): + case (e_FM_PCD_PRS_COUNTERS_MURAM_READ_STALL_CYCLES): + case (e_FM_PCD_PRS_COUNTERS_MURAM_WRITE_CYCLES): + case (e_FM_PCD_PRS_COUNTERS_MURAM_WRITE_STALL_CYCLES): + case (e_FM_PCD_PRS_COUNTERS_FPM_COMMAND_STALL_CYCLES): + if (!p_FmPcd->p_FmPcdPrs) + { + REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Parser is not activated")); + return 0; + } + if ((p_FmPcd->guestId != NCSW_MASTER_ID) && + !p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs && + !p_FmPcd->h_IpcSession) + { + REPORT_ERROR(MINOR, E_NOT_SUPPORTED, + ("running in guest-mode without neither IPC nor mapped register!")); + return 0; + } + break; + default: + REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Unsupported type of counter")); + return 0; + } + + if ((p_FmPcd->guestId != NCSW_MASTER_ID) && + p_FmPcd->h_IpcSession) + { + t_FmPcdIpcMsg msg; + t_FmPcdIpcReply reply; + uint32_t replyLength; + + memset(&msg, 0, sizeof(msg)); + memset(&reply, 0, sizeof(reply)); + msg.msgId = FM_PCD_GET_COUNTER; + memcpy(msg.msgBody, (uint8_t *)&counter, sizeof(uint32_t)); + replyLength = sizeof(uint32_t) + sizeof(uint32_t); + if ((err = XX_IpcSendMessage(p_FmPcd->h_IpcSession, + (uint8_t*)&msg, + sizeof(msg.msgId) +sizeof(uint32_t), + (uint8_t*)&reply, + &replyLength, + NULL, + NULL)) != E_OK) + RETURN_ERROR(MAJOR, err, NO_MSG); + if (replyLength != sizeof(uint32_t) + sizeof(uint32_t)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch")); + + memcpy((uint8_t*)&outCounter, reply.replyBody, sizeof(uint32_t)); + return outCounter; + } + + switch (counter) + { + /* Parser statistics */ + case (e_FM_PCD_PRS_COUNTERS_PARSE_DISPATCH): + return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_pds); 
+ case (e_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED): + return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_l2rrs); + case (e_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED): + return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_l3rrs); + case (e_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED): + return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_l4rrs); + case (e_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED): + return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_srrs); + case (e_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED_WITH_ERR): + return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_l2rres); + case (e_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED_WITH_ERR): + return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_l3rres); + case (e_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED_WITH_ERR): + return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_l4rres); + case (e_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED_WITH_ERR): + return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_srres); + case (e_FM_PCD_PRS_COUNTERS_SOFT_PRS_CYCLES): + return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_spcs); + case (e_FM_PCD_PRS_COUNTERS_SOFT_PRS_STALL_CYCLES): + return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_spscs); + case (e_FM_PCD_PRS_COUNTERS_HARD_PRS_CYCLE_INCL_STALL_CYCLES): + return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_hxscs); + case (e_FM_PCD_PRS_COUNTERS_MURAM_READ_CYCLES): + return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_mrcs); + case (e_FM_PCD_PRS_COUNTERS_MURAM_READ_STALL_CYCLES): + return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_mrscs); + case (e_FM_PCD_PRS_COUNTERS_MURAM_WRITE_CYCLES): + return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_mwcs); + case (e_FM_PCD_PRS_COUNTERS_MURAM_WRITE_STALL_CYCLES): + return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_mwscs); + case (e_FM_PCD_PRS_COUNTERS_FPM_COMMAND_STALL_CYCLES): + return 
GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_fcscs); + case (e_FM_PCD_KG_COUNTERS_TOTAL): + return GET_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->fmkg_tpc); + + /* Policer statistics */ + case (e_FM_PCD_PLCR_COUNTERS_YELLOW): + return GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_ypcnt); + case (e_FM_PCD_PLCR_COUNTERS_RED): + return GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_rpcnt); + case (e_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_RED): + return GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_rrpcnt); + case (e_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_YELLOW): + return GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_rypcnt); + case (e_FM_PCD_PLCR_COUNTERS_TOTAL): + return GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_tpcnt); + case (e_FM_PCD_PLCR_COUNTERS_LENGTH_MISMATCH): + return GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_flmcnt); + + default: + REPORT_ERROR(MINOR, E_INVALID_STATE, ("Unsupported type of counter")); + return 0; + } +} + +#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) +t_Error FM_PCD_DumpRegs(t_Handle h_FmPcd) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + t_Error err = E_OK; + + DECLARE_DUMP; + + SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE); + + if (p_FmPcd->p_FmPcdKg) + err |= FM_PCD_KgDumpRegs(h_FmPcd); + if (p_FmPcd->p_FmPcdPlcr) + err |= FM_PCD_PlcrDumpRegs(h_FmPcd); + if (p_FmPcd->p_FmPcdPrs) + err |= FM_PCD_PrsDumpRegs(h_FmPcd); + + return err; +} + +t_Error FM_PCD_HcDumpRegs(t_Handle h_FmPcd) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + + DECLARE_DUMP; + + SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_STATE); + + return FmHcDumpRegs(p_FmPcd->h_Hc); +} +#endif /* (defined(DEBUG_ERRORS) && ... 
*/ + +t_Error FM_PCD_SetException(t_Handle h_FmPcd, e_FmPcdExceptions exception, bool enable) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + uint32_t bitMask = 0, tmpReg; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE); + + if (p_FmPcd->guestId != NCSW_MASTER_ID) + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM_PCD_SetException - guest mode!")); + + GET_FM_PCD_EXCEPTION_FLAG(bitMask, exception); + + if (bitMask) + { + if (enable) + p_FmPcd->exceptions |= bitMask; + else + p_FmPcd->exceptions &= ~bitMask; + + switch (exception) + { + case (e_FM_PCD_KG_EXCEPTION_DOUBLE_ECC): + case (e_FM_PCD_KG_EXCEPTION_KEYSIZE_OVERFLOW): + if (!p_FmPcd->p_FmPcdKg) + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Can't ask for this interrupt - keygen is not working")); + break; + case (e_FM_PCD_PLCR_EXCEPTION_DOUBLE_ECC): + case (e_FM_PCD_PLCR_EXCEPTION_INIT_ENTRY_ERROR): + case (e_FM_PCD_PLCR_EXCEPTION_PRAM_SELF_INIT_COMPLETE): + case (e_FM_PCD_PLCR_EXCEPTION_ATOMIC_ACTION_COMPLETE): + if (!p_FmPcd->p_FmPcdPlcr) + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Can't ask for this interrupt - policer is not working")); + break; + case (e_FM_PCD_PRS_EXCEPTION_DOUBLE_ECC): + case (e_FM_PCD_PRS_EXCEPTION_SINGLE_ECC): + if (!p_FmPcd->p_FmPcdPrs) + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Can't ask for this interrupt - parser is not working")); + break; + default: + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Unsupported exception")); + + } + + switch (exception) + { + case (e_FM_PCD_KG_EXCEPTION_DOUBLE_ECC): + tmpReg = GET_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->fmkg_eeer); + if (enable) + tmpReg |= FM_EX_KG_DOUBLE_ECC; + else + tmpReg &= ~FM_EX_KG_DOUBLE_ECC; + WRITE_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->fmkg_eeer, tmpReg); + break; + case (e_FM_PCD_KG_EXCEPTION_KEYSIZE_OVERFLOW): + tmpReg = GET_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->fmkg_eeer); + if (enable) + tmpReg |= FM_EX_KG_KEYSIZE_OVERFLOW; + else + tmpReg &= 
~FM_EX_KG_KEYSIZE_OVERFLOW; + WRITE_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->fmkg_eeer, tmpReg); + break; + case (e_FM_PCD_PRS_EXCEPTION_DOUBLE_ECC): + tmpReg = GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_perer); + if (enable) + tmpReg |= FM_PCD_PRS_DOUBLE_ECC; + else + tmpReg &= ~FM_PCD_PRS_DOUBLE_ECC; + WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_perer, tmpReg); + break; + case (e_FM_PCD_PRS_EXCEPTION_SINGLE_ECC): + tmpReg = GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_pever); + if (enable) + tmpReg |= FM_PCD_PRS_SINGLE_ECC; + else + tmpReg &= ~FM_PCD_PRS_SINGLE_ECC; + WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_pever, tmpReg); + break; + case (e_FM_PCD_PLCR_EXCEPTION_DOUBLE_ECC): + tmpReg = GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_eier); + if (enable) + tmpReg |= FM_PCD_PLCR_DOUBLE_ECC; + else + tmpReg &= ~FM_PCD_PLCR_DOUBLE_ECC; + WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_eier, tmpReg); + break; + case (e_FM_PCD_PLCR_EXCEPTION_INIT_ENTRY_ERROR): + tmpReg = GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_eier); + if (enable) + tmpReg |= FM_PCD_PLCR_INIT_ENTRY_ERROR; + else + tmpReg &= ~FM_PCD_PLCR_INIT_ENTRY_ERROR; + WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_eier, tmpReg); + break; + case (e_FM_PCD_PLCR_EXCEPTION_PRAM_SELF_INIT_COMPLETE): + tmpReg = GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_ier); + if (enable) + tmpReg |= FM_PCD_PLCR_PRAM_SELF_INIT_COMPLETE; + else + tmpReg &= ~FM_PCD_PLCR_PRAM_SELF_INIT_COMPLETE; + WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_ier, tmpReg); + break; + case (e_FM_PCD_PLCR_EXCEPTION_ATOMIC_ACTION_COMPLETE): + tmpReg = GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_ier); + if (enable) + tmpReg |= FM_PCD_PLCR_ATOMIC_ACTION_COMPLETE; + else + tmpReg &= ~FM_PCD_PLCR_ATOMIC_ACTION_COMPLETE; + WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_ier, tmpReg); + break; + default: + RETURN_ERROR(MINOR, 
E_INVALID_STATE, ("Unsupported exception")); + } + /* for ECC exceptions driver automatically enables ECC mechanism, if disabled. + Driver may disable them automatically, depending on driver's status */ + if (enable && ( (exception == e_FM_PCD_KG_EXCEPTION_DOUBLE_ECC) | + (exception == e_FM_PCD_PLCR_EXCEPTION_DOUBLE_ECC) | + (exception == e_FM_PCD_PRS_EXCEPTION_DOUBLE_ECC) | + (exception == e_FM_PCD_PRS_EXCEPTION_SINGLE_ECC))) + FmEnableRamsEcc(p_FmPcd->h_Fm); + if (!enable && ( (exception == e_FM_PCD_KG_EXCEPTION_DOUBLE_ECC) | + (exception == e_FM_PCD_PLCR_EXCEPTION_DOUBLE_ECC) | + (exception == e_FM_PCD_PRS_EXCEPTION_DOUBLE_ECC) | + (exception == e_FM_PCD_PRS_EXCEPTION_SINGLE_ECC))) + FmDisableRamsEcc(p_FmPcd->h_Fm); + } + else + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Undefined exception")); + + return E_OK; +} + +t_Error FM_PCD_ForceIntr (t_Handle h_FmPcd, e_FmPcdExceptions exception) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + + SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE); + + if (p_FmPcd->guestId != NCSW_MASTER_ID) + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM_PCD_ForceIntr - guest mode!")); + + switch (exception) + { + case (e_FM_PCD_KG_EXCEPTION_DOUBLE_ECC): + case (e_FM_PCD_KG_EXCEPTION_KEYSIZE_OVERFLOW): + if (!p_FmPcd->p_FmPcdKg) + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Can't ask for this interrupt - keygen is not working")); + break; + case (e_FM_PCD_PLCR_EXCEPTION_DOUBLE_ECC): + case (e_FM_PCD_PLCR_EXCEPTION_INIT_ENTRY_ERROR): + case (e_FM_PCD_PLCR_EXCEPTION_PRAM_SELF_INIT_COMPLETE): + case (e_FM_PCD_PLCR_EXCEPTION_ATOMIC_ACTION_COMPLETE): + if (!p_FmPcd->p_FmPcdPlcr) + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Can't ask for this interrupt - policer is not working")); + break; + case (e_FM_PCD_PRS_EXCEPTION_DOUBLE_ECC): + case (e_FM_PCD_PRS_EXCEPTION_SINGLE_ECC): + if (!p_FmPcd->p_FmPcdPrs) + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Can't ask for this interrupt -parsrer is 
not working")); + break; + default: + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Invalid interrupt requested")); + + } + switch (exception) + { + case e_FM_PCD_PRS_EXCEPTION_DOUBLE_ECC: + if (!(p_FmPcd->exceptions & FM_PCD_EX_PRS_DOUBLE_ECC)) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked")); + break; + case e_FM_PCD_PRS_EXCEPTION_SINGLE_ECC: + if (!(p_FmPcd->exceptions & FM_PCD_EX_PRS_SINGLE_ECC)) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked")); + break; + case e_FM_PCD_KG_EXCEPTION_DOUBLE_ECC: + if (!(p_FmPcd->exceptions & FM_EX_KG_DOUBLE_ECC)) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked")); + WRITE_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->fmkg_feer, FM_EX_KG_DOUBLE_ECC); + break; + case e_FM_PCD_KG_EXCEPTION_KEYSIZE_OVERFLOW: + if (!(p_FmPcd->exceptions & FM_EX_KG_KEYSIZE_OVERFLOW)) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked")); + WRITE_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->fmkg_feer, FM_EX_KG_KEYSIZE_OVERFLOW); + break; + case e_FM_PCD_PLCR_EXCEPTION_DOUBLE_ECC: + if (!(p_FmPcd->exceptions & FM_PCD_EX_PLCR_DOUBLE_ECC)) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked")); + WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_eifr, FM_PCD_PLCR_DOUBLE_ECC); + break; + case e_FM_PCD_PLCR_EXCEPTION_INIT_ENTRY_ERROR: + if (!(p_FmPcd->exceptions & FM_PCD_EX_PLCR_INIT_ENTRY_ERROR)) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked")); + WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_eifr, FM_PCD_PLCR_INIT_ENTRY_ERROR); + break; + case e_FM_PCD_PLCR_EXCEPTION_PRAM_SELF_INIT_COMPLETE: + if (!(p_FmPcd->exceptions & FM_PCD_EX_PLCR_PRAM_SELF_INIT_COMPLETE)) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked")); + WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_ifr, FM_PCD_PLCR_PRAM_SELF_INIT_COMPLETE); + break; + case 
e_FM_PCD_PLCR_EXCEPTION_ATOMIC_ACTION_COMPLETE: + if (!(p_FmPcd->exceptions & FM_PCD_EX_PLCR_ATOMIC_ACTION_COMPLETE)) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked")); + WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_ifr, FM_PCD_PLCR_ATOMIC_ACTION_COMPLETE); + break; + default: + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception may not be forced")); + } + + return E_OK; +} + + +t_Error FM_PCD_ModifyCounter(t_Handle h_FmPcd, e_FmPcdCounters counter, uint32_t value) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + + SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE); + + if (p_FmPcd->guestId != NCSW_MASTER_ID) + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM_PCD_ModifyCounter - guest mode!")); + + switch (counter) + { + case (e_FM_PCD_KG_COUNTERS_TOTAL): + if (!p_FmPcd->p_FmPcdKg) + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Invalid counters - KeyGen is not working")); + break; + case (e_FM_PCD_PLCR_COUNTERS_YELLOW): + case (e_FM_PCD_PLCR_COUNTERS_RED): + case (e_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_RED): + case (e_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_YELLOW): + case (e_FM_PCD_PLCR_COUNTERS_TOTAL): + case (e_FM_PCD_PLCR_COUNTERS_LENGTH_MISMATCH): + if (!p_FmPcd->p_FmPcdPlcr) + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Invalid counters - Policer is not working")); + if (!(GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_gcr) & FM_PCD_PLCR_GCR_STEN)) + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Requested counter was not enabled")); + break; + case (e_FM_PCD_PRS_COUNTERS_PARSE_DISPATCH): + case (e_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED): + case (e_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED): + case (e_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED): + case (e_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED): + case (e_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED_WITH_ERR): + case (e_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED_WITH_ERR): + case 
(e_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED_WITH_ERR): + case (e_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED_WITH_ERR): + case (e_FM_PCD_PRS_COUNTERS_SOFT_PRS_CYCLES): + case (e_FM_PCD_PRS_COUNTERS_SOFT_PRS_STALL_CYCLES): + case (e_FM_PCD_PRS_COUNTERS_HARD_PRS_CYCLE_INCL_STALL_CYCLES): + case (e_FM_PCD_PRS_COUNTERS_MURAM_READ_CYCLES): + case (e_FM_PCD_PRS_COUNTERS_MURAM_READ_STALL_CYCLES): + case (e_FM_PCD_PRS_COUNTERS_MURAM_WRITE_CYCLES): + case (e_FM_PCD_PRS_COUNTERS_MURAM_WRITE_STALL_CYCLES): + case (e_FM_PCD_PRS_COUNTERS_FPM_COMMAND_STALL_CYCLES): + if (!p_FmPcd->p_FmPcdPrs) + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Unsupported type of counter")); + break; + default: + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Unsupported type of counter")); + } + switch (counter) + { + case (e_FM_PCD_PRS_COUNTERS_PARSE_DISPATCH): + WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_pds, value); + break; + case (e_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED): + WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_l2rrs, value); + break; + case (e_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED): + WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_l3rrs, value); + break; + case (e_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED): + WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_l4rrs, value); + break; + case (e_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED): + WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_srrs, value); + break; + case (e_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED_WITH_ERR): + WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_l2rres, value); + break; + case (e_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED_WITH_ERR): + WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_l3rres, value); + break; + case (e_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED_WITH_ERR): + WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_l4rres, value); + break; + case (e_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED_WITH_ERR): + 
WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_srres, value); + break; + case (e_FM_PCD_PRS_COUNTERS_SOFT_PRS_CYCLES): + WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_spcs, value); + break; + case (e_FM_PCD_PRS_COUNTERS_SOFT_PRS_STALL_CYCLES): + WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_spscs, value); + break; + case (e_FM_PCD_PRS_COUNTERS_HARD_PRS_CYCLE_INCL_STALL_CYCLES): + WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_hxscs, value); + break; + case (e_FM_PCD_PRS_COUNTERS_MURAM_READ_CYCLES): + WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_mrcs, value); + break; + case (e_FM_PCD_PRS_COUNTERS_MURAM_READ_STALL_CYCLES): + WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_mrscs, value); + break; + case (e_FM_PCD_PRS_COUNTERS_MURAM_WRITE_CYCLES): + WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_mwcs, value); + break; + case (e_FM_PCD_PRS_COUNTERS_MURAM_WRITE_STALL_CYCLES): + WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_mwscs, value); + break; + case (e_FM_PCD_PRS_COUNTERS_FPM_COMMAND_STALL_CYCLES): + WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fmpr_fcscs, value); + break; + case (e_FM_PCD_KG_COUNTERS_TOTAL): + WRITE_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->fmkg_tpc,value); + break; + + /*Policer counters*/ + case (e_FM_PCD_PLCR_COUNTERS_YELLOW): + WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_ypcnt, value); + break; + case (e_FM_PCD_PLCR_COUNTERS_RED): + WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_rpcnt, value); + break; + case (e_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_RED): + WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_rrpcnt, value); + break; + case (e_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_YELLOW): + WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_rypcnt, value); + break; + case (e_FM_PCD_PLCR_COUNTERS_TOTAL): + WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_tpcnt, value); + break; + case (e_FM_PCD_PLCR_COUNTERS_LENGTH_MISMATCH): + 
WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_flmcnt, value); + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Unsupported type of counter")); + } + +return E_OK; +} + + diff --git a/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_pcd.h b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_pcd.h new file mode 100644 index 0000000..b6b9b35 --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_pcd.h @@ -0,0 +1,540 @@ +/* + * Copyright 2008-2012 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + +/****************************************************************************** + @File fm_pcd.h + + @Description FM PCD ... +*//***************************************************************************/ +#ifndef __FM_PCD_H +#define __FM_PCD_H + +#include "std_ext.h" +#include "error_ext.h" +#include "list_ext.h" +#include "fm_pcd_ext.h" +#include "fm_common.h" +#include "fsl_fman_prs.h" +#include "fsl_fman_kg.h" + +#define __ERR_MODULE__ MODULE_FM_PCD + + +/****************************/ +/* Defaults */ +/****************************/ +#define DEFAULT_plcrAutoRefresh FALSE +#define DEFAULT_fmPcdKgErrorExceptions (FM_EX_KG_DOUBLE_ECC | FM_EX_KG_KEYSIZE_OVERFLOW) +#define DEFAULT_fmPcdPlcrErrorExceptions (FM_PCD_EX_PLCR_DOUBLE_ECC | FM_PCD_EX_PLCR_INIT_ENTRY_ERROR) +#define DEFAULT_fmPcdPlcrExceptions 0 +#define DEFAULT_fmPcdPrsErrorExceptions (FM_PCD_EX_PRS_DOUBLE_ECC) + +#define DEFAULT_fmPcdPrsExceptions FM_PCD_EX_PRS_SINGLE_ECC +#define DEFAULT_numOfUsedProfilesPerWindow 16 +#define DEFAULT_numOfSharedPlcrProfiles 4 + +/****************************/ +/* Network defines */ +/****************************/ +#define UDP_HEADER_SIZE 8 + +#define ESP_SPI_OFFSET 0 +#define ESP_SPI_SIZE 4 +#define ESP_SEQ_NUM_OFFSET ESP_SPI_SIZE +#define ESP_SEQ_NUM_SIZE 4 + +/****************************/ +/* General defines */ +/****************************/ +#define ILLEGAL_CLS_PLAN 0xff +#define ILLEGAL_NETENV 0xff + +#define FM_PCD_MAX_NUM_OF_ALIAS_HDRS 3 
+ +/****************************/ +/* Error defines */ +/****************************/ + +#define FM_PCD_EX_PLCR_DOUBLE_ECC 0x20000000 +#define FM_PCD_EX_PLCR_INIT_ENTRY_ERROR 0x10000000 +#define FM_PCD_EX_PLCR_PRAM_SELF_INIT_COMPLETE 0x08000000 +#define FM_PCD_EX_PLCR_ATOMIC_ACTION_COMPLETE 0x04000000 + +#define GET_FM_PCD_EXCEPTION_FLAG(bitMask, exception) \ +switch (exception){ \ + case e_FM_PCD_KG_EXCEPTION_DOUBLE_ECC: \ + bitMask = FM_EX_KG_DOUBLE_ECC; break; \ + case e_FM_PCD_PLCR_EXCEPTION_DOUBLE_ECC: \ + bitMask = FM_PCD_EX_PLCR_DOUBLE_ECC; break; \ + case e_FM_PCD_KG_EXCEPTION_KEYSIZE_OVERFLOW: \ + bitMask = FM_EX_KG_KEYSIZE_OVERFLOW; break; \ + case e_FM_PCD_PLCR_EXCEPTION_INIT_ENTRY_ERROR: \ + bitMask = FM_PCD_EX_PLCR_INIT_ENTRY_ERROR; break; \ + case e_FM_PCD_PLCR_EXCEPTION_PRAM_SELF_INIT_COMPLETE: \ + bitMask = FM_PCD_EX_PLCR_PRAM_SELF_INIT_COMPLETE; break; \ + case e_FM_PCD_PLCR_EXCEPTION_ATOMIC_ACTION_COMPLETE: \ + bitMask = FM_PCD_EX_PLCR_ATOMIC_ACTION_COMPLETE; break; \ + case e_FM_PCD_PRS_EXCEPTION_DOUBLE_ECC: \ + bitMask = FM_PCD_EX_PRS_DOUBLE_ECC; break; \ + case e_FM_PCD_PRS_EXCEPTION_SINGLE_ECC: \ + bitMask = FM_PCD_EX_PRS_SINGLE_ECC; break; \ + default: bitMask = 0;break;} + +/***********************************************************************/ +/* Policer defines */ +/***********************************************************************/ +#define FM_PCD_PLCR_GCR_STEN 0x40000000 +#define FM_PCD_PLCR_DOUBLE_ECC 0x80000000 +#define FM_PCD_PLCR_INIT_ENTRY_ERROR 0x40000000 +#define FM_PCD_PLCR_PRAM_SELF_INIT_COMPLETE 0x80000000 +#define FM_PCD_PLCR_ATOMIC_ACTION_COMPLETE 0x40000000 + +/***********************************************************************/ +/* Memory map */ +/***********************************************************************/ +#if defined(__MWERKS__) && !defined(__GNUC__) +#pragma pack(push,1) +#endif /* defined(__MWERKS__) && ... 
*/ + + +typedef _Packed struct { +/* General Configuration and Status Registers */ + volatile uint32_t fmpl_gcr; /* 0x000 FMPL_GCR - FM Policer General Configuration */ + volatile uint32_t fmpl_gsr; /* 0x004 FMPL_GSR - FM Policer Global Status Register */ + volatile uint32_t fmpl_evr; /* 0x008 FMPL_EVR - FM Policer Event Register */ + volatile uint32_t fmpl_ier; /* 0x00C FMPL_IER - FM Policer Interrupt Enable Register */ + volatile uint32_t fmpl_ifr; /* 0x010 FMPL_IFR - FM Policer Interrupt Force Register */ + volatile uint32_t fmpl_eevr; /* 0x014 FMPL_EEVR - FM Policer Error Event Register */ + volatile uint32_t fmpl_eier; /* 0x018 FMPL_EIER - FM Policer Error Interrupt Enable Register */ + volatile uint32_t fmpl_eifr; /* 0x01C FMPL_EIFR - FM Policer Error Interrupt Force Register */ +/* Global Statistic Counters */ + volatile uint32_t fmpl_rpcnt; /* 0x020 FMPL_RPC - FM Policer RED Packets Counter */ + volatile uint32_t fmpl_ypcnt; /* 0x024 FMPL_YPC - FM Policer YELLOW Packets Counter */ + volatile uint32_t fmpl_rrpcnt; /* 0x028 FMPL_RRPC - FM Policer Recolored RED Packet Counter */ + volatile uint32_t fmpl_rypcnt; /* 0x02C FMPL_RYPC - FM Policer Recolored YELLOW Packet Counter */ + volatile uint32_t fmpl_tpcnt; /* 0x030 FMPL_TPC - FM Policer Total Packet Counter */ + volatile uint32_t fmpl_flmcnt; /* 0x034 FMPL_FLMC - FM Policer Frame Length Mismatch Counter */ + volatile uint32_t fmpl_res0[21]; /* 0x038 - 0x08B Reserved */ +/* Profile RAM Access Registers */ + volatile uint32_t fmpl_par; /* 0x08C FMPL_PAR - FM Policer Profile Action Register*/ + t_FmPcdPlcrProfileRegs profileRegs; +/* Error Capture Registers */ + volatile uint32_t fmpl_serc; /* 0x100 FMPL_SERC - FM Policer Soft Error Capture */ + volatile uint32_t fmpl_upcr; /* 0x104 FMPL_UPCR - FM Policer Uninitialized Profile Capture Register */ + volatile uint32_t fmpl_res2; /* 0x108 Reserved */ +/* Debug Registers */ + volatile uint32_t fmpl_res3[61]; /* 0x10C-0x200 Reserved Debug*/ +/* Profile Selection 
Mapping Registers Per Port-ID (n=1-11, 16) */ + volatile uint32_t fmpl_dpmr; /* 0x200 FMPL_DPMR - FM Policer Default Mapping Register */ + volatile uint32_t fmpl_pmr[63]; /*+default 0x204-0x2FF FMPL_PMR1 - FMPL_PMR63, - FM Policer Profile Mapping Registers. + (for port-ID 1-11, only for supported Port-ID registers) */ +} _PackedType t_FmPcdPlcrRegs; + +#if defined(__MWERKS__) && !defined(__GNUC__) +#pragma pack(pop) +#endif /* defined(__MWERKS__) && ... */ + + +/***********************************************************************/ +/* Driver's internal structures */ +/***********************************************************************/ + +typedef struct { + bool known; + uint8_t id; +} t_FmPcdKgSchemesExtractsEntry; + +typedef struct { + t_FmPcdKgSchemesExtractsEntry extractsArray[FM_PCD_KG_MAX_NUM_OF_EXTRACTS_PER_KEY]; +} t_FmPcdKgSchemesExtracts; + +typedef struct { + t_Handle h_Manip; + bool keepRes; + e_FmPcdEngine nextEngine; + uint8_t parseCode; +} t_FmPcdInfoForManip; + +/**************************************************************************//** + @Description A structure of parameters to communicate + between the port and PCD regarding the KG scheme. +*//***************************************************************************/ +typedef struct { + uint8_t netEnvId; /* in */ + uint8_t numOfDistinctionUnits; /* in */ + uint8_t unitIds[FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS]; /* in */ + uint32_t vector; /* out */ +} t_NetEnvParams; + +typedef struct { + bool allocated; + uint8_t ownerId; /* guestId for KG in multi-partition only. 
+ portId for PLCR in any environment */ +} t_FmPcdAllocMng; + +typedef struct { + volatile bool lock; + bool used; + uint8_t owners; + uint8_t netEnvId; + uint8_t guestId; + uint8_t baseEntry; + uint16_t sizeOfGrp; + protocolOpt_t optArray[FM_PCD_MAX_NUM_OF_OPTIONS(FM_PCD_MAX_NUM_OF_CLS_PLANS)]; +} t_FmPcdKgClsPlanGrp; + +typedef struct { + t_Handle h_FmPcd; + uint8_t schemeId; + t_FmPcdLock *p_Lock; + bool valid; + uint8_t netEnvId; + uint8_t owners; + uint32_t matchVector; + uint32_t ccUnits; + bool nextRelativePlcrProfile; + uint16_t relativeProfileId; + uint16_t numOfProfiles; + t_FmPcdKgKeyOrder orderedArray; + e_FmPcdEngine nextEngine; + e_FmPcdDoneAction doneAction; + uint8_t pointedOwners; + uint32_t requiredAction; + bool extractedOrs; + uint8_t bitOffsetInPlcrProfile; + bool directPlcr; +#if (DPAA_VERSION >= 11) + bool vspe; +#endif +} t_FmPcdKgScheme; + +typedef _Packed union { + struct fman_kg_scheme_regs schemeRegs; + struct fman_kg_pe_regs portRegs; + struct fman_kg_cp_regs clsPlanRegs; +} _PackedType u_FmPcdKgIndirectAccessRegs; + +typedef struct { + struct fman_kg_regs *p_FmPcdKgRegs; + uint32_t schemeExceptionsBitMask; + uint8_t numOfSchemes; + t_Handle h_HwSpinlock; + uint8_t schemesIds[FM_PCD_KG_NUM_OF_SCHEMES]; + t_FmPcdKgScheme schemes[FM_PCD_KG_NUM_OF_SCHEMES]; + t_FmPcdKgClsPlanGrp clsPlanGrps[FM_MAX_NUM_OF_PORTS]; + uint8_t emptyClsPlanGrpId; + t_FmPcdAllocMng schemesMng[FM_PCD_KG_NUM_OF_SCHEMES]; /* only for MASTER ! 
*/ + t_FmPcdAllocMng clsPlanBlocksMng[FM_PCD_MAX_NUM_OF_CLS_PLANS/CLS_PLAN_NUM_PER_GRP]; + u_FmPcdKgIndirectAccessRegs *p_IndirectAccessRegs; +} t_FmPcdKg; + +typedef struct { + uint16_t profilesBase; + uint16_t numOfProfiles; + t_Handle h_FmPort; +} t_FmPcdPlcrMapParam; + +typedef struct { + uint16_t absoluteProfileId; + t_Handle h_FmPcd; + bool valid; + t_FmPcdLock *p_Lock; + t_FmPcdAllocMng profilesMng; + uint8_t pointedOwners; + uint32_t requiredAction; + e_FmPcdEngine nextEngineOnGreen; /**< Green next engine type */ + u_FmPcdPlcrNextEngineParams paramsOnGreen; /**< Green next engine params */ + + e_FmPcdEngine nextEngineOnYellow; /**< Yellow next engine type */ + u_FmPcdPlcrNextEngineParams paramsOnYellow; /**< Yellow next engine params */ + + e_FmPcdEngine nextEngineOnRed; /**< Red next engine type */ + u_FmPcdPlcrNextEngineParams paramsOnRed; /**< Red next engine params */ +} t_FmPcdPlcrProfile; + +typedef struct { + t_FmPcdPlcrRegs *p_FmPcdPlcrRegs; + uint16_t partPlcrProfilesBase; + uint16_t partNumOfPlcrProfiles; + t_FmPcdPlcrProfile profiles[FM_PCD_PLCR_NUM_ENTRIES]; + uint16_t numOfSharedProfiles; + uint16_t sharedProfilesIds[FM_PCD_PLCR_NUM_ENTRIES]; + t_FmPcdPlcrMapParam portsMapping[FM_MAX_NUM_OF_PORTS]; + t_Handle h_HwSpinlock; + t_Handle h_SwSpinlock; +} t_FmPcdPlcr; + +typedef struct { + uint32_t *p_SwPrsCode; + uint32_t *p_CurrSwPrs; + uint8_t currLabel; + struct fman_prs_regs *p_FmPcdPrsRegs; + t_FmPcdPrsLabelParams labelsTable[FM_PCD_PRS_NUM_OF_LABELS]; + uint32_t fmPcdPrsPortIdStatistics; +} t_FmPcdPrs; + +typedef struct { + struct { + e_NetHeaderType hdr; + protocolOpt_t opt; /* only one option !! */ + } hdrs[FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS]; +} t_FmPcdIntDistinctionUnit; + +typedef struct { + e_NetHeaderType hdr; + protocolOpt_t opt; /* only one option !! 
*/ + e_NetHeaderType aliasHdr; +} t_FmPcdNetEnvAliases; + +typedef struct { + uint8_t netEnvId; + t_Handle h_FmPcd; + t_Handle h_Spinlock; + bool used; + uint8_t owners; + uint8_t clsPlanGrpId; + t_FmPcdIntDistinctionUnit units[FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS]; + uint32_t unitsVectors[FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS]; + uint32_t lcvs[FM_PCD_PRS_NUM_OF_HDRS]; + uint32_t macsecVector; + t_FmPcdNetEnvAliases aliasHdrs[FM_PCD_MAX_NUM_OF_ALIAS_HDRS]; +} t_FmPcdNetEnv; + +typedef struct { + struct fman_prs_cfg dfltCfg; + bool plcrAutoRefresh; + uint16_t prsMaxParseCycleLimit; +} t_FmPcdDriverParam; + +typedef struct { + t_Handle h_Fm; + t_Handle h_FmMuram; + t_FmRevisionInfo fmRevInfo; + + uint64_t physicalMuramBase; + + t_Handle h_Spinlock; + t_List freeLocksLst; + t_List acquiredLocksLst; + + t_Handle h_IpcSession; /* relevant for guest only */ + bool enabled; + uint8_t guestId; /**< Guest Partition Id */ + uint8_t numOfEnabledGuestPartitionsPcds; + char fmPcdModuleName[MODULE_NAME_SIZE]; + char fmPcdIpcHandlerModuleName[MODULE_NAME_SIZE]; /* relevant for guest only - this is the master's name */ + t_FmPcdNetEnv netEnvs[FM_MAX_NUM_OF_PORTS]; + t_FmPcdKg *p_FmPcdKg; + t_FmPcdPlcr *p_FmPcdPlcr; + t_FmPcdPrs *p_FmPcdPrs; + + void *p_CcShadow; /**< CC MURAM shadow */ + uint32_t ccShadowSize; + uint32_t ccShadowAlign; + volatile bool shadowLock; + t_Handle h_ShadowSpinlock; + + t_Handle h_Hc; + + uint32_t exceptions; + t_FmPcdExceptionCallback *f_Exception; + t_FmPcdIdExceptionCallback *f_FmPcdIndexedException; + t_Handle h_App; + uintptr_t ipv6FrameIdAddr; + bool advancedOffloadSupport; + + t_FmPcdDriverParam *p_FmPcdDriverParam; +} t_FmPcd; + +#if (DPAA_VERSION >= 11) +typedef uint8_t t_FmPcdFrmReplicUpdateType; +#define FRM_REPLIC_UPDATE_COUNTER 0x01 +#define FRM_REPLIC_UPDATE_INFO 0x02 +#endif /* (DPAA_VERSION >= 11) */ +/***********************************************************************/ +/* PCD internal routines */ 
+/***********************************************************************/ + +t_Error PcdGetVectorForOpt(t_FmPcd *p_FmPcd, uint8_t netEnvId, protocolOpt_t opt, uint32_t *p_Vector); +t_Error PcdGetUnitsVector(t_FmPcd *p_FmPcd, t_NetEnvParams *p_Params); +bool PcdNetEnvIsUnitWithoutOpts(t_FmPcd *p_FmPcd, uint8_t netEnvId, uint32_t unitVector); +t_Error PcdGetClsPlanGrpParams(t_FmPcd *p_FmPcd, t_FmPcdKgInterModuleClsPlanGrpParams *p_GrpParams); +void FmPcdSetClsPlanGrpId(t_FmPcd *p_FmPcd, uint8_t netEnvId, uint8_t clsPlanGrpId); +e_NetHeaderType FmPcdGetAliasHdr(t_FmPcd *p_FmPcd, uint8_t netEnvId, e_NetHeaderType hdr); +uint8_t FmPcdNetEnvGetUnitIdForSingleHdr(t_FmPcd *p_FmPcd, uint8_t netEnvId, e_NetHeaderType hdr); +uint8_t FmPcdNetEnvGetUnitId(t_FmPcd *p_FmPcd, uint8_t netEnvId, e_NetHeaderType hdr, bool interchangable, protocolOpt_t opt); + +t_Error FmPcdManipBuildIpReassmScheme(t_FmPcd *p_FmPcd, t_Handle h_NetEnv, t_Handle h_CcTree, t_Handle h_Manip, bool isIpv4, uint8_t groupId); +t_Error FmPcdManipDeleteIpReassmSchemes(t_Handle h_Manip); +bool FmPcdManipIpReassmIsIpv6Hdr(t_Handle h_Manip); + +t_Handle KgConfig( t_FmPcd *p_FmPcd, t_FmPcdParams *p_FmPcdParams); +t_Error KgInit(t_FmPcd *p_FmPcd); +t_Error KgFree(t_FmPcd *p_FmPcd); +void KgSetClsPlan(t_Handle h_FmPcd, t_FmPcdKgInterModuleClsPlanSet *p_Set); +bool KgIsSchemeAlwaysDirect(t_Handle h_FmPcd, uint8_t schemeId); +void KgEnable(t_FmPcd *p_FmPcd); +void KgDisable(t_FmPcd *p_FmPcd); +t_Error KgAllocClsPlanEntries(t_Handle h_FmPcd, uint16_t numOfClsPlanEntries, uint8_t guestId, uint8_t *p_First); +void KgFreeClsPlanEntries(t_Handle h_FmPcd, uint16_t numOfClsPlanEntries, uint8_t guestId, uint8_t base); + +/* only for MULTI partittion */ +t_Error FmPcdKgAllocSchemes(t_Handle h_FmPcd, uint8_t numOfSchemes, uint8_t guestId, uint8_t *p_SchemesIds); +t_Error FmPcdKgFreeSchemes(t_Handle h_FmPcd, uint8_t numOfSchemes, uint8_t guestId, uint8_t *p_SchemesIds); +/* only for SINGLE partittion */ +t_Error 
KgBindPortToSchemes(t_Handle h_FmPcd , uint8_t hardwarePortId, uint32_t spReg); + +t_FmPcdLock *FmPcdAcquireLock(t_Handle h_FmPcd); +void FmPcdReleaseLock(t_Handle h_FmPcd, t_FmPcdLock *p_Lock); + +t_Handle PlcrConfig(t_FmPcd *p_FmPcd, t_FmPcdParams *p_FmPcdParams); +t_Error PlcrInit(t_FmPcd *p_FmPcd); +t_Error PlcrFree(t_FmPcd *p_FmPcd); +void PlcrEnable(t_FmPcd *p_FmPcd); +void PlcrDisable(t_FmPcd *p_FmPcd); +uint16_t PlcrAllocProfilesForPartition(t_FmPcd *p_FmPcd, uint16_t base, uint16_t numOfProfiles, uint8_t guestId); +void PlcrFreeProfilesForPartition(t_FmPcd *p_FmPcd, uint16_t base, uint16_t numOfProfiles, uint8_t guestId); +t_Error PlcrSetPortProfiles(t_FmPcd *p_FmPcd, + uint8_t hardwarePortId, + uint16_t numOfProfiles, + uint16_t base); +t_Error PlcrClearPortProfiles(t_FmPcd *p_FmPcd, uint8_t hardwarePortId); + +t_Handle PrsConfig(t_FmPcd *p_FmPcd,t_FmPcdParams *p_FmPcdParams); +t_Error PrsInit(t_FmPcd *p_FmPcd); +void PrsEnable(t_FmPcd *p_FmPcd); +void PrsDisable(t_FmPcd *p_FmPcd); +void PrsFree(t_FmPcd *p_FmPcd ); +t_Error PrsIncludePortInStatistics(t_FmPcd *p_FmPcd, uint8_t hardwarePortId, bool include); + +t_Error FmPcdCcGetGrpParams(t_Handle treeId, uint8_t grpId, uint32_t *p_GrpBits, uint8_t *p_GrpBase); +uint8_t FmPcdCcGetOffset(t_Handle h_CcNode); +uint8_t FmPcdCcGetParseCode(t_Handle h_CcNode); +uint16_t FmPcdCcGetNumOfKeys(t_Handle h_CcNode); +t_Error ValidateNextEngineParams(t_Handle h_FmPcd, t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams, e_FmPcdCcStatsMode supportedStatsMode); + +void FmPcdManipUpdateOwner(t_Handle h_Manip, bool add); +t_Error FmPcdManipCheckParamsForCcNextEngine(t_FmPcdCcNextEngineParams *p_InfoForManip, uint32_t *requiredAction); +void FmPcdManipUpdateAdResultForCc(t_Handle h_Manip, + t_FmPcdCcNextEngineParams *p_CcNextEngineParams, + t_Handle p_Ad, + t_Handle *p_AdNewPtr); +void FmPcdManipUpdateAdContLookupForCc(t_Handle h_Manip, t_Handle p_Ad, t_Handle *p_AdNew, uint32_t adTableOffset); +void 
FmPcdManipUpdateOwner(t_Handle h_Manip, bool add); +t_Error FmPcdManipCheckParamsWithCcNodeParams(t_Handle h_Manip, t_Handle h_FmPcdCcNode); +#ifdef FM_CAPWAP_SUPPORT +t_Handle FmPcdManipApplSpecificBuild(void); +bool FmPcdManipIsCapwapApplSpecific(t_Handle h_Manip); +#endif /* FM_CAPWAP_SUPPORT */ +#if (DPAA_VERSION >= 11) +void * FrmReplicGroupGetSourceTableDescriptor(t_Handle h_ReplicGroup); +void FrmReplicGroupUpdateOwner(t_Handle h_ReplicGroup, bool add); +void FrmReplicGroupUpdateAd(t_Handle h_ReplicGroup, void *p_Ad, t_Handle *h_AdNew); + +void FmPcdCcGetAdTablesThatPointOnReplicGroup(t_Handle h_Node, + t_Handle h_ReplicGroup, + t_List *p_AdTables, + uint32_t *p_NumOfAdTables); +#endif /* (DPAA_VERSION >= 11) */ + +void EnqueueNodeInfoToRelevantLst(t_List *p_List, t_CcNodeInformation *p_CcInfo, t_Handle h_Spinlock); +void DequeueNodeInfoFromRelevantLst(t_List *p_List, t_Handle h_Info, t_Handle h_Spinlock); +t_CcNodeInformation* FindNodeInfoInReleventLst(t_List *p_List, t_Handle h_Info, t_Handle h_Spinlock); +t_List *FmPcdManipGetSpinlock(t_Handle h_Manip); +t_List *FmPcdManipGetNodeLstPointedOnThisManip(t_Handle h_Manip); + +typedef struct +{ + t_Handle h_StatsAd; + t_Handle h_StatsCounters; +#if (DPAA_VERSION >= 11) + t_Handle h_StatsFLRs; +#endif /* (DPAA_VERSION >= 11) */ +} t_FmPcdCcStatsParams; + +void NextStepAd(t_Handle h_Ad, + t_FmPcdCcStatsParams *p_FmPcdCcStatsParams, + t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams, + t_FmPcd *p_FmPcd); +void ReleaseLst(t_List *p_List); + +static __inline__ t_Handle FmPcdGetMuramHandle(t_Handle h_FmPcd) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + ASSERT_COND(p_FmPcd); + return p_FmPcd->h_FmMuram; +} + +static __inline__ uint64_t FmPcdGetMuramPhysBase(t_Handle h_FmPcd) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + ASSERT_COND(p_FmPcd); + return p_FmPcd->physicalMuramBase; +} + +static __inline__ uint32_t FmPcdLockSpinlock(t_FmPcdLock *p_Lock) +{ + ASSERT_COND(p_Lock); + return 
XX_LockIntrSpinlock(p_Lock->h_Spinlock); +} + +static __inline__ void FmPcdUnlockSpinlock(t_FmPcdLock *p_Lock, uint32_t flags) +{ + ASSERT_COND(p_Lock); + XX_UnlockIntrSpinlock(p_Lock->h_Spinlock, flags); +} + +static __inline__ bool FmPcdLockTryLock(t_FmPcdLock *p_Lock) +{ + uint32_t intFlags; + + ASSERT_COND(p_Lock); + intFlags = XX_LockIntrSpinlock(p_Lock->h_Spinlock); + if (p_Lock->flag) + { + XX_UnlockIntrSpinlock(p_Lock->h_Spinlock, intFlags); + return FALSE; + } + p_Lock->flag = TRUE; + XX_UnlockIntrSpinlock(p_Lock->h_Spinlock, intFlags); + return TRUE; +} + +static __inline__ void FmPcdLockUnlock(t_FmPcdLock *p_Lock) +{ + ASSERT_COND(p_Lock); + p_Lock->flag = FALSE; +} + + +#endif /* __FM_PCD_H */ diff --git a/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_pcd_ipc.h b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_pcd_ipc.h new file mode 100644 index 0000000..5184ce6 --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_pcd_ipc.h @@ -0,0 +1,280 @@ +/* + * Copyright 2008-2012 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + +/**************************************************************************//** + @File fm_pcd_ipc.h + + @Description FM PCD Inter-Partition prototypes, structures and definitions. +*//***************************************************************************/ +#ifndef __FM_PCD_IPC_H +#define __FM_PCD_IPC_H + +#include "std_ext.h" + + +/**************************************************************************//** + @Group FM_grp Frame Manager API + + @Description FM API functions, definitions and enums + + @{ +*//***************************************************************************/ + + +#if defined(__MWERKS__) && !defined(__GNUC__) +#pragma pack(push,1) +#endif /* defined(__MWERKS__) && ... */ + +/**************************************************************************//** + @Description Structure for getting a sw parser address according to a label + Fields commented 'IN' are passed by the port module to be used + by the FM module. 
+ Fields commented 'OUT' will be filled by FM before returning to port. +*//***************************************************************************/ +typedef _Packed struct t_FmPcdIpcSwPrsLable +{ + uint32_t enumHdr; /**< IN. The existance of this header will envoke + the sw parser code. */ + uint8_t indexPerHdr; /**< IN. Normally 0, if more than one sw parser + attachments for the same header, use this + + index to distinguish between them. */ +} _PackedType t_FmPcdIpcSwPrsLable; + +/**************************************************************************//** + @Description Structure for port-PCD communication. + Fields commented 'IN' are passed by the port module to be used + by the FM module. + Fields commented 'OUT' will be filled by FM before returning to port. + Some fields are optional (depending on configuration) and + will be analized by the port and FM modules accordingly. +*//***************************************************************************/ + +typedef struct t_FmPcdIpcKgSchemesParams +{ + uint8_t guestId; + uint8_t numOfSchemes; + uint8_t schemesIds[FM_PCD_KG_NUM_OF_SCHEMES]; +} _PackedType t_FmPcdIpcKgSchemesParams; + +typedef struct t_FmPcdIpcKgClsPlanParams +{ + uint8_t guestId; + uint16_t numOfClsPlanEntries; + uint8_t clsPlanBase; +} _PackedType t_FmPcdIpcKgClsPlanParams; + +typedef _Packed struct t_FmPcdIpcPrsIncludePort +{ + uint8_t hardwarePortId; + bool include; +} _PackedType t_FmPcdIpcPrsIncludePort; + + +#define FM_PCD_MAX_REPLY_SIZE 16 +#define FM_PCD_MAX_MSG_SIZE 36 +#define FM_PCD_MAX_REPLY_BODY_SIZE 36 + +typedef _Packed struct { + uint32_t msgId; + uint8_t msgBody[FM_PCD_MAX_MSG_SIZE]; +} _PackedType t_FmPcdIpcMsg; + +typedef _Packed struct t_FmPcdIpcReply { + uint32_t error; + uint8_t replyBody[FM_PCD_MAX_REPLY_BODY_SIZE]; +} _PackedType t_FmPcdIpcReply; + +typedef _Packed struct t_FmIpcResourceAllocParams { + uint8_t guestId; + uint16_t base; + uint16_t num; +}_PackedType t_FmIpcResourceAllocParams; + +#if 
defined(__MWERKS__) && !defined(__GNUC__) +#pragma pack(pop) +#endif /* defined(__MWERKS__) && ... */ + + + +/**************************************************************************//** + @Function FM_PCD_ALLOC_KG_SCHEMES + + @Description Used by FM PCD front-end in order to allocate KG resources + + @Param[in/out] t_FmPcdIpcKgAllocParams Pointer +*//***************************************************************************/ +#define FM_PCD_ALLOC_KG_SCHEMES 3 + +/**************************************************************************//** + @Function FM_PCD_FREE_KG_SCHEMES + + @Description Used by FM PCD front-end in order to Free KG resources + + @Param[in/out] t_FmPcdIpcKgSchemesParams Pointer +*//***************************************************************************/ +#define FM_PCD_FREE_KG_SCHEMES 4 + +/**************************************************************************//** + @Function FM_PCD_ALLOC_PROFILES + + @Description Used by FM PCD front-end in order to allocate Policer profiles + + @Param[in/out] t_FmIpcResourceAllocParams Pointer +*//***************************************************************************/ +#define FM_PCD_ALLOC_PROFILES 5 + +/**************************************************************************//** + @Function FM_PCD_FREE_PROFILES + + @Description Used by FM PCD front-end in order to Free Policer profiles + + @Param[in/out] t_FmIpcResourceAllocParams Pointer +*//***************************************************************************/ +#define FM_PCD_FREE_PROFILES 6 + +/**************************************************************************//** + @Function FM_PCD_SET_PORT_PROFILES + + @Description Used by FM PCD front-end in order to allocate Policer profiles + for specific port + + @Param[in/out] t_FmIpcResourceAllocParams Pointer +*//***************************************************************************/ +#define FM_PCD_SET_PORT_PROFILES 7 + 
+/**************************************************************************//** + @Function FM_PCD_CLEAR_PORT_PROFILES + + @Description Used by FM PCD front-end in order to allocate Policer profiles + for specific port + + @Param[in/out] t_FmIpcResourceAllocParams Pointer +*//***************************************************************************/ +#define FM_PCD_CLEAR_PORT_PROFILES 8 + +/**************************************************************************//** + @Function FM_PCD_GET_PHYS_MURAM_BASE + + @Description Used by FM PCD front-end in order to get MURAM base address + + @Param[in/out] t_FmPcdIcPhysAddr Pointer +*//***************************************************************************/ +#define FM_PCD_GET_PHYS_MURAM_BASE 9 + +/**************************************************************************//** + @Function FM_PCD_GET_SW_PRS_OFFSET + + @Description Used by FM front-end to get the SW parser offset of the start of + code relevant to a given label. + + @Param[in/out] t_FmPcdIpcSwPrsLable Pointer +*//***************************************************************************/ +#define FM_PCD_GET_SW_PRS_OFFSET 10 + +/**************************************************************************//** + @Function FM_PCD_MASTER_IS_ENABLED + + @Description Used by FM front-end in order to verify + PCD enablement. 
+ + @Param[in] bool Pointer +*//***************************************************************************/ +#define FM_PCD_MASTER_IS_ENABLED 15 + +/**************************************************************************//** + @Function FM_PCD_GUEST_DISABLE + + @Description Used by FM front-end to inform back-end when + front-end PCD is disabled + + @Param[in] None +*//***************************************************************************/ +#define FM_PCD_GUEST_DISABLE 16 + +/**************************************************************************//** + @Function FM_PCD_FREE_KG_CLSPLAN + + @Description Used by FM PCD front-end in order to Free KG classification plan entries + + @Param[in/out] t_FmPcdIpcKgClsPlanParams Pointer +*//***************************************************************************/ +#define FM_PCD_FREE_KG_CLSPLAN 22 + +/**************************************************************************//** + @Function FM_PCD_ALLOC_KG_CLSPLAN + + @Description Used by FM PCD front-end in order to allocate KG classification plan entries + + @Param[in/out] t_FmPcdIpcKgClsPlanParams Pointer +*//***************************************************************************/ +#define FM_PCD_ALLOC_KG_CLSPLAN 23 + +/**************************************************************************//** + @Function FM_PCD_MASTER_IS_ALIVE + + @Description Used by FM front-end to check that back-end exists + + @Param[in] None +*//***************************************************************************/ +#define FM_PCD_MASTER_IS_ALIVE 24 + +/**************************************************************************//** + @Function FM_PCD_GET_COUNTER + + @Description Used by FM front-end to read PCD counters + + @Param[in/out] t_FmPcdIpcGetCounter Pointer +*//***************************************************************************/ +#define FM_PCD_GET_COUNTER 25 + +/**************************************************************************//** + @Function 
FM_PCD_PRS_INC_PORT_STATS + + @Description Used by FM front-end to set/clear statistics for port + + @Param[in/out] t_FmPcdIpcPrsIncludePort Pointer +*//***************************************************************************/ +#define FM_PCD_PRS_INC_PORT_STATS 26 + +#if (DPAA_VERSION >= 11) +/* TODO - doc */ +#define FM_PCD_ALLOC_SP 27 +#endif /* (DPAA_VERSION >= 11) */ + + +/** @} */ /* end of FM_PCD_IPC_grp group */ +/** @} */ /* end of FM_grp group */ + + +#endif /* __FM_PCD_IPC_H */ diff --git a/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_plcr.c b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_plcr.c new file mode 100644 index 0000000..ded7960 --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_plcr.c @@ -0,0 +1,1927 @@ +/* + * Copyright 2008-2012 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. 
+ * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + +/****************************************************************************** + @File fm_plcr.c + + @Description FM PCD POLICER... +*//***************************************************************************/ +#include "std_ext.h" +#include "error_ext.h" +#include "string_ext.h" +#include "debug_ext.h" +#include "net_ext.h" +#include "fm_ext.h" + +#include "fm_common.h" +#include "fm_pcd.h" +#include "fm_hc.h" +#include "fm_pcd_ipc.h" +#include "fm_plcr.h" + + +/****************************************/ +/* static functions */ +/****************************************/ + +static uint32_t PlcrProfileLock(t_Handle h_Profile) +{ + ASSERT_COND(h_Profile); + return FmPcdLockSpinlock(((t_FmPcdPlcrProfile *)h_Profile)->p_Lock); +} + +static void PlcrProfileUnlock(t_Handle h_Profile, uint32_t intFlags) +{ + ASSERT_COND(h_Profile); + FmPcdUnlockSpinlock(((t_FmPcdPlcrProfile *)h_Profile)->p_Lock, intFlags); +} + +static bool PlcrProfileFlagTryLock(t_Handle h_Profile) +{ + ASSERT_COND(h_Profile); + return FmPcdLockTryLock(((t_FmPcdPlcrProfile *)h_Profile)->p_Lock); +} + +static void PlcrProfileFlagUnlock(t_Handle h_Profile) +{ + ASSERT_COND(h_Profile); + FmPcdLockUnlock(((t_FmPcdPlcrProfile 
*)h_Profile)->p_Lock);
+}
+
+/* Take the policer module's hardware spinlock (h_HwSpinlock); returns the
+ * saved interrupt flags to be passed back to PlcrHwUnlock(). */
+static uint32_t PlcrHwLock(t_Handle h_FmPcdPlcr)
+{
+    ASSERT_COND(h_FmPcdPlcr);
+    return XX_LockIntrSpinlock(((t_FmPcdPlcr*)h_FmPcdPlcr)->h_HwSpinlock);
+}
+
+/* Release the policer hardware spinlock, restoring the interrupt flags
+ * returned by the matching PlcrHwLock(). */
+static void PlcrHwUnlock(t_Handle h_FmPcdPlcr, uint32_t intFlags)
+{
+    ASSERT_COND(h_FmPcdPlcr);
+    XX_UnlockIntrSpinlock(((t_FmPcdPlcr*)h_FmPcdPlcr)->h_HwSpinlock, intFlags);
+}
+
+/* Take the policer module's software-state spinlock (h_SwSpinlock), which
+ * guards the profiles[] bookkeeping; returns the saved interrupt flags. */
+static uint32_t PlcrSwLock(t_Handle h_FmPcdPlcr)
+{
+    ASSERT_COND(h_FmPcdPlcr);
+    return XX_LockIntrSpinlock(((t_FmPcdPlcr*)h_FmPcdPlcr)->h_SwSpinlock);
+}
+
+/* Release the policer software-state spinlock taken by PlcrSwLock(). */
+static void PlcrSwUnlock(t_Handle h_FmPcdPlcr, uint32_t intFlags)
+{
+    ASSERT_COND(h_FmPcdPlcr);
+    XX_UnlockIntrSpinlock(((t_FmPcdPlcr*)h_FmPcdPlcr)->h_SwSpinlock, intFlags);
+}
+
+/* Linear scan of the sharedProfilesIds[] table: returns TRUE iff
+ * absoluteProfileId is one of the shared policer profiles. */
+static bool IsProfileShared(t_Handle h_FmPcd, uint16_t absoluteProfileId)
+{
+    t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
+    uint16_t i;
+
+    SANITY_CHECK_RETURN_VALUE(p_FmPcd, E_INVALID_HANDLE, FALSE);
+
+    for (i=0;i<p_FmPcd->p_FmPcdPlcr->numOfSharedProfiles;i++)
+        if (p_FmPcd->p_FmPcdPlcr->sharedProfilesIds[i] == absoluteProfileId)
+            return TRUE;
+    return FALSE;
+}
+
+/* Build the next-invoke-action (NIA) word for one profile color result.
+ * Validates the requested next engine/parameters and writes the encoded
+ * value (FM_PCD_PLCR_NIA_VALID | engine-specific bits) to *nextAction.
+ * Returns E_OK, or an error if the selection is invalid. */
+static t_Error SetProfileNia(t_FmPcd *p_FmPcd, e_FmPcdEngine nextEngine, u_FmPcdPlcrNextEngineParams *p_NextEngineParams, uint32_t *nextAction)
+{
+    uint32_t nia;
+    uint16_t absoluteProfileId;
+    uint8_t relativeSchemeId, physicalSchemeId;
+
+    nia = FM_PCD_PLCR_NIA_VALID;
+
+    switch (nextEngine)
+    {
+        /* Terminal action: frame is either dropped or enqueued by BMI. */
+        case e_FM_PCD_DONE :
+            switch (p_NextEngineParams->action)
+            {
+                case e_FM_PCD_DROP_FRAME :
+                    nia |= GET_NIA_BMI_AC_DISCARD_FRAME(p_FmPcd);
+                    break;
+                case e_FM_PCD_ENQ_FRAME:
+                    nia |= GET_NIA_BMI_AC_ENQ_FRAME(p_FmPcd);
+                    break;
+                default:
+                    RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
+            }
+            break;
+        /* Chain to KeyGen: only a valid, always-direct scheme is allowed. */
+        case e_FM_PCD_KG:
+            physicalSchemeId = FmPcdKgGetSchemeId(p_NextEngineParams->h_DirectScheme);
+            relativeSchemeId = FmPcdKgGetRelativeSchemeId(p_FmPcd, physicalSchemeId);
+            if (relativeSchemeId >= FM_PCD_KG_NUM_OF_SCHEMES)
+                RETURN_ERROR(MAJOR, E_NOT_IN_RANGE, NO_MSG);
+            if
(!FmPcdKgIsSchemeValidSw(p_NextEngineParams->h_DirectScheme))
+                RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid direct scheme."));
+            if (!KgIsSchemeAlwaysDirect(p_FmPcd, relativeSchemeId))
+                RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Policer Profile may point only to a scheme that is always direct."));
+            nia |= NIA_ENG_KG | NIA_KG_DIRECT | physicalSchemeId;
+            break;
+        /* Chain to another policer profile: it must be a shared, valid profile. */
+        case e_FM_PCD_PLCR:
+            absoluteProfileId = ((t_FmPcdPlcrProfile *)p_NextEngineParams->h_Profile)->absoluteProfileId;
+            if (!IsProfileShared(p_FmPcd, absoluteProfileId))
+                RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Next profile must be a shared profile"));
+            if (!FmPcdPlcrIsProfileValid(p_FmPcd, absoluteProfileId))
+                RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid profile "));
+            nia |= NIA_ENG_PLCR | NIA_PLCR_ABSOLUTE | absoluteProfileId;
+            break;
+        default:
+            RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
+    }
+
+    *nextAction = nia;
+
+    return E_OK;
+}
+
+/* Encode the internal floating-point-position shift into the PEMODE FPP
+ * field: internal values 0-15 map to 16-31 and 16-31 map to 0-15
+ * (matches the "convert FP as it should be written to reg" note at the
+ * caller, CalcRates()). */
+static uint32_t CalcFPP(uint32_t fpp)
+{
+    if (fpp > 15)
+        return 15 - (0x1f - fpp);
+    else
+        return 16 + fpp;
+}
+
+/* Convert a configured information rate into the register representation:
+ * tokens (bytes in byte mode, packets in packet mode) per time-stamp unit,
+ * scaled left by fppShift. The integer part is returned in *p_Integer and
+ * the fractional remainder, scaled by 2^16, in *p_Fraction.
+ *
+ * @Param[in]  rateMode        byte mode (rate in Kbps) or packet mode.
+ * @Param[in]  rate            configured rate (Kbps in byte mode).
+ * @Param[in]  tsuInTenthNano  time-stamp unit expressed in 10ns ticks.
+ * @Param[in]  fppShift        floating-point-position shift to apply.
+ * @Param[out] p_Integer       integer part of tokens-per-TSU.
+ * @Param[out] p_Fraction      fractional part, multiplied by 2^16. */
+static void GetInfoRateReg(e_FmPcdPlcrRateMode rateMode,
+                           uint32_t rate,
+                           uint64_t tsuInTenthNano,
+                           uint32_t fppShift,
+                           uint64_t *p_Integer,
+                           uint64_t *p_Fraction)
+{
+    uint64_t tmp, div;
+
+    if (rateMode == e_FM_PCD_PLCR_BYTE_MODE)
+    {
+        /* now we calculate the initial integer for the bigger rate */
+        /* from Kbps to Bytes/TSU */
+        tmp = (uint64_t)rate;
+        tmp *= 1000; /* kb --> b */
+        tmp *= tsuInTenthNano; /* bps --> bpTsu(in 10nano) */
+
+        div = 1000000000; /* nano */
+        div *= 10; /* 10 nano */
+        div *= 8; /* bit to byte */
+    }
+    else
+    {
+        /* now we calculate the initial integer for the bigger rate */
+        /* packet mode: rate is already in packets, so no *1000 (Kb->b)
+         * and no /8 (bit->byte) scaling is applied */
+        tmp = (uint64_t)rate;
+        tmp *= tsuInTenthNano; /* bps --> bpTsu(in 10nano) */
+
+        div = 1000000000; /* nano */
+        div *= 10; /* 10 nano */
+    }
+    *p_Integer = (tmp<<fppShift)/div;
+
+    /* for calculating the fraction, we will recalculate cir and deduct the integer.
+     * For precision, we will multiply by 2^16.
we do not divid back, since we write + * this value as fraction - see spec. + */ + *p_Fraction = (((tmp<<fppShift)<<16) - ((*p_Integer<<16)*div))/div; +} + +/* .......... */ + +static void CalcRates(t_Handle h_FmPcd, + t_FmPcdPlcrNonPassthroughAlgParams *p_NonPassthroughAlgParam, + uint32_t *cir, + uint32_t *cbs, + uint32_t *pir_eir, + uint32_t *pbs_ebs, + uint32_t *fpp) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + uint64_t integer, fraction; + uint32_t temp, tsuInTenthNanos, bitFor1Micro; + uint8_t fppShift=0; + + bitFor1Micro = FmGetTimeStampScale(p_FmPcd->h_Fm); /* TimeStamp per nano seconds units */ + /* we want the tsu to count 10 nano for better precision normally tsu is 3.9 nano, now we will get 39 */ + tsuInTenthNanos = (uint32_t)(1000*10/(1<<bitFor1Micro)); + + /* we choose the faster rate to calibrate fpp */ + /* The meaning of this step: + * when fppShift is 0 it means all TS bits are treated as integer and TSU is the TS LSB count. + * In this configuration we calculate the integer and fraction that represent the higher infoRate + * When this is done, we can tell where we have "spare" unused bits and optimize the division of TS + * into "integer" and "fraction" where the logic is - as many bits as possible for integer at + * high rate, as many bits as possible for fraction at low rate. + */ + if (p_NonPassthroughAlgParam->comittedInfoRate > p_NonPassthroughAlgParam->peakOrAccessiveInfoRate) + GetInfoRateReg(p_NonPassthroughAlgParam->rateMode, p_NonPassthroughAlgParam->comittedInfoRate, tsuInTenthNanos, 0, &integer, &fraction); + else + GetInfoRateReg(p_NonPassthroughAlgParam->rateMode, p_NonPassthroughAlgParam->peakOrAccessiveInfoRate, tsuInTenthNanos, 0, &integer, &fraction); + + /* we shift integer, as in cir/pir it is represented by the MSB 16 bits, and + * the LSB bits are for the fraction */ + temp = (uint32_t)((integer<<16) & 0x00000000FFFFFFFF); + /* temp is effected by the rate. 
For low rates it may be as low as 0, and then we'll + * take max FP = 31. + * For high rates it will never exceed the 32 bit reg (after the 16 shift), as it is + * limited by the 10G physical port. + */ + if (temp != 0) + { + /* In this case, the largest rate integer is non 0, if it does not occupy all (high) 16 + * bits of the PIR_EIR we can use this fact and enlarge it to occupy all 16 bits. + * The logic is to have as many bits for integer in the higher rates, but if we have "0"s + * in the integer part of the cir/pir register, than these bits are wasted. So we want + * to use these bits for the fraction. in this way we will have for fraction - the number + * of "0" bits and the rest - for integer. + * In other words: For each bit we shift it in PIR_EIR, we move the FP in the TS + * one bit to the left - preserving the relationship and achieving more bits + * for integer in the TS. + */ + + /* count zeroes left of the higher used bit (in order to shift the value such that + * unused bits may be used for fraction). + */ + while ((temp & 0x80000000) == 0) + { + temp = temp << 1; + fppShift++; + } + if (fppShift > 15) + { + REPORT_ERROR(MAJOR, E_INVALID_SELECTION, ("timeStampPeriod to Information rate ratio is too small")); + return; + } + } + else + { + temp = (uint32_t)fraction; /* fraction will alyas be smaller than 2^16 */ + if (!temp) + /* integer and fraction are 0, we set FP to its max val */ + fppShift = 31; + else + { + /* integer was 0 but fraction is not. FP is 16 for the fraction, + * + all left zeroes of the fraction. */ + fppShift=16; + /* count zeroes left of the higher used bit (in order to shift the value such that + * unused bits may be used for fraction). 
+     */
+            while ((temp & 0x8000) == 0)
+            {
+                temp = temp << 1;
+                fppShift++;
+            }
+        }
+    }
+
+    /*
+     * This means that the FM TS register will now be used so that 'fppShift' bits are for
+     * fraction and the rest for integer */
+    /* now we re-calculate cir and pir_eir with the calculated FP */
+    GetInfoRateReg(p_NonPassthroughAlgParam->rateMode, p_NonPassthroughAlgParam->comittedInfoRate, tsuInTenthNanos, fppShift, &integer, &fraction);
+    *cir = (uint32_t)(integer << 16 | (fraction & 0xFFFF));
+    GetInfoRateReg(p_NonPassthroughAlgParam->rateMode, p_NonPassthroughAlgParam->peakOrAccessiveInfoRate, tsuInTenthNanos, fppShift, &integer, &fraction);
+    *pir_eir = (uint32_t)(integer << 16 | (fraction & 0xFFFF));
+
+    /* burst sizes are written to the registers as configured */
+    *cbs = p_NonPassthroughAlgParam->comittedBurstSize;
+    *pbs_ebs = p_NonPassthroughAlgParam->peakOrAccessiveBurstSize;
+
+    /* convert FP as it should be written to reg.
+     * 0-15 --> 16-31
+     * 16-31 --> 0-15
+     */
+    *fpp = CalcFPP(fppShift);
+}
+
+/* Write the policer Action Register (FMPL_PAR) and busy-wait until the
+ * hardware clears the GO bit, i.e. the requested action has completed.
+ * Master-partition only (asserted); caller must hold the HW spinlock. */
+static void WritePar(t_FmPcd *p_FmPcd, uint32_t par)
+{
+    t_FmPcdPlcrRegs *p_FmPcdPlcrRegs = p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs;
+
+    ASSERT_COND(FmIsMaster(p_FmPcd->h_Fm));
+    WRITE_UINT32(p_FmPcdPlcrRegs->fmpl_par, par);
+
+    /* poll until HW finishes the action (GO bit self-clears) */
+    while (GET_UINT32(p_FmPcdPlcrRegs->fmpl_par) & FM_PCD_PLCR_PAR_GO) ;
+}
+
+/* Translate a user profile-parameters structure into the per-profile
+ * register image (*p_PlcrRegs): next-action words for the green/yellow/red
+ * results, the PEMODE word (algorithm, color mode, rate mode, FPP), the
+ * rate/burst registers, and zeroed statistics counters.
+ * Returns E_OK, or an error for any invalid selection. */
+static t_Error BuildProfileRegs(t_FmPcd *p_FmPcd,
+                                t_FmPcdPlcrProfileParams *p_ProfileParams,
+                                t_FmPcdPlcrProfileRegs *p_PlcrRegs)
+{
+    t_Error err = E_OK;
+    uint32_t pemode, gnia, ynia, rnia;
+
+    ASSERT_COND(p_FmPcd);
+
+/* Set G, Y, R Nia */
+    err = SetProfileNia(p_FmPcd, p_ProfileParams->nextEngineOnGreen, &(p_ProfileParams->paramsOnGreen), &gnia);
+    if (err)
+        RETURN_ERROR(MAJOR, err, NO_MSG);
+    err = SetProfileNia(p_FmPcd, p_ProfileParams->nextEngineOnYellow, &(p_ProfileParams->paramsOnYellow), &ynia);
+    if (err)
+        RETURN_ERROR(MAJOR, err, NO_MSG);
+    err = SetProfileNia(p_FmPcd, p_ProfileParams->nextEngineOnRed, &(p_ProfileParams->paramsOnRed), &rnia);
+    if (err)
+        RETURN_ERROR(MAJOR, err, NO_MSG);
+
+/* Mode fmpl_pemode */
+ pemode = FM_PCD_PLCR_PEMODE_PI; + + switch (p_ProfileParams->algSelection) + { + case e_FM_PCD_PLCR_PASS_THROUGH: + p_PlcrRegs->fmpl_pecir = 0; + p_PlcrRegs->fmpl_pecbs = 0; + p_PlcrRegs->fmpl_pepepir_eir = 0; + p_PlcrRegs->fmpl_pepbs_ebs = 0; + p_PlcrRegs->fmpl_pelts = 0; + p_PlcrRegs->fmpl_pects = 0; + p_PlcrRegs->fmpl_pepts_ets = 0; + pemode &= ~FM_PCD_PLCR_PEMODE_ALG_MASK; + switch (p_ProfileParams->colorMode) + { + case e_FM_PCD_PLCR_COLOR_BLIND: + pemode |= FM_PCD_PLCR_PEMODE_CBLND; + switch (p_ProfileParams->color.dfltColor) + { + case e_FM_PCD_PLCR_GREEN: + pemode &= ~FM_PCD_PLCR_PEMODE_DEFC_MASK; + break; + case e_FM_PCD_PLCR_YELLOW: + pemode |= FM_PCD_PLCR_PEMODE_DEFC_Y; + break; + case e_FM_PCD_PLCR_RED: + pemode |= FM_PCD_PLCR_PEMODE_DEFC_R; + break; + case e_FM_PCD_PLCR_OVERRIDE: + pemode |= FM_PCD_PLCR_PEMODE_DEFC_OVERRIDE; + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG); + } + + break; + case e_FM_PCD_PLCR_COLOR_AWARE: + pemode &= ~FM_PCD_PLCR_PEMODE_CBLND; + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG); + } + break; + + case e_FM_PCD_PLCR_RFC_2698: + /* Select algorithm MODE[ALG] = "01" */ + pemode |= FM_PCD_PLCR_PEMODE_ALG_RFC2698; + if (p_ProfileParams->nonPassthroughAlgParams.comittedInfoRate > p_ProfileParams->nonPassthroughAlgParams.peakOrAccessiveInfoRate) + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("in RFC2698 Peak rate must be equal or larger than comittedInfoRate.")); + goto cont_rfc; + case e_FM_PCD_PLCR_RFC_4115: + /* Select algorithm MODE[ALG] = "10" */ + pemode |= FM_PCD_PLCR_PEMODE_ALG_RFC4115; +cont_rfc: + /* Select Color-Blind / Color-Aware operation (MODE[CBLND]) */ + switch (p_ProfileParams->colorMode) + { + case e_FM_PCD_PLCR_COLOR_BLIND: + pemode |= FM_PCD_PLCR_PEMODE_CBLND; + break; + case e_FM_PCD_PLCR_COLOR_AWARE: + pemode &= ~FM_PCD_PLCR_PEMODE_CBLND; + /*In color aware more select override color interpretation (MODE[OVCLR]) */ + switch (p_ProfileParams->color.override) + { + 
case e_FM_PCD_PLCR_GREEN: + pemode &= ~FM_PCD_PLCR_PEMODE_OVCLR_MASK; + break; + case e_FM_PCD_PLCR_YELLOW: + pemode |= FM_PCD_PLCR_PEMODE_OVCLR_Y; + break; + case e_FM_PCD_PLCR_RED: + pemode |= FM_PCD_PLCR_PEMODE_OVCLR_R; + break; + case e_FM_PCD_PLCR_OVERRIDE: + pemode |= FM_PCD_PLCR_PEMODE_OVCLR_G_NC; + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG); + } + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG); + } + /* Select Measurement Unit Mode to BYTE or PACKET (MODE[PKT]) */ + switch (p_ProfileParams->nonPassthroughAlgParams.rateMode) + { + case e_FM_PCD_PLCR_BYTE_MODE : + pemode &= ~FM_PCD_PLCR_PEMODE_PKT; + switch (p_ProfileParams->nonPassthroughAlgParams.byteModeParams.frameLengthSelection) + { + case e_FM_PCD_PLCR_L2_FRM_LEN: + pemode |= FM_PCD_PLCR_PEMODE_FLS_L2; + break; + case e_FM_PCD_PLCR_L3_FRM_LEN: + pemode |= FM_PCD_PLCR_PEMODE_FLS_L3; + break; + case e_FM_PCD_PLCR_L4_FRM_LEN: + pemode |= FM_PCD_PLCR_PEMODE_FLS_L4; + break; + case e_FM_PCD_PLCR_FULL_FRM_LEN: + pemode |= FM_PCD_PLCR_PEMODE_FLS_FULL; + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG); + } + switch (p_ProfileParams->nonPassthroughAlgParams.byteModeParams.rollBackFrameSelection) + { + case e_FM_PCD_PLCR_ROLLBACK_L2_FRM_LEN: + pemode &= ~FM_PCD_PLCR_PEMODE_RBFLS; + break; + case e_FM_PCD_PLCR_ROLLBACK_FULL_FRM_LEN: + pemode |= FM_PCD_PLCR_PEMODE_RBFLS; + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG); + } + break; + case e_FM_PCD_PLCR_PACKET_MODE : + pemode |= FM_PCD_PLCR_PEMODE_PKT; + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG); + } + /* Select timeStamp floating point position (MODE[FPP]) to fit the actual traffic rates. For PACKET + mode with low traffic rates move the fixed point to the left to increase fraction accuracy. For BYTE + mode with high traffic rates move the fixed point to the right to increase integer accuracy. 
*/ + + /* Configure Traffic Parameters*/ + { + uint32_t cir=0, cbs=0, pir_eir=0, pbs_ebs=0, fpp=0; + + CalcRates(p_FmPcd, &p_ProfileParams->nonPassthroughAlgParams, &cir, &cbs, &pir_eir, &pbs_ebs, &fpp); + + /* Set Committed Information Rate (CIR) */ + p_PlcrRegs->fmpl_pecir = cir; + /* Set Committed Burst Size (CBS). */ + p_PlcrRegs->fmpl_pecbs = cbs; + /* Set Peak Information Rate (PIR_EIR used as PIR) */ + p_PlcrRegs->fmpl_pepepir_eir = pir_eir; + /* Set Peak Burst Size (PBS_EBS used as PBS) */ + p_PlcrRegs->fmpl_pepbs_ebs = pbs_ebs; + + /* Initialize the Metering Buckets to be full (write them with 0xFFFFFFFF. */ + /* Peak Rate Token Bucket Size (PTS_ETS used as PTS) */ + p_PlcrRegs->fmpl_pepts_ets = 0xFFFFFFFF; + /* Committed Rate Token Bucket Size (CTS) */ + p_PlcrRegs->fmpl_pects = 0xFFFFFFFF; + + /* Set the FPP based on calculation */ + pemode |= (fpp << FM_PCD_PLCR_PEMODE_FPP_SHIFT); + } + break; /* FM_PCD_PLCR_PEMODE_ALG_RFC2698 , FM_PCD_PLCR_PEMODE_ALG_RFC4115 */ + default: + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG); + } + + p_PlcrRegs->fmpl_pemode = pemode; + + p_PlcrRegs->fmpl_pegnia = gnia; + p_PlcrRegs->fmpl_peynia = ynia; + p_PlcrRegs->fmpl_pernia = rnia; + + /* Zero Counters */ + p_PlcrRegs->fmpl_pegpc = 0; + p_PlcrRegs->fmpl_peypc = 0; + p_PlcrRegs->fmpl_perpc = 0; + p_PlcrRegs->fmpl_perypc = 0; + p_PlcrRegs->fmpl_perrpc = 0; + + return E_OK; +} + +static t_Error AllocSharedProfiles(t_FmPcd *p_FmPcd, uint16_t numOfProfiles, uint16_t *profilesIds) +{ + uint32_t profilesFound; + uint16_t i, k=0; + uint32_t intFlags; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + + if (!numOfProfiles) + return E_OK; + + if (numOfProfiles>FM_PCD_PLCR_NUM_ENTRIES) + RETURN_ERROR(MINOR, E_INVALID_VALUE, ("numProfiles is too big.")); + + intFlags = PlcrSwLock(p_FmPcd->p_FmPcdPlcr); + /* Find numOfProfiles free profiles (may be spread) */ + profilesFound = 0; + for (i=0;i<FM_PCD_PLCR_NUM_ENTRIES; i++) + if 
(!p_FmPcd->p_FmPcdPlcr->profiles[i].profilesMng.allocated) + { + profilesFound++; + profilesIds[k] = i; + k++; + if (profilesFound == numOfProfiles) + break; + } + + if (profilesFound != numOfProfiles) + { + PlcrSwUnlock(p_FmPcd->p_FmPcdPlcr, intFlags); + RETURN_ERROR(MAJOR, E_INVALID_STATE,NO_MSG); + } + + for (i = 0;i<k;i++) + { + p_FmPcd->p_FmPcdPlcr->profiles[profilesIds[i]].profilesMng.allocated = TRUE; + p_FmPcd->p_FmPcdPlcr->profiles[profilesIds[i]].profilesMng.ownerId = 0; + } + PlcrSwUnlock(p_FmPcd->p_FmPcdPlcr, intFlags); + + return E_OK; +} + +static void FreeSharedProfiles(t_FmPcd *p_FmPcd, uint16_t numOfProfiles, uint16_t *profilesIds) +{ + uint16_t i; + + SANITY_CHECK_RETURN(p_FmPcd, E_INVALID_HANDLE); + + ASSERT_COND(numOfProfiles); + + for (i=0; i < numOfProfiles; i++) + { + ASSERT_COND(p_FmPcd->p_FmPcdPlcr->profiles[profilesIds[i]].profilesMng.allocated); + p_FmPcd->p_FmPcdPlcr->profiles[profilesIds[i]].profilesMng.allocated = FALSE; + p_FmPcd->p_FmPcdPlcr->profiles[profilesIds[i]].profilesMng.ownerId = p_FmPcd->guestId; + } +} + +/*********************************************/ +/*............Policer Exception..............*/ +/*********************************************/ +static void EventsCB(t_Handle h_FmPcd) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; + uint32_t event, mask, force; + + ASSERT_COND(FmIsMaster(p_FmPcd->h_Fm)); + event = GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_evr); + mask = GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_ier); + + event &= mask; + + /* clear the forced events */ + force = GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_ifr); + if (force & event) + WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_ifr, force & ~event); + + + WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_evr, event); + + if (event & FM_PCD_PLCR_PRAM_SELF_INIT_COMPLETE) + p_FmPcd->f_Exception(p_FmPcd->h_App,e_FM_PCD_PLCR_EXCEPTION_PRAM_SELF_INIT_COMPLETE); + if (event & 
FM_PCD_PLCR_ATOMIC_ACTION_COMPLETE) + p_FmPcd->f_Exception(p_FmPcd->h_App,e_FM_PCD_PLCR_EXCEPTION_ATOMIC_ACTION_COMPLETE); +} + +/* ..... */ + +static void ErrorExceptionsCB(t_Handle h_FmPcd) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; + uint32_t event, force, captureReg, mask; + + ASSERT_COND(FmIsMaster(p_FmPcd->h_Fm)); + event = GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_eevr); + mask = GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_eier); + + event &= mask; + + /* clear the forced events */ + force = GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_eifr); + if (force & event) + WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_eifr, force & ~event); + + WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_eevr, event); + + if (event & FM_PCD_PLCR_DOUBLE_ECC) + p_FmPcd->f_Exception(p_FmPcd->h_App,e_FM_PCD_PLCR_EXCEPTION_DOUBLE_ECC); + if (event & FM_PCD_PLCR_INIT_ENTRY_ERROR) + { + captureReg = GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_upcr); + /*ASSERT_COND(captureReg & PLCR_ERR_UNINIT_CAP); + p_UnInitCapt->profileNum = (uint8_t)(captureReg & PLCR_ERR_UNINIT_NUM_MASK); + p_UnInitCapt->portId = (uint8_t)((captureReg & PLCR_ERR_UNINIT_PID_MASK) >>PLCR_ERR_UNINIT_PID_SHIFT) ; + p_UnInitCapt->absolute = (bool)(captureReg & PLCR_ERR_UNINIT_ABSOLUTE_MASK);*/ + p_FmPcd->f_FmPcdIndexedException(p_FmPcd->h_App,e_FM_PCD_PLCR_EXCEPTION_INIT_ENTRY_ERROR,(uint16_t)(captureReg & PLCR_ERR_UNINIT_NUM_MASK)); + WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_upcr, PLCR_ERR_UNINIT_CAP); + } +} + + +/*****************************************************************************/ +/* Inter-module API routines */ +/*****************************************************************************/ + +t_Handle PlcrConfig(t_FmPcd *p_FmPcd, t_FmPcdParams *p_FmPcdParams) +{ + t_FmPcdPlcr *p_FmPcdPlcr; + uint16_t i=0; + + UNUSED(p_FmPcd); + UNUSED(p_FmPcdParams); + + p_FmPcdPlcr = (t_FmPcdPlcr *) XX_Malloc(sizeof(t_FmPcdPlcr)); + if 
(!p_FmPcdPlcr) + { + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM Policer structure allocation FAILED")); + return NULL; + } + memset(p_FmPcdPlcr, 0, sizeof(t_FmPcdPlcr)); + if (p_FmPcd->guestId == NCSW_MASTER_ID) + { + p_FmPcdPlcr->p_FmPcdPlcrRegs = (t_FmPcdPlcrRegs *)UINT_TO_PTR(FmGetPcdPlcrBaseAddr(p_FmPcdParams->h_Fm)); + p_FmPcd->p_FmPcdDriverParam->plcrAutoRefresh = DEFAULT_plcrAutoRefresh; + p_FmPcd->exceptions |= (DEFAULT_fmPcdPlcrExceptions | DEFAULT_fmPcdPlcrErrorExceptions); + } + + p_FmPcdPlcr->numOfSharedProfiles = DEFAULT_numOfSharedPlcrProfiles; + + p_FmPcdPlcr->partPlcrProfilesBase = p_FmPcdParams->partPlcrProfilesBase; + p_FmPcdPlcr->partNumOfPlcrProfiles = p_FmPcdParams->partNumOfPlcrProfiles; + /* for backward compatabilty. if no policer profile, will set automatically to the max */ + if ((p_FmPcd->guestId == NCSW_MASTER_ID) && + (p_FmPcdPlcr->partNumOfPlcrProfiles == 0)) + p_FmPcdPlcr->partNumOfPlcrProfiles = FM_PCD_PLCR_NUM_ENTRIES; + + for (i=0; i<FM_PCD_PLCR_NUM_ENTRIES; i++) + p_FmPcdPlcr->profiles[i].profilesMng.ownerId = (uint8_t)ILLEGAL_BASE; + + return p_FmPcdPlcr; +} + +t_Error PlcrInit(t_FmPcd *p_FmPcd) +{ + t_FmPcdDriverParam *p_Param = p_FmPcd->p_FmPcdDriverParam; + t_FmPcdPlcr *p_FmPcdPlcr = p_FmPcd->p_FmPcdPlcr; + t_FmPcdPlcrRegs *p_Regs = p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs; + t_Error err = E_OK; + uint32_t tmpReg32 = 0; + uint16_t base; + + if ((p_FmPcdPlcr->partPlcrProfilesBase + p_FmPcdPlcr->partNumOfPlcrProfiles) > FM_PCD_PLCR_NUM_ENTRIES) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("partPlcrProfilesBase+partNumOfPlcrProfiles out of range!!!")); + + p_FmPcdPlcr->h_HwSpinlock = XX_InitSpinlock(); + if (!p_FmPcdPlcr->h_HwSpinlock) + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("FM Policer HW spinlock")); + + p_FmPcdPlcr->h_SwSpinlock = XX_InitSpinlock(); + if (!p_FmPcdPlcr->h_SwSpinlock) + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("FM Policer SW spinlock")); + + base = PlcrAllocProfilesForPartition(p_FmPcd, + p_FmPcdPlcr->partPlcrProfilesBase, + 
p_FmPcdPlcr->partNumOfPlcrProfiles, + p_FmPcd->guestId); + if (base == (uint16_t)ILLEGAL_BASE) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG); + + if (p_FmPcdPlcr->numOfSharedProfiles) + { + err = AllocSharedProfiles(p_FmPcd, + p_FmPcdPlcr->numOfSharedProfiles, + p_FmPcdPlcr->sharedProfilesIds); + if (err) + RETURN_ERROR(MAJOR, err,NO_MSG); + } + + if (p_FmPcd->guestId != NCSW_MASTER_ID) + return E_OK; + + /**********************FMPL_GCR******************/ + tmpReg32 = 0; + tmpReg32 |= FM_PCD_PLCR_GCR_STEN; + if (p_Param->plcrAutoRefresh) + tmpReg32 |= FM_PCD_PLCR_GCR_DAR; + tmpReg32 |= GET_NIA_BMI_AC_ENQ_FRAME(p_FmPcd); + + WRITE_UINT32(p_Regs->fmpl_gcr, tmpReg32); + /**********************FMPL_GCR******************/ + + /**********************FMPL_EEVR******************/ + WRITE_UINT32(p_Regs->fmpl_eevr, (FM_PCD_PLCR_DOUBLE_ECC | FM_PCD_PLCR_INIT_ENTRY_ERROR)); + /**********************FMPL_EEVR******************/ + /**********************FMPL_EIER******************/ + tmpReg32 = 0; + if (p_FmPcd->exceptions & FM_PCD_EX_PLCR_DOUBLE_ECC) + { + FmEnableRamsEcc(p_FmPcd->h_Fm); + tmpReg32 |= FM_PCD_PLCR_DOUBLE_ECC; + } + if (p_FmPcd->exceptions & FM_PCD_EX_PLCR_INIT_ENTRY_ERROR) + tmpReg32 |= FM_PCD_PLCR_INIT_ENTRY_ERROR; + WRITE_UINT32(p_Regs->fmpl_eier, tmpReg32); + /**********************FMPL_EIER******************/ + + /**********************FMPL_EVR******************/ + WRITE_UINT32(p_Regs->fmpl_evr, (FM_PCD_PLCR_PRAM_SELF_INIT_COMPLETE | FM_PCD_PLCR_ATOMIC_ACTION_COMPLETE)); + /**********************FMPL_EVR******************/ + /**********************FMPL_IER******************/ + tmpReg32 = 0; + if (p_FmPcd->exceptions & FM_PCD_EX_PLCR_PRAM_SELF_INIT_COMPLETE) + tmpReg32 |= FM_PCD_PLCR_PRAM_SELF_INIT_COMPLETE; + if (p_FmPcd->exceptions & FM_PCD_EX_PLCR_ATOMIC_ACTION_COMPLETE) + tmpReg32 |= FM_PCD_PLCR_ATOMIC_ACTION_COMPLETE; + WRITE_UINT32(p_Regs->fmpl_ier, tmpReg32); + /**********************FMPL_IER******************/ + + /* register even if no interrupts 
enabled, to allow future enablement */
+    FmRegisterIntr(p_FmPcd->h_Fm,
+                   e_FM_MOD_PLCR,
+                   0,
+                   e_FM_INTR_TYPE_ERR,
+                   ErrorExceptionsCB,
+                   p_FmPcd);
+    FmRegisterIntr(p_FmPcd->h_Fm,
+                   e_FM_MOD_PLCR,
+                   0,
+                   e_FM_INTR_TYPE_NORMAL,
+                   EventsCB,
+                   p_FmPcd);
+
+    /* driver initializes one DFLT profile at the last entry*/
+    /* NOTE(review): the comment above says "last entry" but the code below
+     * reserves profiles[0] (and writes 0 to FMPL_DPMR) — confirm which
+     * entry is the intended default profile. */
+    /**********************FMPL_DPMR******************/
+    tmpReg32 = 0;
+    WRITE_UINT32(p_Regs->fmpl_dpmr, tmpReg32);
+    p_FmPcd->p_FmPcdPlcr->profiles[0].profilesMng.allocated = TRUE;
+
+    return E_OK;
+}
+
+/* Tear down everything PlcrInit() set up: unregister both interrupt
+ * callbacks, release shared and per-partition profile allocations, and
+ * free the SW/HW spinlocks. Always returns E_OK. */
+t_Error PlcrFree(t_FmPcd *p_FmPcd)
+{
+    FmUnregisterIntr(p_FmPcd->h_Fm, e_FM_MOD_PLCR, 0, e_FM_INTR_TYPE_ERR);
+    FmUnregisterIntr(p_FmPcd->h_Fm, e_FM_MOD_PLCR, 0, e_FM_INTR_TYPE_NORMAL);
+
+    if (p_FmPcd->p_FmPcdPlcr->numOfSharedProfiles)
+        FreeSharedProfiles(p_FmPcd,
+                           p_FmPcd->p_FmPcdPlcr->numOfSharedProfiles,
+                           p_FmPcd->p_FmPcdPlcr->sharedProfilesIds);
+
+    if (p_FmPcd->p_FmPcdPlcr->partNumOfPlcrProfiles)
+        PlcrFreeProfilesForPartition(p_FmPcd,
+                                     p_FmPcd->p_FmPcdPlcr->partPlcrProfilesBase,
+                                     p_FmPcd->p_FmPcdPlcr->partNumOfPlcrProfiles,
+                                     p_FmPcd->guestId);
+
+    if (p_FmPcd->p_FmPcdPlcr->h_SwSpinlock)
+        XX_FreeSpinlock(p_FmPcd->p_FmPcdPlcr->h_SwSpinlock);
+
+    if (p_FmPcd->p_FmPcdPlcr->h_HwSpinlock)
+        XX_FreeSpinlock(p_FmPcd->p_FmPcdPlcr->h_HwSpinlock);
+
+    return E_OK;
+}
+
+/* Set the global enable bit (GCR[EN]) — policer starts processing. */
+void PlcrEnable(t_FmPcd *p_FmPcd)
+{
+    t_FmPcdPlcrRegs *p_Regs = p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs;
+
+    WRITE_UINT32(p_Regs->fmpl_gcr, GET_UINT32(p_Regs->fmpl_gcr) | FM_PCD_PLCR_GCR_EN);
+}
+
+/* Clear the global enable bit (GCR[EN]) — policer stops processing. */
+void PlcrDisable(t_FmPcd *p_FmPcd)
+{
+    t_FmPcdPlcrRegs *p_Regs = p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs;
+
+    WRITE_UINT32(p_Regs->fmpl_gcr, GET_UINT32(p_Regs->fmpl_gcr) & ~FM_PCD_PLCR_GCR_EN);
+}
+
+/* Claim a contiguous window [base, base+numOfProfiles) of policer profiles
+ * for the given guest partition. In guest mode the request is forwarded to
+ * the master over IPC first; ownership is then recorded locally under the
+ * PCD spinlock. Returns the window base, or ILLEGAL_BASE on any failure
+ * (range overlap, out-of-range request, or no IPC in guest mode). */
+uint16_t PlcrAllocProfilesForPartition(t_FmPcd *p_FmPcd, uint16_t base, uint16_t numOfProfiles, uint8_t guestId)
+{
+    uint32_t intFlags;
+    uint16_t profilesFound = 0;
+    int i = 0;
+
+    ASSERT_COND(p_FmPcd);
+    ASSERT_COND(p_FmPcd->p_FmPcdPlcr);
+
+    if (!numOfProfiles)
+        return 0;
+
+    if
((numOfProfiles > FM_PCD_PLCR_NUM_ENTRIES) || + (base + numOfProfiles > FM_PCD_PLCR_NUM_ENTRIES)) + return (uint16_t)ILLEGAL_BASE; + + if (p_FmPcd->h_IpcSession) + { + t_FmIpcResourceAllocParams ipcAllocParams; + t_FmPcdIpcMsg msg; + t_FmPcdIpcReply reply; + t_Error err; + uint32_t replyLength; + + memset(&msg, 0, sizeof(msg)); + memset(&reply, 0, sizeof(reply)); + memset(&ipcAllocParams, 0, sizeof(t_FmIpcResourceAllocParams)); + ipcAllocParams.guestId = p_FmPcd->guestId; + ipcAllocParams.num = p_FmPcd->p_FmPcdPlcr->partNumOfPlcrProfiles; + ipcAllocParams.base = p_FmPcd->p_FmPcdPlcr->partPlcrProfilesBase; + msg.msgId = FM_PCD_ALLOC_PROFILES; + memcpy(msg.msgBody, &ipcAllocParams, sizeof(t_FmIpcResourceAllocParams)); + replyLength = sizeof(uint32_t) + sizeof(uint16_t); + err = XX_IpcSendMessage(p_FmPcd->h_IpcSession, + (uint8_t*)&msg, + sizeof(msg.msgId) + sizeof(t_FmIpcResourceAllocParams), + (uint8_t*)&reply, + &replyLength, + NULL, + NULL); + if ((err != E_OK) || + (replyLength != (sizeof(uint32_t) + sizeof(uint16_t)))) + { + REPORT_ERROR(MAJOR, err, NO_MSG); + return (uint16_t)ILLEGAL_BASE; + } + else + memcpy((uint8_t*)&p_FmPcd->p_FmPcdPlcr->partPlcrProfilesBase, reply.replyBody, sizeof(uint16_t)); + if (p_FmPcd->p_FmPcdPlcr->partPlcrProfilesBase == (uint16_t)ILLEGAL_BASE) + { + REPORT_ERROR(MAJOR, err, NO_MSG); + return (uint16_t)ILLEGAL_BASE; + } + } + else if (p_FmPcd->guestId != NCSW_MASTER_ID) + { + DBG(WARNING, ("FM Guest mode, without IPC - can't validate Policer-profiles range!")); + return (uint16_t)ILLEGAL_BASE; + } + + intFlags = XX_LockIntrSpinlock(p_FmPcd->h_Spinlock); + for (i=base; i<(base+numOfProfiles); i++) + if (p_FmPcd->p_FmPcdPlcr->profiles[i].profilesMng.ownerId == (uint8_t)ILLEGAL_BASE) + profilesFound++; + else + break; + + if (profilesFound == numOfProfiles) + for (i=base; i<(base+numOfProfiles); i++) + p_FmPcd->p_FmPcdPlcr->profiles[i].profilesMng.ownerId = guestId; + else + { + XX_UnlockIntrSpinlock(p_FmPcd->h_Spinlock, intFlags); + 
return (uint16_t)ILLEGAL_BASE; + } + XX_UnlockIntrSpinlock(p_FmPcd->h_Spinlock, intFlags); + + return base; +} + +void PlcrFreeProfilesForPartition(t_FmPcd *p_FmPcd, uint16_t base, uint16_t numOfProfiles, uint8_t guestId) +{ + int i = 0; + + ASSERT_COND(p_FmPcd); + ASSERT_COND(p_FmPcd->p_FmPcdPlcr); + + if (p_FmPcd->h_IpcSession) + { + t_FmIpcResourceAllocParams ipcAllocParams; + t_FmPcdIpcMsg msg; + t_Error err; + + memset(&msg, 0, sizeof(msg)); + memset(&ipcAllocParams, 0, sizeof(t_FmIpcResourceAllocParams)); + ipcAllocParams.guestId = p_FmPcd->guestId; + ipcAllocParams.num = p_FmPcd->p_FmPcdPlcr->partNumOfPlcrProfiles; + ipcAllocParams.base = p_FmPcd->p_FmPcdPlcr->partPlcrProfilesBase; + msg.msgId = FM_PCD_FREE_PROFILES; + memcpy(msg.msgBody, &ipcAllocParams, sizeof(t_FmIpcResourceAllocParams)); + err = XX_IpcSendMessage(p_FmPcd->h_IpcSession, + (uint8_t*)&msg, + sizeof(msg.msgId) + sizeof(t_FmIpcResourceAllocParams), + NULL, + NULL, + NULL, + NULL); + if (err != E_OK) + REPORT_ERROR(MAJOR, err, NO_MSG); + return; + } + else if (p_FmPcd->guestId != NCSW_MASTER_ID) + { + DBG(WARNING, ("FM Guest mode, without IPC - can't validate Policer-profiles range!")); + return; + } + + for (i=base; i<(base+numOfProfiles); i++) + { + if (p_FmPcd->p_FmPcdPlcr->profiles[i].profilesMng.ownerId == guestId) + p_FmPcd->p_FmPcdPlcr->profiles[i].profilesMng.ownerId = (uint8_t)ILLEGAL_BASE; + else + DBG(WARNING, ("Request for freeing storage profile window which wasn't allocated to this partition")); + } +} + +t_Error PlcrSetPortProfiles(t_FmPcd *p_FmPcd, + uint8_t hardwarePortId, + uint16_t numOfProfiles, + uint16_t base) +{ + t_FmPcdPlcrRegs *p_Regs = p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs; + uint32_t log2Num, tmpReg32; + + if ((p_FmPcd->guestId != NCSW_MASTER_ID) && + !p_Regs && + p_FmPcd->h_IpcSession) + { + t_FmIpcResourceAllocParams ipcAllocParams; + t_FmPcdIpcMsg msg; + t_Error err; + + memset(&msg, 0, sizeof(msg)); + memset(&ipcAllocParams, 0, 
sizeof(t_FmIpcResourceAllocParams)); + ipcAllocParams.guestId = hardwarePortId; + ipcAllocParams.num = numOfProfiles; + ipcAllocParams.base = base; + msg.msgId = FM_PCD_SET_PORT_PROFILES; + memcpy(msg.msgBody, &ipcAllocParams, sizeof(t_FmIpcResourceAllocParams)); + err = XX_IpcSendMessage(p_FmPcd->h_IpcSession, + (uint8_t*)&msg, + sizeof(msg.msgId) + sizeof(t_FmIpcResourceAllocParams), + NULL, + NULL, + NULL, + NULL); + if (err != E_OK) + RETURN_ERROR(MAJOR, err, NO_MSG); + return E_OK; + } + else if (!p_Regs) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, + ("Either IPC or 'baseAddress' is required!")); + + ASSERT_COND(IN_RANGE(1, hardwarePortId, 63)); + + if (GET_UINT32(p_Regs->fmpl_pmr[hardwarePortId-1]) & FM_PCD_PLCR_PMR_V) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, + ("The requesting port has already an allocated profiles window.")); + + /**********************FMPL_PMRx******************/ + LOG2((uint64_t)numOfProfiles, log2Num); + tmpReg32 = base; + tmpReg32 |= log2Num << 16; + tmpReg32 |= FM_PCD_PLCR_PMR_V; + WRITE_UINT32(p_Regs->fmpl_pmr[hardwarePortId-1], tmpReg32); + + return E_OK; +} + +t_Error PlcrClearPortProfiles(t_FmPcd *p_FmPcd, uint8_t hardwarePortId) +{ + t_FmPcdPlcrRegs *p_Regs = p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs; + + if ((p_FmPcd->guestId != NCSW_MASTER_ID) && + !p_Regs && + p_FmPcd->h_IpcSession) + { + t_FmIpcResourceAllocParams ipcAllocParams; + t_FmPcdIpcMsg msg; + t_Error err; + + memset(&msg, 0, sizeof(msg)); + memset(&ipcAllocParams, 0, sizeof(t_FmIpcResourceAllocParams)); + ipcAllocParams.guestId = hardwarePortId; + msg.msgId = FM_PCD_CLEAR_PORT_PROFILES; + memcpy(msg.msgBody, &ipcAllocParams, sizeof(t_FmIpcResourceAllocParams)); + err = XX_IpcSendMessage(p_FmPcd->h_IpcSession, + (uint8_t*)&msg, + sizeof(msg.msgId) + sizeof(t_FmIpcResourceAllocParams), + NULL, + NULL, + NULL, + NULL); + if (err != E_OK) + RETURN_ERROR(MAJOR, err, NO_MSG); + return E_OK; + } + else if (!p_Regs) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, + ("Either IPC or 'baseAddress' 
is required!")); + + ASSERT_COND(IN_RANGE(1, hardwarePortId, 63)); + WRITE_UINT32(p_Regs->fmpl_pmr[hardwarePortId-1], 0); + + return E_OK; +} + +t_Error FmPcdPlcrAllocProfiles(t_Handle h_FmPcd, uint8_t hardwarePortId, uint16_t numOfProfiles) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + t_Error err = E_OK; + uint32_t profilesFound; + uint32_t intFlags; + uint16_t i, first, swPortIndex = 0; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + + if (!numOfProfiles) + return E_OK; + + ASSERT_COND(hardwarePortId); + + if (numOfProfiles>FM_PCD_PLCR_NUM_ENTRIES) + RETURN_ERROR(MINOR, E_INVALID_VALUE, ("numProfiles is too big.")); + + if (!POWER_OF_2(numOfProfiles)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("numProfiles must be a power of 2.")); + + first = 0; + profilesFound = 0; + intFlags = PlcrSwLock(p_FmPcd->p_FmPcdPlcr); + + for (i=0; i<FM_PCD_PLCR_NUM_ENTRIES; ) + { + if (!p_FmPcd->p_FmPcdPlcr->profiles[i].profilesMng.allocated) + { + profilesFound++; + i++; + if (profilesFound == numOfProfiles) + break; + } + else + { + profilesFound = 0; + /* advance i to the next aligned address */ + i = first = (uint16_t)(first + numOfProfiles); + } + } + + if (profilesFound == numOfProfiles) + { + for (i=first; i<first + numOfProfiles; i++) + { + p_FmPcd->p_FmPcdPlcr->profiles[i].profilesMng.allocated = TRUE; + p_FmPcd->p_FmPcdPlcr->profiles[i].profilesMng.ownerId = hardwarePortId; + } + } + else + { + PlcrSwUnlock(p_FmPcd->p_FmPcdPlcr, intFlags); + RETURN_ERROR(MINOR, E_FULL, ("No profiles.")); + } + PlcrSwUnlock(p_FmPcd->p_FmPcdPlcr, intFlags); + + err = PlcrSetPortProfiles(p_FmPcd, hardwarePortId, numOfProfiles, first); + if (err) + { + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + HW_PORT_ID_TO_SW_PORT_INDX(swPortIndex, hardwarePortId); + + p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].numOfProfiles = numOfProfiles; + p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].profilesBase = first; + + return E_OK; +} + +t_Error FmPcdPlcrFreeProfiles(t_Handle h_FmPcd, uint8_t 
hardwarePortId) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + t_Error err = E_OK; + uint32_t intFlags; + uint16_t i, swPortIndex = 0; + + ASSERT_COND(IN_RANGE(1, hardwarePortId, 63)); + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_HANDLE); + + HW_PORT_ID_TO_SW_PORT_INDX(swPortIndex, hardwarePortId); + + err = PlcrClearPortProfiles(p_FmPcd, hardwarePortId); + if (err) + RETURN_ERROR(MAJOR, err,NO_MSG); + + intFlags = PlcrSwLock(p_FmPcd->p_FmPcdPlcr); + for (i=p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].profilesBase; + i<(p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].profilesBase + + p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].numOfProfiles); + i++) + { + ASSERT_COND(p_FmPcd->p_FmPcdPlcr->profiles[i].profilesMng.ownerId == hardwarePortId); + ASSERT_COND(p_FmPcd->p_FmPcdPlcr->profiles[i].profilesMng.allocated); + + p_FmPcd->p_FmPcdPlcr->profiles[i].profilesMng.allocated = FALSE; + p_FmPcd->p_FmPcdPlcr->profiles[i].profilesMng.ownerId = p_FmPcd->guestId; + } + PlcrSwUnlock(p_FmPcd->p_FmPcdPlcr, intFlags); + + p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].numOfProfiles = 0; + p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].profilesBase = 0; + + return E_OK; +} + +t_Error FmPcdPlcrCcGetSetParams(t_Handle h_FmPcd, uint16_t profileIndx ,uint32_t requiredAction) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; + t_FmPcdPlcr *p_FmPcdPlcr = p_FmPcd->p_FmPcdPlcr; + t_FmPcdPlcrRegs *p_FmPcdPlcrRegs = p_FmPcdPlcr->p_FmPcdPlcrRegs; + uint32_t tmpReg32, intFlags; + t_Error err; + + /* Calling function locked all PCD modules, so no need to lock here */ + + if (profileIndx >= FM_PCD_PLCR_NUM_ENTRIES) + RETURN_ERROR(MAJOR, E_INVALID_VALUE,("Policer profile out of range")); + + if (!FmPcdPlcrIsProfileValid(p_FmPcd, profileIndx)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE,("Policer profile is not valid")); + + /*intFlags = PlcrProfileLock(&p_FmPcd->p_FmPcdPlcr->profiles[profileIndx]);*/ + + if 
(p_FmPcd->h_Hc) + { + err = FmHcPcdPlcrCcGetSetParams(p_FmPcd->h_Hc, profileIndx, requiredAction); + + FmPcdPlcrUpatePointedOwner(p_FmPcd, profileIndx, TRUE); + FmPcdPlcrUpdateRequiredAction(p_FmPcd, profileIndx, requiredAction); + + /*PlcrProfileUnlock(&p_FmPcd->p_FmPcdPlcr->profiles[profileIndx], intFlags);*/ + return err; + } + + /* lock the HW because once we read the registers we don't want them to be changed + * by another access. (We can copy to a tmp location and release the lock!) */ + + intFlags = PlcrHwLock(p_FmPcdPlcr); + WritePar(p_FmPcd, FmPcdPlcrBuildReadPlcrActionReg(profileIndx)); + + if (!p_FmPcd->p_FmPcdPlcr->profiles[profileIndx].pointedOwners || + !(p_FmPcd->p_FmPcdPlcr->profiles[profileIndx].requiredAction & requiredAction)) + { + if (requiredAction & UPDATE_NIA_ENQ_WITHOUT_DMA) + { + if ((p_FmPcd->p_FmPcdPlcr->profiles[profileIndx].nextEngineOnGreen!= e_FM_PCD_DONE) || + (p_FmPcd->p_FmPcdPlcr->profiles[profileIndx].nextEngineOnYellow!= e_FM_PCD_DONE) || + (p_FmPcd->p_FmPcdPlcr->profiles[profileIndx].nextEngineOnRed!= e_FM_PCD_DONE)) + { + PlcrHwUnlock(p_FmPcdPlcr, intFlags); + /*PlcrProfileUnlock(&p_FmPcd->p_FmPcdPlcr->profiles[profileIndx], intFlags);*/ + RETURN_ERROR (MAJOR, E_OK, ("In this case the next engine can be e_FM_PCD_DONE")); + } + + if (p_FmPcd->p_FmPcdPlcr->profiles[profileIndx].paramsOnGreen.action == e_FM_PCD_ENQ_FRAME) + { + tmpReg32 = GET_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pegnia); + if (!(tmpReg32 & (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME))) + { + PlcrHwUnlock(p_FmPcdPlcr, intFlags); + /*PlcrProfileUnlock(&p_FmPcd->p_FmPcdPlcr->profiles[profileIndx], intFlags);*/ + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Next engine of this policer profile has to be assigned to FM_PCD_DONE")); + } + tmpReg32 |= NIA_BMI_AC_ENQ_FRAME_WITHOUT_DMA; + WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pegnia, tmpReg32); + tmpReg32 = FmPcdPlcrBuildWritePlcrActionReg(profileIndx); + tmpReg32 |= FM_PCD_PLCR_PAR_PWSEL_PEGNIA; + WritePar(p_FmPcd, 
tmpReg32); + } + + if (p_FmPcd->p_FmPcdPlcr->profiles[profileIndx].paramsOnYellow.action == e_FM_PCD_ENQ_FRAME) + { + tmpReg32 = GET_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_peynia); + if (!(tmpReg32 & (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME))) + { + PlcrHwUnlock(p_FmPcdPlcr, intFlags); + /*PlcrProfileUnlock(&p_FmPcd->p_FmPcdPlcr->profiles[profileIndx], intFlags);*/ + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Next engine of this policer profile has to be assigned to FM_PCD_DONE")); + } + tmpReg32 |= NIA_BMI_AC_ENQ_FRAME_WITHOUT_DMA; + WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_peynia, tmpReg32); + tmpReg32 = FmPcdPlcrBuildWritePlcrActionReg(profileIndx); + tmpReg32 |= FM_PCD_PLCR_PAR_PWSEL_PEYNIA; + WritePar(p_FmPcd, tmpReg32); + PlcrHwUnlock(p_FmPcdPlcr, intFlags); + } + + if (p_FmPcd->p_FmPcdPlcr->profiles[profileIndx].paramsOnRed.action == e_FM_PCD_ENQ_FRAME) + { + tmpReg32 = GET_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pernia); + if (!(tmpReg32 & (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME))) + { + PlcrHwUnlock(p_FmPcdPlcr, intFlags); + /*PlcrProfileUnlock(&p_FmPcd->p_FmPcdPlcr->profiles[profileIndx], intFlags);*/ + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Next engine of this policer profile has to be assigned to FM_PCD_DONE")); + } + tmpReg32 |= NIA_BMI_AC_ENQ_FRAME_WITHOUT_DMA; + WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pernia, tmpReg32); + tmpReg32 = FmPcdPlcrBuildWritePlcrActionReg(profileIndx); + tmpReg32 |= FM_PCD_PLCR_PAR_PWSEL_PERNIA; + WritePar(p_FmPcd, tmpReg32); + + } + } + } + PlcrHwUnlock(p_FmPcdPlcr, intFlags); + + FmPcdPlcrUpatePointedOwner(p_FmPcd, profileIndx, TRUE); + FmPcdPlcrUpdateRequiredAction(p_FmPcd, profileIndx, requiredAction); + + /*PlcrProfileUnlock(&p_FmPcd->p_FmPcdPlcr->profiles[profileIndx], intFlags);*/ + + return E_OK; +} + +void FmPcdPlcrUpatePointedOwner(t_Handle h_FmPcd, uint16_t absoluteProfileId, bool add) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + + /* this routine is protected by calling routine */ + + 
ASSERT_COND(p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].valid); + + if (add) + p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].pointedOwners++; + else + p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].pointedOwners--; +} + +uint32_t FmPcdPlcrGetPointedOwners(t_Handle h_FmPcd, uint16_t absoluteProfileId) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + + ASSERT_COND(p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].valid); + + return p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].pointedOwners; +} + +uint32_t FmPcdPlcrGetRequiredAction(t_Handle h_FmPcd, uint16_t absoluteProfileId) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + + ASSERT_COND(p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].valid); + + return p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].requiredAction; +} + +bool FmPcdPlcrIsProfileValid(t_Handle h_FmPcd, uint16_t absoluteProfileId) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + t_FmPcdPlcr *p_FmPcdPlcr = p_FmPcd->p_FmPcdPlcr; + + ASSERT_COND(absoluteProfileId < FM_PCD_PLCR_NUM_ENTRIES); + + return p_FmPcdPlcr->profiles[absoluteProfileId].valid; +} + +void FmPcdPlcrValidateProfileSw(t_Handle h_FmPcd, uint16_t absoluteProfileId) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + uint32_t intFlags; + + ASSERT_COND(!p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].valid); + + intFlags = PlcrProfileLock(&p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId]); + p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].valid = TRUE; + PlcrProfileUnlock(&p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId], intFlags); +} + +void FmPcdPlcrInvalidateProfileSw(t_Handle h_FmPcd, uint16_t absoluteProfileId) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + uint32_t intFlags; + + ASSERT_COND(p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].valid); + + intFlags = PlcrProfileLock(&p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId]); + p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].valid = FALSE; + PlcrProfileUnlock(&p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId], intFlags); 
+} + +uint16_t FmPcdPlcrProfileGetAbsoluteId(t_Handle h_Profile) +{ + return ((t_FmPcdPlcrProfile*)h_Profile)->absoluteProfileId; +} + +t_Error FmPcdPlcrGetAbsoluteIdByProfileParams(t_Handle h_FmPcd, + e_FmPcdProfileTypeSelection profileType, + t_Handle h_FmPort, + uint16_t relativeProfile, + uint16_t *p_AbsoluteId) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + t_FmPcdPlcr *p_FmPcdPlcr = p_FmPcd->p_FmPcdPlcr; + uint8_t i; + + switch (profileType) + { + case e_FM_PCD_PLCR_PORT_PRIVATE: + /* get port PCD id from port handle */ + for (i=0;i<FM_MAX_NUM_OF_PORTS;i++) + if (p_FmPcd->p_FmPcdPlcr->portsMapping[i].h_FmPort == h_FmPort) + break; + if (i == FM_MAX_NUM_OF_PORTS) + RETURN_ERROR(MAJOR, E_INVALID_STATE , ("Invalid port handle.")); + + if (!p_FmPcd->p_FmPcdPlcr->portsMapping[i].numOfProfiles) + RETURN_ERROR(MAJOR, E_INVALID_SELECTION , ("Port has no allocated profiles")); + if (relativeProfile >= p_FmPcd->p_FmPcdPlcr->portsMapping[i].numOfProfiles) + RETURN_ERROR(MAJOR, E_INVALID_SELECTION , ("Profile id is out of range")); + *p_AbsoluteId = (uint16_t)(p_FmPcd->p_FmPcdPlcr->portsMapping[i].profilesBase + relativeProfile); + break; + case e_FM_PCD_PLCR_SHARED: + if (relativeProfile >= p_FmPcdPlcr->numOfSharedProfiles) + RETURN_ERROR(MAJOR, E_INVALID_SELECTION , ("Profile id is out of range")); + *p_AbsoluteId = (uint16_t)(p_FmPcdPlcr->sharedProfilesIds[relativeProfile]); + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("Invalid policer profile type")); + } + + return E_OK; +} + +uint16_t FmPcdPlcrGetPortProfilesBase(t_Handle h_FmPcd, uint8_t hardwarePortId) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; + uint16_t swPortIndex = 0; + + HW_PORT_ID_TO_SW_PORT_INDX(swPortIndex, hardwarePortId); + + return p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].profilesBase; +} + +uint16_t FmPcdPlcrGetPortNumOfProfiles(t_Handle h_FmPcd, uint8_t hardwarePortId) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; + uint16_t swPortIndex = 0; + + 
HW_PORT_ID_TO_SW_PORT_INDX(swPortIndex, hardwarePortId); + + return p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].numOfProfiles; + +} +uint32_t FmPcdPlcrBuildWritePlcrActionReg(uint16_t absoluteProfileId) +{ + return (uint32_t)(FM_PCD_PLCR_PAR_GO | + ((uint32_t)absoluteProfileId << FM_PCD_PLCR_PAR_PNUM_SHIFT)); +} + +uint32_t FmPcdPlcrBuildWritePlcrActionRegs(uint16_t absoluteProfileId) +{ + return (uint32_t)(FM_PCD_PLCR_PAR_GO | + ((uint32_t)absoluteProfileId << FM_PCD_PLCR_PAR_PNUM_SHIFT) | + FM_PCD_PLCR_PAR_PWSEL_MASK); +} + +bool FmPcdPlcrHwProfileIsValid(uint32_t profileModeReg) +{ + + if (profileModeReg & FM_PCD_PLCR_PEMODE_PI) + return TRUE; + else + return FALSE; +} + +uint32_t FmPcdPlcrBuildReadPlcrActionReg(uint16_t absoluteProfileId) +{ + return (uint32_t)(FM_PCD_PLCR_PAR_GO | + FM_PCD_PLCR_PAR_R | + ((uint32_t)absoluteProfileId << FM_PCD_PLCR_PAR_PNUM_SHIFT) | + FM_PCD_PLCR_PAR_PWSEL_MASK); +} + +uint32_t FmPcdPlcrBuildCounterProfileReg(e_FmPcdPlcrProfileCounters counter) +{ + switch (counter) + { + case (e_FM_PCD_PLCR_PROFILE_GREEN_PACKET_TOTAL_COUNTER): + return FM_PCD_PLCR_PAR_PWSEL_PEGPC; + case (e_FM_PCD_PLCR_PROFILE_YELLOW_PACKET_TOTAL_COUNTER): + return FM_PCD_PLCR_PAR_PWSEL_PEYPC; + case (e_FM_PCD_PLCR_PROFILE_RED_PACKET_TOTAL_COUNTER) : + return FM_PCD_PLCR_PAR_PWSEL_PERPC; + case (e_FM_PCD_PLCR_PROFILE_RECOLOURED_YELLOW_PACKET_TOTAL_COUNTER) : + return FM_PCD_PLCR_PAR_PWSEL_PERYPC; + case (e_FM_PCD_PLCR_PROFILE_RECOLOURED_RED_PACKET_TOTAL_COUNTER) : + return FM_PCD_PLCR_PAR_PWSEL_PERRPC; + default: + REPORT_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG); + return 0; + } +} + +uint32_t FmPcdPlcrBuildNiaProfileReg(bool green, bool yellow, bool red) +{ + + uint32_t tmpReg32 = 0; + + if (green) + tmpReg32 |= FM_PCD_PLCR_PAR_PWSEL_PEGNIA; + if (yellow) + tmpReg32 |= FM_PCD_PLCR_PAR_PWSEL_PEYNIA; + if (red) + tmpReg32 |= FM_PCD_PLCR_PAR_PWSEL_PERNIA; + + return tmpReg32; +} + +void FmPcdPlcrUpdateRequiredAction(t_Handle h_FmPcd, uint16_t 
absoluteProfileId, uint32_t requiredAction) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + + /* this routine is protected by calling routine */ + + ASSERT_COND(p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].valid); + + p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].requiredAction |= requiredAction; +} +/*********************** End of inter-module routines ************************/ + + +/**************************************************/ +/*............Policer API.........................*/ +/**************************************************/ + +t_Error FM_PCD_ConfigPlcrAutoRefreshMode(t_Handle h_FmPcd, bool enable) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdDriverParam, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdPlcr, E_INVALID_HANDLE); + + if (!FmIsMaster(p_FmPcd->h_Fm)) + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM_PCD_ConfigPlcrAutoRefreshMode - guest mode!")); + + p_FmPcd->p_FmPcdDriverParam->plcrAutoRefresh = enable; + + return E_OK; +} + +t_Error FM_PCD_ConfigPlcrNumOfSharedProfiles(t_Handle h_FmPcd, uint16_t numOfSharedPlcrProfiles) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdDriverParam, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdPlcr, E_INVALID_HANDLE); + + p_FmPcd->p_FmPcdPlcr->numOfSharedProfiles = numOfSharedPlcrProfiles; + + return E_OK; +} + +t_Error FM_PCD_SetPlcrStatistics(t_Handle h_FmPcd, bool enable) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + uint32_t tmpReg32; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdPlcr, E_INVALID_HANDLE); + + if (!FmIsMaster(p_FmPcd->h_Fm)) + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM_PCD_SetPlcrStatistics - guest mode!")); + + tmpReg32 = 
GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_gcr); + if (enable) + tmpReg32 |= FM_PCD_PLCR_GCR_STEN; + else + tmpReg32 &= ~FM_PCD_PLCR_GCR_STEN; + + WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_gcr, tmpReg32); + return E_OK; +} + +/* ... */ +#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) +t_Error FM_PCD_PlcrDumpRegs(t_Handle h_FmPcd) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + int i = 0; + + DECLARE_DUMP; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdPlcr, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE); + SANITY_CHECK_RETURN_ERROR(((p_FmPcd->guestId == NCSW_MASTER_ID) || + p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs), E_INVALID_OPERATION); + + DUMP_SUBTITLE(("\n")); + DUMP_TITLE(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs, ("FM-PCD policer regs")); + + DUMP_VAR(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs,fmpl_gcr); + DUMP_VAR(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs,fmpl_gsr); + DUMP_VAR(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs,fmpl_evr); + DUMP_VAR(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs,fmpl_ier); + DUMP_VAR(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs,fmpl_ifr); + DUMP_VAR(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs,fmpl_eevr); + DUMP_VAR(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs,fmpl_eier); + DUMP_VAR(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs,fmpl_eifr); + DUMP_VAR(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs,fmpl_rpcnt); + DUMP_VAR(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs,fmpl_ypcnt); + DUMP_VAR(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs,fmpl_rrpcnt); + DUMP_VAR(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs,fmpl_rypcnt); + DUMP_VAR(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs,fmpl_tpcnt); + DUMP_VAR(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs,fmpl_flmcnt); + + DUMP_VAR(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs,fmpl_serc); + DUMP_VAR(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs,fmpl_upcr); + DUMP_VAR(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs,fmpl_dpmr); + + DUMP_TITLE(&p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_pmr, 
("fmpl_pmr")); + DUMP_SUBSTRUCT_ARRAY(i, 63) + { + DUMP_MEMORY(&p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_pmr[i], sizeof(uint32_t)); + } + + return E_OK; +} +#endif /* (defined(DEBUG_ERRORS) && ... */ + +t_Handle FM_PCD_PlcrProfileSet(t_Handle h_FmPcd, + t_FmPcdPlcrProfileParams *p_ProfileParams) +{ + t_FmPcd *p_FmPcd; + t_FmPcdPlcrRegs *p_FmPcdPlcrRegs; + t_FmPcdPlcrProfileRegs plcrProfileReg; + uint32_t intFlags; + uint16_t absoluteProfileId; + t_Error err = E_OK; + uint32_t tmpReg32; + t_FmPcdPlcrProfile *p_Profile; + + SANITY_CHECK_RETURN_VALUE(h_FmPcd, E_INVALID_HANDLE, NULL); + + if (p_ProfileParams->modify) + { + p_Profile = (t_FmPcdPlcrProfile *)p_ProfileParams->id.h_Profile; + p_FmPcd = p_Profile->h_FmPcd; + absoluteProfileId = p_Profile->absoluteProfileId; + if (absoluteProfileId >= FM_PCD_PLCR_NUM_ENTRIES) + { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("profileId too Big ")); + return NULL; + } + + SANITY_CHECK_RETURN_VALUE(p_FmPcd->p_FmPcdPlcr, E_INVALID_HANDLE, NULL); + + /* Try lock profile using flag */ + if (!PlcrProfileFlagTryLock(p_Profile)) + { + DBG(TRACE, ("Profile Try Lock - BUSY")); + /* Signal to caller BUSY condition */ + p_ProfileParams->id.h_Profile = NULL; + return NULL; + } + } + else + { + p_FmPcd = (t_FmPcd*)h_FmPcd; + + SANITY_CHECK_RETURN_VALUE(p_FmPcd->p_FmPcdPlcr, E_INVALID_HANDLE, NULL); + + /* SMP: needs to be protected only if another core now changes the windows */ + err = FmPcdPlcrGetAbsoluteIdByProfileParams(h_FmPcd, + p_ProfileParams->id.newParams.profileType, + p_ProfileParams->id.newParams.h_FmPort, + p_ProfileParams->id.newParams.relativeProfileId, + &absoluteProfileId); + + if (absoluteProfileId >= FM_PCD_PLCR_NUM_ENTRIES) + { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("profileId too Big ")); + return NULL; + } + + if (FmPcdPlcrIsProfileValid(p_FmPcd, absoluteProfileId)) + { + REPORT_ERROR(MAJOR, E_ALREADY_EXISTS, ("Policer Profile is already used")); + return NULL; + } + + /* initialize profile struct */ + p_Profile = 
&p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId]; + p_Profile->h_FmPcd = p_FmPcd; + p_Profile->absoluteProfileId = absoluteProfileId; + + p_Profile->p_Lock = FmPcdAcquireLock(p_FmPcd); + if (!p_Profile->p_Lock) + REPORT_ERROR(MAJOR, E_NOT_AVAILABLE, ("FM Policer Profile lock obj!")); + } + + SANITY_CHECK_RETURN_VALUE(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE, NULL); + + p_Profile->nextEngineOnGreen = p_ProfileParams->nextEngineOnGreen; + memcpy(&p_Profile->paramsOnGreen, &(p_ProfileParams->paramsOnGreen), sizeof(u_FmPcdPlcrNextEngineParams)); + + p_Profile->nextEngineOnYellow = p_ProfileParams->nextEngineOnYellow; + memcpy(&p_Profile->paramsOnYellow, &(p_ProfileParams->paramsOnYellow), sizeof(u_FmPcdPlcrNextEngineParams)); + + p_Profile->nextEngineOnRed = p_ProfileParams->nextEngineOnRed; + memcpy(&p_Profile->paramsOnRed, &(p_ProfileParams->paramsOnRed), sizeof(u_FmPcdPlcrNextEngineParams)); + + memset(&plcrProfileReg, 0, sizeof(t_FmPcdPlcrProfileRegs)); + + /* build the policer profile registers */ + err = BuildProfileRegs(h_FmPcd, p_ProfileParams, &plcrProfileReg); + if (err) + { + REPORT_ERROR(MAJOR, err, NO_MSG); + if (p_ProfileParams->modify) + /* unlock */ + PlcrProfileFlagUnlock(p_Profile); + if (!p_ProfileParams->modify && + p_Profile->p_Lock) + /* release allocated Profile lock */ + FmPcdReleaseLock(p_FmPcd, p_Profile->p_Lock); + return NULL; + } + + if (p_FmPcd->h_Hc) + { + err = FmHcPcdPlcrSetProfile(p_FmPcd->h_Hc, (t_Handle)p_Profile, &plcrProfileReg); + if (p_ProfileParams->modify) + PlcrProfileFlagUnlock(p_Profile); + if (err) + { + /* release the allocated scheme lock */ + if (!p_ProfileParams->modify && + p_Profile->p_Lock) + FmPcdReleaseLock(p_FmPcd, p_Profile->p_Lock); + + return NULL; + } + if (!p_ProfileParams->modify) + FmPcdPlcrValidateProfileSw(p_FmPcd,absoluteProfileId); + return (t_Handle)p_Profile; + } + + p_FmPcdPlcrRegs = p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs; + SANITY_CHECK_RETURN_VALUE(p_FmPcdPlcrRegs, E_INVALID_HANDLE, NULL); + 
+ intFlags = PlcrHwLock(p_FmPcd->p_FmPcdPlcr); + WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pemode , plcrProfileReg.fmpl_pemode); + WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pegnia , plcrProfileReg.fmpl_pegnia); + WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_peynia , plcrProfileReg.fmpl_peynia); + WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pernia , plcrProfileReg.fmpl_pernia); + WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pecir , plcrProfileReg.fmpl_pecir); + WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pecbs , plcrProfileReg.fmpl_pecbs); + WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pepepir_eir,plcrProfileReg.fmpl_pepepir_eir); + WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pepbs_ebs,plcrProfileReg.fmpl_pepbs_ebs); + WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pelts , plcrProfileReg.fmpl_pelts); + WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pects , plcrProfileReg.fmpl_pects); + WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pepts_ets,plcrProfileReg.fmpl_pepts_ets); + WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pegpc , plcrProfileReg.fmpl_pegpc); + WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_peypc , plcrProfileReg.fmpl_peypc); + WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_perpc , plcrProfileReg.fmpl_perpc); + WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_perypc , plcrProfileReg.fmpl_perypc); + WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_perrpc , plcrProfileReg.fmpl_perrpc); + + tmpReg32 = FmPcdPlcrBuildWritePlcrActionRegs(absoluteProfileId); + WritePar(p_FmPcd, tmpReg32); + + PlcrHwUnlock(p_FmPcd->p_FmPcdPlcr, intFlags); + + if (!p_ProfileParams->modify) + FmPcdPlcrValidateProfileSw(p_FmPcd,absoluteProfileId); + else + PlcrProfileFlagUnlock(p_Profile); + + return (t_Handle)p_Profile; +} + +t_Error FM_PCD_PlcrProfileDelete(t_Handle h_Profile) +{ + t_FmPcdPlcrProfile *p_Profile = (t_FmPcdPlcrProfile*)h_Profile; + t_FmPcd *p_FmPcd; + uint16_t profileIndx; + uint32_t tmpReg32, intFlags; + t_Error err; + + 
SANITY_CHECK_RETURN_ERROR(p_Profile, E_INVALID_HANDLE); + p_FmPcd = p_Profile->h_FmPcd; + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + + profileIndx = p_Profile->absoluteProfileId; + + FmPcdPlcrInvalidateProfileSw(p_FmPcd,profileIndx); + + if (p_FmPcd->h_Hc) + { + err = FmHcPcdPlcrDeleteProfile(p_FmPcd->h_Hc, h_Profile); + if (p_Profile->p_Lock) + /* release allocated Profile lock */ + FmPcdReleaseLock(p_FmPcd, p_Profile->p_Lock); + + return err; + } + + intFlags = PlcrHwLock(p_FmPcd->p_FmPcdPlcr); + WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->profileRegs.fmpl_pemode, ~FM_PCD_PLCR_PEMODE_PI); + + tmpReg32 = FmPcdPlcrBuildWritePlcrActionRegs(profileIndx); + WritePar(p_FmPcd, tmpReg32); + PlcrHwUnlock(p_FmPcd->p_FmPcdPlcr, intFlags); + + if (p_Profile->p_Lock) + /* release allocated Profile lock */ + FmPcdReleaseLock(p_FmPcd, p_Profile->p_Lock); + + return E_OK; +} + +/***************************************************/ +/*............Policer Profile Counter..............*/ +/***************************************************/ +uint32_t FM_PCD_PlcrProfileGetCounter(t_Handle h_Profile, e_FmPcdPlcrProfileCounters counter) +{ + t_FmPcdPlcrProfile *p_Profile = (t_FmPcdPlcrProfile*)h_Profile; + t_FmPcd *p_FmPcd; + uint16_t profileIndx; + uint32_t intFlags, counterVal = 0; + t_FmPcdPlcrRegs *p_FmPcdPlcrRegs; + + SANITY_CHECK_RETURN_ERROR(p_Profile, E_INVALID_HANDLE); + p_FmPcd = p_Profile->h_FmPcd; + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + + if (p_FmPcd->h_Hc) + return FmHcPcdPlcrGetProfileCounter(p_FmPcd->h_Hc, h_Profile, counter); + + p_FmPcdPlcrRegs = p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs; + SANITY_CHECK_RETURN_VALUE(p_FmPcdPlcrRegs, E_INVALID_HANDLE, 0); + + profileIndx = p_Profile->absoluteProfileId; + + if (profileIndx >= FM_PCD_PLCR_NUM_ENTRIES) + { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("profileId too Big ")); + return 0; + } + intFlags = PlcrHwLock(p_FmPcd->p_FmPcdPlcr); + WritePar(p_FmPcd, 
FmPcdPlcrBuildReadPlcrActionReg(profileIndx)); + + switch (counter) + { + case e_FM_PCD_PLCR_PROFILE_GREEN_PACKET_TOTAL_COUNTER: + counterVal = (GET_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pegpc)); + break; + case e_FM_PCD_PLCR_PROFILE_YELLOW_PACKET_TOTAL_COUNTER: + counterVal = GET_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_peypc); + break; + case e_FM_PCD_PLCR_PROFILE_RED_PACKET_TOTAL_COUNTER: + counterVal = GET_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_perpc); + break; + case e_FM_PCD_PLCR_PROFILE_RECOLOURED_YELLOW_PACKET_TOTAL_COUNTER: + counterVal = GET_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_perypc); + break; + case e_FM_PCD_PLCR_PROFILE_RECOLOURED_RED_PACKET_TOTAL_COUNTER: + counterVal = GET_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_perrpc); + break; + default: + REPORT_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG); + break; + } + PlcrHwUnlock(p_FmPcd->p_FmPcdPlcr, intFlags); + + return counterVal; +} + +t_Error FM_PCD_PlcrProfileSetCounter(t_Handle h_Profile, e_FmPcdPlcrProfileCounters counter, uint32_t value) +{ + t_FmPcdPlcrProfile *p_Profile = (t_FmPcdPlcrProfile*)h_Profile; + t_FmPcd *p_FmPcd; + uint16_t profileIndx; + uint32_t tmpReg32, intFlags; + t_FmPcdPlcrRegs *p_FmPcdPlcrRegs; + + SANITY_CHECK_RETURN_ERROR(p_Profile, E_INVALID_HANDLE); + + p_FmPcd = p_Profile->h_FmPcd; + profileIndx = p_Profile->absoluteProfileId; + + if (p_FmPcd->h_Hc) + return FmHcPcdPlcrSetProfileCounter(p_FmPcd->h_Hc, h_Profile, counter, value); + + p_FmPcdPlcrRegs = p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs; + SANITY_CHECK_RETURN_ERROR(p_FmPcdPlcrRegs, E_INVALID_HANDLE); + + intFlags = PlcrHwLock(p_FmPcd->p_FmPcdPlcr); + switch (counter) + { + case e_FM_PCD_PLCR_PROFILE_GREEN_PACKET_TOTAL_COUNTER: + WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pegpc, value); + break; + case e_FM_PCD_PLCR_PROFILE_YELLOW_PACKET_TOTAL_COUNTER: + WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_peypc, value); + break; + case e_FM_PCD_PLCR_PROFILE_RED_PACKET_TOTAL_COUNTER: + 
WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_perpc, value); + break; + case e_FM_PCD_PLCR_PROFILE_RECOLOURED_YELLOW_PACKET_TOTAL_COUNTER: + WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_perypc ,value); + break; + case e_FM_PCD_PLCR_PROFILE_RECOLOURED_RED_PACKET_TOTAL_COUNTER: + WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_perrpc ,value); + break; + default: + PlcrHwUnlock(p_FmPcd->p_FmPcdPlcr, intFlags); + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG); + } + + /* Activate the atomic write action by writing FMPL_PAR with: GO=1, RW=1, PSI=0, PNUM = + * Profile Number, PWSEL=0xFFFF (select all words). + */ + tmpReg32 = FmPcdPlcrBuildWritePlcrActionReg(profileIndx); + tmpReg32 |= FmPcdPlcrBuildCounterProfileReg(counter); + WritePar(p_FmPcd, tmpReg32); + PlcrHwUnlock(p_FmPcd->p_FmPcdPlcr, intFlags); + + return E_OK; +} + +#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) +t_Error FM_PCD_PlcrProfileDumpRegs(t_Handle h_Profile) +{ + t_FmPcdPlcrProfile *p_Profile = (t_FmPcdPlcrProfile*)h_Profile; + t_FmPcd *p_FmPcd; + t_FmPcdPlcrProfileRegs *p_ProfilesRegs; + uint16_t profileIndx; + uint32_t tmpReg, intFlags; + + DECLARE_DUMP; + + SANITY_CHECK_RETURN_ERROR(p_Profile, E_INVALID_HANDLE); + p_FmPcd = p_Profile->h_FmPcd; + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdPlcr, E_INVALID_HANDLE); + + profileIndx = p_Profile->absoluteProfileId; + + DUMP_SUBTITLE(("\n")); + DUMP_TITLE(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs, ("FM-PCD policer-profile regs")); + + p_ProfilesRegs = &p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->profileRegs; + + tmpReg = FmPcdPlcrBuildReadPlcrActionReg((uint16_t)profileIndx); + intFlags = PlcrHwLock(p_FmPcd->p_FmPcdPlcr); + WritePar(p_FmPcd, tmpReg); + + DUMP_TITLE(p_ProfilesRegs, ("Profile %d regs", profileIndx)); + + DUMP_VAR(p_ProfilesRegs, fmpl_pemode); + DUMP_VAR(p_ProfilesRegs, fmpl_pegnia); + DUMP_VAR(p_ProfilesRegs, fmpl_peynia); + DUMP_VAR(p_ProfilesRegs, fmpl_pernia); + 
DUMP_VAR(p_ProfilesRegs, fmpl_pecir); + DUMP_VAR(p_ProfilesRegs, fmpl_pecbs); + DUMP_VAR(p_ProfilesRegs, fmpl_pepepir_eir); + DUMP_VAR(p_ProfilesRegs, fmpl_pepbs_ebs); + DUMP_VAR(p_ProfilesRegs, fmpl_pelts); + DUMP_VAR(p_ProfilesRegs, fmpl_pects); + DUMP_VAR(p_ProfilesRegs, fmpl_pepts_ets); + DUMP_VAR(p_ProfilesRegs, fmpl_pegpc); + DUMP_VAR(p_ProfilesRegs, fmpl_peypc); + DUMP_VAR(p_ProfilesRegs, fmpl_perpc); + DUMP_VAR(p_ProfilesRegs, fmpl_perypc); + DUMP_VAR(p_ProfilesRegs, fmpl_perrpc); + PlcrHwUnlock(p_FmPcd->p_FmPcdPlcr, intFlags); + + return E_OK; +} +#endif /* (defined(DEBUG_ERRORS) && ... */ diff --git a/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_plcr.h b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_plcr.h new file mode 100644 index 0000000..2bb8b96 --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_plcr.h @@ -0,0 +1,165 @@ +/* + * Copyright 2008-2012 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. 
+ * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + +/****************************************************************************** + @File fm_plcr.h + + @Description FM Policer private header +*//***************************************************************************/ +#ifndef __FM_PLCR_H +#define __FM_PLCR_H + +#include "std_ext.h" + + +/***********************************************************************/ +/* Policer defines */ +/***********************************************************************/ + +#define FM_PCD_PLCR_PAR_GO 0x80000000 +#define FM_PCD_PLCR_PAR_PWSEL_MASK 0x0000FFFF +#define FM_PCD_PLCR_PAR_R 0x40000000 + +/* shifts */ +#define FM_PCD_PLCR_PAR_PNUM_SHIFT 16 + +/* masks */ +#define FM_PCD_PLCR_PEMODE_PI 0x80000000 +#define FM_PCD_PLCR_PEMODE_CBLND 0x40000000 +#define FM_PCD_PLCR_PEMODE_ALG_MASK 0x30000000 +#define FM_PCD_PLCR_PEMODE_ALG_RFC2698 0x10000000 +#define FM_PCD_PLCR_PEMODE_ALG_RFC4115 0x20000000 +#define FM_PCD_PLCR_PEMODE_DEFC_MASK 0x0C000000 +#define FM_PCD_PLCR_PEMODE_DEFC_Y 0x04000000 +#define FM_PCD_PLCR_PEMODE_DEFC_R 0x08000000 +#define FM_PCD_PLCR_PEMODE_DEFC_OVERRIDE 0x0C000000 +#define FM_PCD_PLCR_PEMODE_OVCLR_MASK 0x03000000 +#define FM_PCD_PLCR_PEMODE_OVCLR_Y 0x01000000 +#define 
FM_PCD_PLCR_PEMODE_OVCLR_R 0x02000000 +#define FM_PCD_PLCR_PEMODE_OVCLR_G_NC 0x03000000 +#define FM_PCD_PLCR_PEMODE_PKT 0x00800000 +#define FM_PCD_PLCR_PEMODE_FPP_MASK 0x001F0000 +#define FM_PCD_PLCR_PEMODE_FPP_SHIFT 16 +#define FM_PCD_PLCR_PEMODE_FLS_MASK 0x0000F000 +#define FM_PCD_PLCR_PEMODE_FLS_L2 0x00003000 +#define FM_PCD_PLCR_PEMODE_FLS_L3 0x0000B000 +#define FM_PCD_PLCR_PEMODE_FLS_L4 0x0000E000 +#define FM_PCD_PLCR_PEMODE_FLS_FULL 0x0000F000 +#define FM_PCD_PLCR_PEMODE_RBFLS 0x00000800 +#define FM_PCD_PLCR_PEMODE_TRA 0x00000004 +#define FM_PCD_PLCR_PEMODE_TRB 0x00000002 +#define FM_PCD_PLCR_PEMODE_TRC 0x00000001 +#define FM_PCD_PLCR_DOUBLE_ECC 0x80000000 +#define FM_PCD_PLCR_INIT_ENTRY_ERROR 0x40000000 +#define FM_PCD_PLCR_PRAM_SELF_INIT_COMPLETE 0x80000000 +#define FM_PCD_PLCR_ATOMIC_ACTION_COMPLETE 0x40000000 + +#define FM_PCD_PLCR_NIA_VALID 0x80000000 + +#define FM_PCD_PLCR_GCR_EN 0x80000000 +#define FM_PCD_PLCR_GCR_STEN 0x40000000 +#define FM_PCD_PLCR_GCR_DAR 0x20000000 +#define FM_PCD_PLCR_GCR_DEFNIA 0x00FFFFFF +#define FM_PCD_PLCR_NIA_ABS 0x00000100 + +#define FM_PCD_PLCR_GSR_BSY 0x80000000 +#define FM_PCD_PLCR_GSR_DQS 0x60000000 +#define FM_PCD_PLCR_GSR_RPB 0x20000000 +#define FM_PCD_PLCR_GSR_FQS 0x0C000000 +#define FM_PCD_PLCR_GSR_LPALG 0x0000C000 +#define FM_PCD_PLCR_GSR_LPCA 0x00003000 +#define FM_PCD_PLCR_GSR_LPNUM 0x000000FF + +#define FM_PCD_PLCR_EVR_PSIC 0x80000000 +#define FM_PCD_PLCR_EVR_AAC 0x40000000 + +#define FM_PCD_PLCR_PAR_PSI 0x20000000 +#define FM_PCD_PLCR_PAR_PNUM 0x00FF0000 +/* PWSEL Selctive select options */ +#define FM_PCD_PLCR_PAR_PWSEL_PEMODE 0x00008000 /* 0 */ +#define FM_PCD_PLCR_PAR_PWSEL_PEGNIA 0x00004000 /* 1 */ +#define FM_PCD_PLCR_PAR_PWSEL_PEYNIA 0x00002000 /* 2 */ +#define FM_PCD_PLCR_PAR_PWSEL_PERNIA 0x00001000 /* 3 */ +#define FM_PCD_PLCR_PAR_PWSEL_PECIR 0x00000800 /* 4 */ +#define FM_PCD_PLCR_PAR_PWSEL_PECBS 0x00000400 /* 5 */ +#define FM_PCD_PLCR_PAR_PWSEL_PEPIR_EIR 0x00000200 /* 6 */ +#define 
FM_PCD_PLCR_PAR_PWSEL_PEPBS_EBS 0x00000100 /* 7 */ +#define FM_PCD_PLCR_PAR_PWSEL_PELTS 0x00000080 /* 8 */ +#define FM_PCD_PLCR_PAR_PWSEL_PECTS 0x00000040 /* 9 */ +#define FM_PCD_PLCR_PAR_PWSEL_PEPTS_ETS 0x00000020 /* 10 */ +#define FM_PCD_PLCR_PAR_PWSEL_PEGPC 0x00000010 /* 11 */ +#define FM_PCD_PLCR_PAR_PWSEL_PEYPC 0x00000008 /* 12 */ +#define FM_PCD_PLCR_PAR_PWSEL_PERPC 0x00000004 /* 13 */ +#define FM_PCD_PLCR_PAR_PWSEL_PERYPC 0x00000002 /* 14 */ +#define FM_PCD_PLCR_PAR_PWSEL_PERRPC 0x00000001 /* 15 */ + +#define FM_PCD_PLCR_PAR_PMR_BRN_1TO1 0x0000 /* - Full bit replacement. {PBNUM[0:N-1] + 1-> 2^N specific locations. */ +#define FM_PCD_PLCR_PAR_PMR_BRN_2TO2 0x1 /* - {PBNUM[0:N-2],PNUM[N-1]}. + 2-> 2^(N-1) base locations. */ +#define FM_PCD_PLCR_PAR_PMR_BRN_4TO4 0x2 /* - {PBNUM[0:N-3],PNUM[N-2:N-1]}. + 4-> 2^(N-2) base locations. */ +#define FM_PCD_PLCR_PAR_PMR_BRN_8TO8 0x3 /* - {PBNUM[0:N-4],PNUM[N-3:N-1]}. + 8->2^(N-3) base locations. */ +#define FM_PCD_PLCR_PAR_PMR_BRN_16TO16 0x4 /* - {PBNUM[0:N-5],PNUM[N-4:N-1]}. + 16-> 2^(N-4) base locations. */ +#define FM_PCD_PLCR_PAR_PMR_BRN_32TO32 0x5 /* {PBNUM[0:N-6],PNUM[N-5:N-1]}. + 32-> 2^(N-5) base locations. */ +#define FM_PCD_PLCR_PAR_PMR_BRN_64TO64 0x6 /* {PBNUM[0:N-7],PNUM[N-6:N-1]}. + 64-> 2^(N-6) base locations. */ +#define FM_PCD_PLCR_PAR_PMR_BRN_128TO128 0x7 /* {PBNUM[0:N-8],PNUM[N-7:N-1]}. + 128-> 2^(N-7) base locations. */ +#define FM_PCD_PLCR_PAR_PMR_BRN_256TO256 0x8 /* - No bit replacement for N=8. {PNUM[N-8:N-1]}. + When N=8 this option maps all 256 profiles by the DISPATCH bus into one group. 
*/ + +#define FM_PCD_PLCR_PMR_V 0x80000000 +#define PLCR_ERR_ECC_CAP 0x80000000 +#define PLCR_ERR_ECC_TYPE_DOUBLE 0x40000000 +#define PLCR_ERR_ECC_PNUM_MASK 0x00000FF0 +#define PLCR_ERR_ECC_OFFSET_MASK 0x0000000F + +#define PLCR_ERR_UNINIT_CAP 0x80000000 +#define PLCR_ERR_UNINIT_NUM_MASK 0x000000FF +#define PLCR_ERR_UNINIT_PID_MASK 0x003f0000 +#define PLCR_ERR_UNINIT_ABSOLUTE_MASK 0x00008000 + +/* shifts */ +#define PLCR_ERR_ECC_PNUM_SHIFT 4 +#define PLCR_ERR_UNINIT_PID_SHIFT 16 + +#define FM_PCD_PLCR_PMR_BRN_SHIFT 16 + +#define PLCR_PORT_WINDOW_SIZE(hardwarePortId) + + +#endif /* __FM_PLCR_H */ diff --git a/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_prs.c b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_prs.c new file mode 100644 index 0000000..e198afd --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_prs.c @@ -0,0 +1,457 @@ +/* + * Copyright 2008-2012 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. 
+ * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + +/****************************************************************************** + @File fm_pcd.c + + @Description FM PCD ... +*//***************************************************************************/ +#include "std_ext.h" +#include "error_ext.h" +#include "string_ext.h" +#include "debug_ext.h" +#include "net_ext.h" + +#include "fm_common.h" +#include "fm_pcd.h" +#include "fm_pcd_ipc.h" +#include "fm_prs.h" +#include "fsl_fman_prs.h" + + +static void PcdPrsErrorException(t_Handle h_FmPcd) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; + uint32_t event, ev_mask; + struct fman_prs_regs *PrsRegs = (struct fman_prs_regs *)p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs; + + ASSERT_COND(p_FmPcd->guestId == NCSW_MASTER_ID); + ev_mask = fman_prs_get_err_ev_mask(PrsRegs); + + event = fman_prs_get_err_event(PrsRegs, ev_mask); + + fman_prs_ack_err_event(PrsRegs, event); + + DBG(TRACE, ("parser error - 0x%08x\n",event)); + + if(event & FM_PCD_PRS_DOUBLE_ECC) + p_FmPcd->f_Exception(p_FmPcd->h_App,e_FM_PCD_PRS_EXCEPTION_DOUBLE_ECC); +} + +static void PcdPrsException(t_Handle h_FmPcd) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; + uint32_t event, ev_mask; + struct fman_prs_regs *PrsRegs = (struct fman_prs_regs 
*)p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs; + + ASSERT_COND(p_FmPcd->guestId == NCSW_MASTER_ID); + ev_mask = fman_prs_get_expt_ev_mask(PrsRegs); + event = fman_prs_get_expt_event(PrsRegs, ev_mask); + + ASSERT_COND(event & FM_PCD_PRS_SINGLE_ECC); + + DBG(TRACE, ("parser event - 0x%08x\n",event)); + + fman_prs_ack_expt_event(PrsRegs, event); + + p_FmPcd->f_Exception(p_FmPcd->h_App,e_FM_PCD_PRS_EXCEPTION_SINGLE_ECC); +} + +t_Handle PrsConfig(t_FmPcd *p_FmPcd,t_FmPcdParams *p_FmPcdParams) +{ + t_FmPcdPrs *p_FmPcdPrs; + uintptr_t baseAddr; + + UNUSED(p_FmPcd); + UNUSED(p_FmPcdParams); + + p_FmPcdPrs = (t_FmPcdPrs *) XX_Malloc(sizeof(t_FmPcdPrs)); + if (!p_FmPcdPrs) + { + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM Parser structure allocation FAILED")); + return NULL; + } + memset(p_FmPcdPrs, 0, sizeof(t_FmPcdPrs)); + fman_prs_defconfig(&p_FmPcd->p_FmPcdDriverParam->dfltCfg); + + if (p_FmPcd->guestId == NCSW_MASTER_ID) + { + baseAddr = FmGetPcdPrsBaseAddr(p_FmPcdParams->h_Fm); + p_FmPcdPrs->p_SwPrsCode = (uint32_t *)UINT_TO_PTR(baseAddr); + p_FmPcdPrs->p_FmPcdPrsRegs = (struct fman_prs_regs *)UINT_TO_PTR(baseAddr + PRS_REGS_OFFSET); + } + + p_FmPcdPrs->fmPcdPrsPortIdStatistics = p_FmPcd->p_FmPcdDriverParam->dfltCfg.port_id_stat; + p_FmPcd->p_FmPcdDriverParam->prsMaxParseCycleLimit = p_FmPcd->p_FmPcdDriverParam->dfltCfg.max_prs_cyc_lim; + p_FmPcd->exceptions |= p_FmPcd->p_FmPcdDriverParam->dfltCfg.prs_exceptions; + + return p_FmPcdPrs; +} + +t_Error PrsInit(t_FmPcd *p_FmPcd) +{ + t_FmPcdDriverParam *p_Param = p_FmPcd->p_FmPcdDriverParam; + uint32_t *p_TmpCode; + uint32_t *p_LoadTarget = (uint32_t *)PTR_MOVE(p_FmPcd->p_FmPcdPrs->p_SwPrsCode, + FM_PCD_SW_PRS_SIZE-FM_PCD_PRS_SW_PATCHES_SIZE); + struct fman_prs_regs *PrsRegs = (struct fman_prs_regs *)p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs; +#ifdef FM_CAPWAP_SUPPORT + uint8_t swPrsPatch[] = SW_PRS_UDP_LITE_PATCH; +#else + uint8_t swPrsPatch[] = SW_PRS_IP_FRAG_PATCH; +#endif /* FM_CAPWAP_SUPPORT */ + uint32_t i; + + 
ASSERT_COND(sizeof(swPrsPatch) <= (FM_PCD_PRS_SW_PATCHES_SIZE-FM_PCD_PRS_SW_TAIL_SIZE)); + + /* nothing to do in guest-partition */ + if (p_FmPcd->guestId != NCSW_MASTER_ID) + return E_OK; + + p_TmpCode = (uint32_t *)XX_MallocSmart(ROUND_UP(sizeof(swPrsPatch),4), 0, sizeof(uint32_t)); + if (!p_TmpCode) + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Tmp Sw-Parser code allocation FAILED")); + memset((uint8_t *)p_TmpCode, 0, ROUND_UP(sizeof(swPrsPatch),4)); + memcpy((uint8_t *)p_TmpCode, (uint8_t *)swPrsPatch, sizeof(swPrsPatch)); + + fman_prs_init(PrsRegs, &p_Param->dfltCfg); + + /* register even if no interrupts enabled, to allow future enablement */ + FmRegisterIntr(p_FmPcd->h_Fm, e_FM_MOD_PRS, 0, e_FM_INTR_TYPE_ERR, PcdPrsErrorException, p_FmPcd); + + /* register even if no interrupts enabled, to allow future enablement */ + FmRegisterIntr(p_FmPcd->h_Fm, e_FM_MOD_PRS, 0, e_FM_INTR_TYPE_NORMAL, PcdPrsException, p_FmPcd); + + if(p_FmPcd->exceptions & FM_PCD_EX_PRS_SINGLE_ECC) + FmEnableRamsEcc(p_FmPcd->h_Fm); + + if(p_FmPcd->exceptions & FM_PCD_EX_PRS_DOUBLE_ECC) + FmEnableRamsEcc(p_FmPcd->h_Fm); + + /* load sw parser Ip-Frag patch */ + for (i=0; i<DIV_CEIL(sizeof(swPrsPatch),4); i++) + WRITE_UINT32(p_LoadTarget[i], p_TmpCode[i]); + + XX_FreeSmart(p_TmpCode); + + return E_OK; +} + +void PrsFree(t_FmPcd *p_FmPcd) +{ + ASSERT_COND(p_FmPcd->guestId == NCSW_MASTER_ID); + FmUnregisterIntr(p_FmPcd->h_Fm, e_FM_MOD_PRS, 0, e_FM_INTR_TYPE_ERR); + /* register even if no interrupts enabled, to allow future enablement */ + FmUnregisterIntr(p_FmPcd->h_Fm, e_FM_MOD_PRS, 0, e_FM_INTR_TYPE_NORMAL); +} + +void PrsEnable(t_FmPcd *p_FmPcd) +{ + struct fman_prs_regs *PrsRegs = (struct fman_prs_regs *)p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs; + + ASSERT_COND(p_FmPcd->guestId == NCSW_MASTER_ID); + fman_prs_enable(PrsRegs); +} + +void PrsDisable(t_FmPcd *p_FmPcd) +{ + struct fman_prs_regs *PrsRegs = (struct fman_prs_regs *)p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs; + + ASSERT_COND(p_FmPcd->guestId == 
NCSW_MASTER_ID); + fman_prs_disable(PrsRegs); +} + +t_Error PrsIncludePortInStatistics(t_FmPcd *p_FmPcd, uint8_t hardwarePortId, bool include) +{ + struct fman_prs_regs *PrsRegs; + uint32_t bitMask = 0; + uint8_t prsPortId; + + SANITY_CHECK_RETURN_ERROR((hardwarePortId >=1 && hardwarePortId <= 16), E_INVALID_VALUE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdPrs, E_INVALID_HANDLE); + + PrsRegs = (struct fman_prs_regs *)p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs; + + GET_FM_PCD_PRS_PORT_ID(prsPortId, hardwarePortId); + GET_FM_PCD_INDEX_FLAG(bitMask, prsPortId); + + if (include) + p_FmPcd->p_FmPcdPrs->fmPcdPrsPortIdStatistics |= bitMask; + else + p_FmPcd->p_FmPcdPrs->fmPcdPrsPortIdStatistics &= ~bitMask; + + fman_prs_set_stst_port_msk(PrsRegs, + p_FmPcd->p_FmPcdPrs->fmPcdPrsPortIdStatistics); + + return E_OK; +} + +t_Error FmPcdPrsIncludePortInStatistics(t_Handle h_FmPcd, uint8_t hardwarePortId, bool include) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; + t_Error err; + + SANITY_CHECK_RETURN_ERROR((hardwarePortId >=1 && hardwarePortId <= 16), E_INVALID_VALUE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdPrs, E_INVALID_HANDLE); + + if ((p_FmPcd->guestId != NCSW_MASTER_ID) && + p_FmPcd->h_IpcSession) + { + t_FmPcdIpcPrsIncludePort prsIncludePortParams; + t_FmPcdIpcMsg msg; + + prsIncludePortParams.hardwarePortId = hardwarePortId; + prsIncludePortParams.include = include; + memset(&msg, 0, sizeof(msg)); + msg.msgId = FM_PCD_PRS_INC_PORT_STATS; + memcpy(msg.msgBody, &prsIncludePortParams, sizeof(prsIncludePortParams)); + err = XX_IpcSendMessage(p_FmPcd->h_IpcSession, + (uint8_t*)&msg, + sizeof(msg.msgId) +sizeof(prsIncludePortParams), + NULL, + NULL, + NULL, + NULL); + if (err != E_OK) + RETURN_ERROR(MAJOR, err, NO_MSG); + return E_OK; + } + else if (p_FmPcd->guestId != NCSW_MASTER_ID) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, + ("running in guest-mode without IPC!")); + 
+ return PrsIncludePortInStatistics(p_FmPcd, hardwarePortId, include); +} + +uint32_t FmPcdGetSwPrsOffset(t_Handle h_FmPcd, e_NetHeaderType hdr, uint8_t indexPerHdr) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; + t_FmPcdPrsLabelParams *p_Label; + int i; + + SANITY_CHECK_RETURN_VALUE(p_FmPcd, E_INVALID_HANDLE, 0); + SANITY_CHECK_RETURN_VALUE(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_HANDLE, 0); + + if ((p_FmPcd->guestId != NCSW_MASTER_ID) && + p_FmPcd->h_IpcSession) + { + t_Error err = E_OK; + t_FmPcdIpcSwPrsLable labelParams; + t_FmPcdIpcMsg msg; + uint32_t prsOffset = 0; + t_FmPcdIpcReply reply; + uint32_t replyLength; + + memset(&reply, 0, sizeof(reply)); + memset(&msg, 0, sizeof(msg)); + labelParams.enumHdr = (uint32_t)hdr; + labelParams.indexPerHdr = indexPerHdr; + msg.msgId = FM_PCD_GET_SW_PRS_OFFSET; + memcpy(msg.msgBody, &labelParams, sizeof(labelParams)); + replyLength = sizeof(uint32_t) + sizeof(uint32_t); + err = XX_IpcSendMessage(p_FmPcd->h_IpcSession, + (uint8_t*)&msg, + sizeof(msg.msgId) +sizeof(labelParams), + (uint8_t*)&reply, + &replyLength, + NULL, + NULL); + if (err != E_OK) + RETURN_ERROR(MAJOR, err, NO_MSG); + if (replyLength != sizeof(uint32_t) + sizeof(uint32_t)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch")); + + memcpy((uint8_t*)&prsOffset, reply.replyBody, sizeof(uint32_t)); + return prsOffset; + } + else if (p_FmPcd->guestId != NCSW_MASTER_ID) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, + ("running in guest-mode without IPC!")); + + ASSERT_COND(p_FmPcd->p_FmPcdPrs->currLabel < FM_PCD_PRS_NUM_OF_LABELS); + + for (i=0; i<p_FmPcd->p_FmPcdPrs->currLabel; i++) + { + p_Label = &p_FmPcd->p_FmPcdPrs->labelsTable[i]; + + if ((hdr == p_Label->hdr) && (indexPerHdr == p_Label->indexPerHdr)) + return p_Label->instructionOffset; + } + + REPORT_ERROR(MAJOR, E_NOT_FOUND, ("Sw Parser attachment Not found")); + return (uint32_t)ILLEGAL_BASE; +} + +void FM_PCD_SetPrsStatistics(t_Handle h_FmPcd, bool enable) +{ + t_FmPcd *p_FmPcd = 
(t_FmPcd*)h_FmPcd; + struct fman_prs_regs *PrsRegs; + + SANITY_CHECK_RETURN(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN(p_FmPcd->p_FmPcdPrs, E_INVALID_HANDLE); + + PrsRegs = (struct fman_prs_regs *)p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs; + + + if(p_FmPcd->guestId != NCSW_MASTER_ID) + { + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM_PCD_SetPrsStatistics - guest mode!")); + return; + } + + fman_prs_set_stst(PrsRegs, enable); +} + +t_Error FM_PCD_PrsLoadSw(t_Handle h_FmPcd, t_FmPcdPrsSwParams *p_SwPrs) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + uint32_t *p_LoadTarget; + uint32_t *p_TmpCode; + int i; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdPrs, E_INVALID_STATE); + SANITY_CHECK_RETURN_ERROR(p_SwPrs, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPcd->enabled, E_INVALID_HANDLE); + + if (p_FmPcd->guestId != NCSW_MASTER_ID) + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM in guest-mode!")); + + if (!p_SwPrs->override) + { + if(p_FmPcd->p_FmPcdPrs->p_CurrSwPrs > p_FmPcd->p_FmPcdPrs->p_SwPrsCode + p_SwPrs->base*2/4) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("SW parser base must be larger than current loaded code")); + } + else + p_FmPcd->p_FmPcdPrs->currLabel = 0; + + if (p_SwPrs->size > FM_PCD_SW_PRS_SIZE - FM_PCD_PRS_SW_TAIL_SIZE - p_SwPrs->base*2) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("p_SwPrs->size may not be larger than MAX_SW_PRS_CODE_SIZE")); + + if (p_FmPcd->p_FmPcdPrs->currLabel + p_SwPrs->numOfLabels > FM_PCD_PRS_NUM_OF_LABELS) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Exceeded number of labels allowed ")); + + p_TmpCode = (uint32_t *)XX_MallocSmart(ROUND_UP(p_SwPrs->size,4), 0, sizeof(uint32_t)); + if (!p_TmpCode) + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Tmp Sw-Parser code allocation FAILED")); + memset((uint8_t *)p_TmpCode, 0, ROUND_UP(p_SwPrs->size,4)); + memcpy((uint8_t *)p_TmpCode, p_SwPrs->p_Code, p_SwPrs->size); + + /* save 
sw parser labels */ + memcpy(&p_FmPcd->p_FmPcdPrs->labelsTable[p_FmPcd->p_FmPcdPrs->currLabel], + p_SwPrs->labelsTable, + p_SwPrs->numOfLabels*sizeof(t_FmPcdPrsLabelParams)); + p_FmPcd->p_FmPcdPrs->currLabel += p_SwPrs->numOfLabels; + + /* load sw parser code */ + p_LoadTarget = p_FmPcd->p_FmPcdPrs->p_SwPrsCode + p_SwPrs->base*2/4; + for(i=0; i<DIV_CEIL(p_SwPrs->size,4); i++) + WRITE_UINT32(p_LoadTarget[i], p_TmpCode[i]); + p_FmPcd->p_FmPcdPrs->p_CurrSwPrs = + p_FmPcd->p_FmPcdPrs->p_SwPrsCode + p_SwPrs->base*2/4 + ROUND_UP(p_SwPrs->size,4); + + /* copy data parameters */ + for (i=0;i<FM_PCD_PRS_NUM_OF_HDRS;i++) + WRITE_UINT32(*(p_FmPcd->p_FmPcdPrs->p_SwPrsCode+PRS_SW_DATA/4+i), p_SwPrs->swPrsDataParams[i]); + + /* Clear last 4 bytes */ + WRITE_UINT32(*(p_FmPcd->p_FmPcdPrs->p_SwPrsCode+(PRS_SW_DATA-FM_PCD_PRS_SW_TAIL_SIZE)/4), 0); + + XX_FreeSmart(p_TmpCode); + + return E_OK; +} + +t_Error FM_PCD_ConfigPrsMaxCycleLimit(t_Handle h_FmPcd,uint16_t value) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdDriverParam, E_INVALID_HANDLE); + + if(p_FmPcd->guestId != NCSW_MASTER_ID) + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM_PCD_ConfigPrsMaxCycleLimit - guest mode!")); + + p_FmPcd->p_FmPcdDriverParam->prsMaxParseCycleLimit = value; + + return E_OK; +} + +#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) +t_Error FM_PCD_PrsDumpRegs(t_Handle h_FmPcd) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + + DECLARE_DUMP; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdPrs, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE); + SANITY_CHECK_RETURN_ERROR(((p_FmPcd->guestId == NCSW_MASTER_ID) || + p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs), E_INVALID_OPERATION); + + DUMP_SUBTITLE(("\n")); + DUMP_TITLE(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs, ("FM-PCD parser regs")); + + 
DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,fmpr_rpclim); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,fmpr_rpimac); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,pmeec); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,fmpr_pevr); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,fmpr_pever); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,fmpr_perr); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,fmpr_perer); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,fmpr_ppsc); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,fmpr_pds); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,fmpr_l2rrs); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,fmpr_l3rrs); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,fmpr_l4rrs); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,fmpr_srrs); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,fmpr_l2rres); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,fmpr_l3rres); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,fmpr_l4rres); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,fmpr_srres); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,fmpr_spcs); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,fmpr_spscs); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,fmpr_hxscs); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,fmpr_mrcs); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,fmpr_mwcs); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,fmpr_mrscs); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,fmpr_mwscs); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,fmpr_fcscs); + + return E_OK; +} +#endif /* (defined(DEBUG_ERRORS) && ... */ diff --git a/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_prs.h b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_prs.h new file mode 100644 index 0000000..3e5974c5 --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_prs.h @@ -0,0 +1,193 @@ +/* + * Copyright 2008-2012 Freescale Semiconductor Inc. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + + +/****************************************************************************** + @File fm_prs.h + + @Description FM Parser private header +*//***************************************************************************/ +#ifndef __FM_PRS_H +#define __FM_PRS_H + +#include "std_ext.h" + + +/***********************************************************************/ +/* SW parser IP_FRAG patch */ +/***********************************************************************/ + + +#ifdef FM_CAPWAP_SUPPORT +#define SW_PRS_UDP_LITE_PATCH \ +{\ + 0x31,0x92,0x50,0x29,0x00,0x88,0x08,0x16,0x00,0x00, \ + 0x00,0x01,0x00,0x05,0x00,0x81,0x1C,0x0B,0x00,0x01, \ + 0x1B,0xFF, \ +} +#endif /* FM_CAPWAP_SUPPORT */ + +#if (DPAA_VERSION == 10) +/* Version: 106.1.9 */ +#define SW_PRS_IP_FRAG_PATCH \ +{ \ + 0x31,0x52,0x00,0xDA,0x0A,0x00,0x00,0x00,0x00,0x00, \ + 0x00,0x00,0x43,0x0A,0x00,0x00,0x00,0x01,0x1B,0xFE, \ + 0x00,0x00,0x99,0x00,0x53,0x13,0x00,0x00,0x00,0x00, \ + 0x9F,0x98,0x53,0x13,0x00,0x00,0x1B,0x23,0x33,0xF1, \ + 0x00,0xF9,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00, \ + 0x28,0x7F,0x00,0x03,0x00,0x02,0x00,0x00,0x00,0x01, \ + 0x32,0xC1,0x32,0xF0,0x00,0x4A,0x00,0x80,0x1F,0xFF, \ + 0x00,0x01,0x1B,0xFE,0x31,0x52,0x00,0xDA,0x06,0x00, \ + 0x00,0x00,0x00,0x00,0x00,0x00,0x43,0x2F,0x00,0x00, \ + 0x00,0x01,0x1B,0xFE,0x31,0x52,0x00,0xDA,0x00,0x40, \ + 0x00,0x00,0x00,0x00,0x00,0x00,0x53,0x95,0x00,0x00, \ + 0x00,0x00,0x9B,0x8F,0x2F,0x0F,0x32,0xC1,0x00,0x55, \ + 0x00,0x28,0x28,0x43,0x30,0x7E,0x43,0x45,0x00,0x00, \ + 0x30,0x7E,0x43,0x45,0x00,0x3C,0x1B,0x5D,0x32,0x11, \ + 0x32,0xC0,0x00,0x4F,0x00,0x81,0x00,0x00,0x83,0x8F, \ + 0x2F,0x0F,0x06,0x00,0x32,0x11,0x32,0xC0,0x00,0x4F, \ + 0x00,0x55,0x00,0x01,0x00,0x81,0x32,0x11,0x00,0x00, \ + 0x83,0x8E,0x00,0x50,0x00,0x01,0x01,0x04,0x00,0x4D, \ + 0x28,0x43,0x06,0x00,0x1B,0x3E,0x30,0x7E,0x53,0x79, \ + 0x00,0x2B,0x32,0x11,0x32,0xC0,0x00,0x4F,0x00,0x81, \ + 0x00,0x00,0x87,0x8F,0x28,0x23,0x06,0x00,0x32,0x11, \ + 
0x32,0xC0,0x00,0x4F,0x00,0x55,0x00,0x01,0x00,0x81, \ + 0x32,0x11,0x00,0x00,0x83,0x8E,0x00,0x50,0x00,0x01, \ + 0x01,0x04,0x00,0x4D,0x28,0x43,0x06,0x00,0x00,0x01, \ + 0x1B,0xFE,0x00,0x00,0x9B,0x8E,0x53,0x90,0x00,0x00, \ + 0x06,0x29,0x00,0x00,0x83,0x8F,0x28,0x23,0x06,0x00, \ + 0x06,0x29,0x32,0xC1,0x00,0x55,0x00,0x28,0x00,0x00, \ + 0x83,0x8E,0x00,0x50,0x00,0x01,0x01,0x04,0x00,0x4D, \ + 0x28,0x43,0x06,0x00,0x00,0x01,0x1B,0xFE,0x32,0xC1, \ + 0x00,0x55,0x00,0x28,0x28,0x43,0x1B,0xCF,0x00,0x00, \ + 0x9B,0x8F,0x2F,0x0F,0x32,0xC1,0x00,0x55,0x00,0x28, \ + 0x28,0x43,0x30,0x7E,0x43,0xBF,0x00,0x2C,0x32,0x11, \ + 0x32,0xC0,0x00,0x4F,0x00,0x81,0x00,0x00,0x87,0x8F, \ + 0x28,0x23,0x06,0x00,0x32,0x11,0x32,0xC0,0x00,0x4F, \ + 0x00,0x81,0x00,0x00,0x83,0x8F,0x2F,0x0F,0x06,0x00, \ + 0x32,0x11,0x32,0xC0,0x00,0x4F,0x00,0x55,0x00,0x01, \ + 0x00,0x81,0x32,0x11,0x00,0x00,0x83,0x8E,0x00,0x50, \ + 0x00,0x01,0x01,0x04,0x00,0x4D,0x28,0x43,0x06,0x00, \ + 0x1B,0x9C,0x33,0xF1,0x00,0xF9,0x00,0x01,0x00,0x00, \ + 0x00,0x00,0x00,0x00,0x28,0x7F,0x00,0x03,0x00,0x02, \ + 0x00,0x00,0x00,0x01,0x32,0xC1,0x32,0xF0,0x00,0x4A, \ + 0x00,0x80,0x1F,0xFF,0x00,0x01,0x1B,0xFE, \ +} + +#else +/* version: 106.3.13 */ +#define SW_PRS_IP_FRAG_PATCH \ +{ \ + 0x31,0x52,0x00,0xDA,0x0E,0x4F,0x00,0x00,0x00,0x00, \ + 0x00,0x00,0x52,0xF6,0x08,0x4B,0x31,0x53,0x00,0xFB, \ + 0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x29,0x2B, \ + 0x33,0xF1,0x00,0xFB,0x00,0xDF,0x00,0x00,0x00,0x00, \ + 0x00,0x00,0x28,0x7F,0x31,0x52,0x00,0xDA,0x0A,0x00, \ + 0x00,0x00,0x00,0x00,0x00,0x00,0x43,0x00,0x00,0x00, \ + 0x00,0x01,0x1B,0xFE,0x00,0x00,0x99,0x00,0x53,0x09, \ + 0x00,0x00,0x00,0x00,0x9F,0x98,0x53,0x09,0x00,0x00, \ + 0x1B,0x24,0x09,0x5F,0x00,0x20,0x00,0x00,0x09,0x4F, \ + 0x00,0x20,0x00,0x00,0x34,0xB7,0x00,0xF9,0x00,0x00, \ + 0x01,0x00,0x00,0x00,0x00,0x00,0x2B,0x97,0x31,0xB3, \ + 0x29,0x8F,0x33,0xF1,0x00,0xF9,0x00,0x01,0x00,0x00, \ + 0x00,0x00,0x00,0x00,0x28,0x7F,0x00,0x03,0x00,0x02, \ + 0x00,0x00,0x00,0x01,0x1B,0xFE,0x00,0x01,0x1B,0xFE, \ + 
0x31,0x52,0x00,0xDA,0x0E,0x4F,0x00,0x00,0x00,0x00, \ + 0x00,0x00,0x53,0x3C,0x04,0x4B,0x31,0x53,0x00,0xFB, \ + 0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x29,0x2B, \ + 0x33,0xF1,0x00,0xFB,0x00,0xDF,0x00,0x00,0x00,0x00, \ + 0x00,0x00,0x28,0x7F,0x31,0x52,0x00,0xDA,0x06,0x00, \ + 0x00,0x00,0x00,0x00,0x00,0x00,0x43,0x46,0x00,0x00, \ + 0x00,0x01,0x1B,0xFE,0x31,0x52,0x00,0xDA,0x00,0x40, \ + 0x00,0x00,0x00,0x00,0x00,0x00,0x53,0xAC,0x00,0x00, \ + 0x00,0x00,0x9B,0x8F,0x2F,0x0F,0x32,0xC1,0x00,0x55, \ + 0x00,0x28,0x28,0x43,0x30,0x7E,0x43,0x5C,0x00,0x00, \ + 0x30,0x7E,0x43,0x5C,0x00,0x3C,0x1B,0x74,0x32,0x11, \ + 0x32,0xC0,0x00,0x4F,0x00,0x81,0x00,0x00,0x83,0x8F, \ + 0x2F,0x0F,0x06,0x00,0x32,0x11,0x32,0xC0,0x00,0x4F, \ + 0x00,0x55,0x00,0x01,0x00,0x81,0x32,0x11,0x00,0x00, \ + 0x83,0x8E,0x00,0x50,0x00,0x01,0x01,0x04,0x00,0x4D, \ + 0x28,0x43,0x06,0x00,0x1B,0x55,0x30,0x7E,0x53,0x90, \ + 0x00,0x2B,0x32,0x11,0x32,0xC0,0x00,0x4F,0x00,0x81, \ + 0x00,0x00,0x87,0x8F,0x28,0x23,0x06,0x00,0x32,0x11, \ + 0x32,0xC0,0x00,0x4F,0x00,0x55,0x00,0x01,0x00,0x81, \ + 0x32,0x11,0x00,0x00,0x83,0x8E,0x00,0x50,0x00,0x01, \ + 0x01,0x04,0x00,0x4D,0x28,0x43,0x06,0x00,0x00,0x01, \ + 0x1B,0xFE,0x00,0x00,0x9B,0x8E,0x53,0xA7,0x00,0x00, \ + 0x06,0x29,0x00,0x00,0x83,0x8F,0x28,0x23,0x06,0x00, \ + 0x06,0x29,0x32,0xC1,0x00,0x55,0x00,0x28,0x00,0x00, \ + 0x83,0x8E,0x00,0x50,0x00,0x01,0x01,0x04,0x00,0x4D, \ + 0x28,0x43,0x06,0x00,0x00,0x01,0x1B,0xFE,0x32,0xC1, \ + 0x00,0x55,0x00,0x28,0x28,0x43,0x1B,0xF1,0x00,0x00, \ + 0x9B,0x8F,0x2F,0x0F,0x32,0xC1,0x00,0x55,0x00,0x28, \ + 0x28,0x43,0x30,0x7E,0x43,0xD6,0x00,0x2C,0x32,0x11, \ + 0x32,0xC0,0x00,0x4F,0x00,0x81,0x00,0x00,0x87,0x8F, \ + 0x28,0x23,0x06,0x00,0x32,0x11,0x32,0xC0,0x00,0x4F, \ + 0x00,0x81,0x00,0x00,0x83,0x8F,0x2F,0x0F,0x06,0x00, \ + 0x32,0x11,0x32,0xC0,0x00,0x4F,0x00,0x55,0x00,0x01, \ + 0x00,0x81,0x32,0x11,0x00,0x00,0x83,0x8E,0x00,0x50, \ + 0x00,0x01,0x01,0x04,0x00,0x4D,0x28,0x43,0x06,0x00, \ + 0x1B,0xB3,0x09,0x5F,0x00,0x20,0x00,0x00,0x09,0x4F, \ + 
0x00,0x20,0x00,0x00,0x34,0xB7,0x00,0xF9,0x00,0x00, \ + 0x01,0x00,0x00,0x00,0x00,0x00,0x2B,0x97,0x31,0xB3, \ + 0x29,0x8F,0x33,0xF1,0x00,0xF9,0x00,0x01,0x00,0x00, \ + 0x00,0x00,0x00,0x00,0x28,0x7F,0x00,0x03,0x00,0x02, \ + 0x00,0x00,0x00,0x01,0x1B,0xFE,0x00,0x01,0x1B,0xFE, \ +} +#endif /* (DPAA_VERSION == 10) */ + +/****************************/ +/* Parser defines */ +/****************************/ +#define FM_PCD_PRS_SW_TAIL_SIZE 4 /**< Number of bytes that must be cleared at + the end of the SW parser area */ + +/* masks */ +#define PRS_ERR_CAP 0x80000000 +#define PRS_ERR_TYPE_DOUBLE 0x40000000 +#define PRS_ERR_SINGLE_ECC_CNT_MASK 0x00FF0000 +#define PRS_ERR_ADDR_MASK 0x000001FF + +/* others */ +#define PRS_MAX_CYCLE_LIMIT 8191 +#define PRS_SW_DATA 0x00000800 +#define PRS_REGS_OFFSET 0x00000840 + +#define GET_FM_PCD_PRS_PORT_ID(prsPortId,hardwarePortId) \ + prsPortId = (uint8_t)(hardwarePortId & 0x0f) + +#define GET_FM_PCD_INDEX_FLAG(bitMask, prsPortId) \ + bitMask = 0x80000000>>prsPortId + + +#endif /* __FM_PRS_H */ diff --git a/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_replic.c b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_replic.c new file mode 100644 index 0000000..ae72b25 --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_replic.c @@ -0,0 +1,993 @@ +/* + * Copyright 2008-2012 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
/******************************************************************************
 @File          fm_replic.c

 @Description   FM frame replicator
*//***************************************************************************/
#include "std_ext.h"
#include "error_ext.h"
#include "string_ext.h"
#include "debug_ext.h"
#include "fm_pcd_ext.h"
#include "fm_muram_ext.h"
#include "fm_common.h"
#include "fm_hc.h"
#include "fm_replic.h"
#include "fm_cc.h"
#include "list_ext.h"


/****************************************/
/*       static functions               */
/****************************************/

/* Classify a member index as first/middle/last within the group.
   Returns one of FRM_REPLIC_{FIRST,MIDDLE,LAST}_MEMBER_INDEX.
   For an add operation the valid index range is one larger than the current
   member count, because the new member may be appended after the current
   tail (see the comment in the body). */
static uint8_t GetMemberPosition(t_FmPcdFrmReplicGroup *p_ReplicGroup,
                                 uint32_t              memberIndex,
                                 bool                  isAddOperation)
{
    uint8_t     memberPosition;
    uint32_t    lastMemberIndex;

    ASSERT_COND(p_ReplicGroup);

    /* the last member index is different between add and remove operation -
       in case of remove - this is exactly the last member index
       in case of add - this is the last member index + 1 - e.g.
       if we have 4 members, the index of the actual last member is 3 (because
       the index starts from 0) therefore in order to add a new member as the
       last member we shall use memberIndex = 4 and not 3 */
    if (isAddOperation)
        lastMemberIndex = p_ReplicGroup->numOfEntries;
    else
        lastMemberIndex = p_ReplicGroup->numOfEntries-1;

    /* last */
    if (memberIndex == lastMemberIndex)
        memberPosition = FRM_REPLIC_LAST_MEMBER_INDEX;
    else
    {
        /* first */
        if (memberIndex == 0)
            memberPosition = FRM_REPLIC_FIRST_MEMBER_INDEX;
        else
        {
            /* middle */
            ASSERT_COND(memberIndex < lastMemberIndex);
            memberPosition = FRM_REPLIC_MIDDLE_MEMBER_INDEX;
        }
    }
    return memberPosition;
}

/* Validate the next-engine parameters of a single group member.
   Only DONE, KG and PLCR next-engines are accepted for a replicator member. */
static t_Error MemberCheckParams(t_Handle                  h_FmPcd,
                                 t_FmPcdCcNextEngineParams *p_MemberParams)
{
    t_Error err;

    if ((p_MemberParams->nextEngine != e_FM_PCD_DONE) &&
        (p_MemberParams->nextEngine != e_FM_PCD_KG) &&
        (p_MemberParams->nextEngine != e_FM_PCD_PLCR))
        RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("Next engine of a member should be MatchTable(cc) or Done or Policer"));

    /* check the regular parameters of the next engine */
    err = ValidateNextEngineParams(h_FmPcd, p_MemberParams, e_FM_PCD_CC_STATS_MODE_NONE);
    if (err)
        RETURN_ERROR(MAJOR, err, ("member next engine parameters"));

    return E_OK;
}

/* Validate the whole group-creation parameter structure:
   2 <= maxNumOfEntries <= FM_PCD_FRM_REPLIC_MAX_NUM_OF_ENTRIES,
   0 < numOfEntries <= maxNumOfEntries, and each member's params. */
static t_Error CheckParams(t_Handle                    h_FmPcd,
                           t_FmPcdFrmReplicGroupParams *p_ReplicGroupParam)
{
    int     i;
    t_Error err;

    /* check that max num of entries is at least 2 */
    if (!IN_RANGE(2, p_ReplicGroupParam->maxNumOfEntries, FM_PCD_FRM_REPLIC_MAX_NUM_OF_ENTRIES))
        RETURN_ERROR(MAJOR, E_NOT_IN_RANGE, ("maxNumOfEntries in the frame replicator parameters should be 2-%d",FM_PCD_FRM_REPLIC_MAX_NUM_OF_ENTRIES));

    /* check that number of entries is greater than zero */
    if (!p_ReplicGroupParam->numOfEntries)
        RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("numOFEntries in the frame replicator group should be greater than zero"));

    /* check that max num of entries is equal or greater than number of entries */
    if (p_ReplicGroupParam->maxNumOfEntries < p_ReplicGroupParam->numOfEntries)
        RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("maxNumOfEntries should be equal or greater than numOfEntries"));

    for (i=0; i<p_ReplicGroupParam->numOfEntries; i++)
    {
        err = MemberCheckParams(h_FmPcd, &p_ReplicGroupParam->nextEngineParams[i]);
        if (err)
            RETURN_ERROR(MAJOR, err, ("member check parameters"));
    }
    return E_OK;
}

/* Pop one pre-allocated member from the group's free list, or NULL if the
   free list is empty (i.e. maxNumOfEntries members are already in use). */
static t_FmPcdFrmReplicMember *GetAvailableMember(t_FmPcdFrmReplicGroup *p_ReplicGroup)
{
    t_FmPcdFrmReplicMember  *p_ReplicMember = NULL;
    t_List                  *p_Next;

    if (!LIST_IsEmpty(&p_ReplicGroup->availableMembersList))
    {
        p_Next = LIST_FIRST(&p_ReplicGroup->availableMembersList);
        p_ReplicMember = LIST_OBJECT(p_Next, t_FmPcdFrmReplicMember, node);
        ASSERT_COND(p_ReplicMember);
        LIST_DelAndInit(p_Next);
    }
    return p_ReplicMember;
}

/* Return a member (and its MURAM AD) to the group's free list. */
static void PutAvailableMember(t_FmPcdFrmReplicGroup  *p_ReplicGroup,
                               t_FmPcdFrmReplicMember *p_ReplicMember)
{
    LIST_AddToTail(&p_ReplicMember->node, &p_ReplicGroup->availableMembersList);
}

/* Insert a member into the driver's sw members list right after p_ListHead
   and account for it in numOfEntries. The sw list mirrors the hw linked
   list order. */
static void AddMemberToList(t_FmPcdFrmReplicGroup  *p_ReplicGroup,
                            t_FmPcdFrmReplicMember *p_CurrentMember,
                            t_List                 *p_ListHead)
{
    LIST_Add(&p_CurrentMember->node, p_ListHead);

    p_ReplicGroup->numOfEntries++;
}

/* Unlink a member from the sw members list and decrement numOfEntries. */
static void RemoveMemberFromList(t_FmPcdFrmReplicGroup  *p_ReplicGroup,
                                 t_FmPcdFrmReplicMember *p_CurrentMember)
{
    ASSERT_COND(p_ReplicGroup->numOfEntries);
    LIST_DelAndInit(&p_CurrentMember->node);
    p_ReplicGroup->numOfEntries--;
}

/* Point the group's source table descriptor at a member's AD (the member
   becomes the first hw member). The stored value is a MURAM-relative
   physical offset. */
static void LinkSourceToMember(t_FmPcdFrmReplicGroup  *p_ReplicGroup,
                               t_AdOfTypeContLookup   *p_SourceTd,
                               t_FmPcdFrmReplicMember *p_ReplicMember)
{
    t_FmPcd *p_FmPcd;

    ASSERT_COND(p_SourceTd);
    ASSERT_COND(p_ReplicMember);
    ASSERT_COND(p_ReplicGroup);
    ASSERT_COND(p_ReplicGroup->h_FmPcd);

    /* Link the first member in the group to the source TD */
    p_FmPcd = p_ReplicGroup->h_FmPcd;

    WRITE_UINT32(p_SourceTd->matchTblPtr,
                 (uint32_t)(XX_VirtToPhys(p_ReplicMember->p_MemberAd) -
                            p_FmPcd->physicalMuramBase));
}
/* Chain a member's AD to the next member's AD in the hw linked list.
   A NULL p_NextMember means p_CurrentMember becomes the hw tail (its next
   pointer field is written as 0). The stored link is the MURAM-relative
   offset of the next AD, repositioned into the "next member" field of the
   result AD (see NEXT_FRM_REPLIC_* shifts). */
static void LinkMemberToMember(t_FmPcdFrmReplicGroup  *p_ReplicGroup,
                               t_FmPcdFrmReplicMember *p_CurrentMember,
                               t_FmPcdFrmReplicMember *p_NextMember)
{
    t_AdOfTypeResult    *p_CurrReplicAd = (t_AdOfTypeResult*)p_CurrentMember->p_MemberAd;
    t_AdOfTypeResult    *p_NextReplicAd = NULL;
    t_FmPcd             *p_FmPcd;
    uint32_t            offset = 0;

    /* Check if the next member exists or it's NULL (- means that this is the last member) */
    if (p_NextMember)
    {
        p_NextReplicAd = (t_AdOfTypeResult*)p_NextMember->p_MemberAd;
        p_FmPcd = p_ReplicGroup->h_FmPcd;
        offset = (XX_VirtToPhys(p_NextReplicAd) - (p_FmPcd->physicalMuramBase));
        offset = ((offset>>NEXT_FRM_REPLIC_ADDR_SHIFT)<< NEXT_FRM_REPLIC_MEMBER_INDEX_SHIFT);
    }

    /* link the current AD to point to the AD of the next member */
    WRITE_UINT32(p_CurrReplicAd->res, offset);
}

/* Atomically swap a live descriptor for a newly prepared one via the FMan
   host-command interface. Both descriptors are passed to the HC as
   MURAM-relative physical offsets. */
static t_Error ModifyDescriptor(t_FmPcdFrmReplicGroup *p_ReplicGroup,
                                void                  *p_OldDescriptor,
                                void                  *p_NewDescriptor)
{
    t_Handle    h_Hc;
    t_Error     err;
    t_FmPcd     *p_FmPcd;

    ASSERT_COND(p_ReplicGroup);
    ASSERT_COND(p_ReplicGroup->h_FmPcd);
    ASSERT_COND(p_OldDescriptor);
    ASSERT_COND(p_NewDescriptor);

    p_FmPcd = p_ReplicGroup->h_FmPcd;
    h_Hc = FmPcdGetHcHandle(p_FmPcd);
    if (!h_Hc)
        RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("Host command"));

    err = FmHcPcdCcDoDynamicChange(h_Hc,
                                   (uint32_t)(XX_VirtToPhys(p_OldDescriptor) - p_FmPcd->physicalMuramBase),
                                   (uint32_t)(XX_VirtToPhys(p_NewDescriptor) - p_FmPcd->physicalMuramBase));
    if (err)
        RETURN_ERROR(MAJOR, err, ("Dynamic change host command"));

    return E_OK;
}

/* Set the frame-replicator control bits in a member's result AD:
   NL ("not last") reflects the member's position, FR marks the AD as a
   replicator member, and extended-mode is always enabled. */
static void FillReplicAdOfTypeResult(void *p_ReplicAd, bool last)
{
    t_AdOfTypeResult    *p_CurrReplicAd = (t_AdOfTypeResult*)p_ReplicAd;
    uint32_t            tmp;

    tmp = GET_UINT32(p_CurrReplicAd->plcrProfile);
    if (last)
        /* clear the NL bit in case it's the last member in the group*/
        WRITE_UINT32(p_CurrReplicAd->plcrProfile,(tmp & ~FRM_REPLIC_NL_BIT));
    else
        /* set the NL bit in case it's not the last member in the group */
        WRITE_UINT32(p_CurrReplicAd->plcrProfile, (tmp |FRM_REPLIC_NL_BIT));

    /* set FR bit in the action descriptor */
    tmp = GET_UINT32(p_CurrReplicAd->nia);
    WRITE_UINT32(p_CurrReplicAd->nia,
                 (tmp | FRM_REPLIC_FR_BIT | FM_PCD_AD_RESULT_EXTENDED_MODE ));
}

/* Initialize an AD as the group's source table descriptor: a
   continuous-lookup type AD carrying the replicator source-TD opcode. */
static void BuildSourceTd(void *p_Ad)
{
    t_AdOfTypeContLookup    *p_SourceTd;

    ASSERT_COND(p_Ad);

    p_SourceTd = (t_AdOfTypeContLookup *)p_Ad;

    IOMemSet32((uint8_t*)p_SourceTd, 0, FM_PCD_CC_AD_ENTRY_SIZE);

    /* initialize the source table descriptor */
    WRITE_UINT32(p_SourceTd->ccAdBase, FM_PCD_AD_CONT_LOOKUP_TYPE);
    WRITE_UINT32(p_SourceTd->pcAndOffsets, FRM_REPLIC_SOURCE_TD_OPCODE);
}

/* Prepare a replacement descriptor in the shared CC shadow area and swap it
   in with a host command, so the hw list is never observed half-updated.
   sourceDescriptor==TRUE: rebuild the group's source TD to point at
   p_NextMember. Otherwise: clone p_CurrentMember's AD, update its last-bit
   and next-link to p_NextMember, and swap it for the live AD.
   Returns E_BUSY (without blocking) if the shadow lock is contended. */
static t_Error BuildShadowAndModifyDescriptor(t_FmPcdFrmReplicGroup  *p_ReplicGroup,
                                              t_FmPcdFrmReplicMember *p_NextMember,
                                              t_FmPcdFrmReplicMember *p_CurrentMember,
                                              bool                   sourceDescriptor,
                                              bool                   last)
{
    t_FmPcd                 *p_FmPcd;
    t_FmPcdFrmReplicMember  shadowMember;
    t_Error                 err;

    ASSERT_COND(p_ReplicGroup);
    ASSERT_COND(p_ReplicGroup->h_FmPcd);

    p_FmPcd = p_ReplicGroup->h_FmPcd;
    ASSERT_COND(p_FmPcd->p_CcShadow);

    if (!TRY_LOCK(p_FmPcd->h_ShadowSpinlock, &p_FmPcd->shadowLock))
        return ERROR_CODE(E_BUSY);

    if (sourceDescriptor)
    {
        BuildSourceTd(p_FmPcd->p_CcShadow);
        LinkSourceToMember(p_ReplicGroup, p_FmPcd->p_CcShadow, p_NextMember);

        /* Modify the source table descriptor according to the prepared shadow descriptor */
        err = ModifyDescriptor(p_ReplicGroup,
                               p_ReplicGroup->p_SourceTd,
                               p_FmPcd->p_CcShadow/* new prepared source td */);

        RELEASE_LOCK(p_FmPcd->shadowLock);
        if (err)
            RETURN_ERROR(MAJOR, err, ("Modify source Descriptor in BuildShadowAndModifyDescriptor"));

    }
    else
    {
        IO2IOCpy32(p_FmPcd->p_CcShadow,
                   p_CurrentMember->p_MemberAd,
                   FM_PCD_CC_AD_ENTRY_SIZE);

        /* update the last bit in the shadow ad */
        FillReplicAdOfTypeResult(p_FmPcd->p_CcShadow, last);

        /* stack-local wrapper so LinkMemberToMember can patch the shadow AD */
        shadowMember.p_MemberAd = p_FmPcd->p_CcShadow;

        /* update the next FR member index */
        LinkMemberToMember(p_ReplicGroup, &shadowMember, p_NextMember);

        /* Modify the next member according to the prepared shadow descriptor */
        err = ModifyDescriptor(p_ReplicGroup,
                               p_CurrentMember->p_MemberAd,
                               p_FmPcd->p_CcShadow);

        RELEASE_LOCK(p_FmPcd->shadowLock);
        if (err)
            RETURN_ERROR(MAJOR, err, ("Modify Descriptor in BuildShadowAndModifyDescriptor"));
    }


    return E_OK;
}

/* Linear search of the sw members list; returns the member at memberIndex
   or NULL if the index is beyond the list's end. */
static t_FmPcdFrmReplicMember* GetMemberByIndex(t_FmPcdFrmReplicGroup *p_ReplicGroup,
                                                uint16_t              memberIndex)
{
    int                     i=0;
    t_List                  *p_Pos;
    t_FmPcdFrmReplicMember  *p_Member = NULL;

    LIST_FOR_EACH(p_Pos, &p_ReplicGroup->membersList)
    {
        if (i == memberIndex)
        {
            p_Member = LIST_OBJECT(p_Pos, t_FmPcdFrmReplicMember, node);
            return p_Member;
        }
        i++;
    }
    return p_Member;
}

/* Allocate one member structure (heap) plus its AD (MURAM) and park it on
   the group's free list. Called maxNumOfEntries times at group creation so
   no MURAM allocation is needed at run time.
   NOTE(review): on AD-allocation failure the already-allocated
   p_CurrentMember heap block is not freed here; DeleteGroup's cleanup path
   is presumably expected to cope — confirm against callers. */
static t_Error AllocMember(t_FmPcdFrmReplicGroup *p_ReplicGroup)
{
    t_FmPcdFrmReplicMember  *p_CurrentMember;
    t_Handle                h_Muram;

    ASSERT_COND(p_ReplicGroup);

    h_Muram = FmPcdGetMuramHandle(p_ReplicGroup->h_FmPcd);
    ASSERT_COND(h_Muram);

    /* Initialize an internal structure of a member to add to the available members list */
    p_CurrentMember = (t_FmPcdFrmReplicMember *)XX_Malloc(sizeof(t_FmPcdFrmReplicMember));
    if (!p_CurrentMember)
        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Frame replicator member"));

    memset(p_CurrentMember, 0 ,sizeof(t_FmPcdFrmReplicMember));

    /* Allocate the member AD */
    p_CurrentMember->p_MemberAd =
        (t_AdOfTypeResult*)FM_MURAM_AllocMem(h_Muram,
                                             FM_PCD_CC_AD_ENTRY_SIZE,
                                             FM_PCD_CC_AD_TABLE_ALIGN);
    if (!p_CurrentMember->p_MemberAd)
        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("member AD table"));

    IOMemSet32((uint8_t*)p_CurrentMember->p_MemberAd, 0, FM_PCD_CC_AD_ENTRY_SIZE);

    /* Add the new member to the available members list */
    LIST_AddToTail(&p_CurrentMember->node,
                   &(p_ReplicGroup->availableMembersList));

    return E_OK;
}
/* Take a member off the free list and program its AD from the caller's
   next-engine parameters (via NextStepAd), then apply the replicator
   control bits (NL according to `last`, FR, extended mode).
   Returns NULL if no pre-allocated member is available. The member is NOT
   yet linked into the hw or sw members lists. */
static t_FmPcdFrmReplicMember* InitMember(t_FmPcdFrmReplicGroup     *p_ReplicGroup,
                                          t_FmPcdCcNextEngineParams *p_MemberParams,
                                          bool                      last)
{
    t_FmPcdFrmReplicMember  *p_CurrentMember = NULL;

    ASSERT_COND(p_ReplicGroup);

    /* Get an available member from the internal members list */
    p_CurrentMember = GetAvailableMember(p_ReplicGroup);
    if (!p_CurrentMember)
    {
        REPORT_ERROR(MAJOR, E_NOT_FOUND, ("Available member"));
        return NULL;
    }
    p_CurrentMember->h_Manip = NULL;

    /* clear the Ad of the new member */
    IOMemSet32((uint8_t*)p_CurrentMember->p_MemberAd, 0, FM_PCD_CC_AD_ENTRY_SIZE);

    INIT_LIST(&p_CurrentMember->node);

    /* Initialize the Ad of the member */
    NextStepAd(p_CurrentMember->p_MemberAd,
               NULL,
               p_MemberParams,
               p_ReplicGroup->h_FmPcd);

    /* save Manip handle (for free needs) */
    if (p_MemberParams->h_Manip)
        p_CurrentMember->h_Manip = p_MemberParams->h_Manip;

    /* Initialize the relevant frame replicator fields in the AD */
    FillReplicAdOfTypeResult(p_CurrentMember->p_MemberAd, last);

    return p_CurrentMember;
}

/* Recycle a member: zero its AD and push it back on the free list.
   The AD's MURAM is deliberately not freed — it is reused for the lifetime
   of the group and only released in DeleteGroup. */
static void FreeMember(t_FmPcdFrmReplicGroup  *p_ReplicGroup,
                       t_FmPcdFrmReplicMember *p_Member)
{
    /* Note: Can't free the member AD just returns the member to the available
       member list - therefore only memset the AD */

    /* zero the AD */
    IOMemSet32(p_Member->p_MemberAd, 0, FM_PCD_CC_AD_ENTRY_SIZE);


    /* return the member to the available members list */
    PutAvailableMember(p_ReplicGroup, p_Member);
}
/* Remove the member at memberIndex from the live hw list (via a shadow
   descriptor + host command, chosen per the member's first/middle/last
   position), drop its Manip ownership, unlink it from the sw list and
   recycle it to the free list.
   Caller holds the group lock. May propagate E_BUSY from the shadow lock. */
static t_Error RemoveMember(t_FmPcdFrmReplicGroup *p_ReplicGroup,
                            uint16_t              memberIndex)
{
    t_FmPcd                 *p_FmPcd = NULL;
    t_FmPcdFrmReplicMember  *p_CurrentMember = NULL, *p_PreviousMember = NULL, *p_NextMember = NULL;
    t_Error                 err;
    uint8_t                 memberPosition;

    p_FmPcd = p_ReplicGroup->h_FmPcd;
    ASSERT_COND(p_FmPcd);

    p_CurrentMember = GetMemberByIndex(p_ReplicGroup, memberIndex);
    ASSERT_COND(p_CurrentMember);

    /* determine the member position in the group */
    memberPosition = GetMemberPosition(p_ReplicGroup,
                                       memberIndex,
                                       FALSE/*remove operation*/);

    switch (memberPosition)
    {
        case FRM_REPLIC_FIRST_MEMBER_INDEX:
            /* removing the hw head: retarget the source TD at the 2nd member */
            p_NextMember = GetMemberByIndex(p_ReplicGroup, (uint16_t)(memberIndex+1));
            ASSERT_COND(p_NextMember);

            /* update the source td itself by using a host command */
            err = BuildShadowAndModifyDescriptor(p_ReplicGroup,
                                                 p_NextMember,
                                                 NULL,
                                                 TRUE/*sourceDescriptor*/,
                                                 FALSE/*last*/);
            break;

        case FRM_REPLIC_MIDDLE_MEMBER_INDEX:
            /* bypass the removed member: previous -> next */
            p_PreviousMember = GetMemberByIndex(p_ReplicGroup, (uint16_t)(memberIndex-1));
            ASSERT_COND(p_PreviousMember);

            p_NextMember = GetMemberByIndex(p_ReplicGroup, (uint16_t)(memberIndex+1));
            ASSERT_COND(p_NextMember);

            err = BuildShadowAndModifyDescriptor(p_ReplicGroup,
                                                 p_NextMember,
                                                 p_PreviousMember,
                                                 FALSE/*sourceDescriptor*/,
                                                 FALSE/*last*/);

            break;

        case FRM_REPLIC_LAST_MEMBER_INDEX:
            /* previous member becomes the new hw tail (NL bit cleared) */
            p_PreviousMember = GetMemberByIndex(p_ReplicGroup, (uint16_t)(memberIndex-1));
            ASSERT_COND(p_PreviousMember);

            err = BuildShadowAndModifyDescriptor(p_ReplicGroup,
                                                 NULL,
                                                 p_PreviousMember,
                                                 FALSE/*sourceDescriptor*/,
                                                 TRUE/*last*/);
            break;

        default:
            RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("member position in remove member"));
    }

    if (err)
        RETURN_ERROR(MAJOR, err, NO_MSG);

    if (p_CurrentMember->h_Manip)
    {
        FmPcdManipUpdateOwner(p_CurrentMember->h_Manip, FALSE);
        p_CurrentMember->h_Manip = NULL;
    }

    /* remove the member from the driver internal members list */
    RemoveMemberFromList(p_ReplicGroup, p_CurrentMember);

    /* return the member to the available members list */
    FreeMember(p_ReplicGroup, p_CurrentMember);

    return E_OK;
}

/* Tear down a group completely: free the source TD, recycle every active
   member (releasing Manip ownership), free all pre-allocated member ADs and
   structures, release the group lock and finally free the group itself.
   Safe to call on a partially constructed group (used on SetGroup error
   paths) — each resource is checked before freeing. */
static void DeleteGroup(t_FmPcdFrmReplicGroup *p_ReplicGroup)
{
    int                     i, j;
    t_Handle                h_Muram;
    t_FmPcdFrmReplicMember  *p_Member, *p_CurrentMember;

    if (p_ReplicGroup)
    {
        ASSERT_COND(p_ReplicGroup->h_FmPcd);
        h_Muram = FmPcdGetMuramHandle(p_ReplicGroup->h_FmPcd);
        ASSERT_COND(h_Muram);

        /* free the source table descriptor */
        if (p_ReplicGroup->p_SourceTd)
        {
            FM_MURAM_FreeMem(h_Muram, p_ReplicGroup->p_SourceTd);
            p_ReplicGroup->p_SourceTd = NULL;
        }

        /* Remove all members from the members linked list (hw and sw) and
           return the members to the available members list */
        if (p_ReplicGroup->numOfEntries)
        {
            j = p_ReplicGroup->numOfEntries-1;

            /* manually removal of the member because there are no owners of
               this group */
            for (i=j; i>=0; i--)
            {
                p_CurrentMember = GetMemberByIndex(p_ReplicGroup, (uint16_t)i/*memberIndex*/);
                ASSERT_COND(p_CurrentMember);

                if (p_CurrentMember->h_Manip)
                {
                    FmPcdManipUpdateOwner(p_CurrentMember->h_Manip, FALSE);
                    p_CurrentMember->h_Manip = NULL;
                }

                /* remove the member from the internal driver members list */
                RemoveMemberFromList(p_ReplicGroup, p_CurrentMember);

                /* return the member to the available members list */
                FreeMember(p_ReplicGroup, p_CurrentMember);
            }
        }

        /* Free members AD */
        for (i=0; i<p_ReplicGroup->maxNumOfEntries; i++)
        {
            p_Member = GetAvailableMember(p_ReplicGroup);
            ASSERT_COND(p_Member);
            if (p_Member->p_MemberAd)
            {
                FM_MURAM_FreeMem(h_Muram, p_Member->p_MemberAd);
                p_Member->p_MemberAd = NULL;
            }
            XX_Free(p_Member);
        }

        /* release the group lock */
        if (p_ReplicGroup->p_Lock)
            FmPcdReleaseLock(p_ReplicGroup->h_FmPcd, p_ReplicGroup->p_Lock);

        /* free the replicator group */
        XX_Free(p_ReplicGroup);
        p_ReplicGroup = NULL;
    }
}
ASSERT_COND(p_ReplicGroup); + + return (p_ReplicGroup->p_SourceTd); +} + +void FrmReplicGroupUpdateAd(t_Handle h_ReplicGroup, + void *p_Ad, + t_Handle *h_AdNew) +{ + t_FmPcdFrmReplicGroup *p_ReplicGroup = (t_FmPcdFrmReplicGroup *)h_ReplicGroup; + t_AdOfTypeResult *p_AdResult = (t_AdOfTypeResult*)p_Ad; + t_FmPcd *p_FmPcd; + + ASSERT_COND(p_ReplicGroup); + p_FmPcd = p_ReplicGroup->h_FmPcd; + + /* build a bypass ad */ + WRITE_UINT32(p_AdResult->fqid, FM_PCD_AD_BYPASS_TYPE | + (uint32_t)((XX_VirtToPhys(p_ReplicGroup->p_SourceTd)) - p_FmPcd->physicalMuramBase)); + + *h_AdNew = NULL; +} + +void FrmReplicGroupUpdateOwner(t_Handle h_ReplicGroup, + bool add) +{ + t_FmPcdFrmReplicGroup *p_ReplicGroup = (t_FmPcdFrmReplicGroup *)h_ReplicGroup; + ASSERT_COND(p_ReplicGroup); + + /* update the group owner counter */ + if (add) + p_ReplicGroup->owners++; + else + { + ASSERT_COND(p_ReplicGroup->owners); + p_ReplicGroup->owners--; + } +} + +t_Error FrmReplicGroupTryLock(t_Handle h_ReplicGroup) +{ + t_FmPcdFrmReplicGroup *p_ReplicGroup = (t_FmPcdFrmReplicGroup *)h_ReplicGroup; + + ASSERT_COND(h_ReplicGroup); + + if (FmPcdLockTryLock(p_ReplicGroup->p_Lock)) + return E_OK; + + return ERROR_CODE(E_BUSY); +} + +void FrmReplicGroupUnlock(t_Handle h_ReplicGroup) +{ + t_FmPcdFrmReplicGroup *p_ReplicGroup = (t_FmPcdFrmReplicGroup *)h_ReplicGroup; + + ASSERT_COND(h_ReplicGroup); + + FmPcdLockUnlock(p_ReplicGroup->p_Lock); +} +/*********************** End of inter-module routines ************************/ + + +/****************************************/ +/* API Init unit functions */ +/****************************************/ +t_Handle FM_PCD_FrmReplicSetGroup(t_Handle h_FmPcd, + t_FmPcdFrmReplicGroupParams *p_ReplicGroupParam) +{ + t_FmPcdFrmReplicGroup *p_ReplicGroup; + t_FmPcdFrmReplicMember *p_CurrentMember, *p_NextMember = NULL; + int i; + t_Error err; + bool last = FALSE; + t_Handle h_Muram; + + SANITY_CHECK_RETURN_VALUE(h_FmPcd, E_INVALID_HANDLE, NULL); + 
/****************************************/
/*       API Init unit functions        */
/****************************************/

/* Create a frame replicator group.
   Validates the parameters, pre-allocates maxNumOfEntries members (heap
   struct + MURAM AD each), builds the hw member chain back-to-front so each
   AD can be linked to its already-built successor, then builds the source
   TD and points it at the first member.
   Returns the group handle, or NULL on any failure (all partially acquired
   resources are released through DeleteGroup). */
t_Handle FM_PCD_FrmReplicSetGroup(t_Handle                    h_FmPcd,
                                  t_FmPcdFrmReplicGroupParams *p_ReplicGroupParam)
{
    t_FmPcdFrmReplicGroup       *p_ReplicGroup;
    t_FmPcdFrmReplicMember      *p_CurrentMember, *p_NextMember = NULL;
    int                         i;
    t_Error                     err;
    bool                        last = FALSE;
    t_Handle                    h_Muram;

    SANITY_CHECK_RETURN_VALUE(h_FmPcd, E_INVALID_HANDLE, NULL);
    SANITY_CHECK_RETURN_VALUE(p_ReplicGroupParam, E_INVALID_HANDLE, NULL);

    if (!FmPcdIsAdvancedOffloadSupported(h_FmPcd))
    {
        REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Advanced-offload must be enabled"));
        return NULL;
    }

    err = CheckParams(h_FmPcd, p_ReplicGroupParam);
    if (err)
    {
        REPORT_ERROR(MAJOR, err, (NO_MSG));
        return NULL;
    }

    p_ReplicGroup = (t_FmPcdFrmReplicGroup*)XX_Malloc(sizeof(t_FmPcdFrmReplicGroup));
    if (!p_ReplicGroup)
    {
        REPORT_ERROR(MAJOR, E_NO_MEMORY, ("No memory"));
        return NULL;
    }
    memset(p_ReplicGroup, 0, sizeof(t_FmPcdFrmReplicGroup));

    /* initialize lists for internal driver use */
    INIT_LIST(&p_ReplicGroup->availableMembersList);
    INIT_LIST(&p_ReplicGroup->membersList);

    p_ReplicGroup->h_FmPcd = h_FmPcd;

    h_Muram = FmPcdGetMuramHandle(p_ReplicGroup->h_FmPcd);
    ASSERT_COND(h_Muram);

    /* initialize the group lock */
    p_ReplicGroup->p_Lock = FmPcdAcquireLock(p_ReplicGroup->h_FmPcd);
    if (!p_ReplicGroup->p_Lock)
    {
        REPORT_ERROR(MAJOR, E_NO_MEMORY, ("Replic group lock"));
        DeleteGroup(p_ReplicGroup);
        return NULL;
    }

    /* Allocate the frame replicator source table descriptor */
    p_ReplicGroup->p_SourceTd =
        (t_Handle)FM_MURAM_AllocMem(h_Muram,
                                    FM_PCD_CC_AD_ENTRY_SIZE,
                                    FM_PCD_CC_AD_TABLE_ALIGN);
    if (!p_ReplicGroup->p_SourceTd)
    {
        REPORT_ERROR(MAJOR, E_NO_MEMORY, ("frame replicator source table descriptor"));
        DeleteGroup(p_ReplicGroup);
        return NULL;
    }

    /* update the shadow size - required for the host commands */
    err = FmPcdUpdateCcShadow(p_ReplicGroup->h_FmPcd,
                              FM_PCD_CC_AD_ENTRY_SIZE,
                              FM_PCD_CC_AD_TABLE_ALIGN);
    if (err)
    {
        REPORT_ERROR(MAJOR, err, ("Update CC shadow"));
        DeleteGroup(p_ReplicGroup);
        return NULL;
    }

    p_ReplicGroup->maxNumOfEntries = p_ReplicGroupParam->maxNumOfEntries;

    /* Allocate the maximal number of members ADs and Statistics AD for the group
       It prevents allocation of Muram in run-time */
    for (i=0; i<p_ReplicGroup->maxNumOfEntries; i++)
    {
        err = AllocMember(p_ReplicGroup);
        if (err)
        {
            REPORT_ERROR(MAJOR, err, ("allocate a new member"));
            DeleteGroup(p_ReplicGroup);
            return NULL;
        }
    }

    /* Initialize the members linked lists:
       (hw - the one that is used by the FMan controller and
       sw - the one that is managed by the driver internally)
       Built back-to-front so each member can be linked to its successor. */
    for (i=(p_ReplicGroupParam->numOfEntries-1); i>=0; i--)
    {
        /* check if this is the last member in the group */
        if (i == (p_ReplicGroupParam->numOfEntries-1))
            last = TRUE;
        else
            last = FALSE;

        /* Initialize a new member */
        p_CurrentMember = InitMember(p_ReplicGroup,
                                     &(p_ReplicGroupParam->nextEngineParams[i]),
                                     last);
        if (!p_CurrentMember)
        {
            REPORT_ERROR(MAJOR, E_INVALID_HANDLE, ("No available member"));
            DeleteGroup(p_ReplicGroup);
            return NULL;
        }

        /* Build the members group - link two consecutive members in the hw linked list */
        LinkMemberToMember(p_ReplicGroup, p_CurrentMember, p_NextMember);

        /* update the driver internal members list to be compatible to the hw members linked list */
        AddMemberToList(p_ReplicGroup, p_CurrentMember, &p_ReplicGroup->membersList);

        p_NextMember = p_CurrentMember;
    }

    /* initialize the source table descriptor */
    BuildSourceTd(p_ReplicGroup->p_SourceTd);

    /* link the source table descriptor to point to the first member in the group */
    LinkSourceToMember(p_ReplicGroup, p_ReplicGroup->p_SourceTd, p_NextMember);

    return p_ReplicGroup;
}

/* Delete a frame replicator group. Fails with E_INVALID_STATE while any
   CC key still owns (references) the group. */
t_Error FM_PCD_FrmReplicDeleteGroup(t_Handle h_ReplicGroup)
{
    t_FmPcdFrmReplicGroup   *p_ReplicGroup = (t_FmPcdFrmReplicGroup *)h_ReplicGroup;

    SANITY_CHECK_RETURN_ERROR(p_ReplicGroup, E_INVALID_HANDLE);

    if (p_ReplicGroup->owners)
        RETURN_ERROR(MAJOR,
                     E_INVALID_STATE,
                     ("the group has owners and can't be deleted"));

    DeleteGroup(p_ReplicGroup);

    return E_OK;
}
/*****************************************************************************/
/*       API Run-time Frame replicator Control unit functions                */
/*****************************************************************************/

/* Insert a new member at memberIndex into a live group.
   Takes the group lock non-blockingly (returns E_BUSY on contention),
   validates the index against current/maximum occupancy and the member's
   next-engine parameters, then splices the new AD into the hw chain
   according to its first/middle/last position and mirrors the change in
   the sw list. The lock is released on every exit path. */
t_Error FM_PCD_FrmReplicAddMember(t_Handle                  h_ReplicGroup,
                                  uint16_t                  memberIndex,
                                  t_FmPcdCcNextEngineParams *p_MemberParams)
{
    t_FmPcdFrmReplicGroup   *p_ReplicGroup = (t_FmPcdFrmReplicGroup*) h_ReplicGroup;
    t_FmPcdFrmReplicMember  *p_NewMember, *p_CurrentMember = NULL, *p_PreviousMember = NULL;
    t_Error                 err;
    uint8_t                 memberPosition;

    SANITY_CHECK_RETURN_ERROR(p_ReplicGroup, E_INVALID_HANDLE);
    SANITY_CHECK_RETURN_ERROR(p_MemberParams, E_INVALID_HANDLE);

    /* group lock */
    err = FrmReplicGroupTryLock(p_ReplicGroup);
    if (err)
    {
        if (GET_ERROR_TYPE(err) == E_BUSY)
            return ERROR_CODE(E_BUSY);
        else
            RETURN_ERROR(MAJOR, err, ("try lock in Add member"));
    }

    /* memberIndex == numOfEntries is legal here: it appends after the tail */
    if (memberIndex > p_ReplicGroup->numOfEntries)
    {
        /* unlock */
        FrmReplicGroupUnlock(p_ReplicGroup);
        RETURN_ERROR(MAJOR, E_INVALID_SELECTION,
                     ("memberIndex is greater than the members in the list"));
    }

    if (memberIndex >= p_ReplicGroup->maxNumOfEntries)
    {
        /* unlock */
        FrmReplicGroupUnlock(p_ReplicGroup);
        RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("memberIndex is greater than the allowed number of members in the group"));
    }

    if ((p_ReplicGroup->numOfEntries + 1) > FM_PCD_FRM_REPLIC_MAX_NUM_OF_ENTRIES)
    {
        /* unlock */
        FrmReplicGroupUnlock(p_ReplicGroup);
        RETURN_ERROR(MAJOR, E_INVALID_VALUE,
                     ("numOfEntries with new entry can not be larger than %d\n",
                      FM_PCD_FRM_REPLIC_MAX_NUM_OF_ENTRIES));
    }

    err = MemberCheckParams(p_ReplicGroup->h_FmPcd, p_MemberParams);
    if (err)
    {
        /* unlock */
        FrmReplicGroupUnlock(p_ReplicGroup);
        RETURN_ERROR(MAJOR, err, ("member check parameters in add operation"));
    }

    /* determine the member position in the group */
    memberPosition = GetMemberPosition(p_ReplicGroup,
                                       memberIndex,
                                       TRUE/* add operation */);

    /* Initialize a new member */
    p_NewMember = InitMember(p_ReplicGroup,
                             p_MemberParams,
                             (memberPosition == FRM_REPLIC_LAST_MEMBER_INDEX ? TRUE : FALSE));
    if (!p_NewMember)
    {
        /* unlock */
        FrmReplicGroupUnlock(p_ReplicGroup);
        RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("No available member"));
    }

    switch (memberPosition)
    {
        case FRM_REPLIC_FIRST_MEMBER_INDEX:
            /* new hw head: link it before the current head and retarget
               the source TD at it */
            p_CurrentMember = GetMemberByIndex(p_ReplicGroup, memberIndex);
            ASSERT_COND(p_CurrentMember);

            LinkMemberToMember(p_ReplicGroup, p_NewMember, p_CurrentMember);

            /* update the internal group source TD */
            LinkSourceToMember(p_ReplicGroup,
                               p_ReplicGroup->p_SourceTd,
                               p_NewMember);

            /* add member to the internal sw member list */
            AddMemberToList(p_ReplicGroup,
                            p_NewMember,
                            &p_ReplicGroup->membersList);
            break;

        case FRM_REPLIC_MIDDLE_MEMBER_INDEX:
            /* splice between previous and the member currently at memberIndex */
            p_CurrentMember = GetMemberByIndex(p_ReplicGroup, memberIndex);
            ASSERT_COND(p_CurrentMember);

            p_PreviousMember = GetMemberByIndex(p_ReplicGroup, (uint16_t)(memberIndex-1));
            ASSERT_COND(p_PreviousMember);

            LinkMemberToMember(p_ReplicGroup, p_NewMember, p_CurrentMember);
            LinkMemberToMember(p_ReplicGroup, p_PreviousMember, p_NewMember);

            /* sw list insert after the previous member's node */
            AddMemberToList(p_ReplicGroup, p_NewMember, &p_PreviousMember->node);
            break;

        case FRM_REPLIC_LAST_MEMBER_INDEX:
            /* append: old tail is no longer "last" (its NL bit is set) */
            p_PreviousMember = GetMemberByIndex(p_ReplicGroup, (uint16_t)(memberIndex-1));
            ASSERT_COND(p_PreviousMember);

            LinkMemberToMember(p_ReplicGroup, p_PreviousMember, p_NewMember);
            FillReplicAdOfTypeResult(p_PreviousMember->p_MemberAd, FALSE/*last*/);

            /* add the new member to the internal sw member list */
            AddMemberToList(p_ReplicGroup, p_NewMember, &p_PreviousMember->node);
            break;

        default:
            /* unlock */
            FrmReplicGroupUnlock(p_ReplicGroup);
            RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("member position in add member"));
    }

    /* unlock */
    FrmReplicGroupUnlock(p_ReplicGroup);

    return E_OK;
}
h_ReplicGroup; + t_Error err; + + SANITY_CHECK_RETURN_ERROR(p_ReplicGroup, E_INVALID_HANDLE); + + /* lock */ + err = FrmReplicGroupTryLock(p_ReplicGroup); + if (err) + { + if (GET_ERROR_TYPE(err) == E_BUSY) + return ERROR_CODE(E_BUSY); + else + RETURN_ERROR(MAJOR, err, ("try lock in Remove member")); + } + + if (memberIndex >= p_ReplicGroup->numOfEntries) + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("member index to remove")); + + /* Design decision: group must contain at least one member + No possibility to remove the last member from the group */ + if (p_ReplicGroup->numOfEntries == 1) + RETURN_ERROR(MAJOR, E_CONFLICT, ("Can't remove the last member. At least one member should be related to a group.")); + + err = RemoveMember(p_ReplicGroup, memberIndex); + + /* unlock */ + FrmReplicGroupUnlock(p_ReplicGroup); + + switch (GET_ERROR_TYPE(err)) + { + case E_OK: + return E_OK; + + case E_BUSY: + DBG(TRACE, ("E_BUSY error")); + return ERROR_CODE(E_BUSY); + + default: + RETURN_ERROR(MAJOR, err, NO_MSG); + } +} + +/*********************** End of API routines ************************/ + + diff --git a/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_replic.h b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_replic.h new file mode 100644 index 0000000..3cfd67e --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fm_replic.h @@ -0,0 +1,104 @@ +/* + * Copyright 2008-2012 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + + +/****************************************************************************** + @File fm_replic.h + + @Description FM frame replicator +*//***************************************************************************/ +#ifndef __FM_REPLIC_H +#define __FM_REPLIC_H + +#include "std_ext.h" +#include "error_ext.h" + + +#define FRM_REPLIC_SOURCE_TD_OPCODE 0x75 +#define NEXT_FRM_REPLIC_ADDR_SHIFT 4 +#define NEXT_FRM_REPLIC_MEMBER_INDEX_SHIFT 16 +#define FRM_REPLIC_FR_BIT 0x08000000 +#define FRM_REPLIC_NL_BIT 0x10000000 +#define FRM_REPLIC_INVALID_MEMBER_INDEX 0xffff +#define FRM_REPLIC_FIRST_MEMBER_INDEX 0 + +#define FRM_REPLIC_MIDDLE_MEMBER_INDEX 1 +#define FRM_REPLIC_LAST_MEMBER_INDEX 2 + +#define SOURCE_TD_ITSELF_OPTION 0x01 +#define SOURCE_TD_COPY_OPTION 0x02 +#define SOURCE_TD_ITSELF_AND_COPY_OPTION SOURCE_TD_ITSELF_OPTION | SOURCE_TD_COPY_OPTION +#define SOURCE_TD_NONE 0x04 + +/*typedef enum e_SourceTdOption +{ + e_SOURCE_TD_NONE = 0, + e_SOURCE_TD_ITSELF_OPTION = 1, + e_SOURCE_TD_COPY_OPTION = 2, + e_SOURCE_TD_ITSELF_AND_COPY_OPTION = e_SOURCE_TD_ITSELF_OPTION | e_SOURCE_TD_COPY_OPTION +} e_SourceTdOption; +*/ + +typedef _Packed struct +{ + volatile uint32_t type; + volatile uint32_t frGroupPointer; + volatile uint32_t operationCode; + volatile uint32_t reserved; +} _PackedType t_FrmReplicGroupSourceAd; + +typedef struct t_FmPcdFrmReplicMember +{ + void *p_MemberAd; /**< pointer to the member AD */ + void *p_StatisticsAd;/**< pointer to the statistics AD of the member */ + t_Handle h_Manip; /**< manip handle - need for free routines */ + t_List node; +} t_FmPcdFrmReplicMember; + +typedef struct t_FmPcdFrmReplicGroup +{ + t_Handle h_FmPcd; +#ifdef UNDER_CONSTRUCTION_STATISTICS_SUPPORT + e_FmPcdCcStatsMode statisticsMode; +#endif /* UNDER_CONSTRUCTION_STATISTICS_SUPPORT */ + + uint8_t maxNumOfEntries;/**< maximal number of members in the group */ + uint8_t numOfEntries; /**< actual number of members in the group */ + uint16_t owners; /**< how many keys 
share this frame replicator group */ + void *p_SourceTd; /**< pointer to the frame replicator source table descriptor */ + t_List membersList; /**< the members list - should reflect the order of the members as in the hw linked list*/ + t_List availableMembersList;/**< list of all the available members in the group */ + t_FmPcdLock *p_Lock; +} t_FmPcdFrmReplicGroup; + + +#endif /* __FM_REPLIC_H */ diff --git a/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fman_kg.c b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fman_kg.c new file mode 100644 index 0000000..6e77c9386 --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fman_kg.c @@ -0,0 +1,888 @@ +/* + * Copyright 2008-2012 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "fsl_fman_kg.h" + +/****************************************/ +/* static functions */ +/****************************************/ + + +static uint32_t build_ar_bind_scheme(uint8_t hwport_id, bool write) +{ + uint32_t rw; + + rw = write ? (uint32_t)FM_KG_KGAR_WRITE : (uint32_t)FM_KG_KGAR_READ; + + return (uint32_t)(FM_KG_KGAR_GO | + rw | + FM_PCD_KG_KGAR_SEL_PORT_ENTRY | + hwport_id | + FM_PCD_KG_KGAR_SEL_PORT_WSEL_SP); +} + +static void clear_pe_all_scheme(struct fman_kg_regs *regs, uint8_t hwport_id) +{ + uint32_t ar; + + fman_kg_write_sp(regs, 0xffffffff, 0); + + ar = build_ar_bind_scheme(hwport_id, TRUE); + fman_kg_write_ar_wait(regs, ar); +} + +static uint32_t build_ar_bind_cls_plan(uint8_t hwport_id, bool write) +{ + uint32_t rw; + + rw = write ? (uint32_t)FM_KG_KGAR_WRITE : (uint32_t)FM_KG_KGAR_READ; + + return (uint32_t)(FM_KG_KGAR_GO | + rw | + FM_PCD_KG_KGAR_SEL_PORT_ENTRY | + hwport_id | + FM_PCD_KG_KGAR_SEL_PORT_WSEL_CPP); +} + +static void clear_pe_all_cls_plan(struct fman_kg_regs *regs, uint8_t hwport_id) +{ + uint32_t ar; + + fman_kg_write_cpp(regs, 0); + + ar = build_ar_bind_cls_plan(hwport_id, TRUE); + fman_kg_write_ar_wait(regs, ar); +} + +static uint8_t get_gen_ht_code(enum fman_kg_gen_extract_src src, + bool no_validation, + uint8_t *offset) +{ + int code; + + switch (src) { + case E_FMAN_KG_GEN_EXTRACT_ETH: + code = no_validation ? 
0x73 : 0x3; + break; + + case E_FMAN_KG_GEN_EXTRACT_ETYPE: + code = no_validation ? 0x77 : 0x7; + break; + + case E_FMAN_KG_GEN_EXTRACT_SNAP: + code = no_validation ? 0x74 : 0x4; + break; + + case E_FMAN_KG_GEN_EXTRACT_VLAN_TCI_1: + code = no_validation ? 0x75 : 0x5; + break; + + case E_FMAN_KG_GEN_EXTRACT_VLAN_TCI_N: + code = no_validation ? 0x76 : 0x6; + break; + + case E_FMAN_KG_GEN_EXTRACT_PPPoE: + code = no_validation ? 0x78 : 0x8; + break; + + case E_FMAN_KG_GEN_EXTRACT_MPLS_1: + code = no_validation ? 0x79 : 0x9; + break; + + case E_FMAN_KG_GEN_EXTRACT_MPLS_2: + code = no_validation ? FM_KG_SCH_GEN_HT_INVALID : 0x19; + break; + + case E_FMAN_KG_GEN_EXTRACT_MPLS_3: + code = no_validation ? FM_KG_SCH_GEN_HT_INVALID : 0x29; + break; + + case E_FMAN_KG_GEN_EXTRACT_MPLS_N: + code = no_validation ? 0x7a : 0xa; + break; + + case E_FMAN_KG_GEN_EXTRACT_IPv4_1: + code = no_validation ? 0x7b : 0xb; + break; + + case E_FMAN_KG_GEN_EXTRACT_IPv6_1: + code = no_validation ? 0x7b : 0x1b; + break; + + case E_FMAN_KG_GEN_EXTRACT_IPv4_2: + code = no_validation ? 0x7c : 0xc; + break; + + case E_FMAN_KG_GEN_EXTRACT_IPv6_2: + code = no_validation ? 0x7c : 0x1c; + break; + + case E_FMAN_KG_GEN_EXTRACT_MINENCAP: + code = no_validation ? 0x7c : 0x2c; + break; + + case E_FMAN_KG_GEN_EXTRACT_IP_PID: + code = no_validation ? 0x72 : 0x2; + break; + + case E_FMAN_KG_GEN_EXTRACT_GRE: + code = no_validation ? 0x7d : 0xd; + break; + + case E_FMAN_KG_GEN_EXTRACT_TCP: + code = no_validation ? 0x7e : 0xe; + break; + + case E_FMAN_KG_GEN_EXTRACT_UDP: + code = no_validation ? 0x7e : 0x1e; + break; + + case E_FMAN_KG_GEN_EXTRACT_SCTP: + code = no_validation ? 0x7e : 0x3e; + break; + + case E_FMAN_KG_GEN_EXTRACT_DCCP: + code = no_validation ? 0x7e : 0x4e; + break; + + case E_FMAN_KG_GEN_EXTRACT_IPSEC_AH: + code = no_validation ? 0x7e : 0x2e; + break; + + case E_FMAN_KG_GEN_EXTRACT_IPSEC_ESP: + code = no_validation ? 
0x7e : 0x6e; + break; + + case E_FMAN_KG_GEN_EXTRACT_SHIM_1: + code = 0x70; + break; + + case E_FMAN_KG_GEN_EXTRACT_SHIM_2: + code = 0x71; + break; + + case E_FMAN_KG_GEN_EXTRACT_FROM_DFLT: + code = 0x10; + break; + + case E_FMAN_KG_GEN_EXTRACT_FROM_FRAME_START: + code = 0x40; + break; + + case E_FMAN_KG_GEN_EXTRACT_FROM_PARSE_RESULT: + code = 0x20; + break; + + case E_FMAN_KG_GEN_EXTRACT_FROM_END_OF_PARSE: + code = 0x7f; + break; + + case E_FMAN_KG_GEN_EXTRACT_FROM_FQID: + code = 0x20; + *offset += 0x20; + break; + + default: + code = FM_KG_SCH_GEN_HT_INVALID; + } + + return (uint8_t)code; +} + +static uint32_t build_ar_scheme(uint8_t scheme, + uint8_t hwport_id, + bool update_counter, + bool write) +{ + uint32_t rw; + + rw = (uint32_t)(write ? FM_KG_KGAR_WRITE : FM_KG_KGAR_READ); + + return (uint32_t)(FM_KG_KGAR_GO | + rw | + FM_KG_KGAR_SEL_SCHEME_ENTRY | + hwport_id | + ((uint32_t)scheme << FM_KG_KGAR_NUM_SHIFT) | + (update_counter ? FM_KG_KGAR_SCM_WSEL_UPDATE_CNT : 0)); +} + +static uint32_t build_ar_cls_plan(uint8_t grp, + uint8_t entries_mask, + uint8_t hwport_id, + bool write) +{ + uint32_t rw; + + rw = (uint32_t)(write ? 
FM_KG_KGAR_WRITE : FM_KG_KGAR_READ); + + return (uint32_t)(FM_KG_KGAR_GO | + rw | + FM_PCD_KG_KGAR_SEL_CLS_PLAN_ENTRY | + hwport_id | + ((uint32_t)grp << FM_KG_KGAR_NUM_SHIFT) | + ((uint32_t)entries_mask << FM_KG_KGAR_WSEL_SHIFT)); +} + +int fman_kg_write_ar_wait(struct fman_kg_regs *regs, uint32_t fmkg_ar) +{ + iowrite32be(fmkg_ar, ®s->fmkg_ar); + /* Wait for GO to be idle and read error */ + while ((fmkg_ar = ioread32be(®s->fmkg_ar)) & FM_KG_KGAR_GO) ; + if (fmkg_ar & FM_PCD_KG_KGAR_ERR) + return -EINVAL; + return 0; +} + +void fman_kg_write_sp(struct fman_kg_regs *regs, uint32_t sp, bool add) +{ + + struct fman_kg_pe_regs *kgpe_regs; + uint32_t tmp; + + kgpe_regs = (struct fman_kg_pe_regs *)&(regs->fmkg_indirect[0]); + tmp = ioread32be(&kgpe_regs->fmkg_pe_sp); + + if (add) + tmp |= sp; + else /* clear */ + tmp &= ~sp; + + iowrite32be(tmp, &kgpe_regs->fmkg_pe_sp); + +} + +void fman_kg_write_cpp(struct fman_kg_regs *regs, uint32_t cpp) +{ + struct fman_kg_pe_regs *kgpe_regs; + + kgpe_regs = (struct fman_kg_pe_regs *)&(regs->fmkg_indirect[0]); + + iowrite32be(cpp, &kgpe_regs->fmkg_pe_cpp); +} + +void fman_kg_get_event(struct fman_kg_regs *regs, + uint32_t *event, + uint32_t *scheme_idx) +{ + uint32_t mask, force; + + *event = ioread32be(®s->fmkg_eer); + mask = ioread32be(®s->fmkg_eeer); + *scheme_idx = ioread32be(®s->fmkg_seer); + *scheme_idx &= ioread32be(®s->fmkg_seeer); + + *event &= mask; + + /* clear the forced events */ + force = ioread32be(®s->fmkg_feer); + if (force & *event) + iowrite32be(force & ~*event ,®s->fmkg_feer); + + iowrite32be(*event, ®s->fmkg_eer); + iowrite32be(*scheme_idx, ®s->fmkg_seer); +} + + +void fman_kg_init(struct fman_kg_regs *regs, + uint32_t exceptions, + uint32_t dflt_nia) +{ + uint32_t tmp; + int i; + + iowrite32be(FM_EX_KG_DOUBLE_ECC | FM_EX_KG_KEYSIZE_OVERFLOW, + ®s->fmkg_eer); + + tmp = 0; + if (exceptions & FM_EX_KG_DOUBLE_ECC) + tmp |= FM_EX_KG_DOUBLE_ECC; + + if (exceptions & FM_EX_KG_KEYSIZE_OVERFLOW) + tmp |= 
FM_EX_KG_KEYSIZE_OVERFLOW; + + iowrite32be(tmp, ®s->fmkg_eeer); + iowrite32be(0, ®s->fmkg_fdor); + iowrite32be(0, ®s->fmkg_gdv0r); + iowrite32be(0, ®s->fmkg_gdv1r); + iowrite32be(dflt_nia, ®s->fmkg_gcr); + + /* Clear binding between ports to schemes and classification plans + * so that all ports are not bound to any scheme/classification plan */ + for (i = 0; i < FMAN_MAX_NUM_OF_HW_PORTS; i++) { + clear_pe_all_scheme(regs, (uint8_t)i); + clear_pe_all_cls_plan(regs, (uint8_t)i); + } +} + +void fman_kg_enable_scheme_interrupts(struct fman_kg_regs *regs) +{ + /* enable and enable all scheme interrupts */ + iowrite32be(0xFFFFFFFF, ®s->fmkg_seer); + iowrite32be(0xFFFFFFFF, ®s->fmkg_seeer); +} + +void fman_kg_enable(struct fman_kg_regs *regs) +{ + iowrite32be(ioread32be(®s->fmkg_gcr) | FM_KG_KGGCR_EN, + ®s->fmkg_gcr); +} + +void fman_kg_disable(struct fman_kg_regs *regs) +{ + iowrite32be(ioread32be(®s->fmkg_gcr) & ~FM_KG_KGGCR_EN, + ®s->fmkg_gcr); +} + +void fman_kg_set_data_after_prs(struct fman_kg_regs *regs, uint8_t offset) +{ + iowrite32be(offset, ®s->fmkg_fdor); +} + +void fman_kg_set_dflt_val(struct fman_kg_regs *regs, + uint8_t def_id, + uint32_t val) +{ + if(def_id == 0) + iowrite32be(val, ®s->fmkg_gdv0r); + else + iowrite32be(val, ®s->fmkg_gdv1r); +} + + +void fman_kg_set_exception(struct fman_kg_regs *regs, + uint32_t exception, + bool enable) +{ + uint32_t tmp; + + tmp = ioread32be(®s->fmkg_eeer); + + if (enable) { + tmp |= exception; + } else { + tmp &= ~exception; + } + + iowrite32be(tmp, ®s->fmkg_eeer); +} + +void fman_kg_get_exception(struct fman_kg_regs *regs, + uint32_t *events, + uint32_t *scheme_ids, + bool clear) +{ + uint32_t mask; + + *events = ioread32be(®s->fmkg_eer); + mask = ioread32be(®s->fmkg_eeer); + *events &= mask; + + *scheme_ids = 0; + + if (*events & FM_EX_KG_KEYSIZE_OVERFLOW) { + *scheme_ids = ioread32be(®s->fmkg_seer); + mask = ioread32be(®s->fmkg_seeer); + *scheme_ids &= mask; + } + + if (clear) { + iowrite32be(*scheme_ids, 
®s->fmkg_seer); + iowrite32be(*events, ®s->fmkg_eer); + } +} + +void fman_kg_get_capture(struct fman_kg_regs *regs, + struct fman_kg_ex_ecc_attr *ecc_attr, + bool clear) +{ + uint32_t tmp; + + tmp = ioread32be(®s->fmkg_serc); + + if (tmp & KG_FMKG_SERC_CAP) { + /* Captured data is valid */ + ecc_attr->valid = TRUE; + ecc_attr->double_ecc = + (bool)((tmp & KG_FMKG_SERC_CET) ? TRUE : FALSE); + ecc_attr->single_ecc_count = + (uint8_t)((tmp & KG_FMKG_SERC_CNT_MSK) >> + KG_FMKG_SERC_CNT_SHIFT); + ecc_attr->addr = (uint16_t)(tmp & KG_FMKG_SERC_ADDR_MSK); + + if (clear) + iowrite32be(KG_FMKG_SERC_CAP, ®s->fmkg_serc); + } else { + /* No ECC error is captured */ + ecc_attr->valid = FALSE; + } +} + +int fman_kg_build_scheme(struct fman_kg_scheme_params *params, + struct fman_kg_scheme_regs *scheme_regs) +{ + struct fman_kg_extract_params *extract_params; + struct fman_kg_gen_extract_params *gen_params; + uint32_t tmp_reg, i, select, mask, fqb; + uint8_t offset, shift, ht; + + /* Zero out all registers so no need to care about unused ones */ + memset(scheme_regs, 0, sizeof(struct fman_kg_scheme_regs)); + + /* Mode register */ + tmp_reg = fm_kg_build_nia(params->next_engine, + params->next_engine_action); + if (tmp_reg == KG_NIA_INVALID) { + return -EINVAL; + } + + if (params->next_engine == E_FMAN_PCD_PLCR) { + tmp_reg |= FMAN_KG_SCH_MODE_NIA_PLCR; + } + else if (params->next_engine == E_FMAN_PCD_CC) { + tmp_reg |= (uint32_t)params->cc_params.base_offset << + FMAN_KG_SCH_MODE_CCOBASE_SHIFT; + } + + tmp_reg |= FMAN_KG_SCH_MODE_EN; + scheme_regs->kgse_mode = tmp_reg; + + /* Match vector */ + scheme_regs->kgse_mv = params->match_vector; + + extract_params = ¶ms->extract_params; + + /* Scheme default values registers */ + scheme_regs->kgse_dv0 = extract_params->def_scheme_0; + scheme_regs->kgse_dv1 = extract_params->def_scheme_1; + + /* Extract Known Fields Command register */ + scheme_regs->kgse_ekfc = extract_params->known_fields; + + /* Entry Extract Known Default Value 
register */ + tmp_reg = 0; + tmp_reg |= extract_params->known_fields_def.mac_addr << + FMAN_KG_SCH_DEF_MAC_ADDR_SHIFT; + tmp_reg |= extract_params->known_fields_def.vlan_tci << + FMAN_KG_SCH_DEF_VLAN_TCI_SHIFT; + tmp_reg |= extract_params->known_fields_def.etype << + FMAN_KG_SCH_DEF_ETYPE_SHIFT; + tmp_reg |= extract_params->known_fields_def.ppp_sid << + FMAN_KG_SCH_DEF_PPP_SID_SHIFT; + tmp_reg |= extract_params->known_fields_def.ppp_pid << + FMAN_KG_SCH_DEF_PPP_PID_SHIFT; + tmp_reg |= extract_params->known_fields_def.mpls << + FMAN_KG_SCH_DEF_MPLS_SHIFT; + tmp_reg |= extract_params->known_fields_def.ip_addr << + FMAN_KG_SCH_DEF_IP_ADDR_SHIFT; + tmp_reg |= extract_params->known_fields_def.ptype << + FMAN_KG_SCH_DEF_PTYPE_SHIFT; + tmp_reg |= extract_params->known_fields_def.ip_tos_tc << + FMAN_KG_SCH_DEF_IP_TOS_TC_SHIFT; + tmp_reg |= extract_params->known_fields_def.ipv6_fl << + FMAN_KG_SCH_DEF_IPv6_FL_SHIFT; + tmp_reg |= extract_params->known_fields_def.ipsec_spi << + FMAN_KG_SCH_DEF_IPSEC_SPI_SHIFT; + tmp_reg |= extract_params->known_fields_def.l4_port << + FMAN_KG_SCH_DEF_L4_PORT_SHIFT; + tmp_reg |= extract_params->known_fields_def.tcp_flg << + FMAN_KG_SCH_DEF_TCP_FLG_SHIFT; + + scheme_regs->kgse_ekdv = tmp_reg; + + /* Generic extract registers */ + if (extract_params->gen_extract_num > FM_KG_NUM_OF_GENERIC_REGS) { + return -EINVAL; + } + + for (i = 0; i < extract_params->gen_extract_num; i++) { + gen_params = extract_params->gen_extract + i; + + tmp_reg = FMAN_KG_SCH_GEN_VALID; + tmp_reg |= (uint32_t)gen_params->def_val << + FMAN_KG_SCH_GEN_DEF_SHIFT; + + if (gen_params->type == E_FMAN_KG_HASH_EXTRACT) { + if ((gen_params->extract > FMAN_KG_SCH_GEN_SIZE_MAX) || + (gen_params->extract == 0)) { + return -EINVAL; + } + } else { + tmp_reg |= FMAN_KG_SCH_GEN_OR; + } + + tmp_reg |= (uint32_t)gen_params->extract << + FMAN_KG_SCH_GEN_SIZE_SHIFT; + tmp_reg |= (uint32_t)gen_params->mask << + FMAN_KG_SCH_GEN_MASK_SHIFT; + + offset = gen_params->offset; + ht = 
get_gen_ht_code(gen_params->src, + gen_params->no_validation, + &offset); + tmp_reg |= (uint32_t)ht << FMAN_KG_SCH_GEN_HT_SHIFT; + tmp_reg |= offset; + + scheme_regs->kgse_gec[i] = tmp_reg; + } + + /* Masks registers */ + if (extract_params->masks_num > FM_KG_EXTRACT_MASKS_NUM) { + return -EINVAL; + } + + select = 0; + mask = 0; + fqb = 0; + for (i = 0; i < extract_params->masks_num; i++) { + /* MCSx fields */ + KG_GET_MASK_SEL_SHIFT(shift, i); + if (extract_params->masks[i].is_known) { + /* Mask known field */ + select |= extract_params->masks[i].field_or_gen_idx << + shift; + } else { + /* Mask generic extract */ + select |= (extract_params->masks[i].field_or_gen_idx + + FM_KG_MASK_SEL_GEN_BASE) << shift; + } + + /* MOx fields - spread between se_bmch and se_fqb registers */ + KG_GET_MASK_OFFSET_SHIFT(shift, i); + if (i < 2) { + select |= (uint32_t)extract_params->masks[i].offset << + shift; + } else { + fqb |= (uint32_t)extract_params->masks[i].offset << + shift; + } + + /* BMx fields */ + KG_GET_MASK_SHIFT(shift, i); + mask |= (uint32_t)extract_params->masks[i].mask << shift; + } + + /* Finish with rest of BMx fileds - + * don't mask bits for unused masks by setting + * corresponding BMx field = 0xFF */ + for (i = extract_params->masks_num; i < FM_KG_EXTRACT_MASKS_NUM; i++) { + KG_GET_MASK_SHIFT(shift, i); + mask |= 0xFF << shift; + } + + scheme_regs->kgse_bmch = select; + scheme_regs->kgse_bmcl = mask; + + /* Finish with FQB register initialization. + * Check fqid is 24-bit value. */ + if (params->base_fqid & ~0x00FFFFFF) { + return -EINVAL; + } + + fqb |= params->base_fqid; + scheme_regs->kgse_fqb = fqb; + + /* Hash Configuration register */ + tmp_reg = 0; + if (params->hash_params.use_hash) { + /* Check hash mask is 24-bit value */ + if (params->hash_params.mask & ~0x00FFFFFF) { + return -EINVAL; + } + + /* Hash function produces 64-bit value, 24 bits of that + * are used to generate fq_id and policer profile. 
+ * Thus, maximal shift is 40 bits to allow 24 bits out of 64. + */ + if (params->hash_params.shift_r > FMAN_KG_SCH_HASH_HSHIFT_MAX) { + return -EINVAL; + } + + tmp_reg |= params->hash_params.mask; + tmp_reg |= (uint32_t)params->hash_params.shift_r << + FMAN_KG_SCH_HASH_HSHIFT_SHIFT; + + if (params->hash_params.sym) { + tmp_reg |= FMAN_KG_SCH_HASH_SYM; + } + + } + + if (params->bypass_fqid_gen) { + tmp_reg |= FMAN_KG_SCH_HASH_NO_FQID_GEN; + } + + scheme_regs->kgse_hc = tmp_reg; + + /* Policer Profile register */ + if (params->policer_params.bypass_pp_gen) { + tmp_reg = FMAN_KG_SCH_PP_NO_GEN; + } else { + /* Lower 8 bits of 24-bits extracted from hash result + * are used for policer profile generation. + * That leaves maximum shift value = 23. */ + if (params->policer_params.shift > FMAN_KG_SCH_PP_SHIFT_MAX) { + return -EINVAL; + } + + tmp_reg = params->policer_params.base; + tmp_reg |= ((uint32_t)params->policer_params.shift << + FMAN_KG_SCH_PP_SH_SHIFT) & + FMAN_KG_SCH_PP_SH_MASK; + tmp_reg |= ((uint32_t)params->policer_params.shift << + FMAN_KG_SCH_PP_SL_SHIFT) & + FMAN_KG_SCH_PP_SL_MASK; + tmp_reg |= (uint32_t)params->policer_params.mask << + FMAN_KG_SCH_PP_MASK_SHIFT; + } + + scheme_regs->kgse_ppc = tmp_reg; + + /* Coarse Classification Bit Select register */ + if (params->next_engine == E_FMAN_PCD_CC) { + scheme_regs->kgse_ccbs = params->cc_params.qlcv_bits_sel; + } + + /* Packets Counter register */ + if (params->update_counter) { + scheme_regs->kgse_spc = params->counter_value; + } + + return 0; +} + +int fman_kg_write_scheme(struct fman_kg_regs *regs, + uint8_t scheme_id, + uint8_t hwport_id, + struct fman_kg_scheme_regs *scheme_regs, + bool update_counter) +{ + struct fman_kg_scheme_regs *kgse_regs; + uint32_t tmp_reg; + int err, i; + + /* Write indirect scheme registers */ + kgse_regs = (struct fman_kg_scheme_regs *)&(regs->fmkg_indirect[0]); + + iowrite32be(scheme_regs->kgse_mode, &kgse_regs->kgse_mode); + iowrite32be(scheme_regs->kgse_ekfc, 
&kgse_regs->kgse_ekfc); + iowrite32be(scheme_regs->kgse_ekdv, &kgse_regs->kgse_ekdv); + iowrite32be(scheme_regs->kgse_bmch, &kgse_regs->kgse_bmch); + iowrite32be(scheme_regs->kgse_bmcl, &kgse_regs->kgse_bmcl); + iowrite32be(scheme_regs->kgse_fqb, &kgse_regs->kgse_fqb); + iowrite32be(scheme_regs->kgse_hc, &kgse_regs->kgse_hc); + iowrite32be(scheme_regs->kgse_ppc, &kgse_regs->kgse_ppc); + iowrite32be(scheme_regs->kgse_spc, &kgse_regs->kgse_spc); + iowrite32be(scheme_regs->kgse_dv0, &kgse_regs->kgse_dv0); + iowrite32be(scheme_regs->kgse_dv1, &kgse_regs->kgse_dv1); + iowrite32be(scheme_regs->kgse_ccbs, &kgse_regs->kgse_ccbs); + iowrite32be(scheme_regs->kgse_mv, &kgse_regs->kgse_mv); + + for (i = 0 ; i < FM_KG_NUM_OF_GENERIC_REGS ; i++) + iowrite32be(scheme_regs->kgse_gec[i], &kgse_regs->kgse_gec[i]); + + /* Write AR (Action register) */ + tmp_reg = build_ar_scheme(scheme_id, hwport_id, update_counter, TRUE); + err = fman_kg_write_ar_wait(regs, tmp_reg); + return err; +} + +int fman_kg_delete_scheme(struct fman_kg_regs *regs, + uint8_t scheme_id, + uint8_t hwport_id) +{ + struct fman_kg_scheme_regs *kgse_regs; + uint32_t tmp_reg; + int err, i; + + kgse_regs = (struct fman_kg_scheme_regs *)&(regs->fmkg_indirect[0]); + + /* Clear all registers including enable bit in mode register */ + for (i = 0; i < (sizeof(struct fman_kg_scheme_regs)) / 4; ++i) { + iowrite32be(0, ((uint32_t *)kgse_regs + i)); + } + + /* Write AR (Action register) */ + tmp_reg = build_ar_scheme(scheme_id, hwport_id, FALSE, TRUE); + err = fman_kg_write_ar_wait(regs, tmp_reg); + return err; +} + +int fman_kg_get_scheme_counter(struct fman_kg_regs *regs, + uint8_t scheme_id, + uint8_t hwport_id, + uint32_t *counter) +{ + struct fman_kg_scheme_regs *kgse_regs; + uint32_t tmp_reg; + int err; + + kgse_regs = (struct fman_kg_scheme_regs *)&(regs->fmkg_indirect[0]); + + tmp_reg = build_ar_scheme(scheme_id, hwport_id, TRUE, FALSE); + err = fman_kg_write_ar_wait(regs, tmp_reg); + + if (err != 0) + return err; + + 
*counter = ioread32be(&kgse_regs->kgse_spc); + + return 0; +} + +int fman_kg_set_scheme_counter(struct fman_kg_regs *regs, + uint8_t scheme_id, + uint8_t hwport_id, + uint32_t counter) +{ + struct fman_kg_scheme_regs *kgse_regs; + uint32_t tmp_reg; + int err; + + kgse_regs = (struct fman_kg_scheme_regs *)&(regs->fmkg_indirect[0]); + + tmp_reg = build_ar_scheme(scheme_id, hwport_id, TRUE, FALSE); + + err = fman_kg_write_ar_wait(regs, tmp_reg); + if (err != 0) + return err; + + /* Keygen indirect access memory contains all scheme_id registers + * by now. Change only counter value. */ + iowrite32be(counter, &kgse_regs->kgse_spc); + + /* Write back scheme registers */ + tmp_reg = build_ar_scheme(scheme_id, hwport_id, TRUE, TRUE); + err = fman_kg_write_ar_wait(regs, tmp_reg); + + return err; +} + +uint32_t fman_kg_get_schemes_total_counter(struct fman_kg_regs *regs) +{ + return ioread32be(®s->fmkg_tpc); +} + +int fman_kg_build_cls_plan(struct fman_kg_cls_plan_params *params, + struct fman_kg_cp_regs *cls_plan_regs) +{ + uint8_t entries_set, entry_bit; + int i; + + /* Zero out all group's register */ + memset(cls_plan_regs, 0, sizeof(struct fman_kg_cp_regs)); + + /* Go over all classification entries in params->entries_mask and + * configure the corresponding cpe register */ + entries_set = params->entries_mask; + for (i = 0; entries_set; i++) { + entry_bit = (uint8_t)(0x80 >> i); + if ((entry_bit & entries_set) == 0) + continue; + entries_set ^= entry_bit; + cls_plan_regs->kgcpe[i] = params->mask_vector[i]; + } + + return 0; +} + +int fman_kg_write_cls_plan(struct fman_kg_regs *regs, + uint8_t grp_id, + uint8_t entries_mask, + uint8_t hwport_id, + struct fman_kg_cp_regs *cls_plan_regs) +{ + struct fman_kg_cp_regs *kgcpe_regs; + uint32_t tmp_reg; + int i, err; + + /* Check group index is valid and the group isn't empty */ + if (grp_id >= FM_KG_CLS_PLAN_GRPS_NUM) + return -EINVAL; + + /* Write indirect classification plan registers */ + kgcpe_regs = (struct 
fman_kg_cp_regs *)&(regs->fmkg_indirect[0]); + + for (i = 0; i < FM_KG_NUM_CLS_PLAN_ENTR; i++) { + iowrite32be(cls_plan_regs->kgcpe[i], &kgcpe_regs->kgcpe[i]); + } + + tmp_reg = build_ar_cls_plan(grp_id, entries_mask, hwport_id, TRUE); + err = fman_kg_write_ar_wait(regs, tmp_reg); + return err; +} + +int fman_kg_write_bind_schemes(struct fman_kg_regs *regs, + uint8_t hwport_id, + uint32_t schemes) +{ + struct fman_kg_pe_regs *kg_pe_regs; + uint32_t tmp_reg; + int err; + + kg_pe_regs = (struct fman_kg_pe_regs *)&(regs->fmkg_indirect[0]); + + iowrite32be(schemes, &kg_pe_regs->fmkg_pe_sp); + + tmp_reg = build_ar_bind_scheme(hwport_id, TRUE); + err = fman_kg_write_ar_wait(regs, tmp_reg); + return err; +} + +int fman_kg_build_bind_cls_plans(uint8_t grp_base, + uint8_t grp_mask, + uint32_t *bind_cls_plans) +{ + /* Check grp_base and grp_mask are 5-bits values */ + if ((grp_base & ~0x0000001F) || (grp_mask & !0x0000001F)) + return -EINVAL; + + *bind_cls_plans = (uint32_t) ((grp_mask << FMAN_KG_PE_CPP_MASK_SHIFT) | grp_base); + return 0; +} + + +int fman_kg_write_bind_cls_plans(struct fman_kg_regs *regs, + uint8_t hwport_id, + uint32_t bind_cls_plans) +{ + struct fman_kg_pe_regs *kg_pe_regs; + uint32_t tmp_reg; + int err; + + kg_pe_regs = (struct fman_kg_pe_regs *)&(regs->fmkg_indirect[0]); + + iowrite32be(bind_cls_plans, &kg_pe_regs->fmkg_pe_cpp); + + tmp_reg = build_ar_bind_cls_plan(hwport_id, TRUE); + err = fman_kg_write_ar_wait(regs, tmp_reg); + return err; +} diff --git a/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fman_prs.c b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fman_prs.c new file mode 100644 index 0000000..caa1d28 --- /dev/null +++ b/drivers/net/ethernet/freescale/fman/Peripherals/FM/Pcd/fman_prs.c @@ -0,0 +1,124 @@ +/* + * Copyright 2012 Freescale Semiconductor Inc. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "fsl_fman_prs.h" + +uint32_t fman_prs_get_err_event(struct fman_prs_regs *regs, uint32_t ev_mask) +{ + return ioread32be(®s->fmpr_perr) & ev_mask; +} + +uint32_t fman_prs_get_err_ev_mask(struct fman_prs_regs *regs) +{ + return ioread32be(®s->fmpr_perer); +} + +void fman_prs_ack_err_event(struct fman_prs_regs *regs, uint32_t event) +{ + iowrite32be(event, ®s->fmpr_perr); +} + +uint32_t fman_prs_get_expt_event(struct fman_prs_regs *regs, uint32_t ev_mask) +{ + return ioread32be(®s->fmpr_pevr) & ev_mask; +} + +uint32_t fman_prs_get_expt_ev_mask(struct fman_prs_regs *regs) +{ + return ioread32be(®s->fmpr_pever); +} + +void fman_prs_ack_expt_event(struct fman_prs_regs *regs, uint32_t event) +{ + iowrite32be(event, ®s->fmpr_pevr); +} + +void fman_prs_defconfig(struct fman_prs_cfg *cfg) +{ + cfg->port_id_stat = 0; + cfg->max_prs_cyc_lim = DEFAULT_MAX_PRS_CYC_LIM; + cfg->prs_exceptions = 0x03000000; +} + +int fman_prs_init(struct fman_prs_regs *regs, struct fman_prs_cfg *cfg) +{ + uint32_t tmp; + + iowrite32be(cfg->max_prs_cyc_lim, ®s->fmpr_rpclim); + iowrite32be((FM_PCD_PRS_SINGLE_ECC | FM_PCD_PRS_PORT_IDLE_STS), + ®s->fmpr_pevr); + + if (cfg->prs_exceptions & FM_PCD_EX_PRS_SINGLE_ECC) + iowrite32be(FM_PCD_PRS_SINGLE_ECC, ®s->fmpr_pever); + else + iowrite32be(0, ®s->fmpr_pever); + + iowrite32be(FM_PCD_PRS_DOUBLE_ECC, ®s->fmpr_perr); + + tmp = 0; + if (cfg->prs_exceptions & FM_PCD_EX_PRS_DOUBLE_ECC) + tmp |= FM_PCD_PRS_DOUBLE_ECC; + iowrite32be(tmp, ®s->fmpr_perer); + + iowrite32be(cfg->port_id_stat, ®s->fmpr_ppsc); + + return 0; +} + +void fman_prs_enable(struct fman_prs_regs *regs) +{ + uint32_t tmp; + + tmp = ioread32be(®s->fmpr_rpimac) | FM_PCD_PRS_RPIMAC_EN; + iowrite32be(tmp, ®s->fmpr_rpimac); +} + +void fman_prs_disable(struct fman_prs_regs *regs) +{ + uint32_t tmp; + + tmp = ioread32be(®s->fmpr_rpimac) & ~FM_PCD_PRS_RPIMAC_EN; + iowrite32be(tmp, ®s->fmpr_rpimac); +} + +void fman_prs_set_stst_port_msk(struct fman_prs_regs *regs, uint32_t pid_msk) +{ 
+ iowrite32be(pid_msk, ®s->fmpr_ppsc); +} + +void fman_prs_set_stst(struct fman_prs_regs *regs, bool enable) +{ + if (enable) + iowrite32be(FM_PCD_PRS_PPSC_ALL_PORTS, ®s->fmpr_ppsc); + else + iowrite32be(0, ®s->fmpr_ppsc); +} |