summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJeanina Floarea <anca.floarea@freescale.com>2013-03-15 10:18:53 (GMT)
committerFleming Andrew-AFLEMING <AFLEMING@freescale.com>2013-04-30 16:25:28 (GMT)
commitbc1ae866146a4fcd1c14c1a3e28a02db217a0187 (patch)
treed6b1bc865f3a7b177e05e32d81b118fa642144e9
parentbc75bd1cfbcc101733b340cd9c8c3c696d06352f (diff)
downloadlinux-fsl-qoriq-bc1ae866146a4fcd1c14c1a3e28a02db217a0187.tar.xz
dpa_offload: Add DPA Stats component
The purpose of the DPA Stats module is to provide to the application a unitary method for retrieving counters that are spread at different hardware or software locations. Signed-off-by: Anca Jeanina FLOAREA <anca.floarea@freescale.com> Change-Id: I3b4d886ef5aab00f6de6a330e068b7401bc24b6c Reviewed-on: http://git.am.freescale.net:8181/2237 Reviewed-by: Fleming Andrew-AFLEMING <AFLEMING@freescale.com> Tested-by: Fleming Andrew-AFLEMING <AFLEMING@freescale.com>
-rw-r--r--drivers/staging/fsl_dpa_offload/dpa_stats.c3193
-rw-r--r--drivers/staging/fsl_dpa_offload/dpa_stats.h197
-rw-r--r--include/linux/fsl_dpa_stats.h810
3 files changed, 4200 insertions, 0 deletions
diff --git a/drivers/staging/fsl_dpa_offload/dpa_stats.c b/drivers/staging/fsl_dpa_offload/dpa_stats.c
new file mode 100644
index 0000000..6cc4716
--- /dev/null
+++ b/drivers/staging/fsl_dpa_offload/dpa_stats.c
@@ -0,0 +1,3193 @@
+/* Copyright 2008-2013 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * DPA Statistics Application Programming Interface implementation
+ */
+
+#include <linux/of_platform.h>
+#include "lnxwrp_fm.h"
+#include "dpaa_eth.h"
+
+/* DPA offloading layer includes */
+#include "dpa_stats.h"
+#include "dpa_classifier.h"
+
+/* FMD includes */
+#include "fm_pcd_ext.h"
+
+#define STATS_VAL_SIZE 4
+#define CLASSIF_STATS_SHIFT 4
+#define WORKQUEUE_MAX_ACTIVE 3
+
+/* Global dpa_stats component */
+struct dpa_stats *gbl_dpa_stats;
+
+static int get_cnt_cls_tbl_frag_stats(struct dpa_stats_req_cb *req_cb,
+ struct dpa_stats_cnt_cb *cnt_cb);
+
+static int get_cnt_cls_tbl_match_stats(struct dpa_stats_req_cb *req_cb,
+ struct dpa_stats_cnt_cb *cnt_cb);
+
+static int get_cnt_cls_tbl_hash_stats(struct dpa_stats_req_cb *req_cb,
+ struct dpa_stats_cnt_cb *cnt_cb);
+
+static int get_cnt_cls_tbl_index_stats(struct dpa_stats_req_cb *req_cb,
+ struct dpa_stats_cnt_cb *cnt_cb);
+
+static int get_cnt_ccnode_match_stats(struct dpa_stats_req_cb *req_cb,
+ struct dpa_stats_cnt_cb *cnt_cb);
+
+static int get_cnt_ccnode_hash_stats(struct dpa_stats_req_cb *req_cb,
+ struct dpa_stats_cnt_cb *cnt_cb);
+
+static int get_cnt_ccnode_index_stats(struct dpa_stats_req_cb *req_cb,
+ struct dpa_stats_cnt_cb *cnt_cb);
+
+static void async_req_work_func(struct work_struct *work);
+
+/* Validate the DPA Stats module initialization parameters. */
+static int check_dpa_stats_params(const struct dpa_stats_params *params)
+{
+	if (!params) {
+		pr_err("Invalid DPA Stats parameters handle\n");
+		return -EINVAL;
+	}
+
+	/* The counter count must be in (0, DPA_STATS_MAX_NUM_OF_COUNTERS] */
+	if (params->max_counters == 0 ||
+	    params->max_counters > DPA_STATS_MAX_NUM_OF_COUNTERS) {
+		pr_err("Invalid DPA Stats number of counters\n");
+		return -EDOM;
+	}
+
+	/* A storage area able to hold at least one value is mandatory */
+	if (!params->storage_area) {
+		pr_err("Invalid DPA Stats storage area\n");
+		return -EINVAL;
+	}
+	if (params->storage_area_len < STATS_VAL_SIZE) {
+		pr_err("Invalid DPA Stats storage area length\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Probe statistics support for a classifier table entry and bind the
+ * counter control block to the retrieval routine that matches the table
+ * type. The FMD statistics call is issued once here as a dry run; its
+ * result is discarded and only the error code is inspected.
+ * Returns 0 on success or a negative errno on failure.
+ */
+static int check_tbl_cls_counter(struct dpa_stats_cnt_cb *cnt_cb,
+				 struct dpa_stats_lookup_key *entry)
+{
+	t_FmPcdCcKeyStatistics stats;
+	int err;
+
+	switch (cnt_cb->tbl_cb.type) {
+	case DPA_CLS_TBL_HASH:
+		/* Dry-run lookup: confirms the key has statistics attached */
+		err = FM_PCD_HashTableFindNGetKeyStatistics(entry->cc_node,
+				entry->key.size, entry->key.byte, &stats);
+		if (err != 0) {
+			pr_err("Couldn't retrieve Classif Table statistics\n");
+			return -EIO;
+		}
+		cnt_cb->f_get_cnt_stats = get_cnt_cls_tbl_hash_stats;
+		break;
+	case DPA_CLS_TBL_INDEXED:
+		/* For indexed tables the first key byte is the entry index */
+		err = FM_PCD_MatchTableGetKeyStatistics(
+				entry->cc_node, entry->key.byte[0], &stats);
+		if (err != 0) {
+			pr_err("Invalid Classifier Table counter parameters\n");
+			return -EIO;
+		}
+		cnt_cb->f_get_cnt_stats = get_cnt_cls_tbl_index_stats;
+		break;
+	case DPA_CLS_TBL_EXACT_MATCH:
+		err = FM_PCD_MatchTableFindNGetKeyStatistics(entry->cc_node,
+				entry->key.size, entry->key.byte,
+				entry->key.mask, &stats);
+		if (err != 0) {
+			pr_err("Invalid Classifier Table counter parameters\n");
+			/*
+			 * NOTE(review): the sibling cases return -EIO for the
+			 * same kind of FMD failure — confirm whether -EINVAL
+			 * here is intentional.
+			 */
+			return -EINVAL;
+		}
+		cnt_cb->f_get_cnt_stats = get_cnt_cls_tbl_match_stats;
+		break;
+	default:
+		pr_err("Invalid table type\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*
+ * Probe statistics support for a standalone classification node and bind
+ * the counter control block to the retrieval routine that matches the
+ * node type. Mirrors check_tbl_cls_counter() but keys off the
+ * user-supplied node type instead of the table control block.
+ * Returns 0 on success or a negative errno on failure.
+ */
+static int check_ccnode_counter(struct dpa_stats_cnt_cb *cnt_cb,
+				enum dpa_stats_classif_node_type ccnode_type,
+				struct dpa_offload_lookup_key *key)
+{
+	t_FmPcdCcKeyStatistics stats;
+	int err;
+
+	switch (ccnode_type) {
+	case DPA_STATS_CLASSIF_NODE_HASH:
+		/* Dry-run lookup: confirms the key has statistics attached */
+		err = FM_PCD_HashTableFindNGetKeyStatistics(
+				cnt_cb->ccnode_cb.cc_node,
+				key->size, key->byte, &stats);
+		if (err != 0) {
+			pr_err("Couldn't retrieve Classif Table statistics\n");
+			return -EIO;
+		}
+		cnt_cb->f_get_cnt_stats = get_cnt_ccnode_hash_stats;
+		break;
+	case DPA_STATS_CLASSIF_NODE_INDEXED:
+		/* For indexed nodes the first key byte is the entry index */
+		err = FM_PCD_MatchTableGetKeyStatistics(
+				cnt_cb->ccnode_cb.cc_node,
+				key->byte[0], &stats);
+		if (err != 0) {
+			pr_err("Invalid Classifier Table counter parameters\n");
+			return -EIO;
+		}
+		cnt_cb->f_get_cnt_stats = get_cnt_ccnode_index_stats;
+		break;
+	case DPA_STATS_CLASSIF_NODE_EXACT_MATCH:
+		err = FM_PCD_MatchTableFindNGetKeyStatistics(
+				cnt_cb->ccnode_cb.cc_node, key->size, key->byte,
+				key->mask, &stats);
+		if (err != 0) {
+			pr_err("Invalid Classifier Table counter parameters\n");
+			/* NOTE(review): sibling cases return -EIO here */
+			return -EINVAL;
+		}
+		cnt_cb->f_get_cnt_stats = get_cnt_ccnode_match_stats;
+		break;
+	default:
+		pr_err("Invalid table type\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*
+ * Reserve a free counter id and hand back its preallocated control block.
+ * The id comes from the free-ids circular queue; a matching slot in the
+ * 'used_cnt_ids' bookkeeping array is claimed under the instance lock.
+ * On success *cnt_cb points to the reserved block. Returns 0 or -EDOM if
+ * either the queue or the bookkeeping array is exhausted.
+ */
+static int get_new_cnt(struct dpa_stats *dpa_stats,
+		       struct dpa_stats_cnt_cb **cnt_cb)
+{
+	struct dpa_stats_cnt_cb *new_cnt;
+	uint32_t id;
+	int i;
+
+	/* Acquire DPA Stats instance lock */
+	mutex_lock(&dpa_stats->lock);
+
+	/* Get an id for new Counter */
+	if (cq_get_4bytes(dpa_stats->cnt_id_cq, &id) < 0) {
+		pr_err("No more unused counter ids\n");
+		mutex_unlock(&dpa_stats->lock);
+		return -EDOM;
+	}
+
+	/*
+	 * Use 'used_cnt_ids' array in order to store counter ids that are
+	 * 'in use' . Array can be further used to remove counters
+	 */
+	for (i = 0; i < dpa_stats->config.max_counters; i++)
+		if (dpa_stats->used_cnt_ids[i] == DPA_OFFLD_INVALID_OBJECT_ID)
+			break;
+
+	/* Defensive: the cq should never hand out more than max_counters */
+	if (i == dpa_stats->config.max_counters) {
+		pr_err("All counters have been used\n");
+		/* Undo the reservation before bailing out */
+		cq_put_4bytes(dpa_stats->cnt_id_cq, id);
+		mutex_unlock(&dpa_stats->lock);
+		return -EDOM;
+	}
+
+	/* Acquire a preallocated Counter Control Block */
+	new_cnt = &dpa_stats->cnts_cb[id];
+	new_cnt->id = id;
+	new_cnt->index = i;
+
+	/* Store on the current position the counter id */
+	dpa_stats->used_cnt_ids[i] = id;
+
+	/* Release DPA Stats instance lock */
+	mutex_unlock(&dpa_stats->lock);
+
+	*cnt_cb = new_cnt;
+
+	return 0;
+}
+
+/*
+ * Reserve a free request id and hand back its preallocated control block.
+ * Mirrors get_new_cnt() but draws from the request id queue and the
+ * fixed-size DPA_STATS_MAX_NUM_OF_REQUESTS bookkeeping array.
+ * On success *req_cb points to the reserved block and *dpa_stats_req_id
+ * receives the id. Returns 0 or -EDOM on exhaustion.
+ */
+static int get_new_req(struct dpa_stats *dpa_stats,
+		       int *dpa_stats_req_id,
+		       struct dpa_stats_req_cb **req_cb)
+{
+	struct dpa_stats_req_cb *new_req;
+	uint32_t id;
+	int i;
+
+	/* Acquire DPA Stats instance lock */
+	mutex_lock(&dpa_stats->lock);
+
+	/* Get an id for a new request */
+	if (cq_get_4bytes(dpa_stats->req_id_cq, &id) < 0) {
+		pr_err("No more unused request ids\n");
+		mutex_unlock(&dpa_stats->lock);
+		return -EDOM;
+	}
+
+	/*
+	 * Use 'used_req_ids' array in order to store requests ids that are
+	 * 'in use' . Array can be further used to remove requests
+	 */
+	for (i = 0; i < DPA_STATS_MAX_NUM_OF_REQUESTS; i++)
+		if (dpa_stats->used_req_ids[i] == DPA_OFFLD_INVALID_OBJECT_ID)
+			break;
+
+	/* Defensive: the cq should never hand out more ids than slots */
+	if (i == DPA_STATS_MAX_NUM_OF_REQUESTS) {
+		pr_err("All requests have been used\n");
+		/* Undo the reservation before bailing out */
+		cq_put_4bytes(dpa_stats->req_id_cq, id);
+		mutex_unlock(&dpa_stats->lock);
+		return -EDOM;
+	}
+
+	/* Acquire a preallocated Request Control Block */
+	new_req = &dpa_stats->reqs_cb[id];
+	new_req->id = id;
+	new_req->index = i;
+
+	/* Store on the current position the request id */
+	dpa_stats->used_req_ids[i] = id;
+
+	/* Release DPA Stats instance lock */
+	mutex_unlock(&dpa_stats->lock);
+
+	*req_cb = new_req;
+	*dpa_stats_req_id = id;
+
+	return 0;
+}
+
+/*
+ * Return a counter id to the free-ids circular queue and scrub the counter
+ * control block so it can be reused.
+ * Returns 0 on success, -EDOM if the id could not be pushed back.
+ */
+static int put_cnt(struct dpa_stats *dpa_stats, struct dpa_stats_cnt_cb *cnt_cb)
+{
+	int err = 0;
+
+	/* Acquire DPA Stats instance lock */
+	mutex_lock(&dpa_stats->lock);
+
+	/* Release the Counter id in the Counter IDs circular queue */
+	err = cq_put_4bytes(dpa_stats->cnt_id_cq, cnt_cb->id);
+	if (err < 0) {
+		pr_err("Could not release the counter id %d\n", cnt_cb->id);
+		/*
+		 * BUGFIX: the instance lock must also be dropped on this
+		 * error path (put_req already does so); otherwise the next
+		 * mutex_lock() on dpa_stats->lock deadlocks.
+		 */
+		mutex_unlock(&dpa_stats->lock);
+		return -EDOM;
+	}
+
+	/* Mark the Counter id as 'not used' */
+	dpa_stats->used_cnt_ids[cnt_cb->index] =
+			DPA_OFFLD_INVALID_OBJECT_ID;
+
+	/* Clear all 'cnt_cb' information */
+	cnt_cb->index = DPA_OFFLD_INVALID_OBJECT_ID;
+	cnt_cb->id = DPA_STATS_MAX_NUM_OF_COUNTERS;
+	cnt_cb->bytes_num = 0;
+	cnt_cb->f_get_cnt_stats = NULL;
+
+	/* Reset only the type-specific portion of the control block */
+	switch (cnt_cb->type) {
+	case DPA_STATS_CNT_ETH:
+	case DPA_STATS_CNT_REASS:
+	case DPA_STATS_CNT_FRAG:
+	case DPA_STATS_CNT_POLICER:
+		memset(&cnt_cb->gen_cb, 0, sizeof(cnt_cb->gen_cb));
+		break;
+	case DPA_STATS_CNT_CLASSIF_TBL:
+		memset(&cnt_cb->tbl_cb, 0, sizeof(cnt_cb->tbl_cb));
+		break;
+	case DPA_STATS_CNT_CLASSIF_NODE:
+		memset(&cnt_cb->ccnode_cb, 0, sizeof(cnt_cb->ccnode_cb));
+		break;
+	case DPA_STATS_CNT_IPSEC:
+		memset(&cnt_cb->ipsec_cb, 0, sizeof(cnt_cb->ipsec_cb));
+		break;
+	default:
+		break;
+	}
+
+	/* Release DPA Stats instance lock */
+	mutex_unlock(&dpa_stats->lock);
+
+	return 0;
+}
+
+/*
+ * Return a request id to the free-ids circular queue and reset the request
+ * control block for reuse.
+ * Returns 0 on success, -EDOM if the id could not be pushed back.
+ */
+static int put_req(struct dpa_stats *dpa_stats, struct dpa_stats_req_cb *req_cb)
+{
+	int err = 0;
+
+	/* Acquire DPA Stats instance lock */
+	mutex_lock(&dpa_stats->lock);
+
+	/* Release the request id in the Request IDs circular queue */
+	err = cq_put_4bytes(dpa_stats->req_id_cq, req_cb->id);
+	if (err < 0) {
+		/* BUGFIX: this is a request id, not a counter id */
+		pr_err("Could not release the request id %d\n", req_cb->id);
+		mutex_unlock(&dpa_stats->lock);
+		return -EDOM;
+	}
+
+	/* Mark the request id as 'not used' */
+	dpa_stats->used_req_ids[req_cb->index] =
+			DPA_OFFLD_INVALID_OBJECT_ID;
+
+	/* Clear all 'req_cb' information by setting them to a maximum value */
+	req_cb->index = DPA_OFFLD_INVALID_OBJECT_ID;
+	req_cb->id = DPA_STATS_MAX_NUM_OF_REQUESTS;
+	req_cb->bytes_num = 0;
+	req_cb->cnts_num = 0;
+	req_cb->request_area = NULL;
+	req_cb->request_done = NULL;
+
+	/* Release DPA Stats instance lock */
+	mutex_unlock(&dpa_stats->lock);
+
+	return 0;
+}
+
+/*
+ * Allocate the per-instance counter resources: the free-ids circular
+ * queue, the 'used ids' bookkeeping array and the preallocated counter
+ * control blocks. On failure the partially allocated resources are left
+ * in place — presumably the caller unwinds via free_cnts_resources();
+ * TODO confirm against the (not visible here) init path.
+ */
+static int init_cnts_resources(struct dpa_stats *dpa_stats)
+{
+	struct dpa_stats_params config = dpa_stats->config;
+	int i;
+
+	/* Create circular queue that holds free counter IDs */
+	dpa_stats->cnt_id_cq = cq_new(config.max_counters, sizeof(int));
+	if (!dpa_stats->cnt_id_cq) {
+		pr_err("Could not create Counter IDs circular queue\n");
+		return -ENOMEM;
+	}
+
+	/* Fill the circular queue with ids */
+	for (i = 0; i < config.max_counters; i++)
+		if (cq_put_4bytes(dpa_stats->cnt_id_cq, i) < 0) {
+			pr_err("Could not fill Counter IDs circular queue\n");
+			return -EDOM;
+		}
+
+	/* Allocate array to store counter ids that are 'in use' */
+	dpa_stats->used_cnt_ids = kmalloc(
+			config.max_counters * sizeof(uint32_t), GFP_KERNEL);
+	if (!dpa_stats->used_cnt_ids) {
+		pr_err("No more memory for used counter ids array\n");
+		return -ENOMEM;
+	}
+	/*
+	 * memset() fills byte-wise: this only yields valid uint32_t values
+	 * because DPA_OFFLD_INVALID_OBJECT_ID is presumably -1 (0xFF in
+	 * every byte) — confirm against the offload API header.
+	 */
+	memset(dpa_stats->used_cnt_ids, DPA_OFFLD_INVALID_OBJECT_ID,
+	       config.max_counters * sizeof(uint32_t));
+
+	/* Allocate array to store counters control blocks */
+	dpa_stats->cnts_cb = kzalloc(config.max_counters *
+			sizeof(struct dpa_stats_cnt_cb), GFP_KERNEL);
+	if (!dpa_stats->cnts_cb) {
+		pr_err("No more memory for used counters control blocks\n");
+		return -ENOMEM;
+	}
+
+	/* Prepare each control block: per-counter lock and back-pointer */
+	for (i = 0; i < config.max_counters; i++) {
+		mutex_init(&dpa_stats->cnts_cb[i].lock);
+		dpa_stats->cnts_cb[i].dpa_stats = dpa_stats;
+		dpa_stats->cnts_cb[i].index = DPA_OFFLD_INVALID_OBJECT_ID;
+	}
+
+	return 0;
+}
+
+/*
+ * Release every resource allocated by init_cnts_resources(): outstanding
+ * counter ids, the free-ids circular queue, the control-block array and
+ * the 'used ids' array. Returns 0 on success or the first put_cnt() error.
+ */
+static int free_cnts_resources(struct dpa_stats *dpa_stats)
+{
+	uint32_t id, i;
+	int err = 0;
+
+	for (i = 0; i < dpa_stats->config.max_counters; i++) {
+		/* Snapshot the slot under the instance lock */
+		mutex_lock(&dpa_stats->lock);
+		id = dpa_stats->used_cnt_ids[i];
+		mutex_unlock(&dpa_stats->lock);
+
+		/*
+		 * Braced so the error check unambiguously applies only to
+		 * slots that were actually in use (the original unbraced
+		 * form tested a stale 'err' for unused slots as well).
+		 */
+		if (id != DPA_OFFLD_INVALID_OBJECT_ID) {
+			/* Release the counter id in the Counter IDs cq */
+			err = put_cnt(dpa_stats, &dpa_stats->cnts_cb[id]);
+			if (err < 0) {
+				pr_err("Failed to release a counter id\n");
+				return err;
+			}
+		}
+	}
+
+	/* Release counters IDs circular queue */
+	if (dpa_stats->cnt_id_cq) {
+		cq_delete(dpa_stats->cnt_id_cq);
+		dpa_stats->cnt_id_cq = NULL;
+	}
+
+	/* Release counters control blocks */
+	kfree(dpa_stats->cnts_cb);
+	dpa_stats->cnts_cb = NULL;
+
+	/* Release counters 'used ids' array */
+	kfree(dpa_stats->used_cnt_ids);
+	dpa_stats->used_cnt_ids = NULL;
+
+	return 0;
+}
+
+/*
+ * Allocate the per-instance request resources: the asynchronous-request
+ * workqueue, the free request-ids circular queue, the 'used ids' array,
+ * the request control blocks and each request's counter-ids array.
+ * On failure partially allocated resources are left in place — presumably
+ * the caller unwinds via free_reqs_resources(); TODO confirm.
+ */
+static int init_reqs_resources(struct dpa_stats *dpa_stats)
+{
+	int i;
+
+	/*
+	 * Create work queue to defer work when asynchronous
+	 * counters requests are received
+	 */
+	dpa_stats->async_req_workqueue = alloc_workqueue("async_req_workqueue",
+			WQ_UNBOUND | WQ_MEM_RECLAIM, WORKQUEUE_MAX_ACTIVE);
+	if (!dpa_stats->async_req_workqueue) {
+		pr_err("Creating async request work queue failed\n");
+		return -ENOSPC;
+	}
+
+	/* Create circular queue that holds free counter request IDs */
+	dpa_stats->req_id_cq = cq_new(
+			DPA_STATS_MAX_NUM_OF_REQUESTS, sizeof(int));
+	if (!dpa_stats->req_id_cq) {
+		pr_err("Could not create Request IDs circular queue\n");
+		return -ENOMEM;
+	}
+
+	/* Fill the circular queue with ids */
+	for (i = 0; i < DPA_STATS_MAX_NUM_OF_REQUESTS; i++)
+		if (cq_put_4bytes(dpa_stats->req_id_cq, i) < 0) {
+			pr_err("Could not fill Request IDs circular queue\n");
+			return -EDOM;
+		}
+
+	/* Allocate array to store requests ids that are 'in use' */
+	dpa_stats->used_req_ids = kmalloc(DPA_STATS_MAX_NUM_OF_REQUESTS *
+			sizeof(uint32_t), GFP_KERNEL);
+	if (!dpa_stats->used_req_ids) {
+		pr_err("No more memory for used req ids array\n");
+		return -ENOMEM;
+	}
+	/*
+	 * Byte-wise fill; valid only if DPA_OFFLD_INVALID_OBJECT_ID is -1
+	 * (0xFF in every byte) — see matching note in init_cnts_resources().
+	 */
+	memset(dpa_stats->used_req_ids, DPA_OFFLD_INVALID_OBJECT_ID,
+	       DPA_STATS_MAX_NUM_OF_REQUESTS * sizeof(uint32_t));
+
+	/* Allocate array to store requests control blocks */
+	dpa_stats->reqs_cb = kzalloc(DPA_STATS_MAX_NUM_OF_REQUESTS *
+			sizeof(struct dpa_stats_req_cb), GFP_KERNEL);
+	if (!dpa_stats->reqs_cb) {
+		pr_err("No more memory for requests control blocks\n");
+		return -ENOMEM;
+	}
+
+	/* Allocate array to store the counter ids */
+	for (i = 0; i < DPA_STATS_MAX_NUM_OF_REQUESTS; i++) {
+		dpa_stats->reqs_cb[i].config.cnts_ids =
+			kzalloc(DPA_STATS_MAX_NUM_OF_COUNTERS *
+				sizeof(int), GFP_KERNEL);
+		if (!dpa_stats->reqs_cb[i].config.cnts_ids) {
+			pr_err("No more memory for array of counter ids\n");
+			return -ENOMEM;
+		}
+
+		/* Initialize work to be done for each request */
+		INIT_WORK(&dpa_stats->reqs_cb[i].async_req_work,
+			  async_req_work_func);
+	}
+
+	return 0;
+}
+
+/*
+ * Release every resource allocated by init_reqs_resources(). In-flight
+ * asynchronous work is flushed before each active request is released,
+ * and the workqueue itself is destroyed last so no work item can run
+ * against freed control blocks.
+ * Returns 0 on success or the first put_req() error.
+ */
+static int free_reqs_resources(struct dpa_stats *dpa_stats)
+{
+	struct dpa_stats_req_cb *req_cb = NULL;
+	uint32_t id, i;
+	int err = 0;
+
+	for (i = 0; i < DPA_STATS_MAX_NUM_OF_REQUESTS; i++) {
+		/* Snapshot the slot under the instance lock */
+		mutex_lock(&dpa_stats->lock);
+		id = dpa_stats->used_req_ids[i];
+		mutex_unlock(&dpa_stats->lock);
+
+		if (id != DPA_OFFLD_INVALID_OBJECT_ID) {
+			req_cb = &dpa_stats->reqs_cb[id];
+
+			/* Wait for any deferred async work on this request */
+			flush_work(&req_cb->async_req_work);
+
+			/* Release the request id in the Requests IDs cq */
+			err = put_req(dpa_stats, req_cb);
+			if (err < 0) {
+				pr_err("Failed to release a request id\n");
+				return err;
+			}
+
+			/* Release the array of counter ids */
+			kfree(req_cb->config.cnts_ids);
+			req_cb->config.cnts_ids = NULL;
+		}
+	}
+
+	/* Release requests IDs circular queue */
+	if (dpa_stats->req_id_cq) {
+		cq_delete(dpa_stats->req_id_cq);
+		dpa_stats->req_id_cq = NULL;
+	}
+
+	/* Release requests control blocks */
+	kfree(dpa_stats->reqs_cb);
+	dpa_stats->reqs_cb = NULL;
+
+	/* Release requests 'used ids' array */
+	kfree(dpa_stats->used_req_ids);
+	dpa_stats->used_req_ids = NULL;
+
+	/* destroy asynchronous requests workqueue */
+	if (dpa_stats->async_req_workqueue) {
+		destroy_workqueue(dpa_stats->async_req_workqueue);
+		dpa_stats->async_req_workqueue = NULL;
+	}
+
+	return 0;
+}
+
+/* Tear down the global DPA Stats instance and release all its resources. */
+static void free_resources(void)
+{
+	struct dpa_stats *dpa_stats = gbl_dpa_stats;
+
+	/* Nothing to do if the module was never initialized */
+	if (!dpa_stats) {
+		pr_err("DPA stats instance is not initialized\n");
+		return;
+	}
+
+	/* Counters first, then requests, then the instance itself */
+	free_cnts_resources(dpa_stats);
+	free_reqs_resources(dpa_stats);
+
+	kfree(dpa_stats);
+	gbl_dpa_stats = NULL;
+}
+
+/*
+ * Execute a counters request: for every counter id in the request, invoke
+ * the counter's retrieval routine and accumulate the number of bytes and
+ * counters successfully written. Whether the run succeeds or fails, the
+ * scheduled counters are made available again via unblock_sched_cnts().
+ * Counter ids are indexed directly into cnts_cb — presumably validated
+ * when the request was created; TODO confirm at the call sites.
+ * Returns 0 on success or the first retrieval error.
+ */
+static int treat_cnts_request(struct dpa_stats *dpa_stats,
+			      struct dpa_stats_req_cb *req_cb)
+{
+	struct dpa_stats_cnt_request_params params = req_cb->config;
+	struct dpa_stats_cnt_cb *cnt_cb = NULL;
+	int id = 0, err = 0;
+	uint32_t i = 0;
+
+	for (i = 0; i < params.cnts_ids_len; i++) {
+		id = params.cnts_ids[i];
+
+		/* Get counter's control block */
+		cnt_cb = &dpa_stats->cnts_cb[id];
+
+		/* Acquire counter lock: serializes concurrent retrievals */
+		mutex_lock(&cnt_cb->lock);
+
+		/* Propagate the request-wide reset flag to this counter */
+		cnt_cb->info.reset = req_cb->config.reset_cnts;
+
+		/* Call counter's retrieve function */
+		err = cnt_cb->f_get_cnt_stats(req_cb, cnt_cb);
+		if (err < 0) {
+			pr_err("Failed to retrieve counter values\n");
+			mutex_unlock(&cnt_cb->lock);
+			/* Unblock even on failure so counters aren't stuck */
+			unblock_sched_cnts(dpa_stats, params.cnts_ids,
+					   params.cnts_ids_len);
+			return err;
+		}
+
+		/*
+		 * Update number of bytes and number of counters
+		 * successfully written so far
+		 */
+		req_cb->bytes_num += cnt_cb->bytes_num;
+		req_cb->cnts_num += 1;
+
+		mutex_unlock(&cnt_cb->lock);
+	}
+
+	unblock_sched_cnts(dpa_stats, params.cnts_ids, params.cnts_ids_len);
+
+	return 0;
+}
+
+/*
+ * Build the translation table from DPA_STATS_CNT_ETH_* selection bits to
+ * byte offsets inside t_FmMacStatistics: entry k corresponds to bit k of
+ * the Ethernet counter selection mask (see cnt_sel_to_stats()).
+ */
+static void create_cnt_eth_stats(struct dpa_stats *dpa_stats)
+{
+	/* DPA_STATS_CNT_ETH_DROP_PKTS */
+	dpa_stats->stats_sel[DPA_STATS_CNT_ETH][0] =
+			offsetof(struct t_FmMacStatistics, eStatsDropEvents);
+	/* DPA_STATS_CNT_ETH_BYTES */
+	dpa_stats->stats_sel[DPA_STATS_CNT_ETH][1] =
+			offsetof(struct t_FmMacStatistics, ifInOctets);
+	/* DPA_STATS_CNT_ETH_PKTS */
+	dpa_stats->stats_sel[DPA_STATS_CNT_ETH][2] =
+			offsetof(struct t_FmMacStatistics, ifInPkts);
+	/* DPA_STATS_CNT_ETH_BC_PKTS */
+	dpa_stats->stats_sel[DPA_STATS_CNT_ETH][3] =
+			offsetof(struct t_FmMacStatistics, ifInBcastPkts);
+	/* DPA_STATS_CNT_ETH_MC_PKTS */
+	dpa_stats->stats_sel[DPA_STATS_CNT_ETH][4] =
+			offsetof(struct t_FmMacStatistics, ifInMcastPkts);
+	/* DPA_STATS_CNT_ETH_CRC_ALIGN_ERR */
+	dpa_stats->stats_sel[DPA_STATS_CNT_ETH][5] =
+			offsetof(struct t_FmMacStatistics, eStatCRCAlignErrors);
+	/* DPA_STATS_CNT_ETH_UNDERSIZE_PKTS */
+	dpa_stats->stats_sel[DPA_STATS_CNT_ETH][6] =
+			offsetof(struct t_FmMacStatistics, eStatUndersizePkts);
+	/* DPA_STATS_CNT_ETH_OVERSIZE_PKTS */
+	dpa_stats->stats_sel[DPA_STATS_CNT_ETH][7] =
+			offsetof(struct t_FmMacStatistics, eStatOversizePkts);
+	/* DPA_STATS_CNT_ETH_FRAGMENTS */
+	dpa_stats->stats_sel[DPA_STATS_CNT_ETH][8] =
+			offsetof(struct t_FmMacStatistics, eStatFragments);
+	/* DPA_STATS_CNT_ETH_JABBERS */
+	dpa_stats->stats_sel[DPA_STATS_CNT_ETH][9] =
+			offsetof(struct t_FmMacStatistics, eStatJabbers);
+	/* DPA_STATS_CNT_ETH_64BYTE_PKTS */
+	dpa_stats->stats_sel[DPA_STATS_CNT_ETH][10] =
+			offsetof(struct t_FmMacStatistics, eStatPkts64);
+	/* DPA_STATS_CNT_ETH_65_127BYTE_PKTS */
+	dpa_stats->stats_sel[DPA_STATS_CNT_ETH][11] =
+			offsetof(struct t_FmMacStatistics, eStatPkts65to127);
+	/* DPA_STATS_CNT_ETH_128_255BYTE_PKTS */
+	dpa_stats->stats_sel[DPA_STATS_CNT_ETH][12] =
+			offsetof(struct t_FmMacStatistics, eStatPkts128to255);
+	/* DPA_STATS_CNT_ETH_256_511BYTE_PKTS */
+	dpa_stats->stats_sel[DPA_STATS_CNT_ETH][13] =
+			offsetof(struct t_FmMacStatistics, eStatPkts256to511);
+	/* DPA_STATS_CNT_ETH_512_1023BYTE_PKTS */
+	dpa_stats->stats_sel[DPA_STATS_CNT_ETH][14] =
+			offsetof(struct t_FmMacStatistics, eStatPkts512to1023);
+	/* DPA_STATS_CNT_ETH_1024_1518BYTE_PKTS */
+	dpa_stats->stats_sel[DPA_STATS_CNT_ETH][15] =
+			offsetof(struct t_FmMacStatistics, eStatPkts1024to1518);
+	/* DPA_STATS_CNT_ETH_OUT_PKTS */
+	dpa_stats->stats_sel[DPA_STATS_CNT_ETH][16] =
+			offsetof(struct t_FmMacStatistics, ifOutPkts);
+	/* DPA_STATS_CNT_ETH_OUT_DROP_PKTS */
+	dpa_stats->stats_sel[DPA_STATS_CNT_ETH][17] =
+			offsetof(struct t_FmMacStatistics, ifOutDiscards);
+	/* DPA_STATS_CNT_ETH_OUT_BYTES */
+	dpa_stats->stats_sel[DPA_STATS_CNT_ETH][18] =
+			offsetof(struct t_FmMacStatistics, ifOutOctets);
+	/* DPA_STATS_CNT_ETH_IN_ERRORS */
+	dpa_stats->stats_sel[DPA_STATS_CNT_ETH][19] =
+			offsetof(struct t_FmMacStatistics, ifInErrors);
+	/* DPA_STATS_CNT_ETH_OUT_ERRORS */
+	dpa_stats->stats_sel[DPA_STATS_CNT_ETH][20] =
+			offsetof(struct t_FmMacStatistics, ifOutErrors);
+	/* DPA_STATS_CNT_ETH_IN_UNICAST_PKTS : not supported on dTSEC MAC */
+	dpa_stats->stats_sel[DPA_STATS_CNT_ETH][21] =
+			offsetof(struct t_FmMacStatistics, ifInUcastPkts);
+	/* DPA_STATS_CNT_ETH_OUT_UNICAST_PKTS : not supported on dTSEC MAC*/
+	dpa_stats->stats_sel[DPA_STATS_CNT_ETH][22] =
+			offsetof(struct t_FmMacStatistics, ifOutUcastPkts);
+}
+
+/*
+ * Build the translation table from DPA_STATS_CNT_REASS_* selection bits to
+ * byte offsets inside t_FmPcdManipReassemIpStats; entry k corresponds to
+ * bit k of the selection mask. specificHdrStatistics[0] holds the IPv4
+ * counters, [1] the IPv6 counters.
+ * NOTE(review): indices 6 and 14 are deliberately left unset — presumably
+ * gaps in the selection enum separating the general, IPv4 and IPv6 groups;
+ * confirm against the dpa_stats_cnt_reass selection definitions.
+ */
+static void create_cnt_reass_stats(struct dpa_stats *dpa_stats)
+{
+	/* DPA_STATS_CNT_REASS_TIMEOUT */
+	dpa_stats->stats_sel[DPA_STATS_CNT_REASS][0] =
+			offsetof(struct t_FmPcdManipReassemIpStats, timeout);
+	/* DPA_STATS_CNT_REASS_RFD_POOL_BUSY */
+	dpa_stats->stats_sel[DPA_STATS_CNT_REASS][1] = offsetof(
+			struct t_FmPcdManipReassemIpStats, rfdPoolBusy);
+	/* DPA_STATS_CNT_REASS_INT_BUFF_BUSY */
+	dpa_stats->stats_sel[DPA_STATS_CNT_REASS][2] = offsetof(
+			struct t_FmPcdManipReassemIpStats, internalBufferBusy);
+	/* DPA_STATS_CNT_REASS_EXT_BUFF_BUSY */
+	dpa_stats->stats_sel[DPA_STATS_CNT_REASS][3] = offsetof(
+			struct t_FmPcdManipReassemIpStats, externalBufferBusy);
+	/* DPA_STATS_CNT_REASS_SG_FRAGS */
+	dpa_stats->stats_sel[DPA_STATS_CNT_REASS][4] = offsetof(
+			struct t_FmPcdManipReassemIpStats, sgFragments);
+	/* DPA_STATS_CNT_REASS_DMA_SEM */
+	dpa_stats->stats_sel[DPA_STATS_CNT_REASS][5] = offsetof(
+			struct t_FmPcdManipReassemIpStats,
+			dmaSemaphoreDepletion);
+	/* DPA_STATS_CNT_REASS_IPv4_FRAMES */
+	dpa_stats->stats_sel[DPA_STATS_CNT_REASS][7] = offsetof(
+			struct t_FmPcdManipReassemIpStats,
+			specificHdrStatistics[0].successfullyReassembled);
+	/* DPA_STATS_CNT_REASS_IPv4_FRAGS_VALID */
+	dpa_stats->stats_sel[DPA_STATS_CNT_REASS][8] = offsetof(
+			struct t_FmPcdManipReassemIpStats,
+			specificHdrStatistics[0].validFragments);
+	/* DPA_STATS_CNT_REASS_IPv4_FRAGS_TOTAL */
+	dpa_stats->stats_sel[DPA_STATS_CNT_REASS][9] = offsetof(
+			struct t_FmPcdManipReassemIpStats,
+			specificHdrStatistics[0].processedFragments);
+	/* DPA_STATS_CNT_REASS_IPv4_FRAGS_MALFORMED */
+	dpa_stats->stats_sel[DPA_STATS_CNT_REASS][10] = offsetof(
+			struct t_FmPcdManipReassemIpStats,
+			specificHdrStatistics[0].malformedFragments);
+	/* DPA_STATS_CNT_REASS_IPv4_FRAGS_DISCARDED */
+	dpa_stats->stats_sel[DPA_STATS_CNT_REASS][11] = offsetof(
+			struct t_FmPcdManipReassemIpStats,
+			specificHdrStatistics[0].discardedFragments);
+	/* DPA_STATS_CNT_REASS_IPv4_AUTOLEARN_BUSY */
+	dpa_stats->stats_sel[DPA_STATS_CNT_REASS][12] = offsetof(
+			struct t_FmPcdManipReassemIpStats,
+			specificHdrStatistics[0].autoLearnBusy);
+	/* DPA_STATS_CNT_REASS_IPv4_EXCEED_16FRAGS */
+	dpa_stats->stats_sel[DPA_STATS_CNT_REASS][13] = offsetof(
+			struct t_FmPcdManipReassemIpStats,
+			specificHdrStatistics[0].moreThan16Fragments);
+	/* DPA_STATS_CNT_REASS_IPv6_FRAMES */
+	dpa_stats->stats_sel[DPA_STATS_CNT_REASS][15] = offsetof(
+			struct t_FmPcdManipReassemIpStats,
+			specificHdrStatistics[1].successfullyReassembled);
+	/* DPA_STATS_CNT_REASS_IPv6_FRAGS_VALID */
+	dpa_stats->stats_sel[DPA_STATS_CNT_REASS][16] = offsetof(
+			struct t_FmPcdManipReassemIpStats,
+			specificHdrStatistics[1].validFragments);
+	/* DPA_STATS_CNT_REASS_IPv6_FRAGS_TOTAL */
+	dpa_stats->stats_sel[DPA_STATS_CNT_REASS][17] = offsetof(
+			struct t_FmPcdManipReassemIpStats,
+			specificHdrStatistics[1].processedFragments);
+	/* DPA_STATS_CNT_REASS_IPv6_FRAGS_MALFORMED */
+	dpa_stats->stats_sel[DPA_STATS_CNT_REASS][18] = offsetof(
+			struct t_FmPcdManipReassemIpStats,
+			specificHdrStatistics[1].malformedFragments);
+	/* DPA_STATS_CNT_REASS_IPv6_FRAGS_DISCARDED */
+	dpa_stats->stats_sel[DPA_STATS_CNT_REASS][19] = offsetof(
+			struct t_FmPcdManipReassemIpStats,
+			specificHdrStatistics[1].discardedFragments);
+	/* DPA_STATS_CNT_REASS_IPv6_AUTOLEARN_BUSY */
+	dpa_stats->stats_sel[DPA_STATS_CNT_REASS][20] = offsetof(
+			struct t_FmPcdManipReassemIpStats,
+			specificHdrStatistics[1].autoLearnBusy);
+	/* DPA_STATS_CNT_REASS_IPv6_EXCEED_16FRAGS */
+	dpa_stats->stats_sel[DPA_STATS_CNT_REASS][21] = offsetof(
+			struct t_FmPcdManipReassemIpStats,
+			specificHdrStatistics[1].moreThan16Fragments);
+}
+
+/*
+ * Build the translation table from DPA_STATS_CNT_FRAG_* selection bits to
+ * byte offsets inside t_FmPcdManipFragIpStats.
+ */
+static void create_cnt_frag_stats(struct dpa_stats *dpa_stats)
+{
+	/* DPA_STATS_CNT_FRAG_TOTAL_FRAMES */
+	dpa_stats->stats_sel[DPA_STATS_CNT_FRAG][0] =
+			offsetof(struct t_FmPcdManipFragIpStats, totalFrames);
+	/* DPA_STATS_CNT_FRAG_FRAMES */
+	dpa_stats->stats_sel[DPA_STATS_CNT_FRAG][1] = offsetof(
+			struct t_FmPcdManipFragIpStats, fragmentedFrames);
+	/* DPA_STATS_CNT_FRAG_GEN_FRAGS */
+	dpa_stats->stats_sel[DPA_STATS_CNT_FRAG][2] = offsetof(
+			struct t_FmPcdManipFragIpStats, generatedFragments);
+}
+
+/*
+ * Build the translation table for policer counters. Unlike the other
+ * tables, these entries are FMD policer-profile counter enum values
+ * (e_FM_PCD_PLCR_PROFILE_*), not structure byte offsets.
+ */
+static void create_cnt_plcr_stats(struct dpa_stats *dpa_stats)
+{
+	/* DPA_STATS_CNT_PLCR_GREEN_PKTS */
+	dpa_stats->stats_sel[DPA_STATS_CNT_POLICER][0] =
+			e_FM_PCD_PLCR_PROFILE_GREEN_PACKET_TOTAL_COUNTER;
+	/* DPA_STATS_CNT_PLCR_YELLOW_PKTS */
+	dpa_stats->stats_sel[DPA_STATS_CNT_POLICER][1] =
+			e_FM_PCD_PLCR_PROFILE_YELLOW_PACKET_TOTAL_COUNTER;
+	/* DPA_STATS_CNT_PLCR_RED_PKTS */
+	dpa_stats->stats_sel[DPA_STATS_CNT_POLICER][2] =
+			e_FM_PCD_PLCR_PROFILE_RED_PACKET_TOTAL_COUNTER;
+	/* DPA_STATS_CNT_PLCR_RECOLOR_YELLOW_PKTS */
+	dpa_stats->stats_sel[DPA_STATS_CNT_POLICER][3] =
+			e_FM_PCD_PLCR_PROFILE_RECOLOURED_YELLOW_PACKET_TOTAL_COUNTER;
+	/* DPA_STATS_CNT_PLCR_RECOLOR_RED_PKTS */
+	dpa_stats->stats_sel[DPA_STATS_CNT_POLICER][4] =
+			e_FM_PCD_PLCR_PROFILE_RECOLOURED_RED_PACKET_TOTAL_COUNTER;
+}
+
+/*
+ * Build the translation table from DPA_STATS_CNT_CLASSIF_* selection bits
+ * to byte offsets inside t_FmPcdCcKeyStatistics. The frame-length range
+ * counters exist only on DPAA 1.1+ hardware, hence the version guard.
+ */
+static void create_classif_stats(struct dpa_stats *dpa_stats)
+{
+	/* DPA_STATS_CNT_CLASSIF_BYTES */
+	dpa_stats->stats_sel[DPA_STATS_CNT_CLASSIF_NODE][0] =
+			offsetof(struct t_FmPcdCcKeyStatistics, byteCount);
+	/* DPA_STATS_CNT_CLASSIF_PACKETS */
+	dpa_stats->stats_sel[DPA_STATS_CNT_CLASSIF_NODE][1] =
+			offsetof(struct t_FmPcdCcKeyStatistics, frameCount);
+#if (DPAA_VERSION >= 11)
+	/* DPA_STATS_CNT_CLASSIF_RANGE1 */
+	dpa_stats->stats_sel[DPA_STATS_CNT_CLASSIF_NODE][2] = offsetof(
+			struct t_FmPcdCcKeyStatistics,
+			frameLengthRangeCount[0]);
+	/* DPA_STATS_CNT_CLASSIF_RANGE2 */
+	dpa_stats->stats_sel[DPA_STATS_CNT_CLASSIF_NODE][3] = offsetof(
+			struct t_FmPcdCcKeyStatistics,
+			frameLengthRangeCount[1]);
+	/* DPA_STATS_CNT_CLASSIF_RANGE3 */
+	dpa_stats->stats_sel[DPA_STATS_CNT_CLASSIF_NODE][4] = offsetof(
+			struct t_FmPcdCcKeyStatistics,
+			frameLengthRangeCount[2]);
+	/* DPA_STATS_CNT_CLASSIF_RANGE4 */
+	dpa_stats->stats_sel[DPA_STATS_CNT_CLASSIF_NODE][5] = offsetof(
+			struct t_FmPcdCcKeyStatistics,
+			frameLengthRangeCount[3]);
+	/* DPA_STATS_CNT_CLASSIF_RANGE5 */
+	dpa_stats->stats_sel[DPA_STATS_CNT_CLASSIF_NODE][6] = offsetof(
+			struct t_FmPcdCcKeyStatistics,
+			frameLengthRangeCount[4]);
+	/* DPA_STATS_CNT_CLASSIF_RANGE6 */
+	dpa_stats->stats_sel[DPA_STATS_CNT_CLASSIF_NODE][7] = offsetof(
+			struct t_FmPcdCcKeyStatistics,
+			frameLengthRangeCount[5]);
+	/* DPA_STATS_CNT_CLASSIF_RANGE7 */
+	dpa_stats->stats_sel[DPA_STATS_CNT_CLASSIF_NODE][8] = offsetof(
+			struct t_FmPcdCcKeyStatistics,
+			frameLengthRangeCount[6]);
+	/* DPA_STATS_CNT_CLASSIF_RANGE8 */
+	dpa_stats->stats_sel[DPA_STATS_CNT_CLASSIF_NODE][9] = offsetof(
+			struct t_FmPcdCcKeyStatistics,
+			frameLengthRangeCount[7]);
+	/* DPA_STATS_CNT_CLASSIF_RANGE9 */
+	dpa_stats->stats_sel[DPA_STATS_CNT_CLASSIF_NODE][10] = offsetof(
+			struct t_FmPcdCcKeyStatistics,
+			frameLengthRangeCount[8]);
+	/* DPA_STATS_CNT_CLASSIF_RANGE10 */
+	dpa_stats->stats_sel[DPA_STATS_CNT_CLASSIF_NODE][11] = offsetof(
+			struct t_FmPcdCcKeyStatistics,
+			frameLengthRangeCount[9]);
+#endif
+}
+
+/*
+ * Build the translation table from IPSec counter selection bits to byte
+ * offsets inside struct dpa_ipsec_sa_stats.
+ */
+static void create_cnt_ipsec_stats(struct dpa_stats *dpa_stats)
+{
+	/* DPA_STATS_CNT_NUM_OF_BYTES */
+	dpa_stats->stats_sel[DPA_STATS_CNT_IPSEC][0] = offsetof(
+			struct dpa_ipsec_sa_stats, bytes_count);
+	/* DPA_STATS_CNT_NUM_OF_PACKETS */
+	dpa_stats->stats_sel[DPA_STATS_CNT_IPSEC][1] = offsetof(
+			struct dpa_ipsec_sa_stats, packets_count);
+}
+
+/*
+ * Deep-copy a lookup key descriptor: the key bytes are duplicated and,
+ * when present, the mask as well. On success the caller owns dst->byte
+ * and dst->mask and must kfree() them.
+ * Returns 0, -EINVAL on bad input, or -ENOMEM on allocation failure (in
+ * which case dst holds no allocations).
+ */
+static int copy_key_descriptor(const struct dpa_offload_lookup_key *src,
+			       struct dpa_offload_lookup_key *dst)
+{
+	/* Check that key byte pointer is valid */
+	if (!src->byte) {
+		pr_err("Key byte pointer can't be NULL\n");
+		return -EINVAL;
+	}
+
+	/* Check that key size is not zero */
+	if (src->size == 0) {
+		pr_err("Key size can't be zero\n");
+		return -EINVAL;
+	}
+
+	/* Allocate memory to store the key byte array */
+	dst->byte = kmalloc(src->size, GFP_KERNEL);
+	if (!dst->byte) {
+		pr_err("No more memory for key byte\n");
+		return -ENOMEM;
+	}
+	memcpy(dst->byte, src->byte, src->size);
+
+	/* If there is a valid key mask pointer */
+	if (src->mask) {
+		/* Allocate memory to store the key mask array */
+		dst->mask = kmalloc(src->size, GFP_KERNEL);
+		if (!dst->mask) {
+			pr_err("No more memory for key mask\n");
+			kfree(dst->byte);
+			/*
+			 * BUGFIX: clear the freed pointer so a caller that
+			 * cleans up 'dst' cannot double-free it.
+			 */
+			dst->byte = NULL;
+			return -ENOMEM;
+		}
+		memcpy(dst->mask, src->mask, src->size);
+	} else {
+		dst->mask = NULL;
+	}
+
+	/* Store the key size */
+	dst->size = src->size;
+
+	return 0;
+}
+
+/*
+ * Walk the device tree for nodes matching 'mac_name', find the one whose
+ * parent is 'parent_dev_node' and whose "cell-index" equals 'port_id',
+ * and return the FM MAC handle of its bound mac_device driver data.
+ * Returns NULL if no matching node (or its platform device) is found.
+ * NOTE(review): of_get_parent()/of_find_compatible_node() take node
+ * references that are never released here — confirm whether a leak fix
+ * (of_node_put) is warranted across this driver.
+ */
+static t_Handle get_fman_mac_handle(struct device_node *parent_dev_node,
+				    int port_id,
+				    char *mac_name)
+{
+	struct device_node *dev_node, *tmp_node = NULL;
+	struct platform_device *of_dev;
+	struct mac_device *mac_dev;
+	const uint32_t *cell_index;
+	int lenp;
+
+	while ((dev_node = of_find_compatible_node(tmp_node, NULL,
+						   mac_name)) != NULL) {
+		if (parent_dev_node == of_get_parent(dev_node)) {
+			cell_index = of_get_property(dev_node, "cell-index",
+						     &lenp);
+			/* BUGFIX: a node may lack "cell-index"; the old code
+			 * dereferenced the property pointer unconditionally */
+			if (cell_index && *cell_index == port_id) {
+				of_dev = of_find_device_by_node(dev_node);
+				/* BUGFIX: guard against an unbound node */
+				if (!of_dev)
+					return NULL;
+				mac_dev = dev_get_drvdata(&of_dev->dev);
+				return mac_dev->get_mac_handle(mac_dev);
+			}
+		}
+
+		tmp_node = dev_node;
+	}
+
+	return NULL;
+}
+
+/*
+ * Find the device-tree node of the FMan instance whose "cell-index"
+ * matches 'fman_id'. Returns the node (with an elevated OF refcount from
+ * of_find_compatible_node) or NULL if no FMan matches.
+ */
+static struct device_node *get_fman_dev_node(int fman_id)
+{
+	struct device_node *dev_node, *tmp_node = NULL;
+	const uint32_t *cell_index;
+	int lenp;
+
+	while ((dev_node = of_find_compatible_node(tmp_node, NULL, "fsl,fman"))
+			!= NULL) {
+		cell_index = of_get_property(dev_node, "cell-index", &lenp);
+		/* BUGFIX: a node may lack "cell-index"; the old code
+		 * dereferenced the property pointer unconditionally */
+		if (cell_index && *cell_index == fman_id)
+			break;
+
+		tmp_node = dev_node;
+	}
+
+	return dev_node;
+}
+
+static int get_fm_mac(struct dpa_stats_cnt_eth_src src, void **mac)
+{
+ struct device_node *dev_node = NULL;
+ t_Handle *fm_mac = NULL;
+ char *mac_name;
+
+ /* Get FMAN device node */
+ dev_node = get_fman_dev_node(src.engine_id);
+ if (!dev_node) {
+ pr_err("FMan device node couldn't be found\n");
+ return -EINVAL;
+ }
+
+ if (src.eth_id > DPA_STATS_ETH_1G_PORT5) {
+ /* Get Ethernet device node first for DTSEC case 10G port*/
+ mac_name = "fsl,fman-10g-mac";
+ src.eth_id -= DPA_STATS_ETH_10G_PORT0;
+
+ fm_mac = get_fman_mac_handle(dev_node, src.eth_id, mac_name);
+ if (!fm_mac) {
+ /* Get Ethernet device node for MEMAC case 10G port */
+ mac_name = "fsl,fman-memac";
+ fm_mac = get_fman_mac_handle(
+ dev_node, src.eth_id, mac_name);
+ if (!fm_mac) {
+ pr_err("Ethernet device node couldn't be found\n");
+ return -EINVAL;
+ }
+ }
+ } else {
+ /* Get Ethernet device node first for DTSEC case 1G port*/
+ mac_name = "fsl,fman-1g-mac";
+
+ fm_mac = get_fman_mac_handle(dev_node, src.eth_id, mac_name);
+ if (!fm_mac) {
+ /* Get Ethernet device node for MEMAC case 1G port*/
+ mac_name = "fsl,fman-memac";
+ fm_mac = get_fman_mac_handle(
+ dev_node, src.eth_id, mac_name);
+ if (!fm_mac) {
+ pr_err("Ethernet device node couldn't be found\n");
+ return -EINVAL;
+ }
+ }
+ }
+
+ /* Return FM MAC handle */
+ *mac = fm_mac;
+
+ return 0;
+}
+
+static void cnt_sel_to_stats(struct stats_info *stats_info,
+ int *stats_sel,
+ uint32_t cnt_sel)
+{
+ uint32_t bitVal = 0, bitPos = 0, cntPos = 1;
+
+ while (cnt_sel > 0) {
+ bitVal = cnt_sel & 0x00000001;
+ stats_info->stats_off[cntPos - bitVal] = stats_sel[bitPos++];
+ cntPos += bitVal;
+ cnt_sel >>= 1;
+ }
+
+ stats_info->stats_num = cntPos - 1;
+}
+
/*
 * Resolve and sanity-check the fragmentation manip handle for a
 * classifier table entry.
 *
 * Looks up @entry->key in table @td; the matching action must be an
 * enqueue, the only action type that carries a header manip descriptor
 * here. The fragmentation handle is stored in @entry->frag and probed
 * once with FM_PCD_ManipGetStatistics() to confirm it really is a
 * fragmentation manip. Returns 0 on success, -EINVAL on any failure.
 */
static int set_frag_manip(int td, struct dpa_stats_lookup_key *entry)
{
	struct dpa_cls_tbl_action action;
	struct t_FmPcdManipStats stats;
	int err = 0;

	err = dpa_classif_table_lookup_by_key(td, &entry->key, &action);
	if (err != 0) {
		pr_err("Unable to retrieve next action parameters\n");
		return -EINVAL;
	}

	if (action.type != DPA_CLS_TBL_ACTION_ENQ) {
		pr_err("Fragmentation statistics per flow are "
			"supported only for action enqueue\n");
		return -EINVAL;
	}

	entry->frag = dpa_classif_get_frag_hm_handle(action.enq_params.hmd);
	if (!entry->frag) {
		pr_err("Unable to retrieve fragmentation handle\n");
		return -EINVAL;
	}

	/* Check the user-provided fragmentation handle */
	err = FM_PCD_ManipGetStatistics(entry->frag, &stats);
	if (err < 0) {
		pr_err("Invalid Fragmentation manip handle\n");
		return -EINVAL;
	}

	return 0;
}
+
/*
 * Configure a single-instance Ethernet counter control block.
 *
 * Validates the counter selection, resolves the FM MAC handle for the
 * requested engine/port and maps the selected counters to FM MAC
 * statistics offsets. Returns 0 on success, -EFAULT when the DPA Stats
 * instance is missing, -EINVAL for bad parameters.
 */
static int set_cnt_eth_cb(struct dpa_stats_cnt_cb *cnt_cb,
			  const struct dpa_stats_cnt_params *params)
{
	struct dpa_stats *dpa_stats = cnt_cb->dpa_stats;
	uint32_t cnt_sel = params->eth_params.cnt_sel;
	t_Handle fm_mac = NULL;
	int err = 0;

	if (!dpa_stats) {
		pr_err("Invalid argument: NULL DPA Stats instance\n");
		return -EFAULT;
	}

	/* Check Ethernet counter selection */
	if (cnt_sel == 0 || cnt_sel > DPA_STATS_CNT_ETH_ALL) {
		pr_err("Invalid Ethernet counter selection\n");
		return -EINVAL;
	}

	/* Decrease one to obtain the mask for all statistics */
	if (cnt_sel == DPA_STATS_CNT_ETH_ALL)
		cnt_sel -= 1;

	/* Get FM MAC handle */
	err = get_fm_mac(params->eth_params.src, &fm_mac);
	if (err != 0) {
		pr_err("Could not obtain FM MAC handle!\n");
		return -EINVAL;
	}

	/* Single class member: the resolved MAC handle */
	cnt_cb->gen_cb.objs[0] = fm_mac;
	cnt_cb->members_num = 1;

	/* Map Ethernet counter selection to FM MAC statistics */
	cnt_sel_to_stats(&cnt_cb->info,
			dpa_stats->stats_sel[DPA_STATS_CNT_ETH], cnt_sel);

	/* Set number of bytes that will be written by this counter */
	cnt_cb->bytes_num = STATS_VAL_SIZE * cnt_cb->info.stats_num;

	return 0;
}
+
/*
 * Configure a single-instance Reassembly counter control block.
 *
 * The selection must stay within one statistics group (general, IPv4
 * or IPv6). The user-provided reassembly manip handle is stored and
 * probed once via FM_PCD_ManipGetStatistics() to confirm validity,
 * then the selection is mapped to manip statistics offsets. Returns 0
 * on success, -EFAULT/-EINVAL on invalid arguments.
 */
static int set_cnt_reass_cb(struct dpa_stats_cnt_cb *cnt_cb,
			    const struct dpa_stats_cnt_params *params)
{
	struct dpa_stats *dpa_stats = cnt_cb->dpa_stats;
	uint32_t cnt_sel = params->reass_params.cnt_sel;
	struct t_FmPcdManipStats stats;
	int err;

	if (!dpa_stats) {
		pr_err("Invalid argument: NULL DPA Stats instance\n");
		return -EFAULT;
	}

	/* User can combine counters only from a group */
	if (!((cnt_sel != 0 && cnt_sel <= DPA_STATS_CNT_REASS_GEN_ALL) ||
	    (cnt_sel >= DPA_STATS_CNT_REASS_IPv4_FRAMES &&
	    cnt_sel <= DPA_STATS_CNT_REASS_IPv4_ALL) ||
	    (cnt_sel >= DPA_STATS_CNT_REASS_IPv6_FRAMES &&
	    cnt_sel <= DPA_STATS_CNT_REASS_IPv6_ALL))) {
		pr_err("Invalid Reassembly counter selection\n");
		return -EINVAL;
	}

	/* Single class member: the reassembly manip handle */
	cnt_cb->gen_cb.objs[0] = params->reass_params.reass;
	cnt_cb->members_num = 1;

	/* Check the user-provided reassembly manip */
	err = FM_PCD_ManipGetStatistics(params->reass_params.reass, &stats);
	if (err < 0) {
		pr_err("Invalid Reassembly manip handle\n");
		return -EINVAL;
	}

	/* Based on user option, change mask to all statistics in one group
	 * ("ALL" values are one past the group's last bit, so subtracting
	 * one yields the group mask; the AND strips lower groups' bits) */
	if (cnt_sel == DPA_STATS_CNT_REASS_GEN_ALL)
		cnt_sel -= 1;
	else if (cnt_sel == DPA_STATS_CNT_REASS_IPv4_ALL)
		cnt_sel = (cnt_sel - 1) &
				~(DPA_STATS_CNT_REASS_IPv4_FRAMES - 1);
	else if (cnt_sel == DPA_STATS_CNT_REASS_IPv6_ALL)
		cnt_sel = (cnt_sel - 1) &
				~(DPA_STATS_CNT_REASS_IPv6_FRAMES - 1);

	/* Map Reassembly counter selection to Manip statistics */
	cnt_sel_to_stats(&cnt_cb->info,
			dpa_stats->stats_sel[DPA_STATS_CNT_REASS], cnt_sel);

	/* Set number of bytes that will be written by this counter */
	cnt_cb->bytes_num = STATS_VAL_SIZE * cnt_cb->info.stats_num;

	return 0;
}
+
+static int set_cnt_frag_cb(struct dpa_stats_cnt_cb *cnt_cb,
+ const struct dpa_stats_cnt_params *params)
+{
+ struct dpa_stats *dpa_stats = cnt_cb->dpa_stats;
+ uint32_t cnt_sel = params->frag_params.cnt_sel;
+ struct t_FmPcdManipStats stats;
+ int err;
+
+ if (!dpa_stats) {
+ pr_err("Invalid argument: NULL DPA Stats instance\n");
+ return -EFAULT;
+ }
+
+ /* Check Fragmentation counter selection */
+ if (cnt_sel == 0 || cnt_sel > DPA_STATS_CNT_FRAG_ALL) {
+ pr_err("Invalid Fragmentation counter selection\n");
+ return -EINVAL;
+ }
+
+ cnt_cb->gen_cb.objs[0] = params->frag_params.frag;
+ cnt_cb->members_num = 1;
+
+ /* Check the user-provided fragmentation handle */
+ err = FM_PCD_ManipGetStatistics(params->frag_params.frag, &stats);
+ if (err < 0) {
+ pr_err("Invalid Fragmentation manip handle\n");
+ return -EINVAL;
+ }
+
+ /* Decrease one to obtain the mask for all statistics */
+ if (cnt_sel == DPA_STATS_CNT_FRAG_ALL)
+ cnt_sel -= 1;
+
+ /* Map Fragmentation counter selection to Manip statistics */
+ cnt_sel_to_stats(&cnt_cb->info,
+ dpa_stats->stats_sel[DPA_STATS_CNT_FRAG], cnt_sel);
+
+ /* Set number of bytes that will be written by this counter */
+ cnt_cb->bytes_num = STATS_VAL_SIZE * cnt_cb->info.stats_num;
+
+ return 0;
+}
+
+static int set_cnt_plcr_cb(struct dpa_stats_cnt_cb *cnt_cb,
+ const struct dpa_stats_cnt_params *params)
+{
+ struct dpa_stats *dpa_stats = cnt_cb->dpa_stats;
+ uint32_t cnt_sel = params->reass_params.cnt_sel;
+
+ if (!dpa_stats) {
+ pr_err("Invalid argument: NULL DPA Stats instance\n");
+ return -EFAULT;
+ }
+
+ /* Check Policer counter selection */
+ if (cnt_sel == 0 || cnt_sel > DPA_STATS_CNT_PLCR_ALL) {
+ pr_err("Invalid Policer counter selection\n");
+ return -EINVAL;
+ }
+
+ cnt_cb->gen_cb.objs[0] = params->plcr_params.plcr;
+ cnt_cb->members_num = 1;
+
+ /* Decrease one to obtain the mask for all statistics */
+ if (cnt_sel == DPA_STATS_CNT_PLCR_ALL)
+ cnt_sel -= 1;
+
+ /* Map Policer counter selection to policer statistics */
+ cnt_sel_to_stats(&cnt_cb->info,
+ dpa_stats->stats_sel[DPA_STATS_CNT_POLICER], cnt_sel);
+
+ /* Set number of bytes that will be written by this counter */
+ cnt_cb->bytes_num = STATS_VAL_SIZE * cnt_cb->info.stats_num;
+
+ return 0;
+}
+
/*
 * Configure a single-instance Classifier Table counter control block.
 *
 * The selection may come from one of two disjoint groups: classifier
 * (bytes/frames) statistics or per-flow fragmentation statistics; the
 * group determines which retrieval routine and offset table are used.
 * The single lookup key is duplicated into the control block and
 * validated against the table's CcNode. Returns 0 on success,
 * -EFAULT/-EINVAL on invalid arguments.
 *
 * NOTE(review): on the error paths after copy_key_descriptor()
 * succeeds, the duplicated key memory is not freed here — presumably
 * the caller releases the control block; verify against the caller.
 */
static int set_cnt_classif_tbl_cb(struct dpa_stats_cnt_cb *cnt_cb,
				  const struct dpa_stats_cnt_params *params)
{
	struct dpa_stats_cnt_classif_tbl_cb *cnt_tbl_cb = &cnt_cb->tbl_cb;
	struct dpa_stats *dpa_stats = cnt_cb->dpa_stats;
	struct dpa_stats_cnt_classif_tbl prm = params->classif_tbl_params;
	struct dpa_cls_tbl_params cls_tbl;
	uint32_t cnt_sel = prm.cnt_sel;
	/* frag_stats records which selection group matched: 0 = classif,
	 * 1 = fragmentation */
	int err = 0, frag_stats = -1;

	if (!dpa_stats) {
		pr_err("Invalid argument: NULL DPA Stats instance\n");
		return -EFAULT;
	}

	/* Check Classifier Table counter selection */
	if (cnt_sel >= DPA_STATS_CNT_CLASSIF_BYTES &&
	    cnt_sel <= DPA_STATS_CNT_CLASSIF_ALL) {

		/* Entire group of counters was selected */
		if (cnt_sel == DPA_STATS_CNT_CLASSIF_ALL)
			cnt_sel -= 1;
		frag_stats = 0;

	} else if (cnt_sel >= DPA_STATS_CNT_FRAG_TOTAL_FRAMES &&
		   cnt_sel <= DPA_STATS_CNT_FRAG_ALL) {

		/* Entire group of counters was selected */
		if (cnt_sel == DPA_STATS_CNT_FRAG_ALL)
			cnt_sel -= 1;
		frag_stats = 1;

	} else {
		pr_err("Invalid Classifier Table counter selection\n");
		return -EINVAL;
	}

	err = dpa_classif_table_get_params(prm.td, &cls_tbl);
	if (err != 0) {
		pr_err("Invalid Classifier Table descriptor\n");
		return -EINVAL;
	}

	/* Copy the key descriptor */
	err = copy_key_descriptor(&prm.key, &cnt_tbl_cb->keys[0].key);
	if (err != 0) {
		pr_err("Unable to copy key descriptor\n");
		return -EINVAL;
	}

	/* Store CcNode handle and set number of keys to one */
	cnt_tbl_cb->keys[0].cc_node = cls_tbl.cc_node;
	cnt_tbl_cb->keys[0].valid = TRUE;
	cnt_cb->members_num = 1;

	/* Store DPA Classifier Table type */
	cnt_tbl_cb->type = cls_tbl.type;

	/* Check the Classifier Table counter */
	err = check_tbl_cls_counter(cnt_cb, &cnt_tbl_cb->keys[0]);
	if (err != 0)
		return -EINVAL;

	if (frag_stats) {
		/* Resolve and validate the per-flow frag manip handle */
		err = set_frag_manip(prm.td, &cnt_tbl_cb->keys[0]);
		if (err < 0) {
			pr_err("Invalid Fragmentation manip handle\n");
			return -EINVAL;
		}
		/* Map Classifier Table counter selection to Frag stats */
		cnt_sel_to_stats(&cnt_cb->info,
			dpa_stats->stats_sel[DPA_STATS_CNT_FRAG], cnt_sel);

		/* Change the retrieve routine */
		cnt_cb->f_get_cnt_stats = get_cnt_cls_tbl_frag_stats;
	} else
		/* Map Classifier Table counter selection to CcNode stats */
		cnt_sel_to_stats(&cnt_cb->info,
			dpa_stats->stats_sel[DPA_STATS_CNT_CLASSIF_NODE],
			cnt_sel >> CLASSIF_STATS_SHIFT);

	/* Set number of bytes that will be written by this counter */
	cnt_cb->bytes_num = STATS_VAL_SIZE * cnt_cb->info.stats_num;

	return 0;
}
+
/*
 * Configure a single-instance Classification Node counter control
 * block.
 *
 * Duplicates the lookup key, stores the CcNode handle and validates
 * the (node type, key) combination before mapping the selection to
 * CcNode statistics offsets. Returns 0 on success, -EFAULT/-EINVAL on
 * invalid arguments.
 *
 * NOTE(review): if check_ccnode_counter() fails, the key memory
 * duplicated by copy_key_descriptor() is not freed here — presumably
 * the caller releases the control block; verify against the caller.
 */
static int set_cnt_ccnode_cb(struct dpa_stats_cnt_cb *cnt_cb,
			     const struct dpa_stats_cnt_params *params)
{
	struct dpa_stats *dpa_stats = cnt_cb->dpa_stats;
	struct dpa_stats_cnt_classif_node prm = params->classif_node_params;
	int err = 0;

	if (!dpa_stats) {
		pr_err("Invalid argument: NULL DPA Stats instance\n");
		return -EFAULT;
	}

	/* Check Classification Node counter selection */
	if (prm.cnt_sel == 0 || prm.cnt_sel > DPA_STATS_CNT_CLASSIF_ALL) {
		pr_err("Invalid Classif_Node counter selection\n");
		return -EINVAL;
	}

	/* Copy the key descriptor */
	err = copy_key_descriptor(&prm.key, &cnt_cb->ccnode_cb.keys[0]);
	if (err != 0) {
		pr_err("Unable to copy key descriptor\n");
		return -EINVAL;
	}

	/* Store CcNode handle and set number of keys to one */
	cnt_cb->ccnode_cb.cc_node = prm.cc_node;
	cnt_cb->members_num = 1;

	/* Check the Classifier Node counter parameters */
	err = check_ccnode_counter(cnt_cb,
			prm.ccnode_type, &cnt_cb->ccnode_cb.keys[0]);
	if (err != 0) {
		pr_err("Invalid Classif Node counter parameters\n");
		return -EINVAL;
	}

	/* Map Classif Node counter selection to CcNode statistics
	 * (the shift converts classif bits to the offset-table index) */
	cnt_sel_to_stats(&cnt_cb->info,
			dpa_stats->stats_sel[DPA_STATS_CNT_CLASSIF_NODE],
			prm.cnt_sel >> CLASSIF_STATS_SHIFT);

	/* Set number of bytes that will be written by this counter */
	cnt_cb->bytes_num = STATS_VAL_SIZE * cnt_cb->info.stats_num;

	return 0;
}
+
+static int set_cnt_ipsec_cb(struct dpa_stats_cnt_cb *cnt_cb,
+ const struct dpa_stats_cnt_params *params)
+{
+ struct dpa_stats *dpa_stats = cnt_cb->dpa_stats;
+ struct dpa_ipsec_sa_stats stats;
+ uint32_t cnt_sel = params->ipsec_params.cnt_sel;
+ int err = 0;
+
+ if (!dpa_stats) {
+ pr_err("Invalid argument: NULL DPA Stats instance\n");
+ return -EFAULT;
+ }
+
+ /* Map IPSec counter selection to statistics */
+ if (cnt_sel == DPA_STATS_CNT_NUM_OF_BYTES) {
+ cnt_cb->info.stats_off[0] = dpa_stats->stats_sel[
+ DPA_STATS_CNT_IPSEC][DPA_STATS_CNT_NUM_OF_BYTES];
+ cnt_cb->info.stats_num = 1;
+ } else if (cnt_sel == DPA_STATS_CNT_NUM_OF_PACKETS) {
+ cnt_cb->info.stats_off[0] = dpa_stats->stats_sel[
+ DPA_STATS_CNT_IPSEC][DPA_STATS_CNT_NUM_OF_PACKETS];
+ cnt_cb->info.stats_num = 1;
+ } else if (cnt_sel == DPA_STATS_CNT_NUM_ALL) {
+ cnt_cb->info.stats_off[0] = dpa_stats->stats_sel[
+ DPA_STATS_CNT_IPSEC][DPA_STATS_CNT_NUM_OF_BYTES];
+ cnt_cb->info.stats_off[1] = dpa_stats->stats_sel[
+ DPA_STATS_CNT_IPSEC][DPA_STATS_CNT_NUM_OF_PACKETS];
+ cnt_cb->info.stats_num = 2;
+ } else {
+ pr_err("Invalid IPSec counter selection\n");
+ return -EINVAL;
+ }
+
+ cnt_cb->ipsec_cb.sa_id[0] = params->ipsec_params.sa_id;
+ cnt_cb->ipsec_cb.valid[0] = TRUE;
+ cnt_cb->members_num = 1;
+
+ err = dpa_ipsec_sa_get_stats(cnt_cb->ipsec_cb.sa_id[0], &stats);
+ if (err < 0) {
+ pr_err("Invalid IPSec counter parameters\n");
+ return -EINVAL;
+ }
+
+ /* Set number of bytes that will be written by this counter */
+ cnt_cb->bytes_num = STATS_VAL_SIZE * cnt_cb->info.stats_num;
+
+ return 0;
+}
+
+static int set_cls_cnt_eth_cb(struct dpa_stats_cnt_cb *cnt_cb,
+ const struct dpa_stats_cls_cnt_params *params)
+{
+ struct dpa_stats *dpa_stats = cnt_cb->dpa_stats;
+ uint32_t cnt_sel = params->eth_params.cnt_sel;
+ t_Handle fm_mac = NULL;
+ uint32_t i = 0;
+ int err = 0;
+
+ if (!dpa_stats) {
+ pr_err("Invalid argument: NULL DPA Stats instance\n");
+ return -EFAULT;
+ }
+
+ /* Check Ethernet counter selection */
+ if (params->eth_params.cnt_sel == 0 ||
+ params->eth_params.cnt_sel > DPA_STATS_CNT_ETH_ALL) {
+ pr_err("Invalid Ethernet counter selection\n");
+ return -EINVAL;
+ }
+
+ /* Decrease one to obtain the mask for all statistics */
+ if (cnt_sel == DPA_STATS_CNT_ETH_ALL)
+ cnt_sel -= 1;
+
+ for (i = 0; i < params->class_members; i++) {
+ /* 1G Port, DTSEC controller */
+ if (params->eth_params.src[i].eth_id <
+ DPA_STATS_ETH_10G_PORT0) {
+ if (cnt_sel & DPA_STATS_CNT_ETH_IN_UNICAST_PKTS ||
+ cnt_sel & DPA_STATS_CNT_ETH_OUT_UNICAST_PKTS) {
+ pr_err("Invalid Ethernet counter selection\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Get FM MAC handle */
+ err = get_fm_mac(params->eth_params.src[i], &fm_mac);
+ if (err != 0) {
+ pr_err("Could not obtain FM MAC handle!\n");
+ return -EINVAL;
+ }
+
+ cnt_cb->gen_cb.objs[i] = fm_mac;
+ }
+
+ cnt_cb->members_num = params->class_members;
+
+ /* Map Ethernet counter selection to FM MAC statistics */
+ cnt_sel_to_stats(&cnt_cb->info,
+ dpa_stats->stats_sel[DPA_STATS_CNT_ETH], cnt_sel);
+
+ /* Set number of bytes that will be written by this counter */
+ cnt_cb->bytes_num = cnt_cb->members_num *
+ STATS_VAL_SIZE * cnt_cb->info.stats_num;
+ return 0;
+}
+
/*
 * Configure a class (multi-member) Reassembly counter control block.
 *
 * Each class member supplies its own reassembly manip handle, which is
 * stored and probed for validity. The counter selection, shared by all
 * members, must stay within one statistics group (general, IPv4 or
 * IPv6). Returns 0 on success, -EFAULT/-EINVAL on invalid arguments.
 */
static int set_cls_cnt_reass_cb(struct dpa_stats_cnt_cb *cnt_cb,
				const struct dpa_stats_cls_cnt_params *params)
{
	struct dpa_stats *dpa_stats = cnt_cb->dpa_stats;
	struct t_FmPcdManipStats stats;
	uint32_t cnt_sel = params->reass_params.cnt_sel;
	uint32_t i = 0;
	int err = 0;

	if (!dpa_stats) {
		pr_err("Invalid argument: NULL DPA Stats instance\n");
		return -EFAULT;
	}

	/* User can combine counters only from a group */
	if (!((cnt_sel != 0 && cnt_sel <= DPA_STATS_CNT_REASS_GEN_ALL) ||
	    (cnt_sel >= DPA_STATS_CNT_REASS_IPv4_FRAMES &&
	    cnt_sel <= DPA_STATS_CNT_REASS_IPv4_ALL) ||
	    (cnt_sel >= DPA_STATS_CNT_REASS_IPv6_FRAMES &&
	    cnt_sel <= DPA_STATS_CNT_REASS_IPv6_ALL))) {
		pr_err("Invalid Reassembly counter selection\n");
		return -EINVAL;
	}

	cnt_cb->members_num = params->class_members;

	for (i = 0; i < params->class_members; i++) {
		cnt_cb->gen_cb.objs[i] = params->reass_params.reass[i];

		/* Check the user-provided reassembly manip */
		err = FM_PCD_ManipGetStatistics(cnt_cb->gen_cb.objs[i], &stats);
		if (err < 0) {
			pr_err("Invalid Reassembly manip handle\n");
			return -EINVAL;
		}
	}

	/* Based on user option, change mask to all statistics in one group
	 * ("ALL" values are one past the group's last bit, so subtracting
	 * one yields the group mask; the AND strips lower groups' bits) */
	if (cnt_sel == DPA_STATS_CNT_REASS_GEN_ALL)
		cnt_sel -= 1;
	else if (cnt_sel == DPA_STATS_CNT_REASS_IPv4_ALL)
		cnt_sel = (cnt_sel - 1) &
				~(DPA_STATS_CNT_REASS_IPv4_FRAMES - 1);
	else if (cnt_sel == DPA_STATS_CNT_REASS_IPv6_ALL)
		cnt_sel = (cnt_sel - 1) &
				~(DPA_STATS_CNT_REASS_IPv6_FRAMES - 1);

	/* Map Reassembly counter selection to Manip statistics */
	cnt_sel_to_stats(&cnt_cb->info,
			dpa_stats->stats_sel[DPA_STATS_CNT_REASS], cnt_sel);

	/* Set number of bytes that will be written by this counter */
	cnt_cb->bytes_num = cnt_cb->members_num *
			STATS_VAL_SIZE * cnt_cb->info.stats_num;
	return 0;
}
+
+static int set_cls_cnt_frag_cb(struct dpa_stats_cnt_cb *cnt_cb,
+ const struct dpa_stats_cls_cnt_params *params)
+{
+ struct dpa_stats *dpa_stats = cnt_cb->dpa_stats;
+ uint32_t cnt_sel = params->frag_params.cnt_sel, i;
+ struct t_FmPcdManipStats stats;
+ int err;
+
+ if (!dpa_stats) {
+ pr_err("Invalid argument: NULL DPA Stats instance\n");
+ return -EFAULT;
+ }
+
+ /* Check Fragmentation counter selection */
+ if ((cnt_sel == 0) || (cnt_sel > DPA_STATS_CNT_FRAG_ALL)) {
+ pr_err("Invalid Fragmentation counter selection\n");
+ return -EINVAL;
+ }
+
+ cnt_cb->members_num = params->class_members;
+
+ for (i = 0; i < params->class_members; i++) {
+ cnt_cb->gen_cb.objs[i] = params->frag_params.frag[i];
+
+ /* Check the user-provided fragmentation handle */
+ err = FM_PCD_ManipGetStatistics(cnt_cb->gen_cb.objs[i], &stats);
+ if (err < 0) {
+ pr_err("Invalid Fragmentation manip handle\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Decrease one to obtain the mask for all statistics */
+ if (cnt_sel == DPA_STATS_CNT_FRAG_ALL)
+ cnt_sel -= 1;
+
+ /* Map Fragmentation counter selection to Manip statistics */
+ cnt_sel_to_stats(&cnt_cb->info,
+ dpa_stats->stats_sel[DPA_STATS_CNT_FRAG], cnt_sel);
+
+ /* Set number of bytes that will be written by this counter */
+ cnt_cb->bytes_num = cnt_cb->members_num *
+ STATS_VAL_SIZE * cnt_cb->info.stats_num;
+ return 0;
+}
+
+static int set_cls_cnt_plcr_cb(struct dpa_stats_cnt_cb *cnt_cb,
+ const struct dpa_stats_cls_cnt_params *params)
+{
+ struct dpa_stats *dpa_stats = cnt_cb->dpa_stats;
+ struct stats_info *info = &cnt_cb->info;
+ uint32_t cnt_sel = params->plcr_params.cnt_sel, i;
+
+ if (!dpa_stats) {
+ pr_err("Invalid argument: NULL DPA Stats instance\n");
+ return -EFAULT;
+ }
+
+ /* Check Policer counter selection */
+ if (cnt_sel == 0 || cnt_sel > DPA_STATS_CNT_PLCR_ALL) {
+ pr_err("Invalid Policer counter selection\n");
+ return -EINVAL;
+ }
+
+ cnt_cb->members_num = params->class_members;
+
+ for (i = 0; i < params->class_members; i++) {
+ cnt_cb->gen_cb.objs[i] = params->plcr_params.plcr[i];
+ /* Check the user-provided policer handle */
+ FM_PCD_PlcrProfileGetCounter(cnt_cb->gen_cb.objs[i],
+ info->stats_off[0]);
+ /*
+ * in case of bad counter the error will be displayed at
+ * creation time
+ */
+ }
+
+ /* Decrease one to obtain the mask for all statistics */
+ if (cnt_sel == DPA_STATS_CNT_PLCR_ALL)
+ cnt_sel -= 1;
+
+ /* Map Policer counter selection to policer statistics */
+ cnt_sel_to_stats(&cnt_cb->info,
+ dpa_stats->stats_sel[DPA_STATS_CNT_POLICER], cnt_sel);
+
+ /* Set number of bytes that will be written by this counter */
+ cnt_cb->bytes_num = cnt_cb->members_num *
+ STATS_VAL_SIZE * cnt_cb->info.stats_num;
+ return 0;
+}
+
+static int set_cls_cnt_classif_tbl_pair(
+ struct dpa_stats_cnt_classif_tbl_cb *cnt_tbl_cb, int td,
+ const struct dpa_offload_lookup_key_pair *pair,
+ struct dpa_stats_lookup_key *lookup_key)
+{
+ struct dpa_cls_tbl_params cls_tbl;
+ struct dpa_offload_lookup_key tbl_key;
+ struct dpa_cls_tbl_action action;
+ int err = 0;
+
+ /* Check that key byte is not NULL */
+ if (!pair->first_key.byte) {
+ pr_err("Invalid argument: NULL key byte pointer\n");
+ return -EFAULT;
+ }
+
+ /* Copy first key descriptor parameters*/
+ err = copy_key_descriptor(&pair->first_key, &tbl_key);
+ if (err != 0) {
+ pr_err("Unable to copy key descriptor\n");
+ return -EINVAL;
+ }
+
+ /* Use the first key of the pair to lookup in the classifier
+ * table the next table connected on a "next-action" */
+ err = dpa_classif_table_lookup_by_key(td, &tbl_key, &action);
+ if (err != 0) {
+ pr_err("Unable to retrieve next action parameters\n");
+ return -EINVAL;
+ }
+
+ if (action.type != DPA_CLS_TBL_ACTION_NEXT_TABLE) {
+ pr_err("Double key is supported only if "
+ "two tables are connected\n");
+ return -EINVAL;
+ }
+
+ /* Get CcNode from new table descriptor */
+ err = dpa_classif_table_get_params(
+ action.next_table_params.next_td, &cls_tbl);
+ if (err != 0) {
+ pr_err("Unable to retrieve next table parameters\n");
+ return -EINVAL;
+ }
+
+ /* Store DPA Classifier Table type */
+ cnt_tbl_cb->type = cls_tbl.type;
+
+ /* Store CcNode handle */
+ lookup_key->cc_node = cls_tbl.cc_node;
+
+ /* Set as lookup key the second key descriptor from the pair */
+ err = copy_key_descriptor(&pair->second_key, &lookup_key->key);
+ if (err != 0) {
+ pr_err("Unable to copy key descriptor\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int set_cls_cnt_classif_tbl_cb(struct dpa_stats_cnt_cb *cnt_cb,
+ const struct dpa_stats_cls_cnt_params *params)
+{
+ struct dpa_stats_cnt_classif_tbl_cb *cnt_tbl_cb = &cnt_cb->tbl_cb;
+ struct dpa_stats_cls_cnt_classif_tbl prm = params->classif_tbl_params;
+ struct dpa_stats *dpa_stats = cnt_cb->dpa_stats;
+ struct dpa_cls_tbl_params cls_tbl;
+ uint32_t i = 0, cnt_sel = prm.cnt_sel;
+ int err = 0, frag_stats = -1;
+
+ /* Check Classifier Table descriptor */
+ if (params->classif_tbl_params.td == DPA_OFFLD_INVALID_OBJECT_ID) {
+ pr_err("Invalid Classifier Table descriptor\n");
+ return -EINVAL;
+ }
+
+ /* Check Classifier Table counter selection */
+ if (cnt_sel >= DPA_STATS_CNT_CLASSIF_BYTES &&
+ cnt_sel <= DPA_STATS_CNT_CLASSIF_ALL) {
+
+ /* Entire group of counters was selected */
+ if (cnt_sel == DPA_STATS_CNT_CLASSIF_ALL)
+ cnt_sel -= 1;
+ frag_stats = 0;
+
+ } else if (cnt_sel >= DPA_STATS_CNT_FRAG_TOTAL_FRAMES &&
+ cnt_sel <= DPA_STATS_CNT_FRAG_ALL) {
+
+ /* Entire group of counters was selected */
+ if (cnt_sel == DPA_STATS_CNT_FRAG_ALL)
+ cnt_sel -= 1;
+ frag_stats = 1;
+
+ } else {
+ pr_err("Invalid Classifier Table counter selection\n");
+ return -EINVAL;
+ }
+
+ cnt_tbl_cb->td = params->classif_tbl_params.td;
+ cnt_cb->members_num = params->class_members;
+
+ switch (prm.key_type) {
+ case DPA_STATS_CLASSIF_SINGLE_KEY:
+ /* Get CcNode from table descriptor */
+ err = dpa_classif_table_get_params(prm.td, &cls_tbl);
+ if (err != 0) {
+ pr_err("Invalid argument: Table descriptor\n");
+ return -EINVAL;
+ }
+
+ /* Store DPA Classifier Table type */
+ cnt_tbl_cb->type = cls_tbl.type;
+
+ for (i = 0; i < params->class_members; i++) {
+ /* Store CcNode handle */
+ cnt_tbl_cb->keys[i].cc_node = cls_tbl.cc_node;
+
+ if (!prm.keys[i].byte) {
+ /* Key is not valid for now */
+ cnt_tbl_cb->keys[i].valid = FALSE;
+ continue;
+ }
+
+ /* Copy the key descriptor */
+ err = copy_key_descriptor(&prm.keys[i],
+ &cnt_tbl_cb->keys[i].key);
+ if (err != 0) {
+ pr_err("Unable to copy key descriptor\n");
+ return -EINVAL;
+ }
+
+ /* Check the Classifier Table counter */
+ err = check_tbl_cls_counter(cnt_cb,
+ &cnt_tbl_cb->keys[i]);
+ if (err != 0)
+ return -EINVAL;
+
+ cnt_tbl_cb->keys[i].valid = TRUE;
+ }
+ break;
+ case DPA_STATS_CLASSIF_PAIR_KEY:
+ for (i = 0; i < params->class_members; i++) {
+ if (!prm.pairs[i].first_key.byte) {
+ /* Key is not valid for now */
+ cnt_tbl_cb->keys[i].valid = FALSE;
+ continue;
+ }
+
+ err = set_cls_cnt_classif_tbl_pair(cnt_tbl_cb, prm.td,
+ &prm.pairs[i], &cnt_tbl_cb->keys[i]);
+ if (err != 0) {
+ pr_err("Unable to set the key pair\n");
+ return -EINVAL;
+ }
+
+ /* Check the Classifier Table counter */
+ err = check_tbl_cls_counter(cnt_cb,
+ &cnt_tbl_cb->keys[i]);
+ if (err != 0)
+ return -EINVAL;
+
+ cnt_tbl_cb->keys[i].valid = TRUE;
+ }
+ break;
+ default:
+ pr_err("Invalid argument: key type\n");
+ return -EINVAL;
+ }
+
+ if (frag_stats) {
+ /* For every valid key, retrieve the hmcd */
+ for (i = 0; i < params->class_members; i++) {
+ if (!cnt_tbl_cb->keys[i].valid)
+ continue;
+
+ err = set_frag_manip(prm.td, &cnt_cb->tbl_cb.keys[i]);
+ if (err < 0) {
+ pr_err("Invalid Fragmentation manip handle\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Map Classif Node counter selection to fragmentation stats */
+ cnt_sel_to_stats(&cnt_cb->info,
+ dpa_stats->stats_sel[DPA_STATS_CNT_FRAG], cnt_sel);
+
+ /* Change the retrieve routine */
+ cnt_cb->f_get_cnt_stats = get_cnt_cls_tbl_frag_stats;
+ } else
+ /* Map Classif Node counter selection to CcNode statistics */
+ cnt_sel_to_stats(&cnt_cb->info,
+ dpa_stats->stats_sel[DPA_STATS_CNT_CLASSIF_NODE],
+ cnt_sel >> CLASSIF_STATS_SHIFT);
+
+ /* Set number of bytes that will be written by this counter */
+ cnt_cb->bytes_num = cnt_cb->members_num *
+ STATS_VAL_SIZE * cnt_cb->info.stats_num;
+ return 0;
+}
+
/*
 * Configure a class (multi-member) Classification Node counter control
 * block.
 *
 * All members share the same CcNode; each member has its own lookup
 * key, which is duplicated and validated against the node. Returns 0
 * on success, -EFAULT/-EINVAL on invalid arguments.
 */
static int set_cls_cnt_ccnode_cb(struct dpa_stats_cnt_cb *cnt_cb,
				 const struct dpa_stats_cls_cnt_params *params)
{
	struct dpa_stats *dpa_stats = cnt_cb->dpa_stats;
	struct dpa_stats_cls_cnt_classif_node prm = params->classif_node_params;
	uint32_t i = 0;
	int err = 0;

	if (!dpa_stats) {
		pr_err("Invalid argument: NULL DPA Stats instance\n");
		return -EFAULT;
	}

	/* Check Classification Cc Node counter selection */
	if (prm.cnt_sel == 0 || prm.cnt_sel > DPA_STATS_CNT_CLASSIF_ALL) {
		pr_err("Invalid Classif_Node counter selection\n");
		return -EINVAL;
	}

	cnt_cb->ccnode_cb.cc_node = prm.cc_node;
	cnt_cb->members_num = params->class_members;

	for (i = 0; i < params->class_members; i++) {
		/* Copy the key descriptor */
		err = copy_key_descriptor(&prm.keys[i],
				&cnt_cb->ccnode_cb.keys[i]);
		if (err != 0) {
			pr_err("Unable to copy key descriptor\n");
			return -EINVAL;
		}

		/* Check the Classifier Node counter parameters */
		err = check_ccnode_counter(cnt_cb,
				prm.ccnode_type, &cnt_cb->ccnode_cb.keys[i]);
		if (err != 0) {
			pr_err("Invalid Classif Node counter parameters\n");
			return -EINVAL;
		}
	}

	/* Map Classif Node counter selection to CcNode statistics
	 * (the shift converts classif bits to the offset-table index) */
	cnt_sel_to_stats(&cnt_cb->info,
			dpa_stats->stats_sel[DPA_STATS_CNT_CLASSIF_NODE],
			prm.cnt_sel >> CLASSIF_STATS_SHIFT);

	/* Set number of bytes that will be written by this counter */
	cnt_cb->bytes_num = cnt_cb->members_num *
			STATS_VAL_SIZE * cnt_cb->info.stats_num;
	return 0;
}
+
+static int set_cls_cnt_ipsec_cb(struct dpa_stats_cnt_cb *cnt_cb,
+ const struct dpa_stats_cls_cnt_params *prm)
+{
+ struct dpa_stats_cnt_ipsec_cb *cnt_ipsec_cb = &cnt_cb->ipsec_cb;
+ struct dpa_stats *dpa_stats = cnt_cb->dpa_stats;
+ struct dpa_ipsec_sa_stats stats;
+ uint32_t cnt_sel = prm->ipsec_params.cnt_sel, i = 0;
+ int err = 0;
+
+ if (!dpa_stats) {
+ pr_err("Invalid argument: NULL DPA Stats instance\n");
+ return -EFAULT;
+ }
+
+ /* Map IPSec counter selection to statistics */
+ if (cnt_sel == DPA_STATS_CNT_NUM_OF_BYTES) {
+ cnt_cb->info.stats_off[0] = dpa_stats->stats_sel[
+ DPA_STATS_CNT_IPSEC][DPA_STATS_CNT_NUM_OF_BYTES];
+ cnt_cb->info.stats_num = 1;
+ } else if (cnt_sel == DPA_STATS_CNT_NUM_OF_PACKETS) {
+ cnt_cb->info.stats_off[0] = dpa_stats->stats_sel[
+ DPA_STATS_CNT_IPSEC][DPA_STATS_CNT_NUM_OF_PACKETS];
+ cnt_cb->info.stats_num = 1;
+ } else if (cnt_sel == DPA_STATS_CNT_NUM_ALL) {
+ cnt_cb->info.stats_off[0] = dpa_stats->stats_sel[
+ DPA_STATS_CNT_IPSEC][DPA_STATS_CNT_NUM_OF_BYTES];
+ cnt_cb->info.stats_off[1] = dpa_stats->stats_sel[
+ DPA_STATS_CNT_IPSEC][DPA_STATS_CNT_NUM_OF_PACKETS];
+ cnt_cb->info.stats_num = 2;
+ } else {
+ pr_err("Invalid IPSec counter selection\n");
+ return -EINVAL;
+ }
+
+ cnt_cb->members_num = prm->class_members;
+
+ for (i = 0; i < prm->class_members; i++) {
+ if (prm->ipsec_params.sa_id[i] != DPA_OFFLD_INVALID_OBJECT_ID) {
+ cnt_ipsec_cb->sa_id[i] = prm->ipsec_params.sa_id[i];
+ cnt_ipsec_cb->valid[i] = TRUE;
+
+ err = dpa_ipsec_sa_get_stats(cnt_cb->ipsec_cb.sa_id[i],
+ &stats);
+ if (err < 0) {
+ pr_err("Invalid IPSec counter parameters\n");
+ return -EINVAL;
+ }
+ } else {
+ cnt_ipsec_cb->valid[i] = FALSE;
+ }
+ }
+
+ /* Set number of bytes that will be written by this counter */
+ cnt_cb->bytes_num = cnt_cb->members_num *
+ STATS_VAL_SIZE * cnt_cb->info.stats_num;
+ return 0;
+}
+
+int set_classif_tbl_member(const struct dpa_stats_cls_member_params *prm,
+ int member_index,
+ struct dpa_stats_cnt_cb *cnt_cb)
+{
+ struct dpa_stats_cnt_classif_tbl_cb *tbl_cb = &cnt_cb->tbl_cb;
+ uint32_t i = 0;
+ int err = 0;
+
+ /* Check that counter is of type Classifier table */
+ if (cnt_cb->type != DPA_STATS_CNT_CLASSIF_TBL) {
+ pr_err("Operation permitted only on counter "
+ "type DPA_STATS_CNT_CLASSIF_TBL\n");
+ return -EINVAL;
+ }
+
+ /* Check that member index does not exceeds class size */
+ if (member_index < 0 || member_index >= cnt_cb->members_num) {
+ pr_err("Member index is out of class counter size\n");
+ return -EINVAL;
+ }
+
+ /* Release the old key memory */
+ kfree(tbl_cb->keys[member_index].key.byte);
+ tbl_cb->keys[member_index].key.byte = NULL;
+
+ kfree(tbl_cb->keys[member_index].key.mask);
+ tbl_cb->keys[member_index].key.mask = NULL;
+
+ /* Reset the statistics */
+ for (i = 0; i < cnt_cb->info.stats_num; i++) {
+ cnt_cb->info.stats[member_index][i] = 0;
+ cnt_cb->info.last_stats[member_index][i] = 0;
+ }
+
+ if ((prm->type == DPA_STATS_CLS_MEMBER_SINGLE_KEY && !prm->key.byte) ||
+ (prm->type == DPA_STATS_CLS_MEMBER_PAIR_KEY &&
+ !prm->pair.first_key.byte)) {
+ /* Mark the key as invalid */
+ tbl_cb->keys[member_index].valid = FALSE;
+ return 0;
+ } else {
+ tbl_cb->keys[member_index].valid = TRUE;
+
+ if (prm->type == DPA_STATS_CLS_MEMBER_SINGLE_KEY) {
+ /* Copy the key descriptor */
+ err = copy_key_descriptor(&prm->key,
+ &tbl_cb->keys[member_index].key);
+ if (err != 0) {
+ pr_err("Unable to copy key descriptor\n");
+ return -EINVAL;
+ }
+ } else {
+ err = set_cls_cnt_classif_tbl_pair(tbl_cb, tbl_cb->td,
+ &prm->pair, &tbl_cb->keys[member_index]);
+ if (err != 0) {
+ pr_err("Unable to configure the key pair\n");
+ return -EINVAL;
+ }
+ }
+ if (cnt_cb->f_get_cnt_stats != get_cnt_cls_tbl_frag_stats) {
+ err = check_tbl_cls_counter(cnt_cb,
+ &tbl_cb->keys[member_index]);
+ if (err != 0)
+ return -EINVAL;
+ } else{
+ err = set_frag_manip(tbl_cb->td,
+ &tbl_cb->keys[member_index]);
+ if (err < 0) {
+ pr_err("Invalid Fragmentation manip handle\n");
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+}
+
+int set_ipsec_member(const struct dpa_stats_cls_member_params *params,
+ int member_idx,
+ struct dpa_stats_cnt_cb *cnt_cb)
+{
+ struct dpa_stats_cnt_ipsec_cb *ipsec_cb = &cnt_cb->ipsec_cb;
+ uint32_t i = 0;
+
+ /* Check that counter is of type IPSec */
+ if (cnt_cb->type != DPA_STATS_CNT_IPSEC) {
+ pr_err("Operation permitted only on counter "
+ "type DPA_STATS_CNT_IPSEC\n");
+ return -EINVAL;
+ }
+
+ /* Check that member index does not exceeds class size */
+ if (member_idx < 0 || member_idx >= cnt_cb->members_num) {
+ pr_err("Member index is out of class counter size\n");
+ return -EINVAL;
+ }
+
+ /* Reset the statistics */
+ for (i = 0; i < cnt_cb->info.stats_num; i++) {
+ cnt_cb->info.stats[member_idx][i] = 0;
+ cnt_cb->info.last_stats[member_idx][i] = 0;
+ }
+
+ if (params->sa_id == DPA_OFFLD_INVALID_OBJECT_ID) {
+ /* Mark that corresponding SA id as invalid */
+ ipsec_cb->valid[member_idx] = FALSE;
+ } else {
+ /* Mark the corresponding SA id as valid */
+ ipsec_cb->valid[member_idx] = TRUE;
+ ipsec_cb->sa_id[member_idx] = params->sa_id;
+ }
+
+ return 0;
+}
+
+/*
+ * Fold one class member's raw 32-bit hardware counters into the 64-bit
+ * running totals and write them to the request output area.
+ *
+ * 'stats' points at a structure of raw 32-bit counters; stats_off[j] is
+ * the byte offset of the j-th selected counter inside it.  For every
+ * selected counter the delta since the last read (with 32-bit rollover
+ * compensation) is accumulated, the low 32 bits of the total are written
+ * at req_cb->request_area, and the write pointer advances by
+ * STATS_VAL_SIZE.  Statement order matters: request_area is mutated here
+ * and shared with the callers' skip-member paths.
+ * NOTE(review): arithmetic on 'void *stats' relies on the GCC extension
+ * that sizeof(void) == 1.
+ */
+static inline void get_cnt_32bit_stats(struct dpa_stats_req_cb *req_cb,
+				       struct stats_info *stats_info,
+				       void *stats, uint32_t idx)
+{
+	uint32_t j = 0;
+	uint64_t stats_val;
+
+	for (j = 0; j < stats_info->stats_num; j++) {
+		/* Get statistics value */
+		stats_val = (uint64_t)(*((uint32_t *)
+				(stats + stats_info->stats_off[j])));
+
+		/* Check for rollover of the 32-bit raw counter */
+		if (stats_val < stats_info->last_stats[idx][j])
+			stats_info->stats[idx][j] +=
+				((unsigned long int)0xffffffff -
+				stats_info->last_stats[idx][j]) + stats_val;
+		else
+			stats_info->stats[idx][j] += stats_val -
+				stats_info->last_stats[idx][j];
+
+		/* Store the current value as the last read value */
+		stats_info->last_stats[idx][j] = stats_val;
+
+		/* Write the memory location */
+		*(uint32_t *)(req_cb->request_area) =
+				(uint32_t)stats_info->stats[idx][j];
+
+		/* Update the memory pointer */
+		req_cb->request_area += STATS_VAL_SIZE;
+
+		/* Reset-on-read semantics requested by the caller */
+		if (stats_info->reset)
+			stats_info->stats[idx][j] = 0;
+	}
+}
+
+/*
+ * Fold one class member's raw 64-bit hardware counters into the 64-bit
+ * running totals and write them to the request output area; mirrors
+ * get_cnt_32bit_stats() for 64-bit raw counters.
+ */
+static inline void get_cnt_64bit_stats(struct dpa_stats_req_cb *req_cb,
+				       struct stats_info *stats_info,
+				       void *stats, uint32_t idx)
+{
+	uint32_t j = 0;
+	uint64_t stats_val;
+
+	for (j = 0; j < stats_info->stats_num; j++) {
+		/* Get statistics value */
+		stats_val = *((uint64_t *)(stats + stats_info->stats_off[j]));
+
+		/*
+		 * Check for rollover. The raw counter read here is 64 bits
+		 * wide, so wrap-around must be computed against the full
+		 * 64-bit range; the original code reused the 32-bit mask
+		 * (0xffffffff), which corrupted the total on rollover.
+		 */
+		if (stats_val < stats_info->last_stats[idx][j])
+			stats_info->stats[idx][j] +=
+				(0xffffffffffffffffULL -
+				stats_info->last_stats[idx][j]) + stats_val;
+		else
+			stats_info->stats[idx][j] += stats_val -
+				stats_info->last_stats[idx][j];
+
+		/* Store the current value as the last read value */
+		stats_info->last_stats[idx][j] = stats_val;
+
+		/* Write the memory location */
+		*(uint32_t *)(req_cb->request_area) =
+				(uint32_t)stats_info->stats[idx][j];
+
+		/* Update the memory pointer */
+		req_cb->request_area += STATS_VAL_SIZE;
+
+		/* Reset-on-read semantics requested by the caller */
+		if (stats_info->reset)
+			stats_info->stats[idx][j] = 0;
+	}
+}
+
+/* Append the selected FM-MAC counters of every class member to the
+ * request output area. */
+static int get_cnt_eth_stats(struct dpa_stats_req_cb *req_cb,
+			     struct dpa_stats_cnt_cb *cnt_cb)
+{
+	t_FmMacStatistics stats;
+	uint32_t member;
+
+	for (member = 0; member < cnt_cb->members_num; member++) {
+		if (FM_MAC_GetStatistics(cnt_cb->gen_cb.objs[member],
+					 &stats) != 0) {
+			pr_err("Couldn't retrieve Ethernet Counter value\n");
+			return -ENOENT;
+		}
+
+		get_cnt_64bit_stats(req_cb, &cnt_cb->info,
+				    (void *)&stats, member);
+	}
+
+	return 0;
+}
+
+/* Append the IP reassembly manip statistics of every class member to
+ * the request output area. */
+static int get_cnt_reass_stats(struct dpa_stats_req_cb *req_cb,
+			       struct dpa_stats_cnt_cb *cnt_cb)
+{
+	struct t_FmPcdManipStats stats;
+	uint32_t member;
+	int ret;
+
+	for (member = 0; member < cnt_cb->members_num; member++) {
+		ret = FM_PCD_ManipGetStatistics(cnt_cb->gen_cb.objs[member],
+						&stats);
+		if (ret < 0) {
+			pr_err("Couldn't retrieve Reassembly statistics\n");
+			return -ESRCH;
+		}
+
+		get_cnt_32bit_stats(req_cb, &cnt_cb->info,
+				    &stats.u.reassem.u.ipReassem, member);
+	}
+
+	return 0;
+}
+
+/* Append the IP fragmentation manip statistics of every class member
+ * to the request output area. */
+static int get_cnt_frag_stats(struct dpa_stats_req_cb *req_cb,
+			      struct dpa_stats_cnt_cb *cnt_cb)
+{
+	struct t_FmPcdManipStats stats;
+	uint32_t member;
+	int ret;
+
+	for (member = 0; member < cnt_cb->members_num; member++) {
+		ret = FM_PCD_ManipGetStatistics(cnt_cb->gen_cb.objs[member],
+						&stats);
+		if (ret < 0) {
+			pr_err("Couldn't retrieve Fragmentation statistics\n");
+			return -EINTR;
+		}
+
+		get_cnt_32bit_stats(req_cb, &cnt_cb->info,
+				    &stats.u.frag.u.ipFrag, member);
+	}
+
+	return 0;
+}
+
+/*
+ * Append the selected policer-profile counters of every class member to
+ * the request output area.
+ *
+ * FM_PCD_PlcrProfileGetCounter() returns a raw value directly (no error
+ * path), so this function duplicates the accumulate/rollover/write logic
+ * of get_cnt_32bit_stats() inline rather than staging the values in a
+ * temporary structure.
+ * NOTE(review): the 0xffffffff rollover mask assumes the policer
+ * counters are 32 bits wide — confirm against the FMD policer spec.
+ */
+static int get_cnt_plcr_stats(struct dpa_stats_req_cb *req_cb,
+			      struct dpa_stats_cnt_cb *cnt_cb)
+{
+	struct stats_info *info = &cnt_cb->info;
+	uint64_t stats_val = 0;
+	uint32_t i = 0, j = 0;
+
+	for (i = 0; i < cnt_cb->members_num; i++) {
+		for (j = 0; j < info->stats_num; j++) {
+			/* stats_off[] holds the counter selector here */
+			stats_val = (uint64_t)FM_PCD_PlcrProfileGetCounter(
+				cnt_cb->gen_cb.objs[i], info->stats_off[j]);
+
+			/* Check for rollover */
+			if (stats_val < info->last_stats[i][j])
+				info->stats[i][j] +=
+					((unsigned long int)0xffffffff -
+					info->last_stats[i][j]) + stats_val;
+			else
+				info->stats[i][j] += stats_val -
+					info->last_stats[i][j];
+
+			/* Store the current value as the last read value */
+			info->last_stats[i][j] = stats_val;
+
+			/* Write the memory location */
+			*(uint32_t *)(req_cb->request_area) =
+					(uint32_t)info->stats[i][j];
+
+			/* Update the memory pointer */
+			req_cb->request_area += STATS_VAL_SIZE;
+
+			/* Reset-on-read semantics requested by the caller */
+			if (info->reset)
+				info->stats[i][j] = 0;
+		}
+	}
+
+	return 0;
+}
+
+/* Append exact-match key statistics for every valid class member;
+ * invalid members contribute a block of zeros instead. */
+static int get_cnt_cls_tbl_match_stats(struct dpa_stats_req_cb *req_cb,
+				       struct dpa_stats_cnt_cb *cnt_cb)
+{
+	t_FmPcdCcKeyStatistics stats;
+	uint32_t member;
+	uint32_t skip_len = cnt_cb->info.stats_num * STATS_VAL_SIZE;
+
+	for (member = 0; member < cnt_cb->members_num; member++) {
+		if (!cnt_cb->tbl_cb.keys[member].valid) {
+			/* Emit zeros for an unset member and move on */
+			memset(req_cb->request_area, 0, skip_len);
+			req_cb->request_area += skip_len;
+			continue;
+		}
+
+		if (FM_PCD_MatchTableFindNGetKeyStatistics(
+				cnt_cb->tbl_cb.keys[member].cc_node,
+				cnt_cb->tbl_cb.keys[member].key.size,
+				cnt_cb->tbl_cb.keys[member].key.byte,
+				cnt_cb->tbl_cb.keys[member].key.mask,
+				&stats) != 0) {
+			pr_err("Couldn't retrieve Classif Table statistics\n");
+			return -EIO;
+		}
+		get_cnt_32bit_stats(req_cb, &cnt_cb->info, &stats, member);
+	}
+
+	return 0;
+}
+
+/* Append hash-table key statistics for every valid class member;
+ * invalid members contribute a block of zeros instead. */
+static int get_cnt_cls_tbl_hash_stats(struct dpa_stats_req_cb *req_cb,
+				      struct dpa_stats_cnt_cb *cnt_cb)
+{
+	t_FmPcdCcKeyStatistics stats;
+	uint32_t member;
+	uint32_t skip_len = cnt_cb->info.stats_num * STATS_VAL_SIZE;
+
+	for (member = 0; member < cnt_cb->members_num; member++) {
+		if (!cnt_cb->tbl_cb.keys[member].valid) {
+			/* Emit zeros for an unset member and move on */
+			memset(req_cb->request_area, 0, skip_len);
+			req_cb->request_area += skip_len;
+			continue;
+		}
+
+		if (FM_PCD_HashTableFindNGetKeyStatistics(
+				cnt_cb->tbl_cb.keys[member].cc_node,
+				cnt_cb->tbl_cb.keys[member].key.size,
+				cnt_cb->tbl_cb.keys[member].key.byte,
+				&stats) != 0) {
+			pr_err("Couldn't retrieve Classif Table statistics\n");
+			return -EIO;
+		}
+		get_cnt_32bit_stats(req_cb, &cnt_cb->info, &stats, member);
+	}
+
+	return 0;
+}
+
+/* Append indexed-table key statistics for every valid class member
+ * (key byte 0 is the table index); invalid members contribute zeros. */
+static int get_cnt_cls_tbl_index_stats(struct dpa_stats_req_cb *req_cb,
+				       struct dpa_stats_cnt_cb *cnt_cb)
+{
+	t_FmPcdCcKeyStatistics stats;
+	uint32_t member;
+	uint32_t skip_len = cnt_cb->info.stats_num * STATS_VAL_SIZE;
+
+	for (member = 0; member < cnt_cb->members_num; member++) {
+		if (!cnt_cb->tbl_cb.keys[member].valid) {
+			/* Emit zeros for an unset member and move on */
+			memset(req_cb->request_area, 0, skip_len);
+			req_cb->request_area += skip_len;
+			continue;
+		}
+
+		if (FM_PCD_MatchTableGetKeyStatistics(
+				cnt_cb->tbl_cb.keys[member].cc_node,
+				cnt_cb->tbl_cb.keys[member].key.byte[0],
+				&stats) != 0) {
+			pr_err("Couldn't retrieve Classif Table statistics\n");
+			return -EIO;
+		}
+		get_cnt_32bit_stats(req_cb, &cnt_cb->info, &stats, member);
+	}
+
+	return 0;
+}
+
+/* Append fragmentation manip statistics for every valid class member
+ * (via the per-key frag handle); invalid members contribute zeros. */
+static int get_cnt_cls_tbl_frag_stats(struct dpa_stats_req_cb *req_cb,
+				      struct dpa_stats_cnt_cb *cnt_cb)
+{
+	struct t_FmPcdManipStats stats;
+	uint32_t member;
+	uint32_t skip_len = cnt_cb->info.stats_num * STATS_VAL_SIZE;
+
+	for (member = 0; member < cnt_cb->members_num; member++) {
+		if (!cnt_cb->tbl_cb.keys[member].valid) {
+			/* Emit zeros for an unset member and move on */
+			memset(req_cb->request_area, 0, skip_len);
+			req_cb->request_area += skip_len;
+			continue;
+		}
+
+		if (FM_PCD_ManipGetStatistics(cnt_cb->tbl_cb.keys[member].frag,
+					      &stats) < 0) {
+			pr_err("Couldn't retrieve Fragmentation statistics\n");
+			return -EINTR;
+		}
+		get_cnt_32bit_stats(req_cb, &cnt_cb->info,
+				    &stats.u.frag.u.ipFrag, member);
+	}
+
+	return 0;
+}
+
+/* Look up every stored key in the Cc node by exact match and append its
+ * statistics to the request output area. */
+static int get_cnt_ccnode_match_stats(struct dpa_stats_req_cb *req_cb,
+				      struct dpa_stats_cnt_cb *cnt_cb)
+{
+	t_FmPcdCcKeyStatistics stats;
+	uint32_t member;
+
+	for (member = 0; member < cnt_cb->members_num; member++) {
+		if (FM_PCD_MatchTableFindNGetKeyStatistics(
+				cnt_cb->ccnode_cb.cc_node,
+				cnt_cb->ccnode_cb.keys[member].size,
+				cnt_cb->ccnode_cb.keys[member].byte,
+				cnt_cb->ccnode_cb.keys[member].mask,
+				&stats) != 0) {
+			pr_err("Couldn't retrieve Classif Node statistics\n");
+			return -ENXIO;
+		}
+
+		get_cnt_32bit_stats(req_cb, &cnt_cb->info,
+				    (void *)&stats, member);
+	}
+	return 0;
+}
+
+/* Look up every stored key in the hash Cc node and append its
+ * statistics to the request output area. */
+static int get_cnt_ccnode_hash_stats(struct dpa_stats_req_cb *req_cb,
+				     struct dpa_stats_cnt_cb *cnt_cb)
+{
+	t_FmPcdCcKeyStatistics stats;
+	uint32_t member;
+
+	for (member = 0; member < cnt_cb->members_num; member++) {
+		if (FM_PCD_HashTableFindNGetKeyStatistics(
+				cnt_cb->ccnode_cb.cc_node,
+				cnt_cb->ccnode_cb.keys[member].size,
+				cnt_cb->ccnode_cb.keys[member].byte,
+				&stats) != 0) {
+			pr_err("Couldn't retrieve Classif Node statistics\n");
+			return -ENXIO;
+		}
+
+		get_cnt_32bit_stats(req_cb, &cnt_cb->info,
+				    (void *)&stats, member);
+	}
+	return 0;
+}
+
+/* Fetch statistics for every stored key of an indexed Cc node (key
+ * byte 0 is the index) and append them to the request output area. */
+static int get_cnt_ccnode_index_stats(struct dpa_stats_req_cb *req_cb,
+				      struct dpa_stats_cnt_cb *cnt_cb)
+{
+	t_FmPcdCcKeyStatistics stats;
+	uint32_t member;
+
+	for (member = 0; member < cnt_cb->members_num; member++) {
+		if (FM_PCD_MatchTableGetKeyStatistics(
+				cnt_cb->ccnode_cb.cc_node,
+				cnt_cb->ccnode_cb.keys[member].byte[0],
+				&stats) != 0) {
+			pr_err("Couldn't retrieve Classif Node statistics\n");
+			return -ENXIO;
+		}
+
+		get_cnt_32bit_stats(req_cb, &cnt_cb->info,
+				    (void *)&stats, member);
+	}
+	return 0;
+}
+
+/* Append the SA statistics of every valid class member; empty SA slots
+ * contribute a block of zeros instead. */
+static int get_cnt_ipsec_stats(struct dpa_stats_req_cb *req_cb,
+			       struct dpa_stats_cnt_cb *cnt_cb)
+{
+	struct dpa_ipsec_sa_stats stats;
+	uint32_t member;
+	uint32_t skip_len = cnt_cb->info.stats_num * STATS_VAL_SIZE;
+
+	for (member = 0; member < cnt_cb->members_num; member++) {
+		if (!cnt_cb->ipsec_cb.valid[member]) {
+			/* Emit zeros for an empty SA slot and move on */
+			memset(req_cb->request_area, 0, skip_len);
+			req_cb->request_area += skip_len;
+			continue;
+		}
+
+		if (dpa_ipsec_sa_get_stats(cnt_cb->ipsec_cb.sa_id[member],
+					   &stats) < 0) {
+			pr_err("Couldn't retrieve IPSec statistics\n");
+			return -E2BIG;
+		}
+
+		get_cnt_32bit_stats(req_cb, &cnt_cb->info, &stats, member);
+	}
+
+	return 0;
+}
+
+/*
+ * Workqueue handler for asynchronous counter requests: fill the storage
+ * area, notify the application through its callback, then recycle the
+ * request control block.
+ */
+static void async_req_work_func(struct work_struct *work)
+{
+	struct dpa_stats *dpa_stats = gbl_dpa_stats;
+	struct dpa_stats_req_cb *req_cb;
+	int ret;
+
+	req_cb = container_of(work, struct dpa_stats_req_cb, async_req_work);
+	BUG_ON(!req_cb);
+
+	/* A negative bytes_num tells the application the request failed */
+	ret = treat_cnts_request(dpa_stats, req_cb);
+	if (ret < 0) {
+		pr_err("Failed to retrieve counter values\n");
+		req_cb->bytes_num = ret;
+	}
+
+	/* Notify the application */
+	req_cb->request_done(0, req_cb->config.storage_area_offset,
+			     req_cb->cnts_num, req_cb->bytes_num);
+
+	/* Release the request control block */
+	if (put_req(dpa_stats, req_cb) < 0)
+		pr_err("Failed to release request control block\n");
+}
+
+/*
+ * Create the single DPA Stats instance: validate parameters, allocate
+ * the global control block, set up counter and request resources, and
+ * build the per-counter-type statistics maps.  Fails with -EPERM if an
+ * instance already exists.  Publishes the instance in gbl_dpa_stats
+ * only after everything succeeded.
+ */
+int dpa_stats_init(const struct dpa_stats_params *params, int *dpa_stats_id)
+{
+	struct dpa_stats *dpa_stats = NULL;
+	int err = 0;
+
+	/* Multiple DPA Stats instances are not currently supported */
+	unused(dpa_stats_id);
+
+	/* Sanity checks */
+	if (gbl_dpa_stats) {
+		pr_err("dpa_stats component already initialized.\n");
+		pr_err("Multiple DPA Stats Instances are not supported.\n");
+		return -EPERM;
+	}
+
+	/* Check user-provided parameters */
+	err = check_dpa_stats_params(params);
+	if (err < 0)
+		return err;
+
+	/* Control block allocation */
+	dpa_stats = kzalloc(sizeof(struct dpa_stats), GFP_KERNEL);
+	if (!dpa_stats) {
+		pr_err("Could not allocate memory for control block.\n");
+		return -ENOMEM;
+	}
+
+	/* Store parameters */
+	dpa_stats->config = *params;
+
+	/* Initialize DPA Stats instance lock */
+	mutex_init(&dpa_stats->lock);
+	mutex_init(&dpa_stats->sched_cnt_lock);
+
+	/*
+	 * Allocate and initialize resources occupied by counters.
+	 * NOTE(review): free_resources() takes no argument — presumably it
+	 * tears down via global state; verify it is safe to call before
+	 * gbl_dpa_stats is assigned below.
+	 */
+	err = init_cnts_resources(dpa_stats);
+	if (err < 0) {
+		free_resources();
+		return err;
+	}
+
+	/* Allocate and initialize requests control block */
+	err = init_reqs_resources(dpa_stats);
+	if (err < 0) {
+		free_resources();
+		return err;
+	}
+
+	/* Map each Ethernet counter selection to a FM-MAC statistics */
+	create_cnt_eth_stats(dpa_stats);
+
+	/* Map Reassembly counters to FMAN Reassembly statistics */
+	create_cnt_reass_stats(dpa_stats);
+
+	/* Map Fragmentation counters to FMAN Fragmentation statistics */
+	create_cnt_frag_stats(dpa_stats);
+
+	/* Map Policer counters to FMAN Policer statistics */
+	create_cnt_plcr_stats(dpa_stats);
+
+	/* Map Classifier counters to FMAN Classifier statistics */
+	create_classif_stats(dpa_stats);
+
+	/* Map IPSec counters */
+	create_cnt_ipsec_stats(dpa_stats);
+
+	/* Publish the instance; later API calls check this pointer */
+	gbl_dpa_stats = dpa_stats;
+
+	return 0;
+}
+EXPORT_SYMBOL(dpa_stats_init);
+
+/*
+ * Create a single (non-class) counter of the requested type.  On
+ * success *dpa_stats_cnt_id holds the new counter id.  On failure the
+ * counter slot is returned to the free pool; if even that fails, the
+ * reserved id is reported so the caller can retry the release through
+ * dpa_stats_remove_counter().
+ */
+int dpa_stats_create_counter(int dpa_stats_id,
+			     const struct dpa_stats_cnt_params *params,
+			     int *dpa_stats_cnt_id)
+{
+	struct dpa_stats *dpa_stats = NULL;
+	struct dpa_stats_cnt_cb *cnt_cb = NULL;
+	int err = 0, err_rb = 0;
+
+	/* multiple DPA Stats instances are not currently supported */
+	unused(dpa_stats_id);
+
+	if (!gbl_dpa_stats) {
+		pr_err("dpa_stats component is not initialized\n");
+		return -EPERM;
+	}
+
+	if (!dpa_stats_cnt_id) {
+		pr_err("dpa_stats_cnt_id can't be NULL\n");
+		return -EINVAL;
+	}
+	*dpa_stats_cnt_id = DPA_OFFLD_INVALID_OBJECT_ID;
+
+	dpa_stats = gbl_dpa_stats;
+
+	err = get_new_cnt(dpa_stats, &cnt_cb);
+	if (err < 0) {
+		pr_err("Failed retrieving a preallocated counter\n");
+		return err;
+	}
+
+	/* Acquire the lock for the counter control block */
+	mutex_lock(&cnt_cb->lock);
+
+	/*
+	 * For table/node counters f_get_cnt_stats is selected inside the
+	 * set_cnt_* helper; the other types use a fixed retrieval routine.
+	 */
+	switch (params->type) {
+	case DPA_STATS_CNT_ETH:
+		cnt_cb->type = DPA_STATS_CNT_ETH;
+		cnt_cb->f_get_cnt_stats = get_cnt_eth_stats;
+
+		err = set_cnt_eth_cb(cnt_cb, params);
+		if (err != 0) {
+			pr_err("Failed to create ETH counter\n");
+			goto create_counter_err;
+		}
+		break;
+	case DPA_STATS_CNT_REASS:
+		cnt_cb->type = DPA_STATS_CNT_REASS;
+		cnt_cb->f_get_cnt_stats = get_cnt_reass_stats;
+
+		err = set_cnt_reass_cb(cnt_cb, params);
+		if (err != 0) {
+			pr_err("Failed to create Reassembly counter\n");
+			goto create_counter_err;
+		}
+		break;
+	case DPA_STATS_CNT_FRAG:
+		cnt_cb->type = DPA_STATS_CNT_FRAG;
+		cnt_cb->f_get_cnt_stats = get_cnt_frag_stats;
+
+		err = set_cnt_frag_cb(cnt_cb, params);
+		if (err != 0) {
+			pr_err("Failed to create Fragmentation counter\n");
+			goto create_counter_err;
+		}
+		break;
+	case DPA_STATS_CNT_POLICER:
+		cnt_cb->type = DPA_STATS_CNT_POLICER;
+		cnt_cb->f_get_cnt_stats = get_cnt_plcr_stats;
+
+		err = set_cnt_plcr_cb(cnt_cb, params);
+		if (err != 0) {
+			pr_err("Failed to create Policer counter\n");
+			goto create_counter_err;
+		}
+		break;
+	case DPA_STATS_CNT_CLASSIF_TBL:
+		cnt_cb->type = DPA_STATS_CNT_CLASSIF_TBL;
+
+		err = set_cnt_classif_tbl_cb(cnt_cb, params);
+		if (err != 0) {
+			pr_err("Failed to create Classif Table counter\n");
+			goto create_counter_err;
+		}
+		break;
+	case DPA_STATS_CNT_CLASSIF_NODE:
+		cnt_cb->type = DPA_STATS_CNT_CLASSIF_NODE;
+
+		err = set_cnt_ccnode_cb(cnt_cb, params);
+		if (err != 0) {
+			pr_err("Failed to create Classif Cc Node counter\n");
+			goto create_counter_err;
+		}
+		break;
+	case DPA_STATS_CNT_IPSEC:
+		cnt_cb->type = DPA_STATS_CNT_IPSEC;
+		cnt_cb->f_get_cnt_stats = get_cnt_ipsec_stats;
+
+		err = set_cnt_ipsec_cb(cnt_cb, params);
+		if (err != 0) {
+			pr_err("Failed to create IPSec counter\n");
+			goto create_counter_err;
+		}
+		break;
+	case DPA_STATS_CNT_TRAFFIC_MNG:
+		pr_err("Counter type not supported\n");
+		mutex_unlock(&cnt_cb->lock);
+		return -EINVAL;
+	default:
+		pr_err("Invalid counter type\n");
+		mutex_unlock(&cnt_cb->lock);
+		return -EINVAL;
+	}
+
+	/* Counter was created. Return the counter id */
+	*dpa_stats_cnt_id = cnt_cb->id;
+
+	/* Unlock the counter control block structure */
+	mutex_unlock(&cnt_cb->lock);
+
+	return 0;
+
+create_counter_err:
+	/*
+	 * An invalid Counter ID is returned if 'put_cnt' succeeds and the
+	 * actual reserved Counter ID if it fails. The Counter ID can be used
+	 * to try again to free resources by calling dpa_stats_remove_counter
+	 */
+
+	*dpa_stats_cnt_id = cnt_cb->id;
+
+	err_rb = put_cnt(dpa_stats, cnt_cb);
+	if (!err_rb)
+		*dpa_stats_cnt_id = DPA_OFFLD_INVALID_OBJECT_ID;
+
+	/* Unlock the counter control block structure */
+	mutex_unlock(&cnt_cb->lock);
+
+	return err;
+}
+EXPORT_SYMBOL(dpa_stats_create_counter);
+
+/*
+ * Create a class counter — one counter spanning several members of the
+ * same type (MACs, SAs, table keys, ...).  On success *dpa_stats_cnt_id
+ * holds the new counter id; on failure the slot is returned to the free
+ * pool, or its reserved id is reported so the caller can retry the
+ * release through dpa_stats_remove_counter().
+ */
+int dpa_stats_create_class_counter(int dpa_stats_id,
+				   const struct dpa_stats_cls_cnt_params *params,
+				   int *dpa_stats_cnt_id)
+{
+	struct dpa_stats *dpa_stats = NULL;
+	struct dpa_stats_cnt_cb *cnt_cb = NULL;
+	int err = 0, err_rb = 0;
+
+	/* multiple DPA Stats instances are not currently supported */
+	unused(dpa_stats_id);
+
+	if (!gbl_dpa_stats) {
+		pr_err("dpa_stats component is not initialized\n");
+		return -EPERM;
+	}
+
+	if (!dpa_stats_cnt_id) {
+		pr_err("dpa_stats_cnt_id can't be NULL\n");
+		return -EINVAL;
+	}
+	*dpa_stats_cnt_id = DPA_OFFLD_INVALID_OBJECT_ID;
+
+	if (params->class_members > DPA_STATS_MAX_NUM_OF_CLASS_MEMBERS) {
+		pr_err("exceed maximum number of class members: %d\n",
+				DPA_STATS_MAX_NUM_OF_CLASS_MEMBERS);
+		return -EINVAL;
+	}
+
+	dpa_stats = gbl_dpa_stats;
+
+	err = get_new_cnt(dpa_stats, &cnt_cb);
+	if (err < 0) {
+		pr_err("Failed retrieving a preallocated counter\n");
+		return err;
+	}
+
+	/* Acquire the lock for the counter control block */
+	mutex_lock(&cnt_cb->lock);
+
+	/*
+	 * For table/node counters f_get_cnt_stats is selected inside the
+	 * set_cls_cnt_* helper; the other types use a fixed routine.
+	 */
+	switch (params->type) {
+	case DPA_STATS_CNT_ETH:
+		cnt_cb->type = DPA_STATS_CNT_ETH;
+		cnt_cb->f_get_cnt_stats = get_cnt_eth_stats;
+
+		err = set_cls_cnt_eth_cb(cnt_cb, params);
+		if (err != 0) {
+			pr_err("Failed to create ETH counter\n");
+			goto create_counter_err;
+		}
+		break;
+	case DPA_STATS_CNT_REASS:
+		cnt_cb->type = DPA_STATS_CNT_REASS;
+		cnt_cb->f_get_cnt_stats = get_cnt_reass_stats;
+
+		err = set_cls_cnt_reass_cb(cnt_cb, params);
+		if (err != 0) {
+			pr_err("Failed to create Reassembly counter\n");
+			goto create_counter_err;
+		}
+		break;
+	case DPA_STATS_CNT_FRAG:
+		cnt_cb->type = DPA_STATS_CNT_FRAG;
+		cnt_cb->f_get_cnt_stats = get_cnt_frag_stats;
+
+		err = set_cls_cnt_frag_cb(cnt_cb, params);
+		if (err != 0) {
+			pr_err("Failed to create Fragmentation counter\n");
+			goto create_counter_err;
+		}
+		break;
+	case DPA_STATS_CNT_POLICER:
+		cnt_cb->type = DPA_STATS_CNT_POLICER;
+		cnt_cb->f_get_cnt_stats = get_cnt_plcr_stats;
+
+		err = set_cls_cnt_plcr_cb(cnt_cb, params);
+		if (err != 0) {
+			pr_err("Failed to create Policer counter\n");
+			goto create_counter_err;
+		}
+		break;
+	case DPA_STATS_CNT_CLASSIF_TBL:
+		cnt_cb->type = DPA_STATS_CNT_CLASSIF_TBL;
+
+		err = set_cls_cnt_classif_tbl_cb(cnt_cb, params);
+		if (err != 0) {
+			pr_err("Failed to create Classif Table counter\n");
+			goto create_counter_err;
+		}
+		break;
+	case DPA_STATS_CNT_CLASSIF_NODE:
+		cnt_cb->type = DPA_STATS_CNT_CLASSIF_NODE;
+
+		err = set_cls_cnt_ccnode_cb(cnt_cb, params);
+		if (err != 0) {
+			pr_err("Failed to create Classif Cc Node counter\n");
+			goto create_counter_err;
+		}
+		break;
+	case DPA_STATS_CNT_IPSEC:
+		cnt_cb->type = DPA_STATS_CNT_IPSEC;
+		cnt_cb->f_get_cnt_stats = get_cnt_ipsec_stats;
+
+		err = set_cls_cnt_ipsec_cb(cnt_cb, params);
+		if (err != 0) {
+			pr_err("Failed to create IPSec counter\n");
+			goto create_counter_err;
+		}
+
+		break;
+	case DPA_STATS_CNT_TRAFFIC_MNG:
+		pr_err("Counter type not supported\n");
+		mutex_unlock(&cnt_cb->lock);
+		return -EINVAL;
+	default:
+		pr_err("Invalid counter type\n");
+		mutex_unlock(&cnt_cb->lock);
+		return -EINVAL;
+	}
+
+	/* Counter was created. Return the counter id */
+	*dpa_stats_cnt_id = cnt_cb->id;
+
+	/* Unlock the counter control block */
+	mutex_unlock(&cnt_cb->lock);
+
+	return 0;
+
+create_counter_err:
+	/*
+	 * An invalid Counter ID is returned if 'put_cnt' succeeds and the
+	 * actual reserved Counter ID if it fails. The Counter ID can be used
+	 * to try again to free resources by calling dpa_stats_remove_counter
+	 */
+	*dpa_stats_cnt_id = cnt_cb->id;
+
+	err_rb = put_cnt(dpa_stats, cnt_cb);
+	if (!err_rb)
+		*dpa_stats_cnt_id = DPA_OFFLD_INVALID_OBJECT_ID;
+
+	/* Unlock the counter control block */
+	mutex_unlock(&cnt_cb->lock);
+
+	return err;
+}
+EXPORT_SYMBOL(dpa_stats_create_class_counter);
+
+/*
+ * Replace one member of an existing class counter (classifier table key
+ * or IPSec SA id).  The counter must not be scheduled for retrieval and
+ * must not be in use (-EBUSY / -EAGAIN otherwise).
+ */
+int dpa_stats_modify_class_counter(int dpa_stats_cnt_id,
+			const struct dpa_stats_cls_member_params *params,
+			int member_index)
+{
+	struct dpa_stats *dpa_stats = NULL;
+	struct dpa_stats_cnt_cb *cnt_cb = NULL;
+	int err = 0;
+
+	if (!gbl_dpa_stats) {
+		pr_err("dpa_stats component is not initialized\n");
+		return -EPERM;
+	}
+
+	dpa_stats = gbl_dpa_stats;
+
+	/*
+	 * Valid ids are 0 .. max_counters - 1; the previous '>' test let
+	 * id == max_counters through and indexed one past cnts_cb[].
+	 */
+	if (dpa_stats_cnt_id < 0 ||
+	    dpa_stats_cnt_id >= dpa_stats->config.max_counters) {
+		pr_err("Invalid Counter id %d provided\n", dpa_stats_cnt_id);
+		return -EINVAL;
+	}
+
+	/* Counter scheduled for the retrieve mechanism can't be modified */
+	if (cnt_is_sched(dpa_stats, dpa_stats_cnt_id)) {
+		pr_err("Counter id %d is in use\n", dpa_stats_cnt_id);
+		return -EBUSY;
+	}
+
+	/* Get counter control block */
+	cnt_cb = &dpa_stats->cnts_cb[dpa_stats_cnt_id];
+
+	/* Acquire counter control block lock */
+	err = mutex_trylock(&cnt_cb->lock);
+	if (err == 0)
+		return -EAGAIN;
+
+	/* Validity check for this counter */
+	if (cnt_cb->index == DPA_OFFLD_INVALID_OBJECT_ID) {
+		pr_err("Invalid Counter id %d provided\n", dpa_stats_cnt_id);
+		mutex_unlock(&cnt_cb->lock);
+		return -EINVAL;
+	}
+
+	if (params->type == DPA_STATS_CLS_MEMBER_SINGLE_KEY ||
+	    params->type == DPA_STATS_CLS_MEMBER_PAIR_KEY) {
+		/* Modify classifier table class member */
+		err = set_classif_tbl_member(params, member_index, cnt_cb);
+		if (err < 0) {
+			pr_err("Failed to modify class member\n");
+			mutex_unlock(&cnt_cb->lock);
+			return -EINVAL;
+		}
+
+	} else if (params->type == DPA_STATS_CLS_MEMBER_SA_ID) {
+		/* Modify IPSec class member */
+		err = set_ipsec_member(params, member_index, cnt_cb);
+		if (err < 0) {
+			pr_err("Failed to modify class member\n");
+			mutex_unlock(&cnt_cb->lock);
+			return -EINVAL;
+		}
+	} else {
+		pr_err("Invalid member type\n");
+		mutex_unlock(&cnt_cb->lock);
+		return -EINVAL;
+	}
+
+	/* Unlock the counter control block */
+	mutex_unlock(&cnt_cb->lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(dpa_stats_modify_class_counter);
+
+/*
+ * Destroy a counter: free any per-key allocations and return the id to
+ * the free pool.  The counter must not be scheduled for retrieval and
+ * must not be in use (-EBUSY / -EAGAIN otherwise).
+ */
+int dpa_stats_remove_counter(int dpa_stats_cnt_id)
+{
+	struct dpa_stats *dpa_stats = NULL;
+	struct dpa_stats_cnt_cb *cnt_cb = NULL;
+	int err = 0;
+	uint32_t i;
+
+	if (!gbl_dpa_stats) {
+		pr_err("dpa_stats component is not initialized\n");
+		return -EPERM;
+	}
+
+	dpa_stats = gbl_dpa_stats;
+
+	/*
+	 * Valid ids are 0 .. max_counters - 1; the previous '>' test let
+	 * id == max_counters through and indexed one past cnts_cb[].
+	 */
+	if (dpa_stats_cnt_id < 0 ||
+	    dpa_stats_cnt_id >= dpa_stats->config.max_counters) {
+		pr_err("Invalid Counter id %d provided\n", dpa_stats_cnt_id);
+		return -EINVAL;
+	}
+
+	/* Counter scheduled for the retrieve mechanism can't be removed */
+	if (cnt_is_sched(dpa_stats, dpa_stats_cnt_id)) {
+		pr_err("Counter id %d is in use\n", dpa_stats_cnt_id);
+		return -EBUSY;
+	}
+
+	/* Get counter control block */
+	cnt_cb = &dpa_stats->cnts_cb[dpa_stats_cnt_id];
+
+	/* Acquire counter control block lock */
+	err = mutex_trylock(&cnt_cb->lock);
+	if (err == 0)
+		return -EAGAIN;
+
+	/* Validity check for this counter */
+	if (cnt_cb->index == DPA_OFFLD_INVALID_OBJECT_ID) {
+		pr_err("Invalid Counter id %d provided\n", dpa_stats_cnt_id);
+		mutex_unlock(&cnt_cb->lock);
+		return -EINVAL;
+	}
+
+	/*
+	 * Remove the allocated memory for keys bytes and masks, and NULL
+	 * the freed pointers so a repeated removal cannot double-free.
+	 */
+	if (cnt_cb->type == DPA_STATS_CNT_CLASSIF_NODE)
+		for (i = 0; i < cnt_cb->members_num; i++) {
+			kfree(cnt_cb->ccnode_cb.keys[i].byte);
+			cnt_cb->ccnode_cb.keys[i].byte = NULL;
+			kfree(cnt_cb->ccnode_cb.keys[i].mask);
+			cnt_cb->ccnode_cb.keys[i].mask = NULL;
+		}
+
+	/* Remove the allocated memory for keys bytes and masks */
+	if (cnt_cb->type == DPA_STATS_CNT_CLASSIF_TBL)
+		for (i = 0; i < cnt_cb->members_num; i++) {
+			kfree(cnt_cb->tbl_cb.keys[i].key.byte);
+			cnt_cb->tbl_cb.keys[i].key.byte = NULL;
+			kfree(cnt_cb->tbl_cb.keys[i].key.mask);
+			cnt_cb->tbl_cb.keys[i].key.mask = NULL;
+		}
+
+	/* Release the counter id in the Counter IDs circular queue */
+	err = put_cnt(dpa_stats, cnt_cb);
+	if (err < 0) {
+		pr_err("Failed to release a preallocated counter\n");
+		mutex_unlock(&cnt_cb->lock);
+		return -EINVAL;
+	}
+
+	/* Release counter lock */
+	mutex_unlock(&cnt_cb->lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(dpa_stats_remove_counter);
+
+/*
+ * Retrieve the values of a set of counters into the storage area.
+ * With a request_done callback the retrieval runs asynchronously on the
+ * instance workqueue; without one it is performed synchronously before
+ * returning.  *cnts_len is set to the total number of bytes the request
+ * will produce.
+ */
+int dpa_stats_get_counters(struct dpa_stats_cnt_request_params params,
+			   int *cnts_len,
+			   dpa_stats_request_cb request_done)
+{
+	struct dpa_stats *dpa_stats = NULL;
+	struct dpa_stats_req_cb *req_cb = NULL;
+	struct dpa_stats_cnt_cb *cnt_cb = NULL;
+	int err = 0, cnt_id = 0, req_id = 0;
+	uint32_t i = 0;
+
+	if (!gbl_dpa_stats) {
+		pr_err("dpa_stats component is not initialized\n");
+		return -EPERM;
+	}
+
+	/* Check user-provided cnts_len pointer */
+	if (!cnts_len) {
+		pr_err("Parameter cnts_len can't be NULL\n");
+		return -EINVAL;
+	}
+
+	/* Check user-provided params.cnts_ids pointer */
+	if (!params.cnts_ids) {
+		pr_err("Parameter params.cnts_ids can't be NULL\n");
+		return -EINVAL;
+	}
+
+	dpa_stats = gbl_dpa_stats;
+
+	*cnts_len = 0;
+
+	/*
+	 * Valid ids are 0 .. max_counters - 1.  Reject any negative id
+	 * (the previous test only caught DPA_OFFLD_INVALID_OBJECT_ID) and
+	 * id == max_counters, both of which would index cnts_cb[] out of
+	 * bounds below.
+	 */
+	for (i = 0; i < params.cnts_ids_len; i++) {
+		if (params.cnts_ids[i] < 0 ||
+		    params.cnts_ids[i] >= dpa_stats->config.max_counters) {
+			pr_err("Invalid Counter id %d provided\n",
+					params.cnts_ids[i]);
+			return -EINVAL;
+		}
+	}
+
+	block_sched_cnts(dpa_stats, params.cnts_ids, params.cnts_ids_len);
+
+	/* Calculate number of bytes occupied by the counters */
+	for (i = 0; i < params.cnts_ids_len; i++) {
+		cnt_id = params.cnts_ids[i];
+
+		/* Get counter's control block */
+		cnt_cb = &dpa_stats->cnts_cb[cnt_id];
+
+		/* Acquire counter lock */
+		err = mutex_trylock(&cnt_cb->lock);
+		if (err == 0) {
+			pr_err("Counter %d is being used\n", cnt_id);
+			unblock_sched_cnts(dpa_stats, params.cnts_ids,
+					params.cnts_ids_len);
+			return -EBUSY;
+		}
+
+		/* Check if counter control block is initialized */
+		if (cnt_cb->index == DPA_OFFLD_INVALID_OBJECT_ID) {
+			pr_err("Invalid Counter id %d provided\n", cnt_id);
+			mutex_unlock(&cnt_cb->lock);
+			unblock_sched_cnts(dpa_stats, params.cnts_ids,
+					params.cnts_ids_len);
+			return -EINVAL;
+		}
+
+		*cnts_len += cnt_cb->bytes_num;
+		mutex_unlock(&cnt_cb->lock);
+	}
+
+	/* The request must fit inside the configured storage area */
+	if ((params.storage_area_offset + *cnts_len) >
+	    dpa_stats->config.storage_area_len) {
+		pr_err("Invalid offset %d provided\n",
+				params.storage_area_offset);
+		unblock_sched_cnts(dpa_stats, params.cnts_ids,
+				params.cnts_ids_len);
+		return -EINVAL;
+	}
+
+	/* Create a new request */
+	err = get_new_req(dpa_stats, &req_id, &req_cb);
+	if (err < 0) {
+		pr_err("Failed retrieving a preallocated request\n");
+		/* Release counters locks */
+		unblock_sched_cnts(dpa_stats, params.cnts_ids,
+				params.cnts_ids_len);
+		return err;
+	}
+
+	/* Store user-provided request parameters */
+	memcpy(req_cb->config.cnts_ids,
+			params.cnts_ids, params.cnts_ids_len * sizeof(int));
+
+	req_cb->config.reset_cnts = params.reset_cnts;
+	req_cb->config.storage_area_offset = params.storage_area_offset;
+	req_cb->config.cnts_ids_len = params.cnts_ids_len;
+	req_cb->request_done = request_done;
+
+	/* Set memory area where the request should write */
+	req_cb->request_area = dpa_stats->config.storage_area +
+			params.storage_area_offset;
+
+	if (!req_cb->request_done) {
+		/* Call is synchronous */
+		err = treat_cnts_request(dpa_stats, req_cb);
+		if (err < 0)
+			pr_err("Failed to retrieve counter values\n");
+
+		err = put_req(dpa_stats, req_cb);
+
+		return err;
+	} else {
+		/* Call is asynchronous */
+		queue_work(dpa_stats->async_req_workqueue,
+				&req_cb->async_req_work);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(dpa_stats_get_counters);
+
+/*
+ * Zero the accumulated totals of a set of counters.  last_stats is left
+ * untouched on purpose: it mirrors the raw hardware value, so the next
+ * retrieval computes a correct delta from zero.
+ */
+int dpa_stats_reset_counters(int *cnts_ids, unsigned int cnts_ids_len)
+{
+	struct dpa_stats *dpa_stats = NULL;
+	struct dpa_stats_cnt_cb *cnt_cb = NULL;
+	uint32_t i = 0;
+	int err = 0;
+
+	if (!gbl_dpa_stats) {
+		pr_err("dpa_stats component is not initialized\n");
+		return -EPERM;
+	}
+
+	/* Check user-provided cnts_len pointer */
+	if (cnts_ids_len == 0) {
+		pr_err("Parameter cnts_ids_len can't be 0\n");
+		return -EINVAL;
+	}
+
+	/* Check user-provided cnts_ids pointer */
+	if (!cnts_ids) {
+		pr_err("Parameter cnts_ids can't be NULL\n");
+		return -EINVAL;
+	}
+
+	dpa_stats = gbl_dpa_stats;
+
+	/*
+	 * Valid ids are 0 .. max_counters - 1.  Reject any negative id
+	 * (the previous test only caught DPA_OFFLD_INVALID_OBJECT_ID) and
+	 * id == max_counters, both of which would index cnts_cb[] out of
+	 * bounds below.
+	 */
+	for (i = 0; i < cnts_ids_len; i++)
+		if (cnts_ids[i] < 0 ||
+		    cnts_ids[i] >= dpa_stats->config.max_counters) {
+			pr_err("Invalid Counter id %d provided\n", cnts_ids[i]);
+			return -EINVAL;
+		}
+
+	block_sched_cnts(dpa_stats, cnts_ids, cnts_ids_len);
+
+	/* Zero the accumulated totals of every requested counter */
+	for (i = 0; i < cnts_ids_len; i++) {
+		/* Get counter's control block */
+		cnt_cb = &dpa_stats->cnts_cb[cnts_ids[i]];
+
+		/* Acquire counter lock */
+		err = mutex_trylock(&cnt_cb->lock);
+		if (err == 0) {
+			pr_err("Counter %d is being used\n", cnts_ids[i]);
+			unblock_sched_cnts(dpa_stats,
+					cnts_ids, cnts_ids_len);
+			return -EBUSY;
+		}
+
+		/* Check if counter control block is initialized */
+		if (cnt_cb->index == DPA_OFFLD_INVALID_OBJECT_ID) {
+			pr_err("Invalid Counter id %d provided\n", cnts_ids[i]);
+			mutex_unlock(&cnt_cb->lock);
+			unblock_sched_cnts(dpa_stats,
+					cnts_ids, cnts_ids_len);
+			return -EINVAL;
+		}
+		/*
+		 * NOTE(review): this memset assumes info.stats is an inline
+		 * [MAX_NUM_OF_MEMBERS][MAX_NUM_OF_STATS] array; confirm
+		 * against the stats_info definition in dpa_stats.h.
+		 */
+		memset(&cnt_cb->info.stats, 0, (MAX_NUM_OF_MEMBERS *
+				MAX_NUM_OF_STATS * sizeof(uint64_t)));
+		mutex_unlock(&cnt_cb->lock);
+	}
+
+	unblock_sched_cnts(dpa_stats, cnts_ids, cnts_ids_len);
+
+	return 0;
+}
+EXPORT_SYMBOL(dpa_stats_reset_counters);
+
+/*
+ * Tear down the DPA Stats instance and release every resource acquired
+ * by dpa_stats_init().  Always returns 0.
+ */
+int dpa_stats_free(int dpa_stats_id)
+{
+	/* multiple DPA Stats instances are not currently supported */
+	unused(dpa_stats_id);
+
+	free_resources();
+
+	return 0;
+}
+EXPORT_SYMBOL(dpa_stats_free);
diff --git a/drivers/staging/fsl_dpa_offload/dpa_stats.h b/drivers/staging/fsl_dpa_offload/dpa_stats.h
new file mode 100644
index 0000000..a429258
--- /dev/null
+++ b/drivers/staging/fsl_dpa_offload/dpa_stats.h
@@ -0,0 +1,197 @@
+
+/* Copyright 2008-2013 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Internal DPA Statistics Application Programming Interface
+ */
+
+#ifndef __DPA_STATS_H
+#define __DPA_STATS_H
+
+/* DPA offloading layer includes */
+#include "linux/fsl_dpa_stats.h"
+#include "linux/fsl_dpa_ipsec.h"
+#include "cq.h"
+
+#define MAX_NUM_OF_STATS 23
+#define NUM_OF_CNT_TYPES (DPA_STATS_CNT_TRAFFIC_MNG + 1)
+#define MAX_NUM_OF_MEMBERS DPA_STATS_MAX_NUM_OF_CLASS_MEMBERS
+
+
+/* DPA Stats - Control Block (internal state of one DPA Stats instance) */
+struct dpa_stats {
+	struct dpa_stats_params config; /* Configuration parameters */
+	struct cq *cnt_id_cq; /* Circular Queue with ids for stats counters */
+	int *used_cnt_ids; /* Counter ids used by this dpa_stats instance */
+	struct dpa_stats_cnt_cb *cnts_cb; /* Array of counters control blocks */
+
+	int *used_req_ids; /* Request ids used by this dpa_stats instance */
+	struct dpa_stats_req_cb *reqs_cb; /* Array of counter requests */
+	struct cq *req_id_cq; /* Circular Queue with ids for counters request */
+	/*
+	 * Array that stores the mapping
+	 * between counter selection and statistics values
+	 */
+	int stats_sel[NUM_OF_CNT_TYPES][MAX_NUM_OF_STATS];
+	/*
+	 * Multi threaded work queue used to defer the work to be
+	 * done when an asynchronous counters request is received
+	 */
+	struct workqueue_struct *async_req_workqueue;
+	struct mutex lock; /* Lock for this dpa_stats instance */
+	/* Counters that are scheduled for a retrieve operation;
+	 * indexed directly by counter id, guarded by 'sched_cnt_lock' */
+	bool sched_cnt_ids[DPA_STATS_MAX_NUM_OF_COUNTERS];
+	struct mutex sched_cnt_lock; /* Lock for array of scheduled counters */
+};
+
+/* DPA Stats request control block - tracks one counters-retrieve request */
+struct dpa_stats_req_cb {
+	struct work_struct async_req_work; /* Asynchronous request work */
+	struct dpa_stats_cnt_request_params config;
+	/* Parameters provided to the request */
+	uint32_t id; /* Request id */
+	int index; /* Request index in the 'used_req_ids' array */
+	void *request_area;
+	/* Address in the storage area associated with this request */
+	uint32_t bytes_num; /* Number of bytes written by this request */
+	uint32_t cnts_num; /* Number of counters written by this request */
+	dpa_stats_request_cb request_done; /* Callback to notify upper layer */
+};
+
+/* DPA Stats - per-counter statistics information */
+struct stats_info {
+	/*
+	 * Array of statistics offsets relative to
+	 * corresponding statistics area
+	 */
+	unsigned int stats_off[MAX_NUM_OF_STATS];
+	unsigned int stats_num; /* Number of statistics to retrieve */
+	uint64_t stats[MAX_NUM_OF_MEMBERS][MAX_NUM_OF_STATS];
+	/* Array to store statistics values (current, per class member) */
+	uint64_t last_stats[MAX_NUM_OF_MEMBERS][MAX_NUM_OF_STATS];
+	/* Array to store previous statistics values */
+	bool reset; /* Reset counter's statistics */
+};
+
+/* DPA Stats General (generic) Counter control block */
+struct dpa_stats_cnt_gen_cb {
+	/* Array of objects for which to retrieve statistics,
+	 * one entry per class member */
+	void *objs[MAX_NUM_OF_MEMBERS];
+};
+
+/* DPA Stats Classifier Table key descriptor */
+struct dpa_stats_lookup_key {
+	void *cc_node; /* Handle of Cc Node the lookup key belongs to */
+	struct dpa_offload_lookup_key key; /* Key descriptor */
+	bool valid; /* Lookup key is valid (i.e. not an invalidated member) */
+	void *frag; /* Fragmentation handle corresponding to this key */
+};
+
+/* DPA Stats Classif Table control block */
+struct dpa_stats_cnt_classif_tbl_cb {
+	int td; /* Table descriptor */
+	enum dpa_cls_tbl_type type; /* The type of the DPA Classifier table */
+	/* Array of key descriptors for which to provide statistics */
+	struct dpa_stats_lookup_key keys[MAX_NUM_OF_MEMBERS];
+};
+
+/* DPA Stats Classif Node control block */
+struct dpa_stats_cnt_classif_cb {
+	void *cc_node; /* Handle of Cc Node the lookup keys belong to */
+	/* Array of key descriptors for which to provide statistics */
+	struct dpa_offload_lookup_key keys[MAX_NUM_OF_MEMBERS];
+};
+
+/* DPA Stats IPSec Counter control block */
+struct dpa_stats_cnt_ipsec_cb {
+	int sa_id[MAX_NUM_OF_MEMBERS]; /* Array of Security Association ids */
+	bool valid[MAX_NUM_OF_MEMBERS]; /* Security Association id is valid */
+};
+
+/*
+ * Counter-type specific routine that retrieves the current statistics of
+ * counter 'cnt_cb' on behalf of request 'req_cb'. By driver convention a
+ * negative value indicates failure (confirm against the implementations).
+ */
+typedef int get_cnt_stats(struct dpa_stats_req_cb *req_cb,
+			  struct dpa_stats_cnt_cb *cnt_cb);
+
+/* DPA Stats counter control block - internal state of one counter */
+struct dpa_stats_cnt_cb {
+	struct dpa_stats *dpa_stats; /* Pointer to DPA Stats */
+	uint32_t id; /* Counter identifier */
+	int index; /* Counter index in the 'used_cnt_ids' array */
+	uint32_t bytes_num; /* Number of bytes occupied by this counter */
+	struct mutex lock; /* Lock for this counter control block */
+	bool used; /* Counter has been scheduled for retrieve */
+	enum dpa_stats_cnt_type type; /* Counter type */
+	struct stats_info info; /* Counter's statistics information */
+	unsigned int members_num; /* Number of objects to retrieve statistics */
+	/* Type-specific state; the active member is selected by 'type' */
+	union {
+		struct dpa_stats_cnt_gen_cb gen_cb;
+		struct dpa_stats_cnt_classif_tbl_cb tbl_cb;
+		struct dpa_stats_cnt_classif_cb ccnode_cb;
+		struct dpa_stats_cnt_ipsec_cb ipsec_cb;
+	};
+	/* Function used to retrieve the statistics for a specific counter */
+	get_cnt_stats *f_get_cnt_stats;
+};
+
+/*
+ * Mark every counter in 'cnts_ids' as scheduled for a retrieve operation.
+ * Counter ids are used as direct indexes into the 'sched_cnt_ids' flags
+ * array; the whole update is done under 'sched_cnt_lock'.
+ */
+static inline void block_sched_cnts(struct dpa_stats *dpa_stats,
+				    int *cnts_ids, int cnts_ids_len)
+{
+	int i;
+
+	mutex_lock(&dpa_stats->sched_cnt_lock);
+	for (i = 0; i < cnts_ids_len; i++)
+		dpa_stats->sched_cnt_ids[cnts_ids[i]] = true;
+	mutex_unlock(&dpa_stats->sched_cnt_lock);
+}
+
+/*
+ * Clear the 'scheduled for retrieve' flag of every counter in 'cnts_ids',
+ * under 'sched_cnt_lock'. Counterpart of block_sched_cnts().
+ */
+static inline void unblock_sched_cnts(struct dpa_stats *dpa_stats,
+				      int *cnts_ids, int cnts_ids_len)
+{
+	int i;
+
+	mutex_lock(&dpa_stats->sched_cnt_lock);
+	for (i = 0; i < cnts_ids_len; i++)
+		dpa_stats->sched_cnt_ids[cnts_ids[i]] = false;
+	mutex_unlock(&dpa_stats->sched_cnt_lock);
+}
+
+/*
+ * Return non-zero if counter 'cnt_id' is currently scheduled for a
+ * retrieve operation. The flag is read under 'sched_cnt_lock'.
+ */
+static inline int cnt_is_sched(struct dpa_stats *dpa_stats, int cnt_id)
+{
+	int sched;
+
+	mutex_lock(&dpa_stats->sched_cnt_lock);
+	sched = dpa_stats->sched_cnt_ids[cnt_id];
+	mutex_unlock(&dpa_stats->sched_cnt_lock);
+
+	return sched;
+}
+
+#endif /* __DPA_STATS_H */
diff --git a/include/linux/fsl_dpa_stats.h b/include/linux/fsl_dpa_stats.h
new file mode 100644
index 0000000..6abd08f
--- /dev/null
+++ b/include/linux/fsl_dpa_stats.h
@@ -0,0 +1,810 @@
+/* Copyright 2008-2012 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * DPA Statistics Application Programming Interface.
+ */
+
+#ifndef __FSL_DPA_STATS_H
+#define __FSL_DPA_STATS_H
+
+/* DPA offloading layer includes */
+#include "fsl_dpa_offload.h"
+
+/* Other includes */
+#include <linux/types.h>
+
+/* Maximum number simultaneous counters requests */
+#define DPA_STATS_MAX_NUM_OF_REQUESTS 256
+
+/* Maximum number of single and class counters */
+#define DPA_STATS_MAX_NUM_OF_COUNTERS 128
+
+/* Maximum number of class counter members */
+#define DPA_STATS_MAX_NUM_OF_CLASS_MEMBERS 256
+
+/* DPA Stats instance parameters */
+struct dpa_stats_params {
+
+	/* Maximum number of counters managed by DPA Stats instance */
+	unsigned int max_counters;
+
+	/* Pointer to the memory area where the values of the counters
+	 * will be written by the DPA Stats instance; owned by the caller */
+	void *storage_area;
+
+	/* Length in bytes of the storage memory area */
+	unsigned int storage_area_len;
+};
+
+/*
+ * Callback used to notify the upper layer that the requested counters values
+ * were written in the storage area. The 'storage_area_offset' is the offset
+ * in the storage_area and 'cnts_written' represents the number of counters
+ * successfully written. A positive 'bytes_written' is the size, in bytes, of
+ * the memory area that was written; a negative 'bytes_written' is the code
+ * of the error that occurred.
+ */
+typedef void (*dpa_stats_request_cb)(int dpa_stats_id,
+				     unsigned int storage_area_offset,
+				     unsigned int cnts_written,
+				     int bytes_written);
+
+/* DPA Stats Request parameters */
+struct dpa_stats_cnt_request_params {
+	/* Array of counter IDs to retrieve values for */
+	int *cnts_ids;
+
+	/* Size of array of counters to retrieve values for */
+	unsigned int cnts_ids_len;
+
+	/* Reset counters after the retrieve operation */
+	bool reset_cnts;
+
+	/* Offset inside the instance storage area where the values are
+	 * written, expressed in bytes */
+	unsigned int storage_area_offset;
+};
+
+/* DPA Stats counter types */
+enum dpa_stats_cnt_type {
+	DPA_STATS_CNT_ETH = 0, /* Ethernet counter */
+	DPA_STATS_CNT_REASS, /* IP Reassembly counter */
+	DPA_STATS_CNT_FRAG, /* IP Fragmentation counter */
+	DPA_STATS_CNT_POLICER, /* Policer counter */
+	DPA_STATS_CNT_CLASSIF_TBL, /* Classification Table counter */
+	DPA_STATS_CNT_CLASSIF_NODE, /* Classification Node counter */
+	DPA_STATS_CNT_IPSEC, /* IPSec counter */
+	DPA_STATS_CNT_TRAFFIC_MNG, /* Traffic Manager counter */
+#ifdef DPA_STATS_UNDER_CONSTRUCTION
+	/* RapidIO counter - only available while under construction */
+	DPA_STATS_CNT_RAPIDIO
+#endif
+};
+
+/* DPA Stats counter selection (bytes, packets or both) */
+enum dpa_stats_cnt_sel {
+	/* Total number of bytes */
+	DPA_STATS_CNT_NUM_OF_BYTES = 0,
+	/* Total number of packets */
+	DPA_STATS_CNT_NUM_OF_PACKETS,
+	/* Total number of bytes and total number of packets */
+	DPA_STATS_CNT_NUM_ALL
+};
+
+/* DPA Stats Ethernet counter selection (bit flags, OR-able) */
+enum dpa_stats_cnt_eth_sel {
+	/* Total number of dropped packets on receive */
+	DPA_STATS_CNT_ETH_DROP_PKTS = 0x00000001,
+	/* Total number of received bytes of data */
+	DPA_STATS_CNT_ETH_BYTES = 0x00000002,
+	/* Total number of received packets */
+	DPA_STATS_CNT_ETH_PKTS = 0x00000004,
+	/* Total number of received broadcast packets */
+	DPA_STATS_CNT_ETH_BC_PKTS = 0x00000008,
+	/* Total number of received multicast packets */
+	DPA_STATS_CNT_ETH_MC_PKTS = 0x00000010,
+	/* Total number of received frames with alignment error or invalid FCS */
+	DPA_STATS_CNT_ETH_CRC_ALIGN_ERR = 0x00000020,
+	/*
+	 * Total number of received packets that were
+	 * less than 64 bytes and were well formed
+	 */
+	DPA_STATS_CNT_ETH_UNDERSIZE_PKTS = 0x00000040,
+	/*
+	 * Total number of received packets that were above 1518 bytes
+	 * (non-VLAN) or 1522 (VLAN) or above a configured maximum frame
+	 * length and were well formed
+	 */
+	DPA_STATS_CNT_ETH_OVERSIZE_PKTS = 0x00000080,
+	/*
+	 * Total number of received packets that were less than
+	 * 64 bytes and had a bad FCS
+	 */
+	DPA_STATS_CNT_ETH_FRAGMENTS = 0x00000100,
+	/*
+	 * Total number of received packets with size above 1518 bytes
+	 * (non-VLAN) or 1522 (VLAN) or a configured maximum frame length
+	 * and with an invalid FCS or an alignment error
+	 */
+	DPA_STATS_CNT_ETH_JABBERS = 0x00000200,
+	/* Total number of received packets with size below 64 bytes */
+	DPA_STATS_CNT_ETH_64BYTE_PKTS = 0x00000400,
+	/* Total number of received packets with size between 65-127 bytes */
+	DPA_STATS_CNT_ETH_65_127BYTE_PKTS = 0x00000800,
+	/* Total number of received packets with size between 128-255 bytes */
+	DPA_STATS_CNT_ETH_128_255BYTE_PKTS = 0x00001000,
+	/* Total number of received packets with size between 256-511 bytes */
+	DPA_STATS_CNT_ETH_256_511BYTE_PKTS = 0x00002000,
+	/* Total number of received packets with size between 512-1023 bytes */
+	DPA_STATS_CNT_ETH_512_1023BYTE_PKTS = 0x00004000,
+	/* Total number of received packets with size between 1024-1518 bytes */
+	DPA_STATS_CNT_ETH_1024_1518BYTE_PKTS = 0x00008000,
+	/* Total number of packets on transmit */
+	DPA_STATS_CNT_ETH_OUT_PKTS = 0x00010000,
+	/* Total number of dropped packets on transmit */
+	DPA_STATS_CNT_ETH_OUT_DROP_PKTS = 0x00020000,
+	/* Total number of transmitted bytes of data */
+	DPA_STATS_CNT_ETH_OUT_BYTES = 0x00040000,
+	/* Total number of received frames with errors */
+	DPA_STATS_CNT_ETH_IN_ERRORS = 0x00080000,
+	/* Total number of transmitted frames with errors */
+	DPA_STATS_CNT_ETH_OUT_ERRORS = 0x00100000,
+	/* Total number of unicast packets on receive */
+	DPA_STATS_CNT_ETH_IN_UNICAST_PKTS = 0x00200000,
+	/* Total number of unicast packets on transmit */
+	DPA_STATS_CNT_ETH_OUT_UNICAST_PKTS = 0x00400000,
+	/* Select all counters */
+	DPA_STATS_CNT_ETH_ALL = 0x00800000
+};
+
+/* DPA Stats Ethernet interface id (relative to the owning engine) */
+enum dpa_stats_cnt_eth_id {
+	DPA_STATS_ETH_1G_PORT0 = 0, /* 1G port, ETH id 0 */
+	DPA_STATS_ETH_1G_PORT1, /* 1G port, ETH id 1 */
+	DPA_STATS_ETH_1G_PORT2, /* 1G port, ETH id 2 */
+	DPA_STATS_ETH_1G_PORT3, /* 1G port, ETH id 3 */
+	DPA_STATS_ETH_1G_PORT4, /* 1G port, ETH id 4 */
+	DPA_STATS_ETH_1G_PORT5, /* 1G port, ETH id 5 */
+	DPA_STATS_ETH_10G_PORT0, /* 10G port, ETH id 0 */
+	DPA_STATS_ETH_10G_PORT1 /* 10G port, ETH id 1 */
+};
+
+/* DPA Stats Ethernet counter source definition */
+struct dpa_stats_cnt_eth_src {
+	/* Index of the engine device the Ethernet interface belongs to */
+	uint8_t engine_id;
+
+	/* Index of the Ethernet interface, relative to the engine */
+	enum dpa_stats_cnt_eth_id eth_id;
+};
+
+/* DPA Stats Ethernet counter parameters */
+struct dpa_stats_cnt_eth {
+	/* Ethernet counter source */
+	struct dpa_stats_cnt_eth_src src;
+
+	/*
+	 * Single or multiple selections of Ethernet counters
+	 * from enumeration dpa_stats_cnt_eth_sel (OR-ed bit flags)
+	 */
+	uint32_t cnt_sel;
+};
+
+/*
+ * DPA Stats IP Reassembly selection of counters that provide
+ * common information for both IPv4 and IPv6 protocols
+ */
+enum dpa_stats_cnt_reass_gen_sel {
+	/* Number of timeout occurrences */
+	DPA_STATS_CNT_REASS_TIMEOUT = 0x00000001,
+	/* Number of failed attempts to allocate a Reassembly Frame Descriptor */
+	DPA_STATS_CNT_REASS_RFD_POOL_BUSY = 0x00000002,
+	/* Number of internal buffer busy occurrences */
+	DPA_STATS_CNT_REASS_INT_BUFF_BUSY = 0x00000004,
+	/* Number of external buffer busy occurrences */
+	DPA_STATS_CNT_REASS_EXT_BUFF_BUSY = 0x00000008,
+	/* Number of Scatter/Gather fragments */
+	DPA_STATS_CNT_REASS_SG_FRAGS = 0x00000010,
+	/* Number of failed attempts to allocate a DMA semaphore */
+	DPA_STATS_CNT_REASS_DMA_SEM = 0x00000020,
+	/* Select all counters from dpa_stats_cnt_reass_gen_sel */
+	DPA_STATS_CNT_REASS_GEN_ALL = 0x00000040
+};
+
+/*
+ * DPA Stats IP Reassembly selection of counters that provide
+ * information only for IPv4 protocol
+ */
+enum dpa_stats_cnt_reass_ipv4_sel {
+	/* Number of successfully reassembled IPv4 frames */
+	DPA_STATS_CNT_REASS_IPv4_FRAMES = 0x00000080,
+	/* Number of valid IPv4 fragments */
+	DPA_STATS_CNT_REASS_IPv4_FRAGS_VALID = 0x00000100,
+	/* Number of processed IPv4 fragments */
+	DPA_STATS_CNT_REASS_IPv4_FRAGS_TOTAL = 0x00000200,
+	/* Number of malformed IPv4 fragments */
+	DPA_STATS_CNT_REASS_IPv4_FRAGS_MALFORMED = 0x00000400,
+	/* Number of discarded IPv4 fragments except Timeout condition */
+	DPA_STATS_CNT_REASS_IPv4_FRAGS_DISCARDED = 0x00000800,
+	/* Number of busy conditions due to Automatic Learning Hash access */
+	DPA_STATS_CNT_REASS_IPv4_AUTOLEARN_BUSY = 0x00001000,
+	/*
+	 * Number of IPv4 fragments occurrences when the number of
+	 * fragments-per-frame exceeds 16
+	 */
+	DPA_STATS_CNT_REASS_IPv4_EXCEED_16FRAGS = 0x00002000,
+	/* Select all counters from dpa_stats_cnt_reass_ipv4_sel */
+	DPA_STATS_CNT_REASS_IPv4_ALL = 0x00004000
+};
+
+/*
+ * DPA Stats IP Reassembly selection of counters that provide
+ * information only for IPv6 protocol
+ */
+enum dpa_stats_cnt_reass_ipv6_sel {
+	/* Number of successfully reassembled IPv6 frames */
+	DPA_STATS_CNT_REASS_IPv6_FRAMES = 0x00008000,
+	/* Number of valid IPv6 fragments */
+	DPA_STATS_CNT_REASS_IPv6_FRAGS_VALID = 0x00010000,
+	/* Number of processed IPv6 fragments */
+	DPA_STATS_CNT_REASS_IPv6_FRAGS_TOTAL = 0x00020000,
+	/* Number of malformed IPv6 fragments */
+	DPA_STATS_CNT_REASS_IPv6_FRAGS_MALFORMED = 0x00040000,
+	/* Number of discarded IPv6 fragments except Timeout condition */
+	DPA_STATS_CNT_REASS_IPv6_FRAGS_DISCARDED = 0x00080000,
+	/* Number of busy conditions due to Automatic Learning Hash access */
+	DPA_STATS_CNT_REASS_IPv6_AUTOLEARN_BUSY = 0x00100000,
+	/*
+	 * Number of IPv6 fragments occurrences when the number of
+	 * fragments-per-frame exceeds 16
+	 */
+	DPA_STATS_CNT_REASS_IPv6_EXCEED_16FRAGS = 0x00200000,
+	/* Select all counters from dpa_stats_cnt_reass_ipv6_sel */
+	DPA_STATS_CNT_REASS_IPv6_ALL = 0x00400000
+};
+
+/* DPA Stats IP Reassembly counter parameters */
+struct dpa_stats_cnt_reass {
+	/* Pointer to the IP Reassembly object */
+	void *reass;
+
+	/*
+	 * Single or multiple selection of IP Reassembly counters from one of
+	 * the enums: dpa_stats_cnt_reass_gen_sel, dpa_stats_cnt_reass_ipv4_sel
+	 * or dpa_stats_cnt_reass_ipv6_sel
+	 */
+	unsigned int cnt_sel;
+};
+
+/* DPA Stats Fragmentation counters (bit flags, OR-able) */
+enum dpa_stats_cnt_frag_sel {
+	/* Number of frames processed by fragmentation manipulation */
+	DPA_STATS_CNT_FRAG_TOTAL_FRAMES = 0x00000001,
+	/* Number of fragmented frames */
+	DPA_STATS_CNT_FRAG_FRAMES = 0x00000002,
+	/* Number of generated fragments */
+	DPA_STATS_CNT_FRAG_GEN_FRAGS = 0x00000004,
+	/* Select all counters from dpa_stats_cnt_frag_sel */
+	DPA_STATS_CNT_FRAG_ALL = 0x00000008
+};
+
+/* DPA Stats Fragmentation counter parameters */
+struct dpa_stats_cnt_frag {
+	/* Pointer to the IP Fragmentation object */
+	void *frag;
+
+	/*
+	 * Single or multiple selection of Fragmentation
+	 * counters from enum dpa_stats_cnt_frag_sel
+	 */
+	unsigned int cnt_sel;
+};
+
+/* DPA Stats Policer counters (bit flags, OR-able) */
+enum dpa_stats_cnt_plcr_sel {
+	/* Number of 'green' frames */
+	DPA_STATS_CNT_PLCR_GREEN_PKTS = 0x00000001,
+	/* Number of 'yellow' frames */
+	DPA_STATS_CNT_PLCR_YELLOW_PKTS = 0x00000002,
+	/* Number of 'red' frames */
+	DPA_STATS_CNT_PLCR_RED_PKTS = 0x00000004,
+	/* Number of recolored 'yellow' frames */
+	DPA_STATS_CNT_PLCR_RECOLOR_YELLOW_PKTS = 0x00000008,
+	/* Number of recolored 'red' frames */
+	DPA_STATS_CNT_PLCR_RECOLOR_RED_PKTS = 0x00000010,
+	/* Select all counters */
+	DPA_STATS_CNT_PLCR_ALL = 0x00000020
+};
+
+/* DPA Stats Policer counter parameters */
+struct dpa_stats_cnt_plcr {
+	/* Pointer to the Policer object */
+	void *plcr;
+
+	/*
+	 * Single or multiple selection of Policer counters
+	 * from enum dpa_stats_cnt_plcr_sel
+	 */
+	unsigned int cnt_sel;
+};
+
+/* DPA Stats Classification counters (bit flags, OR-able) */
+enum dpa_stats_cnt_classif_sel {
+	/* Number of bytes processed by classification entry */
+	DPA_STATS_CNT_CLASSIF_BYTES = 0x00000010,
+	/* Number of frames processed by classification entry */
+	DPA_STATS_CNT_CLASSIF_PACKETS = 0x00000020,
+	/* Number of frames for frame length range 0 */
+	DPA_STATS_CNT_CLASSIF_RMON_RANGE0 = 0x00000040,
+	/* Number of frames for frame length range 1 */
+	DPA_STATS_CNT_CLASSIF_RMON_RANGE1 = 0x00000080,
+	/* Number of frames for frame length range 2 */
+	DPA_STATS_CNT_CLASSIF_RMON_RANGE2 = 0x00000100,
+	/* Number of frames for frame length range 3 */
+	DPA_STATS_CNT_CLASSIF_RMON_RANGE3 = 0x00000200,
+	/* Number of frames for frame length range 4 */
+	DPA_STATS_CNT_CLASSIF_RMON_RANGE4 = 0x00000400,
+	/* Number of frames for frame length range 5 */
+	DPA_STATS_CNT_CLASSIF_RMON_RANGE5 = 0x00000800,
+	/* Number of frames for frame length range 6 */
+	DPA_STATS_CNT_CLASSIF_RMON_RANGE6 = 0x00001000,
+	/* Number of frames for frame length range 7 */
+	DPA_STATS_CNT_CLASSIF_RMON_RANGE7 = 0x00002000,
+	/* Number of frames for frame length range 8 */
+	DPA_STATS_CNT_CLASSIF_RMON_RANGE8 = 0x00004000,
+	/* Number of frames for frame length range 9 */
+	DPA_STATS_CNT_CLASSIF_RMON_RANGE9 = 0x00008000,
+	/* Select all counters */
+	DPA_STATS_CNT_CLASSIF_ALL = 0x00010000
+};
+
+/* DPA Stats Classifier Table counter parameters */
+struct dpa_stats_cnt_classif_tbl {
+
+	/* Table descriptor */
+	int td;
+
+	/* Key to identify a specific entry in the table */
+	struct dpa_offload_lookup_key key;
+
+	/*
+	 * Single or multiple selection of Classifier Table counters
+	 * from one of the enums: dpa_stats_cnt_classif_sel or
+	 * dpa_stats_cnt_frag_sel
+	 */
+	unsigned int cnt_sel;
+};
+
+/* DPA Stats Classification Node Type */
+enum dpa_stats_classif_node_type {
+	/* Classification Node type HASH */
+	DPA_STATS_CLASSIF_NODE_HASH = 0,
+	/* Classification Node type Indexed */
+	DPA_STATS_CLASSIF_NODE_INDEXED,
+	/* Classification Node type Exact match */
+	DPA_STATS_CLASSIF_NODE_EXACT_MATCH
+};
+
+/* DPA Stats Classification Node counter parameters */
+struct dpa_stats_cnt_classif_node {
+	/*
+	 * Handle of the FMAN Cc node, more precisely handle of
+	 * the classification element previously created in the
+	 * distribution XML file
+	 */
+	void *cc_node;
+
+	/* The type of FMAN Classification Node */
+	enum dpa_stats_classif_node_type ccnode_type;
+
+	/* Key to identify a specific entry */
+	struct dpa_offload_lookup_key key;
+
+	/*
+	 * Single or multiple selection of Classifier
+	 * counters from enum: dpa_stats_cnt_classif_sel
+	 */
+	unsigned int cnt_sel;
+};
+
+/* DPA Stats IPSec counter parameters */
+struct dpa_stats_cnt_ipsec {
+
+	/* Security Association id */
+	int sa_id;
+
+	/* Select IPSec counter (bytes, packets or both) */
+	enum dpa_stats_cnt_sel cnt_sel;
+};
+
+/* DPA Stats Traffic Manager counter source */
+enum dpa_stats_cnt_traffic_mng_src {
+	/* Traffic Manager Class counter */
+	DPA_STATS_CNT_TRAFFIC_CLASS = 0,
+	/* Traffic Manager Congestion Group counter */
+	DPA_STATS_CNT_TRAFFIC_CG
+};
+
+/* DPA Stats Traffic Manager counter parameters */
+struct dpa_stats_cnt_traffic_mng {
+	/* Traffic Manager counter source */
+	enum dpa_stats_cnt_traffic_mng_src src;
+
+	/*
+	 * Depending on the Traffic Manager source, the 'src_id' has a
+	 * different meaning: it represents the 'class id' or the
+	 * 'congestion group id'
+	 */
+	uint8_t src_id;
+
+	/*
+	 * Counter selection, interpreted per source:
+	 * Traffic Manager Class: Number of bytes/frames dequeued from a Class
+	 * Traffic Manager Congestion Group: Number of bytes/frames whose
+	 * enqueues was rejected in all Class queues that belong to the
+	 * Congestion Group
+	 */
+	enum dpa_stats_cnt_sel cnt_sel;
+};
+
+/* DPA Stats counter parameters */
+struct dpa_stats_cnt_params {
+
+	/* The type of DPA Stats counter; selects the active union member */
+	enum dpa_stats_cnt_type type;
+
+	union {
+		/* Parameters for Ethernet counter */
+		struct dpa_stats_cnt_eth eth_params;
+
+		/* Parameters for IP Reassembly counter */
+		struct dpa_stats_cnt_reass reass_params;
+
+		/* Parameters for IP Fragmentation counter */
+		struct dpa_stats_cnt_frag frag_params;
+
+		/* Parameters for Policer counter */
+		struct dpa_stats_cnt_plcr plcr_params;
+
+		/* Parameters for Classification Table counter */
+		struct dpa_stats_cnt_classif_tbl classif_tbl_params;
+
+		/* Parameters for Classification Node counter */
+		struct dpa_stats_cnt_classif_node classif_node_params;
+
+		/* Parameters for IPSec counter */
+		struct dpa_stats_cnt_ipsec ipsec_params;
+
+		/* Parameters for Traffic Manager counter */
+		struct dpa_stats_cnt_traffic_mng traffic_mng_params;
+	};
+};
+
+/* DPA Stats Ethernet class counter parameters */
+struct dpa_stats_cls_cnt_eth {
+	/* Array of Ethernet counters sources, one per class member */
+	struct dpa_stats_cnt_eth_src *src;
+
+	/* Single selection of Ethernet counter */
+	enum dpa_stats_cnt_eth_sel cnt_sel;
+};
+
+/* DPA Stats IP Reassembly class counter parameters */
+struct dpa_stats_cls_cnt_reass {
+	/* Array of pointers of IP Reassembly objects, one per class member */
+	void **reass;
+
+	/*
+	 * Single or multiple selections of IP Reassembly counters
+	 * from one of the enums dpa_stats_cnt_reass_gen_sel,
+	 * dpa_stats_cnt_reass_ipv4_sel or dpa_stats_cnt_reass_ipv6_sel
+	 */
+	unsigned int cnt_sel;
+};
+
+/* DPA Stats IP Fragmentation class counter parameters */
+struct dpa_stats_cls_cnt_frag {
+	/* Array of pointers of IP Fragmentation objects, one per member */
+	void **frag;
+
+	/*
+	 * Single or multiple selection of Fragmentation
+	 * counters from enum dpa_stats_cnt_frag_sel
+	 */
+	unsigned int cnt_sel;
+};
+
+/* DPA Stats Policer class counter parameters */
+struct dpa_stats_cls_cnt_plcr {
+	/* Array of pointers of Policer objects, one per class member */
+	void **plcr;
+
+	/*
+	 * Single or multiple selection of Policer counters
+	 * from enum dpa_stats_cnt_plcr_sel
+	 */
+	unsigned int cnt_sel;
+};
+
+/* DPA Stats Classification key type */
+enum dpa_stats_classif_key_type {
+
+	/* Entry identified through a single key */
+	DPA_STATS_CLASSIF_SINGLE_KEY = 0,
+
+	/*
+	 * Entry identified through a pair of keys: the first key
+	 * uniquely identifies the first entry, while the second key
+	 * identifies the entry connected to the first entry
+	 */
+	DPA_STATS_CLASSIF_PAIR_KEY
+};
+
+/* DPA Stats Classification counter - pair of lookup keys */
+struct dpa_offload_lookup_key_pair {
+
+	/* Key to identify the first entry */
+	struct dpa_offload_lookup_key first_key;
+
+	/* Key to identify the entry connected to the first entry */
+	struct dpa_offload_lookup_key second_key;
+};
+
+/* DPA Stats Classifier Table class counter parameters */
+struct dpa_stats_cls_cnt_classif_tbl {
+
+	/* Table descriptor */
+	int td;
+
+	/* Mechanism used to identify an entry; selects the union member */
+	enum dpa_stats_classif_key_type key_type;
+
+	union {
+		/*
+		 * Invalid keys can be provided during class counter creation
+		 * and the statistics values for such keys will be 0. Function
+		 * 'dpa_stats_modify_class_counter' can be further used to
+		 * modify a specific key.
+		 */
+
+		/*
+		 * Array of keys to identify specific entries. A key can be
+		 * 'invalidated' by providing the 'byte' and 'mask' pointers
+		 * set to NULL.
+		 */
+		struct dpa_offload_lookup_key *keys;
+
+		/*
+		 * Array of 'pair-keys' to identify specific entries. A key pair
+		 * can be 'invalidated' by providing the 'byte' and 'mask'
+		 * pointers of the first key set to NULL
+		 */
+		struct dpa_offload_lookup_key_pair *pairs;
+	};
+
+	/*
+	 * Single or multiple selection of Classifier Table counters
+	 * from one of the enums: dpa_stats_cnt_classif_sel or
+	 * dpa_stats_cnt_frag_sel
+	 */
+	unsigned int cnt_sel;
+};
+
+/* DPA Stats Classification Node class counter parameters */
+struct dpa_stats_cls_cnt_classif_node {
+	/*
+	 * Handle of the FMAN Cc node, more precisely handle of
+	 * the classification element previously created in the
+	 * distribution XML file
+	 */
+	void *cc_node;
+
+	/* The type of FMAN Classification Node */
+	enum dpa_stats_classif_node_type ccnode_type;
+
+	/* Array of keys to identify specific entries, one per member */
+	struct dpa_offload_lookup_key *keys;
+
+	/*
+	 * Single or multiple selection of Classifier counters
+	 * from enum dpa_stats_cnt_classif_sel
+	 */
+	unsigned int cnt_sel;
+};
+
+/* DPA Stats IPSec class counter parameters */
+struct dpa_stats_cls_cnt_ipsec {
+
+	/*
+	 * Array of security association IDs. Invalid security association
+	 * identifiers (DPA_OFFLD_INVALID_OBJECT_ID) can be provided during
+	 * class counter creation and the statistics values for such ids will
+	 * be 0. Function 'dpa_stats_modify_class_counter' can be further used
+	 * to modify a specific security association identifier.
+	 */
+	int *sa_id;
+
+	/* Select IPSec counter (bytes, packets or both) */
+	enum dpa_stats_cnt_sel cnt_sel;
+};
+
+/* DPA Stats Traffic Manager class counter parameters */
+struct dpa_stats_cls_cnt_traffic_mng {
+
+	/* Traffic Manager source */
+	enum dpa_stats_cnt_traffic_mng_src src;
+
+	/*
+	 * Depending on the Traffic Manager source, the 'src_id' has a
+	 * different meaning: it represents the array of 'class ids' or the
+	 * array of 'congestion group ids', one per class member
+	 */
+	uint8_t *src_id;
+
+	/*
+	 * Counter selection, interpreted per source:
+	 * Traffic Manager Class: Number of bytes/frames dequeued from a Class
+	 * Traffic Manager Congestion Group: Number of bytes/frames whose
+	 * enqueues was rejected in all Class queues that belong to the
+	 * Congestion Group
+	 */
+	enum dpa_stats_cnt_sel cnt_sel;
+};
+
+/* DPA Stats class counter parameters */
+struct dpa_stats_cls_cnt_params {
+
+	/* Number of members the class can have */
+	unsigned int class_members;
+
+	/* The type of DPA Stats class counter; selects the union member */
+	enum dpa_stats_cnt_type type;
+
+	union {
+		/* Parameters for Ethernet class counter */
+		struct dpa_stats_cls_cnt_eth eth_params;
+
+		/* Parameters for IP Reassembly class counter */
+		struct dpa_stats_cls_cnt_reass reass_params;
+
+		/* Parameters for IP Fragmentation class counter */
+		struct dpa_stats_cls_cnt_frag frag_params;
+
+		/* Parameters for Policer class counter */
+		struct dpa_stats_cls_cnt_plcr plcr_params;
+
+		/* Parameters for Classifier Table class counter */
+		struct dpa_stats_cls_cnt_classif_tbl classif_tbl_params;
+
+		/* Parameters for Classification Node class counter */
+		struct dpa_stats_cls_cnt_classif_node classif_node_params;
+
+		/* Parameters for IPSec class counter */
+		struct dpa_stats_cls_cnt_ipsec ipsec_params;
+
+		/* Parameters for Traffic Manager class counter */
+		struct dpa_stats_cls_cnt_traffic_mng traffic_mng_params;
+	};
+};
+
+/* DPA Stats class counter member type */
+enum dpa_stats_cls_member_type {
+	/* Classifier table class member single key */
+	DPA_STATS_CLS_MEMBER_SINGLE_KEY = 0,
+	/* Classifier table class member pair key */
+	DPA_STATS_CLS_MEMBER_PAIR_KEY,
+	/* IPSec class member security association id */
+	DPA_STATS_CLS_MEMBER_SA_ID
+};
+
+/* DPA Stats class member parameters */
+struct dpa_stats_cls_member_params {
+
+	/* The type of DPA Stats class counter member; selects union member */
+	enum dpa_stats_cls_member_type type;
+
+	union {
+		/*
+		 * Key to set or update in case the byte and mask pointers are
+		 * not NULL, or class member to invalidate otherwise
+		 */
+		struct dpa_offload_lookup_key key;
+
+		/*
+		 * Key to set or update in case the byte and mask pointers of
+		 * the first key are not NULL, or class member to invalidate
+		 * otherwise
+		 */
+		struct dpa_offload_lookup_key_pair pair;
+
+		/*
+		 * Security association identifier to set or update or class
+		 * member to invalidate in case the security association has
+		 * an invalid value
+		 */
+		int sa_id;
+	};
+};
+
+/*
+ * Creates and initializes a DPA Stats instance, configured according to
+ * 'params'. On success the instance handle is returned through
+ * 'dpa_stats_id'.
+ * NOTE(review): the return-code convention (presumably 0 on success,
+ * negative errno otherwise) is not visible in this header - confirm
+ * against the implementation.
+ */
+int dpa_stats_init(const struct dpa_stats_params *params, int *dpa_stats_id);
+
+/*
+ * Creates and initializes a single DPA Stats counter on the instance
+ * identified by 'dpa_stats_id'. The returned 'dpa_stats_cnt_id' will be
+ * further used to uniquely identify the counter.
+ */
+int dpa_stats_create_counter(int dpa_stats_id,
+			 const struct dpa_stats_cnt_params *params,
+			 int *dpa_stats_cnt_id);
+
+/*
+ * Creates and initializes a DPA Stats class counter (a group of related
+ * counters - see struct dpa_stats_cls_cnt_params). The returned
+ * 'dpa_stats_cnt_id' will be further used to uniquely identify the counter.
+ */
+int dpa_stats_create_class_counter(int dpa_stats_id,
+			 const struct dpa_stats_cls_cnt_params *params,
+			 int *dpa_stats_cnt_id);
+
+/*
+ * Modifies a specific member of a DPA Stats class counter. The member to be
+ * modified is identified through the 'member_index' parameter, which
+ * represents the member's position in the corresponding class counter.
+ */
+int dpa_stats_modify_class_counter(int dpa_stats_cnt_id,
+			const struct dpa_stats_cls_member_params *params,
+			int member_index);
+
+/* Removes a DPA Stats counter, releasing all associated resources */
+int dpa_stats_remove_counter(int dpa_stats_cnt_id);
+
+/*
+ * Creates a request to retrieve the values of one or more single or class
+ * counters. Counters in the request's 'requested_cnts' array are retrieved
+ * in the order given by their position in the array. The counter values
+ * are written into the storage area, at the offset given by
+ * 'storage_area_offset', and the user is notified through the
+ * 'request_done' callback. 'cnts_len' is an output parameter - presumably
+ * the total size written to the storage area; confirm with implementation.
+ * Note that 'params' is passed by value (structure copy).
+ */
+int dpa_stats_get_counters(struct dpa_stats_cnt_request_params params,
+			 int *cnts_len,
+			 dpa_stats_request_cb request_done);
+
+/*
+ * Resets the statistics of a group of counters: 'cnts_ids' holds
+ * 'cnts_ids_len' counter identifiers.
+ */
+int dpa_stats_reset_counters(int *cnts_ids,
+			 unsigned int cnts_ids_len);
+/*
+ * Releases all resources associated with a DPA Stats instance
+ * and destroys it.
+ */
+int dpa_stats_free(int dpa_stats_id);
+
+/*
+ * NOTE(review): the two sampling-group entry points below are declared
+ * without parameters or documentation; their semantics are not visible in
+ * this header - confirm against the implementation.
+ */
+int dpa_stats_create_sampling_group(void);
+
+int dpa_stats_remove_sampling_group(void);
+
+#endif /* __FSL_DPA_STATS_H */