author     Rich Schmitt <B43082@freescale.com>    2014-10-31 17:13:10 (GMT)
committer  Matthew Weigel <Matthew.Weigel@freescale.com>    2014-12-11 18:39:42 (GMT)
commit     b89ac5428b869cd9125dd9a4cad5a87a08fef446 (patch)
tree       ff9f72488e5096bd64ff97ad926dcfe171bcdbbe /drivers/staging
parent     30e0b7dd208bcc1fa5728d6de84030f4a3c13651 (diff)
parent     2f98157ac2424b3ec842b0f2e80b86e10b262008 (diff)
download   linux-fsl-qoriq-b89ac5428b869cd9125dd9a4cad5a87a08fef446.tar.xz
dpa-offload Linux Kernel Driver Updates for SDK 1.7
Merge remote-tracking branch 'dpaa/dpa-offload-devel'
Diffstat (limited to 'drivers/staging')
-rw-r--r--  drivers/staging/fsl_dpa_offload/dpa_classifier.c        767
-rw-r--r--  drivers/staging/fsl_dpa_offload/dpa_classifier.h          8
-rw-r--r--  drivers/staging/fsl_dpa_offload/dpa_classifier_ioctl.h   30
-rw-r--r--  drivers/staging/fsl_dpa_offload/dpa_ipsec.c              12
-rw-r--r--  drivers/staging/fsl_dpa_offload/dpa_stats.c             130
-rw-r--r--  drivers/staging/fsl_dpa_offload/wrp_dpa_classifier.c     61
-rw-r--r--  drivers/staging/fsl_dpa_offload/wrp_dpa_ipsec.c          89
-rw-r--r--  drivers/staging/fsl_dpa_offload/wrp_dpa_stats.c          84
8 files changed, 822 insertions, 359 deletions
diff --git a/drivers/staging/fsl_dpa_offload/dpa_classifier.c b/drivers/staging/fsl_dpa_offload/dpa_classifier.c
index 63f3010..65bc333 100644
--- a/drivers/staging/fsl_dpa_offload/dpa_classifier.c
+++ b/drivers/staging/fsl_dpa_offload/dpa_classifier.c
@@ -947,8 +947,7 @@ static int hash_table_modify_entry(
hash_set_index = crc64_ecma(key->byte,
ptable->params.hash_params.key_size,
hash_set_index);
- hash_set_index =
- (u64)(hash_set_index & ptable->hash_mask) >>
+ hash_set_index = (u64)(hash_set_index & ptable->hash_mask) >>
(8 * (6 - ptable->params.hash_params.hash_offs) + 4);
/*
@@ -965,12 +964,10 @@ static int hash_table_modify_entry(
memset(&key_params, 0, sizeof(t_FmPcdCcKeyParams));
- cc_node_index =
- ptable->entry[entry_id].int_cc_node_index;
+ cc_node_index = ptable->entry[entry_id].int_cc_node_index;
entry_index = ptable->entry[entry_id].entry_index;
- cc_node =
- (t_Handle)ptable->int_cc_node[cc_node_index].cc_node;
+ cc_node = (t_Handle)ptable->int_cc_node[cc_node_index].cc_node;
if (!action) {
/* Save action to next engine params */
@@ -998,8 +995,6 @@ static int hash_table_modify_entry(
&key_params.ccNextEngineParams);
if (err)
return -err;
-
- hmd = ptable->entry[entry_id].hmd;
}
} else {
/*
@@ -1565,7 +1560,6 @@ static int table_delete_entry_by_ref(struct dpa_cls_table *ptable, int entry_id)
{
t_Error err;
struct dpa_cls_tbl_shadow_entry *shadow_entry;
- struct dpa_cls_tbl_shadow_entry_indexed *shadow_entry_indexed;
uint8_t entry_index;
unsigned int cc_node_index;
t_Handle cc_node;
@@ -1635,30 +1629,16 @@ static int table_delete_entry_by_ref(struct dpa_cls_table *ptable, int entry_id)
int_cc_node->used--;
if (ptable->shadow_table) {
- if (ptable->params.type == DPA_CLS_TBL_INDEXED) {
- shadow_list_entry = ptable->shadow_table[0].
- shadow_entry[entry_index].next;
- shadow_entry_indexed = list_entry(shadow_list_entry,
- struct dpa_cls_tbl_shadow_entry_indexed,
- list_node);
-
- list_del(&shadow_entry_indexed->list_node);
-
- kfree(shadow_entry_indexed);
- } else {
- shadow_list_entry =
- ptable->entry[entry_id].shadow_entry;
- shadow_entry = list_entry(shadow_list_entry,
+ shadow_list_entry = ptable->entry[entry_id].shadow_entry;
+ shadow_entry = list_entry(shadow_list_entry,
struct dpa_cls_tbl_shadow_entry,
list_node);
- list_del(&shadow_entry->list_node);
-
- kfree(shadow_entry->key.byte);
- kfree(shadow_entry->key.mask);
- kfree(shadow_entry);
- }
+ list_del(&shadow_entry->list_node);
+ kfree(shadow_entry->key.byte);
+ kfree(shadow_entry->key.mask);
+ kfree(shadow_entry);
}
dpa_cls_dbg(("DEBUG: dpa_classifier %s (%d) <--\n", __func__,
@@ -2081,6 +2061,51 @@ static int table_get_entry_stats_by_ref(struct dpa_cls_table *ptable,
return ret;
}
+int dpa_classif_table_get_miss_stats(int td,
+ struct dpa_cls_tbl_entry_stats *stats)
+{
+ struct dpa_cls_table *ptable;
+ t_FmPcdCcKeyStatistics key_stats;
+ t_Error err;
+ int i, ret = 0;
+
+ dpa_cls_dbg(("DEBUG: dpa_classifier %s (%d) -->\n",
+ __func__, __LINE__));
+
+ /* Parameters sanity check. */
+ if (!stats) {
+ log_err("\"stats\" cannot be NULL.\n");
+ return -EINVAL;
+ }
+
+ LOCK_OBJECT(table_array, td, ptable, -EINVAL);
+ memset(stats, 0, sizeof(*stats));
+ for (i = 0; i < ptable->int_cc_nodes_count; i++) {
+ memset(&key_stats, 0, sizeof(key_stats));
+ err = FM_PCD_MatchTableGetMissStatistics(
+ (t_Handle)ptable->int_cc_node[i].cc_node,
+ &key_stats);
+ if (err != E_OK) {
+ log_warn("FMan driver call failed - FM_PCD_MatchTableGetMissStatistics. Failed to acquire key statistics.\n");
+ memset(stats, 0, sizeof(*stats));
+ ret = -EPERM;
+ break;
+ }
+ stats->pkts += key_stats.frameCount;
+ stats->bytes += key_stats.byteCount;
+ }
+ RELEASE_OBJECT(ptable);
+
+ if (ret < 0)
+ log_err("Failed to get miss stats in table td=%d.\n", td);
+
+ dpa_cls_dbg(("DEBUG: dpa_classifier %s (%d) <--\n",
+ __func__, __LINE__));
+
+ return ret;
+}
+EXPORT_SYMBOL(dpa_classif_table_get_miss_stats);
+
int dpa_classif_table_get_params(int td, struct dpa_cls_tbl_params *params)
{
struct dpa_cls_table *ptable;
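As a usage sketch for the new dpa_classif_table_get_miss_stats() API above (not taken from this patch; the descriptor variable and field widths are assumptions), a kernel caller might read the aggregated miss counters like this:

    struct dpa_cls_tbl_entry_stats miss;
    int err;

    /* td: a table descriptor previously obtained when the table was created */
    err = dpa_classif_table_get_miss_stats(td, &miss);
    if (!err)
            pr_info("table %d miss: %llu pkts, %llu bytes\n", td,
                    (unsigned long long)miss.pkts,
                    (unsigned long long)miss.bytes);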
@@ -3085,6 +3110,11 @@ static int action_to_next_engine_params(const struct dpa_cls_tbl_action *action,
policer_params->modify_policer_params;
next_engine_params->params.plcrParams.
newFqid = action->enq_params.new_fqid;
+#if (DPAA_VERSION >= 11)
+ next_engine_params->params.plcrParams.
+ newRelativeStorageProfileId =
+ action->enq_params.new_rel_vsp_id;
+#endif /* (DPAA_VERSION >= 11) */
} else {
next_engine_params->nextEngine = e_FM_PCD_DONE;
next_engine_params->params.enqueueParams.
@@ -3095,12 +3125,12 @@ static int action_to_next_engine_params(const struct dpa_cls_tbl_action *action,
next_engine_params->params.
enqueueParams.overrideFqid =
TRUE;
- }
#if (DPAA_VERSION >= 11)
- next_engine_params->params.enqueueParams.
- newRelativeStorageProfileId =
- action->enq_params.new_rel_vsp_id;
+ next_engine_params->params.enqueueParams.
+ newRelativeStorageProfileId =
+ action->enq_params.new_rel_vsp_id;
#endif
+ }
}
if (action->enq_params.hmd != DPA_OFFLD_DESC_NONE) {
@@ -3185,7 +3215,7 @@ static int action_to_next_engine_params(const struct dpa_cls_tbl_action *action,
return -EINVAL;
}
next_engine_params->h_Manip = (t_Handle)
- dpa_classif_hm_lock_chain(action->enq_params.hmd);
+ dpa_classif_hm_lock_chain(action->mcast_params.hmd);
if (!next_engine_params->h_Manip) {
log_err("Failed to attach HM op hmd=%d to "
"classification entry.\n",
@@ -3193,7 +3223,7 @@ static int action_to_next_engine_params(const struct dpa_cls_tbl_action *action,
return -EINVAL;
}
- *hmd = action->enq_params.hmd;
+ *hmd = action->mcast_params.hmd;
} else
next_engine_params->h_Manip = NULL;
next_engine_params->nextEngine = e_FM_PCD_FR;
@@ -3742,7 +3772,6 @@ static int import_hm_nodes_to_chain(void * const *node_array,
BUG_ON(!node_array);
BUG_ON(!hm);
- /* This HM operation is linked to another HM op */
for (i = num_nodes - 1; i >= 0; i--) {
/*
* If the node is empty, save an empty space and skip
@@ -3778,6 +3807,8 @@ static int import_hm_nodes_to_chain(void * const *node_array,
/* Node does not exist, we need to create it */
hm->hm_node[i] = kzalloc(sizeof(struct dpa_cls_hm_node),
GFP_KERNEL);
+ dpa_cls_dbg(("DEBUG: dpa_classifier %s (%d): Created new hm_node = 0x%p\n", __func__,
+ __LINE__, hm->hm_node[i]));
if (!hm->hm_node[i]) {
log_err("Not enough memory for HM node "
"management.\n");
@@ -3788,6 +3819,9 @@ static int import_hm_nodes_to_chain(void * const *node_array,
hm->hm_node[i]->node = node_array[i];
INIT_LIST_HEAD(&hm->hm_node[i]->list_node);
+ /* Initialize dontParseAfterManip to TRUE */
+ hm->hm_node[i]->params.u.hdr.dontParseAfterManip = TRUE;
+
/* Add this new node to the HM chain: */
list_add(&hm->hm_node[i]->list_node,
hm->hm_chain);
@@ -3797,6 +3831,166 @@ static int import_hm_nodes_to_chain(void * const *node_array,
return 0;
}
+static struct dpa_cls_hm_node *try_compatible_node(const struct dpa_cls_hm *hm)
+{
+ struct dpa_cls_hm_node *hm_node = NULL;
+ const int update_flags = DPA_CLS_HM_UPDATE_IPv4_UPDATE |
+ DPA_CLS_HM_UPDATE_IPv6_UPDATE |
+ DPA_CLS_HM_UPDATE_UDP_TCP_UPDATE;
+ const int replace_flags = DPA_CLS_HM_REPLACE_IPv4_BY_IPv6 |
+ DPA_CLS_HM_REPLACE_IPv6_BY_IPv4;
+
+ dpa_cls_dbg(("DEBUG: dpa_classifier %s (%d) -->\n", __func__,
+ __LINE__));
+
+ if (list_empty(hm->hm_chain)) {
+ /*
+ * There is nothing in the HM node chain. Don't bother any more
+ * to look for anything:
+ */
+ dpa_cls_dbg(("DEBUG: dpa_classifier %s (%d) <-- did not find a compatible node.\n",
+ __func__, __LINE__));
+ return NULL;
+ }
+
+ /* Get the last item in the chain */
+ hm_node = list_entry(hm->hm_chain->next,
+ struct dpa_cls_hm_node, list_node);
+ /*
+ * If the previous HM node is not a HDR_MANIP, then it can't be
+ * compatible for aggregation:
+ */
+ if (hm_node->params.type != e_FM_PCD_MANIP_HDR) {
+ dpa_cls_dbg(("DEBUG: dpa_classifier %s (%d) <-- did not find a compatible node.\n",
+ __func__, __LINE__));
+ return NULL;
+ }
+
+ switch (hm->type) {
+ case DPA_CLS_HM_TYPE_REMOVE:
+ dpa_cls_dbg(("DEBUG: dpa_classifier %s (%d): Looking for REMOVE HM compatible nodes...\n",
+ __func__, __LINE__));
+ /*
+ * If in the previous HM node the remove operation is already
+ * used, then it is not compatible for aggregation:
+ */
+ if (hm_node->params.u.hdr.rmv)
+ hm_node = NULL;
+ break;
+ case DPA_CLS_HM_TYPE_INSERT:
+ dpa_cls_dbg(("DEBUG: dpa_classifier %s (%d): Looking for INSERT HM compatible nodes...\n",
+ __func__, __LINE__));
+ /*
+ * If in the previous HM node the insert operation is already
+ * used, then it is not compatible for aggregation:
+ */
+ if (hm_node->params.u.hdr.insrt)
+ hm_node = NULL;
+ break;
+ case DPA_CLS_HM_TYPE_UPDATE:
+ dpa_cls_dbg(("DEBUG: dpa_classifier %s (%d): Looking for UPDATE HM compatible nodes...\n",
+ __func__, __LINE__));
+ /*
+ * If in the previous HM node the update operation is already
+ * used and we also have to do header updates, then it is not
+ * compatible for aggregation:
+ */
+ if ((hm->update_params.op_flags & update_flags) &&
+ (hm_node->params.u.hdr.fieldUpdate))
+ hm_node = NULL;
+
+ /*
+ * If in the previous HM node the custom header replace
+ * operation is already used and we also have to do header
+ * replacement, then it is not compatible for aggregation:
+ */
+ if ((hm->update_params.op_flags & replace_flags) &&
+ (hm_node->params.u.hdr.custom))
+ hm_node = NULL;
+ break;
+ case DPA_CLS_HM_TYPE_VLAN:
+ dpa_cls_dbg(("DEBUG: dpa_classifier %s (%d): Looking for VLAN HM compatible nodes...\n",
+ __func__, __LINE__));
+ switch (hm->vlan_params.type) {
+ case DPA_CLS_HM_VLAN_INGRESS:
+ /*
+ * If in the previous HM node the remove operation is
+ * already used, then it is not compatible for
+ * aggregation:
+ */
+ if (hm_node->params.u.hdr.rmv)
+ hm_node = NULL;
+ break;
+ case DPA_CLS_HM_VLAN_EGRESS:
+ /*
+ * If in the previous HM node the insert operation is
+ * already used and we need to insert VLANs, then it is
+ * not compatible for aggregation:
+ */
+ if ((hm->vlan_params.egress.num_tags) &&
+ (hm_node->params.u.hdr.insrt))
+ hm_node = NULL;
+ /*
+ * If in the previous HM node the update operation is
+ * already used and we need to do VLAN update, then it
+ * is not compatible for aggregation:
+ */
+ if ((hm->vlan_params.egress.update_op) &&
+ (hm_node->params.u.hdr.fieldUpdate))
+ hm_node = NULL;
+ break;
+ default:
+ hm_node = NULL;
+ break;
+ }
+ break;
+ case DPA_CLS_HM_TYPE_MPLS:
+ dpa_cls_dbg(("DEBUG: dpa_classifier %s (%d): Looking for MPLS HM compatible nodes...\n",
+ __func__, __LINE__));
+ switch (hm->mpls_params.type) {
+ case DPA_CLS_HM_MPLS_INSERT_LABELS:
+ /*
+ * If in the previous HM node the insert operation is
+ * already used, then it is not compatible for
+ * aggregation:
+ */
+ if (hm_node->params.u.hdr.insrt)
+ hm_node = NULL;
+ break;
+ case DPA_CLS_HM_MPLS_REMOVE_ALL_LABELS:
+ /*
+ * If in the previous HM node the remove operation is
+ * already used, then it is not compatible for
+ * aggregation:
+ */
+ if (hm_node->params.u.hdr.rmv)
+ hm_node = NULL;
+ break;
+ default:
+ hm_node = NULL;
+ break;
+ }
+ break;
+ default:
+ hm_node = NULL;
+ break;
+ }
+
+#ifdef DPA_CLASSIFIER_DEBUG
+ if (hm_node)
+ dpa_cls_dbg(("DEBUG: dpa_classifier %s (%d): FOUND compatible hm_node = 0x%p.\n",
+ __func__, __LINE__, hm_node));
+ else
+ dpa_cls_dbg(("DEBUG: dpa_classifier %s (%d): Compatible hm_node NOT FOUND.\n",
+ __func__, __LINE__));
+#endif /* DPA_CLASSIFIER_DEBUG */
+
+ dpa_cls_dbg(("DEBUG: dpa_classifier %s (%d) <--\n", __func__,
+ __LINE__));
+
+ return hm_node;
+}
+
static int add_local_hm_nodes_to_chain(struct dpa_cls_hm *phm)
{
int i;
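The aggregation rule implemented by try_compatible_node() above reduces to: reuse the last header-manip node in the chain unless the operation slot the new op needs is already taken. A condensed sketch of that decision (field names are taken from the patch; the helper itself is hypothetical):

    static bool can_aggregate(const struct dpa_cls_hm_node *last, int new_type)
    {
            if (last->params.type != e_FM_PCD_MANIP_HDR)
                    return false;   /* only HDR_MANIP nodes can be shared */
            switch (new_type) {
            case DPA_CLS_HM_TYPE_REMOVE:
                    return !last->params.u.hdr.rmv;    /* rmv slot free? */
            case DPA_CLS_HM_TYPE_INSERT:
                    return !last->params.u.hdr.insrt;  /* insrt slot free? */
            default:
                    return false;
            }
    }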
@@ -3847,8 +4041,8 @@ static int init_hm_chain(void *fm_pcd, struct list_head *chain_head,
pcurrent->params.h_NextManip = (pnext) ? (t_Handle)pnext->node : NULL;
#ifdef DPA_CLASSIFIER_DEBUG
- dpa_cls_dbg(("DEBUG: dpa_classifier %s (%d): Dumping HM node params.\n",
- __func__, __LINE__));
+ dpa_cls_dbg(("DEBUG: dpa_classifier %s (%d): Dumping HM node params for hm_node @ 0x%p\n",
+ __func__, __LINE__, pcurrent));
switch (pcurrent->params.type) {
case e_FM_PCD_MANIP_HDR:
dpa_cls_dbg((" hm_node_params.type = "
@@ -3856,9 +4050,33 @@ static int init_hm_chain(void *fm_pcd, struct list_head *chain_head,
dpa_cls_dbg((" hm_node_params.u.hdr.rmv = %d\n",
pcurrent->params.u.hdr.rmv));
if (pcurrent->params.u.hdr.rmv) {
- dpa_cls_dbg((" hm_node_params.u.hdr.rmvParams"
- ".type = %d\n",
- pcurrent->params.u.hdr.rmvParams.type));
+ switch (pcurrent->params.u.hdr.rmvParams.type) {
+ case e_FM_PCD_MANIP_RMV_GENERIC:
+ dpa_cls_dbg((" hm_node_params.u.hdr.rmvParams"
+ ".type = e_FM_PCD_MANIP_RMV_GENERIC\n"));
+ dpa_cls_dbg((" hm_node_params.u.hdr.rmvParams."
+ "u.generic.offset = %u\n",
+ pcurrent->params.u.hdr.rmvParams.u.generic.offset));
+ dpa_cls_dbg((" hm_node_params.u.hdr.rmvParams."
+ "u.generic.size = %u\n",
+ pcurrent->params.u.hdr.rmvParams.u.generic.size));
+ break;
+ case e_FM_PCD_MANIP_RMV_BY_HDR:
+ dpa_cls_dbg((" hm_node_params.u.hdr.rmvParams"
+ ".type = e_FM_PCD_MANIP_RMV_BY_HDR\n"));
+ if (pcurrent->params.u.hdr.rmvParams.u.byHdr.type == e_FM_PCD_MANIP_RMV_BY_HDR_SPECIFIC_L2) {
+ dpa_cls_dbg((" hm_node_params.u.hdr.rmvParams."
+ "u.byHdr.type = e_FM_PCD_MANIP_RMV_BY_HDR_SPECIFIC_L2\n"));
+ dpa_cls_dbg((" hm_node_params.u.hdr.rmvParams."
+ "u.byHdr.u.specificL2 = %d\n",
+ pcurrent->params.u.hdr.rmvParams.u.byHdr.u.specificL2));
+ } else {
+ dpa_cls_dbg((" hm_node_params.u.hdr.rmvParams."
+ "u.byHdr.type = %d\n",
+ pcurrent->params.u.hdr.rmvParams.u.byHdr.type));
+ }
+ break;
+ }
}
dpa_cls_dbg((" hm_node_params.u.hdr.insrt = %d\n",
pcurrent->params.u.hdr.insrt));
@@ -3951,10 +4169,26 @@ static int init_hm_chain(void *fm_pcd, struct list_head *chain_head,
dpa_cls_dbg((" hm_node_params.u.hdr.custom = %d\n",
pcurrent->params.u.hdr.custom));
if (pcurrent->params.u.hdr.custom) {
- dpa_cls_dbg((" hm_node_params.u.hdr."
- "custom.type = %d\n",
- pcurrent->params.u.hdr.customParams.type));
+ if (pcurrent->params.u.hdr.customParams.type ==
+ e_FM_PCD_MANIP_HDR_CUSTOM_IP_REPLACE) {
+ dpa_cls_dbg((" hm_node_params.u.hdr.customParams."
+ "type = e_FM_PCD_MANIP_HDR_CUSTOM_IP_REPLACE\n"));
+ dpa_cls_dbg((" hm_node_params.u.hdr.customParams.u.ipHdrReplace.replaceType = %d\n",
+ pcurrent->params.u.hdr.customParams.u.ipHdrReplace.replaceType));
+ dpa_cls_dbg((" hm_node_params.u.hdr.customParams.u.ipHdrReplace.decTtlHl = %d\n",
+ pcurrent->params.u.hdr.customParams.u.ipHdrReplace.decTtlHl));
+ dpa_cls_dbg((" hm_node_params.u.hdr.customParams.u.ipHdrReplace.updateIpv4Id = %d\n",
+ pcurrent->params.u.hdr.customParams.u.ipHdrReplace.updateIpv4Id));
+ dpa_cls_dbg((" hm_node_params.u.hdr.customParams.u.ipHdrReplace.id = %u\n",
+ pcurrent->params.u.hdr.customParams.u.ipHdrReplace.id));
+ dpa_cls_dbg((" hm_node_params.u.hdr.customParams.u.ipHdrReplace.hdrSize = %u\n",
+ pcurrent->params.u.hdr.customParams.u.ipHdrReplace.hdrSize));
+ } else
+ dpa_cls_dbg((" hm_node_params.u.hdr.customParams.type = %d\n",
+ pcurrent->params.u.hdr.customParams.type));
}
+ dpa_cls_dbg((" hm_node_params.u.hdr.dontParseAfterManip = %d\n",
+ pcurrent->params.u.hdr.dontParseAfterManip));
break;
case e_FM_PCD_MANIP_FRAG:
dpa_cls_dbg((" hm_node_params.type = "
@@ -4090,41 +4324,6 @@ static void remove_hm_node(struct dpa_cls_hm_node *node)
kfree(node);
}
-static struct dpa_cls_hm_node
- *find_compatible_hm_node(enum dpa_cls_hm_node_type type,
- struct list_head *list)
-{
- struct dpa_cls_hm_node *phm_node;
- e_FmPcdManipHdrFieldUpdateType val;
-
- BUG_ON(!list);
-
- switch (type) {
- case DPA_CLS_HM_NODE_IPv4_HDR_UPDATE:
- val = e_FM_PCD_MANIP_HDR_FIELD_UPDATE_IPV4;
- break;
- case DPA_CLS_HM_NODE_IPv6_HDR_UPDATE:
- val = e_FM_PCD_MANIP_HDR_FIELD_UPDATE_IPV6;
- break;
- case DPA_CLS_HM_NODE_TCPUDP_HDR_UPDATE:
- val = e_FM_PCD_MANIP_HDR_FIELD_UPDATE_TCP_UDP;
- break;
- default:
- log_err("Don't know how to search for nodes compatible with "
- "type=%d.\n", type);
- return NULL;
- }
-
- list_for_each_entry(phm_node, list, list_node) {
- if ((phm_node->params.type == e_FM_PCD_MANIP_HDR) &&
- (phm_node->params.u.hdr.fieldUpdate) &&
- (phm_node->params.u.hdr.fieldUpdateParams.type == val))
- return phm_node;
- }
-
- return NULL;
-}
-
static int create_new_hm_op(int *hmd, int next_hmd)
{
int err;
@@ -4324,12 +4523,8 @@ static int nat_hm_prepare_nodes(struct dpa_cls_hm *pnat_hm,
const struct dpa_cls_hm_nat_resources *res)
{
struct dpa_cls_hm_node *hm_node = NULL;
- struct dpa_cls_hm *pnext_hm = NULL;
void * const *phm_nodes;
int err = 0;
- enum dpa_cls_hm_node_type l3_update_node = DPA_CLS_HM_NODE_LAST_ENTRY;
- enum dpa_cls_hm_node_type l4_update_node = DPA_CLS_HM_NODE_LAST_ENTRY;
- unsigned int ip_ver = 0;
dpa_cls_dbg(("DEBUG: dpa_classifier %s (%d) -->\n", __func__,
__LINE__));
@@ -4341,83 +4536,47 @@ static int nat_hm_prepare_nodes(struct dpa_cls_hm *pnat_hm,
if (res) { /* Import HM nodes */
phm_nodes = &res->l3_update_node;
+ dpa_cls_dbg(("DEBUG: dpa_classifier %s (%d) <--\n", __func__,
+ __LINE__));
+
return import_hm_nodes_to_chain(phm_nodes,
pnat_hm->num_nodes,
pnat_hm);
}
- /* Create HM nodes */
- if (pnat_hm->nat_params.type == DPA_CLS_HM_NAT_TYPE_TRADITIONAL) {
- if (pnat_hm->nat_params.flags &
- DPA_CLS_HM_NAT_UPDATE_SIP)
- ip_ver = pnat_hm->nat_params.nat.sip.version;
- if (pnat_hm->nat_params.flags &
- DPA_CLS_HM_NAT_UPDATE_DIP)
- ip_ver = pnat_hm->nat_params.nat.dip.version;
- if ((pnat_hm->nat_params.flags &
- DPA_CLS_HM_NAT_UPDATE_SPORT) ||
- (pnat_hm->nat_params.flags &
- DPA_CLS_HM_NAT_UPDATE_DPORT))
- l4_update_node = DPA_CLS_HM_NODE_TCPUDP_HDR_UPDATE;
- if (ip_ver) {
- if (ip_ver == 4)
- l3_update_node =
- DPA_CLS_HM_NODE_IPv4_HDR_UPDATE;
- else
- l3_update_node =
- DPA_CLS_HM_NODE_IPv6_HDR_UPDATE;
- }
- } else {
- if (pnat_hm->nat_params.nat_pt.type ==
- DPA_CLS_HM_NAT_PT_IPv6_TO_IPv4)
- l3_update_node =
- DPA_CLS_HM_NODE_HDR_REPLACE_IPv6_BY_IPv4;
- else
- l3_update_node =
- DPA_CLS_HM_NODE_HDR_REPLACE_IPv4_BY_IPv6;
- }
-
- /* Check if we can attach to an existing update node */
- if (!list_empty(&pnat_hm->list_node))
- pnext_hm = list_entry(pnat_hm->list_node.next,
- struct dpa_cls_hm,
- list_node);
-
- if (l3_update_node != DPA_CLS_HM_NODE_LAST_ENTRY) {
- /* Check if we can attach to an existing L3 update node */
- if (pnext_hm)
- hm_node = find_compatible_hm_node(l3_update_node,
- pnext_hm->hm_chain);
- /* If not, create an L3 update node: */
- if (!hm_node) {
- hm_node = kzalloc(sizeof(*hm_node), GFP_KERNEL);
- if (!hm_node) {
- log_err("No more memory for header manip "
- "nodes.\n");
- return -ENOMEM;
- }
- INIT_LIST_HEAD(&hm_node->list_node);
- pnat_hm->hm_node[0] = hm_node;
- }
+ /* Create a header manip node for this update: */
+ hm_node = kzalloc(sizeof(*hm_node), GFP_KERNEL);
+
+ dpa_cls_dbg(("DEBUG: dpa_classifier %s (%d): Created new hm_node = 0x%p\n",
+ __func__, __LINE__, hm_node));
+ if (!hm_node) {
+ log_err("No more memory for header manip nodes.\n");
+ return -ENOMEM;
}
- hm_node = NULL;
- if (l4_update_node != DPA_CLS_HM_NODE_LAST_ENTRY) {
- /* Check if we can attach to an existing L4 update node */
- if (pnext_hm)
- hm_node = find_compatible_hm_node(l4_update_node,
- pnext_hm->hm_chain);
- /* If not create an L4 update node: */
+ INIT_LIST_HEAD(&hm_node->list_node);
+
+ /* Initialize dontParseAfterManip to TRUE */
+ hm_node->params.u.hdr.dontParseAfterManip = TRUE;
+
+ pnat_hm->hm_node[0] = hm_node;
+
+ if (pnat_hm->nat_params.flags &
+ (DPA_CLS_HM_NAT_UPDATE_SPORT | DPA_CLS_HM_NAT_UPDATE_DPORT)) {
+ hm_node = kzalloc(sizeof(*hm_node), GFP_KERNEL);
+ dpa_cls_dbg(("DEBUG: dpa_classifier %s (%d): Created new hm_node = 0x%p\n",
+ __func__, __LINE__, hm_node));
if (!hm_node) {
- hm_node = kzalloc(sizeof(*hm_node), GFP_KERNEL);
- if (!hm_node) {
- log_err("No more memory for header manip "
- "nodes.\n");
- return -ENOMEM;
- }
- INIT_LIST_HEAD(&hm_node->list_node);
- pnat_hm->hm_node[1] = hm_node;
+ log_err("No more memory for header manip nodes.\n");
+ return -ENOMEM;
}
+
+ INIT_LIST_HEAD(&hm_node->list_node);
+
+ /* Initialize dontParseAfterManip to TRUE */
+ hm_node->params.u.hdr.dontParseAfterManip = TRUE;
+
+ pnat_hm->hm_node[1] = hm_node;
}
add_local_hm_nodes_to_chain(pnat_hm);
@@ -4444,7 +4603,12 @@ static int nat_hm_update_params(struct dpa_cls_hm *pnat_hm)
hm_node = pnat_hm->hm_node[0];
hm_node->params.type = e_FM_PCD_MANIP_HDR;
- hm_node->params.u.hdr.dontParseAfterManip = TRUE;
+ if (pnat_hm->hm_node[1])
+ hm_node->params.u.hdr.dontParseAfterManip = TRUE;
+ else
+ hm_node->params.u.hdr.dontParseAfterManip &=
+ (pnat_hm->nat_params.reparse) ? FALSE :
+ TRUE;
if (pnat_hm->nat_params.type ==
DPA_CLS_HM_NAT_TYPE_TRADITIONAL) {
@@ -4559,10 +4723,12 @@ static int nat_hm_update_params(struct dpa_cls_hm *pnat_hm)
hm_node->params.type = e_FM_PCD_MANIP_HDR;
hm_node->params.u.hdr.fieldUpdate = TRUE;
- hm_node->params.u.hdr.dontParseAfterManip = TRUE;
hm_node->params.u.hdr.fieldUpdateParams.type =
e_FM_PCD_MANIP_HDR_FIELD_UPDATE_TCP_UDP;
+ hm_node->params.u.hdr.dontParseAfterManip &=
+ (pnat_hm->nat_params.reparse) ? FALSE : TRUE;
+
if (pnat_hm->nat_params.flags & DPA_CLS_HM_NAT_UPDATE_SPORT) {
hm_node->params.u.hdr.fieldUpdateParams.u.tcpUdp.
validUpdates |= HDR_MANIP_TCP_UDP_SRC;
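The repeated "flag &= reparse ? FALSE : TRUE" pattern in the hunks above is an AND-accumulator: the flag starts TRUE when a node is created, and any single op that requests reparsing latches it to FALSE for the shared node. In sketch form (variable names assumed):

    node->params.u.hdr.dontParseAfterManip = TRUE;               /* at creation */
    node->params.u.hdr.dontParseAfterManip &= op1_reparse ? FALSE : TRUE;
    node->params.u.hdr.dontParseAfterManip &= op2_reparse ? FALSE : TRUE;
    /* FALSE iff at least one aggregated op set its reparse parameter */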
@@ -4882,24 +5048,37 @@ static int fwd_hm_prepare_nodes(struct dpa_cls_hm *pfwd_hm,
if (res) { /* Import HM nodes */
phm_nodes = &res->fwd_node;
+ dpa_cls_dbg(("DEBUG: dpa_classifier %s (%d) <--\n", __func__,
+ __LINE__));
+
return import_hm_nodes_to_chain(phm_nodes,
pfwd_hm->num_nodes,
pfwd_hm);
}
+ /* Create a header manip node: */
hm_node = kzalloc(sizeof(*hm_node), GFP_KERNEL);
+
+ dpa_cls_dbg(("DEBUG: dpa_classifier %s (%d): Created new hm_node = 0x%p\n", __func__,
+ __LINE__, hm_node));
if (!hm_node) {
- log_err("Not enough memory for header manip nodes.\n");
+ log_err("No more memory for header manip nodes.\n");
return -ENOMEM;
}
INIT_LIST_HEAD(&hm_node->list_node);
- pfwd_hm->hm_node[0] = hm_node;
+
+ /* Initialize dontParseAfterManip to TRUE */
+ hm_node->params.u.hdr.dontParseAfterManip = TRUE;
+
+ pfwd_hm->hm_node[0] = hm_node;
if (pfwd_hm->update_params.ip_frag_params.mtu) {
/* IP fragmentation option is enabled */
/* Create a header manip node: */
hm_node = kzalloc(sizeof(*hm_node), GFP_KERNEL);
+ dpa_cls_dbg(("DEBUG: dpa_classifier %s (%d): Created new hm_node = 0x%p\n", __func__,
+ __LINE__, hm_node));
if (!hm_node) {
log_err("No more memory for header manip nodes.\n");
return -ENOMEM;
@@ -4932,8 +5111,10 @@ static int fwd_hm_update_params(struct dpa_cls_hm *pfwd_hm)
hm_node = pfwd_hm->hm_node[0];
- hm_node->params.type = e_FM_PCD_MANIP_HDR;
- hm_node->params.u.hdr.dontParseAfterManip = TRUE;
+ hm_node->params.type = e_FM_PCD_MANIP_HDR;
+ hm_node->params.u.hdr.dontParseAfterManip &=
+ (pfwd_hm->fwd_params.reparse) ? FALSE : TRUE;
+
switch (pfwd_hm->fwd_params.out_if_type) {
case DPA_CLS_HM_IF_TYPE_ETHERNET:
/* Update Ethernet MACS */
@@ -4941,7 +5122,6 @@ static int fwd_hm_update_params(struct dpa_cls_hm *pfwd_hm)
hm_node->params.u.hdr.insrtParams.type =
e_FM_PCD_MANIP_INSRT_GENERIC;
hm_node->params.u.hdr.insrtParams.u.generic.replace = TRUE;
- hm_node->params.u.hdr.dontParseAfterManip = TRUE;
size = (uint8_t)(sizeof(struct ethhdr) - ETHERTYPE_SIZE);
pdata = kzalloc(size, GFP_KERNEL);
@@ -5309,22 +5489,36 @@ static int remove_hm_prepare_nodes(struct dpa_cls_hm *premove_hm,
if (res) { /* Import HM nodes */
phm_nodes = &res->remove_node;
- err = import_hm_nodes_to_chain(phm_nodes,
+ dpa_cls_dbg(("DEBUG: dpa_classifier %s (%d) <--\n", __func__,
+ __LINE__));
+
+ return import_hm_nodes_to_chain(phm_nodes,
premove_hm->num_nodes,
premove_hm);
- } else { /* Create HM nodes */
+ }
+
+ hm_node = try_compatible_node(premove_hm);
+ if (hm_node == NULL) {
+ /* Create a header manip node for this remove: */
hm_node = kzalloc(sizeof(*hm_node), GFP_KERNEL);
+
+ dpa_cls_dbg(("DEBUG: dpa_classifier %s (%d): Created new hm_node = 0x%p\n", __func__,
+ __LINE__, hm_node));
if (!hm_node) {
- log_err("Not enough memory for header manip nodes.\n");
+ log_err("No more memory for header manip nodes.\n");
return -ENOMEM;
}
INIT_LIST_HEAD(&hm_node->list_node);
- premove_hm->hm_node[0] = hm_node;
- add_local_hm_nodes_to_chain(premove_hm);
+ /* Initialize dontParseAfterManip to TRUE */
+ hm_node->params.u.hdr.dontParseAfterManip = TRUE;
}
+ premove_hm->hm_node[0] = hm_node;
+
+ add_local_hm_nodes_to_chain(premove_hm);
+
dpa_cls_dbg(("DEBUG: dpa_classifier %s (%d) <--\n", __func__,
__LINE__));
@@ -5344,9 +5538,11 @@ static int remove_hm_update_params(struct dpa_cls_hm *premove_hm)
hm_node = premove_hm->hm_node[0];
- hm_node->params.type = e_FM_PCD_MANIP_HDR;
- hm_node->params.u.hdr.rmv = TRUE;
- hm_node->params.u.hdr.dontParseAfterManip = TRUE;
+ hm_node->params.type = e_FM_PCD_MANIP_HDR;
+ hm_node->params.u.hdr.rmv = TRUE;
+
+ hm_node->params.u.hdr.dontParseAfterManip &=
+ (premove_hm->remove_params.reparse) ? FALSE : TRUE;
switch (premove_hm->remove_params.type) {
case DPA_CLS_HM_REMOVE_ETHERNET:
@@ -5594,22 +5790,36 @@ static int insert_hm_prepare_nodes(struct dpa_cls_hm *pinsert_hm,
if (res) { /* Import HM nodes */
phm_nodes = &res->insert_node;
- err = import_hm_nodes_to_chain(phm_nodes,
+ dpa_cls_dbg(("DEBUG: dpa_classifier %s (%d) <--\n", __func__,
+ __LINE__));
+
+ return import_hm_nodes_to_chain(phm_nodes,
pinsert_hm->num_nodes,
pinsert_hm);
- } else { /* Create HM nodes */
+ }
+
+ hm_node = try_compatible_node(pinsert_hm);
+ if (hm_node == NULL) {
+ /* Create a header manip node for this insert: */
hm_node = kzalloc(sizeof(*hm_node), GFP_KERNEL);
+
+ dpa_cls_dbg(("DEBUG: dpa_classifier %s (%d): Created new hm_node = 0x%p\n", __func__,
+ __LINE__, hm_node));
if (!hm_node) {
- log_err("Not enough memory for header manip nodes.\n");
+ log_err("No more memory for header manip nodes.\n");
return -ENOMEM;
}
INIT_LIST_HEAD(&hm_node->list_node);
- pinsert_hm->hm_node[0] = hm_node;
- add_local_hm_nodes_to_chain(pinsert_hm);
+ /* Initialize dontParseAfterManip to TRUE */
+ hm_node->params.u.hdr.dontParseAfterManip = TRUE;
}
+ pinsert_hm->hm_node[0] = hm_node;
+
+ add_local_hm_nodes_to_chain(pinsert_hm);
+
dpa_cls_dbg(("DEBUG: dpa_classifier %s (%d) <--\n", __func__,
__LINE__));
@@ -5635,7 +5845,9 @@ static int insert_hm_update_params(struct dpa_cls_hm *pinsert_hm)
hm_node->params.type = e_FM_PCD_MANIP_HDR;
hm_node->params.u.hdr.insrt = TRUE;
hm_node->params.u.hdr.insrtParams.type = e_FM_PCD_MANIP_INSRT_GENERIC;
- hm_node->params.u.hdr.dontParseAfterManip = TRUE;
+
+ hm_node->params.u.hdr.dontParseAfterManip &=
+ (pinsert_hm->insert_params.reparse) ? FALSE : TRUE;
switch (pinsert_hm->insert_params.type) {
case DPA_CLS_HM_INSERT_ETHERNET:
@@ -5986,8 +6198,6 @@ static int update_hm_prepare_nodes(struct dpa_cls_hm *pupdate_hm,
{
struct dpa_cls_hm_node *hm_node = NULL;
void * const *phm_nodes;
- struct dpa_cls_hm *pnext_hm = NULL;
- int update_ops, replace_ops;
int err = 0;
dpa_cls_dbg(("DEBUG: dpa_classifier %s (%d) -->\n", __func__,
@@ -6000,73 +6210,32 @@ static int update_hm_prepare_nodes(struct dpa_cls_hm *pupdate_hm,
if (res) { /* Import HM nodes */
phm_nodes = &res->update_node;
+ dpa_cls_dbg(("DEBUG: dpa_classifier %s (%d) <--\n", __func__,
+ __LINE__));
+
return import_hm_nodes_to_chain(phm_nodes,
pupdate_hm->num_nodes,
pupdate_hm);
}
- update_ops = DPA_CLS_HM_UPDATE_IPv4_UPDATE |
- DPA_CLS_HM_UPDATE_IPv6_UPDATE |
- DPA_CLS_HM_UPDATE_UDP_TCP_UPDATE;
-
- replace_ops = DPA_CLS_HM_REPLACE_IPv4_BY_IPv6 |
- DPA_CLS_HM_REPLACE_IPv6_BY_IPv4;
-
- if ((pupdate_hm->update_params.op_flags & update_ops) ||
- (pupdate_hm->update_params.op_flags & replace_ops)) {
- /* Create HM nodes */
- /* Check if we can attach to an existing update node */
- if (!list_empty(&pupdate_hm->list_node)) {
- pnext_hm = list_entry(pupdate_hm->list_node.next,
- struct dpa_cls_hm,
- list_node);
-
- if (pupdate_hm->update_params.op_flags &
- DPA_CLS_HM_UPDATE_IPv4_UPDATE)
- /*
- * See if there is any other IPv4 update node
- * in this chain
- */
- hm_node = find_compatible_hm_node(
- DPA_CLS_HM_NODE_IPv4_HDR_UPDATE,
- pnext_hm->hm_chain);
-
- if (pupdate_hm->update_params.op_flags &
- DPA_CLS_HM_UPDATE_IPv6_UPDATE)
- /*
- * See if there is any other IPv6 update node
- * in this chain
- */
- hm_node = find_compatible_hm_node(
- DPA_CLS_HM_NODE_IPv6_HDR_UPDATE,
- pnext_hm->hm_chain);
-
- if (pupdate_hm->update_params.op_flags &
- DPA_CLS_HM_UPDATE_UDP_TCP_UPDATE)
- /*
- * See if there is any other TCP/UDP header
- * update node in this chain
- */
- hm_node = find_compatible_hm_node(
- DPA_CLS_HM_NODE_TCPUDP_HDR_UPDATE,
- pnext_hm->hm_chain);
- }
-
- /*
- * If no compatible HM node was found for the header update
- * operations...
- */
- if (!hm_node) {
+ if (pupdate_hm->update_params.op_flags != DPA_CLS_HM_UPDATE_NONE) {
+ hm_node = try_compatible_node(pupdate_hm);
+ if ((pupdate_hm->update_params.ip_frag_params.mtu) ||
+ (hm_node == NULL)) {
/* Create a header manip node for this update: */
hm_node = kzalloc(sizeof(*hm_node), GFP_KERNEL);
+ dpa_cls_dbg(("DEBUG: dpa_classifier %s (%d): Created new hm_node = 0x%p\n",
+ __func__, __LINE__, hm_node));
if (!hm_node) {
- log_err("No more memory for header manip "
- "nodes.\n");
+ log_err("No more memory for header manip nodes.\n");
return -ENOMEM;
}
INIT_LIST_HEAD(&hm_node->list_node);
+
+ /* Initialize dontParseAfterManip to TRUE */
+ hm_node->params.u.hdr.dontParseAfterManip = TRUE;
}
pupdate_hm->hm_node[0] = hm_node;
@@ -6076,6 +6245,8 @@ static int update_hm_prepare_nodes(struct dpa_cls_hm *pupdate_hm,
/* IP fragmentation option is enabled */
/* Create a header manip node: */
hm_node = kzalloc(sizeof(*hm_node), GFP_KERNEL);
+ dpa_cls_dbg(("DEBUG: dpa_classifier %s (%d): Created new hm_node = 0x%p\n", __func__,
+ __LINE__, hm_node));
if (!hm_node) {
log_err("No more memory for header manip nodes.\n");
return -ENOMEM;
@@ -6116,7 +6287,9 @@ static int update_hm_update_params(struct dpa_cls_hm *pupdate_hm)
if (pupdate_hm->update_params.op_flags & update_ops) {
hm_node->params.type = e_FM_PCD_MANIP_HDR;
hm_node->params.u.hdr.fieldUpdate = TRUE;
- hm_node->params.u.hdr.dontParseAfterManip = TRUE;
+
+ hm_node->params.u.hdr.dontParseAfterManip &=
+ (pupdate_hm->update_params.reparse) ? FALSE : TRUE;
if (pupdate_hm->update_params.op_flags &
DPA_CLS_HM_UPDATE_IPv4_UPDATE) {
@@ -6256,11 +6429,13 @@ static int update_hm_update_params(struct dpa_cls_hm *pupdate_hm)
}
if (pupdate_hm->update_params.op_flags & replace_ops) {
- hm_node->params.type = e_FM_PCD_MANIP_HDR;
- hm_node->params.u.hdr.custom = TRUE;
- hm_node->params.u.hdr.customParams.type =
+ hm_node->params.type = e_FM_PCD_MANIP_HDR;
+ hm_node->params.u.hdr.custom = TRUE;
+ hm_node->params.u.hdr.customParams.type =
e_FM_PCD_MANIP_HDR_CUSTOM_IP_REPLACE;
- hm_node->params.u.hdr.dontParseAfterManip = TRUE;
+
+ hm_node->params.u.hdr.dontParseAfterManip &=
+ (pupdate_hm->update_params.reparse) ? FALSE : TRUE;
if (pupdate_hm->update_params.op_flags &
DPA_CLS_HM_REPLACE_IPv4_BY_IPv6) {
@@ -6525,6 +6700,12 @@ int dpa_classif_modify_update_hm(int hmd,
}
}
+ if (modify_flags & DPA_CLS_HM_UPDATE_MOD_IP_FRAG_MTU) {
+ pupdate_hm->update_params.ip_frag_params.mtu =
+ new_update_params->ip_frag_params.mtu;
+ update[1] = true;
+ }
+
if (update[0]) {
ret = update_hm_update_params(pupdate_hm);
if (ret == 0) {
@@ -6553,7 +6734,34 @@ int dpa_classif_modify_update_hm(int hmd,
}
}
- /* update[1] not supported at this time */
+ if (update[1]) {
+ ret = update_hm_update_params(pupdate_hm);
+ if (ret == 0) {
+ t_FmPcdManipParams new_hm_node_params;
+
+ hm_node = pupdate_hm->hm_node[1];
+
+ /*
+ * Have to make a copy of the manip node params because
+ * ManipNodeReplace does not accept h_NextManip != NULL.
+ */
+ memcpy(&new_hm_node_params, &hm_node->params,
+ sizeof(new_hm_node_params));
+ new_hm_node_params.h_NextManip = NULL;
+ error = FM_PCD_ManipNodeReplace(hm_node->node,
+ &new_hm_node_params);
+ if (error != E_OK) {
+ release_desc_table(&hm_array);
+ mutex_unlock(&pupdate_hm->access);
+ log_err("FMan driver call failed - "
+ "FM_PCD_ManipNodeReplace, while trying "
+ "to modify hmd=%d, manip node "
+ "handle=0x%p.\n", hmd, hm_node->node);
+ return -EBUSY;
+ }
+ }
+
+ }
release_desc_table(&hm_array);
mutex_unlock(&pupdate_hm->access);
@@ -6670,22 +6878,36 @@ static int vlan_hm_prepare_nodes(struct dpa_cls_hm *pvlan_hm,
if (res) { /* Import HM nodes */
phm_nodes = &res->vlan_node;
- err = import_hm_nodes_to_chain(phm_nodes,
+ dpa_cls_dbg(("DEBUG: dpa_classifier %s (%d) <--\n", __func__,
+ __LINE__));
+
+ return import_hm_nodes_to_chain(phm_nodes,
pvlan_hm->num_nodes,
pvlan_hm);
- } else { /* Create HM nodes */
+ }
+
+ hm_node = try_compatible_node(pvlan_hm);
+ if (hm_node == NULL) {
+ /* Create a header manip node for this insert: */
hm_node = kzalloc(sizeof(*hm_node), GFP_KERNEL);
+
+ dpa_cls_dbg(("DEBUG: dpa_classifier %s (%d): Created new hm_node = 0x%p\n", __func__,
+ __LINE__, hm_node));
if (!hm_node) {
- log_err("Not enough memory for header manip nodes.\n");
+ log_err("No more memory for header manip nodes.\n");
return -ENOMEM;
}
INIT_LIST_HEAD(&hm_node->list_node);
- pvlan_hm->hm_node[0] = hm_node;
- add_local_hm_nodes_to_chain(pvlan_hm);
+ /* Initialize dontParseAfterManip to TRUE */
+ hm_node->params.u.hdr.dontParseAfterManip = TRUE;
}
+ pvlan_hm->hm_node[0] = hm_node;
+
+ add_local_hm_nodes_to_chain(pvlan_hm);
+
dpa_cls_dbg(("DEBUG: dpa_classifier %s (%d) <--\n", __func__,
__LINE__));
@@ -6708,6 +6930,8 @@ static int vlan_hm_update_params(struct dpa_cls_hm *pvlan_hm)
hm_node = pvlan_hm->hm_node[0];
hm_node->params.type = e_FM_PCD_MANIP_HDR;
+ hm_node->params.u.hdr.dontParseAfterManip &=
+ (pvlan_hm->vlan_params.reparse) ? FALSE : TRUE;
switch (pvlan_hm->vlan_params.type) {
case DPA_CLS_HM_VLAN_INGRESS:
@@ -6718,7 +6942,6 @@ static int vlan_hm_update_params(struct dpa_cls_hm *pvlan_hm)
e_FM_PCD_MANIP_RMV_BY_HDR_SPECIFIC_L2;
hm_node->params.u.hdr.rmvParams.u.byHdr.u.specificL2 =
e_FM_PCD_MANIP_HDR_RMV_STACKED_QTAGS;
- hm_node->params.u.hdr.dontParseAfterManip = TRUE;
break;
case DPA_CLS_HM_VLAN_EGRESS:
@@ -6760,7 +6983,6 @@ static int vlan_hm_update_params(struct dpa_cls_hm *pvlan_hm)
hm_node->params.u.hdr.fieldUpdate = TRUE;
hm_node->params.u.hdr.fieldUpdateParams.type =
e_FM_PCD_MANIP_HDR_FIELD_UPDATE_VLAN;
- hm_node->params.u.hdr.dontParseAfterManip = TRUE;
switch (pvlan_hm->vlan_params.egress.update_op) {
case DPA_CLS_HM_VLAN_UPDATE_VPri:
@@ -7057,22 +7279,36 @@ static int mpls_hm_prepare_nodes(struct dpa_cls_hm *pmpls_hm,
if (res) { /* Import HM nodes */
phm_nodes = &res->ins_rm_node;
- err = import_hm_nodes_to_chain(phm_nodes,
+ dpa_cls_dbg(("DEBUG: dpa_classifier %s (%d) <--\n", __func__,
+ __LINE__));
+
+ return import_hm_nodes_to_chain(phm_nodes,
pmpls_hm->num_nodes,
pmpls_hm);
- } else { /* Create HM nodes */
+ }
+
+ hm_node = try_compatible_node(pmpls_hm);
+ if (hm_node == NULL) {
+ /* Create a header manip node for this insert: */
hm_node = kzalloc(sizeof(*hm_node), GFP_KERNEL);
+
+ dpa_cls_dbg(("DEBUG: dpa_classifier %s (%d): Created new hm_node = 0x%p\n", __func__,
+ __LINE__, hm_node));
if (!hm_node) {
- log_err("Not enough memory for header manip nodes.\n");
+ log_err("No more memory for header manip nodes.\n");
return -ENOMEM;
}
INIT_LIST_HEAD(&hm_node->list_node);
- pmpls_hm->hm_node[0] = hm_node;
- add_local_hm_nodes_to_chain(pmpls_hm);
+ /* Initialize dontParseAfterManip to TRUE */
+ hm_node->params.u.hdr.dontParseAfterManip = TRUE;
}
+ pmpls_hm->hm_node[0] = hm_node;
+
+ add_local_hm_nodes_to_chain(pmpls_hm);
+
dpa_cls_dbg(("DEBUG: dpa_classifier %s (%d) <--\n", __func__,
__LINE__));
@@ -7095,6 +7331,8 @@ static int mpls_hm_update_params(struct dpa_cls_hm *pmpls_hm)
hm_node = pmpls_hm->hm_node[0];
hm_node->params.type = e_FM_PCD_MANIP_HDR;
+ hm_node->params.u.hdr.dontParseAfterManip &=
+ (pmpls_hm->mpls_params.reparse) ? FALSE : TRUE;
switch (pmpls_hm->mpls_params.type) {
case DPA_CLS_HM_MPLS_REMOVE_ALL_LABELS:
@@ -7579,23 +7817,22 @@ int dpa_classif_mcast_create_group(
sizeof(struct dpa_cls_mcast_group_params));
/*
- * initialize the array of used members
+ * initialize the array of indexes of used members
*/
- pgroup->entries = kzalloc(sizeof(struct members) * max_members,
- GFP_KERNEL);
- if (!pgroup->entries) {
- log_err("No more memory for DPA multicast member entries.\n");
+ pgroup->member_ids = kzalloc(sizeof(int) * max_members, GFP_KERNEL);
+ if (!pgroup->member_ids) {
+ log_err("No more memory for DPA multicast index members array.\n");
err = -ENOMEM;
goto dpa_classif_mcast_create_group_error;
}
/*
- * initialize the array of indexes of used members
+ * initialize the array of used members
*/
- pgroup->member_ids = kzalloc(sizeof(int) * max_members, GFP_KERNEL);
- if (!pgroup->member_ids) {
- log_err("No more memory for DPA multicast index members "
- "array.\n");
+ pgroup->entries = kzalloc(sizeof(struct members) * max_members,
+ GFP_KERNEL);
+ if (!pgroup->entries) {
+ log_err("No more memory for DPA multicast member entries.\n");
err = -ENOMEM;
goto dpa_classif_mcast_create_group_error;
}
@@ -7748,8 +7985,10 @@ int dpa_classif_mcast_create_group(
dpa_classif_mcast_create_group_error:
if (pgroup) {
- dpa_classif_hm_release_chain(pgroup->entries[0].hmd);
- kfree(pgroup->entries);
+ if (pgroup->entries) {
+ dpa_classif_hm_release_chain(pgroup->entries[0].hmd);
+ kfree(pgroup->entries);
+ }
kfree(pgroup->member_ids);
mutex_destroy(&pgroup->access);
if (*grpd != DPA_OFFLD_DESC_NONE) {
@@ -7759,6 +7998,7 @@ dpa_classif_mcast_create_group_error:
}
kfree(pgroup);
}
+ kfree(replic_grp_params);
*grpd = DPA_OFFLD_DESC_NONE;
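The corrected error path above follows the usual partial-allocation rollback idiom: guard any dereference behind the pointer that owns it, and rely on kfree(NULL) being a no-op for the rest. Schematically (a sketch, not the literal driver code):

    err_out:
            if (pgroup) {
                    if (pgroup->entries) {  /* entries[0] valid only if allocated */
                            dpa_classif_hm_release_chain(pgroup->entries[0].hmd);
                            kfree(pgroup->entries);
                    }
                    kfree(pgroup->member_ids);      /* kfree(NULL) is safe */
                    kfree(pgroup);
            }
            kfree(replic_grp_params);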
@@ -7975,9 +8215,16 @@ int dpa_classif_mcast_remove_member(int grpd, int md)
mutex_lock(&pgroup->access);
release_desc_table(&mcast_grp_array);
- if ((md <= 0) || (md > pgroup->group_params.max_members)) {
+ if (pgroup->num_members <= 1) {
+ mutex_unlock(&pgroup->access);
+ log_err("Last member in group cannot be removed (md=%d).\n",
+ md);
+ return -EINVAL;
+ }
+
+ if ((md < 0) || (md > pgroup->group_params.max_members)) {
mutex_unlock(&pgroup->access);
- log_err("Invalid member descriptor (grpd=%d).\n", md);
+ log_err("Invalid member descriptor (md=%d).\n", md);
return -EINVAL;
}
diff --git a/drivers/staging/fsl_dpa_offload/dpa_classifier.h b/drivers/staging/fsl_dpa_offload/dpa_classifier.h
index 659e1b7..59ae335 100644
--- a/drivers/staging/fsl_dpa_offload/dpa_classifier.h
+++ b/drivers/staging/fsl_dpa_offload/dpa_classifier.h
@@ -508,14 +508,6 @@ static inline void key_apply_mask(const struct dpa_offload_lookup_key *key,
uint8_t *new_key);
/*
- * Finds in a chain of low level header manipulation nodes a node which is
- * compatible with a specific operation, so that the node can be reused.
- */
-static struct dpa_cls_hm_node
- *find_compatible_hm_node(enum dpa_cls_hm_node_type type,
- struct list_head *list);
-
-/*
* Import a set of low level header manipulation nodes into an existing
* low level header manipulation nodes list (associated with a classifier
* header manipulation op).
diff --git a/drivers/staging/fsl_dpa_offload/dpa_classifier_ioctl.h b/drivers/staging/fsl_dpa_offload/dpa_classifier_ioctl.h
index 88a157c..9595c1a 100644
--- a/drivers/staging/fsl_dpa_offload/dpa_classifier_ioctl.h
+++ b/drivers/staging/fsl_dpa_offload/dpa_classifier_ioctl.h
@@ -1,4 +1,3 @@
-
/* Copyright 2008-2012 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
@@ -110,6 +109,11 @@ struct ioc_dpa_cls_tbl_entry_stats_by_ref {
struct dpa_cls_tbl_entry_stats stats;
};
+struct ioc_dpa_cls_tbl_miss_stats {
+ int td;
+ struct dpa_cls_tbl_entry_stats stats;
+};
+
struct ioc_dpa_cls_hm_remove_params {
struct dpa_cls_hm_remove_params rm_params;
int next_hmd;
@@ -298,6 +302,7 @@ struct dpa_cls_compat_hm_remove_params {
enum dpa_cls_hm_remove_type type;
struct dpa_cls_hm_custom_rm_params custom;
compat_uptr_t fm_pcd;
+ bool reparse;
};
struct compat_ioc_dpa_cls_hm_remove_params {
@@ -320,14 +325,15 @@ struct dpa_cls_compat_hm_custom_ins_params {
};
struct dpa_cls_compat_hm_insert_params {
- enum dpa_cls_hm_insert_type type;
+ enum dpa_cls_hm_insert_type type;
union {
struct dpa_cls_hm_eth_ins_params eth;
- struct dpa_cls_hm_pppoe_ins_params pppoe;
- uint16_t ppp_pid;
- struct dpa_cls_compat_hm_custom_ins_params custom;
+ struct dpa_cls_hm_pppoe_ins_params pppoe;
+ uint16_t ppp_pid;
+ struct dpa_cls_compat_hm_custom_ins_params custom;
};
- compat_uptr_t fm_pcd;
+ compat_uptr_t fm_pcd;
+ bool reparse;
};
struct compat_ioc_dpa_cls_hm_insert_params {
@@ -346,6 +352,7 @@ struct dpa_cls_compat_hm_vlan_params {
struct dpa_cls_hm_egress_vlan_params egress;
};
compat_uptr_t fm_pcd;
+ bool reparse;
};
struct dpa_cls_compat_hm_vlan_resources {
@@ -377,7 +384,7 @@ struct dpa_cls_compat_hm_nat_pt_params {
};
struct dpa_cls_compat_hm_nat_params {
- int flags;
+ int flags;
enum dpa_cls_hm_nat_proto proto;
enum dpa_cls_hm_nat_type type;
union {
@@ -387,6 +394,7 @@ struct dpa_cls_compat_hm_nat_params {
uint16_t sport;
uint16_t dport;
compat_uptr_t fm_pcd;
+ bool reparse;
};
struct dpa_cls_compat_hm_nat_resources {
@@ -404,7 +412,7 @@ struct compat_ioc_dpa_cls_hm_nat_params {
};
struct dpa_cls_compat_hm_update_params {
- int op_flags;
+ int op_flags;
union {
struct compat_ipv4_header new_ipv4_hdr;
struct ipv6_header new_ipv6_hdr;
@@ -415,6 +423,7 @@ struct dpa_cls_compat_hm_update_params {
} update;
struct dpa_cls_hm_ip_frag_params ip_frag_params;
compat_uptr_t fm_pcd;
+ bool reparse;
};
struct dpa_cls_compat_hm_update_resources {
@@ -441,6 +450,7 @@ struct dpa_cls_compat_hm_fwd_params {
};
struct dpa_cls_hm_ip_frag_params ip_frag_params;
compat_uptr_t fm_pcd;
+ bool reparse;
};
struct dpa_cls_compat_hm_fwd_resources {
@@ -463,6 +473,7 @@ struct dpa_cls_compat_hm_mpls_params {
struct mpls_header mpls_hdr[DPA_CLS_HM_MAX_MPLS_LABELS];
unsigned int num_labels;
compat_uptr_t fm_pcd;
+ bool reparse;
};
struct dpa_cls_compat_hm_mpls_resources {
@@ -691,6 +702,9 @@ int dpa_cls_mcast_member_params_compatcpy(
#define DPA_CLS_IOC_TBL_GET_STATS_BY_REF \
_IOR(DPA_CLS_IOC_MAGIC, 12, struct ioc_dpa_cls_tbl_entry_stats_by_ref)
+#define DPA_CLS_IOC_TBL_GET_MISS_STATS \
+ _IOR(DPA_CLS_IOC_MAGIC, 13, struct ioc_dpa_cls_tbl_miss_stats)
+
#define DPA_CLS_IOC_TBL_GET_PARAMS \
_IOWR(DPA_CLS_IOC_MAGIC, 15, struct ioc_dpa_cls_tbl_params)
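A hypothetical user-space invocation of the new DPA_CLS_IOC_TBL_GET_MISS_STATS ioctl (the device node path and field widths are assumptions, not taken from this patch):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>

    struct ioc_dpa_cls_tbl_miss_stats prm = { .td = td };
    int fd = open("/dev/dpa_classifier", O_RDWR);   /* assumed node name */

    if (fd >= 0 && ioctl(fd, DPA_CLS_IOC_TBL_GET_MISS_STATS, &prm) == 0)
            printf("miss pkts=%llu bytes=%llu\n",
                   (unsigned long long)prm.stats.pkts,
                   (unsigned long long)prm.stats.bytes);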
diff --git a/drivers/staging/fsl_dpa_offload/dpa_ipsec.c b/drivers/staging/fsl_dpa_offload/dpa_ipsec.c
index 3e14ca5..5171df4 100644
--- a/drivers/staging/fsl_dpa_offload/dpa_ipsec.c
+++ b/drivers/staging/fsl_dpa_offload/dpa_ipsec.c
@@ -518,7 +518,7 @@ static int create_inpol_cls_tbl(struct dpa_ipsec *dpa_ipsec,
memset(&params, 0, sizeof(params));
params.entry_mgmt = DPA_CLS_TBL_MANAGE_BY_REF;
params.type = DPA_CLS_TBL_EXACT_MATCH;
- params.exact_match_params.entries_cnt = DPA_IPSEC_MAX_IN_POL_PER_SA;
+ params.exact_match_params.entries_cnt = DPA_IPSEC_MAX_POL_PER_SA;
params.exact_match_params.key_size = dpa_ipsec->sa_mng.inpol_key_size;
params.exact_match_params.use_priorities = true;
params.cc_node = cc_node;
@@ -3240,6 +3240,13 @@ static int check_sa_params(struct dpa_ipsec_sa_params *sa_params)
return -EINVAL;
}
+ if (sa_params->crypto_params.alg_suite <
+ DPA_IPSEC_CIPHER_ALG_3DES_CBC_HMAC_96_MD5_128 ||
+ sa_params->crypto_params.alg_suite >
+ DPA_IPSEC_CIPHER_ALG_AES_CTR_HMAC_SHA_512_256) {
+ log_err("Invalid alg_suite value\n");
+ return -EINVAL;
+ }
/*
* check crypto params:
* - an authentication key must always be provided
@@ -3657,6 +3664,7 @@ int dpa_ipsec_free(int dpa_ipsec_id)
sa_id = instance->used_sa_ids[i];
if (sa_id != DPA_OFFLD_INVALID_OBJECT_ID) {
sa = get_sa_from_sa_id(instance, sa_id);
+ BUG_ON(!sa);
if (sa_is_inbound(sa)) {
if (sa_is_child(sa))
remove_inbound_sa(sa->parent_sa);
@@ -5855,7 +5863,7 @@ int dpa_ipsec_sa_get_seq_number(int sa_id, uint64_t *seq)
out:
put_instance(dpa_ipsec);
- return 0;
+ return ret;
}
EXPORT_SYMBOL(dpa_ipsec_sa_get_seq_number);
diff --git a/drivers/staging/fsl_dpa_offload/dpa_stats.c b/drivers/staging/fsl_dpa_offload/dpa_stats.c
index b3902d0..08941dc 100644
--- a/drivers/staging/fsl_dpa_offload/dpa_stats.c
+++ b/drivers/staging/fsl_dpa_offload/dpa_stats.c
@@ -46,13 +46,17 @@
/* FMD includes */
#include "fm_pcd_ext.h"
-#define STATS_VAL_SIZE 4
#define UNSUPPORTED_CNT_SEL -1
#define CLASSIF_STATS_SHIFT 4
#define WORKQUEUE_MAX_ACTIVE 3
#define DPA_STATS_US_CNT 0x80000000
+#define CHECK_INSTANCE_ZERO \
+ if (dpa_stats_id != 0) { \
+ log_err("DPA Stats supports only instance zero\n"); \
+ return -ENOSYS; \
+ }
/* Global dpa_stats component */
struct dpa_stats *gbl_dpa_stats;
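One note on the macro shape: as defined, CHECK_INSTANCE_ZERO is a bare if statement, which works at the straight-line call sites in this patch but would mis-bind in an unbraced if/else. A more defensive do/while(0) variant would be (an editorial sketch, not part of the patch):

    #define CHECK_INSTANCE_ZERO \
            do { \
                    if (dpa_stats_id != 0) { \
                            log_err("DPA Stats supports only instance zero\n"); \
                            return -ENOSYS; \
                    } \
            } while (0)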
@@ -120,9 +124,12 @@ static int check_dpa_stats_params(const struct dpa_stats_params *params)
return -EINVAL;
}
- if (params->storage_area_len < STATS_VAL_SIZE) {
- log_err("Parameter storage_area_len %d cannot be bellow %d\n",
- params->storage_area_len, STATS_VAL_SIZE);
+ /* Check user-provided storage area length */
+ if (params->storage_area_len < DPA_STATS_CNT_SEL_LEN ||
+ params->storage_area_len > DPA_STATS_MAX_STORAGE_AREA_SIZE) {
+ log_err("Parameter storage_area_len %d must be in range (%d - %d)\n",
+ params->storage_area_len,
+ DPA_STATS_CNT_SEL_LEN, DPA_STATS_MAX_STORAGE_AREA_SIZE);
return -EINVAL;
}
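The replacement of STATS_VAL_SIZE by DPA_STATS_CNT_SEL_LEN throughout this file is behavior-preserving only if both equal 4 bytes per statistic, which the unchanged arithmetic implies; treating that value as an assumption, the storage a counter consumes works out as:

    /* bytes_num = members_num * DPA_STATS_CNT_SEL_LEN * stats_num
     * e.g. a class counter with 8 members, 2 statistics each:
     *      8 * 4 * 2 = 64 bytes of the storage area,
     * and storage_area_len must cover the sum over all attached counters. */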
@@ -153,13 +160,13 @@ static int set_cnt_classif_node_retrieve_func(struct dpa_stats_cnt_cb *cnt_cb,
enum dpa_stats_classif_node_type ccnode_type)
{
switch (ccnode_type) {
- case DPA_CLS_TBL_HASH:
+ case DPA_STATS_CLASSIF_NODE_HASH:
cnt_cb->f_get_cnt_stats = get_cnt_ccnode_hash_stats;
break;
- case DPA_CLS_TBL_INDEXED:
+ case DPA_STATS_CLASSIF_NODE_INDEXED:
cnt_cb->f_get_cnt_stats = get_cnt_ccnode_index_stats;
break;
- case DPA_CLS_TBL_EXACT_MATCH:
+ case DPA_STATS_CLASSIF_NODE_EXACT_MATCH:
cnt_cb->f_get_cnt_stats = get_cnt_ccnode_match_stats;
break;
default:
@@ -1152,7 +1159,7 @@ static int copy_key_descriptor(const struct dpa_offload_lookup_key *src,
}
/* Check that key size is not zero */
- if (src->size == 0) {
+ if ((src->size == 0) || (src->size > DPA_OFFLD_MAXENTRYKEYSIZE)) {
log_err("Lookup key descriptor size (%d) must be in range (1 - %d) bytes\n",
src->size, DPA_OFFLD_MAXENTRYKEYSIZE);
return -EINVAL;
@@ -1378,7 +1385,7 @@ static int cnt_gen_sel_to_stats(struct dpa_stats_cnt_cb *cnt_cb,
/* Set number of bytes that will be written by this counter */
cnt_cb->bytes_num = cnt_cb->members_num *
- STATS_VAL_SIZE * cnt_cb->info.stats_num;
+ DPA_STATS_CNT_SEL_LEN * cnt_cb->info.stats_num;
return 0;
}
@@ -1498,7 +1505,7 @@ static int set_cnt_eth_cb(struct dpa_stats_cnt_cb *cnt_cb,
return err;
/* Set number of bytes that will be written by this counter */
- cnt_cb->bytes_num = STATS_VAL_SIZE * cnt_cb->info.stats_num;
+ cnt_cb->bytes_num = DPA_STATS_CNT_SEL_LEN * cnt_cb->info.stats_num;
/* Get FM MAC handle */
err = get_fm_mac(params->eth_params.src, &fm_mac);
@@ -1589,7 +1596,7 @@ static int set_cnt_reass_cb(struct dpa_stats_cnt_cb *cnt_cb,
return err;
/* Set number of bytes that will be written by this counter */
- cnt_cb->bytes_num = STATS_VAL_SIZE * cnt_cb->info.stats_num;
+ cnt_cb->bytes_num = DPA_STATS_CNT_SEL_LEN * cnt_cb->info.stats_num;
/* Check the user-provided reassembly manip */
err = FM_PCD_ManipGetStatistics(params->reass_params.reass, &stats);
@@ -1653,7 +1660,7 @@ static int set_cnt_frag_cb(struct dpa_stats_cnt_cb *cnt_cb,
return err;
/* Set number of bytes that will be written by this counter */
- cnt_cb->bytes_num = STATS_VAL_SIZE * cnt_cb->info.stats_num;
+ cnt_cb->bytes_num = DPA_STATS_CNT_SEL_LEN * cnt_cb->info.stats_num;
/* Check the user-provided fragmentation handle */
err = FM_PCD_ManipGetStatistics(params->frag_params.frag, &stats);
@@ -1718,7 +1725,7 @@ static int set_cnt_plcr_cb(struct dpa_stats_cnt_cb *cnt_cb,
return err;
/* Set number of bytes that will be written by this counter */
- cnt_cb->bytes_num = STATS_VAL_SIZE * cnt_cb->info.stats_num;
+ cnt_cb->bytes_num = DPA_STATS_CNT_SEL_LEN * cnt_cb->info.stats_num;
err = alloc_cnt_stats(&cnt_cb->info, cnt_cb->members_num);
if (err)
@@ -1816,7 +1823,7 @@ static int set_cnt_classif_tbl_cb(struct dpa_stats_cnt_cb *cnt_cb,
cnt_cb->members_num = 1;
/* Set number of bytes that will be written by this counter */
- cnt_cb->bytes_num = STATS_VAL_SIZE * cnt_cb->info.stats_num;
+ cnt_cb->bytes_num = DPA_STATS_CNT_SEL_LEN * cnt_cb->info.stats_num;
err = alloc_cnt_stats(&cnt_cb->info, cnt_cb->members_num);
if (err)
@@ -1921,7 +1928,7 @@ static int set_cnt_ccnode_cb(struct dpa_stats_cnt_cb *cnt_cb,
return err;
/* Set number of bytes that will be written by this counter */
- cnt_cb->bytes_num = STATS_VAL_SIZE * cnt_cb->info.stats_num;
+ cnt_cb->bytes_num = DPA_STATS_CNT_SEL_LEN * cnt_cb->info.stats_num;
err = alloc_cnt_stats(&cnt_cb->info, cnt_cb->members_num);
if (err)
@@ -2151,7 +2158,7 @@ static int set_cls_cnt_eth_cb(struct dpa_stats_cnt_cb *cnt_cb,
/* Set number of bytes that will be written by this counter */
cnt_cb->bytes_num = cnt_cb->members_num *
- STATS_VAL_SIZE * cnt_cb->info.stats_num;
+ DPA_STATS_CNT_SEL_LEN * cnt_cb->info.stats_num;
cnt_cb->gen_cb.objs = kcalloc(cnt_cb->members_num, sizeof(t_Handle),
GFP_KERNEL);
@@ -2235,7 +2242,7 @@ static int set_cls_cnt_reass_cb(struct dpa_stats_cnt_cb *cnt_cb,
/* Set number of bytes that will be written by this counter */
cnt_cb->bytes_num = cnt_cb->members_num *
- STATS_VAL_SIZE * cnt_cb->info.stats_num;
+ DPA_STATS_CNT_SEL_LEN * cnt_cb->info.stats_num;
cnt_cb->gen_cb.objs = kcalloc(cnt_cb->members_num, sizeof(t_Handle),
GFP_KERNEL);
@@ -2303,7 +2310,7 @@ static int set_cls_cnt_frag_cb(struct dpa_stats_cnt_cb *cnt_cb,
/* Set number of bytes that will be written by this counter */
cnt_cb->bytes_num = cnt_cb->members_num *
- STATS_VAL_SIZE * cnt_cb->info.stats_num;
+ DPA_STATS_CNT_SEL_LEN * cnt_cb->info.stats_num;
cnt_cb->gen_cb.objs = kcalloc(cnt_cb->members_num, sizeof(t_Handle),
GFP_KERNEL);
@@ -2342,7 +2349,8 @@ static int set_cls_cnt_plcr_cb(struct dpa_stats_cnt_cb *cnt_cb,
{
struct dpa_stats *dpa_stats = cnt_cb->dpa_stats;
uint32_t cnt_sel = params->plcr_params.cnt_sel;
- uint32_t i, j, stats, stats_idx, stats_base_idx;
+ uint32_t i, j, stats_idx, stats_base_idx;
+ uint64_t stats;
int err;
if (!dpa_stats) {
@@ -2371,7 +2379,7 @@ static int set_cls_cnt_plcr_cb(struct dpa_stats_cnt_cb *cnt_cb,
/* Set number of bytes that will be written by this counter */
cnt_cb->bytes_num = cnt_cb->members_num *
- STATS_VAL_SIZE * cnt_cb->info.stats_num;
+ DPA_STATS_CNT_SEL_LEN * cnt_cb->info.stats_num;
cnt_cb->gen_cb.objs = kcalloc(cnt_cb->members_num, sizeof(t_Handle),
GFP_KERNEL);
@@ -2550,7 +2558,7 @@ static int set_cls_cnt_classif_tbl_cb(struct dpa_stats_cnt_cb *cnt_cb,
/* Set number of bytes that will be written by this counter */
cnt_cb->bytes_num = cnt_cb->members_num *
- STATS_VAL_SIZE * cnt_cb->info.stats_num;
+ DPA_STATS_CNT_SEL_LEN * cnt_cb->info.stats_num;
/* Allocate memory for key descriptors */
tbl_cb->keys = kcalloc(params->class_members, sizeof(*tbl_cb->keys),
@@ -2745,7 +2753,7 @@ static int set_cls_cnt_ccnode_cb(struct dpa_stats_cnt_cb *cnt_cb,
/* Set number of bytes that will be written by this counter */
cnt_cb->bytes_num = cnt_cb->members_num *
- STATS_VAL_SIZE * cnt_cb->info.stats_num;
+ DPA_STATS_CNT_SEL_LEN * cnt_cb->info.stats_num;
/* Set retrieve function depending on counter type */
err = set_cnt_classif_node_retrieve_func(cnt_cb, prm.ccnode_type);
@@ -3173,10 +3181,10 @@ static inline void get_cnt_32bit_stats(struct dpa_stats_req_cb *req_cb,
for (j = 0; j < stats_info->stats_num; j++) {
if (stats_info->stats_off[j] == UNSUPPORTED_CNT_SEL) {
/* Write the memory location */
- memset(req_cb->request_area, 0, STATS_VAL_SIZE);
+ memset(req_cb->request_area, 0, DPA_STATS_CNT_SEL_LEN);
/* Update the memory pointer */
- req_cb->request_area += STATS_VAL_SIZE;
+ req_cb->request_area += DPA_STATS_CNT_SEL_LEN;
continue;
}
@@ -3203,7 +3211,7 @@ static inline void get_cnt_32bit_stats(struct dpa_stats_req_cb *req_cb,
stats_info->stats[stats_index];
/* Update the memory pointer */
- req_cb->request_area += STATS_VAL_SIZE;
+ req_cb->request_area += DPA_STATS_CNT_SEL_LEN;
if (stats_info->reset)
stats_info->stats[stats_index] = 0;
@@ -3244,7 +3252,7 @@ static inline void get_cnt_64bit_stats(struct dpa_stats_req_cb *req_cb,
(uint32_t)stats_info->stats[stats_index];
/* Update the memory pointer */
- req_cb->request_area += STATS_VAL_SIZE;
+ req_cb->request_area += DPA_STATS_CNT_SEL_LEN;
if (stats_info->reset)
stats_info->stats[stats_index] = 0;
@@ -3352,7 +3360,7 @@ static int get_cnt_plcr_stats(struct dpa_stats_req_cb *req_cb,
(uint32_t)info->stats[stats_index];
/* Update the memory pointer */
- req_cb->request_area += STATS_VAL_SIZE;
+ req_cb->request_area += DPA_STATS_CNT_SEL_LEN;
if (info->reset)
info->stats[stats_index] = 0;
@@ -3373,11 +3381,11 @@ static int get_cnt_cls_tbl_match_stats(struct dpa_stats_req_cb *req_cb,
if (!cnt_cb->tbl_cb.keys[i].valid) {
/* Write the memory location */
memset(req_cb->request_area, 0,
- cnt_cb->info.stats_num * STATS_VAL_SIZE);
+ cnt_cb->info.stats_num * DPA_STATS_CNT_SEL_LEN);
/* Update the memory pointer */
- req_cb->request_area += STATS_VAL_SIZE *
- cnt_cb->info.stats_num;
+ req_cb->request_area +=
+ DPA_STATS_CNT_SEL_LEN * cnt_cb->info.stats_num;
continue;
}
@@ -3423,11 +3431,11 @@ static int get_cnt_cls_tbl_hash_stats(struct dpa_stats_req_cb *req_cb,
if (!cnt_cb->tbl_cb.keys[i].valid) {
/* Write the memory location */
memset(req_cb->request_area, 0,
- cnt_cb->info.stats_num * STATS_VAL_SIZE);
+ cnt_cb->info.stats_num * DPA_STATS_CNT_SEL_LEN);
/* Update the memory pointer */
- req_cb->request_area += STATS_VAL_SIZE *
- cnt_cb->info.stats_num;
+ req_cb->request_area +=
+ DPA_STATS_CNT_SEL_LEN * cnt_cb->info.stats_num;
continue;
}
@@ -3463,11 +3471,11 @@ static int get_cnt_cls_tbl_index_stats(struct dpa_stats_req_cb *req_cb,
if (!cnt_cb->tbl_cb.keys[i].valid) {
/* Write the memory location */
memset(req_cb->request_area, 0,
- cnt_cb->info.stats_num * STATS_VAL_SIZE);
+ cnt_cb->info.stats_num * DPA_STATS_CNT_SEL_LEN);
/* Update the memory pointer */
- req_cb->request_area += STATS_VAL_SIZE *
- cnt_cb->info.stats_num;
+ req_cb->request_area +=
+ DPA_STATS_CNT_SEL_LEN * cnt_cb->info.stats_num;
continue;
}
@@ -3503,11 +3511,11 @@ static int get_cnt_cls_tbl_frag_stats(struct dpa_stats_req_cb *req_cb,
if (!cnt_cb->tbl_cb.keys[i].valid) {
/* Write the memory location */
memset(req_cb->request_area, 0,
- cnt_cb->info.stats_num * STATS_VAL_SIZE);
+ cnt_cb->info.stats_num * DPA_STATS_CNT_SEL_LEN);
/* Update the memory pointer */
- req_cb->request_area += STATS_VAL_SIZE *
- cnt_cb->info.stats_num;
+ req_cb->request_area +=
+ DPA_STATS_CNT_SEL_LEN * cnt_cb->info.stats_num;
continue;
}
@@ -3628,11 +3636,11 @@ static int get_cnt_ipsec_stats(struct dpa_stats_req_cb *req_cb,
if (!cnt_cb->ipsec_cb.valid[i]) {
/* Write the memory location */
memset(req_cb->request_area, 0,
- cnt_cb->info.stats_num * STATS_VAL_SIZE);
+ cnt_cb->info.stats_num * DPA_STATS_CNT_SEL_LEN);
/* Update the memory pointer */
req_cb->request_area +=
- STATS_VAL_SIZE * cnt_cb->info.stats_num;
+ DPA_STATS_CNT_SEL_LEN * cnt_cb->info.stats_num;
continue;
}
@@ -3704,7 +3712,7 @@ static int get_cnt_us_stats(struct dpa_stats_req_cb *req_cb,
/* Write the memory location */
*(uint32_t *)(req_cb->request_area) = 0;
/* Update the memory pointer */
- req_cb->request_area += STATS_VAL_SIZE;
+ req_cb->request_area += DPA_STATS_CNT_SEL_LEN;
}
}
return 0;
@@ -3744,15 +3752,18 @@ int dpa_stats_init(const struct dpa_stats_params *params, int *dpa_stats_id)
struct dpa_stats *dpa_stats = NULL;
int err = 0;
- /* Multiple DPA Stats instances are not currently supported */
- unused(dpa_stats_id);
-
/* Sanity checks */
if (gbl_dpa_stats) {
log_err("DPA Stats component already initialized. Multiple DPA Stats instances are not supported.\n");
return -EPERM;
}
+ /*
+ * Multiple DPA Stats instances are not currently supported. The only
+ * supported instance is zero.
+ */
+ *dpa_stats_id = 0;
+
/* Check user-provided parameters */
err = check_dpa_stats_params(params);
if (err < 0)
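
The CHECK_INSTANCE_ZERO macro that replaces unused(dpa_stats_id) in the hunks below is not shown in this patch; a plausible definition, assuming it simply enforces the single-instance rule stated in the comment above, might be:

/* Hypothetical definition; the real macro lives elsewhere in the driver */
#define CHECK_INSTANCE_ZERO \
        do { \
                if (dpa_stats_id != 0) { \
                        log_err("Only DPA Stats instance zero is supported\n"); \
                        return -EINVAL; \
                } \
        } while (0)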
@@ -3822,7 +3833,7 @@ int dpa_stats_create_counter(int dpa_stats_id,
int err = 0, err_rb = 0;
/* multiple DPA Stats instances are not currently supported */
- unused(dpa_stats_id);
+ CHECK_INSTANCE_ZERO;
if (!gbl_dpa_stats) {
log_err("DPA Stats component is not initialized\n");
@@ -3981,7 +3992,7 @@ int dpa_stats_create_class_counter(int dpa_stats_id,
int err = 0, err_rb = 0;
/* multiple DPA Stats instances are not currently supported */
- unused(dpa_stats_id);
+ CHECK_INSTANCE_ZERO;
if (!gbl_dpa_stats) {
log_err("DPA Stats component is not initialized\n");
@@ -4339,6 +4350,14 @@ int dpa_stats_get_counters(struct dpa_stats_cnt_request_params params,
return -EPERM;
}
+ /* Check user-provided size for array of counters */
+ if (params.cnts_ids_len == 0 ||
+ params.cnts_ids_len > DPA_STATS_REQ_CNTS_IDS_LEN) {
+ log_err("Number of requested counter ids (%d) must be in range (1 - %d)\n",
+ params.cnts_ids_len, DPA_STATS_REQ_CNTS_IDS_LEN);
+ return -EINVAL;
+ }
+
/* Check user-provided cnts_len pointer */
if (!cnts_len) {
log_err("Parameter cnts_len cannot be NULL\n");
@@ -4357,10 +4376,10 @@ int dpa_stats_get_counters(struct dpa_stats_cnt_request_params params,
for (i = 0; i < params.cnts_ids_len; i++) {
if (params.cnts_ids[i] == DPA_OFFLD_INVALID_OBJECT_ID ||
- params.cnts_ids[i] > dpa_stats->config.max_counters) {
- log_err("Counter id (cnt_ids[%d]) %d is not initialized or is greater than maximum counters %d\n",
+ params.cnts_ids[i] >= dpa_stats->config.max_counters) {
+			log_err("Counter id (cnts_ids[%d]) %d is not initialized or is greater than maximum counter id %d\n",
i, params.cnts_ids[i],
- dpa_stats->config.max_counters);
+ dpa_stats->config.max_counters - 1);
return -EINVAL;
}
}
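
The comparison change above closes an off-by-one: with max_counters counters the valid ids run from 0 to max_counters - 1, so an id equal to max_counters has to be rejected as well. In isolation:

/* ids index a table of max_counters slots, so max_counters itself is out */
static bool cnt_id_valid(int id, int max_counters)
{
        /* the old test "id > max_counters" let id == max_counters through */
        return id != DPA_OFFLD_INVALID_OBJECT_ID && id >= 0 &&
               id < max_counters;
}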
@@ -4458,8 +4477,9 @@ int dpa_stats_reset_counters(int *cnts_ids, unsigned int cnts_ids_len)
}
/* Check user-provided cnts_len pointer */
- if (cnts_ids_len == 0) {
- log_err("Parameter cnts_ids_len cannot be 0\n");
+ if (cnts_ids_len == 0 || cnts_ids_len > DPA_STATS_REQ_CNTS_IDS_LEN) {
+ log_err("Parameter cnts_ids_len %d must be in range (1 - %d)\n",
+ cnts_ids_len, DPA_STATS_REQ_CNTS_IDS_LEN);
return -EINVAL;
}
@@ -4473,8 +4493,8 @@ int dpa_stats_reset_counters(int *cnts_ids, unsigned int cnts_ids_len)
for (i = 0; i < cnts_ids_len; i++)
if (cnts_ids[i] == DPA_OFFLD_INVALID_OBJECT_ID ||
- cnts_ids[i] > dpa_stats->config.max_counters) {
- log_err("Counter id (cnts_ids[%d]) %d is not initialized or is greater than maximum counters %d\n",
+ cnts_ids[i] >= dpa_stats->config.max_counters) {
+			log_err("Counter id (cnts_ids[%d]) %d is not initialized or is greater than maximum counter id %d\n",
i, cnts_ids[i],
dpa_stats->config.max_counters - 1);
return -EINVAL;
@@ -4527,7 +4547,7 @@ EXPORT_SYMBOL(dpa_stats_reset_counters);
int dpa_stats_free(int dpa_stats_id)
{
/* multiple DPA Stats instances are not currently supported */
- unused(dpa_stats_id);
+ CHECK_INSTANCE_ZERO;
return free_resources();
}
diff --git a/drivers/staging/fsl_dpa_offload/wrp_dpa_classifier.c b/drivers/staging/fsl_dpa_offload/wrp_dpa_classifier.c
index 1e35b09..9c15dba 100644
--- a/drivers/staging/fsl_dpa_offload/wrp_dpa_classifier.c
+++ b/drivers/staging/fsl_dpa_offload/wrp_dpa_classifier.c
@@ -452,6 +452,34 @@ long wrp_dpa_classif_do_ioctl(
break;
}
+ case DPA_CLS_IOC_TBL_GET_MISS_STATS:
+ {
+ struct ioc_dpa_cls_tbl_miss_stats param;
+
+ dpa_cls_wrp_dbg((
+ "DEBUG: classifier_wrp %s (%d): get_miss_stats\n",
+ __func__, __LINE__));
+
+ /* Prepare arguments */
+ if (copy_from_user(&param, (void *) args, sizeof(param))) {
+ log_err("Read failed: dpa_classif_table_get_miss_stats user space args.\n");
+ return -EBUSY;
+ }
+
+ /* Call function */
+ ret = dpa_classif_table_get_miss_stats(param.td, &param.stats);
+ if (ret < 0)
+ return ret;
+
+ /* Return results to user space */
+ if (copy_to_user((void *) args, &param, sizeof(param))) {
+ log_err("Write failed: dpa_classif_table_get_miss_stats result.\n");
+ return -EBUSY;
+ }
+
+ break;
+ }
+
#ifdef CONFIG_COMPAT
case DPA_CLS_IOC_COMPAT_TBL_GET_PARAMS:
#endif /* CONFIG_COMPAT */
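
As a usage note for the new DPA_CLS_IOC_TBL_GET_MISS_STATS case, a minimal user-space sketch might look as follows. Only td and stats come from the handler above; the device node path and the field names inside param.stats are assumptions:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
/* plus the driver's UAPI header for the ioctl number and struct layout */

int print_miss_stats(int td)
{
        struct ioc_dpa_cls_tbl_miss_stats param = { .td = td };
        int fd = open("/dev/dpa_classifier", O_RDWR);   /* path assumed */

        if (fd < 0)
                return -1;
        if (ioctl(fd, DPA_CLS_IOC_TBL_GET_MISS_STATS, &param) < 0) {
                close(fd);
                return -1;
        }
        /* field name inside param.stats is illustrative */
        printf("miss frames: %u\n", param.stats.pkts);
        close(fd);
        return 0;
}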
@@ -2637,9 +2665,10 @@ int dpa_cls_hm_remove_params_compatcpy(
memcpy(&kparam->rm_params.custom, &uparam->rm_params.custom,
sizeof(struct dpa_cls_hm_custom_rm_params));
- kparam->rm_params.fm_pcd = compat_ptr(uparam->rm_params.fm_pcd);
- kparam->next_hmd = uparam->next_hmd;
- kparam->hmd = uparam->hmd;
+ kparam->rm_params.fm_pcd = compat_ptr(uparam->rm_params.fm_pcd);
+ kparam->rm_params.reparse = uparam->rm_params.reparse;
+ kparam->next_hmd = uparam->next_hmd;
+ kparam->hmd = uparam->hmd;
if (uparam->res.remove_node)
kparam->res.remove_node = compat_get_id2ptr(
@@ -2687,9 +2716,10 @@ int dpa_cls_hm_insert_params_compatcpy(
break;
}
- kparam->ins_params.fm_pcd = compat_ptr(uparam->ins_params.fm_pcd);
- kparam->next_hmd = uparam->next_hmd;
- kparam->hmd = uparam->hmd;
+ kparam->ins_params.fm_pcd = compat_ptr(uparam->ins_params.fm_pcd);
+ kparam->ins_params.reparse = uparam->ins_params.reparse;
+ kparam->next_hmd = uparam->next_hmd;
+ kparam->hmd = uparam->hmd;
if (uparam->res.insert_node)
kparam->res.insert_node = compat_get_id2ptr(
uparam->res.insert_node,
@@ -2727,6 +2757,7 @@ int dpa_cls_hm_vlan_params_compatcpy(
}
kparam->vlan_params.fm_pcd = compat_ptr(uparam->vlan_params.fm_pcd);
+ kparam->vlan_params.reparse = uparam->vlan_params.reparse;
kparam->next_hmd = uparam->next_hmd;
kparam->hmd = uparam->hmd;
@@ -2778,11 +2809,12 @@ int dpa_cls_hm_nat_params_compatcpy(
memcpy(&kparam->nat_params.nat, &uparam->nat_params.nat,
sizeof(struct dpa_cls_hm_traditional_nat_params));
- kparam->nat_params.fm_pcd = compat_ptr(uparam->nat_params.fm_pcd);
- kparam->nat_params.sport = uparam->nat_params.sport;
- kparam->nat_params.dport = uparam->nat_params.dport;
- kparam->next_hmd = uparam->next_hmd;
- kparam->hmd = uparam->hmd;
+ kparam->nat_params.fm_pcd = compat_ptr(uparam->nat_params.fm_pcd);
+ kparam->nat_params.reparse = uparam->nat_params.reparse;
+ kparam->nat_params.sport = uparam->nat_params.sport;
+ kparam->nat_params.dport = uparam->nat_params.dport;
+ kparam->next_hmd = uparam->next_hmd;
+ kparam->hmd = uparam->hmd;
if (uparam->res.l3_update_node)
kparam->res.l3_update_node = compat_get_id2ptr(
@@ -2840,6 +2872,7 @@ int dpa_cls_hm_update_params_compatcpy(
kparam->update_params.fm_pcd = compat_ptr(uparam->update_params.fm_pcd);
+ kparam->update_params.reparse = uparam->update_params.reparse;
kparam->next_hmd = uparam->next_hmd;
kparam->hmd = uparam->hmd;
@@ -2869,8 +2902,9 @@ int dpa_cls_hm_fwd_params_compatcpy(
{
int type;
- kparam->fwd_params.out_if_type = uparam->fwd_params.out_if_type;
- kparam->fwd_params.fm_pcd = compat_ptr(uparam->fwd_params.fm_pcd);
+ kparam->fwd_params.out_if_type = uparam->fwd_params.out_if_type;
+ kparam->fwd_params.fm_pcd = compat_ptr(uparam->fwd_params.fm_pcd);
+ kparam->fwd_params.reparse = uparam->fwd_params.reparse;
type = kparam->fwd_params.out_if_type;
switch (type) {
@@ -2932,6 +2966,7 @@ int dpa_cls_hm_mpls_params_compatcpy(
sizeof(struct mpls_header) * DPA_CLS_HM_MAX_MPLS_LABELS);
kparam->mpls_params.num_labels = uparam->mpls_params.num_labels;
kparam->mpls_params.fm_pcd = compat_ptr(uparam->mpls_params.fm_pcd);
+ kparam->mpls_params.reparse = uparam->mpls_params.reparse;
kparam->next_hmd = uparam->next_hmd;
kparam->hmd = uparam->hmd;
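
Every hunk above threads the new reparse flag through the 32-bit compat copy helpers, next to the existing compat_ptr() pointer widening. Reduced to its shape, with placeholder struct definitions that are not the driver's real layouts:

#include <linux/compat.h>

struct hm_uparams { compat_uptr_t fm_pcd; bool reparse; int next_hmd; int hmd; };
struct hm_kparams { void *fm_pcd; bool reparse; int next_hmd; int hmd; };

static void hm_compatcpy_sketch(struct hm_kparams *k,
                                const struct hm_uparams *u)
{
        k->fm_pcd   = compat_ptr(u->fm_pcd);    /* widen 32-bit user pointer */
        k->reparse  = u->reparse;               /* new flag, copied verbatim */
        k->next_hmd = u->next_hmd;
        k->hmd      = u->hmd;
}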
diff --git a/drivers/staging/fsl_dpa_offload/wrp_dpa_ipsec.c b/drivers/staging/fsl_dpa_offload/wrp_dpa_ipsec.c
index c07df76..ddff1ba 100644
--- a/drivers/staging/fsl_dpa_offload/wrp_dpa_ipsec.c
+++ b/drivers/staging/fsl_dpa_offload/wrp_dpa_ipsec.c
@@ -411,6 +411,15 @@ static int do_copy_sa_params(struct dpa_ipsec_sa_params *prm, void *args)
kfree(sa_out_iv);
return err;
}
+
+ if (sa_out_iv->length > DPA_IPSEC_MAX_IV_LEN) {
+ err = -EINVAL;
+ log_err("Error - IV length greater than %d\n",
+ DPA_IPSEC_MAX_IV_LEN);
+ kfree(sa_out_iv);
+ return err;
+ }
+
sa_out_prm->init_vector = sa_out_iv;
/* if the IV array is NULL, don't bother to copy it */
@@ -807,7 +816,7 @@ static int do_sa_get_policies_ioctl(void *args)
}
num_pol = prm.num_pol;
- if (num_pol <= 0) {
+ if (num_pol <= 0 || num_pol > DPA_IPSEC_MAX_POL_PER_SA) {
log_err("Invalid number of policies for SA ID# %d\n", sa_id);
return -EINVAL;
}
@@ -927,7 +936,7 @@ static int do_sa_get_policies_compat_ioctl(void *args)
}
num_pol = prm.num_pol;
- if (num_pol <= 0) {
+ if (num_pol <= 0 || num_pol > DPA_IPSEC_MAX_POL_PER_SA) {
log_err("Invalid number of policies for SA ID# %d\n", sa_id);
return -EINVAL;
}
@@ -1626,7 +1635,7 @@ long wrp_dpa_ipsec_do_compat_ioctl(struct file *filp, unsigned int cmd,
}
- case DPA_IPSEC_IOC_SA_MODIFY: {
+ case DPA_IPSEC_IOC_SA_MODIFY_COMPAT: {
struct dpa_ipsec_sa_modify_prm modify_prm;
int sa_id, ret;
@@ -1645,6 +1654,80 @@ free:
break;
}
+
+ case DPA_IPSEC_IOC_SA_REQUEST_SEQ_NUMBER: {
+ int sa_id;
+
+ if (copy_from_user(&sa_id, (int *)args, sizeof(int))) {
+ log_err("Could not copy SA id\n");
+ return -EINVAL;
+ }
+
+ ret = dpa_ipsec_sa_request_seq_number(sa_id);
+ break;
+ }
+
+ case DPA_IPSEC_IOC_SA_GET_SEQ_NUMBER: {
+ struct ioc_dpa_ipsec_sa_get_seq_num prm;
+
+ if (copy_from_user(&prm,
+ (struct ioc_dpa_ipsec_sa_get_seq_num *)args,
+ sizeof(prm))) {
+			log_err("Could not copy SEQ number params from user space\n");
+ return -EINVAL;
+ }
+
+ if (prm.sa_id < 0) {
+ log_err("Invalid input SA id\n");
+ return -EINVAL;
+ }
+
+ ret = dpa_ipsec_sa_get_seq_number(prm.sa_id, &prm.seq);
+ if (ret < 0) {
+ log_err("Get SEQ number for SA %d failed\n", prm.sa_id);
+ break;
+ }
+
+ if (copy_to_user((struct ioc_dpa_ipsec_sa_get_seq_num *)args,
+ &prm, sizeof(prm))) {
+ log_err("Could not copy SEQ number to user for SA %d\n",
+ prm.sa_id);
+ return -EINVAL;
+ }
+ break;
+ }
+
+ case DPA_IPSEC_IOC_SA_GET_OUT_PATH: {
+ struct ioc_dpa_ipsec_sa_get_out_path prm;
+
+ if (copy_from_user(&prm,
+ (struct ioc_dpa_ipsec_sa_get_out_path *)args,
+ sizeof(prm))) {
+ log_err("Could not copy from user out_path params\n");
+ return -EINVAL;
+ }
+
+ if (prm.sa_id < 0) {
+ log_err("Invalid input SA id\n");
+ return -EINVAL;
+ }
+
+ ret = dpa_ipsec_sa_get_out_path(prm.sa_id, &prm.fqid);
+ if (ret < 0) {
+ log_err("Get out path for SA %d failed\n", prm.sa_id);
+ break;
+ }
+
+ if (copy_to_user((struct ioc_dpa_ipsec_sa_get_out_path *)args,
+ &prm, sizeof(prm))) {
+ log_err("Could not copy out_path to user for SA %d\n",
+ prm.sa_id);
+ return -EINVAL;
+ }
+
+ break;
+ }
+
default:
log_err("Invalid DPA IPsec ioctl (0x%x)\n", cmd);
ret = -EINVAL;
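
A user-space sketch of the new sequence-number pair of ioctls, assuming the request must be issued before the read and that prm.seq holds the value on return (the device node path is illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
/* plus the dpa_ipsec UAPI header for the ioctl numbers and struct */

int print_sa_seq(int sa_id)
{
        struct ioc_dpa_ipsec_sa_get_seq_num prm = { .sa_id = sa_id };
        int fd = open("/dev/dpa_ipsec", O_RDWR);        /* path assumed */

        if (fd < 0)
                return -1;
        /* first ask the hardware to post the current sequence number ... */
        if (ioctl(fd, DPA_IPSEC_IOC_SA_REQUEST_SEQ_NUMBER, &sa_id) == 0 &&
            /* ... then read it back once the request has completed */
            ioctl(fd, DPA_IPSEC_IOC_SA_GET_SEQ_NUMBER, &prm) == 0)
                printf("SA %d seq: %llu\n", sa_id,
                       (unsigned long long)prm.seq);
        close(fd);
        return 0;
}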
diff --git a/drivers/staging/fsl_dpa_offload/wrp_dpa_stats.c b/drivers/staging/fsl_dpa_offload/wrp_dpa_stats.c
index 49545e85..48ed7ca 100644
--- a/drivers/staging/fsl_dpa_offload/wrp_dpa_stats.c
+++ b/drivers/staging/fsl_dpa_offload/wrp_dpa_stats.c
@@ -555,6 +555,15 @@ static long do_ioctl_stats_init(struct ioc_dpa_stats_params *prm)
long ret = 0;
uint16_t i;
+ /* Check user-provided storage area length */
+ if (prm->storage_area_len < DPA_STATS_CNT_SEL_LEN ||
+ prm->storage_area_len > DPA_STATS_MAX_STORAGE_AREA_SIZE) {
+ log_err("Parameter storage_area_len %d must be in range (%d - %d)\n",
+ prm->storage_area_len,
+ DPA_STATS_CNT_SEL_LEN, DPA_STATS_MAX_STORAGE_AREA_SIZE);
+ return -EINVAL;
+ }
+
/* Save user-provided parameters */
params.max_counters = prm->max_counters;
params.storage_area_len = prm->storage_area_len;
@@ -841,6 +850,13 @@ static int do_ioctl_stats_create_class_counter(void *args)
return -EINVAL;
}
+ if (prm.cnt_params.class_members > DPA_STATS_MAX_NUM_OF_CLASS_MEMBERS) {
+ log_err("Parameter class_members %d exceeds maximum number of class members: %d\n",
+ prm.cnt_params.class_members,
+ DPA_STATS_MAX_NUM_OF_CLASS_MEMBERS);
+ return -EINVAL;
+ }
+
cls_mbrs = prm.cnt_params.class_members;
switch (prm.cnt_params.type) {
@@ -909,7 +925,7 @@ static int do_ioctl_stats_create_class_counter(void *args)
/* Override user-space pointers with kernel memory */
tbl->keys = kzalloc(cls_mbrs *
- sizeof(**tbl->keys), GFP_KERNEL);
+ sizeof(*tbl->keys), GFP_KERNEL);
if (!tbl->keys) {
log_err("Cannot allocate kernel memory for lookup keys array\n");
return -ENOMEM;
@@ -931,7 +947,7 @@ static int do_ioctl_stats_create_class_counter(void *args)
/* Override user-space pointers with kernel memory */
tbl->pairs = kzalloc(cls_mbrs *
- sizeof(**tbl->pairs), GFP_KERNEL);
+ sizeof(*tbl->pairs), GFP_KERNEL);
if (!tbl->pairs) {
log_err("Cannot allocate kernel memory for lookup pairs array\n");
return -ENOMEM;
@@ -961,7 +977,7 @@ static int do_ioctl_stats_create_class_counter(void *args)
/* Override user-space pointers with kernel memory */
cnode->keys = kzalloc(cls_mbrs *
- sizeof(**cnode->keys), GFP_KERNEL);
+ sizeof(*cnode->keys), GFP_KERNEL);
if (!cnode->keys) {
log_err("No more memory to store array of keys\n");
return -ENOMEM;
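
The three sizeof corrections above share one point: tbl->keys, tbl->pairs and cnode->keys are arrays of pointers, so sizeof(**p) sized the buffer from the pointed-to structure rather than from the element. The kzalloc(n * sizeof(*p)) idiom keeps the element type and the allocation in lockstep; a self-contained sketch:

#include <linux/slab.h>

struct dpa_offload_lookup_key;  /* elements are reached only by pointer */

static int alloc_key_ptrs(struct dpa_offload_lookup_key ***out,
                          unsigned int cls_mbrs)
{
        /* sizeof(*keys) is one element, i.e. one pointer */
        struct dpa_offload_lookup_key **keys =
                kzalloc(cls_mbrs * sizeof(*keys), GFP_KERNEL);

        if (!keys)
                return -ENOMEM;
        *out = keys;
        return 0;
}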
@@ -1102,6 +1118,13 @@ static int do_ioctl_stats_compat_create_class_counter(void *args)
return -EINVAL;
}
+ if (uprm_cls->class_members > DPA_STATS_MAX_NUM_OF_CLASS_MEMBERS) {
+ log_err("Parameter class_members %d exceeds maximum number of class members: %d\n",
+ uprm_cls->class_members,
+ DPA_STATS_MAX_NUM_OF_CLASS_MEMBERS);
+ return -EINVAL;
+ }
+
memset(&kprm, 0, sizeof(struct ioc_dpa_stats_cls_cnt_params));
kprm.stats_id = uprm.stats_id;
kprm_cls->type = uprm_cls->type;
@@ -1325,8 +1348,6 @@ static int do_ioctl_stats_modify_class_counter(void *args)
}
kfree(prm.params.pair);
/* Restore user-provided key */
- prm.params.pair->first_key = us_pair->first_key;
- prm.params.pair->second_key = us_pair->second_key;
prm.params.pair = us_pair;
}
break;
@@ -1451,6 +1472,14 @@ static int do_ioctl_stats_get_counters(void *args)
return -EINVAL;
}
+ if (prm.req_params.cnts_ids_len == 0 ||
+ prm.req_params.cnts_ids_len > DPA_STATS_REQ_CNTS_IDS_LEN) {
+ log_err("Number of requested counter ids (%d) must be in range (1 - %d)\n",
+ prm.req_params.cnts_ids_len,
+ DPA_STATS_REQ_CNTS_IDS_LEN);
+ return -EINVAL;
+ }
+
/* Save the user-space array of counter ids */
cnts_ids = prm.req_params.cnts_ids;
@@ -1473,8 +1502,10 @@ static int do_ioctl_stats_get_counters(void *args)
/* If counters request is asynchronous */
if (prm.request_done) {
ret = store_get_cnts_async_params(&prm, cnts_ids);
- if (ret < 0)
+ if (ret < 0) {
+ kfree(prm.req_params.cnts_ids);
return ret;
+ }
}
ret = dpa_stats_get_counters(prm.req_params,
@@ -1493,6 +1524,7 @@ static int do_ioctl_stats_get_counters(void *args)
prm.req_params.storage_area_offset),
prm.cnts_len)) {
log_err("Cannot copy counter values to storage area\n");
+ kfree(prm.req_params.cnts_ids);
return -EINVAL;
}
@@ -1520,8 +1552,15 @@ static int do_ioctl_stats_compat_get_counters(void *args)
return -EINVAL;
}
+ if (uprm.req_params.cnts_ids_len == 0 ||
+ uprm.req_params.cnts_ids_len > DPA_STATS_REQ_CNTS_IDS_LEN) {
+ log_err("Number of requested counter ids (%d) must be in range (1 - %d)\n",
+ uprm.req_params.cnts_ids_len,
+ DPA_STATS_REQ_CNTS_IDS_LEN);
+ return -EINVAL;
+ }
+
memset(&kprm, 0, sizeof(struct ioc_dpa_stats_cnt_request_params));
- kprm.cnts_len = uprm.cnts_len;
kprm.request_done = (dpa_stats_request_cb)
((compat_ptr)(uprm.request_done));
kprm.req_params.cnts_ids_len = uprm.req_params.cnts_ids_len;
@@ -1550,8 +1589,10 @@ static int do_ioctl_stats_compat_get_counters(void *args)
if (kprm.request_done) {
ret = store_get_cnts_async_params(&kprm,
(compat_ptr)(uprm.req_params.cnts_ids));
- if (ret < 0)
+ if (ret < 0) {
+ kfree(kprm.req_params.cnts_ids);
return ret;
+ }
}
ret = dpa_stats_get_counters(kprm.req_params,
@@ -1608,6 +1649,13 @@ static int do_ioctl_stats_reset_counters(void *args)
return -EINVAL;
}
+ if (prm.cnts_ids_len == 0 ||
+ prm.cnts_ids_len > DPA_STATS_REQ_CNTS_IDS_LEN) {
+ log_err("Number of counters to reset %d must be in range (1 - %d)\n",
+ prm.cnts_ids_len, DPA_STATS_REQ_CNTS_IDS_LEN);
+ return -EINVAL;
+ }
+
/* Allocate kernel-space memory area to copy the counters ids */
cnt_ids = kcalloc(prm.cnts_ids_len, sizeof(int), GFP_KERNEL);
if (!cnt_ids) {
@@ -1653,6 +1701,13 @@ static int do_ioctl_stats_compat_reset_counters(void *args)
return -EINVAL;
}
+ if (uprm.cnts_ids_len == 0 ||
+ uprm.cnts_ids_len > DPA_STATS_REQ_CNTS_IDS_LEN) {
+ log_err("Number of counters to reset %d must be in range (1 - %d)\n",
+ uprm.cnts_ids_len, DPA_STATS_REQ_CNTS_IDS_LEN);
+ return -EINVAL;
+ }
+
memset(&kprm, 0, sizeof(struct ioc_dpa_stats_cnts_reset_params));
kprm.cnts_ids_len = uprm.cnts_ids_len;
@@ -1863,7 +1918,6 @@ static long store_get_cnts_async_params(
mutex_lock(&wrp_dpa_stats.async_req_lock);
if (list_empty(&wrp_dpa_stats.async_req_pool)) {
log_err("Reached maximum supported number of simultaneous asynchronous requests\n");
- kfree(kprm->req_params.cnts_ids);
mutex_unlock(&wrp_dpa_stats.async_req_lock);
return -EDOM;
}
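
Taken together, the get_counters hunks and the kfree() removal above move ownership of cnts_ids wholly into the callers: store_get_cnts_async_params() no longer frees the buffer it is handed, and each failing caller path releases it instead, so the buffer has exactly one owner. A kernel-idiomatic way to keep such error paths from multiplying is a single unwind label; a sketch with placeholder helpers:

#include <linux/slab.h>

int setup_async_request(int *ids, unsigned int len);    /* placeholder */
int run_request(int *ids, unsigned int len);            /* placeholder */

static int get_counters_sketch(unsigned int len)
{
        int *ids = kcalloc(len, sizeof(int), GFP_KERNEL);
        int ret;

        if (!ids)
                return -ENOMEM;

        ret = setup_async_request(ids, len);
        if (ret < 0)
                goto err_free;

        ret = run_request(ids, len);
        if (ret < 0)
                goto err_free;

        return 0;

err_free:
        kfree(ids);     /* single owner: released on every failure path */
        return ret;
}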
@@ -2005,6 +2059,14 @@ static int copy_key_descriptor_compatcpy(
return -ENOMEM;
}
+	if (compat_ptr(key.byte) || compat_ptr(key.mask)) {
+ if (key.size == 0 || key.size > DPA_OFFLD_MAXENTRYKEYSIZE) {
+			log_err("Key size should be between 1 and %d.\n",
+ DPA_OFFLD_MAXENTRYKEYSIZE);
+ return -EINVAL;
+ }
+ }
+
if (compat_ptr(key.byte)) {
/* Allocate memory to store the key byte array */
kparam->byte = kmalloc(key.size, GFP_KERNEL);
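
The size check added above guards the kmalloc()/copy_from_user() pair that follows it: key.size arrives in a 32-bit user-space descriptor, so zero-byte and oversized values must both be rejected before any allocation. The guarded shape, as a sketch (the error code on copy failure is illustrative):

#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

static int copy_user_key(void **dst, compat_uptr_t src, uint8_t size)
{
        if (size == 0 || size > DPA_OFFLD_MAXENTRYKEYSIZE)
                return -EINVAL; /* reject zero-byte and oversized keys */

        *dst = kmalloc(size, GFP_KERNEL);
        if (!*dst)
                return -ENOMEM;

        if (copy_from_user(*dst, compat_ptr(src), size)) {
                kfree(*dst);
                *dst = NULL;
                return -EBUSY;  /* illustrative; match the wrapper's convention */
        }
        return 0;
}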
@@ -2356,7 +2418,6 @@ static long dpa_stats_tbl_cls_compatcpy(
return ret;
}
}
- kfree(us_keys);
}
if (kprm->key_type == DPA_STATS_CLASSIF_PAIR_KEY) {
@@ -2421,6 +2482,9 @@ static long dpa_stats_tbl_cls_compatcpy(
}
}
}
+
+ kfree(us_keys);
+
return 0;
}
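
Finally, the last two hunks move kfree(us_keys) out of the first key-type branch and onto the function's common exit, so the saved user-space pointer array is released exactly once, after every branch that may still read it. Compressed, with placeholder helpers (the single-key enum name is assumed):

#include <linux/slab.h>

void restore_single_keys(void *us_keys);        /* placeholder */
void restore_pair_keys(void *us_keys);          /* placeholder */

static int restore_and_free(int key_type, void *us_keys)
{
        if (key_type == DPA_STATS_CLASSIF_SINGLE_KEY)   /* name assumed */
                restore_single_keys(us_keys);
        if (key_type == DPA_STATS_CLASSIF_PAIR_KEY)
                restore_pair_keys(us_keys);

        kfree(us_keys);         /* one release, on the common exit */
        return 0;
}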