From 267ef5f07eff84a1a3bbc0763f784861e62e3260 Mon Sep 17 00:00:00 2001
From: Marian Chereji
Date: Tue, 24 Sep 2013 18:20:39 +0300
Subject: dpa_offload: Allow 0xff masks for HASH classifier table entries

As key masks are not supported in HASH tables, the DPA Classifier
refused to add an entry to a HASH table whenever the key carried a mask
(mask pointer not NULL). This is changed so that the DPA Classifier
recognizes an all-0xff mask and ignores it. Any other (more complex)
mask is still refused.

Change-Id: I0732d873fa5d10def4f2646c2bd248aadcffa736
Signed-off-by: Marian Chereji
Reviewed-on: http://git.am.freescale.net:8181/4911
Tested-by: Review Code-CDREVIEW
Reviewed-by: Bulie Radu-Andrei-B37577
Reviewed-by: Rivera Jose-B46482

diff --git a/drivers/staging/fsl_dpa_offload/dpa_classifier.c b/drivers/staging/fsl_dpa_offload/dpa_classifier.c
index 5056b2e..3dc35bc 100644
--- a/drivers/staging/fsl_dpa_offload/dpa_classifier.c
+++ b/drivers/staging/fsl_dpa_offload/dpa_classifier.c
@@ -2567,8 +2567,12 @@ static int table_insert_entry_hash(struct dpa_cls_table *cls_table,
 	}
 
 	if (key->mask) {
-		log_err("Key masks are not supported by HASH tables.\n");
-		return -EINVAL;
+		/* Only full 0xFF masks supported: */
+		for (j = 0; j < key->size; j++)
+			if (key->mask[j] ^ 0xff) {
+				log_err("Only key masks 0xff all over are supported by HASH tables.\n");
+				return -EINVAL;
+			}
 	}
 
 	memset(&key_params, 0, sizeof(t_FmPcdCcKeyParams));
--
cgit v0.10.2

From a4e5e25cb91ce9ee356fdbe2c33e584fc2e659f6 Mon Sep 17 00:00:00 2001
From: Anca Jeanina FLOAREA
Date: Thu, 26 Sep 2013 12:11:59 +0300
Subject: dpa_offload: Change memory allocation to support 512 counters

The driver fails to allocate the large amount of memory required to
support 512 counters. Fix this by changing the memory allocation from
static to dynamic for certain structure members. Counters that do not
support the modify action continue to have their memory allocated
statically.
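For reference, a minimal sketch of the per-member allocation pattern the
driver moves to (illustrative only, not code from this patch; the helper
name and its "rows"/"cols" parameters are placeholders, not driver
symbols):

#include <linux/slab.h>		/* kcalloc(), kfree() */
#include <linux/types.h>	/* u64 */

/*
 * Allocate a rows x cols table of 64-bit statistics as one pointer array
 * plus one allocation per row, instead of a single fixed-size 2-D array.
 * On a partial failure, the rows allocated so far are released.
 */
static u64 **stats_table_alloc(unsigned int rows, unsigned int cols)
{
	u64 **stats;
	unsigned int i;

	stats = kcalloc(rows, sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return NULL;

	for (i = 0; i < rows; i++) {
		stats[i] = kcalloc(cols, sizeof(**stats), GFP_KERNEL);
		if (!stats[i])
			goto err_free_rows;
	}
	return stats;

err_free_rows:
	while (i--)
		kfree(stats[i]);
	kfree(stats);
	return NULL;
}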
Change-Id: Ibdf76a9ce9e1a11d590166a2f93bbfd282edbeea
Signed-off-by: Anca Jeanina FLOAREA
Reviewed-on: http://git.am.freescale.net:8181/5009
Reviewed-by: Zanoschi Aurelian-B43522
Tested-by: Review Code-CDREVIEW
Reviewed-by: Rivera Jose-B46482

diff --git a/drivers/staging/fsl_dpa_offload/dpa_stats.c b/drivers/staging/fsl_dpa_offload/dpa_stats.c
index 95120fa..41580ab 100644
--- a/drivers/staging/fsl_dpa_offload/dpa_stats.c
+++ b/drivers/staging/fsl_dpa_offload/dpa_stats.c
@@ -411,7 +411,7 @@ static int get_new_req(struct dpa_stats *dpa_stats,
 
 static int put_cnt(struct dpa_stats *dpa_stats, struct dpa_stats_cnt_cb *cnt_cb)
 {
-	int err = 0;
+	int err = 0, i = 0;
 
 	/* Acquire DPA Stats instance lock */
 	mutex_lock(&dpa_stats->lock);
@@ -452,6 +452,16 @@ static int put_cnt(struct dpa_stats *dpa_stats, struct dpa_stats_cnt_cb *cnt_cb)
 		break;
 	}
 
+	/* Reset all statistics information */
+	memset(cnt_cb->info.stats_off, 0,
+	       MAX_NUM_OF_STATS * sizeof(*cnt_cb->info.stats_off));
+	for (i = 0; i < MAX_NUM_OF_MEMBERS; i++) {
+		memset(cnt_cb->info.stats[i], 0,
+		       MAX_NUM_OF_STATS * sizeof(uint64_t));
+		memset(cnt_cb->info.last_stats[i], 0,
+		       MAX_NUM_OF_STATS * sizeof(uint64_t));
+	}
+
 	/* Release DPA Stats instance lock */
 	mutex_unlock(&dpa_stats->lock);
 
@@ -490,10 +500,69 @@ static int put_req(struct dpa_stats *dpa_stats, struct dpa_stats_req_cb *req_cb)
 	return 0;
 }
 
+static int alloc_cnt_cb(struct dpa_stats *dpa_stats,
+			struct dpa_stats_cnt_cb *cnt_cb)
+{
+	int i = 0;
+
+	/* Initialize counter lock */
+	mutex_init(&cnt_cb->lock);
+	/* Store dpa_stats instance */
+	cnt_cb->dpa_stats = dpa_stats;
+	/* Counter is not initialized, set the index to invalid value */
+	cnt_cb->index = DPA_OFFLD_INVALID_OBJECT_ID;
+
+	/* Allocate array of statistics offsets */
+	cnt_cb->info.stats_off = kcalloc(MAX_NUM_OF_STATS,
+			sizeof(*cnt_cb->info.stats_off), GFP_KERNEL);
+	if (!cnt_cb->info.stats_off) {
+		log_err("Cannot allocate memory to store array of "
+			"statistics offsets\n");
+		return -ENOMEM;
+	}
+
+	/* Allocate array of currently read statistics */
+	cnt_cb->info.stats = kcalloc(MAX_NUM_OF_MEMBERS,
+			sizeof(uint64_t *), GFP_KERNEL);
+	if (!cnt_cb->info.stats) {
+		log_err("Cannot allocate memory to store array of "
+			"statistics for all members\n");
+		return -ENOMEM;
+	}
+	for (i = 0; i < MAX_NUM_OF_MEMBERS; i++) {
+		cnt_cb->info.stats[i] = kcalloc(MAX_NUM_OF_STATS,
+				sizeof(uint64_t), GFP_KERNEL);
+		if (!cnt_cb->info.stats[i]) {
+			log_err("Cannot allocate memory to store array of "
+				"statistics for %d member\n", i);
+			return -ENOMEM;
+		}
+	}
+
+	/* Allocate array of previously read statistics */
+	cnt_cb->info.last_stats = kcalloc(MAX_NUM_OF_MEMBERS,
+			sizeof(uint64_t *), GFP_KERNEL);
+	if (!cnt_cb->info.last_stats) {
+		log_err("Cannot allocate memory to store array of "
+			"previous read statistics for all members\n");
+		return -ENOMEM;
+	}
+	for (i = 0; i < MAX_NUM_OF_MEMBERS; i++) {
+		cnt_cb->info.last_stats[i] = kcalloc(MAX_NUM_OF_STATS,
+				sizeof(uint64_t), GFP_KERNEL);
+		if (!cnt_cb->info.last_stats[i]) {
+			log_err("Cannot allocate memory to store array of "
+				"previous read statistics for %d member\n", i);
+			return -ENOMEM;
+		}
+	}
+	return 0;
+}
+
 static int init_cnts_resources(struct dpa_stats *dpa_stats)
 {
 	struct dpa_stats_params config = dpa_stats->config;
-	int i;
+	int i, err;
 
 	/* Create circular queue that holds free counter IDs */
 	dpa_stats->cnt_id_cq = cq_new(config.max_counters, sizeof(int));
@@ -521,7 +590,16 @@ static int init_cnts_resources(struct dpa_stats *dpa_stats)
 	memset(dpa_stats->used_cnt_ids, DPA_OFFLD_INVALID_OBJECT_ID,
 	       config.max_counters * sizeof(uint32_t));
 
-	/* Allocate array to store counters control blocks */
+	/* Allocate array to store counter ids scheduled for retrieve */
+	dpa_stats->sched_cnt_ids = kcalloc(
+			config.max_counters, sizeof(bool), GFP_KERNEL);
+	if (!dpa_stats->sched_cnt_ids) {
+		log_err("Cannot allocate memory to store %d scheduled counter "
+			"ids\n", config.max_counters);
+		return -ENOMEM;
+	}
+
+	/* Allocate array of counters control blocks */
 	dpa_stats->cnts_cb = kzalloc(config.max_counters *
 			sizeof(struct dpa_stats_cnt_cb), GFP_KERNEL);
 	if (!dpa_stats->cnts_cb) {
@@ -530,10 +608,11 @@ static int init_cnts_resources(struct dpa_stats *dpa_stats)
 		return -ENOMEM;
 	}
 
+	/* Allocate memory for every counter control block */
 	for (i = 0; i < config.max_counters; i++) {
-		mutex_init(&dpa_stats->cnts_cb[i].lock);
-		dpa_stats->cnts_cb[i].dpa_stats = dpa_stats;
-		dpa_stats->cnts_cb[i].index = DPA_OFFLD_INVALID_OBJECT_ID;
+		err = alloc_cnt_cb(dpa_stats, &dpa_stats->cnts_cb[i]);
+		if (err < 0)
+			return err;
 	}
 
 	return 0;
@@ -541,7 +620,7 @@ static int free_cnts_resources(struct dpa_stats *dpa_stats)
 {
-	uint32_t id, i;
+	uint32_t id, i, j;
 	int err = 0;
 
 	for (i = 0; i < dpa_stats->config.max_counters; i++) {
@@ -549,13 +628,21 @@ static int free_cnts_resources(struct dpa_stats *dpa_stats)
 		id = dpa_stats->used_cnt_ids[i];
 		mutex_unlock(&dpa_stats->lock);
 
-		if (id != DPA_OFFLD_INVALID_OBJECT_ID)
+		if (id != DPA_OFFLD_INVALID_OBJECT_ID) {
 			/* Release the counter id in the Counter IDs cq */
 			err = put_cnt(dpa_stats, &dpa_stats->cnts_cb[id]);
 			if (err < 0) {
 				log_err("Cannot release counter id %d\n", id);
 				return err;
 			}
+		}
+		for (j = 0; j < MAX_NUM_OF_MEMBERS; j++) {
+			kfree(dpa_stats->cnts_cb[i].info.stats[j]);
+			kfree(dpa_stats->cnts_cb[i].info.last_stats[j]);
+		}
+		kfree(dpa_stats->cnts_cb[i].info.stats_off);
+		kfree(dpa_stats->cnts_cb[i].info.stats);
+		kfree(dpa_stats->cnts_cb[i].info.last_stats);
 	}
 
 	/* Release counters IDs circular queue */
@@ -572,6 +659,10 @@ static int free_cnts_resources(struct dpa_stats *dpa_stats)
 	kfree(dpa_stats->used_cnt_ids);
 	dpa_stats->used_cnt_ids = NULL;
 
+	/* Release scheduled counters ids array */
+	kfree(dpa_stats->sched_cnt_ids);
+	dpa_stats->sched_cnt_ids = NULL;
+
 	return 0;
 }
 
@@ -629,7 +720,7 @@ static int init_reqs_resources(struct dpa_stats *dpa_stats)
 	/* Allocate array to store the counter ids */
 	for (i = 0; i < DPA_STATS_MAX_NUM_OF_REQUESTS; i++) {
 		dpa_stats->reqs_cb[i].cnts_ids =
-			kzalloc(DPA_STATS_MAX_NUM_OF_COUNTERS *
+			kzalloc(dpa_stats->config.max_counters *
				sizeof(int), GFP_KERNEL);
 		if (!dpa_stats->reqs_cb[i].cnts_ids) {
 			log_err("Cannot allocate memory for array of counter "
@@ -1565,6 +1656,13 @@ static int set_cnt_classif_tbl_cb(struct dpa_stats_cnt_cb *cnt_cb,
 			prm.td, cnt_cb->id);
 		return -EINVAL;
 	}
+	/* Allocate memory for one key descriptor */
+	cnt_tbl_cb->keys = kzalloc(sizeof(*cnt_tbl_cb->keys), GFP_KERNEL);
+	if (!cnt_tbl_cb->keys) {
+		log_err("Cannot allocate memory for key descriptor "
+			"for counter id %d\n", cnt_cb->id);
+		return -ENOMEM;
+	}
 
 	/* Store CcNode handle and set number of keys to one */
 	cnt_tbl_cb->keys[0].cc_node = cls_tbl.cc_node;
@@ -1710,6 +1808,23 @@ static int set_cnt_ipsec_cb(struct dpa_stats_cnt_cb *cnt_cb,
 		return -EFAULT;
 	}
 
+	/* Allocate memory for one security association id */
+	cnt_cb->ipsec_cb.sa_id = kzalloc(sizeof(*cnt_cb->ipsec_cb.sa_id),
+			GFP_KERNEL);
+	if (!cnt_cb->ipsec_cb.sa_id) {
+		log_err("Cannot allocate memory for security association id "
+			"for counter id %d\n", cnt_cb->id);
+		return -ENOMEM;
+	}
+
+	/* Allocate memory to store if security association is valid */
+	cnt_cb->ipsec_cb.valid = kzalloc(sizeof(*cnt_cb->ipsec_cb.valid),
+			GFP_KERNEL);
+	if (!cnt_cb->ipsec_cb.valid) {
+		log_err("Cannot allocate memory to store if security "
+			"association is valid for counter id %d\n", cnt_cb->id);
+		return -ENOMEM;
+	}
 	cnt_cb->ipsec_cb.sa_id[0] = params->ipsec_params.sa_id;
 	cnt_cb->ipsec_cb.valid[0] = TRUE;
 
@@ -2173,6 +2288,15 @@ static int set_cls_cnt_classif_tbl_cb(struct dpa_stats_cnt_cb *cnt_cb,
 	tbl_cb->td = params->classif_tbl_params.td;
 	cnt_cb->members_num = params->class_members;
 
+	/* Allocate memory for key descriptors */
+	tbl_cb->keys = kcalloc(params->class_members,
+			sizeof(*tbl_cb->keys), GFP_KERNEL);
+	if (!tbl_cb->keys) {
+		log_err("Cannot allocate memory for array of key descriptors "
+			"for counter id %d\n", cnt_cb->id);
+		return -ENOMEM;
+	}
+
 	switch (prm.key_type) {
 	case DPA_STATS_CLASSIF_SINGLE_KEY:
 		if (!prm.keys) {
@@ -2421,6 +2545,25 @@ static int set_cls_cnt_ipsec_cb(struct dpa_stats_cnt_cb *cnt_cb,
 
 	cnt_cb->members_num = prm->class_members;
 
+	/* Allocate memory for array of security association ids */
+	cnt_cb->ipsec_cb.sa_id = kcalloc(cnt_cb->members_num,
+			sizeof(*cnt_cb->ipsec_cb.sa_id), GFP_KERNEL);
+	if (!cnt_cb->ipsec_cb.sa_id) {
+		log_err("Cannot allocate memory for array of security "
+			"association ids, for counter id %d\n", cnt_cb->id);
+		return -ENOMEM;
+	}
+
+	/* Allocate memory for array that stores if SA id is valid */
+	cnt_cb->ipsec_cb.valid = kcalloc(cnt_cb->members_num,
+			sizeof(*cnt_cb->ipsec_cb.valid), GFP_KERNEL);
+	if (!cnt_cb->ipsec_cb.valid) {
+		log_err("Cannot allocate memory for array that stores if "
+			"security association ids are valid for counter id %d\n",
+			cnt_cb->id);
+		return -ENOMEM;
+	}
+
 	for (i = 0; i < prm->class_members; i++) {
 		if (prm->ipsec_params.sa_id[i] != DPA_OFFLD_INVALID_OBJECT_ID) {
 			cnt_ipsec_cb->sa_id[i] = prm->ipsec_params.sa_id[i];
@@ -3748,19 +3891,30 @@ int dpa_stats_remove_counter(int dpa_stats_cnt_id)
 		return -EINVAL;
 	}
 
-	/* Remove the allocated memory for keys bytes and masks */
-	if (cnt_cb->type == DPA_STATS_CNT_CLASSIF_NODE)
+	switch (cnt_cb->type) {
+	case DPA_STATS_CNT_CLASSIF_NODE:
+		/* Remove the allocated memory for keys bytes and masks */
 		for (i = 0; i < cnt_cb->members_num; i++) {
 			kfree(cnt_cb->ccnode_cb.keys[i].byte);
 			kfree(cnt_cb->ccnode_cb.keys[i].mask);
 		}
-
-	/* Remove the allocated memory for keys bytes and masks */
-	if (cnt_cb->type == DPA_STATS_CNT_CLASSIF_TBL)
+		break;
+	case DPA_STATS_CNT_CLASSIF_TBL:
+		/* Remove the allocated memory for keys bytes, masks and keys */
 		for (i = 0; i < cnt_cb->members_num; i++) {
 			kfree(cnt_cb->tbl_cb.keys[i].key.byte);
 			kfree(cnt_cb->tbl_cb.keys[i].key.mask);
 		}
+		kfree(cnt_cb->tbl_cb.keys);
+		break;
+	case DPA_STATS_CNT_IPSEC:
+		/* Remove the allocated memory for security associations */
+		kfree(cnt_cb->ipsec_cb.sa_id);
+		kfree(cnt_cb->ipsec_cb.valid);
+		break;
+	default:
+		break;
+	}
 
 	/* Release the counter id in the Counter IDs circular queue */
 	err = put_cnt(dpa_stats, cnt_cb);
@@ -3905,7 +4059,7 @@ int dpa_stats_reset_counters(int *cnts_ids, unsigned int cnts_ids_len)
 {
 	struct dpa_stats *dpa_stats = NULL;
 	struct dpa_stats_cnt_cb *cnt_cb = NULL;
-	uint32_t i = 0;
+	uint32_t i = 0, j = 0;
 	int err = 0;
 
 	if (!gbl_dpa_stats) {
@@ -3963,8 +4117,11 @@ int dpa_stats_reset_counters(int *cnts_ids, unsigned int cnts_ids_len)
 				cnts_ids, cnts_ids_len);
 			return -EINVAL;
 		}
-		memset(&cnt_cb->info.stats, 0, (MAX_NUM_OF_MEMBERS *
-			MAX_NUM_OF_STATS * sizeof(uint64_t)));
+		/* Reset stored statistics values */
+		for (j = 0; j < MAX_NUM_OF_MEMBERS; j++)
+			memset(cnt_cb->info.stats[j], 0,
+			       MAX_NUM_OF_STATS * sizeof(uint64_t));
+
 		mutex_unlock(&cnt_cb->lock);
 	}
 
diff --git a/drivers/staging/fsl_dpa_offload/dpa_stats.h b/drivers/staging/fsl_dpa_offload/dpa_stats.h
index 3d682af..eec099c 100644
--- a/drivers/staging/fsl_dpa_offload/dpa_stats.h
+++ b/drivers/staging/fsl_dpa_offload/dpa_stats.h
@@ -68,8 +68,7 @@ struct dpa_stats {
 	 */
 	struct workqueue_struct *async_req_workqueue;
 	struct mutex lock; /* Lock for this dpa_stats instance */
-	/* Counters that are scheduled for a retrieve operation */
-	bool sched_cnt_ids[DPA_STATS_MAX_NUM_OF_COUNTERS];
+	bool *sched_cnt_ids; /* Counters scheduled for a retrieve operation */
 	struct mutex sched_cnt_lock; /* Lock for array of scheduled counters */
 };
 
@@ -94,12 +93,10 @@
 	 * Array of statistics offsets relative to
 	 * corresponding statistics area
 	 */
-	unsigned int stats_off[MAX_NUM_OF_STATS];
+	unsigned int *stats_off;
 	unsigned int stats_num; /* Number of statistics to retrieve */
-	uint64_t stats[MAX_NUM_OF_MEMBERS][MAX_NUM_OF_STATS];
-	/* Array to store statistics values */
-	uint64_t last_stats[MAX_NUM_OF_MEMBERS][MAX_NUM_OF_STATS];
-	/* Array to store previous statistics values */
+	uint64_t **stats; /* Array to store statistics values */
+	uint64_t **last_stats;/* Array to store previous statistics values */
 	bool reset; /* Reset counter's statistics */
 };
 
@@ -122,7 +119,7 @@ struct dpa_stats_lookup_key {
 struct dpa_stats_cnt_classif_tbl_cb {
 	int td; /* Table descriptor */
 	enum dpa_cls_tbl_type type; /* The type of the DPA Classifier table */
-	struct dpa_stats_lookup_key keys[MAX_NUM_OF_MEMBERS]; /* Array of
+	struct dpa_stats_lookup_key *keys; /* Array of
		key descriptors for which to provide statistics */
 };
 
@@ -135,8 +132,8 @@ struct dpa_stats_cnt_classif_cb {
 
 /* DPA Stats IPSec Counter control block */
 struct dpa_stats_cnt_ipsec_cb {
-	int sa_id[MAX_NUM_OF_MEMBERS]; /* Array of Security Association ids */
-	bool valid[MAX_NUM_OF_MEMBERS]; /* Security Association id is valid */
+	int *sa_id; /* Array of Security Association ids */
+	bool *valid; /* Security Association id is valid */
 };
 
 typedef int get_cnt_stats(struct dpa_stats_req_cb *req_cb,
--
cgit v0.10.2
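For reference, a minimal sketch of the matching teardown such a
double-pointer layout requires (illustrative only, not code from the
patch above; the helper name and parameters are placeholders). It
mirrors the per-member kfree() calls added to free_cnts_resources():
every row must be released before the row-pointer array itself, and the
work is split into many small allocations instead of one large
contiguous block, which is what failed for 512 counters.

#include <linux/slab.h>		/* kfree() */
#include <linux/types.h>	/* u64 */

/* Free a table built row by row; kfree(NULL) is a safe no-op. */
static void stats_table_free(u64 **stats, unsigned int rows)
{
	unsigned int i;

	if (!stats)
		return;
	for (i = 0; i < rows; i++)
		kfree(stats[i]);
	kfree(stats);
}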
From 1dae7fa3b2b222d23d0f2d78f8be712572bdb3a4 Mon Sep 17 00:00:00 2001
From: Marian Chereji
Date: Wed, 4 Sep 2013 11:44:11 +0300
Subject: dpa_offload: Improve debug printing in DPA Classifier

Signed-off-by: Marian Chereji
Change-Id: Ibe6ba10afc6447cf284b27cc7e3caed854eab304
Reviewed-on: http://git.am.freescale.net:8181/4413
Tested-by: Review Code-CDREVIEW
Reviewed-by: Bulie Radu-Andrei-B37577
Reviewed-by: Rivera Jose-B46482

diff --git a/drivers/staging/fsl_dpa_offload/dpa_classifier.c b/drivers/staging/fsl_dpa_offload/dpa_classifier.c
index 3dc35bc..f8ed8b7 100644
--- a/drivers/staging/fsl_dpa_offload/dpa_classifier.c
+++ b/drivers/staging/fsl_dpa_offload/dpa_classifier.c
@@ -1673,6 +1673,12 @@ static int flush_table(struct dpa_cls_table *ptable)
 			int_cc_node = &ptable->int_cc_node[cc_node_index];
 
 			dpa_classif_hm_release_chain(index_entry->hmd);
+#ifdef DPA_CLASSIFIER_DEBUG
+			dpa_cls_dbg(("DEBUG: dpa_classifier %s (%d): Remove "
+				"entry #%d from table cc_node=0x%p.\n",
+				__func__, __LINE__, index_entry->entry_index,
+				cc_node));
+#endif /* DPA_CLASSIFIER_DEBUG */
 			err = FM_PCD_MatchTableRemoveKey(cc_node,
				index_entry->entry_index);
 			if (err != E_OK) {
@@ -2444,6 +2450,18 @@ static int table_insert_entry_exact_match(struct dpa_cls_table *cls_table,
 	}
 
 	/* Add the key to the selected Cc node */
+#ifdef DPA_CLASSIFIER_DEBUG
+	dpa_cls_dbg(("DEBUG: dpa_classifier %s (%d): Insert new entry in table "
+		"cc_node=0x%p.\n", __func__, __LINE__,
+		cls_table->int_cc_node[0].cc_node));
+	dpa_cls_dbg((" index=%d; action type (id)=%d; hmd=%d; h_Manip=0x%p\n",
+		cls_table->entry[k].entry_index, action->type, hmd,
+		key_params.ccNextEngineParams.h_Manip));
+	dpa_cls_dbg((" Lookup key (%d bytes): ",
+		cls_table->params.exact_match_params.key_size));
+	dump_lookup_key(key);
+	pr_err("\n");
+#endif /* DPA_CLASSIFIER_DEBUG */
 	err = FM_PCD_MatchTableAddKey((t_Handle)cls_table->
			int_cc_node[0].cc_node,
		cls_table->entry[k].entry_index,
--
cgit v0.10.2

From 1c0997b604759faa2741e3e5e518f87edf80fed0 Mon Sep 17 00:00:00 2001
From: Marian Chereji
Date: Wed, 4 Sep 2013 11:39:23 +0300
Subject: dpa_offload: Fix static index initialization in DPA Classifier

The static indexes used in the recursive functions "init_hm_chain" and
"remove_hm_chain" were not explicitly initialized. Also,
"remove_hm_chain" was not decrementing this index as it removed header
manipulations.

Signed-off-by: Marian Chereji
Change-Id: I00bd44c00749578a4c785741082e0ad2c1076c27
Reviewed-on: http://git.am.freescale.net:8181/4412
Tested-by: Review Code-CDREVIEW
Reviewed-by: Bulie Radu-Andrei-B37577
Reviewed-by: Rivera Jose-B46482

diff --git a/drivers/staging/fsl_dpa_offload/dpa_classifier.c b/drivers/staging/fsl_dpa_offload/dpa_classifier.c
index f8ed8b7..751dacd 100644
--- a/drivers/staging/fsl_dpa_offload/dpa_classifier.c
+++ b/drivers/staging/fsl_dpa_offload/dpa_classifier.c
@@ -3585,7 +3585,7 @@ static int init_hm_chain(void *fm_pcd, struct list_head *chain_head,
 	t_Error error;
 	struct dpa_cls_hm_node *pcurrent, *pnext;
 	t_FmPcdManipParams params;
-	static int index;
+	static int index = 0;
 	static int num_int_nodes;
 
 	BUG_ON(!chain_head);
@@ -3798,7 +3798,7 @@ int remove_hm_chain(struct list_head *chain_head, struct list_head *item)
 	int err = 0;
 	struct dpa_cls_hm_node *pcurrent;
 	t_Error error;
-	static int index;
+	static int index = 0;
 
 	BUG_ON(!chain_head);
 	BUG_ON(!item);
@@ -3829,6 +3829,8 @@ int remove_hm_chain(struct list_head *chain_head, struct list_head *item)
 
 	remove_hm_node(pcurrent);
 
+	index--;
+
 	return err;
 }
--
cgit v0.10.2
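For reference, a minimal sketch of the invariant this last patch restores
(illustrative only, not code from the patches above; the function and
variable names are placeholders, not DPA Classifier symbols). A static
position index shared by a recursive build routine and its teardown
counterpart stays correct across calls only if every removal undoes the
increment performed by the matching insertion:

#include <linux/list.h>

/*
 * Persists across calls; C zero-initializes statics, so the "= 0" added
 * by the patch only makes that explicit. The real fix is keeping the
 * increment and decrement balanced.
 */
static int chain_pos;

static void chain_node_added(struct list_head *chain, struct list_head *node)
{
	list_add_tail(node, chain);
	chain_pos++;		/* one step forward per node built */
}

static void chain_node_removed(struct list_head *node)
{
	list_del(node);
	chain_pos--;		/* the decrement remove_hm_chain() was missing */
}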