author		J. German Rivera <Jose.G.Rivera@freescale.com>	2013-09-27 20:11:16 (GMT)
committer	J. German Rivera <German.Rivera@freescale.com>	2013-09-27 20:11:16 (GMT)
commit		d103ca95cb3fbde0aaae4ca8a8be727d7e2bed71 (patch)
tree		cf7c0872f2e2d2eb85c5504ca038147b67b521d2 /drivers
parent		4ec77d3ac057d6dc9d213fb4861be1549fe40481 (diff)
parent		59eed258ba79a6de6179f6de911280cac653990b (diff)
download	linux-fsl-qoriq-d103ca95cb3fbde0aaae4ca8a8be727d7e2bed71.tar.xz
Merge branch 'sdk-kernel-3.8'
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/mtd/devices/Kconfig				  2
-rw-r--r--	drivers/mtd/nand/fsl_ifc_nand.c				 77
-rw-r--r--	drivers/net/phy/at803x.c				 26
-rw-r--r--	drivers/staging/fsl_dpa_offload/dpa_classifier.c	 32
-rw-r--r--	drivers/staging/fsl_dpa_offload/dpa_stats.c		191
-rw-r--r--	drivers/staging/fsl_dpa_offload/dpa_stats.h		 17
6 files changed, 311 insertions, 34 deletions
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 46dcb54..088a31a 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -81,7 +81,7 @@ config MTD_DATAFLASH_OTP
config MTD_M25P80
tristate "Support most SPI Flash chips (AT26DF, M25P, W25X, ...)"
- depends on SPI_MASTER && EXPERIMENTAL
+ depends on SPI_MASTER
help
This enables access to most modern SPI flash chips, used for
program and data storage. Series supported include Atmel AT26DF,
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index 7462fa1..49af135 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -135,6 +135,69 @@ static struct nand_ecclayout oob_4096_ecc8 = {
.oobfree = { {2, 6}, {136, 82} },
};
+/* 8192-byte page size with 4-bit ECC */
+static struct nand_ecclayout oob_8192_ecc4 = {
+ .eccbytes = 128,
+ .eccpos = {
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71,
+ 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100, 101, 102, 103,
+ 104, 105, 106, 107, 108, 109, 110, 111,
+ 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127,
+ 128, 129, 130, 131, 132, 133, 134, 135,
+ },
+ .oobfree = { {2, 6}, {136, 208} },
+};
+
+/* 8192-byte page size with 8-bit ECC -- requires 218-byte OOB */
+static struct nand_ecclayout oob_8192_ecc8 = {
+ .eccbytes = 256,
+ .eccpos = {
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71,
+ 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100, 101, 102, 103,
+ 104, 105, 106, 107, 108, 109, 110, 111,
+ 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127,
+ 128, 129, 130, 131, 132, 133, 134, 135,
+ 136, 137, 138, 139, 140, 141, 142, 143,
+ 144, 145, 146, 147, 148, 149, 150, 151,
+ 152, 153, 154, 155, 156, 157, 158, 159,
+ 160, 161, 162, 163, 164, 165, 166, 167,
+ 168, 169, 170, 171, 172, 173, 174, 175,
+ 176, 177, 178, 179, 180, 181, 182, 183,
+ 184, 185, 186, 187, 188, 189, 190, 191,
+ 192, 193, 194, 195, 196, 197, 198, 199,
+ 200, 201, 202, 203, 204, 205, 206, 207,
+ 208, 209, 210, 211, 212, 213, 214, 215,
+ 216, 217, 218, 219, 220, 221, 222, 223,
+ 224, 225, 226, 227, 228, 229, 230, 231,
+ 232, 233, 234, 235, 236, 237, 238, 239,
+ 240, 241, 242, 243, 244, 245, 246, 247,
+ 248, 249, 250, 251, 252, 253, 254, 255,
+ 256, 257, 258, 259, 260, 261, 262, 263,
+ },
+ .oobfree = { {2, 6}, {264, 80} },
+};
/*
* Generic flash bbt descriptors
@@ -866,11 +929,25 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
} else {
layout = &oob_4096_ecc8;
chip->ecc.bytes = 16;
+ chip->ecc.strength = 8;
}
priv->bufnum_mask = 1;
break;
+ case CSOR_NAND_PGS_8K:
+ if ((csor & CSOR_NAND_ECC_MODE_MASK) ==
+ CSOR_NAND_ECC_MODE_4) {
+ layout = &oob_8192_ecc4;
+ } else {
+ layout = &oob_8192_ecc8;
+ chip->ecc.bytes = 16;
+ chip->ecc.strength = 8;
+ }
+
+ priv->bufnum_mask = 0;
+ break;
+
default:
dev_err(priv->dev, "bad csor %#x: bad page size\n", csor);
return -ENODEV;
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 45cbc10..88ecb85 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -123,6 +123,21 @@ static struct phy_driver at8035_driver = {
},
};
+/* ATHEROS 8033 */
+static struct phy_driver at8033_driver = {
+ .phy_id = 0x004dd074,
+ .name = "Atheros 8033 ethernet",
+ .phy_id_mask = 0xffffffef,
+ .config_init = at803x_config_init,
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_aneg = &genphy_config_aneg,
+ .read_status = &genphy_read_status,
+ .driver = {
+ .owner = THIS_MODULE,
+ },
+};
+
/* ATHEROS 8030 */
static struct phy_driver at8030_driver = {
.phy_id = 0x004dd076,
@@ -146,12 +161,17 @@ static int __init atheros_init(void)
if (ret)
goto err1;
- ret = phy_driver_register(&at8030_driver);
+ ret = phy_driver_register(&at8033_driver);
if (ret)
goto err2;
- return 0;
+ ret = phy_driver_register(&at8030_driver);
+ if (ret)
+ goto err3;
+ return 0;
+err3:
+ phy_driver_unregister(&at8033_driver);
err2:
phy_driver_unregister(&at8035_driver);
err1:
@@ -161,6 +181,7 @@ err1:
static void __exit atheros_exit(void)
{
phy_driver_unregister(&at8035_driver);
+ phy_driver_unregister(&at8033_driver);
phy_driver_unregister(&at8030_driver);
}
@@ -169,6 +190,7 @@ module_exit(atheros_exit);
static struct mdio_device_id __maybe_unused atheros_tbl[] = {
{ 0x004dd076, 0xffffffef },
+ { 0x004dd074, 0xffffffef },
{ 0x004dd072, 0xffffffef },
{ }
};
diff --git a/drivers/staging/fsl_dpa_offload/dpa_classifier.c b/drivers/staging/fsl_dpa_offload/dpa_classifier.c
index 5056b2e..751dacd 100644
--- a/drivers/staging/fsl_dpa_offload/dpa_classifier.c
+++ b/drivers/staging/fsl_dpa_offload/dpa_classifier.c
@@ -1673,6 +1673,12 @@ static int flush_table(struct dpa_cls_table *ptable)
int_cc_node = &ptable->int_cc_node[cc_node_index];
dpa_classif_hm_release_chain(index_entry->hmd);
+#ifdef DPA_CLASSIFIER_DEBUG
+ dpa_cls_dbg(("DEBUG: dpa_classifier %s (%d): Remove "
+ "entry #%d from table cc_node=0x%p.\n",
+ __func__, __LINE__, index_entry->entry_index,
+ cc_node));
+#endif /* DPA_CLASSIFIER_DEBUG */
err = FM_PCD_MatchTableRemoveKey(cc_node,
index_entry->entry_index);
if (err != E_OK) {
@@ -2444,6 +2450,18 @@ static int table_insert_entry_exact_match(struct dpa_cls_table *cls_table,
}
/* Add the key to the selected Cc node */
+#ifdef DPA_CLASSIFIER_DEBUG
+ dpa_cls_dbg(("DEBUG: dpa_classifier %s (%d): Insert new entry in table "
+ "cc_node=0x%p.\n", __func__, __LINE__,
+ cls_table->int_cc_node[0].cc_node));
+ dpa_cls_dbg((" index=%d; action type (id)=%d; hmd=%d; h_Manip=0x%p\n",
+ cls_table->entry[k].entry_index, action->type, hmd,
+ key_params.ccNextEngineParams.h_Manip));
+ dpa_cls_dbg((" Lookup key (%d bytes): ",
+ cls_table->params.exact_match_params.key_size));
+ dump_lookup_key(key);
+ pr_err("\n");
+#endif /* DPA_CLASSIFIER_DEBUG */
err = FM_PCD_MatchTableAddKey((t_Handle)cls_table->
int_cc_node[0].cc_node,
cls_table->entry[k].entry_index,
@@ -2567,8 +2585,12 @@ static int table_insert_entry_hash(struct dpa_cls_table *cls_table,
}
if (key->mask) {
- log_err("Key masks are not supported by HASH tables.\n");
- return -EINVAL;
+ /* Only full 0xFF masks supported: */
+ for (j = 0; j < key->size; j++)
+ if (key->mask[j] ^ 0xff) {
+ log_err("Only key masks 0xff all over are supported by HASH tables.\n");
+ return -EINVAL;
+ }
}
memset(&key_params, 0, sizeof(t_FmPcdCcKeyParams));
@@ -3563,7 +3585,7 @@ static int init_hm_chain(void *fm_pcd, struct list_head *chain_head,
t_Error error;
struct dpa_cls_hm_node *pcurrent, *pnext;
t_FmPcdManipParams params;
- static int index;
+ static int index = 0;
static int num_int_nodes;
BUG_ON(!chain_head);
@@ -3776,7 +3798,7 @@ int remove_hm_chain(struct list_head *chain_head, struct list_head *item)
int err = 0;
struct dpa_cls_hm_node *pcurrent;
t_Error error;
- static int index;
+ static int index = 0;
BUG_ON(!chain_head);
BUG_ON(!item);
@@ -3807,6 +3829,8 @@ int remove_hm_chain(struct list_head *chain_head, struct list_head *item)
remove_hm_node(pcurrent);
+ index--;
+
return err;
}
diff --git a/drivers/staging/fsl_dpa_offload/dpa_stats.c b/drivers/staging/fsl_dpa_offload/dpa_stats.c
index 6c3605a..c5eb276 100644
--- a/drivers/staging/fsl_dpa_offload/dpa_stats.c
+++ b/drivers/staging/fsl_dpa_offload/dpa_stats.c
@@ -411,7 +411,7 @@ static int get_new_req(struct dpa_stats *dpa_stats,
static int put_cnt(struct dpa_stats *dpa_stats, struct dpa_stats_cnt_cb *cnt_cb)
{
- int err = 0;
+ int err = 0, i = 0;
/* Acquire DPA Stats instance lock */
mutex_lock(&dpa_stats->lock);
@@ -452,6 +452,16 @@ static int put_cnt(struct dpa_stats *dpa_stats, struct dpa_stats_cnt_cb *cnt_cb)
break;
}
+ /* Reset all statistics information */
+ memset(cnt_cb->info.stats_off, 0,
+ MAX_NUM_OF_STATS * sizeof(*cnt_cb->info.stats_off));
+ for (i = 0; i < MAX_NUM_OF_MEMBERS; i++) {
+ memset(cnt_cb->info.stats[i], 0,
+ MAX_NUM_OF_STATS * sizeof(uint64_t));
+ memset(cnt_cb->info.last_stats[i], 0,
+ MAX_NUM_OF_STATS * sizeof(uint64_t));
+ }
+
/* Release DPA Stats instance lock */
mutex_unlock(&dpa_stats->lock);
@@ -490,10 +500,69 @@ static int put_req(struct dpa_stats *dpa_stats, struct dpa_stats_req_cb *req_cb)
return 0;
}
+static int alloc_cnt_cb(struct dpa_stats *dpa_stats,
+ struct dpa_stats_cnt_cb *cnt_cb)
+{
+ int i = 0;
+
+ /* Initialize counter lock */
+ mutex_init(&cnt_cb->lock);
+ /* Store dpa_stats instance */
+ cnt_cb->dpa_stats = dpa_stats;
+ /* Counter is not initialized, set the index to invalid value */
+ cnt_cb->index = DPA_OFFLD_INVALID_OBJECT_ID;
+
+ /* Allocate array of statistics offsets */
+ cnt_cb->info.stats_off = kcalloc(MAX_NUM_OF_STATS,
+ sizeof(*cnt_cb->info.stats_off), GFP_KERNEL);
+ if (!cnt_cb->info.stats_off) {
+ log_err("Cannot allocate memory to store array of "
+ "statistics offsets\n");
+ return -ENOMEM;
+ }
+
+ /* Allocate array of currently read statistics */
+ cnt_cb->info.stats = kcalloc(MAX_NUM_OF_MEMBERS,
+ sizeof(uint64_t *), GFP_KERNEL);
+ if (!cnt_cb->info.stats) {
+ log_err("Cannot allocate memory to store array of "
+ "statistics for all members\n");
+ return -ENOMEM;
+ }
+ for (i = 0; i < MAX_NUM_OF_MEMBERS; i++) {
+ cnt_cb->info.stats[i] = kcalloc(MAX_NUM_OF_STATS,
+ sizeof(uint64_t), GFP_KERNEL);
+ if (!cnt_cb->info.stats[i]) {
+ log_err("Cannot allocate memory to store array of "
+ "statistics for %d member\n", i);
+ return -ENOMEM;
+ }
+ }
+
+ /* Allocate array of previously read statistics */
+ cnt_cb->info.last_stats = kcalloc(MAX_NUM_OF_MEMBERS,
+ sizeof(uint64_t *), GFP_KERNEL);
+ if (!cnt_cb->info.last_stats) {
+ log_err("Cannot allocate memory to store array of "
+ "previous read statistics for all members\n");
+ return -ENOMEM;
+ }
+ for (i = 0; i < MAX_NUM_OF_MEMBERS; i++) {
+ cnt_cb->info.last_stats[i] = kcalloc(MAX_NUM_OF_STATS,
+ sizeof(uint64_t), GFP_KERNEL);
+ if (!cnt_cb->info.last_stats[i]) {
+ log_err("Cannot allocate memory to store array of "
+ "previous read statistics for %d member\n", i);
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+
static int init_cnts_resources(struct dpa_stats *dpa_stats)
{
struct dpa_stats_params config = dpa_stats->config;
- int i;
+ int i, err;
/* Create circular queue that holds free counter IDs */
dpa_stats->cnt_id_cq = cq_new(config.max_counters, sizeof(int));
@@ -521,7 +590,16 @@ static int init_cnts_resources(struct dpa_stats *dpa_stats)
memset(dpa_stats->used_cnt_ids, DPA_OFFLD_INVALID_OBJECT_ID,
config.max_counters * sizeof(uint32_t));
- /* Allocate array to store counters control blocks */
+ /* Allocate array to store counter ids scheduled for retrieve */
+ dpa_stats->sched_cnt_ids = kcalloc(
+ config.max_counters, sizeof(bool), GFP_KERNEL);
+ if (!dpa_stats->sched_cnt_ids) {
+ log_err("Cannot allocate memory to store %d scheduled counter "
+ "ids\n", config.max_counters);
+ return -ENOMEM;
+ }
+
+ /* Allocate array of counters control blocks */
dpa_stats->cnts_cb = kzalloc(config.max_counters *
sizeof(struct dpa_stats_cnt_cb), GFP_KERNEL);
if (!dpa_stats->cnts_cb) {
@@ -530,10 +608,11 @@ static int init_cnts_resources(struct dpa_stats *dpa_stats)
return -ENOMEM;
}
+ /* Allocate memory for every counter control block */
for (i = 0; i < config.max_counters; i++) {
- mutex_init(&dpa_stats->cnts_cb[i].lock);
- dpa_stats->cnts_cb[i].dpa_stats = dpa_stats;
- dpa_stats->cnts_cb[i].index = DPA_OFFLD_INVALID_OBJECT_ID;
+ err = alloc_cnt_cb(dpa_stats, &dpa_stats->cnts_cb[i]);
+ if (err < 0)
+ return err;
}
return 0;
@@ -541,7 +620,7 @@ static int init_cnts_resources(struct dpa_stats *dpa_stats)
static int free_cnts_resources(struct dpa_stats *dpa_stats)
{
- uint32_t id, i;
+ uint32_t id, i, j;
int err = 0;
for (i = 0; i < dpa_stats->config.max_counters; i++) {
@@ -549,13 +628,21 @@ static int free_cnts_resources(struct dpa_stats *dpa_stats)
id = dpa_stats->used_cnt_ids[i];
mutex_unlock(&dpa_stats->lock);
- if (id != DPA_OFFLD_INVALID_OBJECT_ID)
+ if (id != DPA_OFFLD_INVALID_OBJECT_ID) {
/* Release the counter id in the Counter IDs cq */
err = put_cnt(dpa_stats, &dpa_stats->cnts_cb[id]);
if (err < 0) {
log_err("Cannot release counter id %d\n", id);
return err;
}
+ }
+ for (j = 0; j < MAX_NUM_OF_MEMBERS; j++) {
+ kfree(dpa_stats->cnts_cb[i].info.stats[j]);
+ kfree(dpa_stats->cnts_cb[i].info.last_stats[j]);
+ }
+ kfree(dpa_stats->cnts_cb[i].info.stats_off);
+ kfree(dpa_stats->cnts_cb[i].info.stats);
+ kfree(dpa_stats->cnts_cb[i].info.last_stats);
}
/* Release counters IDs circular queue */
@@ -572,6 +659,10 @@ static int free_cnts_resources(struct dpa_stats *dpa_stats)
kfree(dpa_stats->used_cnt_ids);
dpa_stats->used_cnt_ids = NULL;
+ /* Release scheduled counters ids array */
+ kfree(dpa_stats->sched_cnt_ids);
+ dpa_stats->sched_cnt_ids = NULL;
+
return 0;
}
@@ -629,7 +720,7 @@ static int init_reqs_resources(struct dpa_stats *dpa_stats)
/* Allocate array to store the counter ids */
for (i = 0; i < DPA_STATS_MAX_NUM_OF_REQUESTS; i++) {
dpa_stats->reqs_cb[i].cnts_ids =
- kzalloc(DPA_STATS_MAX_NUM_OF_COUNTERS *
+ kzalloc(dpa_stats->config.max_counters *
sizeof(int), GFP_KERNEL);
if (!dpa_stats->reqs_cb[i].cnts_ids) {
log_err("Cannot allocate memory for array of counter "
@@ -1565,6 +1656,13 @@ static int set_cnt_classif_tbl_cb(struct dpa_stats_cnt_cb *cnt_cb,
prm.td, cnt_cb->id);
return -EINVAL;
}
+ /* Allocate memory for one key descriptor */
+ cnt_tbl_cb->keys = kzalloc(sizeof(*cnt_tbl_cb->keys), GFP_KERNEL);
+ if (!cnt_tbl_cb->keys) {
+ log_err("Cannot allocate memory for key descriptor "
+ "for counter id %d\n", cnt_cb->id);
+ return -ENOMEM;
+ }
/* Store CcNode handle and set number of keys to one */
cnt_tbl_cb->keys[0].cc_node = cls_tbl.cc_node;
@@ -1710,6 +1808,23 @@ static int set_cnt_ipsec_cb(struct dpa_stats_cnt_cb *cnt_cb,
return -EFAULT;
}
+ /* Allocate memory for one security association id */
+ cnt_cb->ipsec_cb.sa_id = kzalloc(sizeof(*cnt_cb->ipsec_cb.sa_id),
+ GFP_KERNEL);
+ if (!cnt_cb->ipsec_cb.sa_id) {
+ log_err("Cannot allocate memory for security association id "
+ "for counter id %d\n", cnt_cb->id);
+ return -ENOMEM;
+ }
+
+ /* Allocate memory to store if security association is valid */
+ cnt_cb->ipsec_cb.valid = kzalloc(sizeof(*cnt_cb->ipsec_cb.valid),
+ GFP_KERNEL);
+ if (!cnt_cb->ipsec_cb.valid) {
+ log_err("Cannot allocate memory to store if security "
+ "association is valid for counter id %d\n", cnt_cb->id);
+ return -ENOMEM;
+ }
cnt_cb->ipsec_cb.sa_id[0] = params->ipsec_params.sa_id;
cnt_cb->ipsec_cb.valid[0] = TRUE;
@@ -2173,6 +2288,15 @@ static int set_cls_cnt_classif_tbl_cb(struct dpa_stats_cnt_cb *cnt_cb,
tbl_cb->td = params->classif_tbl_params.td;
cnt_cb->members_num = params->class_members;
+ /* Allocate memory for key descriptors */
+ tbl_cb->keys = kcalloc(params->class_members,
+ sizeof(*tbl_cb->keys), GFP_KERNEL);
+ if (!tbl_cb->keys) {
+ log_err("Cannot allocate memory for array of key descriptors "
+ "for counter id %d\n", cnt_cb->id);
+ return -ENOMEM;
+ }
+
switch (prm.key_type) {
case DPA_STATS_CLASSIF_SINGLE_KEY:
if (!prm.keys) {
@@ -2421,6 +2545,25 @@ static int set_cls_cnt_ipsec_cb(struct dpa_stats_cnt_cb *cnt_cb,
cnt_cb->members_num = prm->class_members;
+ /* Allocate memory for array of security association ids */
+ cnt_cb->ipsec_cb.sa_id = kcalloc(cnt_cb->members_num,
+ sizeof(*cnt_cb->ipsec_cb.sa_id), GFP_KERNEL);
+ if (!cnt_cb->ipsec_cb.sa_id) {
+ log_err("Cannot allocate memory for array of security "
+ "association ids, for counter id %d\n", cnt_cb->id);
+ return -ENOMEM;
+ }
+
+ /* Allocate memory for array that stores if SA id is valid */
+ cnt_cb->ipsec_cb.valid = kcalloc(cnt_cb->members_num,
+ sizeof(*cnt_cb->ipsec_cb.valid), GFP_KERNEL);
+ if (!cnt_cb->ipsec_cb.valid) {
+ log_err("Cannot allocate memory for array that stores if "
+ "security association ids are valid for counter id %d\n",
+ cnt_cb->id);
+ return -ENOMEM;
+ }
+
for (i = 0; i < prm->class_members; i++) {
if (prm->ipsec_params.sa_id[i] != DPA_OFFLD_INVALID_OBJECT_ID) {
cnt_ipsec_cb->sa_id[i] = prm->ipsec_params.sa_id[i];
@@ -3748,19 +3891,30 @@ int dpa_stats_remove_counter(int dpa_stats_cnt_id)
return -EINVAL;
}
- /* Remove the allocated memory for keys bytes and masks */
- if (cnt_cb->type == DPA_STATS_CNT_CLASSIF_NODE)
+ switch (cnt_cb->type) {
+ case DPA_STATS_CNT_CLASSIF_NODE:
+ /* Remove the allocated memory for keys bytes and masks */
for (i = 0; i < cnt_cb->members_num; i++) {
kfree(cnt_cb->ccnode_cb.keys[i].byte);
kfree(cnt_cb->ccnode_cb.keys[i].mask);
}
-
- /* Remove the allocated memory for keys bytes and masks */
- if (cnt_cb->type == DPA_STATS_CNT_CLASSIF_TBL)
+ break;
+ case DPA_STATS_CNT_CLASSIF_TBL:
+ /* Remove the allocated memory for keys bytes, masks and keys */
for (i = 0; i < cnt_cb->members_num; i++) {
kfree(cnt_cb->tbl_cb.keys[i].key.byte);
kfree(cnt_cb->tbl_cb.keys[i].key.mask);
}
+ kfree(cnt_cb->tbl_cb.keys);
+ break;
+ case DPA_STATS_CNT_IPSEC:
+ /* Remove the allocated memory for security associations */
+ kfree(cnt_cb->ipsec_cb.sa_id);
+ kfree(cnt_cb->ipsec_cb.valid);
+ break;
+ default:
+ break;
+ }
/* Release the counter id in the Counter IDs circular queue */
err = put_cnt(dpa_stats, cnt_cb);
@@ -3905,7 +4059,7 @@ int dpa_stats_reset_counters(int *cnts_ids, unsigned int cnts_ids_len)
{
struct dpa_stats *dpa_stats = NULL;
struct dpa_stats_cnt_cb *cnt_cb = NULL;
- uint32_t i = 0;
+ uint32_t i = 0, j = 0;
int err = 0;
if (!gbl_dpa_stats) {
@@ -3963,8 +4117,11 @@ int dpa_stats_reset_counters(int *cnts_ids, unsigned int cnts_ids_len)
cnts_ids, cnts_ids_len);
return -EINVAL;
}
- memset(&cnt_cb->info.stats, 0, (MAX_NUM_OF_MEMBERS *
- MAX_NUM_OF_STATS * sizeof(uint64_t)));
+ /* Reset stored statistics values */
+ for (j = 0; j < MAX_NUM_OF_MEMBERS; j++)
+ memset(cnt_cb->info.stats[j], 0,
+ MAX_NUM_OF_STATS * sizeof(uint64_t));
+
mutex_unlock(&cnt_cb->lock);
}
diff --git a/drivers/staging/fsl_dpa_offload/dpa_stats.h b/drivers/staging/fsl_dpa_offload/dpa_stats.h
index 3d682af..eec099c 100644
--- a/drivers/staging/fsl_dpa_offload/dpa_stats.h
+++ b/drivers/staging/fsl_dpa_offload/dpa_stats.h
@@ -68,8 +68,7 @@ struct dpa_stats {
*/
struct workqueue_struct *async_req_workqueue;
struct mutex lock; /* Lock for this dpa_stats instance */
- /* Counters that are scheduled for a retrieve operation */
- bool sched_cnt_ids[DPA_STATS_MAX_NUM_OF_COUNTERS];
+ bool *sched_cnt_ids; /* Counters scheduled for a retrieve operation */
struct mutex sched_cnt_lock; /* Lock for array of scheduled counters */
};
@@ -94,12 +93,10 @@ struct stats_info {
* Array of statistics offsets relative to
* corresponding statistics area
*/
- unsigned int stats_off[MAX_NUM_OF_STATS];
+ unsigned int *stats_off;
unsigned int stats_num; /* Number of statistics to retrieve */
- uint64_t stats[MAX_NUM_OF_MEMBERS][MAX_NUM_OF_STATS];
- /* Array to store statistics values */
- uint64_t last_stats[MAX_NUM_OF_MEMBERS][MAX_NUM_OF_STATS];
- /* Array to store previous statistics values */
+ uint64_t **stats; /* Array to store statistics values */
+ uint64_t **last_stats;/* Array to store previous statistics values */
bool reset; /* Reset counter's statistics */
};
@@ -122,7 +119,7 @@ struct dpa_stats_lookup_key {
struct dpa_stats_cnt_classif_tbl_cb {
int td; /* Table descriptor */
enum dpa_cls_tbl_type type; /* The type of the DPA Classifier table */
- struct dpa_stats_lookup_key keys[MAX_NUM_OF_MEMBERS]; /* Array of
+ struct dpa_stats_lookup_key *keys; /* Array of
key descriptors for which to provide statistics */
};
@@ -135,8 +132,8 @@ struct dpa_stats_cnt_classif_cb {
/* DPA Stats IPSec Counter control block */
struct dpa_stats_cnt_ipsec_cb {
- int sa_id[MAX_NUM_OF_MEMBERS]; /* Array of Security Association ids */
- bool valid[MAX_NUM_OF_MEMBERS]; /* Security Association id is valid */
+ int *sa_id; /* Array of Security Association ids */
+ bool *valid; /* Security Association id is valid */
};
typedef int get_cnt_stats(struct dpa_stats_req_cb *req_cb,