author:    Roy Pledge <roy.pledge@freescale.com>            2013-03-06 12:55:29 (GMT)
committer: Fleming Andrew-AFLEMING <AFLEMING@freescale.com>  2013-04-18 21:56:59 (GMT)
commit:    e9e49107ad3f879ce332a19c635f8612ad7d3c0f (patch)
tree:      db95e17020cea5fab48c187721c6afc3efa94382 /drivers/staging/fsl_qbman/dpa_alloc.c
parent:    be3f3a93ea18a4e8db382eb171e02b0349d071f2 (diff)
Cleanup USDPAA resources when a process exits
Add an API to query the usage levels of DMA memory. Eliminates UIO and
converts USDPAA to interact with the kernel via a character device.
Removed misc/devices fsl_usdpaa file. Remove unneeded file.

Change-Id: I797548e2c79f7098e6378484bcae2b455f022cd4
Reviewed-on: http://git.am.freescale.net:8181/1546
Reviewed-by: Ladouceur Jeffrey-R11498 <Jeffrey.Ladouceur@freescale.com>
Reviewed-by: Wang Haiying-R54964 <Haiying.Wang@freescale.com>
Reviewed-by: Fleming Andrew-AFLEMING <AFLEMING@freescale.com>
Tested-by: Fleming Andrew-AFLEMING <AFLEMING@freescale.com>
Diffstat (limited to 'drivers/staging/fsl_qbman/dpa_alloc.c')
-rw-r--r--  drivers/staging/fsl_qbman/dpa_alloc.c | 339
1 file changed, 262 insertions(+), 77 deletions(-)
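
For context on the new front-ends this patch exports (bman_seed_bpid_range(), qman_seed_fqid_range(), qman_reserve_fqid_range() and their pool/CGR/CEETM counterparts), the sketch below shows how a platform init path might seed and then reserve ID ranges. It is illustrative only: the header paths, the example_seed_and_reserve() helper and the range values are assumptions, not part of this commit.

#include <linux/kernel.h>
#include <linux/fsl_qman.h>	/* assumed location of the qman_* front-end prototypes */
#include <linux/fsl_bman.h>	/* assumed location of the bman_* front-end prototypes */

/* Hypothetical init-time helper, not part of this patch. */
static int example_seed_and_reserve(void)
{
	int ret;

	/* Seeding hands otherwise-unused IDs straight to the allocator's
	 * free list (dpa_alloc_seed() below), so that later
	 * qman_alloc_fqid_range()/bman_alloc_bpid_range() calls can draw
	 * from them. */
	qman_seed_fqid_range(256, 256);	/* FQIDs 256..511 */
	bman_seed_bpid_range(8, 8);	/* BPIDs 8..15 */

	/* Reserving pins a range on the used list with a refcount of 1 so
	 * dpa_alloc_new() can never hand it out; the reserved range must
	 * not overlap anything already seeded into the free list, which
	 * dpa_alloc_reserve() below rejects. */
	ret = qman_reserve_fqid_range(32, 32);	/* FQIDs 32..63 */
	if (ret)
		pr_err("example: could not reserve FQIDs 32..63 (%d)\n", ret);

	return ret;
}
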
diff --git a/drivers/staging/fsl_qbman/dpa_alloc.c b/drivers/staging/fsl_qbman/dpa_alloc.c
index 03a9d28..3fb158e 100644
--- a/drivers/staging/fsl_qbman/dpa_alloc.c
+++ b/drivers/staging/fsl_qbman/dpa_alloc.c
@@ -93,27 +93,27 @@ int bman_alloc_bpid_range(u32 *result, u32 count, u32 align, int partial)
}
EXPORT_SYMBOL(bman_alloc_bpid_range);
-static int bp_valid(u32 bpid)
-{
- struct bm_pool_state state;
- int ret = bman_query_pools(&state);
- BUG_ON(ret);
- if (bman_depletion_get(&state.as.state, bpid))
- /* "Available==1" means unavailable, go figure. Ie. it has no
- * buffers, which is means it is valid for deallocation. (So
- * true means false, which means true...) */
- return 1;
- return 0;
+static int bp_cleanup(u32 bpid)
+{
+ return (bman_shutdown_pool(bpid) == 0);
}
void bman_release_bpid_range(u32 bpid, u32 count)
{
- u32 total_invalid = release_id_range(&bpalloc, bpid, count, bp_valid);
+ u32 total_invalid = release_id_range(&bpalloc, bpid, count, bp_cleanup);
if (total_invalid)
pr_err("BPID range [%d..%d] (%d) had %d leaks\n",
bpid, bpid + count - 1, count, total_invalid);
}
EXPORT_SYMBOL(bman_release_bpid_range);
+void bman_seed_bpid_range(u32 bpid, u32 count)
+{
+ dpa_alloc_seed(&bpalloc, bpid, count);
+}
+EXPORT_SYMBOL(bman_seed_bpid_range);
+
+
+
/* FQID allocator front-end */
int qman_alloc_fqid_range(u32 *result, u32 count, u32 align, int partial)
@@ -122,25 +122,31 @@ int qman_alloc_fqid_range(u32 *result, u32 count, u32 align, int partial)
}
EXPORT_SYMBOL(qman_alloc_fqid_range);
-static int fq_valid(u32 fqid)
+static int fq_cleanup(u32 fqid)
{
- struct qman_fq fq = {
- .fqid = fqid
- };
- struct qm_mcr_queryfq_np np;
- int err = qman_query_fq_np(&fq, &np);
- BUG_ON(err);
- return ((np.state & QM_MCR_NP_STATE_MASK) == QM_MCR_NP_STATE_OOS);
+ return (qman_shutdown_fq(fqid) == 0);
}
void qman_release_fqid_range(u32 fqid, u32 count)
{
- u32 total_invalid = release_id_range(&fqalloc, fqid, count, fq_valid);
+ u32 total_invalid = release_id_range(&fqalloc, fqid, count, fq_cleanup);
if (total_invalid)
pr_err("FQID range [%d..%d] (%d) had %d leaks\n",
fqid, fqid + count - 1, count, total_invalid);
}
EXPORT_SYMBOL(qman_release_fqid_range);
+int qman_reserve_fqid_range(u32 fqid, u32 count)
+{
+ return dpa_alloc_reserve(&fqalloc, fqid, count);
+}
+EXPORT_SYMBOL(qman_reserve_fqid_range);
+
+void qman_seed_fqid_range(u32 fqid, u32 count)
+{
+ dpa_alloc_seed(&fqalloc, fqid, count);
+}
+EXPORT_SYMBOL(qman_seed_fqid_range);
+
/* Pool-channel allocator front-end */
int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial)
@@ -149,12 +155,12 @@ int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial)
}
EXPORT_SYMBOL(qman_alloc_pool_range);
-static int qp_valid(u32 qp)
+static int qpool_cleanup(u32 qp)
{
- /* TBD: when resource-management improves, we may be able to find
- * something better than this. Currently we query all FQDs starting from
+ /* We query all FQDs starting from
* FQID 1 until we get an "invalid FQID" error, looking for non-OOS FQDs
- * whose destination channel is the pool-channel being released. */
+ * whose destination channel is the pool-channel being released.
+ * When a non-OOS FQD is found we attempt to clean it up */
struct qman_fq fq = {
.fqid = 1
};
@@ -169,9 +175,13 @@ static int qp_valid(u32 qp)
struct qm_fqd fqd;
err = qman_query_fq(&fq, &fqd);
BUG_ON(err);
- if (fqd.dest.channel == qp)
- /* The channel is the FQ's target, can't free */
- return 0;
+ if (fqd.dest.channel == qp) {
+ /* The channel is the FQ's target, clean it */
+ if (qman_shutdown_fq(fq.fqid) != 0)
+ /* Couldn't shut down the FQ
+ so the pool must be leaked */
+ return 0;
+ }
}
/* Move to the next FQID */
fq.fqid++;
@@ -179,7 +189,8 @@ static int qp_valid(u32 qp)
}
void qman_release_pool_range(u32 qp, u32 count)
{
- u32 total_invalid = release_id_range(&qpalloc, qp, count, qp_valid);
+ u32 total_invalid = release_id_range(&qpalloc, qp,
+ count, qpool_cleanup);
if (total_invalid) {
/* Pool channels are almost always used individually */
if (count == 1)
@@ -192,6 +203,15 @@ void qman_release_pool_range(u32 qp, u32 count)
}
EXPORT_SYMBOL(qman_release_pool_range);
+
+void qman_seed_pool_range(u32 poolid, u32 count)
+{
+ dpa_alloc_seed(&qpalloc, poolid, count);
+
+}
+EXPORT_SYMBOL(qman_seed_pool_range);
+
+
/* CGR ID allocator front-end */
int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial)
@@ -200,15 +220,56 @@ int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial)
}
EXPORT_SYMBOL(qman_alloc_cgrid_range);
+static int cqr_cleanup(u32 cgrid)
+{
+ /* We query all FQDs starting from
+ * FQID 1 until we get an "invalid FQID" error, looking for non-OOS FQDs
+ * whose CGR is the CGR being released.
+ */
+ struct qman_fq fq = {
+ .fqid = 1
+ };
+ int err;
+ do {
+ struct qm_mcr_queryfq_np np;
+ err = qman_query_fq_np(&fq, &np);
+ if (err)
+ /* FQID range exceeded, found no problems */
+ return 1;
+ if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
+ struct qm_fqd fqd;
+ err = qman_query_fq(&fq, &fqd);
+ BUG_ON(err);
+ if ((fqd.fq_ctrl & QM_FQCTRL_CGE) &&
+ (fqd.cgid == cgrid)) {
+ pr_err("CRGID 0x%x is being used by FQID 0x%x,"
+ " CGR will be leaked\n",
+ cgrid, fq.fqid);
+ return 1;
+ }
+ }
+ /* Move to the next FQID */
+ fq.fqid++;
+ } while (1);
+}
+
void qman_release_cgrid_range(u32 cgrid, u32 count)
{
- u32 total_invalid = release_id_range(&cgralloc, cgrid, count, NULL);
+ u32 total_invalid = release_id_range(&cgralloc, cgrid,
+ count, cqr_cleanup);
if (total_invalid)
pr_err("CGRID range [%d..%d] (%d) had %d leaks\n",
cgrid, cgrid + count - 1, count, total_invalid);
}
EXPORT_SYMBOL(qman_release_cgrid_range);
+void qman_seed_cgrid_range(u32 cgrid, u32 count)
+{
+ dpa_alloc_seed(&cgralloc, cgrid, count);
+
+}
+EXPORT_SYMBOL(qman_seed_cgrid_range);
+
/* CEETM CHANNEL ID allocator front-end */
int qman_alloc_ceetm0_channel_range(u32 *result, u32 count, u32 align,
int partial)
@@ -236,6 +297,13 @@ void qman_release_ceetm0_channel_range(u32 channelid, u32 count)
}
EXPORT_SYMBOL(qman_release_ceetm0_channel_range);
+void qman_seed_ceetm0_channel_range(u32 channelid, u32 count)
+{
+ dpa_alloc_seed(&ceetm0_challoc, channelid, count);
+
+}
+EXPORT_SYMBOL(qman_seed_ceetm0_channel_range);
+
void qman_release_ceetm1_channel_range(u32 channelid, u32 count)
{
u32 total_invalid;
@@ -247,6 +315,13 @@ void qman_release_ceetm1_channel_range(u32 channelid, u32 count)
}
EXPORT_SYMBOL(qman_release_ceetm1_channel_range);
+void qman_seed_ceetm1_channel_range(u32 channelid, u32 count)
+{
+ dpa_alloc_seed(&ceetm1_challoc, channelid, count);
+
+}
+EXPORT_SYMBOL(qman_seed_ceetm1_channel_range);
+
/* CEETM LFQID allocator front-end */
int qman_alloc_ceetm0_lfqid_range(u32 *result, u32 count, u32 align,
int partial)
@@ -274,6 +349,13 @@ void qman_release_ceetm0_lfqid_range(u32 lfqid, u32 count)
}
EXPORT_SYMBOL(qman_release_ceetm0_lfqid_range);
+void qman_seed_ceetm0_lfqid_range(u32 lfqid, u32 count)
+{
+ dpa_alloc_seed(&ceetm0_lfqidalloc, lfqid, count);
+
+}
+EXPORT_SYMBOL(qman_seed_ceetm0_lfqid_range);
+
void qman_release_ceetm1_lfqid_range(u32 lfqid, u32 count)
{
u32 total_invalid;
@@ -286,6 +368,14 @@ void qman_release_ceetm1_lfqid_range(u32 lfqid, u32 count)
}
EXPORT_SYMBOL(qman_release_ceetm1_lfqid_range);
+void qman_seed_ceetm1_lfqid_range(u32 lfqid, u32 count)
+{
+ dpa_alloc_seed(&ceetm1_lfqidalloc, lfqid, count);
+
+}
+EXPORT_SYMBOL(qman_seed_ceetm1_lfqid_range);
+
+
/* Everything else is the common backend to all the allocators */
/* The allocator is a (possibly-empty) list of these; */
@@ -293,6 +383,10 @@ struct alloc_node {
struct list_head list;
u32 base;
u32 num;
+ /* refcount and is_alloced are only set
+ when the node is in the used list */
+ unsigned int refcount;
+ int is_alloced;
};
/* #define DPA_ALLOC_DEBUG */
@@ -304,12 +398,25 @@ static void DUMP(struct dpa_alloc *alloc)
int off = 0;
char buf[256];
struct alloc_node *p;
- list_for_each_entry(p, &alloc->list, list) {
+ pr_info("Free Nodes\n");
+ list_for_each_entry(p, &alloc->free, list) {
if (off < 255)
off += snprintf(buf + off, 255-off, "{%d,%d}",
p->base, p->base + p->num - 1);
}
pr_info("%s\n", buf);
+
+ off = 0;
+ pr_info("Used Nodes\n");
+ list_for_each_entry(p, &alloc->used, list) {
+ if (off < 255)
+ off += snprintf(buf + off, 255-off, "{%d,%d}",
+ p->base, p->base + p->num - 1);
+ }
+ pr_info("%s\n", buf);
+
+
+
}
#else
#define DPRINT(x...) do { ; } while (0)
@@ -319,7 +426,7 @@ static void DUMP(struct dpa_alloc *alloc)
int dpa_alloc_new(struct dpa_alloc *alloc, u32 *result, u32 count, u32 align,
int partial)
{
- struct alloc_node *i = NULL, *next_best = NULL;
+ struct alloc_node *i = NULL, *next_best = NULL, *used_node = NULL;
u32 base, next_best_base = 0, num = 0, next_best_num = 0;
struct alloc_node *margin_left, *margin_right;
@@ -338,7 +445,7 @@ int dpa_alloc_new(struct dpa_alloc *alloc, u32 *result, u32 count, u32 align,
goto err;
}
spin_lock_irq(&alloc->lock);
- list_for_each_entry(i, &alloc->list, list) {
+ list_for_each_entry(i, &alloc->free, list) {
base = (i->base + align - 1) / align;
base *= align;
if ((base - i->base) >= i->num)
@@ -385,12 +492,24 @@ done:
err:
DPRINT("returning %d\n", i ? num : -ENOMEM);
DUMP(alloc);
- return i ? (int)num : -ENOMEM;
+ if (!i)
+ return -ENOMEM;
+
+ /* Add the allocation to the used list with a refcount of 1 */
+ used_node = kmalloc(sizeof(*used_node), GFP_KERNEL);
+ if (!used_node)
+ return -ENOMEM;
+ used_node->base = *result;
+ used_node->num = num;
+ used_node->refcount = 1;
+ used_node->is_alloced = 1;
+ list_add_tail(&used_node->list, &alloc->used);
+ return (int)num;
}
/* Allocate the list node using GFP_ATOMIC, because we *really* want to avoid
* forcing error-handling on to users in the deallocation path. */
-void dpa_alloc_free(struct dpa_alloc *alloc, u32 base_id, u32 count)
+static void _dpa_alloc_free(struct dpa_alloc *alloc, u32 base_id, u32 count)
{
struct alloc_node *i, *node = kmalloc(sizeof(*node), GFP_ATOMIC);
BUG_ON(!node);
@@ -398,9 +517,11 @@ void dpa_alloc_free(struct dpa_alloc *alloc, u32 base_id, u32 count)
DUMP(alloc);
BUG_ON(!count);
spin_lock_irq(&alloc->lock);
+
+
node->base = base_id;
node->num = count;
- list_for_each_entry(i, &alloc->list, list) {
+ list_for_each_entry(i, &alloc->free, list) {
if (i->base >= node->base) {
/* BUG_ON(any overlapping) */
BUG_ON(i->base < (node->base + node->num));
@@ -408,11 +529,11 @@ void dpa_alloc_free(struct dpa_alloc *alloc, u32 base_id, u32 count)
goto done;
}
}
- list_add_tail(&node->list, &alloc->list);
+ list_add_tail(&node->list, &alloc->free);
done:
/* Merge to the left */
i = list_entry(node->list.prev, struct alloc_node, list);
- if (node->list.prev != &alloc->list) {
+ if (node->list.prev != &alloc->free) {
BUG_ON((i->base + i->num) > node->base);
if ((i->base + i->num) == node->base) {
node->base = i->base;
@@ -423,7 +544,7 @@ done:
}
/* Merge to the right */
i = list_entry(node->list.next, struct alloc_node, list);
- if (node->list.next != &alloc->list) {
+ if (node->list.next != &alloc->free) {
BUG_ON((node->base + node->num) > i->base);
if ((node->base + node->num) == i->base) {
node->num += i->num;
@@ -435,60 +556,107 @@ done:
DUMP(alloc);
}
-int dpa_alloc_reserve(struct dpa_alloc *alloc, u32 base, u32 num)
+
+void dpa_alloc_free(struct dpa_alloc *alloc, u32 base_id, u32 count)
{
struct alloc_node *i = NULL;
- struct alloc_node *margin_left, *margin_right;
+ spin_lock_irq(&alloc->lock);
- DPRINT("alloc_reserve(%d,%d)\n", base_id, count);
- DUMP(alloc);
- margin_left = kmalloc(sizeof(*margin_left), GFP_KERNEL);
- if (!margin_left)
- goto err;
- margin_right = kmalloc(sizeof(*margin_right), GFP_KERNEL);
- if (!margin_right) {
- kfree(margin_left);
- goto err;
+ /* First find the node in the used list and decrement its ref count */
+ list_for_each_entry(i, &alloc->used, list) {
+ if (i->base == base_id && i->num == count) {
+ --i->refcount;
+ if (i->refcount == 0) {
+ list_del(&i->list);
+ spin_unlock_irq(&alloc->lock);
+ if (i->is_alloced)
+ _dpa_alloc_free(alloc, base_id, count);
+ kfree(i);
+ return;
+ }
+ spin_unlock_irq(&alloc->lock);
+ return;
+ }
}
+ /* Couldn't find the allocation */
+ pr_err("Attempt to free ID 0x%x COUNT %d that wasn't alloc'd or reserved\n",
+ base_id, count);
+ spin_unlock_irq(&alloc->lock);
+}
+
+void dpa_alloc_seed(struct dpa_alloc *alloc, u32 base_id, u32 count)
+{
+ /* Same as free but no previous allocation checking is needed */
+ _dpa_alloc_free(alloc, base_id, count);
+}
+
+
+int dpa_alloc_reserve(struct dpa_alloc *alloc, u32 base, u32 num)
+{
+ struct alloc_node *i = NULL, *used_node;
+
+ DPRINT("alloc_reserve(%d,%d)\n", base, num);
+ DUMP(alloc);
+
spin_lock_irq(&alloc->lock);
- list_for_each_entry(i, &alloc->list, list)
- if ((i->base <= base) && ((i->base + i->num) >= (base + num)))
+
+ /* Check for the node in the used list.
If found, increase its refcount */
+ list_for_each_entry(i, &alloc->used, list) {
+ if ((i->base == base) && (i->num == num)) {
+ ++i->refcount;
+ spin_unlock_irq(&alloc->lock);
+ return 0;
+ }
+ if ((base >= i->base) && (base < (i->base + i->num))) {
+ /* This is an attempt to reserve a region that was
+ already reserved or alloced with a different
+ base or num */
+ pr_err("Cannot reserve %d - %d, it overlaps with"
+ " existing reservation from %d - %d\n",
+ base, base + num - 1, i->base,
+ i->base + i->num - 1);
+ spin_unlock_irq(&alloc->lock);
+ return -1;
+ }
+ }
+ /* Check to make sure this ID isn't in the free list */
+ list_for_each_entry(i, &alloc->free, list) {
+ if ((base >= i->base) && (base < (i->base + i->num))) {
/* yep, the reservation is within this node */
- goto done;
- i = NULL;
-done:
- if (i) {
- if (base != i->base) {
- margin_left->base = i->base;
- margin_left->num = base - i->base;
- list_add_tail(&margin_left->list, &i->list);
- } else
- kfree(margin_left);
- if ((base + num) < (i->base + i->num)) {
- margin_right->base = base + num;
- margin_right->num = (i->base + i->num) -
- (base + num);
- list_add(&margin_right->list, &i->list);
- } else
- kfree(margin_right);
- list_del(&i->list);
- kfree(i);
+ pr_err("Cannot reserve %d - %d, it overlaps with"
+ " free range %d - %d and must be alloced\n",
+ base, base + num - 1,
+ i->base, i->base + i->num - 1);
+ spin_unlock_irq(&alloc->lock);
+ return -1;
+ }
}
+ /* Add the allocation to the used list with a refcount of 1 */
+ used_node = kmalloc(sizeof(*used_node), GFP_KERNEL);
+ if (!used_node) {
+ spin_unlock_irq(&alloc->lock);
+ return -ENOMEM;
+
+ }
+ used_node->base = base;
+ used_node->num = num;
+ used_node->refcount = 1;
+ used_node->is_alloced = 0;
+ list_add_tail(&used_node->list, &alloc->used);
spin_unlock_irq(&alloc->lock);
-err:
- DPRINT("returning %d\n", i ? 0 : -ENOMEM);
- DUMP(alloc);
- return i ? 0 : -ENOMEM;
+ return 0;
}
+
int dpa_alloc_pop(struct dpa_alloc *alloc, u32 *result, u32 *count)
{
struct alloc_node *i = NULL;
DPRINT("alloc_pop()\n");
DUMP(alloc);
spin_lock_irq(&alloc->lock);
- if (!list_empty(&alloc->list)) {
- i = list_entry(alloc->list.next, struct alloc_node, list);
+ if (!list_empty(&alloc->free)) {
+ i = list_entry(alloc->free.next, struct alloc_node, list);
list_del(&i->list);
}
spin_unlock_irq(&alloc->lock);
@@ -501,3 +669,20 @@ int dpa_alloc_pop(struct dpa_alloc *alloc, u32 *result, u32 *count)
kfree(i);
return 0;
}
+
+int dpa_alloc_check(struct dpa_alloc *list_head, u32 item)
+{
+ struct alloc_node *i = NULL;
+ int res = 0;
+ DPRINT("alloc_check()\n");
+ spin_lock_irq(&list_head->lock);
+
+ list_for_each_entry(i, &list_head->free, list) {
+ if ((item >= i->base) && (item < (i->base + i->num))) {
+ res = 1;
+ break;
+ }
+ }
+ spin_unlock_irq(&list_head->lock);
+ return res;
+}
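
The refcounted used list and the new dpa_alloc_check() helper combine as in the short walk-through below. This is a hypothetical caller sketch under assumptions not in the patch: "my_alloc" is an illustrative struct dpa_alloc instance, and "dpa_sys.h" is assumed to be the driver-private header that declares it and the dpa_alloc_* prototypes.

#include <linux/kernel.h>
#include "dpa_sys.h"	/* assumed driver-private header for struct dpa_alloc */

/* Hypothetical walk-through of the refcounting introduced by this patch. */
static void example_reserve_refcounting(struct dpa_alloc *my_alloc)
{
	/* The first reservation creates a used-list node with refcount 1. */
	if (dpa_alloc_reserve(my_alloc, 100, 4))
		return;

	/* Reserving the identical base/count again only bumps the refcount;
	 * an overlap with a different base/count would fail with -1. */
	dpa_alloc_reserve(my_alloc, 100, 4);

	/* Each dpa_alloc_free() with the same base/count drops the refcount;
	 * the node is deleted once it hits zero.  A reserved node has
	 * is_alloced == 0, so its IDs do not go back onto the free list. */
	dpa_alloc_free(my_alloc, 100, 4);
	dpa_alloc_free(my_alloc, 100, 4);

	/* dpa_alloc_check() only scans the free list, so it reports 0 here:
	 * IDs 100..103 were reserved, never seeded or released to the pool. */
	WARN_ON(dpa_alloc_check(my_alloc, 100) != 0);
}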