summaryrefslogtreecommitdiff
path: root/drivers/staging/fsl_qbman/fsl_usdpaa.c
diff options
context:
space:
mode:
authorRoy Pledge <Roy.Pledge@freescale.com>2013-09-11 14:48:07 (GMT)
committerRivera Jose-B46482 <German.Rivera@freescale.com>2013-09-16 18:34:32 (GMT)
commit9af64af96d4b6a79e08b7d082c168ded11f742b9 (patch)
tree54d9e3caf845481eb4cb731d051b0eddb36feb7c /drivers/staging/fsl_qbman/fsl_usdpaa.c
parentf059999a09ac09c8f2114bb0b8a819b1f3b9ffc4 (diff)
downloadlinux-fsl-qoriq-9af64af96d4b6a79e08b7d082c168ded11f742b9.tar.xz
Fix USDPAA dma_mem allocator when blocks needed to be split
Also rework the fragment management to eliminate fragmentation

Signed-off-by: Roy Pledge <Roy.Pledge@freescale.com>
Change-Id: Ieb1aaf3b7d7e3d42b78efe2411952f10bbd2803d
Reviewed-on: http://git.am.freescale.net:8181/4774
Tested-by: Review Code-CDREVIEW <CDREVIEW@freescale.com>
Reviewed-by: Wang Haiying-R54964 <Haiying.Wang@freescale.com>
Reviewed-by: Thorpe Geoff-R01361 <Geoff.Thorpe@freescale.com>
Reviewed-by: Rivera Jose-B46482 <German.Rivera@freescale.com>
Diffstat (limited to 'drivers/staging/fsl_qbman/fsl_usdpaa.c')
-rw-r--r--drivers/staging/fsl_qbman/fsl_usdpaa.c141
1 file changed, 51 insertions, 90 deletions
diff --git a/drivers/staging/fsl_qbman/fsl_usdpaa.c b/drivers/staging/fsl_qbman/fsl_usdpaa.c
index 033cfbb..b6f0ef2 100644
--- a/drivers/staging/fsl_qbman/fsl_usdpaa.c
+++ b/drivers/staging/fsl_qbman/fsl_usdpaa.c
@@ -220,57 +220,24 @@ static struct mem_fragment *split_frag(struct mem_fragment *frag)
return x[2];
}
-/* Conversely, when a fragment is released we look to see whether its
- * similarly-split siblings are free to be reassembled. */
-static struct mem_fragment *merge_frag(struct mem_fragment *frag)
+/* Walk the list of fragments and adjoin neighbouring segments if possible */
+static void compress_frags(void)
{
- /* If this fragment can be merged with its siblings, it will have
- * newbase and newlen as its geometry. */
- uint64_t newlen = frag->len << 2;
- uint64_t newbase = frag->base & ~(newlen - 1);
- struct mem_fragment *tmp, *leftmost = frag, *rightmost = frag;
-
- /* If this fragment is already at root size don't allow merge */
- if (frag->len == frag->root_len)
- return NULL;
- /* Scan left until we find the start */
- tmp = list_entry(frag->list.prev, struct mem_fragment, list);
- while ((&tmp->list != &mem_list) && (tmp->base >= newbase)) {
- if (tmp->refs)
- return NULL;
- if (tmp->len != frag->len)
- return NULL;
- leftmost = tmp;
- tmp = list_entry(tmp->list.prev, struct mem_fragment, list);
- }
- /* Scan right until we find the end */
- tmp = list_entry(frag->list.next, struct mem_fragment, list);
- while ((&tmp->list != &mem_list) && (tmp->base < (newbase + newlen))) {
- if (tmp->refs)
- return NULL;
- if (tmp->len != frag->len)
- return NULL;
- rightmost = tmp;
- tmp = list_entry(tmp->list.next, struct mem_fragment, list);
- }
- if (leftmost == rightmost)
- return NULL;
- /* OK, we can merge */
- frag = leftmost;
- frag->len = newlen;
- frag->pfn_len = newlen >> PAGE_SHIFT;
- while (1) {
- int lastone;
- tmp = list_entry(frag->list.next, struct mem_fragment, list);
- lastone = (tmp == rightmost);
- if (&tmp->list == &mem_list)
- break;
- list_del(&tmp->list);
- kfree(tmp);
- if (lastone)
- break;
+ /* Walk the fragment list and combine fragments */
+ struct mem_fragment *frag, *tmpfrag;
+ list_for_each_entry_safe(frag, tmpfrag, &mem_list, list) {
+ struct mem_fragment *next_frag =
+ list_entry(frag->list.next, struct mem_fragment, list);
+ if (frag->refs == 0 &&
+ frag->len < frag->root_len &&
+ &next_frag->list != &mem_list) {
+ if (next_frag->refs == 0) {
+ /* Merge with next */
+ next_frag->len += frag->len;
+ list_del(&frag->list);
+ }
+ }
}
- return frag;
}
/* Hook from arch/powerpc/mm/mem.c */
@@ -464,8 +431,8 @@ __maybe_unused static void dump_frags(void)
struct mem_fragment *frag;
int i = 0;
list_for_each_entry(frag, &mem_list, list) {
- pr_info("FRAG %d: base 0x%llx len 0x%llx root_len 0x%llx\n",
- i, frag->base, frag->len, frag->root_len);
+ pr_info("FRAG %d: base 0x%llx len 0x%llx root_len 0x%llx refs %d\n",
+ i, frag->base, frag->len, frag->root_len, frag->refs);
++i;
}
}
@@ -558,15 +525,12 @@ static int usdpaa_release(struct inode *inode, struct file *filp)
}
/* Check each fragment and merge if the ref count is 0 */
for (i = 0; i < map->frag_count; i++) {
- if (!--current_frag->refs) {
- struct mem_fragment *frag = current_frag;
- do {
- frag = merge_frag(frag);
- } while (frag);
- }
+ --current_frag->refs;
current_frag = list_entry(current_frag->list.next,
struct mem_fragment, list);
}
+
+ compress_frags();
list_del(&map->list);
kfree(map);
}
@@ -608,8 +572,6 @@ static int check_mmap_dma(struct ctx *ctx, struct vm_area_struct *vma,
for (i = 0; i < map->frag_count; i++) {
if (frag->pfn_base == vma->vm_pgoff) {
*match = 1;
- if (frag->len != (vma->vm_end - vma->vm_start))
- return -EINVAL;
*pfn = frag->pfn_base;
return 0;
}
@@ -705,6 +667,7 @@ static unsigned long usdpaa_get_unmapped_area(struct file *file,
while (vma) {
if ((addr + len) < vma->vm_start)
return addr;
+
addr = USDPAA_MEM_ROUNDUP(vma->vm_end, len);
vma = vma->vm_next;
}
@@ -914,7 +877,7 @@ static long ioctl_dma_map(struct file *fp, struct ctx *ctx,
/* See if the next x fragments are free
and can accomidate the size */
u32 found_size = largest_page;
- next_frag = list_entry(frag->list.next,
+ next_frag = list_entry(frag->list.prev,
struct mem_fragment,
list);
/* If the fragement is too small check
@@ -926,6 +889,10 @@ static long ioctl_dma_map(struct file *fp, struct ctx *ctx,
next_frag->len == 0)
break; /* not enough space */
found_size += next_frag->len;
+ next_frag = list_entry(
+ next_frag->list.prev,
+ struct mem_fragment,
+ list);
}
if (found_size >= i->len) {
/* Success! there is enough contigous
@@ -952,20 +919,23 @@ do_map:
BUG_ON(next_frag->len == 0);
while ((next_frag->len + so_far) > i->len) {
/* Split frag until they match */
- split_frag(next_frag);
+ if (next_frag == start_frag)
+ start_frag = next_frag = split_frag(next_frag);
+ else
+ next_frag = split_frag(next_frag);
}
so_far += next_frag->len;
++frag_count;
- next_frag = list_entry(next_frag->list.next,
+ next_frag = list_entry(next_frag->list.prev,
struct mem_fragment, list);
}
/* we need to reserve start count fragments starting at start frag */
- next_frag = start_frag;
for (k = 0; k < frag_count; k++) {
- next_frag->refs++;
- next_frag = list_entry(next_frag->list.next,
- struct mem_fragment, list);
+ start_frag->refs++;
+ if (k+1 != frag_count)
+ start_frag = list_entry(start_frag->list.prev,
+ struct mem_fragment, list);
}
start_frag->flags = i->flags;
@@ -982,32 +952,23 @@ do_map:
i->phys_addr = start_frag->base;
out:
spin_unlock(&mem_lock);
+
if (!ret) {
unsigned long longret;
- unsigned long next_addr = PAGE_SIZE;
- next_frag = start_frag;
- for (k = 0; k < frag_count; k++) {
- down_write(&current->mm->mmap_sem);
- longret = do_mmap_pgoff(fp, next_addr, next_frag->len,
- PROT_READ |
- (i->flags &
- USDPAA_DMA_FLAG_RDONLY ? 0
- : PROT_WRITE),
- MAP_SHARED,
- next_frag->pfn_base);
- up_write(&current->mm->mmap_sem);
- if (longret & ~PAGE_MASK)
- ret = (int)longret;
- else {
- if (k == 0)
- i->ptr = (void *)longret;
- else
- BUG_ON(next_addr != longret);
- next_addr = longret + next_frag->len;
- }
- next_frag = list_entry(next_frag->list.next,
- struct mem_fragment, list);
- }
+ down_write(&current->mm->mmap_sem);
+ longret = do_mmap_pgoff(fp, PAGE_SIZE, map->total_size,
+ PROT_READ |
+ (i->flags &
+ USDPAA_DMA_FLAG_RDONLY ? 0
+ : PROT_WRITE),
+ MAP_SHARED,
+ start_frag->pfn_base);
+ up_write(&current->mm->mmap_sem);
+ if (longret & ~PAGE_MASK)
+ ret = (int)longret;
+ else
+ i->ptr = (void *)longret;
+
} else
kfree(map);
return ret;