author		Rebecca Schultz Zavin <rebecca@android.com>	2013-12-13 22:24:45 (GMT)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2013-12-14 16:57:17 (GMT)
commit		c13bd1c4eb714c08214e897fcbe51b13e0e0f279 (patch)
tree		471b60bb44863c045e0252c44a5cf22c66e5f7d9 /drivers/staging/android/ion/ion_cma_heap.c
parent		349c9e13855109df99c5205a4e8d53d9fa169490 (diff)
gpu: ion: Fix performance issue in faulting code
Previously the code to fault ion buffers in one page at a time had a performance problem caused by the requirement to traverse the sg list looking for the right page to load in (a result of the fact that the items in the list may not be of uniform size). To fix the problem, for buffers that will be faulted in, also keep a flat array of all the pages in the buffer to use from the fault handler. To recover some of the additional memory footprint this creates per buffer, dirty bits used to indicate which pages have been faulted in to the cpu are now stored in the low bit of each page struct pointer in the page array.

Signed-off-by: Rebecca Schultz Zavin <rebecca@android.com>
[jstultz: modified patch to apply to staging directory]
Signed-off-by: John Stultz <john.stultz@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
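
The commit message relies on the fact that a struct page pointer is always at least word-aligned, so its low bit is free to carry the per-page "faulted in" flag. The following is a minimal, self-contained sketch of that low-bit tagging idea only; it is not code from this patch, and the helper names (pg_mark_dirty, pg_is_dirty, pg_ptr) are made up for illustration.

/*
 * Hedged sketch -- not from the patch. Shows how a dirty bit can be
 * folded into the low bit of an aligned pointer and recovered later.
 */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

struct page;				/* opaque stand-in for the kernel's struct page */

#define PG_DIRTY_BIT	0x1UL

static inline struct page *pg_mark_dirty(struct page *p)
{
	return (struct page *)((uintptr_t)p | PG_DIRTY_BIT);
}

static inline bool pg_is_dirty(struct page *p)
{
	return (uintptr_t)p & PG_DIRTY_BIT;
}

static inline struct page *pg_ptr(struct page *p)
{
	return (struct page *)((uintptr_t)p & ~PG_DIRTY_BIT);
}

int main(void)
{
	/* Fake, aligned "page" address just to exercise the helpers. */
	struct page *p = (struct page *)(uintptr_t)0x1000;

	p = pg_mark_dirty(p);
	printf("dirty=%d addr=%p\n", (int)pg_is_dirty(p), (void *)pg_ptr(p));
	return 0;
}

Because the tag lives inside the pointer itself, the flat page array costs no extra storage for the dirty bits; the only requirement is that every reader masks the low bit off before dereferencing.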
Diffstat (limited to 'drivers/staging/android/ion/ion_cma_heap.c')
-rw-r--r--	drivers/staging/android/ion/ion_cma_heap.c	35
1 file changed, 3 insertions, 32 deletions
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index 74e3c77..86b6cf5 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -58,29 +58,6 @@ int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
return 0;
}
-/*
- * Create scatter-list for each page of the already allocated DMA buffer.
- */
-int ion_cma_get_sgtable_per_page(struct device *dev, struct sg_table *sgt,
-				 void *cpu_addr, dma_addr_t handle, size_t size)
-{
-	struct page *page = virt_to_page(cpu_addr);
-	int ret, i;
-	struct scatterlist *sg;
-
-	ret = sg_alloc_table(sgt, PAGE_ALIGN(size) / PAGE_SIZE, GFP_KERNEL);
-	if (unlikely(ret))
-		return ret;
-
-	sg = sgt->sgl;
-	for (i = 0; i < (PAGE_ALIGN(size) / PAGE_SIZE); i++) {
-		page = virt_to_page(cpu_addr + (i * PAGE_SIZE));
-		sg_set_page(sg, page, PAGE_SIZE, 0);
-		sg = sg_next(sg);
-	}
-	return 0;
-}
-
/* ION CMA heap operations functions */
static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
unsigned long len, unsigned long align,
@@ -111,15 +88,9 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
goto free_mem;
}
-	if (ion_buffer_fault_user_mappings(buffer)) {
-		if (ion_cma_get_sgtable_per_page
-			(dev, info->table, info->cpu_addr, info->handle, len))
-			goto free_table;
-	} else {
-		if (ion_cma_get_sgtable
-			(dev, info->table, info->cpu_addr, info->handle, len))
-			goto free_table;
-	}
+	if (ion_cma_get_sgtable
+		(dev, info->table, info->cpu_addr, info->handle, len))
+		goto free_table;
/* keep this for memory release */
buffer->priv_virt = info;
dev_dbg(dev, "Allocate buffer %p\n", buffer);
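
For reference, ion_cma_get_sgtable(), the path this patch keeps (its tail is visible at the top of the first hunk), describes the whole physically contiguous CMA allocation with a single scatterlist entry instead of one entry per page. The sketch below is reconstructed from the surrounding context, not quoted from the tree; consult the actual file for the authoritative version.

/*
 * Sketch of the retained single-entry helper (assumes the usual
 * <linux/scatterlist.h> and <linux/mm.h> definitions are in scope).
 */
int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
			void *cpu_addr, dma_addr_t handle, size_t size)
{
	struct page *page = virt_to_page(cpu_addr);
	int ret;

	/* One scatterlist entry is enough for a contiguous buffer. */
	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return 0;
}

With the per-page variant gone, buffers that are faulted in one page at a time now rely on the flat page array kept by the core ion code rather than on a page-granular sg table built by this heap.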