author     Rebecca Schultz Zavin <rebecca@android.com>    2013-12-13 22:24:45 (GMT)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2013-12-14 16:57:17 (GMT)
commit     c13bd1c4eb714c08214e897fcbe51b13e0e0f279 (patch)
tree       471b60bb44863c045e0252c44a5cf22c66e5f7d9 /drivers/staging/android/ion/ion_priv.h
parent     349c9e13855109df99c5205a4e8d53d9fa169490 (diff)
download   linux-c13bd1c4eb714c08214e897fcbe51b13e0e0f279.tar.xz
gpu: ion: Fix performance issue in faulting code
Previously the code to fault ion buffers in one page at a time had a
performance problem caused by the requirement to traverse the sg list
looking for the right page to load in (a result of the fact that the
items in the list may not be of uniform size). To fix the problem, for
buffers that will be faulted in, also keep a flat array of all the
pages in the buffer to use from the fault handler. To recover some of
the additional memory footprint this creates per buffer, dirty bits
used to indicate which pages have been faulted in to the cpu are now
stored in the low bit of each page struct pointer in the page array.

Signed-off-by: Rebecca Schultz Zavin <rebecca@android.com>
[jstultz: modified patch to apply to staging directory]
Signed-off-by: John Stultz <john.stultz@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
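The tagging scheme works because struct page pointers are always at least
word aligned, so bit 0 of every entry in the new pages array is guaranteed
to be zero and can carry the per-page dirty state with no extra allocation.
A minimal sketch of such accessors, with illustrative helper names that are
not taken from this patch, might look like:

    #include <linux/mm_types.h>	/* struct page */
    #include <linux/types.h>	/* bool */

    /*
     * Illustrative only -- these helpers are not part of this patch.
     * struct page pointers are at least word aligned, so bit 0 of each
     * pages[] entry is free to record whether that page has already
     * been faulted in by the cpu.
     */
    static inline struct page *example_buffer_page(struct page **pages,
    					       unsigned long idx)
    {
    	/* mask off the tag bit to recover the real pointer */
    	return (struct page *)((unsigned long)pages[idx] & ~1UL);
    }

    static inline bool example_buffer_page_dirty(struct page **pages,
    					     unsigned long idx)
    {
    	return (unsigned long)pages[idx] & 1UL;
    }

    static inline void example_buffer_page_mark_dirty(struct page **pages,
    						  unsigned long idx)
    {
    	pages[idx] = (struct page *)((unsigned long)pages[idx] | 1UL);
    }

With the flat array in place, the fault handler can reach the page for a
given fault offset with a single array index instead of walking the
scatterlist, which is what the old code had to do because the sg entries
may not be of uniform size.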
Diffstat (limited to 'drivers/staging/android/ion/ion_priv.h')
-rw-r--r--  drivers/staging/android/ion/ion_priv.h  20
1 file changed, 16 insertions(+), 4 deletions(-)
diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
index f5a09b6..965471a 100644
--- a/drivers/staging/android/ion/ion_priv.h
+++ b/drivers/staging/android/ion/ion_priv.h
@@ -46,9 +46,8 @@ struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);
* @vaddr: the kernel mapping if kmap_cnt is not zero
* @dmap_cnt: number of times the buffer is mapped for dma
* @sg_table: the sg table for the buffer if dmap_cnt is not zero
- * @dirty: bitmask representing which pages of this buffer have
- * been dirtied by the cpu and need cache maintenance
- * before dma
+ * @pages: flat array of pages in the buffer -- used by fault
+ * handler and only valid for buffers that are faulted in
* @vmas: list of vma's mapping this buffer
* @handle_count: count of handles referencing this buffer
* @task_comm: taskcomm of last client to reference this buffer in a
@@ -75,7 +74,7 @@ struct ion_buffer {
void *vaddr;
int dmap_cnt;
struct sg_table *sg_table;
- unsigned long *dirty;
+ struct page **pages;
struct list_head vmas;
/* used to track orphaned buffers */
int handle_count;
@@ -214,6 +213,19 @@ int ion_heap_map_user(struct ion_heap *, struct ion_buffer *,
int ion_heap_buffer_zero(struct ion_buffer *buffer);
/**
+ * ion_heap_alloc_pages - allocate pages from alloc_pages
+ * @buffer: the buffer to allocate for, used to extract the flags
+ * @gfp_flags: the gfp_t for the allocation
+ * @order: the order of the allocation
+ *
+ * This function allocates from alloc_pages and also does any other
+ * necessary operations based on the buffer->flags. For buffers which
+ * will be faulted in, the pages are split using split_page
+ */
+struct page *ion_heap_alloc_pages(struct ion_buffer *buffer, gfp_t gfp_flags,
+ unsigned int order);
+
+/**
* ion_heap_init_deferred_free -- initialize deferred free functionality
* @heap: the heap
*