author    Linus Torvalds <torvalds@linux-foundation.org>  2011-10-26 19:46:18 (GMT)
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-10-26 19:46:18 (GMT)
commit    138c4ae9cfda8fdcf9e137457853b09ef8cf8f77 (patch)
tree      704c363de6d5868b08e9ae31a436ff04d423f625 /include
parent    3b3dd79d6a8b3debd0291465fc8cd9caf765d545 (diff)
parent    e182a345d40deba7c3165a2857812bf403818319 (diff)
download  linux-fsl-qoriq-138c4ae9cfda8fdcf9e137457853b09ef8cf8f77.tar.xz
Merge branch 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux
* 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux:
  tools, slub: Fix off-by-one buffer corruption after readlink() call
  slub: Discard slab page when node partial > minimum partial number
  slub: correct comments error for per cpu partial
  mm: restrict access to slab files under procfs and sysfs
  slub: Code optimization in get_partial_node()
  slub: doc: update the slabinfo.c file path
  slub: explicitly document position of inserting slab to partial list
  slub: update slabinfo tools to report per cpu partial list statistics
  slub: per cpu cache for partial pages
  slub: return object pointer from get_partial() / new_slab().
  slub: pass kmem_cache_cpu pointer to get_partial()
  slub: Prepare inuse field in new_slab()
  slub: Remove useless statements in __slab_alloc
  slub: free slabs without holding locks
  slub: use print_hex_dump
  slab: use print_hex_dump
Diffstat (limited to 'include')
-rw-r--r--  include/linux/mm_types.h   14
-rw-r--r--  include/linux/slub_def.h    4
2 files changed, 17 insertions, 1 deletion
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 29971a5..c93d00a 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -79,9 +79,21 @@ struct page {
 	};
 
 	/* Third double word block */
-	struct list_head lru;	/* Pageout list, eg. active_list
-				 * protected by zone->lru_lock !
-				 */
+	union {
+		struct list_head lru;	/* Pageout list, eg. active_list
+					 * protected by zone->lru_lock !
+					 */
+		struct {		/* slub per cpu partial pages */
+			struct page *next;	/* Next partial slab */
+#ifdef CONFIG_64BIT
+			int pages;	/* Nr of partial slabs left */
+			int pobjects;	/* Approximate # of objects */
+#else
+			short int pages;
+			short int pobjects;
+#endif
+		};
+	};
 
 	/* Remainder is not double word aligned */
 	union {
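The hunk above overlays SLUB's per-cpu partial bookkeeping on the three words that normally hold page->lru, so the new fields cost struct page no extra space. A minimal userspace sketch of that overlay (not kernel code; struct page is reduced to just this block, and main() is only a size check, assuming an LP64 target):

/*
 * The slub view must fit exactly in the storage of page->lru (two
 * pointers).  On LP64 that is 8 (next) + 4 (pages) + 4 (pobjects)
 * = 16 bytes; on 32-bit the patch shrinks the counters to short,
 * giving 4 + 2 + 2 = 8 bytes, which is why it needs the #ifdef.
 */
#include <assert.h>

struct list_head { struct list_head *next, *prev; };

struct page {
	union {
		struct list_head lru;		/* pageout list view */
		struct {			/* slub per cpu partial view */
			struct page *next;	/* next partial slab */
			int pages;		/* nr of partial slabs left */
			int pobjects;		/* approximate # of objects */
		};
	};
};

int main(void)
{
	/* The slub view adds no size over the lru view (LP64 assumed). */
	assert(sizeof(struct page) == sizeof(struct list_head));
	return 0;
}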
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index f58d641..a32bcfd 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -36,12 +36,15 @@ enum stat_item {
 	ORDER_FALLBACK,		/* Number of times fallback was necessary */
 	CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
 	CMPXCHG_DOUBLE_FAIL,	/* Number of times that cmpxchg double did not match */
+	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
+	CPU_PARTIAL_FREE,	/* Used cpu partial on free */
 	NR_SLUB_STAT_ITEMS };
 
 struct kmem_cache_cpu {
 	void **freelist;	/* Pointer to next available object */
 	unsigned long tid;	/* Globally unique transaction id */
 	struct page *page;	/* The slab from which we are allocating */
+	struct page *partial;	/* Partially allocated frozen slabs */
 	int node;		/* The node of the page (or -1 for debug) */
 #ifdef CONFIG_SLUB_STATS
 	unsigned stat[NR_SLUB_STAT_ITEMS];
@@ -79,6 +79,7 @@ struct kmem_cache {
 	int size;		/* The size of an object including meta data */
 	int objsize;		/* The size of an object without meta data */
 	int offset;		/* Free pointer offset. */
+	int cpu_partial;	/* Number of per cpu partial objects to keep around */
 	struct kmem_cache_order_objects oo;
 
 	/* Allocation and freeing of slabs */
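The new kmem_cache_cpu->partial list and the kmem_cache->cpu_partial threshold work together: frozen partial slabs are pushed onto a per-cpu chain whose head caches running totals, and once the object estimate exceeds the threshold the whole chain is handed back to the node partial lists. A hypothetical, self-contained model of that policy (this is not the kernel's put_cpu_partial(); cache_partial_page() and drain_to_node() are made-up stand-ins):

/*
 * Simplified model of the per-cpu partial policy, not kernel code.
 * drain_to_node() stands in for unfreezing the chain back onto the
 * node partial lists.
 */
#include <stddef.h>
#include <stdio.h>

struct page {
	struct page *next;	/* next partial slab in the chain */
	int pages;		/* nr of partial slabs in this chain */
	int pobjects;		/* approximate free objects in the chain */
};

struct kmem_cache_cpu { struct page *partial; };
struct kmem_cache { int cpu_partial; };

static void drain_to_node(struct page *chain)
{
	printf("unfreezing %d slabs (~%d objects) to node lists\n",
	       chain->pages, chain->pobjects);
}

static void cache_partial_page(struct kmem_cache *s, struct kmem_cache_cpu *c,
			       struct page *page, int free_objects)
{
	/* Past the per-cache threshold?  Hand the chain back first. */
	if (c->partial && c->partial->pobjects > s->cpu_partial) {
		drain_to_node(c->partial);
		c->partial = NULL;
	}
	/* Push the page; the chain head carries the running totals. */
	page->pages = (c->partial ? c->partial->pages : 0) + 1;
	page->pobjects = (c->partial ? c->partial->pobjects : 0) + free_objects;
	page->next = c->partial;
	c->partial = page;
}

int main(void)
{
	struct kmem_cache s = { .cpu_partial = 30 };
	struct kmem_cache_cpu c = { .partial = NULL };
	struct page p1 = {0}, p2 = {0}, p3 = {0};

	cache_partial_page(&s, &c, &p1, 20);	/* chain: ~20 objects */
	cache_partial_page(&s, &c, &p2, 20);	/* chain: ~40 objects */
	cache_partial_page(&s, &c, &p3, 20);	/* 40 > 30: drains, restarts */
	return 0;
}

Keeping pages and pobjects in the chain head is what makes the threshold test O(1): the free path never has to walk the list to decide whether to drain.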