Diffstat (limited to 'include')
-rw-r--r--   include/linux/mm_types.h |  7
-rw-r--r--   include/linux/slab.h     |  9
-rw-r--r--   include/linux/slab_def.h |  6
3 files changed, 15 insertions, 7 deletions
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 7d9ebb7..f8f5162 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -128,10 +128,7 @@ struct page {
 		};

 		struct list_head list;	/* slobs list of pages */
-		struct {		/* slab fields */
-			struct kmem_cache *slab_cache;
-			struct slab *slab_page;
-		};
+		struct slab *slab_page; /* slab fields */
 	};

 	/* Remainder is not double word aligned */
@@ -146,7 +143,7 @@ struct page {
 #if USE_SPLIT_PTLOCKS
 		spinlock_t ptl;
 #endif
-		struct kmem_cache *slab;	/* SLUB: Pointer to slab */
+		struct kmem_cache *slab_cache;	/* SL[AU]B: Pointer to slab */
 		struct page *first_page;	/* Compound tail pages */
 	};
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 83d1a14..743a104 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -128,7 +128,6 @@ struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
 void kmem_cache_destroy(struct kmem_cache *);
 int kmem_cache_shrink(struct kmem_cache *);
 void kmem_cache_free(struct kmem_cache *, void *);
-unsigned int kmem_cache_size(struct kmem_cache *);

 /*
  * Please use this macro to create slab caches. Simply specify the
@@ -388,6 +387,14 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
 	return kmalloc_node(size, flags | __GFP_ZERO, node);
 }

+/*
+ * Determine the size of a slab object
+ */
+static inline unsigned int kmem_cache_size(struct kmem_cache *s)
+{
+	return s->object_size;
+}
+
 void __init kmem_cache_init_late(void);

 #endif	/* _LINUX_SLAB_H */
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index cc290f0..45c0356 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -89,9 +89,13 @@ struct kmem_cache {
 	 * (see kmem_cache_init())
 	 * We still use [NR_CPUS] and not [1] or [0] because cache_cache
 	 * is statically defined, so we reserve the max number of cpus.
+	 *
+	 * We also need to guarantee that the list is able to accomodate a
+	 * pointer for each node since "nodelists" uses the remainder of
+	 * available pointers.
 	 */
 	struct kmem_list3 **nodelists;
-	struct array_cache *array[NR_CPUS];
+	struct array_cache *array[NR_CPUS + MAX_NUMNODES];
 	/*
 	 * Do not add fields after array[]
 	 */
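
For context, the slab.h hunk above replaces the exported kmem_cache_size() declaration with a static inline that simply reads the unified object_size field. The sketch below is not part of the patch; it is a hypothetical module (names foo_cache, foo_init, foo_exit are made up for illustration) showing how a caller would observe the change:

	/*
	 * Hypothetical module sketch (not from the patch): create a cache and
	 * query its object size through the new inline kmem_cache_size().
	 */
	#include <linux/module.h>
	#include <linux/slab.h>

	struct foo {
		int id;
		char payload[60];
	};

	static struct kmem_cache *foo_cachep;

	static int __init foo_init(void)
	{
		foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
					       SLAB_HWCACHE_ALIGN, NULL);
		if (!foo_cachep)
			return -ENOMEM;

		/*
		 * With this patch the call below compiles down to a read of
		 * foo_cachep->object_size instead of a call into the allocator.
		 */
		pr_info("foo_cache object size: %u\n", kmem_cache_size(foo_cachep));
		return 0;
	}

	static void __exit foo_exit(void)
	{
		kmem_cache_destroy(foo_cachep);
	}

	module_init(foo_init);
	module_exit(foo_exit);
	MODULE_LICENSE("GPL");

The slab_def.h hunk is related housekeeping on the SLAB side: since the nodelists pointers are made to use the remainder of the statically sized array[] at the end of struct kmem_cache (as the added comment states), the array is grown from NR_CPUS to NR_CPUS + MAX_NUMNODES entries so a pointer per node is guaranteed to fit.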