Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	639
1 file changed, 362 insertions(+), 277 deletions(-)
@@ -55,7 +55,7 @@
  *
  *	SMP synchronization:
  *	constructors and destructors are called without any locking.
- *	Several members in kmem_cache_t and struct slab never change, they
+ *	Several members in struct kmem_cache and struct slab never change, they
  *	are accessed without any locking.
  *	The per-cpu arrays are never accessed from the wrong cpu, no locking,
  *	and local interrupts are disabled so slab code is preempt-safe.
@@ -244,7 +244,7 @@ struct slab {
  */
 struct slab_rcu {
 	struct rcu_head head;
-	kmem_cache_t *cachep;
+	struct kmem_cache *cachep;
 	void *addr;
 };
@@ -316,6 +316,8 @@ struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
  */
 static __always_inline int index_of(const size_t size)
 {
+	extern void __bad_size(void);
+
 	if (__builtin_constant_p(size)) {
 		int i = 0;
@@ -326,19 +328,16 @@ static __always_inline int index_of(const size_t size)
 			i++;
 #include "linux/kmalloc_sizes.h"
 #undef CACHE
-		{
-			extern void __bad_size(void);
-			__bad_size();
-		}
+		__bad_size();
 	} else
-		BUG();
+		__bad_size();
 	return 0;
 }

 #define INDEX_AC index_of(sizeof(struct arraycache_init))
 #define INDEX_L3 index_of(sizeof(struct kmem_list3))

-static inline void kmem_list3_init(struct kmem_list3 *parent)
+static void kmem_list3_init(struct kmem_list3 *parent)
 {
 	INIT_LIST_HEAD(&parent->slabs_full);
 	INIT_LIST_HEAD(&parent->slabs_partial);
@@ -364,7 +363,7 @@ static inline void kmem_list3_init(struct kmem_list3 *parent)
 	} while (0)

 /*
- * kmem_cache_t
+ * struct kmem_cache
  *
  * manages a cache.
  */
@@ -375,7 +374,7 @@ struct kmem_cache {
 	unsigned int batchcount;
 	unsigned int limit;
 	unsigned int shared;
-	unsigned int objsize;
+	unsigned int buffer_size;
 /* 2) touched by every alloc & free from the backend */
 	struct kmem_list3 *nodelists[MAX_NUMNODES];
 	unsigned int flags;		/* constant flags */
@@ -392,15 +391,15 @@ struct kmem_cache {
 	size_t colour;			/* cache colouring range */
 	unsigned int colour_off;	/* colour offset */
 	unsigned int colour_next;	/* cache colouring */
-	kmem_cache_t *slabp_cache;
+	struct kmem_cache *slabp_cache;
 	unsigned int slab_size;
 	unsigned int dflags;		/* dynamic flags */

 	/* constructor func */
-	void (*ctor) (void *, kmem_cache_t *, unsigned long);
+	void (*ctor) (void *, struct kmem_cache *, unsigned long);

 	/* de-constructor func */
-	void (*dtor) (void *, kmem_cache_t *, unsigned long);
+	void (*dtor) (void *, struct kmem_cache *, unsigned long);

 /* 4) cache creation/removal */
 	const char *name;
@@ -423,8 +422,14 @@ struct kmem_cache {
 	atomic_t freemiss;
 #endif
 #if DEBUG
-	int dbghead;
-	int reallen;
+	/*
+	 * If debugging is enabled, then the allocator can add additional
+	 * fields and/or padding to every object. buffer_size contains the total
+	 * object size including these internal fields, the following two
+	 * variables contain the offset to the user object and its size.
+	 */
+	int obj_offset;
+	int obj_size;
 #endif
 };
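The index_of() change above consolidates a link-time assertion: __bad_size() is declared but deliberately never defined, so any call that survives constant folding becomes an undefined-reference error when the kernel links. A minimal userspace sketch of the same trick — the size table and function name here are illustrative, not the kernel's; build with -O1 or higher so the dead calls are folded away:

#include <stddef.h>

/* Declared, never defined: a surviving call fails at link time. */
extern void __bad_size(void);

static inline __attribute__((always_inline)) int index_of(size_t size)
{
	if (__builtin_constant_p(size)) {
		if (size <= 32)
			return 0;
		if (size <= 64)
			return 1;
		if (size <= 128)
			return 2;
		__bad_size();	/* unsupported constant size */
	} else
		__bad_size();	/* non-constant size: build error */
	return 0;
}

int main(void)
{
	/* Constant argument: folds to 0, links cleanly at -O1+. */
	return index_of(sizeof(long));
}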
@@ -495,50 +500,50 @@ struct kmem_cache {

 /* memory layout of objects:
  * 0		: objp
- * 0 .. cachep->dbghead - BYTES_PER_WORD - 1: padding. This ensures that
+ * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
  * 		the end of an object is aligned with the end of the real
  * 		allocation. Catches writes behind the end of the allocation.
- * cachep->dbghead - BYTES_PER_WORD .. cachep->dbghead - 1:
+ * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
  * 		redzone word.
- * cachep->dbghead: The real object.
- * cachep->objsize - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
- * cachep->objsize - 1* BYTES_PER_WORD: last caller address [BYTES_PER_WORD long]
+ * cachep->obj_offset: The real object.
+ * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
+ * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address [BYTES_PER_WORD long]
  */
-static int obj_dbghead(kmem_cache_t *cachep)
+static int obj_offset(struct kmem_cache *cachep)
 {
-	return cachep->dbghead;
+	return cachep->obj_offset;
 }

-static int obj_reallen(kmem_cache_t *cachep)
+static int obj_size(struct kmem_cache *cachep)
 {
-	return cachep->reallen;
+	return cachep->obj_size;
 }

-static unsigned long *dbg_redzone1(kmem_cache_t *cachep, void *objp)
+static unsigned long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
 {
 	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
-	return (unsigned long*) (objp+obj_dbghead(cachep)-BYTES_PER_WORD);
+	return (unsigned long*) (objp+obj_offset(cachep)-BYTES_PER_WORD);
 }

-static unsigned long *dbg_redzone2(kmem_cache_t *cachep, void *objp)
+static unsigned long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
 {
 	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
 	if (cachep->flags & SLAB_STORE_USER)
-		return (unsigned long *)(objp + cachep->objsize -
+		return (unsigned long *)(objp + cachep->buffer_size -
 					 2 * BYTES_PER_WORD);
-	return (unsigned long *)(objp + cachep->objsize - BYTES_PER_WORD);
+	return (unsigned long *)(objp + cachep->buffer_size - BYTES_PER_WORD);
 }

-static void **dbg_userword(kmem_cache_t *cachep, void *objp)
+static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 {
 	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
-	return (void **)(objp + cachep->objsize - BYTES_PER_WORD);
+	return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD);
 }

 #else

-#define obj_dbghead(x)			0
-#define obj_reallen(cachep)		(cachep->objsize)
+#define obj_offset(x)			0
+#define obj_size(cachep)		(cachep->buffer_size)
 #define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long *)NULL;})
 #define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long *)NULL;})
 #define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})
@@ -591,6 +596,18 @@ static inline struct slab *page_get_slab(struct page *page)
 	return (struct slab *)page->lru.prev;
 }

+static inline struct kmem_cache *virt_to_cache(const void *obj)
+{
+	struct page *page = virt_to_page(obj);
+	return page_get_cache(page);
+}
+
+static inline struct slab *virt_to_slab(const void *obj)
+{
+	struct page *page = virt_to_page(obj);
+	return page_get_slab(page);
+}
+
 /* These are the default caches for kmalloc. Custom caches can have other sizes.
  */
 struct cache_sizes malloc_sizes[] = {
 #define CACHE(x) { .cs_size = (x) },
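With SLAB_RED_ZONE and SLAB_STORE_USER enabled, each buffer is laid out as [red zone][user object][red zone][last caller], which is exactly what the renamed obj_offset/obj_size/buffer_size fields describe. A small userspace model of the accessor arithmetic — the struct name and marker value are illustrative:

#include <stdio.h>

#define BYTES_PER_WORD	sizeof(unsigned long)

struct cache_dbg {
	size_t obj_offset;	/* where the user object starts */
	size_t obj_size;	/* what the caller asked for */
	size_t buffer_size;	/* total size, including debug words */
};

/* Same offset arithmetic as dbg_redzone1/2 and dbg_userword. */
static unsigned long *redzone1(const struct cache_dbg *c, char *objp)
{
	return (unsigned long *)(objp + c->obj_offset - BYTES_PER_WORD);
}

static unsigned long *redzone2(const struct cache_dbg *c, char *objp)
{
	return (unsigned long *)(objp + c->buffer_size - 2 * BYTES_PER_WORD);
}

static void **userword(const struct cache_dbg *c, char *objp)
{
	return (void **)(objp + c->buffer_size - BYTES_PER_WORD);
}

int main(void)
{
	char buf[128] = { 0 };
	struct cache_dbg c = {
		.obj_offset = BYTES_PER_WORD,
		.obj_size = 128 - 3 * BYTES_PER_WORD,
		.buffer_size = 128,
	};

	*redzone1(&c, buf) = 0x170FC2A5UL;	/* arbitrary marker value */
	*redzone2(&c, buf) = 0x170FC2A5UL;
	*userword(&c, buf) = (void *)main;	/* "last caller" slot */
	printf("user object spans [%zu, %zu)\n",
	       c.obj_offset, c.obj_offset + c.obj_size);
	return 0;
}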
@@ -619,16 +636,16 @@ static struct arraycache_init initarray_generic = { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };

 /* internal cache of cache description objs */
-static kmem_cache_t cache_cache = {
+static struct kmem_cache cache_cache = {
 	.batchcount = 1,
 	.limit = BOOT_CPUCACHE_ENTRIES,
 	.shared = 1,
-	.objsize = sizeof(kmem_cache_t),
+	.buffer_size = sizeof(struct kmem_cache),
 	.flags = SLAB_NO_REAP,
 	.spinlock = SPIN_LOCK_UNLOCKED,
 	.name = "kmem_cache",
 #if DEBUG
-	.reallen = sizeof(kmem_cache_t),
+	.obj_size = sizeof(struct kmem_cache),
 #endif
 };
@@ -657,17 +674,17 @@ static enum {

 static DEFINE_PER_CPU(struct work_struct, reap_work);

-static void free_block(kmem_cache_t *cachep, void **objpp, int len, int node);
-static void enable_cpucache(kmem_cache_t *cachep);
+static void free_block(struct kmem_cache *cachep, void **objpp, int len, int node);
+static void enable_cpucache(struct kmem_cache *cachep);
 static void cache_reap(void *unused);
-static int __node_shrink(kmem_cache_t *cachep, int node);
+static int __node_shrink(struct kmem_cache *cachep, int node);

-static inline struct array_cache *ac_data(kmem_cache_t *cachep)
+static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 {
 	return cachep->array[smp_processor_id()];
 }

-static inline kmem_cache_t *__find_general_cachep(size_t size, gfp_t gfpflags)
+static inline struct kmem_cache *__find_general_cachep(size_t size, gfp_t gfpflags)
 {
 	struct cache_sizes *csizep = malloc_sizes;
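cpu_cache_get() (formerly ac_data) and __find_general_cachep() are the two lookup helpers on the allocation path: the first indexes the per-CPU head array, the second walks malloc_sizes[] for the first size class that fits. A sketch of the table walk under assumed sizes — the class list here is illustrative, not the kernel's full kmalloc_sizes.h:

#include <stdio.h>

/* Stand-in for malloc_sizes[]: ascending class sizes, 0-terminated. */
static const size_t cs_size[] = { 32, 64, 96, 128, 192, 256, 512, 1024, 0 };

/* Return the index of the first class that can hold `size`,
 * or -1 when the request is larger than every class. */
static int find_general_cachep(size_t size)
{
	int i = 0;

	while (cs_size[i] && size > cs_size[i])
		i++;
	return cs_size[i] ? i : -1;
}

int main(void)
{
	/* A 100-byte request lands in the 128-byte class (index 3). */
	printf("100 bytes -> class %d\n", find_general_cachep(100));
	printf("4096 bytes -> class %d\n", find_general_cachep(4096));
	return 0;
}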
@@ -691,43 +708,80 @@ static inline kmem_cache_t *__find_general_cachep(size_t size, gfp_t gfpflags)
 	return csizep->cs_cachep;
 }

-kmem_cache_t *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
+struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
 {
 	return __find_general_cachep(size, gfpflags);
 }
 EXPORT_SYMBOL(kmem_find_general_cachep);

-/* Cal the num objs, wastage, and bytes left over for a given slab size. */
-static void cache_estimate(unsigned long gfporder, size_t size, size_t align,
-			   int flags, size_t *left_over, unsigned int *num)
+static size_t slab_mgmt_size(size_t nr_objs, size_t align)
 {
-	int i;
-	size_t wastage = PAGE_SIZE << gfporder;
-	size_t extra = 0;
-	size_t base = 0;
+	return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
+}

-	if (!(flags & CFLGS_OFF_SLAB)) {
-		base = sizeof(struct slab);
-		extra = sizeof(kmem_bufctl_t);
-	}
-	i = 0;
-	while (i * size + ALIGN(base + i * extra, align) <= wastage)
-		i++;
-	if (i > 0)
-		i--;
+/* Calculate the number of objects and left-over bytes for a given
+   buffer size. */
+static void cache_estimate(unsigned long gfporder, size_t buffer_size,
+			   size_t align, int flags, size_t *left_over,
+			   unsigned int *num)
+{
+	int nr_objs;
+	size_t mgmt_size;
+	size_t slab_size = PAGE_SIZE << gfporder;
+
+	/*
+	 * The slab management structure can be either off the slab or
+	 * on it. For the latter case, the memory allocated for a
+	 * slab is used for:
+	 *
+	 * - The struct slab
+	 * - One kmem_bufctl_t for each object
+	 * - Padding to respect alignment of @align
+	 * - @buffer_size bytes for each object
+	 *
+	 * If the slab management structure is off the slab, then the
+	 * alignment will already be calculated into the size. Because
+	 * the slabs are all pages aligned, the objects will be at the
+	 * correct alignment when allocated.
+	 */
+	if (flags & CFLGS_OFF_SLAB) {
+		mgmt_size = 0;
+		nr_objs = slab_size / buffer_size;

-	if (i > SLAB_LIMIT)
-		i = SLAB_LIMIT;
+		if (nr_objs > SLAB_LIMIT)
+			nr_objs = SLAB_LIMIT;
+	} else {
+		/*
+		 * Ignore padding for the initial guess. The padding
+		 * is at most @align-1 bytes, and @buffer_size is at
+		 * least @align. In the worst case, this result will
+		 * be one greater than the number of objects that fit
+		 * into the memory allocation when taking the padding
+		 * into account.
+		 */
+		nr_objs = (slab_size - sizeof(struct slab)) /
+			  (buffer_size + sizeof(kmem_bufctl_t));
+
+		/*
+		 * This calculated number will be either the right
+		 * amount, or one greater than what we want.
+		 */
+		if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
+		       > slab_size)
+			nr_objs--;

-	*num = i;
-	wastage -= i * size;
-	wastage -= ALIGN(base + i * extra, align);
-	*left_over = wastage;
+		if (nr_objs > SLAB_LIMIT)
+			nr_objs = SLAB_LIMIT;
+
+		mgmt_size = slab_mgmt_size(nr_objs, align);
+	}
+	*num = nr_objs;
+	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
 }

 #define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg)

-static void __slab_error(const char *function, kmem_cache_t *cachep, char *msg)
+static void __slab_error(const char *function, struct kmem_cache *cachep, char *msg)
 {
 	printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
 	       function, cachep->name, msg);
@@ -774,9 +828,9 @@ static struct array_cache *alloc_arraycache(int node, int entries,
 }

 #ifdef CONFIG_NUMA
-static void *__cache_alloc_node(kmem_cache_t *, gfp_t, int);
+static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int);

-static inline struct array_cache **alloc_alien_cache(int node, int limit)
+static struct array_cache **alloc_alien_cache(int node, int limit)
 {
 	struct array_cache **ac_ptr;
 	int memsize = sizeof(void *) * MAX_NUMNODES;
@@ -803,7 +857,7 @@ static inline struct array_cache **alloc_alien_cache(int node, int limit)
 	return ac_ptr;
 }

-static inline void free_alien_cache(struct array_cache **ac_ptr)
+static void free_alien_cache(struct array_cache **ac_ptr)
 {
 	int i;
@@ -816,8 +870,8 @@ static inline void free_alien_cache(struct array_cache **ac_ptr)
 	kfree(ac_ptr);
 }

-static inline void __drain_alien_cache(kmem_cache_t *cachep,
-				       struct array_cache *ac, int node)
+static void __drain_alien_cache(struct kmem_cache *cachep,
+				struct array_cache *ac, int node)
 {
 	struct kmem_list3 *rl3 = cachep->nodelists[node];
@@ -829,7 +883,7 @@ static inline void __drain_alien_cache(kmem_cache_t *cachep,
 	}
 }

-static void drain_alien_cache(kmem_cache_t *cachep, struct kmem_list3 *l3)
+static void drain_alien_cache(struct kmem_cache *cachep, struct kmem_list3 *l3)
 {
 	int i = 0;
 	struct array_cache *ac;
@@ -854,7 +908,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
 				    unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
-	kmem_cache_t *cachep;
+	struct kmem_cache *cachep;
 	struct kmem_list3 *l3 = NULL;
 	int node = cpu_to_node(cpu);
 	int memsize = sizeof(struct kmem_list3);
@@ -992,7 +1046,7 @@ static struct notifier_block cpucache_notifier = { &cpuup_callback, NULL, 0 };
 /*
  * swap the static kmem_list3 with kmalloced memory
  */
-static void init_list(kmem_cache_t *cachep, struct kmem_list3 *list, int nodeid)
+static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list, int nodeid)
 {
 	struct kmem_list3 *ptr;
@@ -1032,14 +1086,14 @@ void __init kmem_cache_init(void)

 	/* Bootstrap is tricky, because several objects are allocated
 	 * from caches that do not exist yet:
-	 * 1) initialize the cache_cache cache: it contains the kmem_cache_t
+	 * 1) initialize the cache_cache cache: it contains the struct kmem_cache
 	 *    structures of all caches, except cache_cache itself: cache_cache
 	 *    is statically allocated.
 	 *    Initially an __init data area is used for the head array and the
 	 *    kmem_list3 structures, it's replaced with a kmalloc allocated
 	 *    array at the end of the bootstrap.
 	 * 2) Create the first kmalloc cache.
-	 *    The kmem_cache_t for the new cache is allocated normally.
+	 *    The struct kmem_cache for the new cache is allocated normally.
 	 *    An __init data area is used for the head array.
 	 * 3) Create the remaining kmalloc caches, with minimally sized
 	 *    head arrays.
@@ -1057,9 +1111,9 @@ void __init kmem_cache_init(void)
 	cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
 	cache_cache.nodelists[numa_node_id()] = &initkmem_list3[CACHE_CACHE];

-	cache_cache.objsize = ALIGN(cache_cache.objsize, cache_line_size());
+	cache_cache.buffer_size = ALIGN(cache_cache.buffer_size, cache_line_size());

-	cache_estimate(0, cache_cache.objsize, cache_line_size(), 0,
+	cache_estimate(0, cache_cache.buffer_size, cache_line_size(), 0,
 		       &left_over, &cache_cache.num);
 	if (!cache_cache.num)
 		BUG();
@@ -1132,8 +1186,8 @@ void __init kmem_cache_init(void)
 		ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);

 		local_irq_disable();
-		BUG_ON(ac_data(&cache_cache) != &initarray_cache.cache);
-		memcpy(ptr, ac_data(&cache_cache),
+		BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
+		memcpy(ptr, cpu_cache_get(&cache_cache),
 		       sizeof(struct arraycache_init));
 		cache_cache.array[smp_processor_id()] = ptr;
 		local_irq_enable();
@@ -1141,9 +1195,9 @@ void __init kmem_cache_init(void)
 		ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);

 		local_irq_disable();
-		BUG_ON(ac_data(malloc_sizes[INDEX_AC].cs_cachep)
+		BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep)
 		       != &initarray_generic.cache);
-		memcpy(ptr, ac_data(malloc_sizes[INDEX_AC].cs_cachep),
+		memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
 		       sizeof(struct arraycache_init));
 		malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
 		    ptr;
@@ -1170,7 +1224,7 @@ void __init kmem_cache_init(void)

 	/* 6) resize the head arrays to their final sizes */
 	{
-		kmem_cache_t *cachep;
+		struct kmem_cache *cachep;
 		mutex_lock(&cache_chain_mutex);
 		list_for_each_entry(cachep, &cache_chain, next)
 			enable_cpucache(cachep);
@@ -1181,7 +1235,7 @@ void __init kmem_cache_init(void)
 	g_cpucache_up = FULL;

 	/* Register a cpu startup notifier callback
-	 * that initializes ac_data for all new cpus
+	 * that initializes cpu_cache_get for all new cpus
 	 */
 	register_cpu_notifier(&cpucache_notifier);
@@ -1213,7 +1267,7 @@ __initcall(cpucache_init);
 * did not request dmaable memory, we might get it, but that
 * would be relatively rare and ignorable.
 */
-static void *kmem_getpages(kmem_cache_t *cachep, gfp_t flags, int nodeid)
+static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
 	struct page *page;
 	void *addr;
@@ -1239,7 +1293,7 @@ static void *kmem_getpages(kmem_cache_t *cachep, gfp_t flags, int nodeid)
 /*
 * Interface to system's page release.
 */
-static void kmem_freepages(kmem_cache_t *cachep, void *addr)
+static void kmem_freepages(struct kmem_cache *cachep, void *addr)
 {
 	unsigned long i = (1 << cachep->gfporder);
 	struct page *page = virt_to_page(addr);
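The rewritten cache_estimate() above replaces the old trial-and-error loop with closed-form arithmetic: guess the object count ignoring padding, then correct the possible off-by-one. A compilable userspace model of the on-slab case — a dummy header stands in for struct slab, and the sizes are illustrative:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

typedef unsigned short kmem_bufctl_t;
struct slab_hdr { void *p[5]; };	/* stand-in for struct slab */

static unsigned long slab_mgmt_size(unsigned long nr, unsigned long align)
{
	return ALIGN(sizeof(struct slab_hdr) + nr * sizeof(kmem_bufctl_t),
		     align);
}

/* On-slab case: first guess ignores padding (over by at most one),
 * then shrink if management data plus objects overflow the slab. */
static void estimate(unsigned long gfporder, unsigned long size,
		     unsigned long align, unsigned long *num,
		     unsigned long *left_over)
{
	unsigned long slab_size = PAGE_SIZE << gfporder;
	unsigned long nr = (slab_size - sizeof(struct slab_hdr)) /
			   (size + sizeof(kmem_bufctl_t));

	if (slab_mgmt_size(nr, align) + nr * size > slab_size)
		nr--;
	*num = nr;
	*left_over = slab_size - nr * size - slab_mgmt_size(nr, align);
}

int main(void)
{
	unsigned long num, left;

	estimate(0, 256, 8, &num, &left);	/* order-0, 256B objects */
	printf("%lu objects per slab, %lu bytes left over\n", num, left);
	return 0;
}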
@@ -1261,7 +1315,7 @@ static void kmem_freepages(kmem_cache_t *cachep, void *addr)
 static void kmem_rcu_free(struct rcu_head *head)
 {
 	struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
-	kmem_cache_t *cachep = slab_rcu->cachep;
+	struct kmem_cache *cachep = slab_rcu->cachep;

 	kmem_freepages(cachep, slab_rcu->addr);
 	if (OFF_SLAB(cachep))
@@ -1271,12 +1325,12 @@ static void kmem_rcu_free(struct rcu_head *head)

 #if DEBUG
 #ifdef CONFIG_DEBUG_PAGEALLOC
-static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr,
+static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
 			    unsigned long caller)
 {
-	int size = obj_reallen(cachep);
+	int size = obj_size(cachep);

-	addr = (unsigned long *)&((char *)addr)[obj_dbghead(cachep)];
+	addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];

 	if (size < 5 * sizeof(unsigned long))
 		return;
@@ -1304,10 +1358,10 @@ static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr,
 }
 #endif

-static void poison_obj(kmem_cache_t *cachep, void *addr, unsigned char val)
+static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
 {
-	int size = obj_reallen(cachep);
-	addr = &((char *)addr)[obj_dbghead(cachep)];
+	int size = obj_size(cachep);
+	addr = &((char *)addr)[obj_offset(cachep)];

 	memset(addr, val, size);
 	*(unsigned char *)(addr + size - 1) = POISON_END;
@@ -1326,7 +1380,7 @@ static void dump_line(char *data, int offset, int limit)

 #if DEBUG

-static void print_objinfo(kmem_cache_t *cachep, void *objp, int lines)
+static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
 {
 	int i, size;
 	char *realobj;
@@ -1344,8 +1398,8 @@ static void print_objinfo(kmem_cache_t *cachep, void *objp, int lines)
 		       (unsigned long)*dbg_userword(cachep, objp));
 		printk("\n");
 	}
-	realobj = (char *)objp + obj_dbghead(cachep);
-	size = obj_reallen(cachep);
+	realobj = (char *)objp + obj_offset(cachep);
+	size = obj_size(cachep);
 	for (i = 0; i < size && lines; i += 16, lines--) {
 		int limit;
 		limit = 16;
@@ -1355,14 +1409,14 @@ static void print_objinfo(kmem_cache_t *cachep, void *objp, int lines)
 	}
 }

-static void check_poison_obj(kmem_cache_t *cachep, void *objp)
+static void check_poison_obj(struct kmem_cache *cachep, void *objp)
 {
 	char *realobj;
 	int size, i;
 	int lines = 0;

-	realobj = (char *)objp + obj_dbghead(cachep);
-	size = obj_reallen(cachep);
+	realobj = (char *)objp + obj_offset(cachep);
+	size = obj_size(cachep);

 	for (i = 0; i < size; i++) {
 		char exp = POISON_FREE;
@@ -1395,20 +1449,20 @@ static void check_poison_obj(kmem_cache_t *cachep, void *objp)
 		/* Print some data about the neighboring objects, if they
 		 * exist:
 		 */
-		struct slab *slabp = page_get_slab(virt_to_page(objp));
+		struct slab *slabp = virt_to_slab(objp);
 		int objnr;

-		objnr = (objp - slabp->s_mem) / cachep->objsize;
+		objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
 		if (objnr) {
-			objp = slabp->s_mem + (objnr - 1) * cachep->objsize;
-			realobj = (char *)objp + obj_dbghead(cachep);
+			objp = slabp->s_mem + (objnr - 1) * cachep->buffer_size;
+			realobj = (char *)objp + obj_offset(cachep);
 			printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
 			       realobj, size);
 			print_objinfo(cachep, objp, 2);
 		}
 		if (objnr + 1 < cachep->num) {
-			objp = slabp->s_mem + (objnr + 1) * cachep->objsize;
-			realobj = (char *)objp + obj_dbghead(cachep);
+			objp = slabp->s_mem + (objnr + 1) * cachep->buffer_size;
+			realobj = (char *)objp + obj_offset(cachep);
 			printk(KERN_ERR "Next obj: start=%p, len=%d\n",
 			       realobj, size);
 			print_objinfo(cachep, objp, 2);
@@ -1417,25 +1471,23 @@ static void check_poison_obj(kmem_cache_t *cachep, void *objp)
 	}
 }
 #endif

-/* Destroy all the objs in a slab, and release the mem back to the system.
- * Before calling the slab must have been unlinked from the cache.
- * The cache-lock is not held/needed.
+#if DEBUG
+/**
+ * slab_destroy_objs - call the registered destructor for each object in
+ *	a slab that is to be destroyed.
 */
-static void slab_destroy(kmem_cache_t *cachep, struct slab *slabp)
+static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
 {
-	void *addr = slabp->s_mem - slabp->colouroff;
-
-#if DEBUG
 	int i;

 	for (i = 0; i < cachep->num; i++) {
-		void *objp = slabp->s_mem + cachep->objsize * i;
+		void *objp = slabp->s_mem + cachep->buffer_size * i;

 		if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
-			if ((cachep->objsize % PAGE_SIZE) == 0
+			if ((cachep->buffer_size % PAGE_SIZE) == 0
 			    && OFF_SLAB(cachep))
 				kernel_map_pages(virt_to_page(objp),
-						 cachep->objsize / PAGE_SIZE,
+						 cachep->buffer_size / PAGE_SIZE,
 						 1);
 			else
 				check_poison_obj(cachep, objp);
@@ -1452,18 +1504,32 @@ static void slab_destroy(kmem_cache_t *cachep, struct slab *slabp)
 				   "was overwritten");
 		}
 		if (cachep->dtor && !(cachep->flags & SLAB_POISON))
-			(cachep->dtor) (objp + obj_dbghead(cachep), cachep, 0);
+			(cachep->dtor) (objp + obj_offset(cachep), cachep, 0);
 	}
+}
 #else
+static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
+{
 	if (cachep->dtor) {
 		int i;
 		for (i = 0; i < cachep->num; i++) {
-			void *objp = slabp->s_mem + cachep->objsize * i;
+			void *objp = slabp->s_mem + cachep->buffer_size * i;
 			(cachep->dtor) (objp, cachep, 0);
 		}
 	}
+}
 #endif

+/**
+ * Destroy all the objs in a slab, and release the mem back to the system.
+ * Before calling the slab must have been unlinked from the cache.
+ * The cache-lock is not held/needed.
+ */
+static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
+{
+	void *addr = slabp->s_mem - slabp->colouroff;
+
+	slab_destroy_objs(cachep, slabp);
 	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
 		struct slab_rcu *slab_rcu;
@@ -1478,9 +1544,9 @@ static void slab_destroy(kmem_cache_t *cachep, struct slab *slabp)
 	}
 }

-/* For setting up all the kmem_list3s for cache whose objsize is same
+/* For setting up all the kmem_list3s for cache whose buffer_size is same
    as size of kmem_list3. */
-static inline void set_up_list3s(kmem_cache_t *cachep, int index)
+static void set_up_list3s(struct kmem_cache *cachep, int index)
 {
 	int node;
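poison_obj() and check_poison_obj() above fill a free object with POISON_FREE bytes terminated by POISON_END, then scan for the first mismatch the next time the object is handled. A self-contained model of that scan — the byte values match the kernel's, the rest is illustrative:

#include <stdio.h>
#include <string.h>

#define POISON_FREE	0x6b	/* filler for freed objects */
#define POISON_END	0xa5	/* last byte of a poisoned area */

static void poison_obj(unsigned char *addr, size_t size)
{
	memset(addr, POISON_FREE, size);
	addr[size - 1] = POISON_END;
}

/* Return the offset of the first corrupted byte, or -1 if intact. */
static long check_poison_obj(const unsigned char *addr, size_t size)
{
	size_t i;

	for (i = 0; i < size; i++) {
		unsigned char exp = (i == size - 1) ? POISON_END : POISON_FREE;
		if (addr[i] != exp)
			return (long)i;
	}
	return -1;
}

int main(void)
{
	unsigned char obj[64];

	poison_obj(obj, sizeof(obj));
	obj[13] = 0;		/* simulate a use-after-free write */
	printf("corruption at offset %ld\n",
	       check_poison_obj(obj, sizeof(obj)));
	return 0;
}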
@@ -1493,15 +1559,20 @@ static void set_up_list3s(kmem_cache_t *cachep, int index)
 }

 /**
- * calculate_slab_order - calculate size (page order) of slabs and the number
- *	of objects per slab.
+ * calculate_slab_order - calculate size (page order) of slabs
+ * @cachep: pointer to the cache that is being created
+ * @size: size of objects to be created in this cache.
+ * @align: required alignment for the objects.
+ * @flags: slab allocation flags
+ *
+ * Also calculates the number of objects per slab.
  *
  * This could be made much more intelligent.  For now, try to avoid using
  * high order pages for slabs.  When the gfp() functions are more friendly
  * towards high-order requests, this should be changed.
  */
-static inline size_t calculate_slab_order(kmem_cache_t *cachep, size_t size,
-					  size_t align, gfp_t flags)
+static inline size_t calculate_slab_order(struct kmem_cache *cachep,
+			size_t size, size_t align, unsigned long flags)
 {
 	size_t left_over = 0;
@@ -1572,13 +1643,13 @@ static inline size_t calculate_slab_order(kmem_cache_t *cachep, size_t size,
 * cacheline.  This can be beneficial if you're counting cycles as closely
 * as davem.
 */
-kmem_cache_t *
+struct kmem_cache *
 kmem_cache_create (const char *name, size_t size, size_t align,
-	unsigned long flags, void (*ctor)(void*, kmem_cache_t *, unsigned long),
-	void (*dtor)(void*, kmem_cache_t *, unsigned long))
+	unsigned long flags, void (*ctor)(void*, struct kmem_cache *, unsigned long),
+	void (*dtor)(void*, struct kmem_cache *, unsigned long))
 {
 	size_t left_over, slab_size, ralign;
-	kmem_cache_t *cachep = NULL;
+	struct kmem_cache *cachep = NULL;
 	struct list_head *p;

 	/*
@@ -1596,7 +1667,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	mutex_lock(&cache_chain_mutex);

 	list_for_each(p, &cache_chain) {
-		kmem_cache_t *pc = list_entry(p, kmem_cache_t, next);
+		struct kmem_cache *pc = list_entry(p, struct kmem_cache, next);
 		mm_segment_t old_fs = get_fs();
 		char tmp;
 		int res;
@@ -1611,7 +1682,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 		set_fs(old_fs);
 		if (res) {
 			printk("SLAB: cache with size %d has lost its name\n",
-			       pc->objsize);
+			       pc->buffer_size);
 			continue;
 		}
@@ -1696,20 +1767,20 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	align = ralign;

 	/* Get cache's description obj. */
-	cachep = (kmem_cache_t *) kmem_cache_alloc(&cache_cache, SLAB_KERNEL);
+	cachep = kmem_cache_alloc(&cache_cache, SLAB_KERNEL);
 	if (!cachep)
 		goto oops;
-	memset(cachep, 0, sizeof(kmem_cache_t));
+	memset(cachep, 0, sizeof(struct kmem_cache));

 #if DEBUG
-	cachep->reallen = size;
+	cachep->obj_size = size;

 	if (flags & SLAB_RED_ZONE) {
 		/* redzoning only works with word aligned caches */
 		align = BYTES_PER_WORD;

 		/* add space for red zone words */
-		cachep->dbghead += BYTES_PER_WORD;
+		cachep->obj_offset += BYTES_PER_WORD;
 		size += 2 * BYTES_PER_WORD;
 	}
 	if (flags & SLAB_STORE_USER) {
@@ -1722,8 +1793,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	}
 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
 	if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
-	    && cachep->reallen > cache_line_size() && size < PAGE_SIZE) {
-		cachep->dbghead += PAGE_SIZE - size;
+	    && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) {
+		cachep->obj_offset += PAGE_SIZE - size;
 		size = PAGE_SIZE;
 	}
 #endif
@@ -1786,7 +1857,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	if (flags & SLAB_CACHE_DMA)
 		cachep->gfpflags |= GFP_DMA;
 	spin_lock_init(&cachep->spinlock);
-	cachep->objsize = size;
+	cachep->buffer_size = size;

 	if (flags & CFLGS_OFF_SLAB)
 		cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
@@ -1843,11 +1914,11 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 			jiffies + REAPTIMEOUT_LIST3 +
 			((unsigned long)cachep) % REAPTIMEOUT_LIST3;

-		BUG_ON(!ac_data(cachep));
-		ac_data(cachep)->avail = 0;
-		ac_data(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
-		ac_data(cachep)->batchcount = 1;
-		ac_data(cachep)->touched = 0;
+		BUG_ON(!cpu_cache_get(cachep));
+		cpu_cache_get(cachep)->avail = 0;
+		cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
+		cpu_cache_get(cachep)->batchcount = 1;
+		cpu_cache_get(cachep)->touched = 0;
 		cachep->batchcount = 1;
 		cachep->limit = BOOT_CPUCACHE_ENTRIES;
 	}
@@ -1875,7 +1946,7 @@ static void check_irq_on(void)
 	BUG_ON(irqs_disabled());
 }

-static void check_spinlock_acquired(kmem_cache_t *cachep)
+static void check_spinlock_acquired(struct kmem_cache *cachep)
 {
 #ifdef CONFIG_SMP
 	check_irq_off();
@@ -1883,7 +1954,7 @@ static void check_spinlock_acquired(kmem_cache_t *cachep)
 #endif
 }

-static inline void check_spinlock_acquired_node(kmem_cache_t *cachep, int node)
+static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
 {
 #ifdef CONFIG_SMP
 	check_irq_off();
@@ -1916,24 +1987,24 @@ static void smp_call_function_all_cpus(void (*func)(void *arg), void *arg)
 	preempt_enable();
 }

-static void drain_array_locked(kmem_cache_t *cachep, struct array_cache *ac,
+static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac,
 			       int force, int node);

 static void do_drain(void *arg)
 {
-	kmem_cache_t *cachep = (kmem_cache_t *) arg;
+	struct kmem_cache *cachep = (struct kmem_cache *) arg;
 	struct array_cache *ac;
 	int node = numa_node_id();

 	check_irq_off();
-	ac = ac_data(cachep);
+	ac = cpu_cache_get(cachep);
 	spin_lock(&cachep->nodelists[node]->list_lock);
 	free_block(cachep, ac->entry, ac->avail, node);
 	spin_unlock(&cachep->nodelists[node]->list_lock);
 	ac->avail = 0;
 }

-static void drain_cpu_caches(kmem_cache_t *cachep)
+static void drain_cpu_caches(struct kmem_cache *cachep)
 {
 	struct kmem_list3 *l3;
 	int node;
@@ -1954,7 +2025,7 @@ static void drain_cpu_caches(kmem_cache_t *cachep)
 	spin_unlock_irq(&cachep->spinlock);
 }

-static int __node_shrink(kmem_cache_t *cachep, int node)
+static int __node_shrink(struct kmem_cache *cachep, int node)
 {
 	struct slab *slabp;
 	struct kmem_list3 *l3 = cachep->nodelists[node];
@@ -1983,7 +2054,7 @@ static int __node_shrink(kmem_cache_t *cachep, int node)
 	return ret;
 }

-static int __cache_shrink(kmem_cache_t *cachep)
+static int __cache_shrink(struct kmem_cache *cachep)
 {
 	int ret = 0, i = 0;
 	struct kmem_list3 *l3;
@@ -2009,7 +2080,7 @@ static int __cache_shrink(kmem_cache_t *cachep)
 * Releases as many slabs as possible for a cache.
 * To help debugging, a zero exit status indicates all slabs were released.
 */
-int kmem_cache_shrink(kmem_cache_t *cachep)
+int kmem_cache_shrink(struct kmem_cache *cachep)
 {
 	if (!cachep || in_interrupt())
 		BUG();
@@ -2022,7 +2093,7 @@ EXPORT_SYMBOL(kmem_cache_shrink);
 * kmem_cache_destroy - delete a cache
 * @cachep: the cache to destroy
 *
- * Remove a kmem_cache_t object from the slab cache.
+ * Remove a struct kmem_cache object from the slab cache.
 * Returns 0 on success.
 *
 * It is expected this function will be called by a module when it is
@@ -2035,7 +2106,7 @@ EXPORT_SYMBOL(kmem_cache_shrink);
 * The caller must guarantee that noone will allocate memory from the cache
 * during the kmem_cache_destroy().
 */
-int kmem_cache_destroy(kmem_cache_t *cachep)
+int kmem_cache_destroy(struct kmem_cache *cachep)
 {
 	int i;
 	struct kmem_list3 *l3;
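For reference, this is what a typical client of the renamed API looks like after the patch: callers now traffic in struct kmem_cache * rather than the kmem_cache_t typedef. A kernel-side sketch — the cache name and object type are invented for illustration:

#include <linux/slab.h>
#include <linux/list.h>
#include <linux/errno.h>

struct my_object {			/* illustrative client structure */
	int id;
	struct list_head list;
};

static struct kmem_cache *my_cachep;

static int my_cache_setup(void)
{
	/* No constructor or destructor; era-appropriate 6-argument form. */
	my_cachep = kmem_cache_create("my_object", sizeof(struct my_object),
				      0, SLAB_HWCACHE_ALIGN, NULL, NULL);
	return my_cachep ? 0 : -ENOMEM;
}

static void my_cache_teardown(void)
{
	/* Every object must already have been freed back to the cache. */
	kmem_cache_destroy(my_cachep);
}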
@@ -2086,7 +2157,7 @@ int kmem_cache_destroy(kmem_cache_t *cachep)
 EXPORT_SYMBOL(kmem_cache_destroy);

 /* Get the memory for a slab management obj.
 */
-static struct slab *alloc_slabmgmt(kmem_cache_t *cachep, void *objp,
+static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
 				   int colour_off, gfp_t local_flags)
 {
 	struct slab *slabp;
@@ -2112,13 +2183,13 @@ static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
 	return (kmem_bufctl_t *) (slabp + 1);
 }

-static void cache_init_objs(kmem_cache_t *cachep,
+static void cache_init_objs(struct kmem_cache *cachep,
 			    struct slab *slabp, unsigned long ctor_flags)
 {
 	int i;

 	for (i = 0; i < cachep->num; i++) {
-		void *objp = slabp->s_mem + cachep->objsize * i;
+		void *objp = slabp->s_mem + cachep->buffer_size * i;
 #if DEBUG
 		/* need to poison the objs? */
 		if (cachep->flags & SLAB_POISON)
@@ -2136,7 +2207,7 @@ static void cache_init_objs(kmem_cache_t *cachep,
 		 * Otherwise, deadlock. They must also be threaded.
 		 */
 		if (cachep->ctor && !(cachep->flags & SLAB_POISON))
-			cachep->ctor(objp + obj_dbghead(cachep), cachep,
+			cachep->ctor(objp + obj_offset(cachep), cachep,
 				     ctor_flags);

 		if (cachep->flags & SLAB_RED_ZONE) {
@@ -2147,10 +2218,10 @@ static void cache_init_objs(kmem_cache_t *cachep,
 				slab_error(cachep, "constructor overwrote the"
 					   " start of an object");
 		}
-		if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep)
+		if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)
 		    && cachep->flags & SLAB_POISON)
 			kernel_map_pages(virt_to_page(objp),
-					 cachep->objsize / PAGE_SIZE, 0);
+					 cachep->buffer_size / PAGE_SIZE, 0);
 #else
 		if (cachep->ctor)
 			cachep->ctor(objp, cachep, ctor_flags);
@@ -2161,7 +2232,7 @@ static void cache_init_objs(kmem_cache_t *cachep,
 	slabp->free = 0;
 }

-static void kmem_flagcheck(kmem_cache_t *cachep, gfp_t flags)
+static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
 {
 	if (flags & SLAB_DMA) {
 		if (!(cachep->gfpflags & GFP_DMA))
@@ -2172,7 +2243,43 @@ static void kmem_flagcheck(kmem_cache_t *cachep, gfp_t flags)
 	}
 }

-static void set_slab_attr(kmem_cache_t *cachep, struct slab *slabp, void *objp)
+static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp, int nodeid)
+{
+	void *objp = slabp->s_mem + (slabp->free * cachep->buffer_size);
+	kmem_bufctl_t next;
+
+	slabp->inuse++;
+	next = slab_bufctl(slabp)[slabp->free];
+#if DEBUG
+	slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
+	WARN_ON(slabp->nodeid != nodeid);
+#endif
+	slabp->free = next;
+
+	return objp;
+}
+
+static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp, void *objp,
+			 int nodeid)
+{
+	unsigned int objnr = (unsigned)(objp-slabp->s_mem) / cachep->buffer_size;
+
+#if DEBUG
+	/* Verify that the slab belongs to the intended node */
+	WARN_ON(slabp->nodeid != nodeid);
+
+	if (slab_bufctl(slabp)[objnr] != BUFCTL_FREE) {
+		printk(KERN_ERR "slab: double free detected in cache "
+		       "'%s', objp %p\n", cachep->name, objp);
+		BUG();
+	}
+#endif
+	slab_bufctl(slabp)[objnr] = slabp->free;
+	slabp->free = objnr;
+	slabp->inuse--;
+}
+
+static void set_slab_attr(struct kmem_cache *cachep, struct slab *slabp, void *objp)
 {
 	int i;
 	struct page *page;
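slab_get_obj()/slab_put_obj() above centralize the bufctl freelist that cache_alloc_refill(), __cache_alloc_node() and free_block() previously open-coded: slab_bufctl(slabp)[i] holds the index of the next free object after i, and slabp->free heads the list. A userspace model, including the double-free check:

#include <stdio.h>

#define NUM		8
#define BUFCTL_END	0xffff
#define BUFCTL_FREE	0xfffe	/* debug marker: object is allocated */

static unsigned short bufctl[NUM];	/* bufctl[i]: next free after i */
static unsigned short free_head;	/* first free object index */
static unsigned int inuse;

static void init_slab(void)
{
	int i;

	for (i = 0; i < NUM; i++)
		bufctl[i] = i + 1;
	bufctl[NUM - 1] = BUFCTL_END;
	free_head = 0;
	inuse = 0;
}

static int slab_get_obj(void)
{
	int objnr = free_head;

	inuse++;
	free_head = bufctl[objnr];
	bufctl[objnr] = BUFCTL_FREE;	/* mark as handed out */
	return objnr;
}

static void slab_put_obj(int objnr)
{
	if (bufctl[objnr] != BUFCTL_FREE) {
		printf("double free of obj %d!\n", objnr);
		return;
	}
	bufctl[objnr] = free_head;	/* push back onto the freelist */
	free_head = objnr;
	inuse--;
}

int main(void)
{
	init_slab();
	int a = slab_get_obj();
	int b = slab_get_obj();
	slab_put_obj(a);
	slab_put_obj(a);		/* detected as a double free */
	slab_put_obj(b);
	return 0;
}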
@@ -2191,7 +2298,7 @@ static void set_slab_attr(kmem_cache_t *cachep, struct slab *slabp, void *objp)
 * Grow (by 1) the number of slabs within a cache.  This is called by
 * kmem_cache_alloc() when there are no active objs left in a cache.
 */
-static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid)
+static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
 	struct slab *slabp;
 	void *objp;
@@ -2302,14 +2409,14 @@ static void kfree_debugcheck(const void *objp)
 	}
 }

-static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp,
+static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 				   void *caller)
 {
 	struct page *page;
 	unsigned int objnr;
 	struct slab *slabp;

-	objp -= obj_dbghead(cachep);
+	objp -= obj_offset(cachep);
 	kfree_debugcheck(objp);
 	page = virt_to_page(objp);
@@ -2341,31 +2448,31 @@ static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp,
 	if (cachep->flags & SLAB_STORE_USER)
 		*dbg_userword(cachep, objp) = caller;

-	objnr = (objp - slabp->s_mem) / cachep->objsize;
+	objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;

 	BUG_ON(objnr >= cachep->num);
-	BUG_ON(objp != slabp->s_mem + objnr * cachep->objsize);
+	BUG_ON(objp != slabp->s_mem + objnr * cachep->buffer_size);

 	if (cachep->flags & SLAB_DEBUG_INITIAL) {
 		/* Need to call the slab's constructor so the
 		 * caller can perform a verify of its state (debugging).
 		 * Called without the cache-lock held.
 		 */
-		cachep->ctor(objp + obj_dbghead(cachep),
+		cachep->ctor(objp + obj_offset(cachep),
 			     cachep, SLAB_CTOR_CONSTRUCTOR | SLAB_CTOR_VERIFY);
 	}
 	if (cachep->flags & SLAB_POISON && cachep->dtor) {
 		/* we want to cache poison the object,
 		 * call the destruction callback
 		 */
-		cachep->dtor(objp + obj_dbghead(cachep), cachep, 0);
+		cachep->dtor(objp + obj_offset(cachep), cachep, 0);
 	}
 	if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
-		if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) {
+		if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) {
 			store_stackinfo(cachep, objp, (unsigned long)caller);
 			kernel_map_pages(virt_to_page(objp),
-					 cachep->objsize / PAGE_SIZE, 0);
+					 cachep->buffer_size / PAGE_SIZE, 0);
 		} else {
 			poison_obj(cachep, objp, POISON_FREE);
 		}
@@ -2376,7 +2483,7 @@ static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp,
 	return objp;
 }

-static void check_slabp(kmem_cache_t *cachep, struct slab *slabp)
+static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
 {
 	kmem_bufctl_t i;
 	int entries = 0;
@@ -2409,14 +2516,14 @@ static void check_slabp(kmem_cache_t *cachep, struct slab *slabp)
 #define check_slabp(x,y) do { } while(0)
 #endif

-static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags)
+static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
 {
 	int batchcount;
 	struct kmem_list3 *l3;
 	struct array_cache *ac;

 	check_irq_off();
-	ac = ac_data(cachep);
+	ac = cpu_cache_get(cachep);
       retry:
 	batchcount = ac->batchcount;
 	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
@@ -2461,22 +2568,12 @@ static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags)
 		check_slabp(cachep, slabp);
 		check_spinlock_acquired(cachep);
 		while (slabp->inuse < cachep->num && batchcount--) {
-			kmem_bufctl_t next;
 			STATS_INC_ALLOCED(cachep);
 			STATS_INC_ACTIVE(cachep);
 			STATS_SET_HIGH(cachep);

-			/* get obj pointer */
-			ac->entry[ac->avail++] = slabp->s_mem +
-			    slabp->free * cachep->objsize;
-
-			slabp->inuse++;
-			next = slab_bufctl(slabp)[slabp->free];
-#if DEBUG
-			slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
-			WARN_ON(numa_node_id() != slabp->nodeid);
-#endif
-			slabp->free = next;
+			ac->entry[ac->avail++] = slab_get_obj(cachep, slabp,
+							    numa_node_id());
 		}
 		check_slabp(cachep, slabp);
@@ -2498,7 +2595,7 @@ static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags)
 		x = cache_grow(cachep, flags, numa_node_id());

 		// cache_grow can reenable interrupts, then ac could change.
-		ac = ac_data(cachep);
+		ac = cpu_cache_get(cachep);
 		if (!x && ac->avail == 0)	// no objects in sight? abort
 			return NULL;
@@ -2510,7 +2607,7 @@ static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags)
 }

 static inline void
-cache_alloc_debugcheck_before(kmem_cache_t *cachep, gfp_t flags)
+cache_alloc_debugcheck_before(struct kmem_cache *cachep, gfp_t flags)
 {
 	might_sleep_if(flags & __GFP_WAIT);
 #if DEBUG
@@ -2519,16 +2616,16 @@ cache_alloc_debugcheck_before(kmem_cache_t *cachep, gfp_t flags)
 }

 #if DEBUG
-static void *cache_alloc_debugcheck_after(kmem_cache_t *cachep, gfp_t flags,
+static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, gfp_t flags,
 					  void *objp, void *caller)
 {
 	if (!objp)
 		return objp;
 	if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
-		if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
+		if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
 			kernel_map_pages(virt_to_page(objp),
-					 cachep->objsize / PAGE_SIZE, 1);
+					 cachep->buffer_size / PAGE_SIZE, 1);
 		else
 			check_poison_obj(cachep, objp);
 #else
@@ -2553,7 +2650,7 @@ static void *cache_alloc_debugcheck_after(kmem_cache_t *cachep, gfp_t flags,
 		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
 		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
 	}
-	objp += obj_dbghead(cachep);
+	objp += obj_offset(cachep);
 	if (cachep->ctor && cachep->flags & SLAB_POISON) {
 		unsigned long ctor_flags = SLAB_CTOR_CONSTRUCTOR;
@@ -2568,7 +2665,7 @@ static void *cache_alloc_debugcheck_after(kmem_cache_t *cachep, gfp_t flags,
 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
 #endif

-static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags)
+static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
 	void *objp;
 	struct array_cache *ac;
@@ -2583,7 +2680,7 @@ static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags)
 #endif

 	check_irq_off();
-	ac = ac_data(cachep);
+	ac = cpu_cache_get(cachep);
 	if (likely(ac->avail)) {
 		STATS_INC_ALLOCHIT(cachep);
 		ac->touched = 1;
@@ -2595,7 +2692,8 @@ static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags)
 	return objp;
 }

-static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags)
+static __always_inline void *
+__cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
 {
 	unsigned long save_flags;
 	void *objp;
@@ -2606,7 +2704,7 @@ static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags)
 	objp = ____cache_alloc(cachep, flags);
 	local_irq_restore(save_flags);
 	objp = cache_alloc_debugcheck_after(cachep, flags, objp,
-					    __builtin_return_address(0));
+					    caller);
 	prefetchw(objp);
 	return objp;
 }
@@ -2615,13 +2713,12 @@ static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags)
 /*
 * A interface to enable slab creation on nodeid
 */
-static void *__cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
+static void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
 	struct list_head *entry;
 	struct slab *slabp;
 	struct kmem_list3 *l3;
 	void *obj;
-	kmem_bufctl_t next;
 	int x;

 	l3 = cachep->nodelists[nodeid];
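____cache_alloc() is the allocation fast path: pop the top of the per-CPU array if anything is available, otherwise have cache_alloc_refill() pull a batch of objects off the slab lists. A compact userspace model of that two-level scheme — a flat array stands in for the slab lists:

#include <stdio.h>

#define LIMIT 4

/* Model of the per-cpu array cache: a small LIFO stack of pointers. */
struct array_cache {
	unsigned int avail;
	unsigned int batchcount;
	void *entry[LIMIT];
};

static void *refill(struct array_cache *ac)
{
	static long backing[64];	/* stand-in for partial slabs */
	static int next;
	unsigned int i;

	for (i = 0; i < ac->batchcount; i++)
		ac->entry[ac->avail++] = &backing[next++];
	return ac->entry[--ac->avail];
}

static void *cache_alloc(struct array_cache *ac)
{
	if (ac->avail)			/* fast path: plain pop */
		return ac->entry[--ac->avail];
	return refill(ac);		/* slow path: batch refill */
}

int main(void)
{
	struct array_cache ac = { .avail = 0, .batchcount = 2 };

	printf("%p %p %p\n", cache_alloc(&ac), cache_alloc(&ac),
	       cache_alloc(&ac));
	return 0;
}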
@@ -2647,14 +2744,7 @@ static void *__cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)

 	BUG_ON(slabp->inuse == cachep->num);

-	/* get obj pointer */
-	obj = slabp->s_mem + slabp->free * cachep->objsize;
-	slabp->inuse++;
-	next = slab_bufctl(slabp)[slabp->free];
-#if DEBUG
-	slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
-#endif
-	slabp->free = next;
+	obj = slab_get_obj(cachep, slabp, nodeid);
 	check_slabp(cachep, slabp);
 	l3->free_objects--;
 	/* move slabp to correct slabp list: */
@@ -2685,7 +2775,7 @@ static void *__cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
 /*
 * Caller needs to acquire correct kmem_list's list_lock
 */
-static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects,
+static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
 		       int node)
 {
 	int i;
@@ -2694,29 +2784,14 @@ static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects,
 	for (i = 0; i < nr_objects; i++) {
 		void *objp = objpp[i];
 		struct slab *slabp;
-		unsigned int objnr;

-		slabp = page_get_slab(virt_to_page(objp));
+		slabp = virt_to_slab(objp);
 		l3 = cachep->nodelists[node];
 		list_del(&slabp->list);
-		objnr = (objp - slabp->s_mem) / cachep->objsize;
 		check_spinlock_acquired_node(cachep, node);
 		check_slabp(cachep, slabp);
-
-#if DEBUG
-		/* Verify that the slab belongs to the intended node */
-		WARN_ON(slabp->nodeid != node);
-
-		if (slab_bufctl(slabp)[objnr] != BUFCTL_FREE) {
-			printk(KERN_ERR "slab: double free detected in cache "
-			       "'%s', objp %p\n", cachep->name, objp);
-			BUG();
-		}
-#endif
-		slab_bufctl(slabp)[objnr] = slabp->free;
-		slabp->free = objnr;
+		slab_put_obj(cachep, slabp, objp, node);
 		STATS_DEC_ACTIVE(cachep);
-		slabp->inuse--;
 		l3->free_objects++;
 		check_slabp(cachep, slabp);
@@ -2738,7 +2813,7 @@ static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects,
 	}
 }

-static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac)
+static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
 {
 	int batchcount;
 	struct kmem_list3 *l3;
@@ -2797,9 +2872,9 @@ static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac)
 *
 * Called with disabled ints.
 */
-static inline void __cache_free(kmem_cache_t *cachep, void *objp)
+static inline void __cache_free(struct kmem_cache *cachep, void *objp)
 {
-	struct array_cache *ac = ac_data(cachep);
+	struct array_cache *ac = cpu_cache_get(cachep);

 	check_irq_off();
 	objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
@@ -2810,7 +2885,7 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp)
 #ifdef CONFIG_NUMA
 	{
 		struct slab *slabp;
-		slabp = page_get_slab(virt_to_page(objp));
+		slabp = virt_to_slab(objp);
 		if (unlikely(slabp->nodeid != numa_node_id())) {
 			struct array_cache *alien = NULL;
 			int nodeid = slabp->nodeid;
@@ -2856,9 +2931,9 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp)
 * Allocate an object from this cache.  The flags are only relevant
 * if the cache has no available objects.
 */
-void *kmem_cache_alloc(kmem_cache_t *cachep, gfp_t flags)
+void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
-	return __cache_alloc(cachep, flags);
+	return __cache_alloc(cachep, flags, __builtin_return_address(0));
}
 EXPORT_SYMBOL(kmem_cache_alloc);
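kmem_cache_alloc() now passes __builtin_return_address(0) down into __cache_alloc(), so debug builds record the real caller rather than an allocator-internal frame; the new __always_inline on __cache_alloc keeps the captured address meaningful. A minimal sketch of the pattern — the function names here are illustrative:

#include <stdio.h>

/* The internal path just threads the caller address through. */
static void *cache_alloc(unsigned long flags, void *caller)
{
	(void)flags;
	printf("allocation requested from %p\n", caller);
	return NULL;		/* a real allocator would return an object */
}

/* The exported entry point captures its own return address, i.e. the
 * code that called the allocator. */
void *my_cache_alloc(unsigned long flags)
{
	return cache_alloc(flags, __builtin_return_address(0));
}

int main(void)
{
	my_cache_alloc(0);
	return 0;
}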
@@ -2876,12 +2951,12 @@ EXPORT_SYMBOL(kmem_cache_alloc);
 *
 * Currently only used for dentry validation.
 */
-int fastcall kmem_ptr_validate(kmem_cache_t *cachep, void *ptr)
+int fastcall kmem_ptr_validate(struct kmem_cache *cachep, void *ptr)
 {
 	unsigned long addr = (unsigned long)ptr;
 	unsigned long min_addr = PAGE_OFFSET;
 	unsigned long align_mask = BYTES_PER_WORD - 1;
-	unsigned long size = cachep->objsize;
+	unsigned long size = cachep->buffer_size;
 	struct page *page;

 	if (unlikely(addr < min_addr))
@@ -2917,32 +2992,23 @@ int fastcall kmem_ptr_validate(kmem_cache_t *cachep, void *ptr)
 * New and improved: it will now make sure that the object gets
 * put on the correct node list so that there is no false sharing.
 */
-void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
+void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
 	unsigned long save_flags;
 	void *ptr;

-	if (nodeid == -1)
-		return __cache_alloc(cachep, flags);
-
-	if (unlikely(!cachep->nodelists[nodeid])) {
-		/* Fall back to __cache_alloc if we run into trouble */
-		printk(KERN_WARNING
-		       "slab: not allocating in inactive node %d for cache %s\n",
-		       nodeid, cachep->name);
-		return __cache_alloc(cachep, flags);
-	}
-
 	cache_alloc_debugcheck_before(cachep, flags);
 	local_irq_save(save_flags);
-	if (nodeid == numa_node_id())
+
+	if (nodeid == -1 || nodeid == numa_node_id() ||
+	    !cachep->nodelists[nodeid])
 		ptr = ____cache_alloc(cachep, flags);
 	else
 		ptr = __cache_alloc_node(cachep, flags, nodeid);
 	local_irq_restore(save_flags);
-	ptr =
-	    cache_alloc_debugcheck_after(cachep, flags, ptr,
-					 __builtin_return_address(0));
+
+	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr,
+					   __builtin_return_address(0));

 	return ptr;
 }
@@ -2950,7 +3016,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);

 void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
-	kmem_cache_t *cachep;
+	struct kmem_cache *cachep;

 	cachep = kmem_find_general_cachep(size, flags);
 	if (unlikely(cachep == NULL))
@@ -2981,9 +3047,10 @@ EXPORT_SYMBOL(kmalloc_node);
 * platforms. For example, on i386, it means that the memory must come
 * from the first 16MB.
 */
-void *__kmalloc(size_t size, gfp_t flags)
+static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
+					  void *caller)
 {
-	kmem_cache_t *cachep;
+	struct kmem_cache *cachep;

 	/* If you want to save a few bytes .text space: replace
 	 * __ with kmem_.
@@ -2993,10 +3060,27 @@ void *__kmalloc(size_t size, gfp_t flags)
 	cachep = __find_general_cachep(size, flags);
 	if (unlikely(cachep == NULL))
 		return NULL;
-	return __cache_alloc(cachep, flags);
+	return __cache_alloc(cachep, flags, caller);
+}
+
+#ifndef CONFIG_DEBUG_SLAB
+
+void *__kmalloc(size_t size, gfp_t flags)
+{
+	return __do_kmalloc(size, flags, NULL);
 }
 EXPORT_SYMBOL(__kmalloc);

+#else
+
+void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller)
+{
+	return __do_kmalloc(size, flags, caller);
+}
+EXPORT_SYMBOL(__kmalloc_track_caller);
+
+#endif
+
 #ifdef CONFIG_SMP
 /**
 * __alloc_percpu - allocate one copy of the object for every present
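The kmem_cache_alloc_node() rework above folds three former special cases — nodeid == -1 ("any node"), the local node, and a node whose lists were never initialized — into a single test before choosing the local or remote path. A userspace model of just that dispatch; the node count and return values are illustrative:

#include <stdio.h>

#define MAX_NUMNODES 4

static void *node_lists[MAX_NUMNODES];	/* NULL: node not set up */

static int numa_node_id(void) { return 0; }	/* pretend we run on node 0 */

static void *alloc_local(void)       { return "local"; }
static void *alloc_on_node(int node) { (void)node; return "remote"; }

/* Consolidated check: fall back to the local fast path for "any
 * node", the current node, or an uninitialized node. */
static void *cache_alloc_node(int nodeid)
{
	if (nodeid == -1 || nodeid == numa_node_id() ||
	    !node_lists[nodeid])
		return alloc_local();
	return alloc_on_node(nodeid);
}

int main(void)
{
	node_lists[1] = "up";	/* only node 1 has lists */
	printf("%s %s %s\n",
	       (char *)cache_alloc_node(-1),	/* local */
	       (char *)cache_alloc_node(1),	/* remote */
	       (char *)cache_alloc_node(2));	/* uninitialized: local */
	return 0;
}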
@@ -3054,7 +3138,7 @@ EXPORT_SYMBOL(__alloc_percpu);
 * Free an object which was previously allocated from this
 * cache.
 */
-void kmem_cache_free(kmem_cache_t *cachep, void *objp)
+void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 {
 	unsigned long flags;
@@ -3075,15 +3159,15 @@ EXPORT_SYMBOL(kmem_cache_free);
 */
 void kfree(const void *objp)
 {
-	kmem_cache_t *c;
+	struct kmem_cache *c;
 	unsigned long flags;

 	if (unlikely(!objp))
 		return;
 	local_irq_save(flags);
 	kfree_debugcheck(objp);
-	c = page_get_cache(virt_to_page(objp));
-	mutex_debug_check_no_locks_freed(objp, obj_reallen(c));
+	c = virt_to_cache(objp);
+	mutex_debug_check_no_locks_freed(objp, obj_size(c));
 	__cache_free(c, (void *)objp);
 	local_irq_restore(flags);
 }
@@ -3112,13 +3196,13 @@ void free_percpu(const void *objp)
 EXPORT_SYMBOL(free_percpu);
 #endif

-unsigned int kmem_cache_size(kmem_cache_t *cachep)
+unsigned int kmem_cache_size(struct kmem_cache *cachep)
 {
-	return obj_reallen(cachep);
+	return obj_size(cachep);
 }
 EXPORT_SYMBOL(kmem_cache_size);

-const char *kmem_cache_name(kmem_cache_t *cachep)
+const char *kmem_cache_name(struct kmem_cache *cachep)
 {
 	return cachep->name;
 }
@@ -3127,7 +3211,7 @@ EXPORT_SYMBOL_GPL(kmem_cache_name);
 /*
 * This initializes kmem_list3 for all nodes.
 */
-static int alloc_kmemlist(kmem_cache_t *cachep)
+static int alloc_kmemlist(struct kmem_cache *cachep)
 {
 	int node;
 	struct kmem_list3 *l3;
@@ -3183,7 +3267,7 @@ static int alloc_kmemlist(kmem_cache_t *cachep)
 }

 struct ccupdate_struct {
-	kmem_cache_t *cachep;
+	struct kmem_cache *cachep;
 	struct array_cache *new[NR_CPUS];
 };

@@ -3193,13 +3277,13 @@ static void do_ccupdate_local(void *info)
 	struct array_cache *old;

 	check_irq_off();
-	old = ac_data(new->cachep);
+	old = cpu_cache_get(new->cachep);

 	new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
 	new->new[smp_processor_id()] = old;
 }

-static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount,
+static int do_tune_cpucache(struct kmem_cache *cachep, int limit, int batchcount,
 			    int shared)
 {
 	struct ccupdate_struct new;
@@ -3245,7 +3329,7 @@ static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount,
 	return 0;
 }

-static void enable_cpucache(kmem_cache_t *cachep)
+static void enable_cpucache(struct kmem_cache *cachep)
 {
 	int err;
 	int limit, shared;
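do_tune_cpucache() installs new per-CPU arrays by running do_ccupdate_local() on every CPU via smp_call_function_all_cpus(): each CPU exchanges its live pointer for the replacement, handing the old array back to be drained and freed without racing the fast path. A sketch of the swap, with the cross-call replaced by a plain loop:

#include <stdio.h>

#define NR_CPUS 2

struct cache {
	void *array[NR_CPUS];	/* live per-cpu array pointers */
};

struct ccupdate {
	struct cache *cachep;
	void *new[NR_CPUS];	/* replacements in, old arrays out */
};

static void do_ccupdate_local(struct ccupdate *cc, int cpu)
{
	void *old = cc->cachep->array[cpu];

	cc->cachep->array[cpu] = cc->new[cpu];
	cc->new[cpu] = old;	/* old array returned for draining */
}

int main(void)
{
	static char a0[] = "old0", a1[] = "old1";
	static char b0[] = "new0", b1[] = "new1";
	struct cache c = { .array = { a0, a1 } };
	struct ccupdate cc = { .cachep = &c, .new = { b0, b1 } };
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)	/* stands in for the IPI */
		do_ccupdate_local(&cc, cpu);
	printf("live: %s %s; to drain: %s %s\n",
	       (char *)c.array[0], (char *)c.array[1],
	       (char *)cc.new[0], (char *)cc.new[1]);
	return 0;
}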
@@ -3258,13 +3342,13 @@ static void enable_cpucache(kmem_cache_t *cachep)
 	 * The numbers are guessed, we should auto-tune as described by
 	 * Bonwick.
 	 */
-	if (cachep->objsize > 131072)
+	if (cachep->buffer_size > 131072)
 		limit = 1;
-	else if (cachep->objsize > PAGE_SIZE)
+	else if (cachep->buffer_size > PAGE_SIZE)
 		limit = 8;
-	else if (cachep->objsize > 1024)
+	else if (cachep->buffer_size > 1024)
 		limit = 24;
-	else if (cachep->objsize > 256)
+	else if (cachep->buffer_size > 256)
 		limit = 54;
 	else
 		limit = 120;
@@ -3279,7 +3363,7 @@ static void enable_cpucache(kmem_cache_t *cachep)
 	 */
 	shared = 0;
 #ifdef CONFIG_SMP
-	if (cachep->objsize <= PAGE_SIZE)
+	if (cachep->buffer_size <= PAGE_SIZE)
 		shared = 8;
 #endif
@@ -3297,7 +3381,7 @@ static void enable_cpucache(kmem_cache_t *cachep)
 	       cachep->name, -err);
 }

-static void drain_array_locked(kmem_cache_t *cachep, struct array_cache *ac,
+static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac,
 			       int force, int node)
 {
 	int tofree;
@@ -3342,12 +3426,12 @@ static void cache_reap(void *unused)
 	}

 	list_for_each(walk, &cache_chain) {
-		kmem_cache_t *searchp;
+		struct kmem_cache *searchp;
 		struct list_head *p;
 		int tofree;
 		struct slab *slabp;

-		searchp = list_entry(walk, kmem_cache_t, next);
+		searchp = list_entry(walk, struct kmem_cache, next);

 		if (searchp->flags & SLAB_NO_REAP)
 			goto next;
@@ -3359,7 +3443,7 @@ static void cache_reap(void *unused)
 			drain_alien_cache(searchp, l3);
 		spin_lock_irq(&l3->list_lock);

-		drain_array_locked(searchp, ac_data(searchp), 0,
+		drain_array_locked(searchp, cpu_cache_get(searchp), 0,
 				   numa_node_id());

 		if (time_after(l3->next_reap, jiffies))
@@ -3450,15 +3534,15 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 		if (p == &cache_chain)
 			return NULL;
 	}
-	return list_entry(p, kmem_cache_t, next);
+	return list_entry(p, struct kmem_cache, next);
 }

 static void *s_next(struct seq_file *m, void *p, loff_t *pos)
 {
-	kmem_cache_t *cachep = p;
+	struct kmem_cache *cachep = p;
 	++*pos;
 	return cachep->next.next == &cache_chain ? NULL
-	    : list_entry(cachep->next.next, kmem_cache_t, next);
+	    : list_entry(cachep->next.next, struct kmem_cache, next);
 }

 static void s_stop(struct seq_file *m, void *p)
@@ -3468,7 +3552,7 @@ static void s_stop(struct seq_file *m, void *p)

 static int s_show(struct seq_file *m, void *p)
 {
-	kmem_cache_t *cachep = p;
+	struct kmem_cache *cachep = p;
 	struct list_head *q;
 	struct slab *slabp;
 	unsigned long active_objs;
@@ -3528,7 +3612,7 @@ static int s_show(struct seq_file *m, void *p)
 		printk(KERN_ERR "slab: cache %s error: %s\n", name, error);

 	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
-		   name, active_objs, num_objs, cachep->objsize,
+		   name, active_objs, num_objs, cachep->buffer_size,
 		   cachep->num, (1 << cachep->gfporder));
 	seq_printf(m, " : tunables %4u %4u %4u", cachep->limit,
 		   cachep->batchcount, cachep->shared);
@@ -3618,7 +3702,8 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer,
 	mutex_lock(&cache_chain_mutex);
 	res = -EINVAL;
 	list_for_each(p, &cache_chain) {
-		kmem_cache_t *cachep = list_entry(p, kmem_cache_t, next);
+		struct kmem_cache *cachep = list_entry(p, struct kmem_cache,
+						       next);

 		if (!strcmp(cachep->name, kbuf)) {
 			if (limit < 1 ||
@@ -3656,5 +3741,5 @@ unsigned int ksize(const void *objp)
 	if (unlikely(objp == NULL))
 		return 0;

-	return obj_reallen(page_get_cache(virt_to_page(objp)));
+	return obj_size(virt_to_cache(objp));
 }
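enable_cpucache() sizes the per-CPU array inversely to buffer_size, on the theory that every cached entry of a large object pins more memory. The thresholds from the hunk above, extracted as a standalone function — PAGE_SIZE is assumed to be 4096 for the example:

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Bigger objects get smaller per-cpu arrays (values from the patch). */
static int pick_limit(unsigned long buffer_size)
{
	if (buffer_size > 131072)
		return 1;
	if (buffer_size > PAGE_SIZE)
		return 8;
	if (buffer_size > 1024)
		return 24;
	if (buffer_size > 256)
		return 54;
	return 120;
}

int main(void)
{
	printf("32B -> %d, 2KB -> %d, 1MB -> %d\n",
	       pick_limit(32), pick_limit(2048), pick_limit(1UL << 20));
	return 0;
}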