From 1b5ad24878b7e5a543b98c5d2f8c0d8c0dd3088f Mon Sep 17 00:00:00 2001
From: Namhyung Kim
Date: Sat, 7 Aug 2010 14:29:22 +0200
Subject: slub: add missing __percpu markup in mm/slub_def.h

kmem_cache->cpu_slab is a percpu pointer but was missing the __percpu
markup. Add it.

Signed-off-by: Namhyung Kim
Acked-by: Tejun Heo
Signed-off-by: Pekka Enberg

diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 6447a72..5ec4bc0 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -68,7 +68,7 @@ struct kmem_cache_order_objects {
  * Slab cache management.
  */
 struct kmem_cache {
-	struct kmem_cache_cpu *cpu_slab;
+	struct kmem_cache_cpu __percpu *cpu_slab;
 	/* Used for retriving partial slabs etc */
 	unsigned long flags;
 	int size;		/* The size of an object including meta data */
--
cgit v0.10.2

From 1ab335d8f85792e3b107ff8237d53cf64db714df Mon Sep 17 00:00:00 2001
From: Carsten Otte
Date: Fri, 6 Aug 2010 18:19:22 +0200
Subject: slab: fix object alignment

This patch fixes the alignment of slab objects when CONFIG_DEBUG_PAGEALLOC
is active.

Before this spot in kmem_cache_create, the situation is as follows:
- align contains the required alignment of the object
- cachep->obj_offset is 0 or equals align in case of CONFIG_DEBUG_SLAB
- size equals the size of the object, or object plus trailing redzone in
  case of CONFIG_DEBUG_SLAB

This spot tries to fill one page per object if the object is within certain
size limits; however, setting obj_offset to PAGE_SIZE - size breaks the
object alignment, since size may not be a multiple of the required
alignment. This patch simply adds ALIGN(size, align) to the equation and
adjusts the object size check accordingly.

This code in drivers/s390/cio/qdio_setup_init has led to incorrectly
aligned slab objects (sizeof(struct qdio_q) equals 1792):

	qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q),
					 256, 0, NULL);

Acked-by: Christoph Lameter
Signed-off-by: Carsten Otte
Signed-off-by: Pekka Enberg

diff --git a/mm/slab.c b/mm/slab.c
index 736e497..dd41b74 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2330,8 +2330,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	}
 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
 	if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
-	    && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) {
-		cachep->obj_offset += PAGE_SIZE - size;
+	    && cachep->obj_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) {
+		cachep->obj_offset += PAGE_SIZE - ALIGN(size, align);
 		size = PAGE_SIZE;
 	}
 #endif
--
cgit v0.10.2
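
Note on the first patch: __percpu has no effect on generated code; it is a
sparse address-space attribute. The sketch below is a simplified,
stand-alone illustration (not the actual include/linux/compiler.h, and
struct kmem_cache_demo is a hypothetical stand-in for struct kmem_cache)
showing roughly how the annotation was defined in kernels of that era and
what it marks.

	/*
	 * Simplified sketch of what the __percpu markup means; the macro
	 * mirrors include/linux/compiler.h of that era.
	 */
	#ifdef __CHECKER__                 /* defined when sparse runs */
	# define __percpu __attribute__((noderef, address_space(3)))
	#else
	# define __percpu                  /* no-op for the real compiler */
	#endif

	struct kmem_cache_cpu;             /* opaque here */

	struct kmem_cache_demo {
		/*
		 * Annotated as in the patch: sparse now treats cpu_slab as
		 * living in the percpu address space, so dereferencing it
		 * directly, without going through helpers such as
		 * per_cpu_ptr()/this_cpu_ptr(), is reported.
		 */
		struct kmem_cache_cpu __percpu *cpu_slab;
	};

Without the markup, sparse silently accepted mixing this pointer with
ordinary kernel pointers; with it, such misuse shows up as an
address-space warning.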
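
Note on the second patch: plugging numbers into the offset calculation
makes the bug visible. The sketch below is a stand-alone user-space
program, not kernel code; the 1816-byte size is an assumed value standing
in for sizeof(struct qdio_q) (1792) plus CONFIG_DEBUG_SLAB red-zone and
store-user padding, and ALIGN() is written to match the kernel's macro for
power-of-two alignments.

	/*
	 * Illustration of the obj_offset math before and after the patch;
	 * the sizes are assumptions, see the note above.
	 */
	#include <stdio.h>

	#define PAGE_SIZE 4096UL
	#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))  /* a: power of two */

	int main(void)
	{
		unsigned long size = 1816;   /* object size after debug padding (assumed) */
		unsigned long align = 256;   /* alignment requested by the caller */

		unsigned long old_off = PAGE_SIZE - size;                /* before the patch */
		unsigned long new_off = PAGE_SIZE - ALIGN(size, align);  /* after the patch  */

		printf("old offset %lu -> %s\n", old_off,
		       old_off % align ? "misaligned" : "aligned");
		printf("new offset %lu -> %s\n", new_off,
		       new_off % align ? "misaligned" : "aligned");
		return 0;
	}

With these numbers the old code shifts the object start by 2280 bytes,
which is not a multiple of 256, while the patched code shifts it by 2048
bytes and preserves the requested alignment.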