author     Joonsoo Kim <iamjoonsoo.kim@lge.com>  2013-06-19 05:05:52 (GMT)
committer  Pekka Enberg <penberg@kernel.org>     2013-07-07 16:09:56 (GMT)
commit     345c905d13a4ec9f774b6b4bc038fe4aef26cced (patch)
tree       9b6177f5b4712ff78c3808b240e66079e8c8338f /mm
parent     e7efa615ccf78394338144ff0187be331240748a (diff)
download   linux-345c905d13a4ec9f774b6b4bc038fe4aef26cced.tar.xz
slub: Make cpu partial slab support configurable
CPU partial support can introduce a level of indeterminism that is not
wanted in certain contexts (like a realtime kernel). Make it
configurable.

This patch is based on Christoph Lameter's "slub: Make cpu partial
slab support configurable V2".

Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
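The mechanism is a compile-time switch: a new CONFIG_SLUB_CPU_PARTIAL
option (added in init/Kconfig by a companion hunk that this mm-limited
view does not show) gates a small inline predicate,
kmem_cache_has_cpu_partial(), and the per-cpu partial code paths are
either guarded by that predicate or bracketed in #ifdef directly. Note
that even with the option enabled, the predicate still returns false
for caches with debugging enabled, preserving the existing
kmem_cache_debug() exclusion. A minimal userspace sketch of the
pattern, with hypothetical names (DEMO_CPU_PARTIAL standing in for the
real config symbol): when the option is off, the predicate collapses
to a constant false and the compiler discards every guarded branch.

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-in for CONFIG_SLUB_CPU_PARTIAL. */
    /* #define DEMO_CPU_PARTIAL 1 */

    static inline bool demo_has_cpu_partial(void)
    {
    #ifdef DEMO_CPU_PARTIAL
            return true;
    #else
            return false;   /* constant: guarded branches become dead code */
    #endif
    }

    int main(void)
    {
            if (demo_has_cpu_partial())
                    puts("per-cpu partial paths compiled in");
            else
                    puts("per-cpu partial paths compiled out");
            return 0;
    }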
Diffstat (limited to 'mm')
-rw-r--r--  mm/slub.c  27
1 file changed, 21 insertions, 6 deletions
diff --git a/mm/slub.c b/mm/slub.c
index 54cc4d5..ef60536 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -122,6 +122,15 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
#endif
}
+static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
+{
+#ifdef CONFIG_SLUB_CPU_PARTIAL
+ return !kmem_cache_debug(s);
+#else
+ return false;
+#endif
+}
+
/*
* Issues still to be resolved:
*
@@ -1572,7 +1581,8 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
put_cpu_partial(s, page, 0);
stat(s, CPU_PARTIAL_NODE);
}
- if (kmem_cache_debug(s) || available > s->cpu_partial / 2)
+ if (!kmem_cache_has_cpu_partial(s)
+ || available > s->cpu_partial / 2)
break;
}
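In get_partial_node() (above), the refill loop keeps taking slabs off
the node's partial list, parking the extras per-cpu with
put_cpu_partial(), until more than half of the s->cpu_partial object
budget is available. When per-cpu partials are unavailable (option off,
or a debug cache), the loop now stops after the first usable slab,
where the old code keyed only on kmem_cache_debug(). A toy model of the
cut-off test, with hypothetical names:

    #include <stdbool.h>

    /* Stop refilling from the node partial list once enough objects
     * are in hand: immediately after the first slab when per-cpu
     * partials are unavailable, otherwise once more than half of the
     * cpu_partial budget is available. */
    static bool demo_enough_objects(bool has_cpu_partial,
                                    int available, int cpu_partial)
    {
            return !has_cpu_partial || available > cpu_partial / 2;
    }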
@@ -1883,6 +1893,7 @@ redo:
static void unfreeze_partials(struct kmem_cache *s,
struct kmem_cache_cpu *c)
{
+#ifdef CONFIG_SLUB_CPU_PARTIAL
struct kmem_cache_node *n = NULL, *n2 = NULL;
struct page *page, *discard_page = NULL;
@@ -1937,6 +1948,7 @@ static void unfreeze_partials(struct kmem_cache *s,
discard_slab(s, page);
stat(s, FREE_SLAB);
}
+#endif
}
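Rather than sprinkling #ifdef over every caller, the patch brackets the
whole body of unfreeze_partials() so that with CONFIG_SLUB_CPU_PARTIAL
unset the function compiles to an empty stub and the call sites need no
changes. A sketch of that stub pattern, with hypothetical names:

    /* The function always exists; only its body is configured out,
     * so no caller needs an #ifdef of its own. */
    static void demo_unfreeze_partials(void)
    {
    #ifdef DEMO_CPU_PARTIAL
            /* drain per-cpu partial slabs back to the node lists */
    #endif
    }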
/*
@@ -1950,6 +1962,7 @@ static void unfreeze_partials(struct kmem_cache *s,
*/
static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
{
+#ifdef CONFIG_SLUB_CPU_PARTIAL
struct page *oldpage;
int pages;
int pobjects;
@@ -1989,6 +2002,7 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
page->next = oldpage;
} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
+#endif
}
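put_cpu_partial() gets the same empty-body treatment. The loop that
remains when the option is on pushes the page onto the per-cpu partial
list locklessly: it links the new page in front of the current head and
publishes it with this_cpu_cmpxchg(), retrying if another context moved
the head in the meantime. A self-contained userspace analogue of that
push, using C11 atomics instead of the kernel's per-cpu cmpxchg (an
illustration with hypothetical names, not kernel code):

    #include <stdatomic.h>
    #include <stddef.h>

    struct demo_page { struct demo_page *next; };
    static _Atomic(struct demo_page *) demo_partial = NULL;

    /* Lockless push: link the new head, then publish it only if the
     * list head is still the one we linked against; otherwise retry.
     * A failed compare-exchange refreshes 'old' with the current head. */
    static void demo_push(struct demo_page *page)
    {
            struct demo_page *old = atomic_load(&demo_partial);
            do {
                    page->next = old;
            } while (!atomic_compare_exchange_weak(&demo_partial,
                                                   &old, page));
    }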
static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
@@ -2497,7 +2511,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
new.inuse--;
if ((!new.inuse || !prior) && !was_frozen) {
- if (!kmem_cache_debug(s) && !prior)
+ if (kmem_cache_has_cpu_partial(s) && !prior)
/*
* Slab was on no list before and will be partially empty
@@ -2552,8 +2566,9 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
* Objects left in the slab. If it was not on the partial list before
* then add it.
*/
- if (kmem_cache_debug(s) && unlikely(!prior)) {
- remove_full(s, page);
+ if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
+ if (kmem_cache_debug(s))
+ remove_full(s, page);
add_partial(n, page, DEACTIVATE_TO_TAIL);
stat(s, FREE_ADD_PARTIAL);
}
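The two __slab_free() changes adjust what happens when an object is
freed from a slab that still has live objects. With per-cpu partials
available, a slab that was previously full (!prior) is frozen and
handled per-cpu; without them, it now goes straight onto the tail of
the node partial list, and remove_full() is called only for debug
caches, since only those actually track full slabs. This is a behavior
change for non-debug caches built without CONFIG_SLUB_CPU_PARTIAL,
which previously took the freeze path. A compact model of the new
decision, with hypothetical names:

    #include <stdbool.h>

    enum demo_action { DEMO_FREEZE_PER_CPU, DEMO_ADD_NODE_PARTIAL };

    /* Freeing from a previously-full slab that still has objects in
     * use: park it per-cpu when per-cpu partials are available,
     * otherwise put it on the node partial list. */
    static enum demo_action demo_free_from_full(bool has_cpu_partial)
    {
            return has_cpu_partial ? DEMO_FREEZE_PER_CPU
                                   : DEMO_ADD_NODE_PARTIAL;
    }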
@@ -3061,7 +3076,7 @@ static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
* per node list when we run out of per cpu objects. We only fetch 50%
* to keep some capacity around for frees.
*/
- if (kmem_cache_debug(s))
+ if (!kmem_cache_has_cpu_partial(s))
s->cpu_partial = 0;
else if (s->size >= PAGE_SIZE)
s->cpu_partial = 2;
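kmem_cache_open() sizes the per-cpu partial budget at cache creation:
zero whenever per-cpu partials are unusable (previously only for debug
caches), a small budget for page-sized and larger objects, with the
tiers below this hunk (not shown) giving smaller objects progressively
larger budgets. A hedged sketch with hypothetical names; the fallback
value is a made-up stand-in for the elided tiers:

    #include <stdbool.h>
    #include <stddef.h>

    #define DEMO_PAGE_SIZE 4096u

    static unsigned int demo_pick_cpu_partial(bool usable, size_t size)
    {
            if (!usable)
                    return 0;       /* feature off or debug cache */
            if (size >= DEMO_PAGE_SIZE)
                    return 2;       /* large objects: tiny budget */
            return 8;               /* hypothetical stand-in for the
                                       smaller-size tiers not shown */
    }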
@@ -4456,7 +4471,7 @@ static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
err = strict_strtoul(buf, 10, &objects);
if (err)
return err;
- if (objects && kmem_cache_debug(s))
+ if (objects && !kmem_cache_has_cpu_partial(s))
return -EINVAL;
s->cpu_partial = objects;
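cpu_partial_store() backs the writable cpu_partial attribute under
/sys/kernel/slab/<cache>/; with the new helper in place, writing a
nonzero budget to any cache that cannot use per-cpu partials now fails
with -EINVAL, instead of only rejecting debug caches. (strict_strtoul()
was the kernel's string-to-unsigned-long parser of this era, since
replaced by kstrtoul().) Usage, assuming a kmalloc-64 cache exists on
the running system:

    # disable per-cpu partials for one cache at runtime
    echo 0 > /sys/kernel/slab/kmalloc-64/cpu_partial
    cat /sys/kernel/slab/kmalloc-64/cpu_partial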