author     Scott Wood <scottwood@freescale.com>  2015-02-13 22:12:06 (GMT)
committer  Scott Wood <scottwood@freescale.com>  2015-02-13 22:19:22 (GMT)
commit     6faa2909871d8937cb2f79a10e1b21ffe193fac1
tree       f558a94f1553814cc122ab8d9e04c0ebad5262a5 /mm/slub.c
parent     fcb2fb84301c673ee15ca04e7a2fc965712d49a0
Reset to 3.12.37
Diffstat (limited to 'mm/slub.c')
-rw-r--r--   mm/slub.c   142
1 file changed, 37 insertions(+), 105 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index a164648..a88d94c 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1087,7 +1087,7 @@ static noinline struct kmem_cache_node *free_debug_processing(
{
struct kmem_cache_node *n = get_node(s, page_to_nid(page));
- raw_spin_lock_irqsave(&n->list_lock, *flags);
+ spin_lock_irqsave(&n->list_lock, *flags);
slab_lock(page);
if (!check_slab(s, page))
@@ -1135,7 +1135,7 @@ out:
fail:
slab_unlock(page);
- raw_spin_unlock_irqrestore(&n->list_lock, *flags);
+ spin_unlock_irqrestore(&n->list_lock, *flags);
slab_fix(s, "Object at 0x%p not freed", object);
return NULL;
}
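
The two hunks above switch the node list_lock in free_debug_processing() back from the raw_spin_* API to the plain spin_* API. On a PREEMPT_RT kernel spinlock_t is backed by a sleeping rtmutex, while raw_spinlock_t remains a genuine IRQ-disabling spinning lock, which is why the -rt patches had used the raw variant here; the reset restores the mainline 3.12.37 form. A minimal sketch of the two idioms; the demo_* locks are illustrative, not SLUB structures:

#include <linux/spinlock.h>

/* demo-only locks */
static DEFINE_SPINLOCK(demo_list_lock);          /* sleeping lock on PREEMPT_RT */
static DEFINE_RAW_SPINLOCK(demo_raw_list_lock);  /* always spins with IRQs off */

static void demo_lock_both(void)
{
        unsigned long flags;

        /* form restored by this reset */
        spin_lock_irqsave(&demo_list_lock, flags);
        spin_unlock_irqrestore(&demo_list_lock, flags);

        /* form the -rt patches had introduced */
        raw_spin_lock_irqsave(&demo_raw_list_lock, flags);
        raw_spin_unlock_irqrestore(&demo_raw_list_lock, flags);
}
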
@@ -1270,12 +1270,6 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x) {}
#endif /* CONFIG_SLUB_DEBUG */
-struct slub_free_list {
- raw_spinlock_t lock;
- struct list_head list;
-};
-static DEFINE_PER_CPU(struct slub_free_list, slub_free_list);
-
/*
* Slab allocation and freeing
*/
@@ -1297,15 +1291,10 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
struct page *page;
struct kmem_cache_order_objects oo = s->oo;
gfp_t alloc_gfp;
- bool enableirqs;
flags &= gfp_allowed_mask;
- enableirqs = (flags & __GFP_WAIT) != 0;
-#ifdef CONFIG_PREEMPT_RT_FULL
- enableirqs |= system_state == SYSTEM_RUNNING;
-#endif
- if (enableirqs)
+ if (flags & __GFP_WAIT)
local_irq_enable();
flags |= s->allocflags;
@@ -1345,7 +1334,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
kmemcheck_mark_unallocated_pages(page, pages);
}
- if (enableirqs)
+ if (flags & __GFP_WAIT)
local_irq_disable();
if (!page)
return NULL;
@@ -1363,10 +1352,8 @@ static void setup_object(struct kmem_cache *s, struct page *page,
void *object)
{
setup_object_debug(s, page, object);
-#ifndef CONFIG_PREEMPT_RT_FULL
if (unlikely(s->ctor))
s->ctor(object);
-#endif
}
static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
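
With this hunk (and the paired one later in slab_alloc) the object constructor is again invoked from setup_object() when a slab page is populated, rather than being deferred to allocation time as the -rt patches did. As a reminder of where a SLUB constructor comes from, here is a minimal, hypothetical cache with a ctor; demo_obj, demo_ctor and the cache name are illustrative, not from this patch:

#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>

struct demo_obj {
        int state;
};

/* runs once per object when its slab page is set up, not on every allocation */
static void demo_ctor(void *p)
{
        struct demo_obj *obj = p;

        obj->state = 0;
}

static struct kmem_cache *demo_cachep;

static int __init demo_cache_init(void)
{
        demo_cachep = kmem_cache_create("demo_obj", sizeof(struct demo_obj),
                                        0, SLAB_HWCACHE_ALIGN, demo_ctor);
        return demo_cachep ? 0 : -ENOMEM;
}
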
@@ -1444,16 +1431,6 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
__free_memcg_kmem_pages(page, order);
}
-static void free_delayed(struct list_head *h)
-{
- while(!list_empty(h)) {
- struct page *page = list_first_entry(h, struct page, lru);
-
- list_del(&page->lru);
- __free_slab(page->slab_cache, page);
- }
-}
-
#define need_reserve_slab_rcu \
(sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
@@ -1488,12 +1465,6 @@ static void free_slab(struct kmem_cache *s, struct page *page)
}
call_rcu(head, rcu_free_slab);
- } else if (irqs_disabled()) {
- struct slub_free_list *f = &__get_cpu_var(slub_free_list);
-
- raw_spin_lock(&f->lock);
- list_add(&page->lru, &f->list);
- raw_spin_unlock(&f->lock);
} else
__free_slab(s, page);
}
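
The hunk above removes the -rt path that parked pages on a per-CPU list whenever IRQs were disabled; after the reset, free_slab() either frees the page directly or, for SLAB_DESTROY_BY_RCU caches, defers it through call_rcu() as before. A minimal sketch of that call_rcu() deferral pattern, with demo_rcu_obj and demo_release as illustrative names:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_rcu_obj {
        struct rcu_head rcu;
        int payload;
};

static void demo_release(struct rcu_head *head)
{
        struct demo_rcu_obj *obj = container_of(head, struct demo_rcu_obj, rcu);

        kfree(obj);     /* runs after a grace period, so no reader still sees obj */
}

static void demo_free(struct demo_rcu_obj *obj)
{
        call_rcu(&obj->rcu, demo_release);
}
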
@@ -1598,7 +1569,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
if (!n || !n->nr_partial)
return NULL;
- raw_spin_lock(&n->list_lock);
+ spin_lock(&n->list_lock);
list_for_each_entry_safe(page, page2, &n->partial, lru) {
void *t;
@@ -1623,7 +1594,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
break;
}
- raw_spin_unlock(&n->list_lock);
+ spin_unlock(&n->list_lock);
return object;
}
@@ -1664,7 +1635,7 @@ static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
return NULL;
do {
- cpuset_mems_cookie = get_mems_allowed();
+ cpuset_mems_cookie = read_mems_allowed_begin();
zonelist = node_zonelist(slab_node(), flags);
for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
struct kmem_cache_node *n;
@@ -1676,19 +1647,17 @@ static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
object = get_partial_node(s, n, c, flags);
if (object) {
/*
- * Return the object even if
- * put_mems_allowed indicated that
- * the cpuset mems_allowed was
- * updated in parallel. It's a
- * harmless race between the alloc
- * and the cpuset update.
+ * Don't check read_mems_allowed_retry()
+ * here - if mems_allowed was updated in
+ * parallel, that was a harmless race
+ * between allocation and the cpuset
+ * update
*/
- put_mems_allowed(cpuset_mems_cookie);
return object;
}
}
}
- } while (!put_mems_allowed(cpuset_mems_cookie));
+ } while (read_mems_allowed_retry(cpuset_mems_cookie));
#endif
return NULL;
}
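
This hunk moves get_any_partial() from the old get_mems_allowed()/put_mems_allowed() pair to the cookie-based read_mems_allowed_begin()/read_mems_allowed_retry() scheme, and drops the retry check on the success path since a racing cpuset update is harmless once an object has been obtained. A minimal sketch of the retry idiom; demo_numa_alloc() is a placeholder standing in for the zonelist walk, not a kernel API:

#include <linux/cpuset.h>
#include <linux/gfp.h>

/* placeholder allocator, not a kernel function */
static void *demo_numa_alloc(gfp_t flags)
{
        return NULL;
}

static void *demo_alloc_with_cookie(gfp_t flags)
{
        unsigned int cookie;
        void *obj;

        do {
                /* snapshot the cpuset mems_allowed sequence count */
                cookie = read_mems_allowed_begin();
                obj = demo_numa_alloc(flags);
        } while (!obj && read_mems_allowed_retry(cookie));

        return obj;
}
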
@@ -1866,7 +1835,7 @@ redo:
* that acquire_slab() will see a slab page that
* is frozen
*/
- raw_spin_lock(&n->list_lock);
+ spin_lock(&n->list_lock);
}
} else {
m = M_FULL;
@@ -1877,7 +1846,7 @@ redo:
* slabs from diagnostic functions will not see
* any frozen slabs.
*/
- raw_spin_lock(&n->list_lock);
+ spin_lock(&n->list_lock);
}
}
@@ -1912,7 +1881,7 @@ redo:
goto redo;
if (lock)
- raw_spin_unlock(&n->list_lock);
+ spin_unlock(&n->list_lock);
if (m == M_FREE) {
stat(s, DEACTIVATE_EMPTY);
@@ -1944,10 +1913,10 @@ static void unfreeze_partials(struct kmem_cache *s,
n2 = get_node(s, page_to_nid(page));
if (n != n2) {
if (n)
- raw_spin_unlock(&n->list_lock);
+ spin_unlock(&n->list_lock);
n = n2;
- raw_spin_lock(&n->list_lock);
+ spin_lock(&n->list_lock);
}
do {
@@ -1976,7 +1945,7 @@ static void unfreeze_partials(struct kmem_cache *s,
}
if (n)
- raw_spin_unlock(&n->list_lock);
+ spin_unlock(&n->list_lock);
while (discard_page) {
page = discard_page;
@@ -2014,21 +1983,14 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
pobjects = oldpage->pobjects;
pages = oldpage->pages;
if (drain && pobjects > s->cpu_partial) {
- struct slub_free_list *f;
unsigned long flags;
- LIST_HEAD(tofree);
/*
* partial array is full. Move the existing
* set to the per node partial list.
*/
local_irq_save(flags);
unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
- f = &__get_cpu_var(slub_free_list);
- raw_spin_lock(&f->lock);
- list_splice_init(&f->list, &tofree);
- raw_spin_unlock(&f->lock);
local_irq_restore(flags);
- free_delayed(&tofree);
oldpage = NULL;
pobjects = 0;
pages = 0;
@@ -2092,22 +2054,7 @@ static bool has_cpu_slab(int cpu, void *info)
static void flush_all(struct kmem_cache *s)
{
- LIST_HEAD(tofree);
- int cpu;
-
on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
- for_each_online_cpu(cpu) {
- struct slub_free_list *f;
-
- if (!has_cpu_slab(cpu, s))
- continue;
-
- f = &per_cpu(slub_free_list, cpu);
- raw_spin_lock_irq(&f->lock);
- list_splice_init(&f->list, &tofree);
- raw_spin_unlock_irq(&f->lock);
- free_delayed(&tofree);
- }
}
/*
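
With the per-CPU deferred-free list gone, flush_all() shrinks back to a single on_each_cpu_cond() call: an IPI is sent only to those CPUs whose predicate reports a cpu slab worth flushing. A minimal sketch of that conditional-IPI idiom, with demo_pred() and demo_work() as illustrative callbacks:

#include <linux/smp.h>
#include <linux/gfp.h>

/* evaluated on the calling CPU to decide whether 'cpu' needs the IPI at all */
static bool demo_pred(int cpu, void *info)
{
        return true;    /* e.g. "does this CPU hold a cpu slab for the cache?" */
}

/* runs on each selected CPU, in IPI context */
static void demo_work(void *info)
{
}

static void demo_flush(void *info)
{
        on_each_cpu_cond(demo_pred, demo_work, info, 1, GFP_ATOMIC);
}
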
@@ -2135,10 +2082,10 @@ static unsigned long count_partial(struct kmem_cache_node *n,
unsigned long x = 0;
struct page *page;
- raw_spin_lock_irqsave(&n->list_lock, flags);
+ spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry(page, &n->partial, lru)
x += get_count(page);
- raw_spin_unlock_irqrestore(&n->list_lock, flags);
+ spin_unlock_irqrestore(&n->list_lock, flags);
return x;
}
@@ -2281,11 +2228,9 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
unsigned long addr, struct kmem_cache_cpu *c)
{
- struct slub_free_list *f;
void *freelist;
struct page *page;
unsigned long flags;
- LIST_HEAD(tofree);
local_irq_save(flags);
#ifdef CONFIG_PREEMPT
@@ -2348,13 +2293,7 @@ load_freelist:
VM_BUG_ON(!c->page->frozen);
c->freelist = get_freepointer(s, freelist);
c->tid = next_tid(c->tid);
-out:
- f = &__get_cpu_var(slub_free_list);
- raw_spin_lock(&f->lock);
- list_splice_init(&f->list, &tofree);
- raw_spin_unlock(&f->lock);
local_irq_restore(flags);
- free_delayed(&tofree);
return freelist;
new_slab:
@@ -2372,7 +2311,9 @@ new_slab:
if (unlikely(!freelist)) {
if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
slab_out_of_memory(s, gfpflags, node);
- goto out;
+
+ local_irq_restore(flags);
+ return NULL;
}
page = c->page;
@@ -2387,7 +2328,8 @@ new_slab:
deactivate_slab(s, page, get_freepointer(s, freelist));
c->page = NULL;
c->freelist = NULL;
- goto out;
+ local_irq_restore(flags);
+ return freelist;
}
/*
@@ -2472,10 +2414,6 @@ redo:
if (unlikely(gfpflags & __GFP_ZERO) && object)
memset(object, 0, s->object_size);
-#ifdef CONFIG_PREEMPT_RT_FULL
- if (unlikely(s->ctor) && object)
- s->ctor(object);
-#endif
slab_post_alloc_hook(s, gfpflags, object);
@@ -2563,7 +2501,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
do {
if (unlikely(n)) {
- raw_spin_unlock_irqrestore(&n->list_lock, flags);
+ spin_unlock_irqrestore(&n->list_lock, flags);
n = NULL;
}
prior = page->freelist;
@@ -2595,7 +2533,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
* Otherwise the list_lock will synchronize with
* other processors updating the list of slabs.
*/
- raw_spin_lock_irqsave(&n->list_lock, flags);
+ spin_lock_irqsave(&n->list_lock, flags);
}
}
@@ -2637,7 +2575,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
add_partial(n, page, DEACTIVATE_TO_TAIL);
stat(s, FREE_ADD_PARTIAL);
}
- raw_spin_unlock_irqrestore(&n->list_lock, flags);
+ spin_unlock_irqrestore(&n->list_lock, flags);
return;
slab_empty:
@@ -2651,7 +2589,7 @@ slab_empty:
/* Slab must be on the full list */
remove_full(s, page);
- raw_spin_unlock_irqrestore(&n->list_lock, flags);
+ spin_unlock_irqrestore(&n->list_lock, flags);
stat(s, FREE_SLAB);
discard_slab(s, page);
}
@@ -2853,7 +2791,7 @@ static void
init_kmem_cache_node(struct kmem_cache_node *n)
{
n->nr_partial = 0;
- raw_spin_lock_init(&n->list_lock);
+ spin_lock_init(&n->list_lock);
INIT_LIST_HEAD(&n->partial);
#ifdef CONFIG_SLUB_DEBUG
atomic_long_set(&n->nr_slabs, 0);
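
init_kmem_cache_node() goes back to spin_lock_init(), matching the spinlock_t list_lock in mainline 3.12. A trimmed sketch of the per-node state that lock protects; the real struct kmem_cache_node (in mm/slab.h of this kernel) carries additional members shared with SLAB, so this is illustrative only:

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/atomic.h>

/* simplified view of the per-node SLUB state guarded by list_lock */
struct demo_kmem_cache_node {
        spinlock_t list_lock;           /* protects nr_partial and the lists below */
        unsigned long nr_partial;
        struct list_head partial;       /* partially filled slab pages */
#ifdef CONFIG_SLUB_DEBUG
        atomic_long_t nr_slabs;
        atomic_long_t total_objects;
        struct list_head full;          /* fully used slabs, tracked for debugging */
#endif
};

static void demo_node_init(struct demo_kmem_cache_node *n)
{
        n->nr_partial = 0;
        spin_lock_init(&n->list_lock);
        INIT_LIST_HEAD(&n->partial);
#ifdef CONFIG_SLUB_DEBUG
        atomic_long_set(&n->nr_slabs, 0);
        atomic_long_set(&n->total_objects, 0);
        INIT_LIST_HEAD(&n->full);
#endif
}
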
@@ -3439,7 +3377,7 @@ int kmem_cache_shrink(struct kmem_cache *s)
for (i = 0; i < objects; i++)
INIT_LIST_HEAD(slabs_by_inuse + i);
- raw_spin_lock_irqsave(&n->list_lock, flags);
+ spin_lock_irqsave(&n->list_lock, flags);
/*
* Build lists indexed by the items in use in each slab.
@@ -3460,7 +3398,7 @@ int kmem_cache_shrink(struct kmem_cache *s)
for (i = objects - 1; i > 0; i--)
list_splice(slabs_by_inuse + i, n->partial.prev);
- raw_spin_unlock_irqrestore(&n->list_lock, flags);
+ spin_unlock_irqrestore(&n->list_lock, flags);
/* Release empty slabs */
list_for_each_entry_safe(page, t, slabs_by_inuse, lru)
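
kmem_cache_shrink() keeps its bucketing scheme, now again under the plain spinlock: each partial slab is moved onto a list indexed by how many of its objects are in use, the buckets are spliced back fullest-first, and whatever remains in bucket zero is empty and gets released. A sketch of that bucketing idea; the helper name and parameters are illustrative, not the function's real signature:

#include <linux/list.h>
#include <linux/mm_types.h>

/*
 * Re-order a partial list so nearly full slabs come first.
 * 'buckets' must provide one list_head per possible in-use count.
 */
static void demo_sort_partial(struct list_head *partial,
                              struct list_head *buckets, int objects)
{
        struct page *page, *t;
        int i;

        for (i = 0; i < objects; i++)
                INIT_LIST_HEAD(buckets + i);

        /* bucket each slab page by its number of allocated objects */
        list_for_each_entry_safe(page, t, partial, lru)
                list_move(&page->lru, buckets + page->inuse);

        /* splice back fullest-first; empty slabs stay in buckets[0] for freeing */
        for (i = objects - 1; i > 0; i--)
                list_splice(buckets + i, partial->prev);
}
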
@@ -3636,12 +3574,6 @@ void __init kmem_cache_init(void)
{
static __initdata struct kmem_cache boot_kmem_cache,
boot_kmem_cache_node;
- int cpu;
-
- for_each_possible_cpu(cpu) {
- raw_spin_lock_init(&per_cpu(slub_free_list, cpu).lock);
- INIT_LIST_HEAD(&per_cpu(slub_free_list, cpu).list);
- }
if (debug_guardpage_minorder())
slub_max_order = 0;
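
kmem_cache_init() no longer walks the possible CPUs to set up the -rt deferred-free lists. For reference, the removed lines followed the standard pattern for initializing a DEFINE_PER_CPU() object on every possible CPU; a generic sketch with illustrative names:

#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/init.h>

struct demo_pcpu_list {
        raw_spinlock_t lock;
        struct list_head list;
};
static DEFINE_PER_CPU(struct demo_pcpu_list, demo_pcpu_list);

static void __init demo_pcpu_init(void)
{
        int cpu;

        /* touch every possible CPU's instance, not just the online ones */
        for_each_possible_cpu(cpu) {
                raw_spin_lock_init(&per_cpu(demo_pcpu_list, cpu).lock);
                INIT_LIST_HEAD(&per_cpu(demo_pcpu_list, cpu).list);
        }
}
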
@@ -3946,7 +3878,7 @@ static int validate_slab_node(struct kmem_cache *s,
struct page *page;
unsigned long flags;
- raw_spin_lock_irqsave(&n->list_lock, flags);
+ spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry(page, &n->partial, lru) {
validate_slab_slab(s, page, map);
@@ -3969,7 +3901,7 @@ static int validate_slab_node(struct kmem_cache *s,
atomic_long_read(&n->nr_slabs));
out:
- raw_spin_unlock_irqrestore(&n->list_lock, flags);
+ spin_unlock_irqrestore(&n->list_lock, flags);
return count;
}
@@ -4159,12 +4091,12 @@ static int list_locations(struct kmem_cache *s, char *buf,
if (!atomic_long_read(&n->nr_slabs))
continue;
- raw_spin_lock_irqsave(&n->list_lock, flags);
+ spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry(page, &n->partial, lru)
process_slab(&t, s, page, alloc, map);
list_for_each_entry(page, &n->full, lru)
process_slab(&t, s, page, alloc, map);
- raw_spin_unlock_irqrestore(&n->list_lock, flags);
+ spin_unlock_irqrestore(&n->list_lock, flags);
}
for (i = 0; i < t.count; i++) {