author	Christoph Lameter <cl@linux.com>	2011-06-01 17:25:55 (GMT)
committer	Pekka Enberg <penberg@kernel.org>	2011-07-02 10:26:56 (GMT)
commit	80f08c191f6c9563641291bea80657a3b9faabf0 (patch)
tree	9ad3d121f6a6cd30e317b5819a0d8cb729e2b296
parent	5c2e4bbbd60623f1024a753c291b666068f8a6e7 (diff)
download	linux-fsl-qoriq-80f08c191f6c9563641291bea80657a3b9faabf0.tar.xz
slub: Avoid disabling interrupts in free slowpath
Disabling interrupts can be avoided now. However, list operations still require disabling interrupts, since allocations can occur from interrupt contexts and there is no way to perform atomic list operations. The acquisition of the list_lock therefore has to disable interrupts as well.

Dropping the interrupt handling significantly simplifies the slowpath.

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
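The pattern at work here, reduced to a standalone userspace sketch (illustrative only: demo_free, the need_list_update flag, and the pthread mutex standing in for the kernel's irq-disabling spinlock are all made up for this example): the hot path retries a lockless compare-and-swap on the freelist with no lock held, and mutual exclusion is taken only for the rare list manipulation.

/* Illustrative userspace analogue of the locking pattern, not kernel code. */
#include <stdatomic.h>
#include <stdbool.h>
#include <pthread.h>

struct object { struct object *next; };

static _Atomic(struct object *) freelist;        /* stand-in for page->freelist */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;  /* stand-in for n->list_lock */

static void demo_free(struct object *obj, bool need_list_update)
{
	struct object *prior = atomic_load(&freelist);

	/*
	 * Lockless slowpath: link the object into the freelist with a
	 * CAS retry loop; on failure, prior is reloaded automatically.
	 */
	do {
		obj->next = prior;
	} while (!atomic_compare_exchange_weak(&freelist, &prior, obj));

	/*
	 * Only the list manipulation needs the lock; in the kernel this
	 * is spin_lock_irqsave(&n->list_lock, flags).
	 */
	if (need_list_update) {
		pthread_mutex_lock(&list_lock);
		/* move the slab on/off the partial list here */
		pthread_mutex_unlock(&list_lock);
	}
}

In the kernel the lock side must also disable interrupts, because a free can run from interrupt context and would otherwise deadlock on a list_lock already held on the same CPU; that is exactly what the spin_lock_irqsave() conversion below provides.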
-rw-r--r--	mm/slub.c	16
1 file changed, 5 insertions(+), 11 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 08c57a0..cb6b085 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2197,11 +2197,10 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	struct kmem_cache_node *n = NULL;
 	unsigned long uninitialized_var(flags);
 
-	local_irq_save(flags);
 	stat(s, FREE_SLOWPATH);
 
 	if (kmem_cache_debug(s) && !free_debug_processing(s, page, x, addr))
-		goto out_unlock;
+		return;
 
 	do {
 		prior = page->freelist;
@@ -2220,7 +2219,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 			 * Otherwise the list_lock will synchronize with
 			 * other processors updating the list of slabs.
 			 */
-			spin_lock(&n->list_lock);
+			spin_lock_irqsave(&n->list_lock, flags);
 		}
 		inuse = new.inuse;
 
@@ -2236,7 +2235,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		 */
 		if (was_frozen)
			stat(s, FREE_FROZEN);
-		goto out_unlock;
+		return;
 	}
 
 	/*
@@ -2259,11 +2258,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 			stat(s, FREE_ADD_PARTIAL);
 		}
 	}
-
-	spin_unlock(&n->list_lock);
-
-out_unlock:
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&n->list_lock, flags);
 	return;
 
 slab_empty:
@@ -2275,8 +2270,7 @@ slab_empty:
 		stat(s, FREE_REMOVE_PARTIAL);
 	}
 
-	spin_unlock(&n->list_lock);
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&n->list_lock, flags);
 	stat(s, FREE_SLAB);
 	discard_slab(s, page);
 }
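Condensed from the hunks above, the net transformation is the standard irqsave idiom: interrupts are no longer disabled for the whole slowpath, only while the list_lock is actually held.

/* Before: interrupts off for the entire slowpath. */
local_irq_save(flags);
...
spin_lock(&n->list_lock);
...
spin_unlock(&n->list_lock);
local_irq_restore(flags);

/* After: interrupts off only while list_lock is held. */
spin_lock_irqsave(&n->list_lock, flags);
...
spin_unlock_irqrestore(&n->list_lock, flags);

A side effect visible in the diff is that the early exits (the debug check and the was_frozen case) can simply return instead of jumping to out_unlock, since there is no longer any interrupt state to restore on those paths.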