author     Thomas Gleixner <tglx@linutronix.de>    2012-09-26 14:21:08 (GMT)
committer  Scott Wood <scottwood@freescale.com>    2014-05-14 18:38:49 (GMT)
commit     006c9ab271a7d079ac4fd8312988c4bdc87f526f (patch)
tree       445f15783201edfcd3882b841f332a6f19ad7f6d /net
parent     5d2a2d95f8f9eb70d94c3f554e8f92f23e35a533 (diff)
download   linux-fsl-qoriq-006c9ab271a7d079ac4fd8312988c4bdc87f526f.tar.xz
net: Another local_irq_disable/kmalloc headache
Replace the local_irq_save()/local_irq_restore() protection of the per-CPU netdev allocation cache with a local lock. Though that's pretty inefficient :(

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
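For context, the pattern being introduced looks roughly like the sketch below. It assumes the -rt patch set's <linux/locallock.h> API that the hunks further down use; the my_cache / my_cache_lock / my_cache_bump names are made up for illustration and do not exist in the tree. As far as I understand the locallock API, local_lock_irqsave() falls back to plain local_irq_save() on non-RT builds, while on PREEMPT_RT it takes a per-CPU lock and leaves the section preemptible:

/* Illustrative sketch only: per-CPU data guarded by a local lock instead
 * of a bare local_irq_save()/local_irq_restore() pair.
 */
#include <linux/percpu.h>
#include <linux/locallock.h>

struct my_cache {
	unsigned long hits;
};

static DEFINE_PER_CPU(struct my_cache, my_cache);
static DEFINE_LOCAL_IRQ_LOCK(my_cache_lock);

static unsigned long my_cache_bump(void)
{
	struct my_cache *c;
	unsigned long flags, val;

	/* Serialize access to this CPU's cache. On -rt this is a per-CPU
	 * lock rather than hard interrupt disabling, so the code inside
	 * the section is still allowed to reach sleeping locks.
	 */
	local_lock_irqsave(my_cache_lock, flags);
	c = &__get_cpu_var(my_cache);
	val = ++c->hits;
	local_unlock_irqrestore(my_cache_lock, flags);

	return val;
}

That is presumably the point of the patch: __netdev_alloc_frag() can end up in the page allocator under what used to be local_irq_save(), which does not work on -rt where the allocator itself may sleep.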
Diffstat (limited to 'net')
-rw-r--r--  net/core/skbuff.c  6
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 21571dc..da24627 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -62,6 +62,7 @@
 #include <linux/scatterlist.h>
 #include <linux/errqueue.h>
 #include <linux/prefetch.h>
+#include <linux/locallock.h>
 
 #include <net/protocol.h>
 #include <net/dst.h>
@@ -334,6 +335,7 @@ struct netdev_alloc_cache {
 	unsigned int pagecnt_bias;
 };
 static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
+static DEFINE_LOCAL_IRQ_LOCK(netdev_alloc_lock);
 
 static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 {
@@ -342,7 +344,7 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 	int order;
 	unsigned long flags;
 
-	local_irq_save(flags);
+	local_lock_irqsave(netdev_alloc_lock, flags);
 	nc = &__get_cpu_var(netdev_alloc_cache);
 	if (unlikely(!nc->frag.page)) {
 refill:
@@ -376,7 +378,7 @@ recycle:
 	nc->frag.offset += fragsz;
 	nc->pagecnt_bias--;
 end:
-	local_irq_restore(flags);
+	local_unlock_irqrestore(netdev_alloc_lock, flags);
 	return data;
 }
 