author      Ingo Molnar <mingo@elte.hu>                 2009-07-03 13:29:37 (GMT)
committer   Scott Wood <scottwood@freescale.com>        2014-05-14 18:38:03 (GMT)
commit      85b3b8121f0c31adcd61ce382ac9e89bb5f63cf2 (patch)
tree        a09846332a1260d2287882c2cedde30111c6ce58 /mm
parent      d697065a324409ddfac52e3fd499a91df2e44c77 (diff)
download    linux-fsl-qoriq-85b3b8121f0c31adcd61ce382ac9e89bb5f63cf2.tar.xz
mm: page_alloc: rt-friendly per-cpu pages
rt-friendly per-cpu pages: convert the irqs-off per-cpu locking
method into a preemptible, explicit-per-cpu-locks method.

Contains fixes from:

	Peter Zijlstra <a.p.zijlstra@chello.nl>
	Thomas Gleixner <tglx@linutronix.de>

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
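In outline the conversion is mechanical, and the pattern is worth spelling out once. A minimal before/after sketch (illustrative only, not part of the patch; it uses the locallock API that the diff below pulls in via <linux/locallock.h>, a header supplied by the PREEMPT_RT patch set and not shown here):

	/* The patch defines one per-CPU lock for the page allocator: */
	static DEFINE_LOCAL_IRQ_LOCK(pa_lock);

	unsigned long flags;

	/* Before: per-CPU pagelists are protected by disabling
	 * interrupts on the local CPU. */
	local_irq_save(flags);
	/* ... operate on this CPU's pcp lists ... */
	local_irq_restore(flags);

	/* After: a named, explicit per-CPU lock. On !PREEMPT_RT this
	 * still compiles to local_irq_save()/local_irq_restore(); on
	 * PREEMPT_RT it becomes a per-CPU spinlock, so the section stays
	 * preemptible and another CPU can take the lock to drain the
	 * lists remotely. */
	local_lock_irqsave(pa_lock, flags);
	/* ... operate on this CPU's pcp lists ... */
	local_unlock_irqrestore(pa_lock, flags);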
Diffstat (limited to 'mm')
-rw-r--r--	mm/page_alloc.c | 53 +++++++++++++++++++++++++++++++++++++----------------
1 file changed, 37 insertions(+), 16 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e31f44d..31d643d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -61,6 +61,7 @@
#include <linux/page-debug-flags.h>
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>
+#include <linux/locallock.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
@@ -230,6 +231,18 @@ EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif
+static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
+
+#ifdef CONFIG_PREEMPT_RT_BASE
+# define cpu_lock_irqsave(cpu, flags) \
+ spin_lock_irqsave(&per_cpu(pa_lock, cpu).lock, flags)
+# define cpu_unlock_irqrestore(cpu, flags) \
+ spin_unlock_irqrestore(&per_cpu(pa_lock, cpu).lock, flags)
+#else
+# define cpu_lock_irqsave(cpu, flags) local_irq_save(flags)
+# define cpu_unlock_irqrestore(cpu, flags) local_irq_restore(flags)
+#endif
+
int page_group_by_mobility_disabled __read_mostly;
void set_pageblock_migratetype(struct page *page, int migratetype)
@@ -744,12 +757,12 @@ static void __free_pages_ok(struct page *page, unsigned int order)
if (!free_pages_prepare(page, order))
return;
- local_irq_save(flags);
+ local_lock_irqsave(pa_lock, flags);
__count_vm_events(PGFREE, 1 << order);
migratetype = get_pageblock_migratetype(page);
set_freepage_migratetype(page, migratetype);
free_one_page(page_zone(page), page, order, migratetype);
- local_irq_restore(flags);
+ local_unlock_irqrestore(pa_lock, flags);
}
void __init __free_pages_bootmem(struct page *page, unsigned int order)
@@ -1207,7 +1220,7 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
int to_drain;
unsigned long batch;
- local_irq_save(flags);
+ local_lock_irqsave(pa_lock, flags);
batch = ACCESS_ONCE(pcp->batch);
if (pcp->count >= batch)
to_drain = batch;
@@ -1217,7 +1230,7 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
free_pcppages_bulk(zone, to_drain, pcp);
pcp->count -= to_drain;
}
- local_irq_restore(flags);
+ local_unlock_irqrestore(pa_lock, flags);
}
static bool gfp_thisnode_allocation(gfp_t gfp_mask)
{
@@ -1246,7 +1259,7 @@ static void drain_pages(unsigned int cpu)
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
- local_irq_save(flags);
+ cpu_lock_irqsave(cpu, flags);
pset = per_cpu_ptr(zone->pageset, cpu);
pcp = &pset->pcp;
@@ -1254,7 +1267,7 @@ static void drain_pages(unsigned int cpu)
free_pcppages_bulk(zone, pcp->count, pcp);
pcp->count = 0;
}
- local_irq_restore(flags);
+ cpu_unlock_irqrestore(cpu, flags);
}
}
@@ -1307,7 +1320,12 @@ void drain_all_pages(void)
else
cpumask_clear_cpu(cpu, &cpus_with_pcps);
}
+#ifndef CONFIG_PREEMPT_RT_BASE
on_each_cpu_mask(&cpus_with_pcps, drain_local_pages, NULL, 1);
+#else
+ for_each_cpu(cpu, &cpus_with_pcps)
+ drain_pages(cpu);
+#endif
}
#ifdef CONFIG_HIBERNATION
@@ -1362,7 +1380,7 @@ void free_hot_cold_page(struct page *page, int cold)
migratetype = get_pageblock_migratetype(page);
set_freepage_migratetype(page, migratetype);
- local_irq_save(flags);
+ local_lock_irqsave(pa_lock, flags);
__count_vm_event(PGFREE);
/*
@@ -1393,7 +1411,7 @@ void free_hot_cold_page(struct page *page, int cold)
}
out:
- local_irq_restore(flags);
+ local_unlock_irqrestore(pa_lock, flags);
}
/*
@@ -1523,7 +1541,7 @@ again:
struct per_cpu_pages *pcp;
struct list_head *list;
- local_irq_save(flags);
+ local_lock_irqsave(pa_lock, flags);
pcp = &this_cpu_ptr(zone->pageset)->pcp;
list = &pcp->lists[migratetype];
if (list_empty(list)) {
@@ -1555,13 +1573,15 @@ again:
*/
WARN_ON_ONCE(order > 1);
}
- spin_lock_irqsave(&zone->lock, flags);
+ local_spin_lock_irqsave(pa_lock, &zone->lock, flags);
page = __rmqueue(zone, order, migratetype);
- spin_unlock(&zone->lock);
- if (!page)
+ if (!page) {
+ spin_unlock(&zone->lock);
goto failed;
+ }
__mod_zone_freepage_state(zone, -(1 << order),
get_pageblock_migratetype(page));
+ spin_unlock(&zone->lock);
}
/*
@@ -1573,7 +1593,7 @@ again:
__count_zone_vm_events(PGALLOC, zone, 1 << order);
zone_statistics(preferred_zone, zone, gfp_flags);
- local_irq_restore(flags);
+ local_unlock_irqrestore(pa_lock, flags);
VM_BUG_ON(bad_range(zone, page));
if (prep_new_page(page, order, gfp_flags))
@@ -1581,7 +1601,7 @@ again:
return page;
failed:
- local_irq_restore(flags);
+ local_unlock_irqrestore(pa_lock, flags);
return NULL;
}
@@ -5463,6 +5483,7 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
void __init page_alloc_init(void)
{
hotcpu_notifier(page_alloc_cpu_notify, 0);
+ local_irq_lock_init(pa_lock);
}
/*
@@ -6326,7 +6347,7 @@ void zone_pcp_reset(struct zone *zone)
struct per_cpu_pageset *pset;
/* avoid races with drain_pages() */
- local_irq_save(flags);
+ local_lock_irqsave(pa_lock, flags);
if (zone->pageset != &boot_pageset) {
for_each_online_cpu(cpu) {
pset = per_cpu_ptr(zone->pageset, cpu);
@@ -6335,7 +6356,7 @@ void zone_pcp_reset(struct zone *zone)
free_percpu(zone->pageset);
zone->pageset = &boot_pageset;
}
- local_irq_restore(flags);
+ local_unlock_irqrestore(pa_lock, flags);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
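Two hunks above deserve a note beyond the mechanical substitution. First, on PREEMPT_RT drain_all_pages() no longer sends IPIs: since pa_lock is a real per-CPU spinlock there, drain_pages(cpu) can acquire the remote CPU's lock through cpu_lock_irqsave() and free its pcp lists directly instead of interrupting that CPU. Second, in the allocation path (the hunks labelled "again:"), spin_lock_irqsave(&zone->lock, flags) becomes local_spin_lock_irqsave(pa_lock, &zone->lock, flags), and spin_unlock(&zone->lock) moves after __mod_zone_freepage_state() so the freepage counters are updated while zone->lock is still held. That helper is defined in <linux/locallock.h>, not in this diff; its presumed shape (an assumption, inferred from how the hunk pairs it with local_unlock_irqrestore()) is:

	/* Presumed locallock helper: take the per-CPU lock first, then
	 * the zone spinlock. Only pa_lock is released by the matching
	 * local_unlock_irqrestore(pa_lock, flags) at the end of the
	 * path, which is why the hunk drops zone->lock explicitly with
	 * spin_unlock(). */
	#define local_spin_lock_irqsave(lvar, lock, flags)	\
		do {						\
			local_lock_irqsave(lvar, flags);	\
			spin_lock(lock);			\
		} while (0)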