author     Ingo Molnar <mingo@elte.hu>             2009-07-03 13:29:37 (GMT)
committer  Scott Wood <scottwood@freescale.com>    2013-04-04 22:09:16 (GMT)
commit     478022118fa8b49b03dafbbee339698863a76920 (patch)
tree       b415738a259b0277f53645cd5d2f56451e0e2a04 /mm
parent     dc9291821ce438a834c81c4860c78e7e824b6d02 (diff)
download   linux-fsl-qoriq-478022118fa8b49b03dafbbee339698863a76920.tar.xz
mm: page_alloc: rt-friendly per-cpu pages
rt-friendly per-cpu pages: convert the irqs-off per-cpu locking
method into a preemptible, explicit-per-cpu-locks method.
Contains fixes from:
Peter Zijlstra <a.p.zijlstra@chello.nl>
Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
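
For readers unfamiliar with the -rt locking primitives: the snippet below is a
minimal sketch of the conversion this patch applies, assuming the local-lock
API provided by <linux/locallock.h> in the -rt patch series (the function
names pcp_op_old/pcp_op_new are hypothetical, for illustration only). On
!PREEMPT_RT the local lock compiles down to plain interrupt disabling, so
mainline behaviour is unchanged; on PREEMPT_RT it becomes a per-cpu spinlock,
so the critical section stays preemptible.

	/* Before: protect this CPU's pageset by disabling interrupts. */
	static void pcp_op_old(void)
	{
		unsigned long flags;

		local_irq_save(flags);
		/* ... operate on this CPU's per-cpu page lists ... */
		local_irq_restore(flags);
	}

	/* After: a named per-cpu local lock (pa_lock, as in the patch). */
	static DEFINE_LOCAL_IRQ_LOCK(pa_lock);

	static void pcp_op_new(void)
	{
		unsigned long flags;

		/* !RT: local_irq_save(); RT: per-cpu spinlock, preemptible */
		local_lock_irqsave(pa_lock, flags);
		/* ... operate on this CPU's per-cpu page lists ... */
		local_unlock_irqrestore(pa_lock, flags);
	}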
Diffstat (limited to 'mm')
-rw-r--r--  mm/page_alloc.c  57
1 file changed, 39 insertions(+), 18 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f61bc0f..e93a0d8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -58,6 +58,7 @@
 #include <linux/prefetch.h>
 #include <linux/migrate.h>
 #include <linux/page-debug-flags.h>
+#include <linux/locallock.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -219,6 +220,18 @@ EXPORT_SYMBOL(nr_node_ids);
 EXPORT_SYMBOL(nr_online_nodes);
 #endif
 
+static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
+
+#ifdef CONFIG_PREEMPT_RT_BASE
+# define cpu_lock_irqsave(cpu, flags)		\
+	spin_lock_irqsave(&per_cpu(pa_lock, cpu).lock, flags)
+# define cpu_unlock_irqrestore(cpu, flags)	\
+	spin_unlock_irqrestore(&per_cpu(pa_lock, cpu).lock, flags)
+#else
+# define cpu_lock_irqsave(cpu, flags)		local_irq_save(flags)
+# define cpu_unlock_irqrestore(cpu, flags)	local_irq_restore(flags)
+#endif
+
 int page_group_by_mobility_disabled __read_mostly;
 
 void set_pageblock_migratetype(struct page *page, int migratetype)
@@ -722,12 +735,12 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 	if (!free_pages_prepare(page, order))
 		return;
 
-	local_irq_save(flags);
+	local_lock_irqsave(pa_lock, flags);
 	__count_vm_events(PGFREE, 1 << order);
 	migratetype = get_pageblock_migratetype(page);
 	set_freepage_migratetype(page, migratetype);
 	free_one_page(page_zone(page), page, order, migratetype);
-	local_irq_restore(flags);
+	local_unlock_irqrestore(pa_lock, flags);
 }
 
 /*
@@ -1169,7 +1182,7 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
 	unsigned long flags;
 	int to_drain;
 
-	local_irq_save(flags);
+	local_lock_irqsave(pa_lock, flags);
 	if (pcp->count >= pcp->batch)
 		to_drain = pcp->batch;
 	else
@@ -1178,7 +1191,7 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
 		free_pcppages_bulk(zone, to_drain, pcp);
 		pcp->count -= to_drain;
 	}
-	local_irq_restore(flags);
+	local_unlock_irqrestore(pa_lock, flags);
 }
 #endif
 
@@ -1198,7 +1211,7 @@ static void drain_pages(unsigned int cpu)
 		struct per_cpu_pageset *pset;
 		struct per_cpu_pages *pcp;
 
-		local_irq_save(flags);
+		cpu_lock_irqsave(cpu, flags);
 		pset = per_cpu_ptr(zone->pageset, cpu);
 
 		pcp = &pset->pcp;
@@ -1206,7 +1219,7 @@ static void drain_pages(unsigned int cpu)
 			free_pcppages_bulk(zone, pcp->count, pcp);
 			pcp->count = 0;
 		}
-		local_irq_restore(flags);
+		cpu_unlock_irqrestore(cpu, flags);
 	}
 }
 
@@ -1259,7 +1272,12 @@ void drain_all_pages(void)
 		else
 			cpumask_clear_cpu(cpu, &cpus_with_pcps);
 	}
+#ifndef CONFIG_PREEMPT_RT_BASE
 	on_each_cpu_mask(&cpus_with_pcps, drain_local_pages, NULL, 1);
+#else
+	for_each_cpu(cpu, &cpus_with_pcps)
+		drain_pages(cpu);
+#endif
 }
 
 #ifdef CONFIG_HIBERNATION
@@ -1314,7 +1332,7 @@ void free_hot_cold_page(struct page *page, int cold)
 
 	migratetype = get_pageblock_migratetype(page);
 	set_freepage_migratetype(page, migratetype);
-	local_irq_save(flags);
+	local_lock_irqsave(pa_lock, flags);
 	__count_vm_event(PGFREE);
 
 	/*
@@ -1344,7 +1362,7 @@ void free_hot_cold_page(struct page *page, int cold)
 	}
 
 out:
-	local_irq_restore(flags);
+	local_unlock_irqrestore(pa_lock, flags);
 }
 
 /*
@@ -1473,7 +1491,7 @@ again:
 		struct per_cpu_pages *pcp;
 		struct list_head *list;
 
-		local_irq_save(flags);
+		local_lock_irqsave(pa_lock, flags);
 		pcp = &this_cpu_ptr(zone->pageset)->pcp;
 		list = &pcp->lists[migratetype];
 		if (list_empty(list)) {
@@ -1505,18 +1523,20 @@ again:
 			 */
 			WARN_ON_ONCE(order > 1);
 		}
-		spin_lock_irqsave(&zone->lock, flags);
+		local_spin_lock_irqsave(pa_lock, &zone->lock, flags);
 		page = __rmqueue(zone, order, migratetype);
-		spin_unlock(&zone->lock);
-		if (!page)
+		if (!page) {
+			spin_unlock(&zone->lock);
 			goto failed;
+		}
 		__mod_zone_freepage_state(zone, -(1 << order),
 					  get_pageblock_migratetype(page));
+		spin_unlock(&zone->lock);
 	}
 
 	__count_zone_vm_events(PGALLOC, zone, 1 << order);
 	zone_statistics(preferred_zone, zone, gfp_flags);
-	local_irq_restore(flags);
+	local_unlock_irqrestore(pa_lock, flags);
 
 	VM_BUG_ON(bad_range(zone, page));
 	if (prep_new_page(page, order, gfp_flags))
@@ -1524,7 +1544,7 @@ again:
 	return page;
 
 failed:
-	local_irq_restore(flags);
+	local_unlock_irqrestore(pa_lock, flags);
 	return NULL;
 }
 
@@ -5133,6 +5153,7 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
 void __init page_alloc_init(void)
 {
 	hotcpu_notifier(page_alloc_cpu_notify, 0);
+	local_irq_lock_init(pa_lock);
 }
 
 /*
@@ -5965,12 +5986,12 @@ static int __meminit __zone_pcp_update(void *data)
 		pset = per_cpu_ptr(zone->pageset, cpu);
 		pcp = &pset->pcp;
 
-		local_irq_save(flags);
+		cpu_lock_irqsave(cpu, flags);
 		if (pcp->count > 0)
 			free_pcppages_bulk(zone, pcp->count, pcp);
 		drain_zonestat(zone, pset);
 		setup_pageset(pset, batch);
-		local_irq_restore(flags);
+		cpu_unlock_irqrestore(cpu, flags);
 	}
 	return 0;
 }
@@ -5988,7 +6009,7 @@ void zone_pcp_reset(struct zone *zone)
 	struct per_cpu_pageset *pset;
 
 	/* avoid races with drain_pages() */
-	local_irq_save(flags);
+	local_lock_irqsave(pa_lock, flags);
 	if (zone->pageset != &boot_pageset) {
 		for_each_online_cpu(cpu) {
 			pset = per_cpu_ptr(zone->pageset, cpu);
@@ -5997,7 +6018,7 @@ void zone_pcp_reset(struct zone *zone)
 		free_percpu(zone->pageset);
 		zone->pageset = &boot_pageset;
 	}
-	local_irq_restore(flags);
+	local_unlock_irqrestore(pa_lock, flags);
 }
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
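
A design note on the drain_all_pages() hunk above: mainline drains remote
per-cpu lists by sending an IPI to every CPU that holds pages
(on_each_cpu_mask()), so each CPU drains itself with interrupts off. On
PREEMPT_RT, pa_lock is a real per-cpu spinlock, so the draining CPU can
instead take each remote CPU's lock instance and do the work itself, with no
IPIs. A minimal sketch of that idea, using the cpu_lock_irqsave() helpers
defined in this patch (the drain_remote() wrapper is hypothetical, for
illustration only):

	/*
	 * On PREEMPT_RT, per_cpu(pa_lock, cpu) is a spinlock, so one CPU
	 * may lock another CPU's instance and drain its lists directly.
	 * This is what the RT branch of drain_all_pages() relies on when
	 * it calls drain_pages(cpu) in a loop instead of raising IPIs.
	 */
	static void drain_remote(unsigned int cpu)
	{
		unsigned long flags;

		cpu_lock_irqsave(cpu, flags);	/* remote spinlock on RT */
		/* ... free that CPU's pcp pages, as drain_pages() does ... */
		cpu_unlock_irqrestore(cpu, flags);
	}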