Diffstat (limited to 'mm/page_alloc.c')
 mm/page_alloc.c | 149
 1 file changed, 47 insertions(+), 102 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f200af2..6fca390 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -61,7 +61,6 @@
#include <linux/page-debug-flags.h>
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>
-#include <linux/locallock.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
@@ -231,18 +230,6 @@ EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif
-static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
-
-#ifdef CONFIG_PREEMPT_RT_BASE
-# define cpu_lock_irqsave(cpu, flags) \
- local_lock_irqsave_on(pa_lock, flags, cpu)
-# define cpu_unlock_irqrestore(cpu, flags) \
- local_unlock_irqrestore_on(pa_lock, flags, cpu)
-#else
-# define cpu_lock_irqsave(cpu, flags) local_irq_save(flags)
-# define cpu_unlock_irqrestore(cpu, flags) local_irq_restore(flags)
-#endif
-
int page_group_by_mobility_disabled __read_mostly;
void set_pageblock_migratetype(struct page *page, int migratetype)
@@ -382,9 +369,11 @@ void prep_compound_page(struct page *page, unsigned long order)
__SetPageHead(page);
for (i = 1; i < nr_pages; i++) {
struct page *p = page + i;
- __SetPageTail(p);
set_page_count(p, 0);
p->first_page = page;
+ /* Make sure p->first_page is always valid for PageTail() */
+ smp_wmb();
+ __SetPageTail(p);
}
}
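
The hunk above reorders prep_compound_page() so that p->first_page is stored, an smp_wmb() is issued, and only then the tail bit is set; any code that observes PageTail(p) therefore also sees a valid p->first_page. Below is a minimal user-space sketch of that publish/observe pairing, using C11 release/acquire atomics as a stand-in for the kernel's smp_wmb() and page-flag test; struct fake_page, publish_tail() and read_head() are hypothetical names, not kernel API.

#include <stdatomic.h>
#include <stdio.h>

struct fake_page {
	struct fake_page *first_page;	/* head page, as in the hunk above */
	atomic_int tail;		/* stands in for the PageTail flag bit */
};

/* Writer side: mirrors the order prep_compound_page() now uses. */
static void publish_tail(struct fake_page *head, struct fake_page *p)
{
	p->first_page = head;
	/* release ordering plays the role of smp_wmb() before __SetPageTail() */
	atomic_store_explicit(&p->tail, 1, memory_order_release);
}

/* Reader side: only dereferences first_page once the tail bit is seen. */
static struct fake_page *read_head(struct fake_page *p)
{
	if (atomic_load_explicit(&p->tail, memory_order_acquire))
		return p->first_page;
	return NULL;
}

static struct fake_page head, tail_page;	/* static storage: zero-initialized */

int main(void)
{
	publish_tail(&head, &tail_page);
	printf("head %p, seen %p\n", (void *)&head, (void *)read_head(&tail_page));
	return 0;
}

With the pre-patch order (tail bit set before first_page), a concurrent reader could pass the flag test and still read a stale first_page; moving the flag store after the barrier closes that window.
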
@@ -646,7 +635,7 @@ static inline int free_pages_check(struct page *page)
}
/*
- * Frees a number of pages which have been collected from the pcp lists.
+ * Frees a number of pages from the PCP lists
* Assumes all pages on list are in same zone, and of same order.
* count is the number of pages to free.
*
@@ -657,49 +646,15 @@ static inline int free_pages_check(struct page *page)
* pinned" detection logic.
*/
static void free_pcppages_bulk(struct zone *zone, int count,
- struct list_head *list)
+ struct per_cpu_pages *pcp)
{
+ int migratetype = 0;
+ int batch_free = 0;
int to_free = count;
- unsigned long flags;
- spin_lock_irqsave(&zone->lock, flags);
+ spin_lock(&zone->lock);
zone->pages_scanned = 0;
- while (!list_empty(list)) {
- struct page *page = list_first_entry(list, struct page, lru);
- int mt; /* migratetype of the to-be-freed page */
-
- /* must delete as __free_one_page list manipulates */
- list_del(&page->lru);
-
- mt = get_freepage_migratetype(page);
- /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
- __free_one_page(page, zone, 0, mt);
- trace_mm_page_pcpu_drain(page, 0, mt);
- if (likely(!is_migrate_isolate_page(page))) {
- __mod_zone_page_state(zone, NR_FREE_PAGES, 1);
- if (is_migrate_cma(mt))
- __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 1);
- }
-
- to_free--;
- }
- WARN_ON(to_free != 0);
- spin_unlock_irqrestore(&zone->lock, flags);
-}
-
-/*
- * Moves a number of pages from the PCP lists to free list which
- * is freed outside of the locked region.
- *
- * Assumes all pages on list are in same zone, and of same order.
- * count is the number of pages to free.
- */
-static void isolate_pcp_pages(int to_free, struct per_cpu_pages *src,
- struct list_head *dst)
-{
- int migratetype = 0, batch_free = 0;
-
while (to_free) {
struct page *page;
struct list_head *list;
@@ -715,7 +670,7 @@ static void isolate_pcp_pages(int to_free, struct per_cpu_pages *src,
batch_free++;
if (++migratetype == MIGRATE_PCPTYPES)
migratetype = 0;
- list = &src->lists[migratetype];
+ list = &pcp->lists[migratetype];
} while (list_empty(list));
/* This is the only non-empty list. Free them all. */
@@ -723,25 +678,35 @@ static void isolate_pcp_pages(int to_free, struct per_cpu_pages *src,
batch_free = to_free;
do {
- page = list_last_entry(list, struct page, lru);
+ int mt; /* migratetype of the to-be-freed page */
+
+ page = list_entry(list->prev, struct page, lru);
+ /* must delete as __free_one_page list manipulates */
list_del(&page->lru);
- list_add(&page->lru, dst);
+ mt = get_freepage_migratetype(page);
+ /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
+ __free_one_page(page, zone, 0, mt);
+ trace_mm_page_pcpu_drain(page, 0, mt);
+ if (likely(!is_migrate_isolate_page(page))) {
+ __mod_zone_page_state(zone, NR_FREE_PAGES, 1);
+ if (is_migrate_cma(mt))
+ __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 1);
+ }
} while (--to_free && --batch_free && !list_empty(list));
}
+ spin_unlock(&zone->lock);
}
static void free_one_page(struct zone *zone, struct page *page, int order,
int migratetype)
{
- unsigned long flags;
-
- spin_lock_irqsave(&zone->lock, flags);
+ spin_lock(&zone->lock);
zone->pages_scanned = 0;
__free_one_page(page, zone, order, migratetype);
if (unlikely(!is_migrate_isolate(migratetype)))
__mod_zone_freepage_state(zone, 1 << order, migratetype);
- spin_unlock_irqrestore(&zone->lock, flags);
+ spin_unlock(&zone->lock);
}
static bool free_pages_prepare(struct page *page, unsigned int order)
@@ -779,12 +744,12 @@ static void __free_pages_ok(struct page *page, unsigned int order)
if (!free_pages_prepare(page, order))
return;
- local_lock_irqsave(pa_lock, flags);
+ local_irq_save(flags);
__count_vm_events(PGFREE, 1 << order);
migratetype = get_pageblock_migratetype(page);
set_freepage_migratetype(page, migratetype);
free_one_page(page_zone(page), page, order, migratetype);
- local_unlock_irqrestore(pa_lock, flags);
+ local_irq_restore(flags);
}
void __init __free_pages_bootmem(struct page *page, unsigned int order)
@@ -1239,22 +1204,20 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
unsigned long flags;
- LIST_HEAD(dst);
int to_drain;
unsigned long batch;
- local_lock_irqsave(pa_lock, flags);
+ local_irq_save(flags);
batch = ACCESS_ONCE(pcp->batch);
if (pcp->count >= batch)
to_drain = batch;
else
to_drain = pcp->count;
if (to_drain > 0) {
- isolate_pcp_pages(to_drain, pcp, &dst);
+ free_pcppages_bulk(zone, to_drain, pcp);
pcp->count -= to_drain;
}
- local_unlock_irqrestore(pa_lock, flags);
- free_pcppages_bulk(zone, to_drain, &dst);
+ local_irq_restore(flags);
}
static bool gfp_thisnode_allocation(gfp_t gfp_mask)
{
@@ -1282,21 +1245,16 @@ static void drain_pages(unsigned int cpu)
for_each_populated_zone(zone) {
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
- LIST_HEAD(dst);
- int count;
- cpu_lock_irqsave(cpu, flags);
+ local_irq_save(flags);
pset = per_cpu_ptr(zone->pageset, cpu);
pcp = &pset->pcp;
- count = pcp->count;
- if (count) {
- isolate_pcp_pages(count, pcp, &dst);
+ if (pcp->count) {
+ free_pcppages_bulk(zone, pcp->count, pcp);
pcp->count = 0;
}
- cpu_unlock_irqrestore(cpu, flags);
- if (count)
- free_pcppages_bulk(zone, count, &dst);
+ local_irq_restore(flags);
}
}
@@ -1349,12 +1307,7 @@ void drain_all_pages(void)
else
cpumask_clear_cpu(cpu, &cpus_with_pcps);
}
-#ifndef CONFIG_PREEMPT_RT_BASE
on_each_cpu_mask(&cpus_with_pcps, drain_local_pages, NULL, 1);
-#else
- for_each_cpu(cpu, &cpus_with_pcps)
- drain_pages(cpu);
-#endif
}
#ifdef CONFIG_HIBERNATION
@@ -1409,7 +1362,7 @@ void free_hot_cold_page(struct page *page, int cold)
migratetype = get_pageblock_migratetype(page);
set_freepage_migratetype(page, migratetype);
- local_lock_irqsave(pa_lock, flags);
+ local_irq_save(flags);
__count_vm_event(PGFREE);
/*
@@ -1435,17 +1388,12 @@ void free_hot_cold_page(struct page *page, int cold)
pcp->count++;
if (pcp->count >= pcp->high) {
unsigned long batch = ACCESS_ONCE(pcp->batch);
- LIST_HEAD(dst);
-
- isolate_pcp_pages(batch, pcp, &dst);
+ free_pcppages_bulk(zone, batch, pcp);
pcp->count -= batch;
- local_unlock_irqrestore(pa_lock, flags);
- free_pcppages_bulk(zone, batch, &dst);
- return;
}
out:
- local_unlock_irqrestore(pa_lock, flags);
+ local_irq_restore(flags);
}
/*
@@ -1575,7 +1523,7 @@ again:
struct per_cpu_pages *pcp;
struct list_head *list;
- local_lock_irqsave(pa_lock, flags);
+ local_irq_save(flags);
pcp = &this_cpu_ptr(zone->pageset)->pcp;
list = &pcp->lists[migratetype];
if (list_empty(list)) {
@@ -1607,15 +1555,13 @@ again:
*/
WARN_ON_ONCE(order > 1);
}
- local_spin_lock_irqsave(pa_lock, &zone->lock, flags);
+ spin_lock_irqsave(&zone->lock, flags);
page = __rmqueue(zone, order, migratetype);
- if (!page) {
- spin_unlock(&zone->lock);
+ spin_unlock(&zone->lock);
+ if (!page)
goto failed;
- }
__mod_zone_freepage_state(zone, -(1 << order),
get_pageblock_migratetype(page));
- spin_unlock(&zone->lock);
}
/*
@@ -1627,7 +1573,7 @@ again:
__count_zone_vm_events(PGALLOC, zone, 1 << order);
zone_statistics(preferred_zone, zone, gfp_flags);
- local_unlock_irqrestore(pa_lock, flags);
+ local_irq_restore(flags);
VM_BUG_ON(bad_range(zone, page));
if (prep_new_page(page, order, gfp_flags))
@@ -1635,7 +1581,7 @@ again:
return page;
failed:
- local_unlock_irqrestore(pa_lock, flags);
+ local_irq_restore(flags);
return NULL;
}
@@ -2314,8 +2260,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
struct page *page;
/* Page migration frees to the PCP lists but we want merging */
- drain_pages(get_cpu_light());
- put_cpu_light();
+ drain_pages(get_cpu());
+ put_cpu();
page = get_page_from_freelist(gfp_mask, nodemask,
order, zonelist, high_zoneidx,
@@ -5517,7 +5463,6 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
void __init page_alloc_init(void)
{
hotcpu_notifier(page_alloc_cpu_notify, 0);
- local_irq_lock_init(pa_lock);
}
/*
@@ -6381,7 +6326,7 @@ void zone_pcp_reset(struct zone *zone)
struct per_cpu_pageset *pset;
/* avoid races with drain_pages() */
- local_lock_irqsave(pa_lock, flags);
+ local_irq_save(flags);
if (zone->pageset != &boot_pageset) {
for_each_online_cpu(cpu) {
pset = per_cpu_ptr(zone->pageset, cpu);
@@ -6390,7 +6335,7 @@ void zone_pcp_reset(struct zone *zone)
free_percpu(zone->pageset);
zone->pageset = &boot_pageset;
}
- local_unlock_irqrestore(pa_lock, flags);
+ local_irq_restore(flags);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
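
For reference on the restored free_pcppages_bulk(): it now takes zone->lock with a plain spin_lock(), so callers such as drain_zone_pages(), drain_pages() and free_hot_cold_page() disable interrupts with local_irq_save() before calling it, as the hunks above show. The round-robin draining loop it regains can also be sketched stand-alone; the snippet below is a user-space approximation under stated assumptions, with hypothetical names (drain_bulk(), plain integer counts instead of the kernel's per-migratetype page lists).

/* User-space sketch of free_pcppages_bulk()'s round-robin loop; not kernel code. */
#include <stdio.h>

#define NR_PCPTYPES 3			/* stands in for MIGRATE_PCPTYPES */

static int lists[NR_PCPTYPES];		/* pages left on each pcp list */

static void drain_bulk(int to_free)
{
	int migratetype = 0, batch_free = 0;

	while (to_free) {
		int *list;

		/*
		 * Walk the lists round-robin; batch_free grows by one for
		 * every empty list passed, so fuller lists give up more
		 * pages per visit instead of spinning over empty ones.
		 */
		do {
			batch_free++;
			if (++migratetype == NR_PCPTYPES)
				migratetype = 0;
			list = &lists[migratetype];
		} while (*list == 0);

		/* all other lists were empty: take everything from this one */
		if (batch_free == NR_PCPTYPES)
			batch_free = to_free;

		do {
			(*list)--;	/* stands in for list_del() + __free_one_page() */
			printf("freed a page from list %d\n", migratetype);
		} while (--to_free && --batch_free && *list);
	}
}

int main(void)
{
	lists[0] = 2;
	lists[1] = 0;
	lists[2] = 5;
	drain_bulk(4);			/* drains 4 pages across lists 0 and 2 */
	return 0;
}

The kernel version additionally adjusts NR_FREE_PAGES and NR_FREE_CMA_PAGES per freed page and skips isolated pages, which the sketch omits.
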