path: root/mm/swap.c
author    Scott Wood <scottwood@freescale.com>  2014-05-14 18:19:12 (GMT)
committer Scott Wood <scottwood@freescale.com>  2014-05-14 18:37:18 (GMT)
commit    86ba38e6f5f2fbfe9b49e153ea89593b26482019 (patch)
tree      f99d2906b0eafca507f37289e68052fc105cc2dc /mm/swap.c
parent    07c8b57b111585a617b2b456497fc9b33c00743c (diff)
Reset to 3.12.19
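For mm/swap.c, resetting to 3.12.19 means dropping the PREEMPT_RT local-lock annotations (rotate_lock, swapvec_lock) and the compound_trans_head() call sites, returning to the vanilla per-CPU idioms visible in the hunks below.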
Diffstat (limited to 'mm/swap.c')
 mm/swap.c | 38 ++++++++++++++++----------------------
1 file changed, 16 insertions(+), 22 deletions(-)
diff --git a/mm/swap.c b/mm/swap.c
index b55ee9c..aa4da5d 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -32,7 +32,6 @@
#include <linux/gfp.h>
#include <linux/uio.h>
#include <linux/hugetlb.h>
-#include <linux/locallock.h>
#include "internal.h"
@@ -46,9 +45,6 @@ static DEFINE_PER_CPU(struct pagevec, lru_add_pvec);
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
-static DEFINE_LOCAL_IRQ_LOCK(rotate_lock);
-static DEFINE_LOCAL_IRQ_LOCK(swapvec_lock);
-
/*
* This path almost never happens for VM activity - pages are normally
* freed via pagevecs. But it gets used by networking.
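For reference, a minimal sketch of the two idioms this revert toggles between. <linux/locallock.h> and DEFINE_LOCAL_IRQ_LOCK() come from the PREEMPT_RT patchset (not mainline at this point): a local lock is, roughly, a per-CPU lock, so the critical section stays preemptible on RT while still serializing access to the per-CPU pagevecs. The vanilla code restored here disables preemption (or interrupts) instead. The bodies below are simplified assumptions, not the exact implementations:

/*
 * RT idiom (removed): take a per-CPU lock around the per-CPU data;
 * the section can be preempted by a higher-priority RT task.
 */
static DEFINE_LOCAL_IRQ_LOCK(swapvec_lock);

static void sketch_rt(struct page *page)
{
	struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);
	/* ... touch this CPU's pagevec ... */
	put_locked_var(swapvec_lock, lru_add_pvec);
}

/*
 * Vanilla idiom (restored): disable preemption so nothing else on
 * this CPU can run and touch the same per-CPU variable.
 */
static void sketch_vanilla(struct page *page)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
	/* ... touch this CPU's pagevec ... */
	put_cpu_var(lru_add_pvec);
}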
@@ -88,7 +84,7 @@ static void put_compound_page(struct page *page)
{
if (unlikely(PageTail(page))) {
/* __split_huge_page_refcount can run under us */
- struct page *page_head = compound_trans_head(page);
+ struct page *page_head = compound_head(page);
if (likely(page != page_head &&
get_page_unless_zero(page_head))) {
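Both compound_trans_head() call sites become compound_head(). In 3.12-era kernels, compound_trans_head() existed because a THP split could race with the lookup of a tail page's head; mainline subsequently made compound_head() itself tolerate that race ("mm: close PageTail race") and dropped the _trans_ variant, and 3.12.19 stable evidently carries that change, or this hunk would not build. Roughly, the race-safe helper of that era (from memory; page->first_page was replaced by a dedicated compound_head field in later kernels):

static inline struct page *compound_head(struct page *page)
{
	if (unlikely(PageTail(page))) {
		struct page *head = page->first_page;
		/* pairs with the THP split code: recheck that the
		 * page is still a tail before trusting 'head' */
		smp_rmb();
		if (likely(PageTail(page)))
			return head;
	}
	return page;
}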
@@ -226,7 +222,7 @@ bool __get_page_tail(struct page *page)
*/
unsigned long flags;
bool got = false;
- struct page *page_head = compound_trans_head(page);
+ struct page *page_head = compound_head(page);
if (likely(page != page_head && get_page_unless_zero(page_head))) {
/* Ref to put_compound_page() comment. */
@@ -412,11 +408,11 @@ void rotate_reclaimable_page(struct page *page)
unsigned long flags;
page_cache_get(page);
- local_lock_irqsave(rotate_lock, flags);
+ local_irq_save(flags);
pvec = &__get_cpu_var(lru_rotate_pvecs);
if (!pagevec_add(pvec, page))
pagevec_move_tail(pvec);
- local_unlock_irqrestore(rotate_lock, flags);
+ local_irq_restore(flags);
}
}
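lru_rotate_pvecs is the odd one out: it is also drained from interrupt context when writeback completes, so disabling preemption alone is not enough and the vanilla code masks interrupts on the local CPU. Masking interrupts is exactly what PREEMPT_RT tries to avoid, which is why the RT patchset had routed this path through rotate_lock. An annotated sketch of the restored pattern (the comments are mine):

	unsigned long flags;

	page_cache_get(page);             /* hold a ref while parked */
	local_irq_save(flags);            /* IRQ handlers touch this too */
	pvec = &__get_cpu_var(lru_rotate_pvecs);
	if (!pagevec_add(pvec, page))     /* 0 slots left: vector full */
		pagevec_move_tail(pvec);  /* flush the whole batch */
	local_irq_restore(flags);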
@@ -467,13 +463,12 @@ static bool need_activate_page_drain(int cpu)
void activate_page(struct page *page)
{
if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
- struct pagevec *pvec = &get_locked_var(swapvec_lock,
- activate_page_pvecs);
+ struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
page_cache_get(page);
if (!pagevec_add(pvec, page))
pagevec_lru_move_fn(pvec, __activate_page, NULL);
- put_locked_var(swapvec_lock, activate_page_pvecs);
+ put_cpu_var(activate_page_pvecs);
}
}
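The swapvec_lock sites all collapse back to get_cpu_var()/put_cpu_var(), which bracket the access by disabling and re-enabling preemption; that alone makes the lock-free per-CPU pagevec safe here, since these paths never run from interrupt context. A simplified sketch of the pair (an assumption from memory, not the exact <linux/percpu.h> definitions):

/* return this CPU's copy of a DEFINE_PER_CPU variable, with
 * preemption disabled until the matching put_cpu_var() */
#define get_cpu_var(var)  (*({ preempt_disable(); &__get_cpu_var(var); }))
#define put_cpu_var(var)  preempt_enable()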
@@ -499,7 +494,7 @@ void activate_page(struct page *page)
static void __lru_cache_activate_page(struct page *page)
{
- struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);
+ struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
int i;
/*
@@ -521,7 +516,7 @@ static void __lru_cache_activate_page(struct page *page)
}
}
- put_locked_var(swapvec_lock, lru_add_pvec);
+ put_cpu_var(lru_add_pvec);
}
/*
@@ -561,13 +556,13 @@ EXPORT_SYMBOL(mark_page_accessed);
*/
void __lru_cache_add(struct page *page)
{
- struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);
+ struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
page_cache_get(page);
if (!pagevec_space(pvec))
__pagevec_lru_add(pvec);
pagevec_add(pvec, page);
- put_locked_var(swapvec_lock, lru_add_pvec);
+ put_cpu_var(lru_add_pvec);
}
EXPORT_SYMBOL(__lru_cache_add);
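All of these paths share one batching scheme: pages are parked in a small per-CPU vector and moved to the LRU lists in one pass, so the zone's lru_lock is taken once per batch rather than once per page. The structure is roughly (PAGEVEC_SIZE was 14 in kernels of this era):

struct pagevec {
	unsigned long nr;                  /* pages currently held */
	unsigned long cold;                /* cold-release hint */
	struct page *pages[PAGEVEC_SIZE];  /* the parked pages */
};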
@@ -690,9 +685,9 @@ void lru_add_drain_cpu(int cpu)
unsigned long flags;
/* No harm done if a racing interrupt already did this */
- local_lock_irqsave(rotate_lock, flags);
+ local_irq_save(flags);
pagevec_move_tail(pvec);
- local_unlock_irqrestore(rotate_lock, flags);
+ local_irq_restore(flags);
}
pvec = &per_cpu(lru_deactivate_pvecs, cpu);
@@ -720,19 +715,18 @@ void deactivate_page(struct page *page)
return;
if (likely(get_page_unless_zero(page))) {
- struct pagevec *pvec = &get_locked_var(swapvec_lock,
- lru_deactivate_pvecs);
+ struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
if (!pagevec_add(pvec, page))
pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
- put_locked_var(swapvec_lock, lru_deactivate_pvecs);
+ put_cpu_var(lru_deactivate_pvecs);
}
}
void lru_add_drain(void)
{
- lru_add_drain_cpu(local_lock_cpu(swapvec_lock));
- local_unlock_cpu(swapvec_lock);
+ lru_add_drain_cpu(get_cpu());
+ put_cpu();
}
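Here the RT tree used local_lock_cpu()/local_unlock_cpu() on swapvec_lock to pick and pin a CPU; vanilla simply pins the task with get_cpu(), which disables preemption and returns the current CPU id, then re-enables preemption with put_cpu(). In this era the pair is essentially:

#define get_cpu()  ({ preempt_disable(); smp_processor_id(); })
#define put_cpu()  preempt_enable()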
static void lru_add_drain_per_cpu(struct work_struct *dummy)