-rw-r--r--  include/linux/migrate.h |  3
-rw-r--r--  mm/internal.h           |  2
-rw-r--r--  mm/memory_hotplug.c     |  3
-rw-r--r--  mm/mempolicy.c          |  9
-rw-r--r--  mm/migrate.c            | 34
-rw-r--r--  mm/vmscan.c             | 45
6 files changed, 59 insertions, 37 deletions
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 03aea61..3f34005 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -7,7 +7,6 @@
typedef struct page *new_page_t(struct page *, unsigned long private, int **);
#ifdef CONFIG_MIGRATION
-extern int isolate_lru_page(struct page *p, struct list_head *pagelist);
extern int putback_lru_pages(struct list_head *l);
extern int migrate_page(struct address_space *,
struct page *, struct page *);
@@ -21,8 +20,6 @@ extern int migrate_vmas(struct mm_struct *mm,
const nodemask_t *from, const nodemask_t *to,
unsigned long flags);
#else
-static inline int isolate_lru_page(struct page *p, struct list_head *list)
- { return -ENOSYS; }
static inline int putback_lru_pages(struct list_head *l) { return 0; }
static inline int migrate_pages(struct list_head *l, new_page_t x,
unsigned long private) { return -ENOSYS; }
diff --git a/mm/internal.h b/mm/internal.h
index 1f43f74..4e8e78b 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -39,6 +39,8 @@ static inline void __put_page(struct page *page)
atomic_dec(&page->_count);
}
+extern int isolate_lru_page(struct page *page);
+
extern void __free_pages_bootmem(struct page *page, unsigned int order);
/*
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index c299d08..3b49758 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -658,8 +658,9 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
* We can skip free pages. And we can only deal with pages on
* LRU.
*/
- ret = isolate_lru_page(page, &source);
+ ret = isolate_lru_page(page);
if (!ret) { /* Success */
+ list_add_tail(&page->lru, &source);
move_pages--;
} else {
/* Because we don't have big zone->lock. we should
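With the list parameter gone from isolate_lru_page(), callers now isolate first and then place the page on their own list, as the hunk above does. A condensed sketch of that pattern, abridged from do_migrate_range() (the pfn walk is simplified and the move_pages accounting is omitted):

	LIST_HEAD(source);

	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		struct page *page;

		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		if (!page_count(page))
			continue;		/* free page, nothing to isolate */

		/* On success, isolate_lru_page() holds its own page reference. */
		if (!isolate_lru_page(page))
			list_add_tail(&page->lru, &source);
	}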
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 8336905..71b4749 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -93,6 +93,8 @@
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
+#include "internal.h"
+
/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */
@@ -762,8 +764,11 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
/*
* Avoid migrating a page that is shared with others.
*/
- if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1)
- isolate_lru_page(page, pagelist);
+ if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
+ if (!isolate_lru_page(page)) {
+ list_add_tail(&page->lru, pagelist);
+ }
+ }
}
static struct page *new_node_page(struct page *page, unsigned long node, int **x)
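For context, the pagelist that migrate_page_add() fills above is later handed to migrate_pages() together with an allocation callback such as new_node_page() below. A simplified sketch of that flow (not a verbatim copy of mempolicy.c; the VMA and page-table walk and error handling are elided, and dest stands for the destination node id):

	LIST_HEAD(pagelist);
	int err = 0;

	/* ... the page-table walk calls migrate_page_add(page, &pagelist, flags) ... */

	if (!list_empty(&pagelist))
		err = migrate_pages(&pagelist, new_node_page, dest);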
diff --git a/mm/migrate.c b/mm/migrate.c
index 2a80136..da73742 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -37,36 +37,6 @@
#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
/*
- * Isolate one page from the LRU lists. If successful put it onto
- * the indicated list with elevated page count.
- *
- * Result:
- * -EBUSY: page not on LRU list
- * 0: page removed from LRU list and added to the specified list.
- */
-int isolate_lru_page(struct page *page, struct list_head *pagelist)
-{
- int ret = -EBUSY;
-
- if (PageLRU(page)) {
- struct zone *zone = page_zone(page);
-
- spin_lock_irq(&zone->lru_lock);
- if (PageLRU(page) && get_page_unless_zero(page)) {
- ret = 0;
- ClearPageLRU(page);
- if (PageActive(page))
- del_page_from_active_list(zone, page);
- else
- del_page_from_inactive_list(zone, page);
- list_add_tail(&page->lru, pagelist);
- }
- spin_unlock_irq(&zone->lru_lock);
- }
- return ret;
-}
-
-/*
* migrate_prep() needs to be called before we start compiling a list of pages
* to be migrated using isolate_lru_page().
*/
@@ -914,7 +884,9 @@ static int do_move_pages(struct mm_struct *mm, struct page_to_node *pm,
!migrate_all)
goto put_and_set;
- err = isolate_lru_page(page, &pagelist);
+ err = isolate_lru_page(page);
+ if (!err)
+ list_add_tail(&page->lru, &pagelist);
put_and_set:
/*
* Either remove the duplicate refcount from
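The put_and_set path above exists because the page can now carry two extra references: one from the earlier follow_page() lookup with FOLL_GET in do_move_pages(), and one taken by isolate_lru_page() on success. A condensed, illustrative sketch of that refcount flow (abridged; the status and target-node bookkeeping are omitted):

	page = follow_page(vma, addr, FOLL_GET);	/* reference #1 */

	err = isolate_lru_page(page);			/* reference #2 on success */
	if (!err)
		list_add_tail(&page->lru, &pagelist);

	/* Drop reference #1; a successfully isolated page keeps reference #2
	   until it is migrated or put back on the LRU. */
	put_page(page);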
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1ff1a58..1fd4912 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -844,6 +844,51 @@ static unsigned long clear_active_flags(struct list_head *page_list)
return nr_active;
}
+/**
+ * isolate_lru_page - tries to isolate a page from its LRU list
+ * @page: page to isolate from its LRU list
+ *
+ * Isolates a @page from an LRU list, clears PageLRU and adjusts the
+ * vmstat statistic corresponding to whatever LRU list the page was on.
+ *
+ * Returns 0 if the page was removed from an LRU list.
+ * Returns -EBUSY if the page was not on an LRU list.
+ *
+ * The returned page will have PageLRU() cleared. If it was found on
+ * the active list, it will have PageActive set. That flag may need
+ * to be cleared by the caller before letting the page go.
+ *
+ * The vmstat statistic corresponding to the list on which the page was
+ * found will be decremented.
+ *
+ * Restrictions:
+ * (1) Must be called with an elevated refcount on the page. This is a
+ * fundamental difference from isolate_lru_pages (which is called
+ * without a stable reference).
+ * (2) the lru_lock must not be held.
+ * (3) interrupts must be enabled.
+ */
+int isolate_lru_page(struct page *page)
+{
+ int ret = -EBUSY;
+
+ if (PageLRU(page)) {
+ struct zone *zone = page_zone(page);
+
+ spin_lock_irq(&zone->lru_lock);
+ if (PageLRU(page) && get_page_unless_zero(page)) {
+ ret = 0;
+ ClearPageLRU(page);
+ if (PageActive(page))
+ del_page_from_active_list(zone, page);
+ else
+ del_page_from_inactive_list(zone, page);
+ }
+ spin_unlock_irq(&zone->lru_lock);
+ }
+ return ret;
+}
+
/*
* shrink_inactive_list() is a helper for shrink_zone(). It returns the number
* of reclaimed pages
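A minimal usage sketch honoring the restrictions documented in the new kernel-doc comment (the helper below is hypothetical and not part of this patch; it assumes CONFIG_MIGRATION, and that the caller already holds a reference on the page, e.g. from follow_page() with FOLL_GET):

static void isolate_and_putback(struct page *page)
{
	LIST_HEAD(pagelist);

	/* Restrictions (2) and (3): no lru_lock held, interrupts enabled. */
	if (!isolate_lru_page(page))
		list_add_tail(&page->lru, &pagelist);

	/* putback_lru_pages() returns the page to its LRU list and drops
	   the reference taken by isolation. */
	putback_lru_pages(&pagelist);

	put_page(page);		/* drop the caller's original reference */
}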