author    Christoph Lameter <clameter@sgi.com>  2006-01-08 09:00:53 (GMT)
committer Linus Torvalds <torvalds@g5.osdl.org>  2006-01-09 04:12:42 (GMT)
commit    1480a540c98525640174a7eadd712378fcd6fd63 (patch)
tree      28f2cc0aa819ff0aed30dd85bb16177d4a73002b
parent    8419c3181086c86664e8246bc997afc2e4ffba4f (diff)
[PATCH] SwapMig: add_to_swap() avoid atomic allocations
Add gfp_mask to add_to_swap

add_to_swap does allocations with GFP_ATOMIC in order not to interfere with swapping. During migration we may have to use add_to_swap extensively, which may lead to out of memory errors.

This patch makes add_to_swap take a parameter that specifies the gfp mask. The page migration code can then make add_to_swap use GFP_KERNEL.

Signed-off-by: Hirokazu Takahashi <taka@valinux.co.jp>
Signed-off-by: Dave Hansen <haveblue@us.ibm.com>
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
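For context, a minimal sketch of the reworked calling convention. The two helpers below are hypothetical stand-ins for the reclaim and migration call sites shown in the diff, not functions that exist in the kernel:

#include <linux/swap.h>		/* add_to_swap() */
#include <linux/gfp.h>		/* GFP_ATOMIC, GFP_KERNEL */
#include <linux/mm.h>		/* struct page */

/*
 * Reclaim may run with locks held and must not sleep, so it keeps
 * the non-blocking GFP_ATOMIC mask it used before this patch.
 */
static int reclaim_add_to_swap(struct page *page)
{
	/* Caller is expected to hold the page lock. */
	return add_to_swap(page, GFP_ATOMIC);
}

/*
 * Migration runs in process context and may sleep, so it can pass
 * GFP_KERNEL and let the allocator reclaim memory rather than fail.
 */
static int migrate_add_to_swap(struct page *page)
{
	return add_to_swap(page, GFP_KERNEL);
}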
-rw-r--r--  include/linux/swap.h  2
-rw-r--r--  mm/swap_state.c       4
-rw-r--r--  mm/vmscan.c           4
3 files changed, 5 insertions, 5 deletions
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 997d838..eb591ea 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -198,7 +198,7 @@ extern int rw_swap_page_sync(int, swp_entry_t, struct page *);
extern struct address_space swapper_space;
#define total_swapcache_pages swapper_space.nrpages
extern void show_swap_cache_info(void);
-extern int add_to_swap(struct page *);
+extern int add_to_swap(struct page *, gfp_t);
extern void __delete_from_swap_cache(struct page *);
extern void delete_from_swap_cache(struct page *);
extern int move_to_swap_cache(struct page *, swp_entry_t);
diff --git a/mm/swap_state.c b/mm/swap_state.c
index fc2aecb..7b09ac5 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -141,7 +141,7 @@ void __delete_from_swap_cache(struct page *page)
* Allocate swap space for the page and add the page to the
* swap cache. Caller needs to hold the page lock.
*/
-int add_to_swap(struct page * page)
+int add_to_swap(struct page * page, gfp_t gfp_mask)
{
swp_entry_t entry;
int err;
@@ -166,7 +166,7 @@ int add_to_swap(struct page * page)
* Add it to the swap cache and mark it dirty
*/
err = __add_to_swap_cache(page, entry,
- GFP_ATOMIC|__GFP_NOMEMALLOC|__GFP_NOWARN);
+ gfp_mask|__GFP_NOMEMALLOC|__GFP_NOWARN);
switch (err) {
case 0: /* Success */
diff --git a/mm/vmscan.c b/mm/vmscan.c
index daed4a7..5393b093 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -458,7 +458,7 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
* Try to allocate it some swap space here.
*/
if (PageAnon(page) && !PageSwapCache(page)) {
- if (!add_to_swap(page))
+ if (!add_to_swap(page, GFP_ATOMIC))
goto activate_locked;
}
#endif /* CONFIG_SWAP */
@@ -715,7 +715,7 @@ redo:
}
if (PageAnon(page) && !PageSwapCache(page)) {
- if (!add_to_swap(page)) {
+ if (!add_to_swap(page, GFP_KERNEL)) {
unlock_page(page);
list_move(&page->lru, &failed);
nr_failed++;