path: root/include/linux/mm.h
author		Nick Piggin <npiggin@suse.de>		2006-03-22 08:08:03 (GMT)
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-03-22 15:53:57 (GMT)
commit		7c8ee9a86340db686cd4314e9944dc9b6111bda9 (patch)
tree		80638e1658556b4fd7c0b92d571aaac854245bd3 /include/linux/mm.h
parent		f205b2fe62d321403525065a4cb31b6bff1bbe53 (diff)
download	linux-fsl-qoriq-7c8ee9a86340db686cd4314e9944dc9b6111bda9.tar.xz
[PATCH] mm: simplify vmscan vs release refcounting
The VM has an interesting race where a page refcount can drop to zero, but it is still on the LRU lists for a short time. This was solved by testing a 0->1 refcount transition when picking up pages from the LRU, and dropping the refcount in that case.

Instead, use atomic_add_unless to ensure we never pick up a 0 refcount page from the LRU, thus a 0 refcount page will never have its refcount elevated until it is allocated again.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
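For illustration, a minimal sketch (not part of this patch) of how a vmscan-style LRU walker is expected to use the new helper. The function name lru_try_to_take_page() is hypothetical; only get_page_unless_zero() comes from this change.

#include <linux/mm.h>

/*
 * Hypothetical sketch: skip pages whose refcount has already reached zero.
 * atomic_add_unless() refuses to take a reference once _count sits at the
 * "free" value, so a page that died while still on the LRU is simply left
 * alone until the allocator hands it out again.
 */
static int lru_try_to_take_page(struct page *page)
{
	if (!get_page_unless_zero(page))
		return 0;	/* already free: never elevate its refcount */

	/* a reference is now held; the page can be isolated safely */
	return 1;
}

Under the old scheme the scanner called get_page_testone() and then had to detect the 0->1 transition and drop the reference again when it had picked up an already-free page; the sketch above avoids that window entirely.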
Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--	include/linux/mm.h	19
1 file changed, 11 insertions(+), 8 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 498ff87..b12d5c7 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -301,17 +301,20 @@ struct page {
  * Drop a ref, return true if the logical refcount fell to zero (the page has
  * no users)
  */
-#define put_page_testzero(p)				\
-	({						\
-		BUG_ON(atomic_read(&(p)->_count) == -1);\
-		atomic_add_negative(-1, &(p)->_count);	\
-	})
+static inline int put_page_testzero(struct page *page)
+{
+	BUG_ON(atomic_read(&page->_count) == -1);
+	return atomic_add_negative(-1, &page->_count);
+}
 
 /*
- * Grab a ref, return true if the page previously had a logical refcount of
- * zero. ie: returns true if we just grabbed an already-deemed-to-be-free page
+ * Try to grab a ref unless the page has a refcount of zero, return false if
+ * that is the case.
  */
-#define get_page_testone(p)	atomic_inc_and_test(&(p)->_count)
+static inline int get_page_unless_zero(struct page *page)
+{
+	return atomic_add_unless(&page->_count, 1, -1);
+}
 
 #define set_page_count(p,v)	atomic_set(&(p)->_count, (v) - 1)
 #define __put_page(p)		atomic_dec(&(p)->_count)
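A reading note on the helpers above (an observation about the surrounding code, not something this patch changes): _count is stored biased by one, so set_page_count(p, 1) stores 0, and a stored value of -1 means the page has no logical references, which is why both helpers compare against -1 rather than 0. A hedged sketch of the release side follows; free_the_page() is a hypothetical stand-in for whatever freeing routine the caller really uses.

#include <linux/mm.h>

/* Hypothetical stand-in for the real freeing routine. */
static void free_the_page(struct page *page);

/*
 * Release path sketch: put_page_testzero() decrements the biased _count
 * and returns true when it goes negative, i.e. when the logical refcount
 * has fallen to zero and the page may go back to the allocator.
 */
static void example_put_page(struct page *page)
{
	if (put_page_testzero(page))
		free_the_page(page);
}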