Diffstat (limited to 'mm/zswap.c')
 mm/zswap.c | 195 ++++++++++++++++++++++++++-----------------------
 1 file changed, 96 insertions(+), 99 deletions(-)
diff --git a/mm/zswap.c b/mm/zswap.c
index 5a63f78..d93510c 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -217,7 +217,6 @@ static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp)
if (!entry)
return NULL;
entry->refcount = 1;
- RB_CLEAR_NODE(&entry->rbnode);
return entry;
}
@@ -226,6 +225,19 @@ static void zswap_entry_cache_free(struct zswap_entry *entry)
kmem_cache_free(zswap_entry_cache, entry);
}
+/* caller must hold the tree lock */
+static void zswap_entry_get(struct zswap_entry *entry)
+{
+ entry->refcount++;
+}
+
+/* caller must hold the tree lock */
+static int zswap_entry_put(struct zswap_entry *entry)
+{
+ entry->refcount--;
+ return entry->refcount;
+}
+
/*********************************
* rbtree functions
**********************************/
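
The patch replaces the tree-aware get/put pair with bare refcount helpers: zswap_entry_put() now only decrements and returns the new count, leaving the unlink and free decisions to each caller under the tree lock. A minimal userspace sketch of that protocol (struct and names are illustrative, not kernel code; locking is elided since the model is single-threaded):

    #include <assert.h>
    #include <stdio.h>

    struct entry { int refcount; };

    /* caller must hold the tree lock */
    static void entry_get(struct entry *e) { e->refcount++; }

    /* caller must hold the tree lock; returns the new count so the
     * caller can decide whether to unlink and free */
    static int entry_put(struct entry *e) { return --e->refcount; }

    int main(void)
    {
        struct entry e = { .refcount = 1 };  /* initial ref from creation */
        entry_get(&e);                       /* e.g. writeback takes a ref */
        assert(entry_put(&e) == 1);          /* drop local ref, entry lives */
        if (entry_put(&e) == 0)              /* drop initial ref */
            printf("last reference dropped, caller would free here\n");
        return 0;
    }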
@@ -273,61 +285,6 @@ static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
return 0;
}
-static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
-{
- if (!RB_EMPTY_NODE(&entry->rbnode)) {
- rb_erase(&entry->rbnode, root);
- RB_CLEAR_NODE(&entry->rbnode);
- }
-}
-
-/*
- * Carries out the common pattern of freeing and entry's zsmalloc allocation,
- * freeing the entry itself, and decrementing the number of stored pages.
- */
-static void zswap_free_entry(struct zswap_tree *tree,
- struct zswap_entry *entry)
-{
- zbud_free(tree->pool, entry->handle);
- zswap_entry_cache_free(entry);
- atomic_dec(&zswap_stored_pages);
- zswap_pool_pages = zbud_get_pool_size(tree->pool);
-}
-
-/* caller must hold the tree lock */
-static void zswap_entry_get(struct zswap_entry *entry)
-{
- entry->refcount++;
-}
-
-/* caller must hold the tree lock
-* remove from the tree and free it, if nobody reference the entry
-*/
-static void zswap_entry_put(struct zswap_tree *tree,
- struct zswap_entry *entry)
-{
- int refcount = --entry->refcount;
-
- BUG_ON(refcount < 0);
- if (refcount == 0) {
- zswap_rb_erase(&tree->rbroot, entry);
- zswap_free_entry(tree, entry);
- }
-}
-
-/* caller must hold the tree lock */
-static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
- pgoff_t offset)
-{
- struct zswap_entry *entry = NULL;
-
- entry = zswap_rb_search(root, offset);
- if (entry)
- zswap_entry_get(entry);
-
- return entry;
-}
-
/*********************************
* per-cpu code
**********************************/
@@ -411,6 +368,18 @@ static bool zswap_is_full(void)
zswap_pool_pages);
}
+/*
+ * Carries out the common pattern of freeing an entry's zbud allocation,
+ * freeing the entry itself, and decrementing the number of stored pages.
+ */
+static void zswap_free_entry(struct zswap_tree *tree, struct zswap_entry *entry)
+{
+ zbud_free(tree->pool, entry->handle);
+ zswap_entry_cache_free(entry);
+ atomic_dec(&zswap_stored_pages);
+ zswap_pool_pages = zbud_get_pool_size(tree->pool);
+}
+
/*********************************
* writeback code
**********************************/
@@ -418,7 +387,7 @@ static bool zswap_is_full(void)
enum zswap_get_swap_ret {
ZSWAP_SWAPCACHE_NEW,
ZSWAP_SWAPCACHE_EXIST,
- ZSWAP_SWAPCACHE_FAIL,
+ ZSWAP_SWAPCACHE_NOMEM
};
/*
@@ -432,10 +401,9 @@ enum zswap_get_swap_ret {
* added to the swap cache, and returned in retpage.
*
* If success, the swap cache page is returned in retpage
- * Returns ZSWAP_SWAPCACHE_EXIST if page was already in the swap cache
- * Returns ZSWAP_SWAPCACHE_NEW if the new page needs to be populated,
- * the new page is added to swapcache and locked
- * Returns ZSWAP_SWAPCACHE_FAIL on error
+ * Returns ZSWAP_SWAPCACHE_EXIST if the page was already in the swap cache;
+ * the page is returned unlocked
+ * Returns ZSWAP_SWAPCACHE_NEW if a new page was allocated and added to the
+ * swap cache; the page is returned locked and must be populated by the caller
+ * Returns ZSWAP_SWAPCACHE_NOMEM if the page could not be allocated
*/
static int zswap_get_swap_cache_page(swp_entry_t entry,
struct page **retpage)
@@ -507,7 +475,7 @@ static int zswap_get_swap_cache_page(swp_entry_t entry,
if (new_page)
page_cache_release(new_page);
if (!found_page)
- return ZSWAP_SWAPCACHE_FAIL;
+ return ZSWAP_SWAPCACHE_NOMEM;
*retpage = found_page;
return ZSWAP_SWAPCACHE_EXIST;
}
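
With the FAIL-to-NOMEM rename, zswap_get_swap_cache_page() keeps its three-way result: a freshly allocated, locked page to populate (ZSWAP_SWAPCACHE_NEW), an existing unlocked page (ZSWAP_SWAPCACHE_EXIST), or an allocation failure (ZSWAP_SWAPCACHE_NOMEM). A small standalone model of the dispatch the writeback path below performs on these values (illustrative only, not kernel code):

    #include <stdio.h>

    enum zswap_get_swap_ret {
        ZSWAP_SWAPCACHE_NEW,    /* page allocated and locked, caller populates */
        ZSWAP_SWAPCACHE_EXIST,  /* page already cached, returned unlocked */
        ZSWAP_SWAPCACHE_NOMEM   /* allocation failed */
    };

    static const char *action(enum zswap_get_swap_ret ret)
    {
        switch (ret) {
        case ZSWAP_SWAPCACHE_NEW:   return "decompress, write back, unlock";
        case ZSWAP_SWAPCACHE_EXIST: return "drop page ref, return -EEXIST";
        case ZSWAP_SWAPCACHE_NOMEM: return "return -ENOMEM";
        }
        return "unreachable";
    }

    int main(void)
    {
        for (int r = ZSWAP_SWAPCACHE_NEW; r <= ZSWAP_SWAPCACHE_NOMEM; r++)
            printf("%d -> %s\n", r, action((enum zswap_get_swap_ret)r));
        return 0;
    }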
@@ -534,7 +502,7 @@ static int zswap_writeback_entry(struct zbud_pool *pool, unsigned long handle)
struct page *page;
u8 *src, *dst;
unsigned int dlen;
- int ret;
+ int ret, refcount;
struct writeback_control wbc = {
.sync_mode = WB_SYNC_NONE,
};
@@ -549,22 +517,23 @@ static int zswap_writeback_entry(struct zbud_pool *pool, unsigned long handle)
/* find and ref zswap entry */
spin_lock(&tree->lock);
- entry = zswap_entry_find_get(&tree->rbroot, offset);
+ entry = zswap_rb_search(&tree->rbroot, offset);
if (!entry) {
/* entry was invalidated */
spin_unlock(&tree->lock);
return 0;
}
+ zswap_entry_get(entry);
spin_unlock(&tree->lock);
BUG_ON(offset != entry->offset);
/* try to allocate swap cache page */
switch (zswap_get_swap_cache_page(swpentry, &page)) {
- case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */
+ case ZSWAP_SWAPCACHE_NOMEM: /* no memory */
ret = -ENOMEM;
goto fail;
- case ZSWAP_SWAPCACHE_EXIST:
+ case ZSWAP_SWAPCACHE_EXIST: /* page is unlocked */
/* page is already in the swap cache, ignore for now */
page_cache_release(page);
ret = -EEXIST;
@@ -587,44 +556,43 @@ static int zswap_writeback_entry(struct zbud_pool *pool, unsigned long handle)
SetPageUptodate(page);
}
- /* move it to the tail of the inactive list after end_writeback */
- SetPageReclaim(page);
-
/* start writeback */
__swap_writepage(page, &wbc, end_swap_bio_write);
page_cache_release(page);
zswap_written_back_pages++;
spin_lock(&tree->lock);
+
/* drop local reference */
- zswap_entry_put(tree, entry);
+ zswap_entry_put(entry);
+ /* drop the initial reference from entry creation */
+ refcount = zswap_entry_put(entry);
/*
- * There are two possible situations for entry here:
- * (1) refcount is 1(normal case), entry is valid and on the tree
- * (2) refcount is 0, entry is freed and not on the tree
- * because invalidate happened during writeback
- * search the tree and free the entry if find entry
- */
- if (entry == zswap_rb_search(&tree->rbroot, offset))
- zswap_entry_put(tree, entry);
+ * There are three possible values for refcount here:
+ * (1) refcount is 1, load is in progress, unlink from rbtree,
+ * load will free
+ * (2) refcount is 0 (normal case), entry is valid,
+ * remove from rbtree and free entry
+ * (3) refcount is -1, invalidate happened during writeback,
+ * free entry
+ */
+ if (refcount >= 0) {
+ /* no invalidate yet, remove from rbtree */
+ rb_erase(&entry->rbnode, &tree->rbroot);
+ }
spin_unlock(&tree->lock);
+ if (refcount <= 0) {
+ /* free the entry */
+ zswap_free_entry(tree, entry);
+ return 0;
+ }
+ return -EAGAIN;
- goto end;
-
- /*
- * if we get here due to ZSWAP_SWAPCACHE_EXIST
- * a load may happening concurrently
- * it is safe and okay to not free the entry
- * if we free the entry in the following put
- * it it either okay to return !0
- */
fail:
spin_lock(&tree->lock);
- zswap_entry_put(tree, entry);
+ zswap_entry_put(entry);
spin_unlock(&tree->lock);
-
-end:
return ret;
}
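
The heart of the change: after writeback completes, two consecutive puts collapse the entry's state into one refcount value that encodes which racing path, if any, still holds the entry. A runnable userspace model of the three outcomes (struct, helpers, and return codes are stand-ins; locking is elided because the model is single-threaded):

    #include <stdio.h>

    struct entry { int refcount; int on_tree; };

    static int entry_put(struct entry *e) { return --e->refcount; }

    /* writeback holds one local ref on top of the initial creation ref */
    static int finish_writeback(struct entry *e)
    {
        int refcount;

        entry_put(e);                /* drop local reference */
        refcount = entry_put(e);     /* drop initial reference */

        if (refcount >= 0)           /* not yet invalidated: unlink */
            e->on_tree = 0;
        if (refcount <= 0) {         /* last reference: free (modeled as printf) */
            printf("freed, refcount %d\n", refcount);
            return 0;
        }
        return -1;                   /* a load still holds a ref: -EAGAIN */
    }

    int main(void)
    {
        struct entry normal = { .refcount = 2, .on_tree = 1 };      /* initial + local */
        struct entry racing_load = { .refcount = 3, .on_tree = 1 }; /* + load's ref */
        struct entry invalidated = { .refcount = 1, .on_tree = 0 }; /* invalidate already put + unlinked */

        finish_writeback(&normal);       /* -> 0:  unlink and free */
        finish_writeback(&racing_load);  /* -> 1:  unlink, load frees */
        finish_writeback(&invalidated);  /* -> -1: just free */
        return 0;
    }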
@@ -708,8 +676,11 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
if (ret == -EEXIST) {
zswap_duplicate_entry++;
/* remove from rbtree */
- zswap_rb_erase(&tree->rbroot, dupentry);
- zswap_entry_put(tree, dupentry);
+ rb_erase(&dupentry->rbnode, &tree->rbroot);
+ if (!zswap_entry_put(dupentry)) {
+ /* free */
+ zswap_free_entry(tree, dupentry);
+ }
}
} while (ret == -EEXIST);
spin_unlock(&tree->lock);
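
On a duplicate store, the stale entry found in the tree is unlinked unconditionally and freed only if its initial reference was the last one; the loop then retries until the insert succeeds. A self-contained sketch with a one-slot stand-in for the rbtree (SIM_EEXIST and all helper names are hypothetical):

    #include <stdio.h>
    #include <stdlib.h>

    #define SIM_EEXIST (-17)    /* stands in for -EEXIST */

    struct entry { int refcount; long offset; };

    static struct entry *slot;  /* one-slot stand-in for tree->rbroot */

    static int insert(struct entry *e, struct entry **dup)
    {
        if (slot && slot->offset == e->offset) {
            *dup = slot;
            return SIM_EEXIST;
        }
        slot = e;
        return 0;
    }

    static int entry_put(struct entry *e) { return --e->refcount; }

    int main(void)
    {
        struct entry *stale = malloc(sizeof(*stale));
        struct entry *fresh = malloc(sizeof(*fresh));
        struct entry *dup;
        int ret;

        *stale = (struct entry){ .refcount = 1, .offset = 42 };
        *fresh = (struct entry){ .refcount = 1, .offset = 42 };
        slot = stale;

        do {
            ret = insert(fresh, &dup);
            if (ret == SIM_EEXIST) {
                slot = NULL;            /* rb_erase() the duplicate */
                if (!entry_put(dup))    /* drop its initial reference */
                    free(dup);          /* zswap_free_entry() */
            }
        } while (ret == SIM_EEXIST);

        printf("stored entry at offset %ld\n", slot->offset);
        free(fresh);
        return 0;
    }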
@@ -738,16 +709,17 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset,
struct zswap_entry *entry;
u8 *src, *dst;
unsigned int dlen;
- int ret;
+ int refcount, ret;
/* find */
spin_lock(&tree->lock);
- entry = zswap_entry_find_get(&tree->rbroot, offset);
+ entry = zswap_rb_search(&tree->rbroot, offset);
if (!entry) {
/* entry was written back */
spin_unlock(&tree->lock);
return -1;
}
+ zswap_entry_get(entry);
spin_unlock(&tree->lock);
/* decompress */
@@ -762,9 +734,22 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset,
BUG_ON(ret);
spin_lock(&tree->lock);
- zswap_entry_put(tree, entry);
+ refcount = zswap_entry_put(entry);
+ if (likely(refcount)) {
+ spin_unlock(&tree->lock);
+ return 0;
+ }
spin_unlock(&tree->lock);
+ /*
+ * We don't have to unlink from the rbtree because
+ * zswap_writeback_entry() or zswap_frontswap_invalidate_page()
+ * has already done this for us if we are the last reference.
+ */
+ /* free */
+ zswap_free_entry(tree, entry);
+
return 0;
}
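
Load now takes a raw reference and, on the final put, frees the entry without touching the tree: whichever path dropped the initial reference first (writeback or invalidate) has already unlinked it. A minimal model of the likely() fast path and the rare last-reference case (names illustrative, single-threaded):

    #include <stdio.h>
    #include <stdlib.h>

    struct entry { int refcount; };

    static int entry_put(struct entry *e) { return --e->refcount; }

    static void load_finish(struct entry *e)
    {
        if (entry_put(e)) {
            /* likely(): the initial reference is still held, nothing to do */
            return;
        }
        /*
         * We held the last reference; writeback or invalidate already
         * unlinked the entry from the tree, so load only frees it.
         */
        free(e);
        printf("load freed the entry\n");
    }

    int main(void)
    {
        struct entry *e = malloc(sizeof(*e));
        e->refcount = 1;   /* load's ref; initial ref already dropped by a racer */
        load_finish(e);
        return 0;
    }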
@@ -773,6 +758,7 @@ static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset)
{
struct zswap_tree *tree = zswap_trees[type];
struct zswap_entry *entry;
+ int refcount;
/* find */
spin_lock(&tree->lock);
@@ -784,12 +770,20 @@ static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset)
}
/* remove from rbtree */
- zswap_rb_erase(&tree->rbroot, entry);
+ rb_erase(&entry->rbnode, &tree->rbroot);
/* drop the initial reference from entry creation */
- zswap_entry_put(tree, entry);
+ refcount = zswap_entry_put(entry);
spin_unlock(&tree->lock);
+
+ if (refcount) {
+ /* writeback in progress, writeback will free */
+ return;
+ }
+
+ /* free */
+ zswap_free_entry(tree, entry);
}
/* frees all zswap entries for the given swap type */
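
The zswap_frontswap_invalidate_page() change above always unlinks the entry but defers the free to a concurrent writeback if one still holds a reference. A compact userspace model of both outcomes (names are illustrative; the final free(busy) stands in for writeback's last put-and-free):

    #include <stdio.h>
    #include <stdlib.h>

    struct entry { int refcount; int on_tree; };

    static int entry_put(struct entry *e) { return --e->refcount; }

    static void invalidate(struct entry *e)
    {
        e->on_tree = 0;          /* rb_erase() */
        if (entry_put(e)) {      /* drop the initial reference */
            printf("writeback in progress, writeback will free\n");
            return;
        }
        free(e);
        printf("freed immediately\n");
    }

    int main(void)
    {
        struct entry *idle = malloc(sizeof(*idle));
        struct entry *busy = malloc(sizeof(*busy));

        *idle = (struct entry){ .refcount = 1, .on_tree = 1 };
        *busy = (struct entry){ .refcount = 2, .on_tree = 1 }; /* + writeback's ref */

        invalidate(idle);  /* freed immediately */
        invalidate(busy);  /* deferred to writeback */
        free(busy);        /* stands in for writeback's final put + free */
        return 0;
    }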
@@ -803,8 +797,11 @@ static void zswap_frontswap_invalidate_area(unsigned type)
/* walk the tree and free everything */
spin_lock(&tree->lock);
- rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode)
- zswap_free_entry(tree, entry);
+ rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode) {
+ zbud_free(tree->pool, entry->handle);
+ zswap_entry_cache_free(entry);
+ atomic_dec(&zswap_stored_pages);
+ }
tree->rbroot = RB_ROOT;
spin_unlock(&tree->lock);