path: root/drivers/md/bcache/super.c
author		Kent Overstreet <kmo@daterainc.com>	2014-03-18 00:15:53 (GMT)
committer	Kent Overstreet <kmo@daterainc.com>	2014-03-18 19:23:35 (GMT)
commit		0a63b66db566cffdf90182eb6e66fdd4d0479e63 (patch)
tree		d1284e5008b668befb8179de30aeb50d4e789177 /drivers/md/bcache/super.c
parent		56b30770b27d54d68ad51eccc6d888282b568cee (diff)
download	linux-0a63b66db566cffdf90182eb6e66fdd4d0479e63.tar.xz
bcache: Rework btree cache reserve handling
This changes the bucket allocation reserves to use _real_ reserves - separate freelists - instead of watermarks, which if nothing else makes the current code saner to reason about, and which is going to be important in the future when we add support for multiple btrees.

It also adds btree_check_reserve(), which checks (and locks) the reserves for both bucket allocation and memory allocation for btree nodes. The old code just kinda sorta assumed that, since (e.g. for btree node splits) it had the root locked, no other threads could try to make use of the same reserve. This technically should have been ok for memory allocation (we should always have a reserve for memory allocation, since the btree node cache is used as a reserve and we preallocate it), but multiple btrees will mean that locking the root won't be sufficient anymore, and for the bucket allocation reserve it was technically possible for the old code to deadlock.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
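For readers without the bcache background, the fragment below is a minimal sketch, in plain C rather than bcache code, of the distinction the message draws; the names bucket, wm_alloc, reserve_alloc, RESERVE_BTREE and friends are invented for this illustration. With a watermark, every caller draws from one shared freelist and a count is the only thing protecting the buckets meant for btree nodes; with a real reserve, each consumer owns a separate freelist that ordinary allocations can never drain.

#include <stdbool.h>
#include <stddef.h>

struct bucket { struct bucket *next; };

/* Watermark scheme: one shared freelist; a count is all that "protects"
 * the buckets meant for btree nodes. */
struct wm_allocator {
	struct bucket *free_list;
	size_t nr_free;
	size_t btree_watermark;	/* leave this many behind for btree use */
};

static struct bucket *wm_alloc(struct wm_allocator *a, bool for_btree)
{
	/* Non-btree callers must stay above the watermark, but two btree
	 * callers can both pass this check and then race to empty the pool:
	 * the reserve is only advisory. */
	if (!for_btree && a->nr_free <= a->btree_watermark)
		return NULL;

	struct bucket *b = a->free_list;
	if (b) {
		a->free_list = b->next;
		a->nr_free--;
	}
	return b;
}

/* Real reserves: every consumer owns a separate freelist, refilled
 * independently, so a btree allocation can neither starve nor be starved
 * by ordinary allocations. */
enum reserve { RESERVE_BTREE, RESERVE_MOVINGGC, RESERVE_NONE, RESERVE_NR };

struct reserve_allocator {
	struct bucket *free_list[RESERVE_NR];
};

static struct bucket *reserve_alloc(struct reserve_allocator *a, enum reserve r)
{
	struct bucket *b = a->free_list[r];
	if (b)
		a->free_list[r] = b->next;
	return b;
}

In bcache the reserves are refilled by the allocator thread; the point of the sketch is only that a dedicated freelist turns an advisory limit into a structural guarantee.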
Diffstat (limited to 'drivers/md/bcache/super.c')
-rw-r--r--	drivers/md/bcache/super.c	13
1 file changed, 3 insertions(+), 10 deletions(-)
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 307fe37..2d4a562 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1495,14 +1495,13 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
 
 	sema_init(&c->sb_write_mutex, 1);
 	mutex_init(&c->bucket_lock);
-	init_waitqueue_head(&c->try_wait);
+	init_waitqueue_head(&c->btree_cache_wait);
 	init_waitqueue_head(&c->bucket_wait);
 	sema_init(&c->uuid_write_mutex, 1);
 
 	spin_lock_init(&c->btree_gc_time.lock);
 	spin_lock_init(&c->btree_split_time.lock);
 	spin_lock_init(&c->btree_read_time.lock);
-	spin_lock_init(&c->try_harder_time.lock);
 
 	bch_moving_init_cache_set(c);
 
@@ -1591,7 +1590,7 @@ static void run_cache_set(struct cache_set *c)
 			goto err;
 
 		err = "error reading btree root";
-		c->root = bch_btree_node_get(c, k, j->btree_level, true);
+		c->root = bch_btree_node_get(c, NULL, k, j->btree_level, true);
 		if (IS_ERR_OR_NULL(c->root))
 			goto err;
 
@@ -1666,7 +1665,7 @@ static void run_cache_set(struct cache_set *c)
 			goto err;
 
 		err = "cannot allocate new btree root";
-		c->root = bch_btree_node_alloc(c, 0, true);
+		c->root = bch_btree_node_alloc(c, NULL, 0);
 		if (IS_ERR_OR_NULL(c->root))
 			goto err;
 
@@ -1847,13 +1846,7 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
 	for_each_bucket(b, ca)
 		atomic_set(&b->pin, 0);
 
-	if (bch_cache_allocator_init(ca))
-		goto err;
-
 	return 0;
-err:
-	kobject_put(&ca->kobj);
-	return -ENOMEM;
 }
 
 static void register_cache(struct cache_sb *sb, struct page *sb_page,
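A note on the call sites above, inferred from this diff rather than from the full patch: both bch_btree_node_get() and bch_btree_node_alloc() appear to gain a second argument (passed as NULL here) through which normal callers hand in the operation whose reserves btree_check_reserve() has checked and locked; run_cache_set() executes during cache-set bring-up, where no such operation exists, and bch_btree_node_alloc() also drops its old wait flag. The fragment below is a conceptual sketch only - check_reserve(), put_reserve(), struct op and the field names are invented for illustration, not taken from btree.c - of the claim-then-release pattern the commit message describes:

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

/* Invented types: a pool of free buckets plus preallocated btree nodes,
 * and a per-operation record of what has been claimed. */
struct reserve {
	pthread_mutex_t lock;
	size_t free_buckets;	/* buckets set aside for btree allocation */
	size_t free_nodes;	/* preallocated in-memory btree nodes */
};

struct op {
	size_t buckets;		/* claimed by this operation */
	size_t nodes;
};

/* Claim, not merely check, the resources an operation (e.g. a node split)
 * will need, so no other thread can take them out from under it.  This is
 * the guarantee the old code got only implicitly from holding the root lock. */
static bool check_reserve(struct op *op, struct reserve *r,
			  size_t buckets, size_t nodes)
{
	bool ok = false;

	pthread_mutex_lock(&r->lock);
	if (r->free_buckets >= buckets && r->free_nodes >= nodes) {
		r->free_buckets -= buckets;
		r->free_nodes -= nodes;
		op->buckets = buckets;
		op->nodes = nodes;
		ok = true;
	}
	pthread_mutex_unlock(&r->lock);
	return ok;	/* on false, the caller waits for a refill and retries */
}

static void put_reserve(struct op *op, struct reserve *r)
{
	/* Return whatever the operation ended up not consuming. */
	pthread_mutex_lock(&r->lock);
	r->free_buckets += op->buckets;
	r->free_nodes += op->nodes;
	pthread_mutex_unlock(&r->lock);
	op->buckets = op->nodes = 0;
}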