path: root/drivers/md/bcache/super.c
author	Kent Overstreet <kmo@daterainc.com>	2014-03-17 20:15:53 -0400
committer	Kent Overstreet <kmo@daterainc.com>	2014-03-18 15:23:35 -0400
commit	0a63b66db566cffdf90182eb6e66fdd4d0479e63 (patch)
tree	d1284e5008b668befb8179de30aeb50d4e789177 /drivers/md/bcache/super.c
parent	56b30770b27d54d68ad51eccc6d888282b568cee (diff)
bcache: Rework btree cache reserve handling
This changes the bucket allocation reserves to use _real_ reserves - separate freelists - instead of watermarks. If nothing else this makes the current code saner to reason about, and it is going to be important in the future when we add support for multiple btrees.

It also adds btree_check_reserve(), which checks (and locks) the reserves for both bucket allocation and memory allocation for btree nodes. The old code simply assumed that since (e.g. for btree node splits) it had the root locked, no other threads could try to make use of the same reserve. That was technically fine for memory allocation, since we should always have a reserve for it (the btree node cache is preallocated and used as the reserve), but multiple btrees will mean that locking the root won't be sufficient anymore, and for the bucket allocation reserve it was technically possible for the old code to deadlock.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
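To make the distinction concrete, below is a minimal userspace sketch of a watermark check versus a real reserve backed by its own freelist. Every name in it (reserve_t, reserve_pop(), check_reserves(), RESERVE_SIZE) is a hypothetical illustration, not the actual bcache API; the real implementation lives in the kernel's alloc.c and btree.c.

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

#define RESERVE_SIZE 8	/* arbitrary size for the sketch */

/* A "real" reserve: a freelist with its own dedicated pool of buckets,
 * rather than a watermark on a pool shared with every other allocator.
 * (Hypothetical type, not the bcache code.) */
typedef struct {
	long buckets[RESERVE_SIZE];	/* bucket indices set aside up front */
	size_t nr;			/* how many are currently free */
	pthread_mutex_t lock;
} reserve_t;

/* Watermark style (the old approach): allocate from a shared pool as
 * long as enough buckets remain above the watermark. Without further
 * locking, two threads can both pass the check and together drain the
 * pool past the watermark - the kind of race real reserves remove. */
static bool watermark_alloc(size_t *shared_free, size_t watermark, long *out)
{
	if (*shared_free <= watermark)
		return false;
	*out = (long)--(*shared_free);
	return true;
}

/* Real-reserve style: pop from the dedicated freelist. Only buckets
 * explicitly set aside for this purpose can be handed out, so other
 * allocation paths cannot consume them out from under us. */
static bool reserve_pop(reserve_t *r, long *out)
{
	bool ok = false;

	pthread_mutex_lock(&r->lock);
	if (r->nr) {
		*out = r->buckets[--r->nr];
		ok = true;
	}
	pthread_mutex_unlock(&r->lock);
	return ok;
}

/* In the spirit of btree_check_reserve(): before starting an operation,
 * check every reserve it might need (buckets and btree node memory)
 * instead of assuming a held root lock keeps them safe. */
static bool check_reserves(reserve_t *buckets, reserve_t *node_mem, size_t need)
{
	bool ok;

	pthread_mutex_lock(&buckets->lock);
	pthread_mutex_lock(&node_mem->lock);
	ok = buckets->nr >= need && node_mem->nr >= need;
	pthread_mutex_unlock(&node_mem->lock);
	pthread_mutex_unlock(&buckets->lock);
	return ok;
}

int main(void)
{
	reserve_t btree_buckets = { .nr = RESERVE_SIZE };
	reserve_t node_mem = { .nr = RESERVE_SIZE };
	size_t pool = 16;
	long bucket, tmp;

	pthread_mutex_init(&btree_buckets.lock, NULL);
	pthread_mutex_init(&node_mem.lock, NULL);

	/* old style, for contrast: racy if two threads do this at once */
	watermark_alloc(&pool, 4, &tmp);

	/* e.g. a node split needs two buckets and memory for two nodes */
	if (!check_reserves(&btree_buckets, &node_mem, 2))
		return 1;
	return reserve_pop(&btree_buckets, &bucket) ? 0 : 1;
}

The failure mode the commit describes falls out of watermark_alloc(): two concurrent callers can each observe the pool above the watermark before either decrements it, whereas a dedicated freelist can only hand out what was reserved for it.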
Diffstat (limited to 'drivers/md/bcache/super.c')
-rw-r--r--	drivers/md/bcache/super.c	13
1 file changed, 3 insertions(+), 10 deletions(-)
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 307fe378ea43..2d4a56219ec7 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1495,14 +1495,13 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
 
 	sema_init(&c->sb_write_mutex, 1);
 	mutex_init(&c->bucket_lock);
-	init_waitqueue_head(&c->try_wait);
+	init_waitqueue_head(&c->btree_cache_wait);
 	init_waitqueue_head(&c->bucket_wait);
 	sema_init(&c->uuid_write_mutex, 1);
 
 	spin_lock_init(&c->btree_gc_time.lock);
 	spin_lock_init(&c->btree_split_time.lock);
 	spin_lock_init(&c->btree_read_time.lock);
-	spin_lock_init(&c->try_harder_time.lock);
 
 	bch_moving_init_cache_set(c);
 
@@ -1591,7 +1590,7 @@ static void run_cache_set(struct cache_set *c)
 			goto err;
 
 		err = "error reading btree root";
-		c->root = bch_btree_node_get(c, k, j->btree_level, true);
+		c->root = bch_btree_node_get(c, NULL, k, j->btree_level, true);
 		if (IS_ERR_OR_NULL(c->root))
 			goto err;
 
@@ -1666,7 +1665,7 @@ static void run_cache_set(struct cache_set *c)
 			goto err;
 
 		err = "cannot allocate new btree root";
-		c->root = bch_btree_node_alloc(c, 0, true);
+		c->root = bch_btree_node_alloc(c, NULL, 0);
 		if (IS_ERR_OR_NULL(c->root))
 			goto err;
 
@@ -1847,13 +1846,7 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
 	for_each_bucket(b, ca)
 		atomic_set(&b->pin, 0);
 
-	if (bch_cache_allocator_init(ca))
-		goto err;
-
 	return 0;
-err:
-	kobject_put(&ca->kobj);
-	return -ENOMEM;
 }
 
 static void register_cache(struct cache_sb *sb, struct page *sb_page,