author		Kent Overstreet <kmo@daterainc.com>	2013-12-17 04:29:34 -0500
committer	Kent Overstreet <kmo@daterainc.com>	2014-01-08 16:05:09 -0500
commit		78365411b344df35a198b119133e6515c2dcfb9f (patch)
tree		e94c2e1bd0d5dc53e6a938b012e9b20d3a511eca /drivers/md/bcache/super.c
parent		1dd13c8d3c2d82e1b668d0b4754591291656542a (diff)
bcache: Rework allocator reserves
We need a reserve for allocating buckets for new btree nodes - and now
that we've got multiple btrees, it really needs to be per btree.

This reworks the reserves so we've got separate freelists for each
reserve instead of watermarks, which seems to make things a bit cleaner,
and it adds some code so that btree_split() can make sure the reserve is
available before it starts.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
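For orientation, the RESERVE_* identifiers used in the hunks below (RESERVE_BTREE, RESERVE_PRIO, RESERVE_MOVINGGC, RESERVE_NONE, RESERVE_NR) are defined elsewhere in this patch, outside super.c. The following is a minimal sketch of the shape of those per-reserve freelists; the struct and fifo type here are illustrative stand-ins, not code from the patch.

/*
 * Sketch only: the enum values match the names used in the super.c diff,
 * but the surrounding types are simplified stand-ins for bcache's
 * DECLARE_FIFO()-based freelists.
 */
enum alloc_reserve {
	RESERVE_BTREE,		/* buckets for new btree nodes */
	RESERVE_PRIO,		/* buckets for prio/gen metadata writes */
	RESERVE_MOVINGGC,	/* buckets for moving garbage collection */
	RESERVE_NONE,		/* ordinary data writes */
	RESERVE_NR,
};

struct bucket_fifo {		/* stand-in for DECLARE_FIFO(long, ...) */
	size_t front, back, size;
	long *data;
};

struct cache_sketch {
	/*
	 * Previously: one freelist plus WATERMARK_* thresholds.
	 * Now: one freelist per reserve, so callers such as bch_prio_write()
	 * or btree node allocation each draw from their own fifo and cannot
	 * be starved by ordinary data allocations.
	 */
	struct bucket_fifo	free[RESERVE_NR];
};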
Diffstat (limited to 'drivers/md/bcache/super.c')
-rw-r--r--	drivers/md/bcache/super.c	21
1 file changed, 13 insertions(+), 8 deletions(-)
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index b057676fc67d..63ebef78df4a 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -444,7 +444,7 @@ static int __uuid_write(struct cache_set *c)
 
 	lockdep_assert_held(&bch_register_lock);
 
-	if (bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, true))
+	if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true))
 		return 1;
 
 	SET_KEY_SIZE(&k.key, c->sb.bucket_size);
@@ -562,8 +562,8 @@ void bch_prio_write(struct cache *ca)
 	atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
 			&ca->meta_sectors_written);
 
-	pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
-		 fifo_used(&ca->free_inc), fifo_used(&ca->unused));
+	//pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
+	//	 fifo_used(&ca->free_inc), fifo_used(&ca->unused));
 
 	for (i = prio_buckets(ca) - 1; i >= 0; --i) {
 		long bucket;
@@ -582,7 +582,7 @@ void bch_prio_write(struct cache *ca)
 		p->magic = pset_magic(&ca->sb);
 		p->csum = bch_crc64(&p->magic, bucket_bytes(ca) - 8);
 
-		bucket = bch_bucket_alloc(ca, WATERMARK_PRIO, true);
+		bucket = bch_bucket_alloc(ca, RESERVE_PRIO, true);
 		BUG_ON(bucket == -1);
 
 		mutex_unlock(&ca->set->bucket_lock);
@@ -1767,6 +1767,7 @@ err:
 void bch_cache_release(struct kobject *kobj)
 {
 	struct cache *ca = container_of(kobj, struct cache, kobj);
+	unsigned i;
 
 	if (ca->set)
 		ca->set->cache[ca->sb.nr_this_dev] = NULL;
@@ -1780,7 +1781,9 @@ void bch_cache_release(struct kobject *kobj)
 	free_heap(&ca->heap);
 	free_fifo(&ca->unused);
 	free_fifo(&ca->free_inc);
-	free_fifo(&ca->free);
+
+	for (i = 0; i < RESERVE_NR; i++)
+		free_fifo(&ca->free[i]);
 
 	if (ca->sb_bio.bi_inline_vecs[0].bv_page)
 		put_page(ca->sb_bio.bi_io_vec[0].bv_page);
@@ -1806,10 +1809,12 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
 	ca->journal.bio.bi_max_vecs = 8;
 	ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs;
 
-	free = roundup_pow_of_two(ca->sb.nbuckets) >> 9;
-	free = max_t(size_t, free, (prio_buckets(ca) + 8) * 2);
+	free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
 
-	if (!init_fifo(&ca->free, free, GFP_KERNEL) ||
+	if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) ||
+	    !init_fifo(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
+	    !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) ||
+	    !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) ||
 	    !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) ||
 	    !init_fifo(&ca->unused, free << 2, GFP_KERNEL) ||
 	    !init_heap(&ca->heap, free << 3, GFP_KERNEL) ||
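As a rough worked example of the new FIFO sizing in cache_alloc() (the bucket count and prio_buckets() value below are invented for illustration and do not come from the patch):

#include <stdio.h>

/* Userspace stand-in for the kernel's roundup_pow_of_two(). */
static unsigned long roundup_pow_of_two_sketch(unsigned long n)
{
	unsigned long p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned long nbuckets = 1000000;	/* hypothetical ca->sb.nbuckets */
	unsigned long prio = 4;			/* hypothetical prio_buckets(ca) */
	unsigned long free = roundup_pow_of_two_sketch(nbuckets) >> 10;

	/* The old code used >> 9 (twice as many entries) and clamped the
	 * result to at least (prio_buckets(ca) + 8) * 2. */
	printf("RESERVE_BTREE    : %d entries\n", 8);
	printf("RESERVE_PRIO     : %lu entries\n", prio);
	printf("RESERVE_MOVINGGC : %lu entries\n", free);	/* 1024 here */
	printf("RESERVE_NONE     : %lu entries\n", free);	/* 1024 here */
	printf("free_inc, unused : %lu entries each\n", free << 2);	/* 4096 here */
	return 0;
}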