author    Kent Overstreet <kmo@daterainc.com>  2013-12-17 04:29:34 -0500
committer Kent Overstreet <kmo@daterainc.com>  2014-01-08 16:05:09 -0500
commit    78365411b344df35a198b119133e6515c2dcfb9f
tree      e94c2e1bd0d5dc53e6a938b012e9b20d3a511eca  /drivers/md
parent    1dd13c8d3c2d82e1b668d0b4754591291656542a
bcache: Rework allocator reserves
We need a reserve for allocating buckets for new btree nodes - and now that
we've got multiple btrees, it really needs to be per btree.

This reworks the reserves so we've got separate freelists for each reserve
instead of watermarks, which seems to make things a bit cleaner, and it adds
some code so that btree_split() can make sure the reserve is available before
it starts.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
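In short, the per-cache watermark array becomes an array of freelists indexed
by reserve type, and bch_bucket_alloc() turns into a pair of fifo pops instead
of a watermark comparison. A condensed sketch of that shape, abridged from the
diff below (locking, debug checks and the wait/retry path elided):

enum alloc_reserve {
        RESERVE_BTREE,          /* buckets for new btree nodes */
        RESERVE_PRIO,           /* prio/gen buckets - the most important reserve */
        RESERVE_MOVINGGC,
        RESERVE_NONE,           /* everything else (normal data writes) */
        RESERVE_NR,
};

/* struct cache: one freelist per reserve replaces watermark[WATERMARK_MAX]: */
/*      DECLARE_FIFO(long, free)[RESERVE_NR];                                 */

long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
{
        long r;

        /* fastpath: general pool first, then the caller's dedicated reserve */
        if (fifo_pop(&ca->free[RESERVE_NONE], r) ||
            fifo_pop(&ca->free[reserve], r))
                return r;
        /* ... otherwise fail if !wait, or sleep on bucket_wait and retry ... */
}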
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/bcache/alloc.c    | 72
-rw-r--r--  drivers/md/bcache/bcache.h   | 16
-rw-r--r--  drivers/md/bcache/btree.c    | 34
-rw-r--r--  drivers/md/bcache/btree.h    |  4
-rw-r--r--  drivers/md/bcache/movinggc.c |  2
-rw-r--r--  drivers/md/bcache/super.c    | 21
-rw-r--r--  drivers/md/bcache/sysfs.c    | 31
7 files changed, 101 insertions(+), 79 deletions(-)
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 4c9852d92b0a..bcfd96e2121b 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -132,10 +132,16 @@ bool bch_bucket_add_unused(struct cache *ca, struct bucket *b)
 {
        BUG_ON(GC_MARK(b) || GC_SECTORS_USED(b));
 
-       if (fifo_used(&ca->free) > ca->watermark[WATERMARK_MOVINGGC] &&
-           CACHE_REPLACEMENT(&ca->sb) == CACHE_REPLACEMENT_FIFO)
-               return false;
+       if (CACHE_REPLACEMENT(&ca->sb) == CACHE_REPLACEMENT_FIFO) {
+               unsigned i;
+
+               for (i = 0; i < RESERVE_NONE; i++)
+                       if (!fifo_full(&ca->free[i]))
+                               goto add;
 
+               return false;
+       }
+add:
        b->prio = 0;
 
        if (can_inc_bucket_gen(b) &&
@@ -304,6 +310,21 @@ do { \
        __set_current_state(TASK_RUNNING);                              \
 } while (0)
 
+static int bch_allocator_push(struct cache *ca, long bucket)
+{
+       unsigned i;
+
+       /* Prios/gens are actually the most important reserve */
+       if (fifo_push(&ca->free[RESERVE_PRIO], bucket))
+               return true;
+
+       for (i = 0; i < RESERVE_NR; i++)
+               if (fifo_push(&ca->free[i], bucket))
+                       return true;
+
+       return false;
+}
+
 static int bch_allocator_thread(void *arg)
 {
        struct cache *ca = arg;
@@ -336,9 +357,7 @@ static int bch_allocator_thread(void *arg)
                        mutex_lock(&ca->set->bucket_lock);
                }
 
-               allocator_wait(ca, !fifo_full(&ca->free));
-
-               fifo_push(&ca->free, bucket);
+               allocator_wait(ca, bch_allocator_push(ca, bucket));
                wake_up(&ca->set->bucket_wait);
        }
 
@@ -365,34 +384,29 @@ static int bch_allocator_thread(void *arg)
        }
 }
 
-long bch_bucket_alloc(struct cache *ca, unsigned watermark, bool wait)
+long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
 {
        DEFINE_WAIT(w);
        struct bucket *b;
        long r;
 
        /* fastpath */
-       if (fifo_used(&ca->free) > ca->watermark[watermark]) {
-               fifo_pop(&ca->free, r);
+       if (fifo_pop(&ca->free[RESERVE_NONE], r) ||
+           fifo_pop(&ca->free[reserve], r))
                goto out;
-       }
 
        if (!wait)
                return -1;
 
-       while (1) {
-               if (fifo_used(&ca->free) > ca->watermark[watermark]) {
-                       fifo_pop(&ca->free, r);
-                       break;
-               }
-
+       do {
                prepare_to_wait(&ca->set->bucket_wait, &w,
                                TASK_UNINTERRUPTIBLE);
 
                mutex_unlock(&ca->set->bucket_lock);
                schedule();
                mutex_lock(&ca->set->bucket_lock);
-       }
+       } while (!fifo_pop(&ca->free[RESERVE_NONE], r) &&
+                !fifo_pop(&ca->free[reserve], r));
 
        finish_wait(&ca->set->bucket_wait, &w);
 out:
@@ -401,12 +415,14 @@ out:
        if (expensive_debug_checks(ca->set)) {
                size_t iter;
                long i;
+               unsigned j;
 
                for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
                        BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);
 
-               fifo_for_each(i, &ca->free, iter)
-                       BUG_ON(i == r);
+               for (j = 0; j < RESERVE_NR; j++)
+                       fifo_for_each(i, &ca->free[j], iter)
+                               BUG_ON(i == r);
                fifo_for_each(i, &ca->free_inc, iter)
                        BUG_ON(i == r);
                fifo_for_each(i, &ca->unused, iter)
@@ -419,7 +435,7 @@ out:
 
        SET_GC_SECTORS_USED(b, ca->sb.bucket_size);
 
-       if (watermark <= WATERMARK_METADATA) {
+       if (reserve <= RESERVE_PRIO) {
                SET_GC_MARK(b, GC_MARK_METADATA);
                SET_GC_MOVE(b, 0);
                b->prio = BTREE_PRIO;
@@ -445,7 +461,7 @@ void bch_bucket_free(struct cache_set *c, struct bkey *k)
        }
 }
 
-int __bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
+int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
                           struct bkey *k, int n, bool wait)
 {
        int i;
@@ -459,7 +475,7 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
 
        for (i = 0; i < n; i++) {
                struct cache *ca = c->cache_by_alloc[i];
-               long b = bch_bucket_alloc(ca, watermark, wait);
+               long b = bch_bucket_alloc(ca, reserve, wait);
 
                if (b == -1)
                        goto err;
@@ -478,12 +494,12 @@ err:
        return -1;
 }
 
-int bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
+int bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
                         struct bkey *k, int n, bool wait)
 {
        int ret;
        mutex_lock(&c->bucket_lock);
-       ret = __bch_bucket_alloc_set(c, watermark, k, n, wait);
+       ret = __bch_bucket_alloc_set(c, reserve, k, n, wait);
        mutex_unlock(&c->bucket_lock);
        return ret;
 }
@@ -573,8 +589,8 @@ bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
 
        while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) {
                unsigned watermark = write_prio
-                       ? WATERMARK_MOVINGGC
-                       : WATERMARK_NONE;
+                       ? RESERVE_MOVINGGC
+                       : RESERVE_NONE;
 
                spin_unlock(&c->data_bucket_lock);
 
@@ -689,7 +705,7 @@ int bch_cache_allocator_init(struct cache *ca)
         * Then 8 for btree allocations
         * Then half for the moving garbage collector
         */
-
+#if 0
        ca->watermark[WATERMARK_PRIO] = 0;
 
        ca->watermark[WATERMARK_METADATA] = prio_buckets(ca);
@@ -699,6 +715,6 @@ int bch_cache_allocator_init(struct cache *ca)
 
        ca->watermark[WATERMARK_NONE] = ca->free.size / 2 +
                ca->watermark[WATERMARK_MOVINGGC];
-
+#endif
        return 0;
 }
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 9d062bc56261..94d346e2ea17 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -383,12 +383,12 @@ struct cached_dev {
        unsigned                writeback_rate_p_term_inverse;
 };
 
-enum alloc_watermarks {
-       WATERMARK_PRIO,
-       WATERMARK_METADATA,
-       WATERMARK_MOVINGGC,
-       WATERMARK_NONE,
-       WATERMARK_MAX
+enum alloc_reserve {
+       RESERVE_BTREE,
+       RESERVE_PRIO,
+       RESERVE_MOVINGGC,
+       RESERVE_NONE,
+       RESERVE_NR,
 };
 
 struct cache {
@@ -400,8 +400,6 @@ struct cache {
        struct kobject          kobj;
        struct block_device     *bdev;
 
-       unsigned                watermark[WATERMARK_MAX];
-
        struct task_struct      *alloc_thread;
 
        struct closure          prio;
@@ -430,7 +428,7 @@ struct cache {
         * because all the data they contained was overwritten), so we only
         * need to discard them before they can be moved to the free list.
         */
-       DECLARE_FIFO(long, free);
+       DECLARE_FIFO(long, free)[RESERVE_NR];
        DECLARE_FIFO(long, free_inc);
        DECLARE_FIFO(long, unused);
 
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 101231f0f399..6a0f5faf0bed 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -167,6 +167,8 @@ static inline bool should_split(struct btree *b)
                _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);           \
        }                                                               \
        rw_unlock(_w, _b);                                              \
+       if (_r == -EINTR)                                               \
+               schedule();                                             \
        bch_cannibalize_unlock(c);                                      \
        if (_r == -ENOSPC) {                                            \
                wait_event((c)->try_wait,                               \
@@ -175,6 +177,7 @@ static inline bool should_split(struct btree *b)
                }                                                       \
        } while (_r == -EINTR);                                         \
                                                                        \
+       finish_wait(&(c)->bucket_wait, &(op)->wait);                    \
        _r;                                                             \
 })
 
@@ -1075,7 +1078,7 @@ struct btree *bch_btree_node_alloc(struct cache_set *c, int level, bool wait)
 
        mutex_lock(&c->bucket_lock);
 retry:
-       if (__bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, wait))
+       if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait))
                goto err;
 
        bkey_put(c, &k.key);
@@ -1132,6 +1135,28 @@ static void make_btree_freeing_key(struct btree *b, struct bkey *k)
        atomic_inc(&b->c->prio_blocked);
 }
 
+static int btree_check_reserve(struct btree *b, struct btree_op *op)
+{
+       struct cache_set *c = b->c;
+       struct cache *ca;
+       unsigned i, reserve = c->root->level * 2 + 1;
+       int ret = 0;
+
+       mutex_lock(&c->bucket_lock);
+
+       for_each_cache(ca, c, i)
+               if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
+                       if (op)
+                               prepare_to_wait(&c->bucket_wait, &op->wait,
+                                               TASK_UNINTERRUPTIBLE);
+                       ret = -EINTR;
+                       break;
+               }
+
+       mutex_unlock(&c->bucket_lock);
+       return ret;
+}
+
 /* Garbage collection */
 
 uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k)
@@ -1428,7 +1453,8 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op,
 
                if (!IS_ERR(last->b)) {
                        should_rewrite = btree_gc_mark_node(last->b, gc);
-                       if (should_rewrite) {
+                       if (should_rewrite &&
+                           !btree_check_reserve(b, NULL)) {
                                n = btree_node_alloc_replacement(last->b,
                                                                 false);
 
@@ -2071,6 +2097,10 @@ static int btree_split(struct btree *b, struct btree_op *op,
        closure_init_stack(&cl);
        bch_keylist_init(&parent_keys);
 
+       if (!b->level &&
+           btree_check_reserve(b, op))
+               return -EINTR;
+
        n1 = btree_node_alloc_replacement(b, true);
        if (IS_ERR(n1))
                goto err;
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index d68af7442f70..4f0378ac1f7b 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -241,6 +241,9 @@ void bkey_put(struct cache_set *c, struct bkey *k);
 /* Recursing down the btree */
 
 struct btree_op {
+       /* for waiting on btree reserve in btree_split() */
+       wait_queue_t            wait;
+
        /* Btree level at which we start taking write locks */
        short                   lock;
 
@@ -250,6 +253,7 @@ struct btree_op {
 static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level)
 {
        memset(op, 0, sizeof(struct btree_op));
+       init_wait(&op->wait);
        op->lock = write_lock_level;
 }
 
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 052bd24d24b4..9eb60d102de8 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -211,7 +211,7 @@ void bch_moving_gc(struct cache_set *c)
        for_each_cache(ca, c, i) {
                unsigned sectors_to_move = 0;
                unsigned reserve_sectors = ca->sb.bucket_size *
-                       min(fifo_used(&ca->free), ca->free.size / 2);
+                       fifo_used(&ca->free[RESERVE_MOVINGGC]);
 
                ca->heap.used = 0;
 
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index b057676fc67d..63ebef78df4a 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -444,7 +444,7 @@ static int __uuid_write(struct cache_set *c)
 
        lockdep_assert_held(&bch_register_lock);
 
-       if (bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, true))
+       if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true))
                return 1;
 
        SET_KEY_SIZE(&k.key, c->sb.bucket_size);
@@ -562,8 +562,8 @@ void bch_prio_write(struct cache *ca)
        atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
                        &ca->meta_sectors_written);
 
-       pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
-                fifo_used(&ca->free_inc), fifo_used(&ca->unused));
+       //pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
+       //       fifo_used(&ca->free_inc), fifo_used(&ca->unused));
 
        for (i = prio_buckets(ca) - 1; i >= 0; --i) {
                long bucket;
@@ -582,7 +582,7 @@ void bch_prio_write(struct cache *ca)
        p->magic = pset_magic(&ca->sb);
        p->csum = bch_crc64(&p->magic, bucket_bytes(ca) - 8);
 
-       bucket = bch_bucket_alloc(ca, WATERMARK_PRIO, true);
+       bucket = bch_bucket_alloc(ca, RESERVE_PRIO, true);
        BUG_ON(bucket == -1);
 
        mutex_unlock(&ca->set->bucket_lock);
@@ -1767,6 +1767,7 @@ err:
 void bch_cache_release(struct kobject *kobj)
 {
        struct cache *ca = container_of(kobj, struct cache, kobj);
+       unsigned i;
 
        if (ca->set)
                ca->set->cache[ca->sb.nr_this_dev] = NULL;
@@ -1780,7 +1781,9 @@ void bch_cache_release(struct kobject *kobj)
        free_heap(&ca->heap);
        free_fifo(&ca->unused);
        free_fifo(&ca->free_inc);
-       free_fifo(&ca->free);
+
+       for (i = 0; i < RESERVE_NR; i++)
+               free_fifo(&ca->free[i]);
 
        if (ca->sb_bio.bi_inline_vecs[0].bv_page)
                put_page(ca->sb_bio.bi_io_vec[0].bv_page);
@@ -1806,10 +1809,12 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
        ca->journal.bio.bi_max_vecs = 8;
        ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs;
 
-       free = roundup_pow_of_two(ca->sb.nbuckets) >> 9;
-       free = max_t(size_t, free, (prio_buckets(ca) + 8) * 2);
+       free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
 
-       if (!init_fifo(&ca->free, free, GFP_KERNEL) ||
+       if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) ||
+           !init_fifo(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
+           !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) ||
+           !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) ||
            !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) ||
            !init_fifo(&ca->unused, free << 2, GFP_KERNEL) ||
            !init_heap(&ca->heap, free << 3, GFP_KERNEL) ||
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index a1f85612f0b3..d5dd282b176f 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -102,7 +102,6 @@ rw_attribute(bypass_torture_test);
 rw_attribute(key_merging_disabled);
 rw_attribute(gc_always_rewrite);
 rw_attribute(expensive_debug_checks);
-rw_attribute(freelist_percent);
 rw_attribute(cache_replacement_policy);
 rw_attribute(btree_shrinker_disabled);
 rw_attribute(copy_gc_enabled);
@@ -711,9 +710,6 @@ SHOW(__bch_cache)
        sysfs_print(io_errors,
                    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);
 
-       sysfs_print(freelist_percent, ca->free.size * 100 /
-                   ((size_t) ca->sb.nbuckets));
-
        if (attr == &sysfs_cache_replacement_policy)
                return bch_snprint_string_list(buf, PAGE_SIZE,
                                               cache_replacement_policies,
@@ -820,32 +816,6 @@ STORE(__bch_cache)
                }
        }
 
-       if (attr == &sysfs_freelist_percent) {
-               DECLARE_FIFO(long, free);
-               long i;
-               size_t p = strtoul_or_return(buf);
-
-               p = clamp_t(size_t,
-                           ((size_t) ca->sb.nbuckets * p) / 100,
-                           roundup_pow_of_two(ca->sb.nbuckets) >> 9,
-                           ca->sb.nbuckets / 2);
-
-               if (!init_fifo_exact(&free, p, GFP_KERNEL))
-                       return -ENOMEM;
-
-               mutex_lock(&ca->set->bucket_lock);
-
-               fifo_move(&free, &ca->free);
-               fifo_swap(&free, &ca->free);
-
-               mutex_unlock(&ca->set->bucket_lock);
-
-               while (fifo_pop(&free, i))
-                       atomic_dec(&ca->buckets[i].pin);
-
-               free_fifo(&free);
-       }
-
        if (attr == &sysfs_clear_stats) {
                atomic_long_set(&ca->sectors_written, 0);
                atomic_long_set(&ca->btree_sectors_written, 0);
@@ -869,7 +839,6 @@ static struct attribute *bch_cache_files[] = {
        &sysfs_metadata_written,
        &sysfs_io_errors,
        &sysfs_clear_stats,
-       &sysfs_freelist_percent,
        &sysfs_cache_replacement_policy,
        NULL
 };