author    Kent Overstreet <kmo@daterainc.com>    2013-07-24 20:29:09 -0400
committer Kent Overstreet <kmo@daterainc.com>    2013-11-11 00:56:04 -0500
commit    35fcd848d72683141052aa9880542461577f2dbe (patch)
tree      f88ebdbc88c9c7eebf33f603a2deb24e39e2bb9a /drivers/md
parent    e8e1d4682c8cb06dbcb5ef7bb851bf9bcb889c84 (diff)
bcache: Convert bucket_wait to wait_queue_head_t
At one point we did do fancy asynchronous waiting stuff with bucket_wait,
but that's all gone (and bucket_wait is used a lot less than it used to
be). So use the standard primitives.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
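The "standard primitives" are the kernel's wait-queue API. Condensed from
the alloc.c hunk below, the new wait loop in bch_bucket_alloc() has this
shape - a sketch for orientation with added comments, not the verbatim
patch:

    /*
     * Sleep until the allocator thread refills ca->free. bucket_lock
     * must be dropped across schedule(): the allocator thread needs it
     * to push buckets and call wake_up(&ca->set->bucket_wait).
     */
    DEFINE_WAIT(w);
    long r;

    while (1) {
            if (fifo_used(&ca->free) > ca->watermark[watermark]) {
                    fifo_pop(&ca->free, r);         /* got a bucket */
                    break;
            }

            /* Queue ourselves before sleeping; TASK_UNINTERRUPTIBLE so
             * the sleep isn't cut short by a signal. The condition is
             * re-checked under bucket_lock on every pass, so a wake-up
             * that races with the check is simply absorbed by the loop. */
            prepare_to_wait(&ca->set->bucket_wait, &w,
                            TASK_UNINTERRUPTIBLE);

            mutex_unlock(&ca->set->bucket_lock);
            schedule();
            mutex_lock(&ca->set->bucket_lock);
    }

    finish_wait(&ca->set->bucket_wait, &w);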
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/bcache/alloc.c   | 82
-rw-r--r--  drivers/md/bcache/bcache.h  |  8
-rw-r--r--  drivers/md/bcache/btree.c   | 25
-rw-r--r--  drivers/md/bcache/btree.h   |  6
-rw-r--r--  drivers/md/bcache/request.c |  9
-rw-r--r--  drivers/md/bcache/super.c   |  7

6 files changed, 70 insertions(+), 67 deletions(-)
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index e033b0203b68..1b64e662e81b 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -339,7 +339,7 @@ static int bch_allocator_thread(void *arg)
                         allocator_wait(ca, !fifo_full(&ca->free));
 
                         fifo_push(&ca->free, bucket);
-                        closure_wake_up(&ca->set->bucket_wait);
+                        wake_up(&ca->set->bucket_wait);
                 }
 
                 /*
@@ -365,16 +365,41 @@ static int bch_allocator_thread(void *arg)
         }
 }
 
-long bch_bucket_alloc(struct cache *ca, unsigned watermark, struct closure *cl)
+long bch_bucket_alloc(struct cache *ca, unsigned watermark, bool wait)
 {
-        long r = -1;
-again:
+        DEFINE_WAIT(w);
+        struct bucket *b;
+        long r;
+
+        /* fastpath */
+        if (fifo_used(&ca->free) > ca->watermark[watermark]) {
+                fifo_pop(&ca->free, r);
+                goto out;
+        }
+
+        if (!wait)
+                return -1;
+
+        while (1) {
+                if (fifo_used(&ca->free) > ca->watermark[watermark]) {
+                        fifo_pop(&ca->free, r);
+                        break;
+                }
+
+                prepare_to_wait(&ca->set->bucket_wait, &w,
+                                TASK_UNINTERRUPTIBLE);
+
+                mutex_unlock(&ca->set->bucket_lock);
+                schedule();
+                mutex_lock(&ca->set->bucket_lock);
+        }
+
+        finish_wait(&ca->set->bucket_wait, &w);
+out:
         wake_up_process(ca->alloc_thread);
 
-        if (fifo_used(&ca->free) > ca->watermark[watermark] &&
-            fifo_pop(&ca->free, r)) {
-                struct bucket *b = ca->buckets + r;
 #ifdef CONFIG_BCACHE_EDEBUG
+        {
                 size_t iter;
                 long i;
 
@@ -387,36 +412,23 @@ again:
                         BUG_ON(i == r);
                 fifo_for_each(i, &ca->unused, iter)
                         BUG_ON(i == r);
-#endif
-                BUG_ON(atomic_read(&b->pin) != 1);
-
-                SET_GC_SECTORS_USED(b, ca->sb.bucket_size);
-
-                if (watermark <= WATERMARK_METADATA) {
-                        SET_GC_MARK(b, GC_MARK_METADATA);
-                        b->prio = BTREE_PRIO;
-                } else {
-                        SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
-                        b->prio = INITIAL_PRIO;
-                }
-
-                return r;
         }
+#endif
+        b = ca->buckets + r;
 
-        trace_bcache_alloc_fail(ca);
+        BUG_ON(atomic_read(&b->pin) != 1);
 
-        if (cl) {
-                closure_wait(&ca->set->bucket_wait, cl);
+        SET_GC_SECTORS_USED(b, ca->sb.bucket_size);
 
-                if (closure_blocking(cl)) {
-                        mutex_unlock(&ca->set->bucket_lock);
-                        closure_sync(cl);
-                        mutex_lock(&ca->set->bucket_lock);
-                        goto again;
-                }
-        }
+        if (watermark <= WATERMARK_METADATA) {
+                SET_GC_MARK(b, GC_MARK_METADATA);
+                b->prio = BTREE_PRIO;
+        } else {
+                SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
+                b->prio = INITIAL_PRIO;
+        }
 
-        return -1;
+        return r;
 }
 
 void bch_bucket_free(struct cache_set *c, struct bkey *k)
@@ -433,7 +445,7 @@ void bch_bucket_free(struct cache_set *c, struct bkey *k)
 }
 
 int __bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
-                           struct bkey *k, int n, struct closure *cl)
+                           struct bkey *k, int n, bool wait)
 {
         int i;
 
@@ -446,7 +458,7 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
 
         for (i = 0; i < n; i++) {
                 struct cache *ca = c->cache_by_alloc[i];
-                long b = bch_bucket_alloc(ca, watermark, cl);
+                long b = bch_bucket_alloc(ca, watermark, wait);
 
                 if (b == -1)
                         goto err;
@@ -466,11 +478,11 @@ err:
 }
 
 int bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
-                         struct bkey *k, int n, struct closure *cl)
+                         struct bkey *k, int n, bool wait)
 {
         int ret;
         mutex_lock(&c->bucket_lock);
-        ret = __bch_bucket_alloc_set(c, watermark, k, n, cl);
+        ret = __bch_bucket_alloc_set(c, watermark, k, n, wait);
         mutex_unlock(&c->bucket_lock);
         return ret;
 }
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index c1c44191afb1..d3520748bc27 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -750,7 +750,7 @@ struct cache_set {
          * written.
          */
         atomic_t                prio_blocked;
-        struct closure_waitlist bucket_wait;
+        wait_queue_head_t       bucket_wait;
 
         /*
          * For any bio we don't skip we subtract the number of sectors from
@@ -1162,13 +1162,13 @@ uint8_t bch_inc_gen(struct cache *, struct bucket *);
 void bch_rescale_priorities(struct cache_set *, int);
 bool bch_bucket_add_unused(struct cache *, struct bucket *);
 
-long bch_bucket_alloc(struct cache *, unsigned, struct closure *);
+long bch_bucket_alloc(struct cache *, unsigned, bool);
 void bch_bucket_free(struct cache_set *, struct bkey *);
 
 int __bch_bucket_alloc_set(struct cache_set *, unsigned,
-                           struct bkey *, int, struct closure *);
+                           struct bkey *, int, bool);
 int bch_bucket_alloc_set(struct cache_set *, unsigned,
-                         struct bkey *, int, struct closure *);
+                         struct bkey *, int, bool);
 
 __printf(2, 3)
 bool bch_cache_set_error(struct cache_set *, const char *, ...);
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 4d50f1e7006e..935d90df397b 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -813,7 +813,7 @@ static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k)
  * cannibalize_bucket() will take. This means every time we unlock the root of
  * the btree, we need to release this lock if we have it held.
  */
-void bch_cannibalize_unlock(struct cache_set *c, struct closure *cl)
+void bch_cannibalize_unlock(struct cache_set *c)
 {
         if (c->try_harder == current) {
                 bch_time_stats_update(&c->try_harder_time, c->try_harder_start);
@@ -995,15 +995,14 @@ static void btree_node_free(struct btree *b)
         mutex_unlock(&b->c->bucket_lock);
 }
 
-struct btree *bch_btree_node_alloc(struct cache_set *c, int level,
-                                   struct closure *cl)
+struct btree *bch_btree_node_alloc(struct cache_set *c, int level)
 {
         BKEY_PADDED(key) k;
         struct btree *b = ERR_PTR(-EAGAIN);
 
         mutex_lock(&c->bucket_lock);
 retry:
-        if (__bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, cl))
+        if (__bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, true))
                 goto err;
 
         SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);
@@ -1036,10 +1035,9 @@ err:
         return b;
 }
 
-static struct btree *btree_node_alloc_replacement(struct btree *b,
-                                                  struct closure *cl)
+static struct btree *btree_node_alloc_replacement(struct btree *b)
 {
-        struct btree *n = bch_btree_node_alloc(b->c, b->level, cl);
+        struct btree *n = bch_btree_node_alloc(b->c, b->level);
         if (!IS_ERR_OR_NULL(n))
                 bch_btree_sort_into(b, n);
 
@@ -1152,7 +1150,7 @@ static struct btree *btree_gc_alloc(struct btree *b, struct bkey *k)
          * bch_bucket_alloc_set(), or we'd risk deadlock - so we don't pass it
          * our closure.
          */
-        struct btree *n = btree_node_alloc_replacement(b, NULL);
+        struct btree *n = btree_node_alloc_replacement(b);
 
         if (!IS_ERR_OR_NULL(n)) {
                 swap(b, n);
@@ -1359,7 +1357,7 @@ static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
         int ret = 0, stale = btree_gc_mark_node(b, &keys, gc);
 
         if (b->level || stale > 10)
-                n = btree_node_alloc_replacement(b, NULL);
+                n = btree_node_alloc_replacement(b);
 
         if (!IS_ERR_OR_NULL(n))
                 swap(b, n);
@@ -1882,10 +1880,7 @@ static int btree_split(struct btree *b, struct btree_op *op,
         struct btree *n1, *n2 = NULL, *n3 = NULL;
         uint64_t start_time = local_clock();
 
-        if (b->level)
-                set_closure_blocking(&op->cl);
-
-        n1 = btree_node_alloc_replacement(b, &op->cl);
+        n1 = btree_node_alloc_replacement(b);
         if (IS_ERR(n1))
                 goto err;
 
@@ -1896,12 +1891,12 @@ static int btree_split(struct btree *b, struct btree_op *op,
 
         trace_bcache_btree_node_split(b, n1->sets[0].data->keys);
 
-        n2 = bch_btree_node_alloc(b->c, b->level, &op->cl);
+        n2 = bch_btree_node_alloc(b->c, b->level);
         if (IS_ERR(n2))
                 goto err_free1;
 
         if (!b->parent) {
-                n3 = bch_btree_node_alloc(b->c, b->level + 1, &op->cl);
+                n3 = bch_btree_node_alloc(b->c, b->level + 1);
                 if (IS_ERR(n3))
                         goto err_free2;
         }
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index 72794ab8e8e5..d691d954730e 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -355,7 +355,7 @@ static inline void rw_unlock(bool w, struct btree *b)
                 _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);           \
         }                                                               \
         rw_unlock(_w, _b);                                              \
-        bch_cannibalize_unlock(c, &(op)->cl);                           \
+        bch_cannibalize_unlock(c);                                      \
         if (_r == -ENOSPC) {                                            \
                 wait_event((c)->try_wait,                               \
                            !(c)->try_harder);                           \
@@ -377,9 +377,9 @@ static inline bool should_split(struct btree *b)
 void bch_btree_node_read(struct btree *);
 void bch_btree_node_write(struct btree *, struct closure *);
 
-void bch_cannibalize_unlock(struct cache_set *, struct closure *);
+void bch_cannibalize_unlock(struct cache_set *);
 void bch_btree_set_root(struct btree *);
-struct btree *bch_btree_node_alloc(struct cache_set *, int, struct closure *);
+struct btree *bch_btree_node_alloc(struct cache_set *, int);
 struct btree *bch_btree_node_get(struct cache_set *, struct bkey *, int, bool);
 
 int bch_btree_insert_check_key(struct btree *, struct btree_op *,
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index d85c7001df61..26d18f4bf4a0 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -350,14 +350,8 @@ static bool bch_alloc_sectors(struct bkey *k, unsigned sectors,
         struct cache_set *c = s->op.c;
         struct open_bucket *b;
         BKEY_PADDED(key) alloc;
-        struct closure cl, *w = NULL;
         unsigned i;
 
-        if (s->writeback) {
-                closure_init_stack(&cl);
-                w = &cl;
-        }
-
         /*
          * We might have to allocate a new bucket, which we can't do with a
          * spinlock held. So if we have to allocate, we drop the lock, allocate
@@ -375,7 +369,8 @@ static bool bch_alloc_sectors(struct bkey *k, unsigned sectors,
 
         spin_unlock(&c->data_bucket_lock);
 
-        if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, w))
+        if (bch_bucket_alloc_set(c, watermark, &alloc.key,
+                                 1, s->writeback))
                 return false;
 
         spin_lock(&c->data_bucket_lock);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 9a164cd4058c..84398a82fbe3 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -427,7 +427,7 @@ static int __uuid_write(struct cache_set *c)
 
         lockdep_assert_held(&bch_register_lock);
 
-        if (bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, &cl))
+        if (bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, true))
                 return 1;
 
         SET_KEY_SIZE(&k.key, c->sb.bucket_size);
@@ -565,7 +565,7 @@ void bch_prio_write(struct cache *ca)
                 p->magic = pset_magic(ca);
                 p->csum = bch_crc64(&p->magic, bucket_bytes(ca) - 8);
 
-                bucket = bch_bucket_alloc(ca, WATERMARK_PRIO, &cl);
+                bucket = bch_bucket_alloc(ca, WATERMARK_PRIO, true);
                 BUG_ON(bucket == -1);
 
                 mutex_unlock(&ca->set->bucket_lock);
@@ -1439,6 +1439,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
         closure_init_unlocked(&c->sb_write);
         mutex_init(&c->bucket_lock);
         init_waitqueue_head(&c->try_wait);
+        init_waitqueue_head(&c->bucket_wait);
         closure_init_unlocked(&c->uuid_write);
         spin_lock_init(&c->sort_time_lock);
         mutex_init(&c->sort_lock);
@@ -1608,7 +1609,7 @@ static void run_cache_set(struct cache_set *c)
                 goto err_unlock_gc;
 
         err = "cannot allocate new btree root";
-        c->root = bch_btree_node_alloc(c, 0, &op.cl);
+        c->root = bch_btree_node_alloc(c, 0);
         if (IS_ERR_OR_NULL(c->root))
                 goto err_unlock_gc;
 
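For callers the conversion is mechanical, as the super.c hunks above show:
where a closure used to express the wait, a bool now does. A hypothetical
before/after pair, caller-side only and not taken verbatim from this patch:

    /* Before: a closure expressed the wait. NULL meant "return -1 rather
     * than waiting", and only a blocking closure actually slept. */
    bucket = bch_bucket_alloc(ca, WATERMARK_PRIO, &cl);

    /* After: true sleeps on bucket_wait until the allocator thread frees
     * up a bucket; false keeps the old fail-fast NULL behaviour. */
    bucket = bch_bucket_alloc(ca, WATERMARK_PRIO, true);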