author		Kent Overstreet <kmo@daterainc.com>	2013-07-24 20:27:07 -0400
committer	Kent Overstreet <kmo@daterainc.com>	2013-11-11 00:56:03 -0500
commit		e8e1d4682c8cb06dbcb5ef7bb851bf9bcb889c84 (patch)
tree		8e2287b8e21ccb51518b7e2e9d54ab9b4714b7bb /drivers
parent		0b93207abb40d3c42bb83eba1e1e7edc1da77810 (diff)
bcache: Convert try_wait to wait_queue_head_t
We never waited on c->try_wait asynchronously, so just use the standard
wait_event()/wake_up() primitives instead of a closure waitlist.
Signed-off-by: Kent Overstreet <kmo@daterainc.com>
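
[Editorial note] The conversion follows the standard kernel waitqueue idiom: one
task claims the resource, and any other task sleeps on a wait_queue_head_t until
the owner wakes it. A minimal sketch of that idiom with illustrative names — this
is not code from the patch, and it assumes callers serialize on a mutex the way
bcache uses c->bucket_lock:

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/wait.h>

/* Hypothetical stand-in for the cache_set fields this patch converts. */
struct example {
        struct task_struct      *holder;        /* like c->try_harder */
        wait_queue_head_t       wait;           /* like c->try_wait */
};

static void example_init(struct example *e)
{
        e->holder = NULL;
        init_waitqueue_head(&e->wait);          /* replaces a closure_waitlist */
}

/* Claim the resource; called with the caller's mutex held. */
static int example_claim(struct example *e)
{
        if (!e->holder)
                e->holder = current;
        else if (e->holder != current)
                return -ENOSPC;                 /* held by another task */
        return 0;
}

/* Release the resource and wake any task sleeping in wait_event(). */
static void example_release(struct example *e)
{
        if (e->holder == current) {
                e->holder = NULL;
                wake_up(&e->wait);              /* replaces __closure_wake_up() */
        }
}

A task that gets -ENOSPC simply does wait_event(e->wait, !e->holder) and retries;
that is the shape of the mca_cannibalize() / bch_cannibalize_unlock() /
btree_root() triple in the diff below.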
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/md/bcache/bcache.h	|   4
-rw-r--r--	drivers/md/bcache/btree.c	| 150
-rw-r--r--	drivers/md/bcache/btree.h	|  10
-rw-r--r--	drivers/md/bcache/super.c	|  10
4 files changed, 75 insertions, 99 deletions
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 10ce0c825fce..c1c44191afb1 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -735,8 +735,8 @@ struct cache_set {
	 * basically a lock for this that we can wait on asynchronously. The
	 * btree_root() macro releases the lock when it returns.
	 */
-	struct closure		*try_harder;
-	struct closure_waitlist	try_wait;
+	struct task_struct	*try_harder;
+	wait_queue_head_t	try_wait;
	uint64_t		try_harder_start;

	/*
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 731cd8e3fe90..4d50f1e7006e 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -437,7 +437,7 @@ static void bch_btree_leaf_dirty(struct btree *b, struct btree_op *op)

	set_btree_node_dirty(b);

-	if (op && op->journal) {
+	if (op->journal) {
		if (w->journal &&
		    journal_pin_cmp(b->c, w, op)) {
			atomic_dec_bug(w->journal);
@@ -574,34 +574,35 @@ static struct btree *mca_bucket_alloc(struct cache_set *c,
	return b;
}

-static int mca_reap(struct btree *b, struct closure *cl, unsigned min_order)
+static int mca_reap(struct btree *b, unsigned min_order, bool flush)
{
+	struct closure cl;
+
+	closure_init_stack(&cl);
	lockdep_assert_held(&b->c->bucket_lock);

	if (!down_write_trylock(&b->lock))
		return -ENOMEM;

-	if (b->page_order < min_order) {
+	BUG_ON(btree_node_dirty(b) && !b->sets[0].data);
+
+	if (b->page_order < min_order ||
+	    (!flush &&
+	     (btree_node_dirty(b) ||
+	      atomic_read(&b->io.cl.remaining) != -1))) {
		rw_unlock(true, b);
		return -ENOMEM;
	}

-	BUG_ON(btree_node_dirty(b) && !b->sets[0].data);
-
-	if (cl && btree_node_dirty(b))
-		bch_btree_node_write(b, NULL);
-
-	if (cl)
-		closure_wait_event_async(&b->io.wait, cl,
-					 atomic_read(&b->io.cl.remaining) == -1);
-
-	if (btree_node_dirty(b) ||
-	    !closure_is_unlocked(&b->io.cl) ||
-	    work_pending(&b->work.work)) {
-		rw_unlock(true, b);
-		return -EAGAIN;
+	if (btree_node_dirty(b)) {
+		bch_btree_node_write(b, &cl);
+		closure_sync(&cl);
	}

+	/* wait for any in flight btree write */
+	closure_wait_event_sync(&b->io.wait, &cl,
+				atomic_read(&b->io.cl.remaining) == -1);
+
	return 0;
}

@@ -641,7 +642,7 @@ static unsigned long bch_mca_scan(struct shrinker *shrink,
			break;

		if (++i > 3 &&
-		    !mca_reap(b, NULL, 0)) {
+		    !mca_reap(b, 0, false)) {
			mca_data_free(b);
			rw_unlock(true, b);
			freed++;
@@ -660,7 +661,7 @@ static unsigned long bch_mca_scan(struct shrinker *shrink,
		list_rotate_left(&c->btree_cache);

		if (!b->accessed &&
-		    !mca_reap(b, NULL, 0)) {
+		    !mca_reap(b, 0, false)) {
			mca_bucket_free(b);
			mca_data_free(b);
			rw_unlock(true, b);
@@ -783,52 +784,27 @@ out:
	return b;
}

-static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k,
-				     int level, struct closure *cl)
+static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k)
{
-	int ret = -ENOMEM;
-	struct btree *i;
+	struct btree *b;

	trace_bcache_btree_cache_cannibalize(c);

-	if (!cl)
-		return ERR_PTR(-ENOMEM);
-
-	/*
-	 * Trying to free up some memory - i.e. reuse some btree nodes - may
-	 * require initiating IO to flush the dirty part of the node. If we're
-	 * running under generic_make_request(), that IO will never finish and
-	 * we would deadlock. Returning -EAGAIN causes the cache lookup code to
-	 * punt to workqueue and retry.
-	 */
-	if (current->bio_list)
-		return ERR_PTR(-EAGAIN);
-
-	if (c->try_harder && c->try_harder != cl) {
-		closure_wait_event_async(&c->try_wait, cl, !c->try_harder);
-		return ERR_PTR(-EAGAIN);
-	}
+	if (!c->try_harder) {
+		c->try_harder = current;
+		c->try_harder_start = local_clock();
+	} else if (c->try_harder != current)
+		return ERR_PTR(-ENOSPC);

-	c->try_harder = cl;
-	c->try_harder_start = local_clock();
-retry:
-	list_for_each_entry_reverse(i, &c->btree_cache, list) {
-		int r = mca_reap(i, cl, btree_order(k));
-		if (!r)
-			return i;
-		if (r != -ENOMEM)
-			ret = r;
-	}
+	list_for_each_entry_reverse(b, &c->btree_cache, list)
+		if (!mca_reap(b, btree_order(k), false))
+			return b;

-	if (ret == -EAGAIN &&
-	    closure_blocking(cl)) {
-		mutex_unlock(&c->bucket_lock);
-		closure_sync(cl);
-		mutex_lock(&c->bucket_lock);
-		goto retry;
-	}
+	list_for_each_entry_reverse(b, &c->btree_cache, list)
+		if (!mca_reap(b, btree_order(k), true))
+			return b;

-	return ERR_PTR(ret);
+	return ERR_PTR(-ENOMEM);
}

/*
@@ -839,18 +815,19 @@ retry:
 */
void bch_cannibalize_unlock(struct cache_set *c, struct closure *cl)
{
-	if (c->try_harder == cl) {
+	if (c->try_harder == current) {
		bch_time_stats_update(&c->try_harder_time, c->try_harder_start);
		c->try_harder = NULL;
-		__closure_wake_up(&c->try_wait);
+		wake_up(&c->try_wait);
	}
}

-static struct btree *mca_alloc(struct cache_set *c, struct bkey *k,
-			       int level, struct closure *cl)
+static struct btree *mca_alloc(struct cache_set *c, struct bkey *k, int level)
{
	struct btree *b;

+	BUG_ON(current->bio_list);
+
	lockdep_assert_held(&c->bucket_lock);

	if (mca_find(c, k))
@@ -860,14 +837,14 @@ static struct btree *mca_alloc(struct cache_set *c, struct bkey *k,
	 * the list. Check if there's any freed nodes there:
	 */
	list_for_each_entry(b, &c->btree_cache_freeable, list)
-		if (!mca_reap(b, NULL, btree_order(k)))
+		if (!mca_reap(b, btree_order(k), false))
			goto out;

	/* We never free struct btree itself, just the memory that holds the on
	 * disk node. Check the freed list before allocating a new one:
	 */
	list_for_each_entry(b, &c->btree_cache_freed, list)
-		if (!mca_reap(b, NULL, 0)) {
+		if (!mca_reap(b, 0, false)) {
			mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
			if (!b->sets[0].data)
				goto err;
@@ -901,7 +878,7 @@ err:
	if (b)
		rw_unlock(true, b);

-	b = mca_cannibalize(c, k, level, cl);
+	b = mca_cannibalize(c, k);
	if (!IS_ERR(b))
		goto out;

@@ -919,10 +896,9 @@ err:
 * level and op->lock.
 */
struct btree *bch_btree_node_get(struct cache_set *c, struct bkey *k,
-				 int level, struct btree_op *op)
+				 int level, bool write)
{
	int i = 0;
-	bool write = level <= op->lock;
	struct btree *b;

	BUG_ON(level < 0);
@@ -934,7 +910,7 @@ retry:
		return ERR_PTR(-EAGAIN);

	mutex_lock(&c->bucket_lock);
-	b = mca_alloc(c, k, level, &op->cl);
+	b = mca_alloc(c, k, level);
	mutex_unlock(&c->bucket_lock);

	if (!b)
@@ -980,7 +956,7 @@ static void btree_node_prefetch(struct cache_set *c, struct bkey *k, int level)
	struct btree *b;

	mutex_lock(&c->bucket_lock);
-	b = mca_alloc(c, k, level, NULL);
+	b = mca_alloc(c, k, level);
	mutex_unlock(&c->bucket_lock);

	if (!IS_ERR_OR_NULL(b)) {
@@ -991,17 +967,12 @@ static void btree_node_prefetch(struct cache_set *c, struct bkey *k, int level)

/* Btree alloc */

-static void btree_node_free(struct btree *b, struct btree_op *op)
+static void btree_node_free(struct btree *b)
{
	unsigned i;

	trace_bcache_btree_node_free(b);

-	/*
-	 * The BUG_ON() in btree_node_get() implies that we must have a write
-	 * lock on parent to free or even invalidate a node
-	 */
-	BUG_ON(op->lock <= b->level);
	BUG_ON(b == b->c->root);

	if (btree_node_dirty(b))
@@ -1037,7 +1008,7 @@ retry:

	SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);

-	b = mca_alloc(c, &k.key, level, cl);
+	b = mca_alloc(c, &k.key, level);
	if (IS_ERR(b))
		goto err_free;

@@ -1173,8 +1144,7 @@ static int btree_gc_mark_node(struct btree *b, unsigned *keys,
	return stale;
}

-static struct btree *btree_gc_alloc(struct btree *b, struct bkey *k,
-				    struct btree_op *op)
+static struct btree *btree_gc_alloc(struct btree *b, struct bkey *k)
{
	/*
	 * We block priorities from being written for the duration of garbage
@@ -1191,7 +1161,7 @@ static struct btree *btree_gc_alloc(struct btree *b, struct bkey *k,
		memcpy(k->ptr, b->key.ptr,
		       sizeof(uint64_t) * KEY_PTRS(&b->key));

-		btree_node_free(n, op);
+		btree_node_free(n);
		up_write(&n->lock);
	}

@@ -1211,8 +1181,8 @@ struct gc_merge_info {
	unsigned	keys;
};

-static void btree_gc_coalesce(struct btree *b, struct btree_op *op,
-			      struct gc_stat *gc, struct gc_merge_info *r)
+static void btree_gc_coalesce(struct btree *b, struct gc_stat *gc,
+			      struct gc_merge_info *r)
{
	unsigned nodes = 0, keys = 0, blocks;
	int i;
@@ -1228,7 +1198,7 @@ static void btree_gc_coalesce(struct btree *b, struct btree_op *op,

	for (i = nodes - 1; i >= 0; --i) {
		if (r[i].b->written)
-			r[i].b = btree_gc_alloc(r[i].b, r[i].k, op);
+			r[i].b = btree_gc_alloc(r[i].b, r[i].k);

		if (r[i].b->written)
			return;
@@ -1292,7 +1262,7 @@ static void btree_gc_coalesce(struct btree *b, struct btree_op *op,
		r[i - 1].keys	= n2->keys;
	}

-	btree_node_free(r->b, op);
+	btree_node_free(r->b);
	up_write(&r->b->lock);

	trace_bcache_btree_gc_coalesce(nodes);
@@ -1324,7 +1294,7 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op,
	memset(r, 0, sizeof(r));

	while ((r->k = bch_next_recurse_key(b, &b->c->gc_done))) {
-		r->b = bch_btree_node_get(b->c, r->k, b->level - 1, op);
+		r->b = bch_btree_node_get(b->c, r->k, b->level - 1, true);

		if (IS_ERR(r->b)) {
			ret = PTR_ERR(r->b);
@@ -1337,7 +1307,7 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op,
		if (!b->written &&
		    (r->b->level || stale > 10 ||
		     b->c->gc_always_rewrite))
-			r->b = btree_gc_alloc(r->b, r->k, op);
+			r->b = btree_gc_alloc(r->b, r->k);

		if (r->b->level)
			ret = btree_gc_recurse(r->b, op, writes, gc);
@@ -1350,7 +1320,7 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op,
		bkey_copy_key(&b->c->gc_done, r->k);

		if (!b->written)
-			btree_gc_coalesce(b, op, gc, r);
+			btree_gc_coalesce(b, gc, r);

		if (r[GC_MERGE_NODES - 1].b)
			write(r[GC_MERGE_NODES - 1].b);
@@ -1404,7 +1374,7 @@ static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
	if (!IS_ERR_OR_NULL(n)) {
		closure_sync(&op->cl);
		bch_btree_set_root(b);
-		btree_node_free(n, op);
+		btree_node_free(n);
		rw_unlock(true, b);
	}

@@ -2004,18 +1974,18 @@ static int btree_split(struct btree *b, struct btree_op *op,
	}

	rw_unlock(true, n1);
-	btree_node_free(b, op);
+	btree_node_free(b);

	bch_time_stats_update(&b->c->btree_split_time, start_time);

	return 0;
err_free2:
	__bkey_put(n2->c, &n2->key);
-	btree_node_free(n2, op);
+	btree_node_free(n2);
	rw_unlock(true, n2);
err_free1:
	__bkey_put(n1->c, &n1->key);
-	btree_node_free(n1, op);
+	btree_node_free(n1);
	rw_unlock(true, n1);
err:
	if (n3 == ERR_PTR(-EAGAIN) ||
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index 17b7a4e39c7e..72794ab8e8e5 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -326,7 +326,7 @@ static inline void rw_unlock(bool w, struct btree *b)
({									\
	int _r, l = (b)->level - 1;					\
	bool _w = l <= (op)->lock;					\
-	struct btree *_child = bch_btree_node_get((b)->c, key, l, op);	\
+	struct btree *_child = bch_btree_node_get((b)->c, key, l, _w);	\
	if (!IS_ERR(_child)) {						\
		_child->parent = (b);					\
		_r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__);	\
@@ -356,6 +356,11 @@ static inline void rw_unlock(bool w, struct btree *b)
		}							\
		rw_unlock(_w, _b);					\
		bch_cannibalize_unlock(c, &(op)->cl);			\
+		if (_r == -ENOSPC) {					\
+			wait_event((c)->try_wait,			\
+				   !(c)->try_harder);			\
+			_r = -EINTR;					\
+		}							\
	} while (_r == -EINTR);						\
									\
	_r;								\
@@ -375,8 +380,7 @@ void bch_btree_node_write(struct btree *, struct closure *);
void bch_cannibalize_unlock(struct cache_set *, struct closure *);
void bch_btree_set_root(struct btree *);
struct btree *bch_btree_node_alloc(struct cache_set *, int, struct closure *);
-struct btree *bch_btree_node_get(struct cache_set *, struct bkey *,
-				 int, struct btree_op *);
+struct btree *bch_btree_node_get(struct cache_set *, struct bkey *, int, bool);

int bch_btree_insert_check_key(struct btree *, struct btree_op *,
			       struct bkey *);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index d3169c0652f8..9a164cd4058c 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1436,12 +1436,14 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)

	c->sort_crit_factor = int_sqrt(c->btree_pages);

-	mutex_init(&c->bucket_lock);
-	mutex_init(&c->sort_lock);
-	spin_lock_init(&c->sort_time_lock);
	closure_init_unlocked(&c->sb_write);
+	mutex_init(&c->bucket_lock);
+	init_waitqueue_head(&c->try_wait);
	closure_init_unlocked(&c->uuid_write);
+	spin_lock_init(&c->sort_time_lock);
+	mutex_init(&c->sort_lock);
	spin_lock_init(&c->btree_read_time_lock);
+
	bch_moving_init_cache_set(c);

	INIT_LIST_HEAD(&c->list);
@@ -1529,7 +1531,7 @@ static void run_cache_set(struct cache_set *c)
			goto err;

		err = "error reading btree root";
-		c->root = bch_btree_node_get(c, k, j->btree_level, &op);
+		c->root = bch_btree_node_get(c, k, j->btree_level, true);
		if (IS_ERR_OR_NULL(c->root))
			goto err;
