author     Kent Overstreet <kmo@daterainc.com>  2013-07-24 21:04:18 -0400
committer  Kent Overstreet <kmo@daterainc.com>  2013-11-11 00:56:09 -0500
commit     b54d6934da7857f87b092df9b77dc1f42818ba94 (patch)
tree       f505788c5308287f65c0b2bc58e9ff358f336d03
parent     c18536a72ddd7fe30d63e6c1500b5c930ac14594 (diff)
bcache: Kill op->cl
This isn't used for waiting asynchronously anymore - so this is a fairly
trivial refactoring.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
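The pattern repeated throughout the patch is the same: code that used to start a btree node write against the closure embedded in struct btree_op and then wait on op->cl now declares a closure on its own stack just for that wait. A minimal illustrative sketch of the shape (the wrapper function below is hypothetical; closure_init_stack(), bch_btree_node_write() and closure_sync() are the helpers used in the hunks that follow):

/*
 * Hypothetical example, not part of the patch: write out a btree node
 * and wait for it using an on-stack closure instead of op->cl.
 */
static void example_write_node_and_wait(struct btree *b)
{
	struct closure cl;		/* replaces the old op->cl */

	closure_init_stack(&cl);	/* closure lives on this stack frame */
	bch_btree_node_write(b, &cl);	/* the write completes against cl */
	closure_sync(&cl);		/* block here until the write is done */
}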
-rw-r--r--  drivers/md/bcache/bset.c       |  2
-rw-r--r--  drivers/md/bcache/btree.c      | 81
-rw-r--r--  drivers/md/bcache/btree.h      |  8
-rw-r--r--  drivers/md/bcache/journal.c    |  8
-rw-r--r--  drivers/md/bcache/movinggc.c   |  4
-rw-r--r--  drivers/md/bcache/request.c    | 35
-rw-r--r--  drivers/md/bcache/request.h    |  1
-rw-r--r--  drivers/md/bcache/writeback.c  |  5
8 files changed, 63 insertions, 81 deletions
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index fae5b7b3f5ab..f7b5525ddafa 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -1196,7 +1196,7 @@ int bch_bset_print_stats(struct cache_set *c, char *buf)
 	int ret;
 
 	memset(&t, 0, sizeof(struct bset_stats));
-	bch_btree_op_init_stack(&t.op);
+	bch_btree_op_init(&t.op, -1);
 
 	ret = bch_btree_map_nodes(&t.op, c, &ZERO_KEY, btree_bset_stats);
 	if (ret < 0)
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 5cb59c313dc3..cb1a490f7f86 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -115,13 +115,6 @@ enum {
 
 static struct workqueue_struct *btree_io_wq;
 
-void bch_btree_op_init_stack(struct btree_op *op)
-{
-	memset(op, 0, sizeof(struct btree_op));
-	closure_init_stack(&op->cl);
-	op->lock = -1;
-}
-
 static inline bool should_split(struct btree *b)
 {
 	struct bset *i = write_block(b);
@@ -965,8 +958,7 @@ err:
  * bch_btree_node_get - find a btree node in the cache and lock it, reading it
  * in from disk if necessary.
  *
- * If IO is necessary, it uses the closure embedded in struct btree_op to wait;
- * if that closure is in non blocking mode, will return -EAGAIN.
+ * If IO is necessary and running under generic_make_request, returns -EAGAIN.
  *
  * The btree node will have either a read or a write lock held, depending on
  * level and op->lock.
@@ -1260,6 +1252,9 @@ static void btree_gc_coalesce(struct btree *b, struct gc_stat *gc,
 {
 	unsigned nodes = 0, keys = 0, blocks;
 	int i;
+	struct closure cl;
+
+	closure_init_stack(&cl);
 
 	while (nodes < GC_MERGE_NODES && r[nodes].b)
 		keys += r[nodes++].keys;
@@ -1353,9 +1348,7 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op,
 {
 	void write(struct btree *r)
 	{
-		if (!r->written)
-			bch_btree_node_write(r, &op->cl);
-		else if (btree_node_dirty(r))
+		if (!r->written || btree_node_dirty(r))
 			bch_btree_node_write(r, writes);
 
 		up_write(&r->lock);
@@ -1431,6 +1424,9 @@ static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
 	struct btree *n = NULL;
 	unsigned keys = 0;
 	int ret = 0, stale = btree_gc_mark_node(b, &keys, gc);
+	struct closure cl;
+
+	closure_init_stack(&cl);
 
 	if (b->level || stale > 10)
 		n = btree_node_alloc_replacement(b);
@@ -1442,11 +1438,11 @@ static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
 	ret = btree_gc_recurse(b, op, writes, gc);
 
 	if (!b->written || btree_node_dirty(b)) {
-		bch_btree_node_write(b, n ? &op->cl : NULL);
+		bch_btree_node_write(b, n ? &cl : NULL);
 	}
 
 	if (!IS_ERR_OR_NULL(n)) {
-		closure_sync(&op->cl);
+		closure_sync(&cl);
 		bch_btree_set_root(b);
 		btree_node_free(n);
 		rw_unlock(true, b);
@@ -1545,15 +1541,13 @@ static void bch_btree_gc(struct cache_set *c)
 
 	memset(&stats, 0, sizeof(struct gc_stat));
 	closure_init_stack(&writes);
-	bch_btree_op_init_stack(&op);
-	op.lock = SHRT_MAX;
+	bch_btree_op_init(&op, SHRT_MAX);
 
 	btree_gc_start(c);
 
 	atomic_inc(&c->prio_blocked);
 
 	ret = btree_root(gc_root, c, &op, &writes, &stats);
-	closure_sync(&op.cl);
 	closure_sync(&writes);
 
 	if (ret) {
@@ -1562,8 +1556,8 @@ static void bch_btree_gc(struct cache_set *c)
 	}
 
 	/* Possibly wait for new UUIDs or whatever to hit disk */
-	bch_journal_meta(c, &op.cl);
-	closure_sync(&op.cl);
+	bch_journal_meta(c, &writes);
+	closure_sync(&writes);
 
 	available = bch_btree_gc_finish(c);
 
@@ -1671,8 +1665,7 @@ int bch_btree_check(struct cache_set *c)
 	struct btree_op op;
 
 	memset(seen, 0, sizeof(seen));
-	bch_btree_op_init_stack(&op);
-	op.lock = SHRT_MAX;
+	bch_btree_op_init(&op, SHRT_MAX);
 
 	for (i = 0; c->cache[i]; i++) {
 		size_t n = DIV_ROUND_UP(c->cache[i]->sb.nbuckets, 8);
@@ -1980,6 +1973,9 @@ static int btree_split(struct btree *b, struct btree_op *op,
 	bool split;
 	struct btree *n1, *n2 = NULL, *n3 = NULL;
 	uint64_t start_time = local_clock();
+	struct closure cl;
+
+	closure_init_stack(&cl);
 
 	n1 = btree_node_alloc_replacement(b);
 	if (IS_ERR(n1))
@@ -2025,7 +2021,7 @@ static int btree_split(struct btree *b, struct btree_op *op,
 		bkey_copy_key(&n2->key, &b->key);
 
 		bch_keylist_add(parent_keys, &n2->key);
-		bch_btree_node_write(n2, &op->cl);
+		bch_btree_node_write(n2, &cl);
 		rw_unlock(true, n2);
 	} else {
 		trace_bcache_btree_node_compact(b, n1->sets[0].data->keys);
@@ -2034,23 +2030,23 @@ static int btree_split(struct btree *b, struct btree_op *op,
 	}
 
 	bch_keylist_add(parent_keys, &n1->key);
-	bch_btree_node_write(n1, &op->cl);
+	bch_btree_node_write(n1, &cl);
 
 	if (n3) {
 		/* Depth increases, make a new root */
 
 		bkey_copy_key(&n3->key, &MAX_KEY);
 		bch_btree_insert_keys(n3, op, parent_keys);
-		bch_btree_node_write(n3, &op->cl);
+		bch_btree_node_write(n3, &cl);
 
-		closure_sync(&op->cl);
+		closure_sync(&cl);
 		bch_btree_set_root(n3);
 		rw_unlock(true, n3);
 	} else if (!b->parent) {
 		/* Root filled up but didn't need to be split */
 
 		bch_keylist_reset(parent_keys);
-		closure_sync(&op->cl);
+		closure_sync(&cl);
 		bch_btree_set_root(n1);
 	} else {
 		unsigned i;
@@ -2065,7 +2061,7 @@ static int btree_split(struct btree *b, struct btree_op *op,
 	}
 
 	bch_keylist_push(parent_keys);
-	closure_sync(&op->cl);
+	closure_sync(&cl);
 	atomic_inc(&b->c->prio_blocked);
 }
 
@@ -2126,10 +2122,15 @@ static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
 			BUG_ON(write_block(b) != b->sets[b->nsets].data);
 
 			if (bch_btree_insert_keys(b, op, insert_keys)) {
-				if (!b->level)
+				if (!b->level) {
 					bch_btree_leaf_dirty(b, journal_ref);
-				else
-					bch_btree_node_write(b, &op->cl);
+				} else {
+					struct closure cl;
+
+					closure_init_stack(&cl);
+					bch_btree_node_write(b, &cl);
+					closure_sync(&cl);
+				}
 			}
 		}
 	} while (!bch_keylist_empty(&split_keys));
@@ -2204,12 +2205,6 @@ int bch_btree_insert(struct btree_op *op, struct cache_set *c,
 {
 	int ret = 0;
 
-	/*
-	 * Don't want to block with the btree locked unless we have to,
-	 * otherwise we get deadlocks with try_harder and between split/gc
-	 */
-	clear_closure_blocking(&op->cl);
-
 	BUG_ON(bch_keylist_empty(keys));
 
 	while (!bch_keylist_empty(keys)) {
@@ -2217,8 +2212,8 @@ int bch_btree_insert(struct btree_op *op, struct cache_set *c,
 		ret = btree_root(insert_recurse, c, op, keys, journal_ref);
 
 		if (ret == -EAGAIN) {
+			BUG();
 			ret = 0;
-			closure_sync(&op->cl);
 		} else if (ret) {
 			struct bkey *k;
 
@@ -2292,10 +2287,7 @@ static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
 int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
 			  struct bkey *from, btree_map_nodes_fn *fn, int flags)
 {
-	int ret = btree_root(map_nodes_recurse, c, op, from, fn, flags);
-	if (closure_blocking(&op->cl))
-		closure_sync(&op->cl);
-	return ret;
+	return btree_root(map_nodes_recurse, c, op, from, fn, flags);
 }
 
 static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
@@ -2328,10 +2320,7 @@ static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
 int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
 		       struct bkey *from, btree_map_keys_fn *fn, int flags)
 {
-	int ret = btree_root(map_keys_recurse, c, op, from, fn, flags);
-	if (closure_blocking(&op->cl))
-		closure_sync(&op->cl);
-	return ret;
+	return btree_root(map_keys_recurse, c, op, from, fn, flags);
 }
 
 /* Keybuf code */
@@ -2409,7 +2398,7 @@ void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
 
 	cond_resched();
 
-	bch_btree_op_init_stack(&refill.op);
+	bch_btree_op_init(&refill.op, -1);
 	refill.buf = buf;
 	refill.end = end;
 	refill.pred = pred;
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index 3f820b67150c..34ee5359b262 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -237,8 +237,6 @@ void __bkey_put(struct cache_set *c, struct bkey *k);
 /* Recursing down the btree */
 
 struct btree_op {
-	struct closure cl;
-
 	/* Btree level at which we start taking write locks */
 	short lock;
 
@@ -253,7 +251,11 @@ struct btree_op {
 	BKEY_PADDED(replace);
 };
 
-void bch_btree_op_init_stack(struct btree_op *);
+static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level)
+{
+	memset(op, 0, sizeof(struct btree_op));
+	op->lock = write_lock_level;
+}
 
 static inline void rw_lock(bool w, struct btree *b, int level)
 {
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 725c8eb9a62a..20e900ad5010 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -305,8 +305,7 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list)
 	struct btree_op op;
 
 	bch_keylist_init(&keylist);
-	bch_btree_op_init_stack(&op);
-	op.lock = SHRT_MAX;
+	bch_btree_op_init(&op, SHRT_MAX);
 
 	list_for_each_entry(i, list, list) {
 		BUG_ON(i->pin && atomic_read(i->pin) != 1);
@@ -341,14 +340,13 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list)
 
 	pr_info("journal replay done, %i keys in %i entries, seq %llu",
 		keys, entries, end);
-
+err:
 	while (!list_empty(list)) {
 		i = list_first_entry(list, struct journal_replay, list);
 		list_del(&i->list);
 		kfree(i);
 	}
-err:
-	closure_sync(&op.cl);
+
 	return ret;
 }
 
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 80e30d77221e..219356f6159d 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -108,8 +108,8 @@ static void write_moving(struct closure *cl)
 		s->op.type = BTREE_REPLACE;
 		bkey_copy(&s->op.replace, &io->w->key);
 
-		closure_init(&s->op.cl, cl);
-		bch_data_insert(&s->op.cl);
+		closure_init(&s->btree, cl);
+		bch_data_insert(&s->btree);
 	}
 
 	continue_at(cl, write_moving_finish, system_wq);
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 818e2e39e71f..5df44fbc9e1d 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -215,8 +215,7 @@ static void bio_csum(struct bio *bio, struct bkey *k)
 
 static void bch_data_insert_keys(struct closure *cl)
 {
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
-	struct search *s = container_of(op, struct search, op);
+	struct search *s = container_of(cl, struct search, btree);
 	atomic_t *journal_ref = NULL;
 
 	/*
@@ -236,7 +235,7 @@ static void bch_data_insert_keys(struct closure *cl)
 					  s->flush_journal
 					  ? &s->cl : NULL);
 
-	if (bch_btree_insert(op, s->c, &s->insert_keys, journal_ref)) {
+	if (bch_btree_insert(&s->op, s->c, &s->insert_keys, journal_ref)) {
 		s->error = -ENOMEM;
 		s->insert_data_done = true;
 	}
@@ -433,8 +432,7 @@ static bool bch_alloc_sectors(struct bkey *k, unsigned sectors,
 
 static void bch_data_invalidate(struct closure *cl)
 {
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
-	struct search *s = container_of(op, struct search, op);
+	struct search *s = container_of(cl, struct search, btree);
 	struct bio *bio = s->cache_bio;
 
 	pr_debug("invalidating %i sectors from %llu",
@@ -461,8 +459,7 @@ out:
 
 static void bch_data_insert_error(struct closure *cl)
 {
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
-	struct search *s = container_of(op, struct search, op);
+	struct search *s = container_of(cl, struct search, btree);
 
 	/*
 	 * Our data write just errored, which means we've got a bunch of keys to
@@ -493,8 +490,7 @@ static void bch_data_insert_error(struct closure *cl)
 static void bch_data_insert_endio(struct bio *bio, int error)
 {
 	struct closure *cl = bio->bi_private;
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
-	struct search *s = container_of(op, struct search, op);
+	struct search *s = container_of(cl, struct search, btree);
 
 	if (error) {
 		/* TODO: We could try to recover from this. */
@@ -511,8 +507,7 @@ static void bch_data_insert_endio(struct bio *bio, int error)
 
 static void bch_data_insert_start(struct closure *cl)
 {
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
-	struct search *s = container_of(op, struct search, op);
+	struct search *s = container_of(cl, struct search, btree);
 	struct bio *bio = s->cache_bio, *n;
 
 	if (s->bypass)
@@ -630,8 +625,7 @@ err:
  */
 void bch_data_insert(struct closure *cl)
 {
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
-	struct search *s = container_of(op, struct search, op);
+	struct search *s = container_of(cl, struct search, btree);
 
 	bch_keylist_init(&s->insert_keys);
 	bio_get(s->cache_bio);
@@ -731,11 +725,10 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
 
 static void cache_lookup(struct closure *cl)
 {
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
-	struct search *s = container_of(op, struct search, op);
+	struct search *s = container_of(cl, struct search, btree);
 	struct bio *bio = &s->bio.bio;
 
-	int ret = bch_btree_map_keys(op, s->c,
+	int ret = bch_btree_map_keys(&s->op, s->c,
 				     &KEY(s->inode, bio->bi_sector, 0),
 				     cache_lookup_fn, MAP_END_KEY);
 	if (ret == -EAGAIN)
@@ -1064,7 +1057,7 @@ static void cached_dev_read_done(struct closure *cl)
 	if (s->cache_bio &&
 	    !test_bit(CACHE_SET_STOPPING, &s->c->flags)) {
 		s->op.type = BTREE_REPLACE;
-		closure_call(&s->op.cl, bch_data_insert, NULL, cl);
+		closure_call(&s->btree, bch_data_insert, NULL, cl);
 	}
 
 	continue_at(cl, cached_dev_cache_miss_done, NULL);
@@ -1156,7 +1149,7 @@ static void cached_dev_read(struct cached_dev *dc, struct search *s)
 {
 	struct closure *cl = &s->cl;
 
-	closure_call(&s->op.cl, cache_lookup, NULL, cl);
+	closure_call(&s->btree, cache_lookup, NULL, cl);
 	continue_at(cl, cached_dev_read_done_bh, NULL);
 }
 
@@ -1239,7 +1232,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
 		closure_bio_submit(bio, cl, s->d);
 	}
 
-	closure_call(&s->op.cl, bch_data_insert, NULL, cl);
+	closure_call(&s->btree, bch_data_insert, NULL, cl);
 	continue_at(cl, cached_dev_write_complete, NULL);
 }
 
@@ -1418,9 +1411,9 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
 		s->writeback = true;
 		s->cache_bio = bio;
 
-		closure_call(&s->op.cl, bch_data_insert, NULL, cl);
+		closure_call(&s->btree, bch_data_insert, NULL, cl);
 	} else {
-		closure_call(&s->op.cl, cache_lookup, NULL, cl);
+		closure_call(&s->btree, cache_lookup, NULL, cl);
 	}
 
 	continue_at(cl, search_free, NULL);
diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
index 0f79177c4f33..ed578aa53ee2 100644
--- a/drivers/md/bcache/request.h
+++ b/drivers/md/bcache/request.h
@@ -6,6 +6,7 @@
 struct search {
 	/* Stack frame for bio_complete */
 	struct closure		cl;
+	struct closure		btree;
 
 	struct bcache_device	*d;
 	struct cache_set	*c;
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index b58c2bc91e3f..d0968e8938f7 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -143,7 +143,7 @@ static void write_dirty_finish(struct closure *cl)
 	struct btree_op op;
 	struct keylist keys;
 
-	bch_btree_op_init_stack(&op);
+	bch_btree_op_init(&op, -1);
 	bch_keylist_init(&keys);
 
 	op.type = BTREE_REPLACE;
@@ -156,7 +156,6 @@ static void write_dirty_finish(struct closure *cl)
 		atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);
 
 	bch_btree_insert(&op, dc->disk.c, &keys, NULL);
-	closure_sync(&op.cl);
 
 	if (op.insert_collision)
 		trace_bcache_writeback_collision(&w->key);
@@ -457,7 +456,7 @@ void bch_sectors_dirty_init(struct cached_dev *dc)
 {
 	struct sectors_dirty_init op;
 
-	bch_btree_op_init_stack(&op.op);
+	bch_btree_op_init(&op.op, -1);
 	op.inode = dc->disk.id;
 
 	bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0),