Diffstat (limited to 'drivers/md/bcache/request.c')
-rw-r--r--  drivers/md/bcache/request.c | 35
1 file changed, 14 insertions(+), 21 deletions(-)
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 818e2e39e71f..5df44fbc9e1d 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -215,8 +215,7 @@ static void bio_csum(struct bio *bio, struct bkey *k)
 
 static void bch_data_insert_keys(struct closure *cl)
 {
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
-	struct search *s = container_of(op, struct search, op);
+	struct search *s = container_of(cl, struct search, btree);
 	atomic_t *journal_ref = NULL;
 
 	/*
@@ -236,7 +235,7 @@ static void bch_data_insert_keys(struct closure *cl)
 			       s->flush_journal
 			       ? &s->cl : NULL);
 
-	if (bch_btree_insert(op, s->c, &s->insert_keys, journal_ref)) {
+	if (bch_btree_insert(&s->op, s->c, &s->insert_keys, journal_ref)) {
 		s->error = -ENOMEM;
 		s->insert_data_done = true;
 	}
@@ -433,8 +432,7 @@ static bool bch_alloc_sectors(struct bkey *k, unsigned sectors,
 
 static void bch_data_invalidate(struct closure *cl)
 {
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
-	struct search *s = container_of(op, struct search, op);
+	struct search *s = container_of(cl, struct search, btree);
 	struct bio *bio = s->cache_bio;
 
 	pr_debug("invalidating %i sectors from %llu",
@@ -461,8 +459,7 @@ out:
 
 static void bch_data_insert_error(struct closure *cl)
 {
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
-	struct search *s = container_of(op, struct search, op);
+	struct search *s = container_of(cl, struct search, btree);
 
 	/*
 	 * Our data write just errored, which means we've got a bunch of keys to
@@ -493,8 +490,7 @@ static void bch_data_insert_error(struct closure *cl)
 static void bch_data_insert_endio(struct bio *bio, int error)
 {
 	struct closure *cl = bio->bi_private;
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
-	struct search *s = container_of(op, struct search, op);
+	struct search *s = container_of(cl, struct search, btree);
 
 	if (error) {
 		/* TODO: We could try to recover from this. */
@@ -511,8 +507,7 @@ static void bch_data_insert_endio(struct bio *bio, int error)
 
 static void bch_data_insert_start(struct closure *cl)
 {
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
-	struct search *s = container_of(op, struct search, op);
+	struct search *s = container_of(cl, struct search, btree);
 	struct bio *bio = s->cache_bio, *n;
 
 	if (s->bypass)
@@ -630,8 +625,7 @@ err:
  */
 void bch_data_insert(struct closure *cl)
 {
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
-	struct search *s = container_of(op, struct search, op);
+	struct search *s = container_of(cl, struct search, btree);
 
 	bch_keylist_init(&s->insert_keys);
 	bio_get(s->cache_bio);
@@ -731,11 +725,10 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
 
 static void cache_lookup(struct closure *cl)
 {
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
-	struct search *s = container_of(op, struct search, op);
+	struct search *s = container_of(cl, struct search, btree);
 	struct bio *bio = &s->bio.bio;
 
-	int ret = bch_btree_map_keys(op, s->c,
+	int ret = bch_btree_map_keys(&s->op, s->c,
 				     &KEY(s->inode, bio->bi_sector, 0),
 				     cache_lookup_fn, MAP_END_KEY);
 	if (ret == -EAGAIN)
@@ -1064,7 +1057,7 @@ static void cached_dev_read_done(struct closure *cl)
 	if (s->cache_bio &&
 	    !test_bit(CACHE_SET_STOPPING, &s->c->flags)) {
 		s->op.type = BTREE_REPLACE;
-		closure_call(&s->op.cl, bch_data_insert, NULL, cl);
+		closure_call(&s->btree, bch_data_insert, NULL, cl);
 	}
 
 	continue_at(cl, cached_dev_cache_miss_done, NULL);
@@ -1156,7 +1149,7 @@ static void cached_dev_read(struct cached_dev *dc, struct search *s)
 {
 	struct closure *cl = &s->cl;
 
-	closure_call(&s->op.cl, cache_lookup, NULL, cl);
+	closure_call(&s->btree, cache_lookup, NULL, cl);
 	continue_at(cl, cached_dev_read_done_bh, NULL);
 }
 
@@ -1239,7 +1232,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
 		closure_bio_submit(bio, cl, s->d);
 	}
 
-	closure_call(&s->op.cl, bch_data_insert, NULL, cl);
+	closure_call(&s->btree, bch_data_insert, NULL, cl);
 	continue_at(cl, cached_dev_write_complete, NULL);
 }
 
@@ -1418,9 +1411,9 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
 		s->writeback = true;
 		s->cache_bio = bio;
 
-		closure_call(&s->op.cl, bch_data_insert, NULL, cl);
+		closure_call(&s->btree, bch_data_insert, NULL, cl);
 	} else {
-		closure_call(&s->op.cl, cache_lookup, NULL, cl);
+		closure_call(&s->btree, cache_lookup, NULL, cl);
 	}
 
 	continue_at(cl, search_free, NULL);
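The recurring change in every hunk above is the same: instead of recovering the enclosing struct search with two steps (closure to struct btree_op via container_of(), then btree_op to search), the closure used for btree work is now embedded directly in struct search as the member named btree, so one container_of(cl, struct search, btree) is enough, and the closure_call() sites pass &s->btree instead of &s->op.cl. The following is a minimal userspace sketch of that container_of() pattern; the struct layouts are simplified stand-ins rather than the real bcache definitions, and only the member names (cl, btree, op) mirror the diff.

/*
 * Sketch of the "single container_of" pattern the diff switches to.
 * Simplified stand-in types: only the member names mirror the diff.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct closure { int dummy; };
struct btree_op { int type; };          /* the diff stops using a closure embedded here */

struct search {
	struct closure	cl;             /* top-level closure */
	struct closure	btree;          /* closure used for btree operations */
	struct btree_op	op;
};

/*
 * Before: the btree closure lived inside btree_op, so recovering the
 * search took container_of() twice. After: the closure is a direct
 * member of struct search, so one container_of() on "btree" suffices.
 */
static struct search *search_from_btree_closure(struct closure *cl)
{
	return container_of(cl, struct search, btree);
}

int main(void)
{
	struct search s;
	struct search *found = search_from_btree_closure(&s.btree);

	printf("recovered enclosing search: %s\n", found == &s ? "yes" : "no");
	return 0;
}

In this sketch the btree closure is simply a second embedded member next to the top-level cl, which is all that is needed to show why container_of(cl, struct search, btree) yields the enclosing search; the real struct search in bcache carries many more fields.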