Diffstat (limited to 'drivers/md/bcache/request.c')
 drivers/md/bcache/request.c | 154 ++++++++++++++++++++++++++++++++----------
 1 file changed, 125 insertions(+), 29 deletions(-)
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index a16872541038..854743e85e76 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -638,24 +638,9 @@ void bch_data_insert(struct closure *cl)
 	bch_data_insert_start(cl);
 }
 
-/* Common code for the make_request functions */
+/* Cache lookup */
 
-static void request_endio(struct bio *bio, int error)
-{
-	struct closure *cl = bio->bi_private;
-
-	if (error) {
-		struct search *s = container_of(cl, struct search, cl);
-		s->error = error;
-		/* Only cache read errors are recoverable */
-		s->recoverable = false;
-	}
-
-	bio_put(bio);
-	closure_put(cl);
-}
-
-void bch_cache_read_endio(struct bio *bio, int error)
+static void bch_cache_read_endio(struct bio *bio, int error)
 {
 	struct bbio *b = container_of(bio, struct bbio, bio);
 	struct closure *cl = bio->bi_private;
@@ -678,6 +663,120 @@ void bch_cache_read_endio(struct bio *bio, int error)
 	bch_bbio_endio(s->op.c, bio, error, "reading from cache");
 }
 
+static int submit_partial_cache_miss(struct btree *b, struct search *s,
+				     struct bkey *k)
+{
+	struct bio *bio = &s->bio.bio;
+	int ret = MAP_CONTINUE;
+
+	do {
+		unsigned sectors = INT_MAX;
+
+		if (KEY_INODE(k) == s->op.inode) {
+			if (KEY_START(k) <= bio->bi_sector)
+				break;
+
+			sectors = min_t(uint64_t, sectors,
+					KEY_START(k) - bio->bi_sector);
+		}
+
+		ret = s->d->cache_miss(b, s, bio, sectors);
+	} while (ret == MAP_CONTINUE);
+
+	return ret;
+}
+
+/*
+ * Read from a single key, handling the initial cache miss if the key starts in
+ * the middle of the bio
+ */
+static int submit_partial_cache_hit(struct btree_op *op, struct btree *b,
+				    struct bkey *k)
+{
+	struct search *s = container_of(op, struct search, op);
+	struct bio *bio = &s->bio.bio;
+	unsigned ptr;
+	struct bio *n;
+
+	int ret = submit_partial_cache_miss(b, s, k);
+	if (ret != MAP_CONTINUE || !KEY_SIZE(k))
+		return ret;
+
+	/* XXX: figure out best pointer - for multiple cache devices */
+	ptr = 0;
+
+	PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;
+
+	while (ret == MAP_CONTINUE &&
+	       KEY_INODE(k) == op->inode &&
+	       bio->bi_sector < KEY_OFFSET(k)) {
+		struct bkey *bio_key;
+		sector_t sector = PTR_OFFSET(k, ptr) +
+			(bio->bi_sector - KEY_START(k));
+		unsigned sectors = min_t(uint64_t, INT_MAX,
+					 KEY_OFFSET(k) - bio->bi_sector);
+
+		n = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
+		if (n == bio)
+			ret = MAP_DONE;
+
+		bio_key = &container_of(n, struct bbio, bio)->key;
+
+		/*
+		 * The bucket we're reading from might be reused while our bio
+		 * is in flight, and we could then end up reading the wrong
+		 * data.
+		 *
+		 * We guard against this by checking (in cache_read_endio()) if
+		 * the pointer is stale again; if so, we treat it as an error
+		 * and reread from the backing device (but we don't pass that
+		 * error up anywhere).
+		 */
+
+		bch_bkey_copy_single_ptr(bio_key, k, ptr);
+		SET_PTR_OFFSET(bio_key, 0, sector);
+
+		n->bi_end_io	= bch_cache_read_endio;
+		n->bi_private	= &s->cl;
+
+		__bch_submit_bbio(n, b->c);
+	}
+
+	return ret;
+}
+
+static void cache_lookup(struct closure *cl)
+{
+	struct btree_op *op = container_of(cl, struct btree_op, cl);
+	struct search *s = container_of(op, struct search, op);
+	struct bio *bio = &s->bio.bio;
+
+	int ret = bch_btree_map_keys(op, op->c,
+				     &KEY(op->inode, bio->bi_sector, 0),
+				     submit_partial_cache_hit, 1);
+	if (ret == -EAGAIN)
+		continue_at(cl, cache_lookup, bcache_wq);
+
+	closure_return(cl);
+}
+
+/* Common code for the make_request functions */
+
+static void request_endio(struct bio *bio, int error)
+{
+	struct closure *cl = bio->bi_private;
+
+	if (error) {
+		struct search *s = container_of(cl, struct search, cl);
+		s->error = error;
+		/* Only cache read errors are recoverable */
+		s->recoverable = false;
+	}
+
+	bio_put(bio);
+	closure_put(cl);
+}
+
 static void bio_complete(struct search *s)
 {
 	if (s->orig_bio) {
@@ -1005,15 +1104,14 @@ static void cached_dev_read_done_bh(struct closure *cl)
 static int cached_dev_cache_miss(struct btree *b, struct search *s,
 				 struct bio *bio, unsigned sectors)
 {
-	int ret = 0;
+	int ret = MAP_CONTINUE;
 	unsigned reada = 0;
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 	struct bio *miss, *cache_bio;
 
 	if (s->cache_miss || s->op.bypass) {
 		miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
-		if (miss == bio)
-			s->op.lookup_done = true;
+		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
 		goto out_submit;
 	}
 
@@ -1033,11 +1131,9 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 		return ret;
 
 	miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
-	if (miss == bio)
-		s->op.lookup_done = true;
-	else
-		/* btree_search_recurse()'s btree iterator is no good anymore */
-		ret = -EINTR;
+
+	/* btree_search_recurse()'s btree iterator is no good anymore */
+	ret = miss == bio ? MAP_DONE : -EINTR;
 
 	cache_bio = bio_alloc_bioset(GFP_NOWAIT,
 			DIV_ROUND_UP(s->cache_bio_sectors, PAGE_SECTORS),
@@ -1075,7 +1171,7 @@ static void cached_dev_read(struct cached_dev *dc, struct search *s)
 {
 	struct closure *cl = &s->cl;
 
-	closure_call(&s->op.cl, bch_btree_search_async, NULL, cl);
+	closure_call(&s->op.cl, cache_lookup, NULL, cl);
 	continue_at(cl, cached_dev_read_done_bh, NULL);
 }
 
@@ -1287,9 +1383,9 @@ static int flash_dev_cache_miss(struct btree *b, struct search *s,
 	bio_advance(bio, min(sectors << 9, bio->bi_size));
 
 	if (!bio->bi_size)
-		s->op.lookup_done = true;
+		return MAP_DONE;
 
-	return 0;
+	return MAP_CONTINUE;
 }
 
 static void flash_dev_nodata(struct closure *cl)
@@ -1339,7 +1435,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
 
 		closure_call(&s->op.cl, bch_data_insert, NULL, cl);
 	} else {
-		closure_call(&s->op.cl, bch_btree_search_async, NULL, cl);
+		closure_call(&s->op.cl, cache_lookup, NULL, cl);
 	}
 
 	continue_at(cl, search_free, NULL);
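
Note on the pattern introduced by this diff: the old code signalled completion by setting s->op.lookup_done from inside the cache-hit and cache-miss paths, whereas here the callbacks report it through their return value. bch_btree_map_keys() keeps calling submit_partial_cache_hit() while it returns MAP_CONTINUE, stops on MAP_DONE, backs out with -EINTR when the btree iterator is no longer usable, and cache_lookup() requeues itself when the walk returns -EAGAIN. The sketch below is a minimal user-space model of that return-value contract only; the MAP_* values, the map_keys() driver, and handle_key() are invented for illustration and are not the kernel's actual bch_btree_map_keys() API.

#include <stdio.h>

/* Illustrative stand-ins for the iterator contract (not kernel definitions). */
#define MAP_CONTINUE 0	/* keep walking keys */
#define MAP_DONE     1	/* request fully handled, stop the walk */

struct key { unsigned start, end; };		/* a cached extent, in sectors */
struct request { unsigned sector, end; };	/* the I/O being satisfied */

/* Hypothetical driver: call fn() per key until it stops asking to continue. */
static int map_keys(const struct key *keys, int nr,
		    int (*fn)(const struct key *, struct request *),
		    struct request *rq)
{
	for (int i = 0; i < nr; i++) {
		int ret = fn(&keys[i], rq);
		if (ret != MAP_CONTINUE)
			return ret;
	}
	return MAP_CONTINUE;
}

/*
 * Callback in the spirit of submit_partial_cache_hit(): consume as much of
 * the request as this key covers, then tell the iterator whether to go on.
 */
static int handle_key(const struct key *k, struct request *rq)
{
	if (rq->sector >= k->end)
		return MAP_CONTINUE;	/* key is entirely behind the request */

	rq->sector = k->end < rq->end ? k->end : rq->end;
	printf("consumed up to sector %u using key [%u,%u)\n",
	       rq->sector, k->start, k->end);

	return rq->sector >= rq->end ? MAP_DONE : MAP_CONTINUE;
}

int main(void)
{
	struct key keys[] = { { 0, 8 }, { 8, 16 }, { 16, 32 } };
	struct request rq = { .sector = 4, .end = 20 };

	if (map_keys(keys, 3, handle_key, &rq) == MAP_DONE)
		printf("lookup finished before running off the last key\n");
	return 0;
}

The design point is the same as in the patch: the iterator owns the loop, and the callback's return value is the only channel for "keep going", "done", or "restart", which is what lets lookup_done disappear from struct btree_op.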