path: root/drivers/md/bcache
author    Kent Overstreet <kmo@daterainc.com>  2013-07-24 20:41:13 -0400
committer Kent Overstreet <kmo@daterainc.com>  2013-11-11 00:56:08 -0500
commit    cc23196631fbcd1bc3eafedbb712413fdbf946a3 (patch)
tree      92f606edf7dc4b83ec5d1438f6e33b28e12af7bc /drivers/md/bcache
parent    2c1953e201a05ddfb1ea53f23d81a492c6513028 (diff)
bcache: Clean up cache_lookup_fn
There was some looping in submit_partial_cache_hit() and submit_partial_cache_miss() that isn't needed anymore - originally, we wouldn't necessarily process the full hit or miss all at once because when splitting the bio, we took into account the restrictions of the device we were sending it to.

But device bio size restrictions are now handled elsewhere, with a wrapper around generic_make_request() - so that looping has been unnecessary for a while now and we can do quite a bit of cleanup.

And if we trim the key we're reading from to match the subset we're actually reading, we don't have to explicitly calculate bi_sector anymore. Neat.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
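[Editor's note] To make the key-trimming point concrete, here is a standalone toy sketch, not the patch itself: the struct extent / cut_front() / cut_back() names below are invented for illustration, whereas the real code operates on a struct bkey with bcache's bch_cut_front()/bch_cut_back(), as shown in the diff that follows. The idea is that trimming the key to the range actually being read moves the cache pointer along with it, so the starting sector of the read no longer has to be computed by hand.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct extent {
	uint64_t start;        /* first logical sector covered by the key */
	uint64_t end;          /* one past the last logical sector covered */
	uint64_t cache_sector; /* where 'start' currently lives on the cache device */
};

/* Drop the part of the extent before 'sector'; the cache pointer advances with it. */
static void cut_front(struct extent *e, uint64_t sector)
{
	assert(sector >= e->start && sector <= e->end);
	e->cache_sector += sector - e->start;
	e->start = sector;
}

/* Drop the part of the extent at or after 'sector'. */
static void cut_back(struct extent *e, uint64_t sector)
{
	assert(sector >= e->start && sector <= e->end);
	e->end = sector;
}

int main(void)
{
	/* A key covering logical sectors [100, 200), cached starting at sector 5000. */
	struct extent e = { .start = 100, .end = 200, .cache_sector = 5000 };

	/* This lookup only needs logical sectors [130, 150). */
	cut_front(&e, 130);
	cut_back(&e, 150);

	/* The trimmed key already points at the right place: 20 sectors at 5030.
	 * No separate "cache_sector + (130 - 100)" arithmetic is needed. */
	printf("read %llu sectors starting at cache sector %llu\n",
	       (unsigned long long)(e.end - e.start),
	       (unsigned long long)e.cache_sector);
	return 0;
}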
Diffstat (limited to 'drivers/md/bcache')
-rw-r--r--  drivers/md/bcache/request.c | 108
1 file changed, 46 insertions(+), 62 deletions(-)
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 854743e85e76..de3fc76ffcfc 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -663,86 +663,70 @@ static void bch_cache_read_endio(struct bio *bio, int error)
 	bch_bbio_endio(s->op.c, bio, error, "reading from cache");
 }
 
-static int submit_partial_cache_miss(struct btree *b, struct search *s,
-				     struct bkey *k)
-{
-	struct bio *bio = &s->bio.bio;
-	int ret = MAP_CONTINUE;
-
-	do {
-		unsigned sectors = INT_MAX;
-
-		if (KEY_INODE(k) == s->op.inode) {
-			if (KEY_START(k) <= bio->bi_sector)
-				break;
-
-			sectors = min_t(uint64_t, sectors,
-					KEY_START(k) - bio->bi_sector);
-		}
-
-		ret = s->d->cache_miss(b, s, bio, sectors);
-	} while (ret == MAP_CONTINUE);
-
-	return ret;
-}
-
 /*
  * Read from a single key, handling the initial cache miss if the key starts in
  * the middle of the bio
  */
-static int submit_partial_cache_hit(struct btree_op *op, struct btree *b,
-				    struct bkey *k)
+static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
 {
 	struct search *s = container_of(op, struct search, op);
-	struct bio *bio = &s->bio.bio;
+	struct bio *n, *bio = &s->bio.bio;
+	struct bkey *bio_key;
 	unsigned ptr;
-	struct bio *n;
 
-	int ret = submit_partial_cache_miss(b, s, k);
-	if (ret != MAP_CONTINUE || !KEY_SIZE(k))
-		return ret;
+	if (bkey_cmp(k, &KEY(op->inode, bio->bi_sector, 0)) <= 0)
+		return MAP_CONTINUE;
+
+	if (KEY_INODE(k) != s->op.inode ||
+	    KEY_START(k) > bio->bi_sector) {
+		unsigned bio_sectors = bio_sectors(bio);
+		unsigned sectors = KEY_INODE(k) == s->op.inode
+			? min_t(uint64_t, INT_MAX,
+				KEY_START(k) - bio->bi_sector)
+			: INT_MAX;
+
+		int ret = s->d->cache_miss(b, s, bio, sectors);
+		if (ret != MAP_CONTINUE)
+			return ret;
+
+		/* if this was a complete miss we shouldn't get here */
+		BUG_ON(bio_sectors <= sectors);
+	}
+
+	if (!KEY_SIZE(k))
+		return MAP_CONTINUE;
 
 	/* XXX: figure out best pointer - for multiple cache devices */
 	ptr = 0;
 
 	PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;
 
-	while (ret == MAP_CONTINUE &&
-	       KEY_INODE(k) == op->inode &&
-	       bio->bi_sector < KEY_OFFSET(k)) {
-		struct bkey *bio_key;
-		sector_t sector = PTR_OFFSET(k, ptr) +
-			(bio->bi_sector - KEY_START(k));
-		unsigned sectors = min_t(uint64_t, INT_MAX,
-					 KEY_OFFSET(k) - bio->bi_sector);
-
-		n = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
-		if (n == bio)
-			ret = MAP_DONE;
-
-		bio_key = &container_of(n, struct bbio, bio)->key;
+	n = bch_bio_split(bio, min_t(uint64_t, INT_MAX,
+				     KEY_OFFSET(k) - bio->bi_sector),
+			  GFP_NOIO, s->d->bio_split);
 
-		/*
-		 * The bucket we're reading from might be reused while our bio
-		 * is in flight, and we could then end up reading the wrong
-		 * data.
-		 *
-		 * We guard against this by checking (in cache_read_endio()) if
-		 * the pointer is stale again; if so, we treat it as an error
-		 * and reread from the backing device (but we don't pass that
-		 * error up anywhere).
-		 */
+	bio_key = &container_of(n, struct bbio, bio)->key;
+	bch_bkey_copy_single_ptr(bio_key, k, ptr);
 
-		bch_bkey_copy_single_ptr(bio_key, k, ptr);
-		SET_PTR_OFFSET(bio_key, 0, sector);
+	bch_cut_front(&KEY(s->op.inode, n->bi_sector, 0), bio_key);
+	bch_cut_back(&KEY(s->op.inode, bio_end_sector(n), 0), bio_key);
 
-		n->bi_end_io	= bch_cache_read_endio;
-		n->bi_private	= &s->cl;
+	n->bi_end_io	= bch_cache_read_endio;
+	n->bi_private	= &s->cl;
 
-		__bch_submit_bbio(n, b->c);
-	}
+	/*
+	 * The bucket we're reading from might be reused while our bio
+	 * is in flight, and we could then end up reading the wrong
+	 * data.
+	 *
+	 * We guard against this by checking (in cache_read_endio()) if
+	 * the pointer is stale again; if so, we treat it as an error
+	 * and reread from the backing device (but we don't pass that
+	 * error up anywhere).
+	 */
 
-	return ret;
+	__bch_submit_bbio(n, b->c);
+	return n == bio ? MAP_DONE : MAP_CONTINUE;
 }
 
 static void cache_lookup(struct closure *cl)
@@ -753,7 +737,7 @@ static void cache_lookup(struct closure *cl)
 
 	int ret = bch_btree_map_keys(op, op->c,
 				     &KEY(op->inode, bio->bi_sector, 0),
-				     submit_partial_cache_hit, 1);
+				     cache_lookup_fn, MAP_END_KEY);
 	if (ret == -EAGAIN)
 		continue_at(cl, cache_lookup, bcache_wq);
 