author      Kent Overstreet <kmo@daterainc.com>    2013-07-24 20:41:08 -0400
committer   Kent Overstreet <kmo@daterainc.com>    2013-11-11 00:56:07 -0500
commit      2c1953e201a05ddfb1ea53f23d81a492c6513028
tree        c1a2d6cfefb094698a38db864d5a5a575cfd835b
parent      df8e89701fb02cba6e09c5f46f002778b5b52dd2
bcache: Convert bch_btree_read_async() to bch_btree_map_keys()
This is a fairly straightforward conversion, mostly reshuffling -
op->lookup_done goes away, replaced by MAP_DONE/MAP_CONTINUE. And the
code for handling cache hits and misses wasn't really btree code, so
it gets moved to request.c.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
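[Editor's note: a minimal user-space sketch of the callback convention this
patch moves to - illustrative only. The toy_* names below are made up; only
the MAP_DONE/MAP_CONTINUE idea mirrors the contract that bch_btree_map_keys()
gives its per-key callback (here, the analogue of submit_partial_cache_hit()).]

/*
 * Illustrative sketch, not kernel code: a "map over keys" helper calls a
 * per-key callback until the callback stops returning MAP_CONTINUE.
 * The old code expressed the same thing by setting op->lookup_done.
 */
#include <stdio.h>

#define MAP_DONE      0   /* callback consumed the whole request */
#define MAP_CONTINUE  1   /* keep iterating over subsequent keys */

struct toy_key { unsigned long long offset, size; };

/* Per-key callback, analogous to submit_partial_cache_hit(). */
typedef int (*toy_map_fn)(struct toy_key *k, void *arg);

/* Analogous to bch_btree_map_keys(): walk keys in order, stop on MAP_DONE. */
static int toy_map_keys(struct toy_key *keys, int nr, toy_map_fn fn, void *arg)
{
        int i, ret = MAP_CONTINUE;

        for (i = 0; i < nr && ret == MAP_CONTINUE; i++)
                ret = fn(&keys[i], arg);

        return ret;
}

/* Finish once the remaining request length drops to zero. */
static int read_one_key(struct toy_key *k, void *arg)
{
        unsigned long long *remaining = arg;
        unsigned long long n = k->size < *remaining ? k->size : *remaining;

        printf("reading %llu sectors at %llu\n", n, k->offset);
        *remaining -= n;

        return *remaining ? MAP_CONTINUE : MAP_DONE;
}

int main(void)
{
        struct toy_key keys[] = { { 0, 8 }, { 8, 8 }, { 16, 8 } };
        unsigned long long remaining = 12;

        toy_map_keys(keys, 3, read_one_key, &remaining);
        return 0;
}

The same pattern is what lets the new cache_lookup() in request.c simply
requeue itself with continue_at() when bch_btree_map_keys() returns -EAGAIN,
rather than tracking a lookup_done flag by hand.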
-rw-r--r--   drivers/md/bcache/btree.c     133
-rw-r--r--   drivers/md/bcache/btree.h       3
-rw-r--r--   drivers/md/bcache/journal.c     1
-rw-r--r--   drivers/md/bcache/request.c   154
-rw-r--r--   drivers/md/bcache/request.h     2
5 files changed, 125 insertions, 168 deletions
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index bfd60e6a2312..3949673cb1b0 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -23,7 +23,6 @@
 #include "bcache.h"
 #include "btree.h"
 #include "debug.h"
-#include "request.h"
 #include "writeback.h"
 
 #include <linux/slab.h>
@@ -2255,138 +2254,6 @@ void bch_btree_set_root(struct btree *b)
         closure_sync(&cl);
 }
 
-/* Cache lookup */
-
-static int submit_partial_cache_miss(struct btree *b, struct btree_op *op,
-                                     struct bkey *k)
-{
-        struct search *s = container_of(op, struct search, op);
-        struct bio *bio = &s->bio.bio;
-        int ret = 0;
-
-        while (!ret &&
-               !op->lookup_done) {
-                unsigned sectors = INT_MAX;
-
-                if (KEY_INODE(k) == op->inode) {
-                        if (KEY_START(k) <= bio->bi_sector)
-                                break;
-
-                        sectors = min_t(uint64_t, sectors,
-                                        KEY_START(k) - bio->bi_sector);
-                }
-
-                ret = s->d->cache_miss(b, s, bio, sectors);
-        }
-
-        return ret;
-}
-
-/*
- * Read from a single key, handling the initial cache miss if the key starts in
- * the middle of the bio
- */
-static int submit_partial_cache_hit(struct btree *b, struct btree_op *op,
-                                    struct bkey *k)
-{
-        struct search *s = container_of(op, struct search, op);
-        struct bio *bio = &s->bio.bio;
-        unsigned ptr;
-        struct bio *n;
-
-        int ret = submit_partial_cache_miss(b, op, k);
-        if (ret || op->lookup_done)
-                return ret;
-
-        /* XXX: figure out best pointer - for multiple cache devices */
-        ptr = 0;
-
-        PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;
-
-        while (!op->lookup_done &&
-               KEY_INODE(k) == op->inode &&
-               bio->bi_sector < KEY_OFFSET(k)) {
-                struct bkey *bio_key;
-                sector_t sector = PTR_OFFSET(k, ptr) +
-                        (bio->bi_sector - KEY_START(k));
-                unsigned sectors = min_t(uint64_t, INT_MAX,
-                                         KEY_OFFSET(k) - bio->bi_sector);
-
-                n = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
-                if (n == bio)
-                        op->lookup_done = true;
-
-                bio_key = &container_of(n, struct bbio, bio)->key;
-
-                /*
-                 * The bucket we're reading from might be reused while our bio
-                 * is in flight, and we could then end up reading the wrong
-                 * data.
-                 *
-                 * We guard against this by checking (in cache_read_endio()) if
-                 * the pointer is stale again; if so, we treat it as an error
-                 * and reread from the backing device (but we don't pass that
-                 * error up anywhere).
-                 */
-
-                bch_bkey_copy_single_ptr(bio_key, k, ptr);
-                SET_PTR_OFFSET(bio_key, 0, sector);
-
-                n->bi_end_io = bch_cache_read_endio;
-                n->bi_private = &s->cl;
-
-                __bch_submit_bbio(n, b->c);
-        }
-
-        return 0;
-}
-
-static int bch_btree_search_recurse(struct btree *b, struct btree_op *op)
-{
-        struct search *s = container_of(op, struct search, op);
-        struct bio *bio = &s->bio.bio;
-
-        int ret = 0;
-        struct bkey *k;
-        struct btree_iter iter;
-        bch_btree_iter_init(b, &iter, &KEY(op->inode, bio->bi_sector, 0));
-
-        do {
-                k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
-                if (!k) {
-                        /*
-                         * b->key would be exactly what we want, except that
-                         * pointers to btree nodes have nonzero size - we
-                         * wouldn't go far enough
-                         */
-
-                        ret = submit_partial_cache_miss(b, op,
-                                        &KEY(KEY_INODE(&b->key),
-                                             KEY_OFFSET(&b->key), 0));
-                        break;
-                }
-
-                ret = b->level
-                        ? btree(search_recurse, k, b, op)
-                        : submit_partial_cache_hit(b, op, k);
-        } while (!ret &&
-                 !op->lookup_done);
-
-        return ret;
-}
-
-void bch_btree_search_async(struct closure *cl)
-{
-        struct btree_op *op = container_of(cl, struct btree_op, cl);
-
-        int ret = btree_root(search_recurse, op->c, op);
-
-        if (ret == -EAGAIN)
-                continue_at(cl, bch_btree_search_async, bcache_wq);
-
-        closure_return(cl);
-}
-
 /* Map across nodes or keys */
 
 static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index 1690f4731c1e..60dadd722ace 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -264,7 +264,6 @@ struct btree_op {
         unsigned        flush_journal:1;
 
         unsigned        insert_data_done:1;
-        unsigned        lookup_done:1;
         unsigned        insert_collision:1;
 
         BKEY_PADDED(replace);
@@ -306,8 +305,6 @@ int bch_btree_insert_check_key(struct btree *, struct btree_op *,
                                struct bkey *);
 int bch_btree_insert(struct btree_op *, struct cache_set *, struct keylist *);
 
-void bch_btree_search_async(struct closure *);
-
 int bch_gc_thread_start(struct cache_set *);
 size_t bch_btree_gc_finish(struct cache_set *);
 void bch_moving_gc(struct cache_set *);
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 8866f8ee3a07..6f4daf031410 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -7,7 +7,6 @@
 #include "bcache.h"
 #include "btree.h"
 #include "debug.h"
-#include "request.h"
 
 #include <trace/events/bcache.h>
 
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index a16872541038..854743e85e76 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -638,24 +638,9 @@ void bch_data_insert(struct closure *cl)
         bch_data_insert_start(cl);
 }
 
-/* Common code for the make_request functions */
+/* Cache lookup */
 
-static void request_endio(struct bio *bio, int error)
-{
-        struct closure *cl = bio->bi_private;
-
-        if (error) {
-                struct search *s = container_of(cl, struct search, cl);
-                s->error = error;
-                /* Only cache read errors are recoverable */
-                s->recoverable = false;
-        }
-
-        bio_put(bio);
-        closure_put(cl);
-}
-
-void bch_cache_read_endio(struct bio *bio, int error)
+static void bch_cache_read_endio(struct bio *bio, int error)
 {
         struct bbio *b = container_of(bio, struct bbio, bio);
         struct closure *cl = bio->bi_private;
@@ -678,6 +663,120 @@ void bch_cache_read_endio(struct bio *bio, int error)
         bch_bbio_endio(s->op.c, bio, error, "reading from cache");
 }
 
+static int submit_partial_cache_miss(struct btree *b, struct search *s,
+                                     struct bkey *k)
+{
+        struct bio *bio = &s->bio.bio;
+        int ret = MAP_CONTINUE;
+
+        do {
+                unsigned sectors = INT_MAX;
+
+                if (KEY_INODE(k) == s->op.inode) {
+                        if (KEY_START(k) <= bio->bi_sector)
+                                break;
+
+                        sectors = min_t(uint64_t, sectors,
+                                        KEY_START(k) - bio->bi_sector);
+                }
+
+                ret = s->d->cache_miss(b, s, bio, sectors);
+        } while (ret == MAP_CONTINUE);
+
+        return ret;
+}
+
+/*
+ * Read from a single key, handling the initial cache miss if the key starts in
+ * the middle of the bio
+ */
+static int submit_partial_cache_hit(struct btree_op *op, struct btree *b,
+                                    struct bkey *k)
+{
+        struct search *s = container_of(op, struct search, op);
+        struct bio *bio = &s->bio.bio;
+        unsigned ptr;
+        struct bio *n;
+
+        int ret = submit_partial_cache_miss(b, s, k);
+        if (ret != MAP_CONTINUE || !KEY_SIZE(k))
+                return ret;
+
+        /* XXX: figure out best pointer - for multiple cache devices */
+        ptr = 0;
+
+        PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;
+
+        while (ret == MAP_CONTINUE &&
+               KEY_INODE(k) == op->inode &&
+               bio->bi_sector < KEY_OFFSET(k)) {
+                struct bkey *bio_key;
+                sector_t sector = PTR_OFFSET(k, ptr) +
+                        (bio->bi_sector - KEY_START(k));
+                unsigned sectors = min_t(uint64_t, INT_MAX,
+                                         KEY_OFFSET(k) - bio->bi_sector);
+
+                n = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
+                if (n == bio)
+                        ret = MAP_DONE;
+
+                bio_key = &container_of(n, struct bbio, bio)->key;
+
+                /*
+                 * The bucket we're reading from might be reused while our bio
+                 * is in flight, and we could then end up reading the wrong
+                 * data.
+                 *
+                 * We guard against this by checking (in cache_read_endio()) if
+                 * the pointer is stale again; if so, we treat it as an error
+                 * and reread from the backing device (but we don't pass that
+                 * error up anywhere).
+                 */
+
+                bch_bkey_copy_single_ptr(bio_key, k, ptr);
+                SET_PTR_OFFSET(bio_key, 0, sector);
+
+                n->bi_end_io = bch_cache_read_endio;
+                n->bi_private = &s->cl;
+
+                __bch_submit_bbio(n, b->c);
+        }
+
+        return ret;
+}
+
+static void cache_lookup(struct closure *cl)
+{
+        struct btree_op *op = container_of(cl, struct btree_op, cl);
+        struct search *s = container_of(op, struct search, op);
+        struct bio *bio = &s->bio.bio;
+
+        int ret = bch_btree_map_keys(op, op->c,
+                                     &KEY(op->inode, bio->bi_sector, 0),
+                                     submit_partial_cache_hit, 1);
+        if (ret == -EAGAIN)
+                continue_at(cl, cache_lookup, bcache_wq);
+
+        closure_return(cl);
+}
+
+/* Common code for the make_request functions */
+
+static void request_endio(struct bio *bio, int error)
+{
+        struct closure *cl = bio->bi_private;
+
+        if (error) {
+                struct search *s = container_of(cl, struct search, cl);
+                s->error = error;
+                /* Only cache read errors are recoverable */
+                s->recoverable = false;
+        }
+
+        bio_put(bio);
+        closure_put(cl);
+}
+
 static void bio_complete(struct search *s)
 {
         if (s->orig_bio) {
@@ -1005,15 +1104,14 @@ static void cached_dev_read_done_bh(struct closure *cl)
 static int cached_dev_cache_miss(struct btree *b, struct search *s,
                                  struct bio *bio, unsigned sectors)
 {
-        int ret = 0;
+        int ret = MAP_CONTINUE;
         unsigned reada = 0;
         struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
         struct bio *miss, *cache_bio;
 
         if (s->cache_miss || s->op.bypass) {
                 miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
-                if (miss == bio)
-                        s->op.lookup_done = true;
+                ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
                 goto out_submit;
         }
 
@@ -1033,11 +1131,9 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
                 return ret;
 
         miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
-        if (miss == bio)
-                s->op.lookup_done = true;
-        else
-                /* btree_search_recurse()'s btree iterator is no good anymore */
-                ret = -EINTR;
+
+        /* btree_search_recurse()'s btree iterator is no good anymore */
+        ret = miss == bio ? MAP_DONE : -EINTR;
 
         cache_bio = bio_alloc_bioset(GFP_NOWAIT,
                         DIV_ROUND_UP(s->cache_bio_sectors, PAGE_SECTORS),
@@ -1075,7 +1171,7 @@ static void cached_dev_read(struct cached_dev *dc, struct search *s)
 {
         struct closure *cl = &s->cl;
 
-        closure_call(&s->op.cl, bch_btree_search_async, NULL, cl);
+        closure_call(&s->op.cl, cache_lookup, NULL, cl);
         continue_at(cl, cached_dev_read_done_bh, NULL);
 }
 
@@ -1287,9 +1383,9 @@ static int flash_dev_cache_miss(struct btree *b, struct search *s,
         bio_advance(bio, min(sectors << 9, bio->bi_size));
 
         if (!bio->bi_size)
-                s->op.lookup_done = true;
+                return MAP_DONE;
 
-        return 0;
+        return MAP_CONTINUE;
 }
 
 static void flash_dev_nodata(struct closure *cl)
@@ -1339,7 +1435,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
 
                 closure_call(&s->op.cl, bch_data_insert, NULL, cl);
         } else {
-                closure_call(&s->op.cl, bch_btree_search_async, NULL, cl);
+                closure_call(&s->op.cl, cache_lookup, NULL, cl);
         }
 
         continue_at(cl, search_free, NULL);
diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
index 7d02ac5f936e..b0b4b0b5b7e9 100644
--- a/drivers/md/bcache/request.h
+++ b/drivers/md/bcache/request.h
@@ -31,10 +31,8 @@ struct search {
         struct keylist          insert_keys;
 };
 
-void bch_cache_read_endio(struct bio *, int);
 unsigned bch_get_congested(struct cache_set *);
 void bch_data_insert(struct closure *cl);
-void bch_cache_read_endio(struct bio *, int);
 
 void bch_open_buckets_free(struct cache_set *);
 int bch_open_buckets_alloc(struct cache_set *);