path: root/drivers/md/bcache
author    Kent Overstreet <kmo@daterainc.com>  2013-11-23 21:21:01 -0500
committer Kent Overstreet <kmo@daterainc.com>  2013-11-24 01:33:57 -0500
commit    20d0189b1012a37d2533a87fb451f7852f2418d1 (patch)
tree      5ceaa6cfc0e1f1cec423c6c9f5de72d49f2d63a1 /drivers/md/bcache
parent    ee67891bf132612feb7b999ee1f3350b40867cb4 (diff)
block: Introduce new bio_split()
The new bio_split() can split arbitrary bios - it's not restricted to
single page bios, like the old bio_split() (previously renamed to
bio_pair_split()). It also has different semantics - it doesn't allocate
a struct bio_pair, leaving it up to the caller to handle completions.

Then convert the existing bio_pair_split() users to the new bio_split()
- and also nvme, which was open coding bio splitting.

(We have to take that BUG_ON() out of bio_integrity_trim() because this
bio_split() needs to use it, and there's no reason it has to be used on
bios marked as cloned; BIO_CLONED doesn't seem to have clearly
documented semantics anyways.)

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Martin K. Petersen <martin.petersen@oracle.com>
Cc: Matthew Wilcox <matthew.r.wilcox@intel.com>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Vishal Verma <vishal.l.verma@intel.com>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Neil Brown <neilb@suse.de>
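For reference: the converted bcache call sites below use a bio_next_split()
helper whose definition lives outside this directory (this page is limited to
drivers/md/bcache, so it is not shown). A minimal sketch, reconstructed from
the semantics described above and from the removed bch_bio_split() below;
treat it as illustrative rather than the patch's verbatim include/linux/bio.h
hunk:

/*
 * Sketch: like the new bio_split(), but when the requested split covers
 * the whole bio, hand back @bio itself instead of allocating a fragment
 * - the same "returns @bio unchanged" contract the removed
 * bch_bio_split() documented.  Completions are the caller's job either way.
 */
static inline struct bio *bio_next_split(struct bio *bio, int sectors,
					 gfp_t gfp, struct bio_set *bs)
{
	if (sectors >= bio_sectors(bio))
		return bio;

	return bio_split(bio, sectors, gfp, bs);
}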
Diffstat (limited to 'drivers/md/bcache')
-rw-r--r--  drivers/md/bcache/bcache.h  |  1 -
-rw-r--r--  drivers/md/bcache/io.c      | 82 +-
-rw-r--r--  drivers/md/bcache/request.c | 12 +-
3 files changed, 8 insertions, 87 deletions
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 6b6fe935be73..964353c5329d 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -901,7 +901,6 @@ void bch_bbio_endio(struct cache_set *, struct bio *, int, const char *);
 void bch_bbio_free(struct bio *, struct cache_set *);
 struct bio *bch_bbio_alloc(struct cache_set *);
 
-struct bio *bch_bio_split(struct bio *, int, gfp_t, struct bio_set *);
 void bch_generic_make_request(struct bio *, struct bio_split_pool *);
 void __bch_submit_bbio(struct bio *, struct cache_set *);
 void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index 522f95778443..fa028fa82df4 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -11,84 +11,6 @@
 
 #include <linux/blkdev.h>
 
-/**
- * bch_bio_split - split a bio
- * @bio: bio to split
- * @sectors: number of sectors to split from the front of @bio
- * @gfp: gfp mask
- * @bs: bio set to allocate from
- *
- * Allocates and returns a new bio which represents @sectors from the start of
- * @bio, and updates @bio to represent the remaining sectors.
- *
- * If bio_sectors(@bio) was less than or equal to @sectors, returns @bio
- * unchanged.
- *
- * The newly allocated bio will point to @bio's bi_io_vec, if the split was on a
- * bvec boundry; it is the caller's responsibility to ensure that @bio is not
- * freed before the split.
- */
-struct bio *bch_bio_split(struct bio *bio, int sectors,
-			  gfp_t gfp, struct bio_set *bs)
-{
-	unsigned vcnt = 0, nbytes = sectors << 9;
-	struct bio_vec bv;
-	struct bvec_iter iter;
-	struct bio *ret = NULL;
-
-	BUG_ON(sectors <= 0);
-
-	if (sectors >= bio_sectors(bio))
-		return bio;
-
-	if (bio->bi_rw & REQ_DISCARD) {
-		ret = bio_alloc_bioset(gfp, 1, bs);
-		if (!ret)
-			return NULL;
-		goto out;
-	}
-
-	bio_for_each_segment(bv, bio, iter) {
-		vcnt++;
-
-		if (nbytes <= bv.bv_len)
-			break;
-
-		nbytes -= bv.bv_len;
-	}
-
-	ret = bio_alloc_bioset(gfp, vcnt, bs);
-	if (!ret)
-		return NULL;
-
-	bio_for_each_segment(bv, bio, iter) {
-		ret->bi_io_vec[ret->bi_vcnt++] = bv;
-
-		if (ret->bi_vcnt == vcnt)
-			break;
-	}
-
-	ret->bi_io_vec[ret->bi_vcnt - 1].bv_len = nbytes;
-out:
-	ret->bi_bdev = bio->bi_bdev;
-	ret->bi_iter.bi_sector = bio->bi_iter.bi_sector;
-	ret->bi_iter.bi_size = sectors << 9;
-	ret->bi_rw = bio->bi_rw;
-
-	if (bio_integrity(bio)) {
-		if (bio_integrity_clone(ret, bio, gfp)) {
-			bio_put(ret);
-			return NULL;
-		}
-
-		bio_integrity_trim(ret, 0, bio_sectors(ret));
-	}
-
-	bio_advance(bio, ret->bi_iter.bi_size);
-
-	return ret;
-}
-
 static unsigned bch_bio_max_sectors(struct bio *bio)
 {
 	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
@@ -172,8 +94,8 @@ void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
 	bio_get(bio);
 
 	do {
-		n = bch_bio_split(bio, bch_bio_max_sectors(bio),
-				  GFP_NOIO, s->p->bio_split);
+		n = bio_next_split(bio, bch_bio_max_sectors(bio),
+				   GFP_NOIO, s->p->bio_split);
 
 		n->bi_end_io = bch_bio_submit_split_endio;
 		n->bi_private = &s->cl;
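The hunk above is the conversion pattern in miniature: because
bio_next_split() hands back @bio itself for the final chunk, "n == bio"
terminates the do/while, and the caller wires up its own completion (here
bch_bio_submit_split_endio plus the closure) now that the new bio_split() no
longer allocates a struct bio_pair to do it. A minimal standalone sketch of
the same pattern - my_submit_split() is a hypothetical helper, not part of
this patch:

/*
 * Hypothetical sketch: submit @bio in chunks of at most @max_sectors.
 * bio_next_split() returns @bio itself for the final chunk, which ends
 * the loop.  A real caller must also point each fragment's bi_end_io at
 * a completion that accounts for all outstanding fragments, as
 * bch_generic_make_request() does above.
 */
static void my_submit_split(struct bio *bio, unsigned max_sectors,
			    struct bio_set *bs)
{
	struct bio *n;

	do {
		n = bio_next_split(bio, max_sectors, GFP_NOIO, bs);
		generic_make_request(n);
	} while (n != bio);
}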
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 63451c724781..5878cdb39529 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -371,7 +371,7 @@ static void bch_data_insert_start(struct closure *cl)
 				       op->writeback))
 			goto err;
 
-		n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split);
+		n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);
 
 		n->bi_end_io = bch_data_insert_endio;
 		n->bi_private = cl;
@@ -679,9 +679,9 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
 	if (KEY_DIRTY(k))
 		s->read_dirty_data = true;
 
-	n = bch_bio_split(bio, min_t(uint64_t, INT_MAX,
-				     KEY_OFFSET(k) - bio->bi_iter.bi_sector),
-			  GFP_NOIO, s->d->bio_split);
+	n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
+				      KEY_OFFSET(k) - bio->bi_iter.bi_sector),
+			   GFP_NOIO, s->d->bio_split);
 
 	bio_key = &container_of(n, struct bbio, bio)->key;
 	bch_bkey_copy_single_ptr(bio_key, k, ptr);
@@ -920,7 +920,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 	struct bio *miss, *cache_bio;
 
 	if (s->cache_miss || s->iop.bypass) {
-		miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
+		miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
 		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
 		goto out_submit;
 	}
@@ -943,7 +943,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 
 	s->iop.replace = true;
 
-	miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
+	miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
 
 	/* btree_search_recurse()'s btree iterator is no good anymore */
 	ret = miss == bio ? MAP_DONE : -EINTR;