path: root/drivers/md/bcache
author	Kent Overstreet <kmo@daterainc.com>	2013-09-24 19:26:05 -0400
committer	Kent Overstreet <kmo@daterainc.com>	2013-11-24 01:33:51 -0500
commit	458b76ed2f9517becb74dcc8eedd70d3068ea6e4 (patch)
tree	b01b2150b197e0759b3ba59c0f9367d4477ebb8c /drivers/md/bcache
parent	d57a5f7c6605f15f3b5134837e68b448a7cea88e (diff)
block: Kill bio_segments()/bi_vcnt usage
When we start sharing biovecs, keeping bi_vcnt accurate for splits is
going to be error prone - and unnecessary, if we refactor some code. So
bio_segments() has to go - but most of the existing users just needed to
know if the bio had multiple segments, which is easier - add a
bio_multiple_segments() for them.

(Two of the current uses of bio_segments() are going to go away in a
couple patches, but the current implementation of bio_segments() is
unsafe as soon as we start doing driver conversions for immutable
biovecs - so implement a dumb version for bisectability, it'll go away
in a couple patches)

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Neil Brown <neilb@suse.de>
Cc: Nagalakshmi Nandigama <Nagalakshmi.Nandigama@lsi.com>
Cc: Sreekanth Reddy <Sreekanth.Reddy@lsi.com>
Cc: "James E.J. Bottomley" <JBottomley@parallels.com>
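For reference, the two helpers the message refers to are not part of this
diff (they live in include/linux/bio.h elsewhere in the series). A rough
sketch of what they amount to - the function names come from the commit
message, but the bodies below are only an illustration, not the exact
upstream definitions:

	/*
	 * Sketch only: callers that used bio_segments(bio) > 1 merely need
	 * to know whether the bio spans more than one segment, which can be
	 * answered without a segment count.
	 */
	static inline bool bio_multiple_segments(const struct bio *bio)
	{
		/* more data remaining than fits in the first biovec? */
		return bio->bi_iter.bi_size != bio->bi_io_vec->bv_len;
	}

	/*
	 * Sketch of the interim "dumb" bio_segments() kept for
	 * bisectability: walk the bio with the iterator and count segments
	 * instead of trusting bi_vcnt, which stops being meaningful once
	 * biovecs are shared between split bios.
	 */
	static inline unsigned bio_segments(struct bio *bio)
	{
		unsigned segs = 0;
		struct bio_vec bv;
		struct bvec_iter iter;

		bio_for_each_segment(bv, bio, iter)
			segs++;

		return segs;
	}

The bcache changes below follow the same idea: cache the segment count
where it is genuinely needed, and otherwise count segments inline while
iterating rather than consulting bi_vcnt.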
Diffstat (limited to 'drivers/md/bcache')
-rw-r--r--	drivers/md/bcache/io.c	53
1 file changed, 23 insertions(+), 30 deletions(-)
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index 9b5b6a41a9b6..6e04f3bb0286 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -24,7 +24,8 @@ static void bch_generic_make_request_hack(struct bio *bio)
 	if (bio->bi_iter.bi_idx) {
 		struct bio_vec bv;
 		struct bvec_iter iter;
-		struct bio *clone = bio_alloc(GFP_NOIO, bio_segments(bio));
+		unsigned segs = bio_segments(bio);
+		struct bio *clone = bio_alloc(GFP_NOIO, segs);
 
 		bio_for_each_segment(bv, bio, iter)
 			clone->bi_io_vec[clone->bi_vcnt++] = bv;
@@ -32,7 +33,7 @@ static void bch_generic_make_request_hack(struct bio *bio)
 		clone->bi_iter.bi_sector = bio->bi_iter.bi_sector;
 		clone->bi_bdev = bio->bi_bdev;
 		clone->bi_rw = bio->bi_rw;
-		clone->bi_vcnt = bio_segments(bio);
+		clone->bi_vcnt = segs;
 		clone->bi_iter.bi_size = bio->bi_iter.bi_size;
 
 		clone->bi_private = bio;
@@ -133,40 +134,32 @@ out:
 
 static unsigned bch_bio_max_sectors(struct bio *bio)
 {
-	unsigned ret = bio_sectors(bio);
 	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-	unsigned max_segments = min_t(unsigned, BIO_MAX_PAGES,
-				      queue_max_segments(q));
+	struct bio_vec bv;
+	struct bvec_iter iter;
+	unsigned ret = 0, seg = 0;
 
 	if (bio->bi_rw & REQ_DISCARD)
-		return min(ret, q->limits.max_discard_sectors);
-
-	if (bio_segments(bio) > max_segments ||
-	    q->merge_bvec_fn) {
-		struct bio_vec bv;
-		struct bvec_iter iter;
-		unsigned seg = 0;
-
-		ret = 0;
+		return min(bio_sectors(bio), q->limits.max_discard_sectors);
 
-		bio_for_each_segment(bv, bio, iter) {
-			struct bvec_merge_data bvm = {
-				.bi_bdev = bio->bi_bdev,
-				.bi_sector = bio->bi_iter.bi_sector,
-				.bi_size = ret << 9,
-				.bi_rw = bio->bi_rw,
-			};
+	bio_for_each_segment(bv, bio, iter) {
+		struct bvec_merge_data bvm = {
+			.bi_bdev = bio->bi_bdev,
+			.bi_sector = bio->bi_iter.bi_sector,
+			.bi_size = ret << 9,
+			.bi_rw = bio->bi_rw,
+		};
 
-			if (seg == max_segments)
-				break;
+		if (seg == min_t(unsigned, BIO_MAX_PAGES,
+				 queue_max_segments(q)))
+			break;
 
-			if (q->merge_bvec_fn &&
-			    q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
-				break;
+		if (q->merge_bvec_fn &&
+		    q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
+			break;
 
-			seg++;
-			ret += bv.bv_len >> 9;
-		}
-	}
+		seg++;
+		ret += bv.bv_len >> 9;
+	}
 
 	ret = min(ret, queue_max_sectors(q));