author    Kent Overstreet <kmo@daterainc.com>  2013-11-23 20:19:00 -0500
committer Kent Overstreet <kmo@daterainc.com>  2013-11-24 01:33:49 -0500
commit    7988613b0e5b2638caf6cd493cc78e9595eba19c (patch)
tree      cc9fc4e235278035313ee32940740f61269f8fb3 /block/blk-merge.c
parent    a4ad39b1d10584dfcfcfb0d510faab2c7f034399 (diff)
block: Convert bio_for_each_segment() to bvec_iter
More prep work for immutable biovecs - with immutable bvecs drivers won't
be able to use the biovec directly, they'll need to use helpers that take
into account bio->bi_iter.bi_bvec_done.

This updates callers for the new usage without changing the
implementation yet.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: "Ed L. Cashin" <ecashin@coraid.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Lars Ellenberg <drbd-dev@lists.linbit.com>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Paul Clements <Paul.Clements@steeleye.com>
Cc: Jim Paris <jim@jtan.com>
Cc: Geoff Levand <geoff@infradead.org>
Cc: Yehuda Sadeh <yehuda@inktank.com>
Cc: Sage Weil <sage@inktank.com>
Cc: Alex Elder <elder@inktank.com>
Cc: ceph-devel@vger.kernel.org
Cc: Joshua Morris <josh.h.morris@us.ibm.com>
Cc: Philip Kelleher <pjk1939@linux.vnet.ibm.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Neil Brown <neilb@suse.de>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: linux390@de.ibm.com
Cc: Nagalakshmi Nandigama <Nagalakshmi.Nandigama@lsi.com>
Cc: Sreekanth Reddy <Sreekanth.Reddy@lsi.com>
Cc: support@lsi.com
Cc: "James E.J. Bottomley" <JBottomley@parallels.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Herton Ronaldo Krzesinski <herton.krzesinski@canonical.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Guo Chao <yan@linux.vnet.ibm.com>
Cc: Asai Thambi S P <asamymuthupa@micron.com>
Cc: Selvan Mani <smani@micron.com>
Cc: Sam Bradshaw <sbradshaw@micron.com>
Cc: Matthew Wilcox <matthew.r.wilcox@intel.com>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Stephen Hemminger <shemminger@vyatta.com>
Cc: Quoc-Son Anh <quoc-sonx.anh@intel.com>
Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Jerome Marchand <jmarchan@redhat.com>
Cc: Seth Jennings <sjenning@linux.vnet.ibm.com>
Cc: "Martin K. Petersen" <martin.petersen@oracle.com>
Cc: Mike Snitzer <snitzer@redhat.com>
Cc: Vivek Goyal <vgoyal@redhat.com>
Cc: "Darrick J. Wong" <darrick.wong@oracle.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Jan Kara <jack@suse.cz>
Cc: linux-m68k@lists.linux-m68k.org
Cc: linuxppc-dev@lists.ozlabs.org
Cc: drbd-user@lists.linbit.com
Cc: nbd-general@lists.sourceforge.net
Cc: cbe-oss-dev@lists.ozlabs.org
Cc: xen-devel@lists.xensource.com
Cc: virtualization@lists.linux-foundation.org
Cc: linux-raid@vger.kernel.org
Cc: linux-s390@vger.kernel.org
Cc: DL-MPTFusionLinux@lsi.com
Cc: linux-scsi@vger.kernel.org
Cc: devel@driverdev.osuosl.org
Cc: linux-fsdevel@vger.kernel.org
Cc: cluster-devel@redhat.com
Cc: linux-mm@kvack.org
Acked-by: Geoff Levand <geoff@infradead.org>
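The caller-side conversion is the same throughout the diff below: the loop
variable changes from a struct bio_vec pointer plus an integer index to a
struct bio_vec held by value plus a struct bvec_iter, and field access
changes from "->" to ".". A minimal before/after sketch of the idiom
(illustrative only; process_page() is a hypothetical consumer, not part of
this patch):

    /* Before: index-based loop handing out pointers into bi_io_vec. */
    struct bio_vec *bv;
    int i;

    bio_for_each_segment(bv, bio, i)
            process_page(bv->bv_page, bv->bv_len, bv->bv_offset);

    /* After: a bvec_iter yields each segment as a bio_vec by value,
     * which lets the helpers account for bio->bi_iter.bi_bvec_done. */
    struct bio_vec bv;
    struct bvec_iter iter;

    bio_for_each_segment(bv, bio, iter)
            process_page(bv.bv_page, bv.bv_len, bv.bv_offset);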
Diffstat (limited to 'block/blk-merge.c')
-rw-r--r--  block/blk-merge.c | 49
1 file changed, 23 insertions(+), 26 deletions(-)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 03bc083c28cf..a1ead9049ed6 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -12,10 +12,11 @@
 static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
                                              struct bio *bio)
 {
-        struct bio_vec *bv, *bvprv = NULL;
-        int cluster, i, high, highprv = 1;
+        struct bio_vec bv, bvprv = { NULL };
+        int cluster, high, highprv = 1;
         unsigned int seg_size, nr_phys_segs;
         struct bio *fbio, *bbio;
+        struct bvec_iter iter;
 
         if (!bio)
                 return 0;
@@ -25,25 +26,23 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
         seg_size = 0;
         nr_phys_segs = 0;
         for_each_bio(bio) {
-                bio_for_each_segment(bv, bio, i) {
+                bio_for_each_segment(bv, bio, iter) {
                         /*
                          * the trick here is making sure that a high page is
                          * never considered part of another segment, since that
                          * might change with the bounce page.
                          */
-                        high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
-                        if (high || highprv)
-                                goto new_segment;
-                        if (cluster) {
-                                if (seg_size + bv->bv_len
+                        high = page_to_pfn(bv.bv_page) > queue_bounce_pfn(q);
+                        if (!high && !highprv && cluster) {
+                                if (seg_size + bv.bv_len
                                     > queue_max_segment_size(q))
                                         goto new_segment;
-                                if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
+                                if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
                                         goto new_segment;
-                                if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
+                                if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
                                         goto new_segment;
 
-                                seg_size += bv->bv_len;
+                                seg_size += bv.bv_len;
                                 bvprv = bv;
                                 continue;
                         }
@@ -54,7 +53,7 @@ new_segment:
 
                 nr_phys_segs++;
                 bvprv = bv;
-                seg_size = bv->bv_len;
+                seg_size = bv.bv_len;
                 highprv = high;
         }
         bbio = bio;
@@ -110,21 +109,21 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
                 return 0;
 }
 
-static void
+static inline void
 __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
-                     struct scatterlist *sglist, struct bio_vec **bvprv,
+                     struct scatterlist *sglist, struct bio_vec *bvprv,
                      struct scatterlist **sg, int *nsegs, int *cluster)
 {
 
         int nbytes = bvec->bv_len;
 
-        if (*bvprv && *cluster) {
+        if (*sg && *cluster) {
                 if ((*sg)->length + nbytes > queue_max_segment_size(q))
                         goto new_segment;
 
-                if (!BIOVEC_PHYS_MERGEABLE(*bvprv, bvec))
+                if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
                         goto new_segment;
-                if (!BIOVEC_SEG_BOUNDARY(q, *bvprv, bvec))
+                if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
                         goto new_segment;
 
                 (*sg)->length += nbytes;
@@ -150,7 +149,7 @@ new_segment:
                 sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
                 (*nsegs)++;
         }
-        *bvprv = bvec;
+        *bvprv = *bvec;
 }
 
 /*
@@ -160,7 +159,7 @@ new_segment:
 int blk_rq_map_sg(struct request_queue *q, struct request *rq,
                   struct scatterlist *sglist)
 {
-        struct bio_vec *bvec, *bvprv;
+        struct bio_vec bvec, bvprv;
         struct req_iterator iter;
         struct scatterlist *sg;
         int nsegs, cluster;
@@ -171,10 +170,9 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
         /*
          * for each bio in rq
          */
-        bvprv = NULL;
         sg = NULL;
         rq_for_each_segment(bvec, rq, iter) {
-                __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
+                __blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg,
                                      &nsegs, &cluster);
         } /* segments in rq */
 
@@ -223,18 +221,17 @@ EXPORT_SYMBOL(blk_rq_map_sg);
 int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
                    struct scatterlist *sglist)
 {
-        struct bio_vec *bvec, *bvprv;
+        struct bio_vec bvec, bvprv;
         struct scatterlist *sg;
         int nsegs, cluster;
-        unsigned long i;
+        struct bvec_iter iter;
 
         nsegs = 0;
         cluster = blk_queue_cluster(q);
 
-        bvprv = NULL;
         sg = NULL;
-        bio_for_each_segment(bvec, bio, i) {
-                __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
+        bio_for_each_segment(bvec, bio, iter) {
+                __blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg,
                                      &nsegs, &cluster);
         } /* segments in bio */
 
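Why bvprv becomes a by-value copy: once biovecs are immutable, the iterator
no longer hands out pointers into bi_io_vec; each step materializes a
temporary bio_vec clamped by the iterator state. A rough sketch of that
helper shape, paraphrased from the bvec_iter design rather than copied from
this patch:

    /* Sketch: build the "current" segment from the iterator state.
     * bi_bvec_done records how much of the current bvec has already been
     * consumed, which is exactly what direct biovec access would miss. */
    static inline struct bio_vec bvec_iter_bvec_sketch(struct bio_vec *bvec,
                                                       struct bvec_iter iter)
    {
            struct bio_vec *cur = &bvec[iter.bi_idx];

            return (struct bio_vec) {
                    .bv_page   = cur->bv_page,
                    .bv_len    = min(iter.bi_size,
                                     cur->bv_len - iter.bi_bvec_done),
                    .bv_offset = cur->bv_offset + iter.bi_bvec_done,
            };
    }

Because each bio_vec produced this way is a temporary, __blk_segment_map_sg()
must remember the previous segment by copying it (*bvprv = *bvec;) rather
than storing a pointer, and the old "*bvprv && *cluster" test becomes
"*sg && *cluster", since *sg is NULL until the first segment is mapped and a
by-value bvprv can no longer be NULL-checked.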