Diffstat (limited to 'block/blk-merge.c')
-rw-r--r--   block/blk-merge.c   66
1 file changed, 36 insertions(+), 30 deletions(-)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 1ffc58977835..8f8adaa95466 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -12,10 +12,11 @@
 static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 					     struct bio *bio)
 {
-	struct bio_vec *bv, *bvprv = NULL;
-	int cluster, i, high, highprv = 1;
+	struct bio_vec bv, bvprv = { NULL };
+	int cluster, high, highprv = 1;
 	unsigned int seg_size, nr_phys_segs;
 	struct bio *fbio, *bbio;
+	struct bvec_iter iter;
 
 	if (!bio)
 		return 0;
@@ -25,25 +26,23 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 	seg_size = 0;
 	nr_phys_segs = 0;
 	for_each_bio(bio) {
-		bio_for_each_segment(bv, bio, i) {
+		bio_for_each_segment(bv, bio, iter) {
 			/*
 			 * the trick here is making sure that a high page is
 			 * never considered part of another segment, since that
 			 * might change with the bounce page.
 			 */
-			high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
-			if (high || highprv)
-				goto new_segment;
-			if (cluster) {
-				if (seg_size + bv->bv_len
+			high = page_to_pfn(bv.bv_page) > queue_bounce_pfn(q);
+			if (!high && !highprv && cluster) {
+				if (seg_size + bv.bv_len
 				    > queue_max_segment_size(q))
 					goto new_segment;
-				if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
+				if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
 					goto new_segment;
-				if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
+				if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
 					goto new_segment;
 
-				seg_size += bv->bv_len;
+				seg_size += bv.bv_len;
 				bvprv = bv;
 				continue;
 			}
@@ -54,7 +53,7 @@ new_segment:
 
 			nr_phys_segs++;
 			bvprv = bv;
-			seg_size = bv->bv_len;
+			seg_size = bv.bv_len;
 			highprv = high;
 		}
 		bbio = bio;
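
Note: the hunks above convert __blk_recalc_rq_segments() from the old indexed iterator, where bio_for_each_segment() filled a struct bio_vec pointer and an integer index, to the bvec_iter form, which hands back each bio_vec by value. A minimal sketch of the new idiom follows; count_bio_bytes() is a hypothetical helper used only for illustration, not part of this patch.

	/* Walk a bio's segments with the post-conversion API. */
	static unsigned int count_bio_bytes(struct bio *bio)
	{
		struct bio_vec bv;
		struct bvec_iter iter;
		unsigned int bytes = 0;

		bio_for_each_segment(bv, bio, iter)
			bytes += bv.bv_len;	/* bv is a copy: '.' not '->' */

		return bytes;
	}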
@@ -87,6 +86,9 @@ EXPORT_SYMBOL(blk_recount_segments);
 static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 				   struct bio *nxt)
 {
+	struct bio_vec end_bv = { NULL }, nxt_bv;
+	struct bvec_iter iter;
+
 	if (!blk_queue_cluster(q))
 		return 0;
 
@@ -97,34 +99,40 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 	if (!bio_has_data(bio))
 		return 1;
 
-	if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
+	bio_for_each_segment(end_bv, bio, iter)
+		if (end_bv.bv_len == iter.bi_size)
+			break;
+
+	nxt_bv = bio_iovec(nxt);
+
+	if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
 		return 0;
 
 	/*
 	 * bio and nxt are contiguous in memory; check if the queue allows
 	 * these two to be merged into one
 	 */
-	if (BIO_SEG_BOUNDARY(q, bio, nxt))
+	if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
 		return 1;
 
 	return 0;
 }
 
-static void
+static inline void
 __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
-		     struct scatterlist *sglist, struct bio_vec **bvprv,
+		     struct scatterlist *sglist, struct bio_vec *bvprv,
 		     struct scatterlist **sg, int *nsegs, int *cluster)
 {
 
 	int nbytes = bvec->bv_len;
 
-	if (*bvprv && *cluster) {
+	if (*sg && *cluster) {
 		if ((*sg)->length + nbytes > queue_max_segment_size(q))
 			goto new_segment;
 
-		if (!BIOVEC_PHYS_MERGEABLE(*bvprv, bvec))
+		if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
 			goto new_segment;
-		if (!BIOVEC_SEG_BOUNDARY(q, *bvprv, bvec))
+		if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
 			goto new_segment;
 
 		(*sg)->length += nbytes;
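
Note: the loop added to blk_phys_contig_segment() replaces the removed __BVEC_END() macro. Within bio_for_each_segment(), iter.bi_size counts the bytes still to be visited including the current segment, so bv_len == bi_size holds exactly when the loop reaches the final segment. The same idiom as a standalone sketch, with bio_last_bvec() as a hypothetical name:

	/* Find a bio's last segment without __BVEC_END(). */
	static struct bio_vec bio_last_bvec(struct bio *bio)
	{
		struct bio_vec bv = { NULL };
		struct bvec_iter iter;

		bio_for_each_segment(bv, bio, iter)
			if (bv.bv_len == iter.bi_size)
				break;	/* current segment is the last one */

		return bv;
	}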
@@ -150,7 +158,7 @@ new_segment:
 		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
 		(*nsegs)++;
 	}
-	*bvprv = bvec;
+	*bvprv = *bvec;
 }
 
 /*
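
Note: two details in the __blk_segment_map_sg() hunks above follow from bvprv becoming a value rather than a pointer. The previous segment is now preserved by structure copy (*bvprv = *bvec), which is needed because the bvec handed out by the iterator is a stack temporary. And since a by-value bvprv can no longer be NULL-tested to detect the first segment, the function tests *sg instead, which stays NULL until the first scatterlist entry has been emitted.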
@@ -160,7 +168,7 @@ new_segment:
 int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 		  struct scatterlist *sglist)
 {
-	struct bio_vec *bvec, *bvprv;
+	struct bio_vec bvec, bvprv = { NULL };
 	struct req_iterator iter;
 	struct scatterlist *sg;
 	int nsegs, cluster;
@@ -171,10 +179,9 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 	/*
 	 * for each bio in rq
 	 */
-	bvprv = NULL;
 	sg = NULL;
 	rq_for_each_segment(bvec, rq, iter) {
-		__blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
+		__blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg,
 				     &nsegs, &cluster);
 	} /* segments in rq */
 
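
Note: the same by-value convention applies at the request level, which is why the call site above now passes &bvec: rq_for_each_segment() fills an on-stack bio_vec. A driver-side sketch, with count_rq_bytes() as a hypothetical name:

	/* Walk every segment of a request after the conversion. */
	static unsigned int count_rq_bytes(struct request *rq)
	{
		struct req_iterator iter;
		struct bio_vec bvec;
		unsigned int bytes = 0;

		rq_for_each_segment(bvec, rq, iter)
			bytes += bvec.bv_len;	/* was bvec->bv_len */

		return bytes;
	}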
@@ -223,18 +230,17 @@ EXPORT_SYMBOL(blk_rq_map_sg);
 int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
 		   struct scatterlist *sglist)
 {
-	struct bio_vec *bvec, *bvprv;
+	struct bio_vec bvec, bvprv = { NULL };
 	struct scatterlist *sg;
 	int nsegs, cluster;
-	unsigned long i;
+	struct bvec_iter iter;
 
 	nsegs = 0;
 	cluster = blk_queue_cluster(q);
 
-	bvprv = NULL;
 	sg = NULL;
-	bio_for_each_segment(bvec, bio, i) {
-		__blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
+	bio_for_each_segment(bvec, bio, iter) {
+		__blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg,
 				     &nsegs, &cluster);
 	} /* segments in bio */
 
@@ -543,9 +549,9 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 
 int blk_try_merge(struct request *rq, struct bio *bio)
 {
-	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_sector)
+	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
 		return ELEVATOR_BACK_MERGE;
-	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_sector)
+	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
 		return ELEVATOR_FRONT_MERGE;
 	return ELEVATOR_NO_MERGE;
 }
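
Note: the blk_try_merge() hunk reflects the field move that underlies the whole series: the parts of a bio that change as it advances (sector, remaining size, index) now live in bio->bi_iter, so sector arithmetic reads bi_iter.bi_sector instead of bi_sector. The back-merge test in isolation, as a sketch with bio_follows_rq() as a hypothetical name:

	/* True if the bio starts exactly where the request ends. */
	static bool bio_follows_rq(struct request *rq, struct bio *bio)
	{
		return blk_rq_pos(rq) + blk_rq_sectors(rq) ==
			bio->bi_iter.bi_sector;
	}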