author     Ming Lei <ming.lei@canonical.com>    2015-10-20 11:13:52 -0400
committer  Jens Axboe <axboe@fb.com>            2015-10-21 17:00:50 -0400
commit     bdced438acd83ad83a6c6fc7f50099b820245ddb (patch)
tree       441b9c9b16d55c157db90c499ba4fd2821bfe60b
parent     0809e3ac62319dc7534b64f95ac37e230d740e8a (diff)
block: setup bi_phys_segments after splitting
The number of physical segments (bio->bi_phys_segments) is always obtained during bio splitting, so it is natural to set it up right after splitting; then we can avoid recomputing the segment count again during merge.

Reviewed-by: Jeff Moyer <jmoyer@redhat.com>
Signed-off-by: Ming Lei <ming.lei@canonical.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
-rw-r--r--  block/blk-merge.c  29
1 file changed, 22 insertions(+), 7 deletions(-)
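The idea behind the change can be illustrated outside the kernel. The sketch below is a minimal, self-contained model, not the kernel code: a split-time routine counts the physical segments while it already walks the vector list, caches the result together with a validity flag, and the merge-time check reuses the cached count instead of rescanning. All names (toy_bio, toy_split, toy_segments, ...) are invented for illustration; in the kernel the cached value is bio->bi_phys_segments and the flag is BIO_SEG_VALID.

/*
 * Minimal sketch (not kernel code): cache the segment count computed
 * during splitting so the merge path does not have to recount it.
 * Every identifier here is invented for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_vec {
        unsigned len;              /* length of one contiguous buffer, in bytes */
};

struct toy_bio {
        struct toy_vec vecs[8];
        unsigned nr_vecs;
        unsigned phys_segments;    /* cached count, analogous to bi_phys_segments */
        bool seg_valid;            /* analogous to the BIO_SEG_VALID flag */
};

/* Walk the vectors once; the split code already has to do this walk anyway. */
static unsigned toy_count_segments(const struct toy_bio *bio)
{
        unsigned i, nsegs = 0;

        for (i = 0; i < bio->nr_vecs; i++)
                if (bio->vecs[i].len)
                        nsegs++;
        return nsegs;
}

/* "Split" step: compute the count as a side effect and cache it. */
static void toy_split(struct toy_bio *bio)
{
        bio->phys_segments = toy_count_segments(bio);
        bio->seg_valid = true;
}

/* "Merge" step: reuse the cached count; recount only if nobody cached it. */
static unsigned toy_segments(struct toy_bio *bio)
{
        if (!bio->seg_valid)
                toy_split(bio);
        return bio->phys_segments;
}

int main(void)
{
        struct toy_bio bio = {
                .vecs = { { 4096 }, { 4096 }, { 512 } },
                .nr_vecs = 3,
        };

        toy_split(&bio);                              /* split path fills the cache */
        printf("segments: %u\n", toy_segments(&bio)); /* merge path reuses it */
        return 0;
}

The same pattern exists on the consumer side in blk-merge.c: the merge helpers only recount a bio's segments when BIO_SEG_VALID is not set, so filling in bi_phys_segments during splitting lets that recount be skipped.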
diff --git a/block/blk-merge.c b/block/blk-merge.c
index c4e9c37f3e38..22293fdf6514 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -11,13 +11,16 @@
 
 static struct bio *blk_bio_discard_split(struct request_queue *q,
                                          struct bio *bio,
-                                         struct bio_set *bs)
+                                         struct bio_set *bs,
+                                         unsigned *nsegs)
 {
         unsigned int max_discard_sectors, granularity;
         int alignment;
         sector_t tmp;
         unsigned split_sectors;
 
+        *nsegs = 1;
+
         /* Zero-sector (unknown) and one-sector granularities are the same. */
         granularity = max(q->limits.discard_granularity >> 9, 1U);
 
@@ -51,8 +54,11 @@ static struct bio *blk_bio_discard_split(struct request_queue *q,
 
 static struct bio *blk_bio_write_same_split(struct request_queue *q,
                                             struct bio *bio,
-                                            struct bio_set *bs)
+                                            struct bio_set *bs,
+                                            unsigned *nsegs)
 {
+        *nsegs = 1;
+
         if (!q->limits.max_write_same_sectors)
                 return NULL;
 
@@ -64,7 +70,8 @@ static struct bio *blk_bio_write_same_split(struct request_queue *q,
 
 static struct bio *blk_bio_segment_split(struct request_queue *q,
                                          struct bio *bio,
-                                         struct bio_set *bs)
+                                         struct bio_set *bs,
+                                         unsigned *segs)
 {
         struct bio_vec bv, bvprv, *bvprvp = NULL;
         struct bvec_iter iter;
@@ -106,22 +113,30 @@ new_segment:
                 sectors += bv.bv_len >> 9;
         }
 
+        *segs = nsegs;
         return NULL;
 split:
+        *segs = nsegs;
         return bio_split(bio, sectors, GFP_NOIO, bs);
 }
 
 void blk_queue_split(struct request_queue *q, struct bio **bio,
                      struct bio_set *bs)
 {
-        struct bio *split;
+        struct bio *split, *res;
+        unsigned nsegs;
 
         if ((*bio)->bi_rw & REQ_DISCARD)
-                split = blk_bio_discard_split(q, *bio, bs);
+                split = blk_bio_discard_split(q, *bio, bs, &nsegs);
         else if ((*bio)->bi_rw & REQ_WRITE_SAME)
-                split = blk_bio_write_same_split(q, *bio, bs);
+                split = blk_bio_write_same_split(q, *bio, bs, &nsegs);
         else
-                split = blk_bio_segment_split(q, *bio, q->bio_split);
+                split = blk_bio_segment_split(q, *bio, q->bio_split, &nsegs);
+
+        /* physical segments can be figured out during splitting */
+        res = split ? split : *bio;
+        res->bi_phys_segments = nsegs;
+        bio_set_flag(res, BIO_SEG_VALID);
 
         if (split) {
                 bio_chain(split, *bio);