commit     9dfa52831e96194b8649613e3131baa2c109f7dc
tree       4a3c8b8261c54ba9530232a02ab241a4111d9ce3
parent     bbf25010f1a6b761914430f5fca081ec8c7accd1
author     NeilBrown <neilb@suse.de>               2007-08-16 07:27:52 -0400
committer  Jens Axboe <axboe@carl.home.kernel.dk>  2007-10-10 03:25:56 -0400
Merge blk_recount_segments into blk_recalc_rq_segments
blk_recalc_rq_segments calls blk_recount_segments on each bio,
then does some extra calculations to handle segments that overlap
two bios.
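
For reference, the old flow in outline (condensed from the
blk_recalc_rq_segments() removed at the bottom of this diff; the
max_segment_size checks and the BIO_SEG_VALID handling are omitted
here):

    /*
     * Old shape: count each bio on its own, then patch up segments
     * that turn out to straddle two adjacent bios.
     */
    static void blk_recalc_rq_segments(struct request *rq)
    {
            struct bio *bio, *prevbio = NULL;
            int nr_phys_segs = 0, nr_hw_segs = 0;
            struct request_queue *q = rq->q;

            rq_for_each_bio(bio, rq) {
                    nr_phys_segs += bio_phys_segments(q, bio);
                    nr_hw_segs += bio_hw_segments(q, bio);
                    if (prevbio) {
                            /* undo the double-count across the bio boundary */
                            if (blk_phys_contig_segment(q, prevbio, bio))
                                    nr_phys_segs--;
                            if (blk_hw_contig_segment(q, prevbio, bio))
                                    nr_hw_segs--;
                    }
                    prevbio = bio;
            }
            rq->nr_phys_segments = nr_phys_segs;
            rq->nr_hw_segments = nr_hw_segs;
    }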
If we merge the code from blk_recount_segments into
blk_recalc_rq_segments, we can process the whole request one bio_vec
at a time and avoid the messy cross-bio calculations.
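
The shape of the merged loop, for clarity (a condensed sketch of the
new blk_recalc_rq_segments() in the diff below; the cluster, size and
BIOVEC_* boundary tests live in the real code):

    /*
     * New shape: one nested walk over every bio_vec in the request,
     * so a segment spanning two bios is counted right the first time.
     */
    static void blk_recalc_rq_segments(struct request *rq)
    {
            struct bio_vec *bv, *bvprv = NULL;
            struct bio *bio;
            int i, nr_phys_segs = 0;

            rq_for_each_bio(bio, rq)
                    bio_for_each_segment(bv, bio, i) {
                            /* a bio_vec that cannot merge with its
                             * predecessor starts a new segment */
                            if (!bvprv || !BIOVEC_PHYS_MERGEABLE(bvprv, bv))
                                    nr_phys_segs++;
                            bvprv = bv;
                    }
            rq->nr_phys_segments = nr_phys_segs;
    }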
Then blk_recount_segments can be implemented by calling
blk_recalc_rq_segments, passing it a simple on-stack request which
stores just the bio.
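
Concretely, the wrapper only needs to fill in the fields that
blk_recalc_rq_segments() reads (this is the new blk_recount_segments()
from the diff below, with comments added):

    void blk_recount_segments(struct request_queue *q, struct bio *bio)
    {
            struct request rq;
            struct bio *nxt = bio->bi_next;

            rq.q = q;
            rq.bio = rq.biotail = bio;
            bio->bi_next = NULL;            /* count this bio alone ... */
            blk_recalc_rq_segments(&rq);
            bio->bi_next = nxt;             /* ... then restore the chain */

            bio->bi_phys_segments = rq.nr_phys_segments;
            bio->bi_hw_segments = rq.nr_hw_segments;
            bio->bi_flags |= (1 << BIO_SEG_VALID);
    }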
Signed-off-by: Neil Brown <neilb@suse.de>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
 block/ll_rw_blk.c | 102 ++++++++++++++++++++-------------------------
 1 file changed, 44 insertions(+), 58 deletions(-)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index ed39313c4085..e35119a72a44 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -42,6 +42,7 @@ static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
 static void init_request_from_bio(struct request *req, struct bio *bio);
 static int __make_request(struct request_queue *q, struct bio *bio);
 static struct io_context *current_io_context(gfp_t gfp_flags, int node);
+static void blk_recalc_rq_segments(struct request *rq);
 
 /*
  * For the allocated request tables
@@ -1220,16 +1221,42 @@ EXPORT_SYMBOL(blk_dump_rq_flags);
 
 void blk_recount_segments(struct request_queue *q, struct bio *bio)
 {
+        struct request rq;
+        struct bio *nxt = bio->bi_next;
+        rq.q = q;
+        rq.bio = rq.biotail = bio;
+        bio->bi_next = NULL;
+        blk_recalc_rq_segments(&rq);
+        bio->bi_next = nxt;
+        bio->bi_phys_segments = rq.nr_phys_segments;
+        bio->bi_hw_segments = rq.nr_hw_segments;
+        bio->bi_flags |= (1 << BIO_SEG_VALID);
+}
+EXPORT_SYMBOL(blk_recount_segments);
+
+static void blk_recalc_rq_segments(struct request *rq)
+{
+        int nr_phys_segs;
+        int nr_hw_segs;
+        unsigned int phys_size;
+        unsigned int hw_size;
         struct bio_vec *bv, *bvprv = NULL;
-        int i, nr_phys_segs, nr_hw_segs, seg_size, hw_seg_size, cluster;
+        int seg_size;
+        int hw_seg_size;
+        int cluster;
+        struct bio *bio;
+        int i;
         int high, highprv = 1;
+        struct request_queue *q = rq->q;
 
-        if (unlikely(!bio->bi_io_vec))
+        if (!rq->bio)
                 return;
 
         cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
-        hw_seg_size = seg_size = nr_phys_segs = nr_hw_segs = 0;
-        bio_for_each_segment(bv, bio, i) {
+        hw_seg_size = seg_size = 0;
+        phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
+        rq_for_each_bio(bio, rq)
+            bio_for_each_segment(bv, bio, i) {
                 /*
                  * the trick here is making sure that a high page is never
                  * considered part of another segment, since that might
@@ -1255,12 +1282,13 @@ void blk_recount_segments(struct request_queue *q, struct bio *bio)
                 }
 new_segment:
                 if (BIOVEC_VIRT_MERGEABLE(bvprv, bv) &&
-                    !BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len)) {
+                    !BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
                         hw_seg_size += bv->bv_len;
-                } else {
+                else {
 new_hw_segment:
-                        if (hw_seg_size > bio->bi_hw_front_size)
-                                bio->bi_hw_front_size = hw_seg_size;
+                        if (nr_hw_segs == 1 &&
+                            hw_seg_size > rq->bio->bi_hw_front_size)
+                                rq->bio->bi_hw_front_size = hw_seg_size;
                         hw_seg_size = BIOVEC_VIRT_START_SIZE(bv) + bv->bv_len;
                         nr_hw_segs++;
                 }
@@ -1270,15 +1298,15 @@ new_hw_segment:
                 seg_size = bv->bv_len;
                 highprv = high;
         }
-        if (hw_seg_size > bio->bi_hw_back_size)
-                bio->bi_hw_back_size = hw_seg_size;
-        if (nr_hw_segs == 1 && hw_seg_size > bio->bi_hw_front_size)
-                bio->bi_hw_front_size = hw_seg_size;
-        bio->bi_phys_segments = nr_phys_segs;
-        bio->bi_hw_segments = nr_hw_segs;
-        bio->bi_flags |= (1 << BIO_SEG_VALID);
+
+        if (nr_hw_segs == 1 &&
+            hw_seg_size > rq->bio->bi_hw_front_size)
+                rq->bio->bi_hw_front_size = hw_seg_size;
+        if (hw_seg_size > rq->biotail->bi_hw_back_size)
+                rq->biotail->bi_hw_back_size = hw_seg_size;
+        rq->nr_phys_segments = nr_phys_segs;
+        rq->nr_hw_segments = nr_hw_segs;
 }
-EXPORT_SYMBOL(blk_recount_segments);
 
 static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
                                    struct bio *nxt)
@@ -3329,48 +3357,6 @@ void submit_bio(int rw, struct bio *bio)
 
 EXPORT_SYMBOL(submit_bio);
 
-static void blk_recalc_rq_segments(struct request *rq)
-{
-        struct bio *bio, *prevbio = NULL;
-        int nr_phys_segs, nr_hw_segs;
-        unsigned int phys_size, hw_size;
-        struct request_queue *q = rq->q;
-
-        if (!rq->bio)
-                return;
-
-        phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
-        rq_for_each_bio(bio, rq) {
-                /* Force bio hw/phys segs to be recalculated. */
-                bio->bi_flags &= ~(1 << BIO_SEG_VALID);
-
-                nr_phys_segs += bio_phys_segments(q, bio);
-                nr_hw_segs += bio_hw_segments(q, bio);
-                if (prevbio) {
-                        int pseg = phys_size + prevbio->bi_size + bio->bi_size;
-                        int hseg = hw_size + prevbio->bi_size + bio->bi_size;
-
-                        if (blk_phys_contig_segment(q, prevbio, bio) &&
-                            pseg <= q->max_segment_size) {
-                                nr_phys_segs--;
-                                phys_size += prevbio->bi_size + bio->bi_size;
-                        } else
-                                phys_size = 0;
-
-                        if (blk_hw_contig_segment(q, prevbio, bio) &&
-                            hseg <= q->max_segment_size) {
-                                nr_hw_segs--;
-                                hw_size += prevbio->bi_size + bio->bi_size;
-                        } else
-                                hw_size = 0;
-                }
-                prevbio = bio;
-        }
-
-        rq->nr_phys_segments = nr_phys_segs;
-        rq->nr_hw_segments = nr_hw_segs;
-}
-
 static void blk_recalc_rq_sectors(struct request *rq, int nsect)
 {
         if (blk_fs_request(rq)) {