author		NeilBrown <neilb@suse.de>	2007-09-25 06:35:59 -0400
committer	Jens Axboe <axboe@carl.home.kernel.dk>	2007-10-10 03:25:56 -0400
commit		5705f7021748a69d84d6567e68e8851dab551464
tree		5a6dbc8fc6055c0334f4a97540e36a7844b9c482 /block/ll_rw_blk.c
parent		9dfa52831e96194b8649613e3131baa2c109f7dc
Introduce rq_for_each_segment replacing rq_for_each_bio
Every usage of rq_for_each_bio wraps a usage of bio_for_each_segment,
so these can be combined into rq_for_each_segment.

We define "struct req_iterator" to hold the 'bio' and 'index' that are
needed for the double iteration.

Signed-off-by: Neil Brown <neilb@suse.de>

Various compile fixes by me...

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
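The new iterator and the combined macro live in include/linux/blkdev.h, which
this file-limited view does not show. As a minimal sketch of what the commit
message describes (reconstructed, not copied from the diff): struct
req_iterator bundles the bio cursor and the per-bio segment index, and
rq_for_each_segment nests the two existing loops:

	/* Sketch only; the authoritative definitions are in
	 * include/linux/blkdev.h (the header may use an internal
	 * __rq_for_each_bio variant of the per-bio loop). */
	struct req_iterator {
		int i;			/* segment index within the current bio */
		struct bio *bio;	/* current bio in the request's chain */
	};

	#define rq_for_each_segment(bvl, _rq, _iter)			\
		rq_for_each_bio(_iter.bio, _rq)				\
			bio_for_each_segment(bvl, _iter.bio, _iter.i)

Because the macro expands to two nested for loops, a single opening brace after
rq_for_each_segment(...) pairs with a single closing brace, which is why the
hunks below can delete one level of loop closers.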
Diffstat (limited to 'block/ll_rw_blk.c')
-rw-r--r--	block/ll_rw_blk.c	19
1 file changed, 6 insertions(+), 13 deletions(-)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index e35119a72a44..094c0fa5c405 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1244,8 +1244,7 @@ static void blk_recalc_rq_segments(struct request *rq)
 	int seg_size;
 	int hw_seg_size;
 	int cluster;
-	struct bio *bio;
-	int i;
+	struct req_iterator iter;
 	int high, highprv = 1;
 	struct request_queue *q = rq->q;
 
@@ -1255,8 +1254,7 @@ static void blk_recalc_rq_segments(struct request *rq)
 	cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
 	hw_seg_size = seg_size = 0;
 	phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
-	rq_for_each_bio(bio, rq)
-	    bio_for_each_segment(bv, bio, i) {
+	rq_for_each_segment(bv, rq, iter) {
 		/*
 		 * the trick here is making sure that a high page is never
 		 * considered part of another segment, since that might
@@ -1353,8 +1351,8 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 		  struct scatterlist *sg)
 {
 	struct bio_vec *bvec, *bvprv;
-	struct bio *bio;
-	int nsegs, i, cluster;
+	struct req_iterator iter;
+	int nsegs, cluster;
 
 	nsegs = 0;
 	cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
@@ -1363,11 +1361,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 	 * for each bio in rq
 	 */
 	bvprv = NULL;
-	rq_for_each_bio(bio, rq) {
-		/*
-		 * for each segment in bio
-		 */
-		bio_for_each_segment(bvec, bio, i) {
+	rq_for_each_segment(bvec, rq, iter) {
 			int nbytes = bvec->bv_len;
 
 			if (bvprv && cluster) {
@@ -1390,8 +1384,7 @@ new_segment:
 				nsegs++;
 			}
 			bvprv = bvec;
-		} /* segments in bio */
-	} /* bios in rq */
+	} /* segments in rq */
 
 	return nsegs;
 }
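For code outside this file doing the same conversion, the pattern in the hunks
above generalizes directly. A hypothetical example (function names invented for
illustration, not part of this commit), summing a request's byte count before
and after the change:

	#include <linux/blkdev.h>
	#include <linux/bio.h>

	/* Old style: the caller supplies both loop variables. */
	static unsigned int rq_byte_count_old(struct request *rq)
	{
		struct bio *bio;
		struct bio_vec *bvec;
		unsigned int bytes = 0;
		int i;

		rq_for_each_bio(bio, rq)
			bio_for_each_segment(bvec, bio, i)
				bytes += bvec->bv_len;
		return bytes;
	}

	/* New style: one req_iterator replaces the bio pointer and index. */
	static unsigned int rq_byte_count_new(struct request *rq)
	{
		struct req_iterator iter;
		struct bio_vec *bvec;
		unsigned int bytes = 0;

		rq_for_each_segment(bvec, rq, iter)
			bytes += bvec->bv_len;
		return bytes;
	}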