author     Jens Axboe <jens.axboe@oracle.com>    2007-09-21 04:44:19 -0400
committer  Jens Axboe <jens.axboe@oracle.com>    2007-10-16 05:07:11 -0400
commit     f565913ef8a8d0cfa46a1faaf8340cc357a46f3a (patch)
tree       e90daaa8f74c33d207dc02488352a6d674c7c618
parent     96b418c960af0d5c7185ff5c4af9376eb37ac9d3 (diff)
block: convert to using sg helpers
Convert the main rq mapper (blk_rq_map_sg()) to the sg helper setup.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
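In essence, the mapper stops treating the destination table as a flat array (writing sg[nsegs]) and instead keeps a cursor that it advances with sg_next(). With chained scatterlists, entry i + 1 need not live at &sg[i + 1]: the last slot of a table chunk can be a chain link to another chunk, and sg_next() follows such links transparently. A minimal sketch of the pattern, not taken from the patch itself; fill_entry() is a hypothetical stand-in for the per-segment setup:

#include <linux/scatterlist.h>

/*
 * Sketch only: walk 'nents' entries of a (possibly chained)
 * scatterlist table.  sg_next() steps over chain entries, so the
 * same loop works whether the table is one allocation or several.
 */
static void fill_table(struct scatterlist *sglist, int nents)
{
	struct scatterlist *sg = sglist;
	int i;

	for (i = 0; i < nents; i++, sg = sg_next(sg))
		fill_entry(sg);	/* hypothetical per-entry setup */
}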
-rw-r--r--  block/ll_rw_blk.c | 18 +++++++++++-------
1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 4df7d027eb06..36d205128f9a 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -30,6 +30,7 @@
 #include <linux/cpu.h>
 #include <linux/blktrace_api.h>
 #include <linux/fault-inject.h>
+#include <linux/scatterlist.h>
 
 /*
  * for max sense size
@@ -1318,9 +1319,10 @@ static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio,
  * must make sure sg can hold rq->nr_phys_segments entries
  */
 int blk_rq_map_sg(struct request_queue *q, struct request *rq,
-		  struct scatterlist *sg)
+		  struct scatterlist *sglist)
 {
 	struct bio_vec *bvec, *bvprv;
+	struct scatterlist *next_sg, *sg;
 	struct req_iterator iter;
 	int nsegs, cluster;
 
@@ -1331,11 +1333,12 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 	 * for each bio in rq
 	 */
 	bvprv = NULL;
+	sg = next_sg = &sglist[0];
 	rq_for_each_segment(bvec, rq, iter) {
 		int nbytes = bvec->bv_len;
 
 		if (bvprv && cluster) {
-			if (sg[nsegs - 1].length + nbytes > q->max_segment_size)
+			if (sg->length + nbytes > q->max_segment_size)
 				goto new_segment;
 
 			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
@@ -1343,14 +1346,15 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 			if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
 				goto new_segment;
 
-			sg[nsegs - 1].length += nbytes;
+			sg->length += nbytes;
 		} else {
 new_segment:
-			memset(&sg[nsegs],0,sizeof(struct scatterlist));
-			sg[nsegs].page = bvec->bv_page;
-			sg[nsegs].length = nbytes;
-			sg[nsegs].offset = bvec->bv_offset;
+			sg = next_sg;
+			next_sg = sg_next(sg);
 
+			sg->page = bvec->bv_page;
+			sg->length = nbytes;
+			sg->offset = bvec->bv_offset;
 			nsegs++;
 		}
 		bvprv = bvec;
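A consumer-side note: drivers given a table filled in by blk_rq_map_sg() should walk it the same way, rather than with pointer arithmetic. A hedged sketch using the for_each_sg() iterator from the same helper set, assuming nents is the mapper's return value and setup_seg() is a hypothetical driver routine:

struct scatterlist *sg;
int i;

/*
 * for_each_sg() expands to a loop that advances via sg_next(),
 * so flat and chained tables are handled identically.
 */
for_each_sg(sglist, sg, nents, i)
	setup_seg(sg->page, sg->offset, sg->length);	/* hypothetical */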