author    Tejun Heo <htejun@gmail.com>          2008-02-19 05:36:35 -0500
committer Jens Axboe <jens.axboe@oracle.com>    2008-02-19 05:36:35 -0500
commit    6b00769fe1502b4ad97bb327ef7ac971b208bfb5
tree      2ffc51176437ebf4f8a4ab13de59a32431043f15 /block/blk-core.c
parent    40b01b9bbdf51ae543a04744283bf2d56c4a6afa
block: add request->raw_data_len
With padding and draining moved into it, the block layer may now extend requests as directed by queue parameters, so a request now has two sizes - the original request size and the extended size which matches the size of the area pointed to by bios and later by sgs. The latter size is what lower layers are primarily interested in when allocating, filling up DMA tables and setting up the controller.

Both padding and draining extend the data area to accommodate controller characteristics. As any controller which speaks SCSI can handle underflows, feeding a larger data area is safe.

So, this patch makes the primary data length field, request->data_len, indicate the size of the full data area, and adds a separate length field, request->raw_data_len, for the unmodified request size. The latter is used to report to the higher layer (userland) and wherever the original request size should be fed to the controller or device.

Signed-off-by: Tejun Heo <htejun@gmail.com>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
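[Editor's illustration, not part of the patch: a minimal, self-contained C sketch of how a lower layer might use the two lengths. struct request_sketch and prep_command are hypothetical stand-ins for the kernel's struct request and a driver's command-prep routine.]

    #include <stdio.h>

    /* Simplified stand-in for the two length fields on struct request. */
    struct request_sketch {
            unsigned int raw_data_len;  /* original request size: reported upward
                                           and carried in the command itself */
            unsigned int data_len;      /* padded/drained size: covers the full
                                           area pointed to by the bios/sgs */
    };

    /* Hypothetical driver prep: the DMA/sg mapping must span data_len bytes,
     * while the command sent to a SCSI device carries raw_data_len, since
     * controllers speaking SCSI handle the resulting underflow safely. */
    static void prep_command(const struct request_sketch *rq)
    {
            printf("map %u bytes for DMA, report %u bytes to the device\n",
                   rq->data_len, rq->raw_data_len);
    }

    int main(void)
    {
            /* e.g. a 510-byte request padded out to 512 for the controller */
            struct request_sketch rq = { .raw_data_len = 510, .data_len = 512 };
            prep_command(&rq);
            return 0;
    }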
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--    block/blk-core.c    2
1 file changed, 2 insertions(+), 0 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index c013ca22eb67..775c8516abf5 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -127,6 +127,7 @@ void rq_init(struct request_queue *q, struct request *rq)
 	rq->nr_hw_segments = 0;
 	rq->ioprio = 0;
 	rq->special = NULL;
+	rq->raw_data_len = 0;
 	rq->buffer = NULL;
 	rq->tag = -1;
 	rq->errors = 0;
@@ -2015,6 +2016,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 	rq->hard_cur_sectors = rq->current_nr_sectors;
 	rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
 	rq->buffer = bio_data(bio);
+	rq->raw_data_len = bio->bi_size;
 	rq->data_len = bio->bi_size;
 
 	rq->bio = rq->biotail = bio;