author     Tejun Heo <htejun@gmail.com>             2008-03-04 05:18:17 -0500
committer  Jens Axboe <jens.axboe@oracle.com>       2008-03-04 05:18:17 -0500
commit     e3790c7d42a545e8fe8b38b513613ca96687b670 (patch)
tree       0b849ba46007c4d7f7a34271a76d58e7406cbad7 /block
parent     7a85f8896f4b4a4a0249563b92af9e3161a6b467 (diff)
block: separate out padding from alignment
Block layer alignment was used for two different purposes - memory
alignment and padding. This causes problems in lower layers because
drivers which only require memory alignment end up with an adjusted
rq->data_len. Separate out padding such that padding occurs only if
the driver explicitly requests it.
Tomo: restore the code to update bio in blk_rq_map_user
introduced by commit 40b01b9bbdf51ae543a04744283bf2d56c4a6afa
according to padding alignment.
Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
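To make the new split concrete, here is a minimal sketch of the two queue knobs as they stand after this patch (the function name and mask values are illustrative assumptions, not code from this commit):

#include <linux/blkdev.h>

/* Illustrative sketch -- names and mask values are assumptions. */
static void example_configure_queue(struct request_queue *q)
{
	/* Memory alignment: misaligned user buffers get bounced,
	 * but rq->data_len is left alone. */
	blk_queue_dma_alignment(q, 4 - 1);

	/* Padding: transfer lengths are rounded up to a multiple of
	 * 4 only because the driver explicitly asked for it. */
	blk_queue_dma_pad(q, 4 - 1);
}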
Diffstat (limited to 'block')
 block/blk-map.c      | 20 +++++++++++++-------
 block/blk-settings.c | 17 +++++++++++++++++
 2 files changed, 30 insertions(+), 7 deletions(-)
diff --git a/block/blk-map.c b/block/blk-map.c
index f5598322954d..4e17dfd0035d 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -43,6 +43,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 			     void __user *ubuf, unsigned int len)
 {
 	unsigned long uaddr;
+	unsigned int alignment;
 	struct bio *bio, *orig_bio;
 	int reading, ret;
 
@@ -53,8 +54,8 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 	 * direct dma. else, set up kernel bounce buffers
 	 */
 	uaddr = (unsigned long) ubuf;
-	if (!(uaddr & queue_dma_alignment(q)) &&
-	    !(len & queue_dma_alignment(q)))
+	alignment = queue_dma_alignment(q) | q->dma_pad_mask;
+	if (!(uaddr & alignment) && !(len & alignment))
 		bio = bio_map_user(q, NULL, uaddr, len, reading);
 	else
 		bio = bio_copy_user(q, uaddr, len, reading);
@@ -141,15 +142,20 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 
 	/*
 	 * __blk_rq_map_user() copies the buffers if starting address
-	 * or length isn't aligned.  As the copied buffer is always
-	 * page aligned, we know that there's enough room for padding.
-	 * Extend the last bio and update rq->data_len accordingly.
+	 * or length isn't aligned to dma_pad_mask.  As the copied
+	 * buffer is always page aligned, we know that there's enough
+	 * room for padding.  Extend the last bio and update
+	 * rq->data_len accordingly.
 	 *
 	 * On unmap, bio_uncopy_user() will use unmodified
 	 * bio_map_data pointed to by bio->bi_private.
 	 */
-	if (len & queue_dma_alignment(q)) {
-		unsigned int pad_len = (queue_dma_alignment(q) & ~len) + 1;
+	if (len & q->dma_pad_mask) {
+		unsigned int pad_len = (q->dma_pad_mask & ~len) + 1;
+		struct bio *bio = rq->biotail;
+
+		bio->bi_io_vec[bio->bi_vcnt - 1].bv_len += pad_len;
+		bio->bi_size += pad_len;
 
 		rq->extra_len += pad_len;
 	}
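A note on the pad_len expression above: (q->dma_pad_mask & ~len) + 1 is exactly the number of bytes needed to round len up to the next multiple of (dma_pad_mask + 1). A standalone userspace check of the arithmetic (plain C, not kernel code; the mask value is an assumption chosen for illustration):

#include <stdio.h>

int main(void)
{
	unsigned int pad_mask = 3;	/* pad to 4-byte multiples */
	unsigned int lens[] = { 13, 14, 16, 510 };
	unsigned int i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
		unsigned int len = lens[i];

		if (len & pad_mask) {
			/* Same formula as in blk_rq_map_user() above. */
			unsigned int pad_len = (pad_mask & ~len) + 1;

			printf("len %3u -> pad %u -> total %3u\n",
			       len, pad_len, len + pad_len);
		} else {
			printf("len %3u already aligned\n", len);
		}
	}
	return 0;
}

With pad_mask = 3 this prints pads of 3, 2 and 2 for lengths 13, 14 and 510, rounding each up to the next multiple of 4.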
diff --git a/block/blk-settings.c b/block/blk-settings.c
index da923fed1f2c..a9f37f530b15 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -293,6 +293,23 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 EXPORT_SYMBOL(blk_queue_stack_limits);
 
 /**
+ * blk_queue_dma_pad - set pad mask
+ * @q:     the request queue for the device
+ * @mask:  pad mask
+ *
+ * Set pad mask.  Direct IO requests are padded to the mask specified.
+ *
+ * Appending pad buffer to a request modifies ->data_len such that it
+ * includes the pad buffer.  The original requested data length can be
+ * obtained using blk_rq_raw_data_len().
+ **/
+void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
+{
+	q->dma_pad_mask = mask;
+}
+EXPORT_SYMBOL(blk_queue_dma_pad);
+
+/**
  * blk_queue_dma_drain - Set up a drain buffer for excess dma.
  * @q:  the request queue for the device
  * @dma_drain_needed: fn which returns non-zero if drain is necessary
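Finally, for context on how the new setter is meant to be used: blk_queue_dma_pad() is a one-time queue-setup call, and the dma_pad_mask field it writes is declared outside block/ (the diffstat above is filtered to 'block', so that hunk is not shown here). A hedged sketch of a low-level driver opting in (the function name is an assumption; only blk_queue_dma_pad() and blk_rq_raw_data_len() come from this patch series):

#include <linux/blkdev.h>

/* Sketch: opt in to 4-byte length padding at queue-setup time. */
static void example_init_queue(struct request_queue *q)
{
	/* Direct-IO request lengths will be padded to a multiple of 4;
	 * rq->data_len then includes the pad, while the original
	 * length remains available via blk_rq_raw_data_len(). */
	blk_queue_dma_pad(q, 4 - 1);
}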