| author | FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp> | 2008-07-04 03:31:11 -0400 |
|---|---|---|
| committer | Jens Axboe <jens.axboe@oracle.com> | 2008-07-04 03:52:13 -0400 |
| commit | 30c00eda73d5db5bd64dd0c370161abd8df5ba4a | |
| tree | 87b5a34323eb98edf504e51b2d8e0291559579e3 | /block |
| parent | 27f8221af406e43b529a5425bc99c9b1e9bdf521 | |
block: blk_rq_map_kern uses the bounce buffers for stack buffers
blk_rq_map_kern is used for kernel internal I/Os. Some callers use
this function with stack buffers, but DMA to/from a stack buffer
leads to memory corruption on non-coherent platforms.

This patch makes blk_rq_map_kern use the bounce buffers if a caller
passes a stack buffer (on all platforms, for simplicity).
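For context only, a hedged sketch (hypothetical code, not taken from this patch or from any particular driver) of the kind of caller the message describes: an internal request whose data buffer lives on the kernel stack. Before this change, blk_rq_map_kern() would map the stack pages directly (the non-copy branch below), so the device would DMA straight into the caller's stack frame.

#include <linux/blkdev.h>

/*
 * Hypothetical caller sketch: an internal command whose payload is an
 * on-stack buffer.  Without bouncing, the stack pages themselves become
 * the DMA target.  Driver-specific command setup is omitted.
 */
static int issue_internal_command(struct request_queue *q,
				  struct gendisk *disk)
{
	u8 buf[64];		/* lives on the kernel stack */
	struct request *rq;
	int ret;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;

	ret = blk_rq_map_kern(q, rq, buf, sizeof(buf), GFP_KERNEL);
	if (!ret)
		ret = blk_execute_rq(q, disk, rq, 0);

	blk_put_request(rq);
	return ret;
}

With this patch, the stack-buffer test added below forces do_copy, so the data is bounced through pages allocated by bio_copy_kern() rather than mapped in place.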
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Tejun Heo <htejun@gmail.com>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block')
-rw-r--r-- | block/blk-map.c | 5 |
1 file changed, 5 insertions, 0 deletions
diff --git a/block/blk-map.c b/block/blk-map.c
index 813011ef8276..ddd96fb11a7d 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -269,6 +269,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 	int reading = rq_data_dir(rq) == READ;
 	int do_copy = 0;
 	struct bio *bio;
+	unsigned long stack_mask = ~(THREAD_SIZE - 1);
 
 	if (len > (q->max_hw_sectors << 9))
 		return -EINVAL;
@@ -279,6 +280,10 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 	alignment = queue_dma_alignment(q) | q->dma_pad_mask;
 	do_copy = ((kaddr & alignment) || (len & alignment));
 
+	if (!((kaddr & stack_mask) ^
+	      ((unsigned long)current->stack & stack_mask)))
+		do_copy = 1;
+
 	if (do_copy)
 		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
 	else
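As an aside (illustration only, not part of the patch), the new test is equivalent to comparing the THREAD_SIZE-aligned base of kbuf with the base of current->stack: kernel stacks are THREAD_SIZE bytes long and THREAD_SIZE-aligned, so masking off the low bits of any address inside a stack yields that stack's base. Written out as a helper with a hypothetical name:

#include <linux/sched.h>	/* current, task_struct::stack, THREAD_SIZE */

/* Hypothetical helper; same logic as the hunk above. */
static inline int kaddr_on_current_stack(unsigned long kaddr)
{
	unsigned long stack_mask = ~(THREAD_SIZE - 1);

	return (kaddr & stack_mask) ==
	       ((unsigned long)current->stack & stack_mask);
}

Note that the check only detects buffers on the current task's stack, which is the case the commit message is concerned with: callers passing their own local variables.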