| author | FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp> | 2008-04-25 06:47:50 -0400 |
|---|---|---|
| committer | Jens Axboe <jens.axboe@oracle.com> | 2008-04-29 03:50:34 -0400 |
| commit | 68154e90c9d1492d570671ae181d9a8f8530da55 (patch) | |
| tree | 4b4e9cfbaa587a576e7e52284c53ef99cf69ac4d /block/blk-map.c | |
| parent | 657e93be356f51888f56a58d2b374caefbf2fe86 (diff) | |
block: add dma alignment and padding support to blk_rq_map_kern
This patch adds bio_copy_kern, the kernel-buffer counterpart of
bio_copy_user. blk_rq_map_kern uses bio_copy_kern instead of
bio_map_kern when necessary, i.e. when the buffer address or length
does not satisfy the queue's dma alignment and padding requirements.

bio_copy_kern uses temporary pages, and the bi_end_io callback frees
these pages. bio_copy_kern saves the original kernel buffer in
bio->bi_private, so it doesn't need something like struct
bio_map_data to store information about the caller.
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
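
The completion path described in the message can be sketched roughly as
follows. This is an illustrative reconstruction, not part of the diff
shown below: bio_copy_kern and its endio helper land in fs/bio.c,
outside this page's diffstat. For reads, the callback copies data back
from the temporary pages into the original buffer found at
bio->bi_private, then frees the pages:

```c
/* Rough sketch of the bi_end_io callback described above (the real
 * helper lives in fs/bio.c, not in the block/blk-map.c diff below).
 * bio->bi_private holds the original kernel buffer, so no extra
 * bio_map_data-style bookkeeping structure is required.
 */
static void bio_copy_kern_endio(struct bio *bio, int err)
{
	struct bio_vec *bvec;
	const int read = bio_data_dir(bio) == READ;
	char *p = bio->bi_private;	/* original kernel buffer */
	int i;

	__bio_for_each_segment(bvec, bio, i, 0) {
		char *addr = page_address(bvec->bv_page);

		if (read && !err)
			memcpy(p, addr, bvec->bv_len);	/* copy back on READ */

		__free_page(bvec->bv_page);	/* drop the temporary page */
		p += bvec->bv_len;
	}

	bio_put(bio);
}
```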
Diffstat (limited to 'block/blk-map.c')
| -rw-r--r-- | block/blk-map.c | 21 |

1 file changed, 20 insertions(+), 1 deletion(-)
```diff
diff --git a/block/blk-map.c b/block/blk-map.c
index 3c942bd6422a..0b1af5a3537c 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -255,10 +255,18 @@ EXPORT_SYMBOL(blk_rq_unmap_user);
  * @kbuf:	the kernel buffer
  * @len:	length of user data
  * @gfp_mask:	memory allocation flags
+ *
+ * Description:
+ *    Data will be mapped directly if possible. Otherwise a bounce
+ *    buffer is used.
  */
 int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 		    unsigned int len, gfp_t gfp_mask)
 {
+	unsigned long kaddr;
+	unsigned int alignment;
+	int reading = rq_data_dir(rq) == READ;
+	int do_copy = 0;
 	struct bio *bio;
 
 	if (len > (q->max_hw_sectors << 9))
@@ -266,13 +274,24 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 	if (!len || !kbuf)
 		return -EINVAL;
 
-	bio = bio_map_kern(q, kbuf, len, gfp_mask);
+	kaddr = (unsigned long)kbuf;
+	alignment = queue_dma_alignment(q) | q->dma_pad_mask;
+	do_copy = ((kaddr & alignment) || (len & alignment));
+
+	if (do_copy)
+		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
+	else
+		bio = bio_map_kern(q, kbuf, len, gfp_mask);
+
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
 
 	if (rq_data_dir(rq) == WRITE)
 		bio->bi_rw |= (1 << BIO_RW);
 
+	if (do_copy)
+		rq->cmd_flags |= REQ_COPY_USER;
+
 	blk_rq_bio_prep(q, rq, bio);
 	blk_queue_bounce(q, &rq->bio);
 	rq->buffer = rq->data = NULL;
```
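
To see when the new copy path triggers: the combined mask ORs the
queue's dma alignment mask with its padding mask, and a buffer address
or length with any of those low bits set forces the bounce-buffer path.
A minimal standalone sketch, with hypothetical constants standing in
for queue_dma_alignment(q) and q->dma_pad_mask:

```c
#include <stdio.h>

/* Hypothetical stand-ins for queue_dma_alignment(q) and
 * q->dma_pad_mask: a 512-byte dma alignment requirement, no padding. */
#define DMA_ALIGN_MASK	511u
#define DMA_PAD_MASK	0u

/* Mirrors the do_copy test in blk_rq_map_kern above. */
static int needs_copy(unsigned long kaddr, unsigned int len)
{
	unsigned int alignment = DMA_ALIGN_MASK | DMA_PAD_MASK;

	return (kaddr & alignment) || (len & alignment);
}

int main(void)
{
	printf("%d\n", needs_copy(0x1000, 4096));  /* 0: bio_map_kern (direct) */
	printf("%d\n", needs_copy(0x1004, 4096));  /* 1: bio_copy_kern (bounce) */
	printf("%d\n", needs_copy(0x1000, 4100));  /* 1: bio_copy_kern (bounce) */
	return 0;
}
```

Requests that take the copy path are additionally flagged with
REQ_COPY_USER in the diff above, so completion knows the data must be
copied back out of the bounce pages before they are freed.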