From afdc1a780ef84a54b613dae6f971407748aab61c Mon Sep 17 00:00:00 2001
From: FUJITA Tomonori
Date: Fri, 11 Apr 2008 12:56:51 +0200
Subject: block: add bio_copy_user_iov support to blk_rq_map_user_iov

With this patch, blk_rq_map_user_iov uses bio_copy_user_iov when a
low-level driver (LLD) needs padding or a buffer in sg_iovec isn't
aligned; that is, it uses temporary kernel buffers instead of mapping
user pages directly.

When an LLD needs padding, blk_rq_map_sg later needs to extend the
last entry of the scatter list. bio_copy_user_iov guarantees that
there is enough room for that padding because it uses temporary
kernel buffers instead of user pages.

Until now, blk_rq_map_user_iov has required the buffers in sg_iovec
to be aligned. The comment it carries claims that drivers/scsi/sg.c
needs aligned buffers too, but in fact drivers/scsi/sg.c works with
unaligned buffers in sg_iovec (it always uses temporary kernel
buffers).

Signed-off-by: FUJITA Tomonori
Cc: Tejun Heo
Cc: Mike Christie
Cc: James Bottomley
Signed-off-by: Jens Axboe
---
 block/blk-map.c | 22 +++++++++++++++++-----
 1 file changed, 17 insertions(+), 5 deletions(-)

(limited to 'block/blk-map.c')

diff --git a/block/blk-map.c b/block/blk-map.c
index c07d9c8317f4..ab43533ba641 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -5,6 +5,7 @@
 #include <linux/module.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <linux/uio.h>	/* for struct sg_iovec */
 
 #include "blk.h"
 
@@ -194,15 +195,26 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 			struct sg_iovec *iov, int iov_count, unsigned int len)
 {
 	struct bio *bio;
+	int i, read = rq_data_dir(rq) == READ;
+	int unaligned = 0;
 
 	if (!iov || iov_count <= 0)
 		return -EINVAL;
 
-	/* we don't allow misaligned data like bio_map_user() does.  If the
-	 * user is using sg, they're expected to know the alignment constraints
-	 * and respect them accordingly */
-	bio = bio_map_user_iov(q, NULL, iov, iov_count,
-				rq_data_dir(rq) == READ);
+	for (i = 0; i < iov_count; i++) {
+		unsigned long uaddr = (unsigned long)iov[i].iov_base;
+
+		if (uaddr & queue_dma_alignment(q)) {
+			unaligned = 1;
+			break;
+		}
+	}
+
+	if (unaligned || (q->dma_pad_mask & len))
+		bio = bio_copy_user_iov(q, iov, iov_count, read);
+	else
+		bio = bio_map_user_iov(q, NULL, iov, iov_count, read);
+
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
 
--
cgit v1.2.2
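The copy-vs-map decision above is simple enough to model outside the
kernel: take the bounce-buffer path when any iovec base address trips
the queue's DMA alignment mask, or when the total length needs
padding. The following minimal user-space sketch shows that logic;
the needs_copy() helper and the mask values are illustrative
stand-ins (the real code reads queue_dma_alignment(q) and
q->dma_pad_mask), and the local sg_iovec definition only mirrors the
kernel's.

#include <stdio.h>
#include <stddef.h>

/* mirrors the kernel's struct sg_iovec for this sketch */
struct sg_iovec {
	void	*iov_base;
	size_t	iov_len;
};

/*
 * Nonzero when blk_rq_map_user_iov would take the bio_copy_user_iov
 * path: some buffer is misaligned, or the length needs padding.
 */
static int needs_copy(const struct sg_iovec *iov, int iov_count,
		      unsigned int len, unsigned long align_mask,
		      unsigned long pad_mask)
{
	int i;

	for (i = 0; i < iov_count; i++)
		if ((unsigned long)iov[i].iov_base & align_mask)
			return 1;	/* unaligned buffer */

	return (len & pad_mask) != 0;	/* length needs padding */
}

int main(void)
{
	char buf[512];
	/* base address deliberately off by one byte */
	struct sg_iovec iov = { buf + 1, sizeof(buf) - 1 };

	/* e.g. 4-byte DMA alignment, 512-byte padding boundary */
	printf("copy path? %d\n",
	       needs_copy(&iov, 1, sizeof(buf) - 1, 0x3, 0x1ff));
	return 0;
}

Compiled with any C compiler, this prints "copy path? 1" for the
misaligned iovec above; with an aligned base address and a length
that is a multiple of 512, it would print 0 and the request could
map the user pages directly.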
From f18573abcc57844a7c3c12699d40eead8728cd8a Mon Sep 17 00:00:00 2001
From: FUJITA Tomonori
Date: Fri, 11 Apr 2008 12:56:52 +0200
Subject: block: move the padding adjustment to blk_rq_map_sg

blk_rq_map_user adjusts bi_size of the last bio. That breaks the rule
that req->data_len (the true data length) is equal to sum(bio), and
it broke the SCSI command completion code.

commit e97a294ef6938512b655b1abf17656cf2b26f709 was introduced to fix
the above issue. However, the partial completion code doesn't work
with it. That commit is also a layer violation (the SCSI mid-layer
should not know about the block layer's padding).

This patch moves the padding adjustment to blk_rq_map_sg (suggested
by James). The padding works like the drain buffer. This patch breaks
the rule that req->data_len is equal to sum(sg); however, the drain
buffer already broke it, so this patch just restores the rule that
req->data_len is equal to sum(bio) without breaking anything new.

Now, when a low-level driver needs padding, blk_rq_map_user and
blk_rq_map_user_iov guarantee that there is enough room for padding,
so blk_rq_map_sg can safely extend the last entry of a scatter list.

blk_rq_map_sg must extend the last entry of a scatter list only for
requests that went through bio_copy_user_iov, so this patch
introduces a new REQ_COPY_USER flag to mark them.

Signed-off-by: FUJITA Tomonori
Cc: Tejun Heo
Cc: Mike Christie
Cc: James Bottomley
Signed-off-by: Jens Axboe
---
 block/blk-map.c | 24 +++++-------------------
 1 file changed, 5 insertions(+), 19 deletions(-)

(limited to 'block/blk-map.c')

diff --git a/block/blk-map.c b/block/blk-map.c
index ab43533ba641..3c942bd6422a 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -141,25 +141,8 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 		ubuf += ret;
 	}
 
-	/*
-	 * __blk_rq_map_user() copies the buffers if starting address
-	 * or length isn't aligned to dma_pad_mask.  As the copied
-	 * buffer is always page aligned, we know that there's enough
-	 * room for padding.  Extend the last bio and update
-	 * rq->data_len accordingly.
-	 *
-	 * On unmap, bio_uncopy_user() will use unmodified
-	 * bio_map_data pointed to by bio->bi_private.
-	 */
-	if (len & q->dma_pad_mask) {
-		unsigned int pad_len = (q->dma_pad_mask & ~len) + 1;
-		struct bio *tail = rq->biotail;
-
-		tail->bi_io_vec[tail->bi_vcnt - 1].bv_len += pad_len;
-		tail->bi_size += pad_len;
-
-		rq->extra_len += pad_len;
-	}
+	if (!bio_flagged(bio, BIO_USER_MAPPED))
+		rq->cmd_flags |= REQ_COPY_USER;
 
 	rq->buffer = rq->data = NULL;
 	return 0;
@@ -224,6 +207,9 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 		return -EINVAL;
 	}
 
+	if (!bio_flagged(bio, BIO_USER_MAPPED))
+		rq->cmd_flags |= REQ_COPY_USER;
+
 	bio_get(bio);
 	blk_rq_bio_prep(q, rq, bio);
 	rq->buffer = rq->data = NULL;
--
cgit v1.2.2
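The padding that blk_rq_map_sg now applies to REQ_COPY_USER requests
is plain mask arithmetic: (q->dma_pad_mask & ~len) + 1 bytes round
len up to the next (dma_pad_mask + 1) boundary, which is exactly the
pad_len the removed blk_rq_map_user code computed. Here is a minimal
user-space sketch of that arithmetic with an illustrative 512-byte
pad mask; the standalone pad_len() helper is ours, not a kernel
function.

#include <stdio.h>

/*
 * Bytes needed to round len up to the next (pad_mask + 1) boundary;
 * pad_mask must be of the form 2^n - 1.
 */
static unsigned int pad_len(unsigned int len, unsigned int pad_mask)
{
	return (pad_mask & ~len) + 1;
}

int main(void)
{
	unsigned int pad_mask = 0x1ff;	/* pad to 512-byte multiples */
	unsigned int lens[] = { 512, 1000, 1024 };
	unsigned int i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
		unsigned int len = lens[i];

		if (len & pad_mask)	/* pad only unaligned lengths */
			printf("len %4u -> %4u\n",
			       len, len + pad_len(len, pad_mask));
		else
			printf("len %4u -> already aligned\n", len);
	}
	return 0;
}

For example, len 1000 is padded by 24 bytes to 1024, while 512 and
1024 need no padding. Because bio_copy_user_iov allocates page-aligned
kernel buffers, there is always room for these extra bytes, so
extending the last scatterlist entry by pad_len is safe for any
REQ_COPY_USER request.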