diff options
author | FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp> | 2008-04-11 06:56:52 -0400 |
---|---|---|
committer | Jens Axboe <jens.axboe@oracle.com> | 2008-04-21 03:50:08 -0400 |
commit | f18573abcc57844a7c3c12699d40eead8728cd8a (patch) | |
tree | 99d5dcfdfe29578bb23900e63f226022f5e87281 | |
parent | afdc1a780ef84a54b613dae6f971407748aab61c (diff) |
block: move the padding adjustment to blk_rq_map_sg
blk_rq_map_user adjusts bi_size of the last bio. It breaks the rule
that req->data_len (the true data length) is equal to sum(bio). It
broke the scsi command completion code.
commit e97a294ef6938512b655b1abf17656cf2b26f709 was introduced to fix
the above issue. However, the partial completion code doesn't work
with it. The commit is also a layer violation (scsi mid-layer should
not know about the block layer's padding).
This patch moves the padding adjustment to blk_rq_map_sg (suggested by
James). The padding works like the drain buffer. This patch breaks the
rule that req->data_len is equal to sum(sg), however, the drain buffer
already broke it. So this patch just restores the rule that
req->data_len is equal to sum(bio) without breaking anything new.
Now when a low level driver needs padding, blk_rq_map_user and
blk_rq_map_user_iov guarantee there's enough room for padding.
blk_rq_map_sg can safely extend the last entry of a scatter list.
blk_rq_map_sg must extend the last entry of a scatter list only for a
request that got through bio_copy_user_iov. This patch introduces a
new REQ_COPY_USER flag.
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Tejun Heo <htejun@gmail.com>
Cc: Mike Christie <michaelc@cs.wisc.edu>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
-rw-r--r-- | block/blk-map.c | 24 | ||||
-rw-r--r-- | block/blk-merge.c | 9 | ||||
-rw-r--r-- | drivers/scsi/scsi.c | 2 | ||||
-rw-r--r-- | include/linux/blkdev.h | 2 |
4 files changed, 17 insertions, 20 deletions
diff --git a/block/blk-map.c b/block/blk-map.c index ab43533ba641..3c942bd6422a 100644 --- a/block/blk-map.c +++ b/block/blk-map.c | |||
@@ -141,25 +141,8 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq, | |||
141 | ubuf += ret; | 141 | ubuf += ret; |
142 | } | 142 | } |
143 | 143 | ||
144 | /* | 144 | if (!bio_flagged(bio, BIO_USER_MAPPED)) |
145 | * __blk_rq_map_user() copies the buffers if starting address | 145 | rq->cmd_flags |= REQ_COPY_USER; |
146 | * or length isn't aligned to dma_pad_mask. As the copied | ||
147 | * buffer is always page aligned, we know that there's enough | ||
148 | * room for padding. Extend the last bio and update | ||
149 | * rq->data_len accordingly. | ||
150 | * | ||
151 | * On unmap, bio_uncopy_user() will use unmodified | ||
152 | * bio_map_data pointed to by bio->bi_private. | ||
153 | */ | ||
154 | if (len & q->dma_pad_mask) { | ||
155 | unsigned int pad_len = (q->dma_pad_mask & ~len) + 1; | ||
156 | struct bio *tail = rq->biotail; | ||
157 | |||
158 | tail->bi_io_vec[tail->bi_vcnt - 1].bv_len += pad_len; | ||
159 | tail->bi_size += pad_len; | ||
160 | |||
161 | rq->extra_len += pad_len; | ||
162 | } | ||
163 | 146 | ||
164 | rq->buffer = rq->data = NULL; | 147 | rq->buffer = rq->data = NULL; |
165 | return 0; | 148 | return 0; |
@@ -224,6 +207,9 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, | |||
224 | return -EINVAL; | 207 | return -EINVAL; |
225 | } | 208 | } |
226 | 209 | ||
210 | if (!bio_flagged(bio, BIO_USER_MAPPED)) | ||
211 | rq->cmd_flags |= REQ_COPY_USER; | ||
212 | |||
227 | bio_get(bio); | 213 | bio_get(bio); |
228 | blk_rq_bio_prep(q, rq, bio); | 214 | blk_rq_bio_prep(q, rq, bio); |
229 | rq->buffer = rq->data = NULL; | 215 | rq->buffer = rq->data = NULL; |
diff --git a/block/blk-merge.c b/block/blk-merge.c index 0f58616bcd7f..b5c5c4a9e3f0 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c | |||
@@ -220,6 +220,15 @@ new_segment: | |||
220 | bvprv = bvec; | 220 | bvprv = bvec; |
221 | } /* segments in rq */ | 221 | } /* segments in rq */ |
222 | 222 | ||
223 | |||
224 | if (unlikely(rq->cmd_flags & REQ_COPY_USER) && | ||
225 | (rq->data_len & q->dma_pad_mask)) { | ||
226 | unsigned int pad_len = (q->dma_pad_mask & ~rq->data_len) + 1; | ||
227 | |||
228 | sg->length += pad_len; | ||
229 | rq->extra_len += pad_len; | ||
230 | } | ||
231 | |||
223 | if (q->dma_drain_size && q->dma_drain_needed(rq)) { | 232 | if (q->dma_drain_size && q->dma_drain_needed(rq)) { |
224 | if (rq->cmd_flags & REQ_RW) | 233 | if (rq->cmd_flags & REQ_RW) |
225 | memset(q->dma_drain_buffer, 0, q->dma_drain_size); | 234 | memset(q->dma_drain_buffer, 0, q->dma_drain_size); |
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index f6980bd9d8f9..12d69d7c8577 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c | |||
@@ -852,7 +852,7 @@ void scsi_finish_command(struct scsi_cmnd *cmd) | |||
852 | "Notifying upper driver of completion " | 852 | "Notifying upper driver of completion " |
853 | "(result %x)\n", cmd->result)); | 853 | "(result %x)\n", cmd->result)); |
854 | 854 | ||
855 | good_bytes = scsi_bufflen(cmd) + cmd->request->extra_len; | 855 | good_bytes = scsi_bufflen(cmd); |
856 | if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) { | 856 | if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) { |
857 | drv = scsi_cmd_to_driver(cmd); | 857 | drv = scsi_cmd_to_driver(cmd); |
858 | if (drv->done) | 858 | if (drv->done) |
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 6f79d40dd3c0..b3a58adc4352 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
@@ -112,6 +112,7 @@ enum rq_flag_bits { | |||
112 | __REQ_RW_SYNC, /* request is sync (O_DIRECT) */ | 112 | __REQ_RW_SYNC, /* request is sync (O_DIRECT) */ |
113 | __REQ_ALLOCED, /* request came from our alloc pool */ | 113 | __REQ_ALLOCED, /* request came from our alloc pool */ |
114 | __REQ_RW_META, /* metadata io request */ | 114 | __REQ_RW_META, /* metadata io request */ |
115 | __REQ_COPY_USER, /* contains copies of user pages */ | ||
115 | __REQ_NR_BITS, /* stops here */ | 116 | __REQ_NR_BITS, /* stops here */ |
116 | }; | 117 | }; |
117 | 118 | ||
@@ -133,6 +134,7 @@ enum rq_flag_bits { | |||
133 | #define REQ_RW_SYNC (1 << __REQ_RW_SYNC) | 134 | #define REQ_RW_SYNC (1 << __REQ_RW_SYNC) |
134 | #define REQ_ALLOCED (1 << __REQ_ALLOCED) | 135 | #define REQ_ALLOCED (1 << __REQ_ALLOCED) |
135 | #define REQ_RW_META (1 << __REQ_RW_META) | 136 | #define REQ_RW_META (1 << __REQ_RW_META) |
137 | #define REQ_COPY_USER (1 << __REQ_COPY_USER) | ||
136 | 138 | ||
137 | #define BLK_MAX_CDB 16 | 139 | #define BLK_MAX_CDB 16 |
138 | 140 | ||