-rw-r--r--  block/blk-map.c          24
-rw-r--r--  block/blk-merge.c         9
-rw-r--r--  drivers/scsi/scsi.c       2
-rw-r--r--  include/linux/blkdev.h    2
4 files changed, 17 insertions, 20 deletions
diff --git a/block/blk-map.c b/block/blk-map.c
index ab43533ba641..3c942bd6422a 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -141,25 +141,8 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 		ubuf += ret;
 	}
 
-	/*
-	 * __blk_rq_map_user() copies the buffers if starting address
-	 * or length isn't aligned to dma_pad_mask. As the copied
-	 * buffer is always page aligned, we know that there's enough
-	 * room for padding. Extend the last bio and update
-	 * rq->data_len accordingly.
-	 *
-	 * On unmap, bio_uncopy_user() will use unmodified
-	 * bio_map_data pointed to by bio->bi_private.
-	 */
-	if (len & q->dma_pad_mask) {
-		unsigned int pad_len = (q->dma_pad_mask & ~len) + 1;
-		struct bio *tail = rq->biotail;
-
-		tail->bi_io_vec[tail->bi_vcnt - 1].bv_len += pad_len;
-		tail->bi_size += pad_len;
-
-		rq->extra_len += pad_len;
-	}
+	if (!bio_flagged(bio, BIO_USER_MAPPED))
+		rq->cmd_flags |= REQ_COPY_USER;
 
 	rq->buffer = rq->data = NULL;
 	return 0;
@@ -224,6 +207,9 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 		return -EINVAL;
 	}
 
+	if (!bio_flagged(bio, BIO_USER_MAPPED))
+		rq->cmd_flags |= REQ_COPY_USER;
+
 	bio_get(bio);
 	blk_rq_bio_prep(q, rq, bio);
 	rq->buffer = rq->data = NULL;
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 0f58616bcd7f..b5c5c4a9e3f0 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -220,6 +220,15 @@ new_segment:
 		bvprv = bvec;
 	} /* segments in rq */
 
+
+	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
+	    (rq->data_len & q->dma_pad_mask)) {
+		unsigned int pad_len = (q->dma_pad_mask & ~rq->data_len) + 1;
+
+		sg->length += pad_len;
+		rq->extra_len += pad_len;
+	}
+
 	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
 		if (rq->cmd_flags & REQ_RW)
 			memset(q->dma_drain_buffer, 0, q->dma_drain_size);
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index f6980bd9d8f9..12d69d7c8577 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -852,7 +852,7 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
852 "Notifying upper driver of completion " 852 "Notifying upper driver of completion "
853 "(result %x)\n", cmd->result)); 853 "(result %x)\n", cmd->result));
854 854
855 good_bytes = scsi_bufflen(cmd) + cmd->request->extra_len; 855 good_bytes = scsi_bufflen(cmd);
856 if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) { 856 if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) {
857 drv = scsi_cmd_to_driver(cmd); 857 drv = scsi_cmd_to_driver(cmd);
858 if (drv->done) 858 if (drv->done)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 6f79d40dd3c0..b3a58adc4352 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -112,6 +112,7 @@ enum rq_flag_bits {
 	__REQ_RW_SYNC,		/* request is sync (O_DIRECT) */
 	__REQ_ALLOCED,		/* request came from our alloc pool */
 	__REQ_RW_META,		/* metadata io request */
+	__REQ_COPY_USER,	/* contains copies of user pages */
 	__REQ_NR_BITS,		/* stops here */
 };
 
@@ -133,6 +134,7 @@ enum rq_flag_bits {
 #define REQ_RW_SYNC	(1 << __REQ_RW_SYNC)
 #define REQ_ALLOCED	(1 << __REQ_ALLOCED)
 #define REQ_RW_META	(1 << __REQ_RW_META)
+#define REQ_COPY_USER	(1 << __REQ_COPY_USER)
 
 #define BLK_MAX_CDB	16
 