Diffstat (limited to 'block')
-rw-r--r--	block/blk-map.c		24	+++++-------------------
-rw-r--r--	block/blk-merge.c	9	+++++++++
2 files changed, 14 insertions(+), 19 deletions(-)
diff --git a/block/blk-map.c b/block/blk-map.c
index ab43533ba641..3c942bd6422a 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -141,25 +141,8 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 		ubuf += ret;
 	}
 
-	/*
-	 * __blk_rq_map_user() copies the buffers if starting address
-	 * or length isn't aligned to dma_pad_mask. As the copied
-	 * buffer is always page aligned, we know that there's enough
-	 * room for padding. Extend the last bio and update
-	 * rq->data_len accordingly.
-	 *
-	 * On unmap, bio_uncopy_user() will use unmodified
-	 * bio_map_data pointed to by bio->bi_private.
-	 */
-	if (len & q->dma_pad_mask) {
-		unsigned int pad_len = (q->dma_pad_mask & ~len) + 1;
-		struct bio *tail = rq->biotail;
-
-		tail->bi_io_vec[tail->bi_vcnt - 1].bv_len += pad_len;
-		tail->bi_size += pad_len;
-
-		rq->extra_len += pad_len;
-	}
+	if (!bio_flagged(bio, BIO_USER_MAPPED))
+		rq->cmd_flags |= REQ_COPY_USER;
 
 	rq->buffer = rq->data = NULL;
 	return 0;
@@ -224,6 +207,9 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 		return -EINVAL;
 	}
 
+	if (!bio_flagged(bio, BIO_USER_MAPPED))
+		rq->cmd_flags |= REQ_COPY_USER;
+
 	bio_get(bio);
 	blk_rq_bio_prep(q, rq, bio);
 	rq->buffer = rq->data = NULL;
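
The change in both mapping paths is the same: if the bio does not map the caller's pages directly (BIO_USER_MAPPED is clear, meaning the data was bounced through a kernel-side copy), the request is tagged REQ_COPY_USER so that blk_rq_map_sg() can later tell that the backing buffer is a page-aligned copy with room for padding. Below is a minimal userspace sketch of that flag-propagation pattern; the flag values and two-field struct definitions are illustrative stand-ins, not the kernel's real bio and request layouts.

#include <stdio.h>

/* Stand-in flag values; the real ones live in include/linux/bio.h
 * and include/linux/blkdev.h. */
#define BIO_USER_MAPPED	(1u << 0)	/* bio maps user pages directly */
#define REQ_COPY_USER	(1u << 1)	/* request data is a kernel copy */

struct bio	{ unsigned int bi_flags; };
struct request	{ unsigned int cmd_flags; };

/* The pattern from the patch: a property known at bio level is
 * recorded on the request, where blk_rq_map_sg() can see it. */
static void mark_if_copied(struct request *rq, const struct bio *bio)
{
	if (!(bio->bi_flags & BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;
}

int main(void)
{
	struct bio direct = { .bi_flags = BIO_USER_MAPPED };
	struct bio bounced = { .bi_flags = 0 };
	struct request rq1 = { 0 }, rq2 = { 0 };

	mark_if_copied(&rq1, &direct);
	mark_if_copied(&rq2, &bounced);

	printf("direct map  -> REQ_COPY_USER: %d\n", !!(rq1.cmd_flags & REQ_COPY_USER));
	printf("bounced buf -> REQ_COPY_USER: %d\n", !!(rq2.cmd_flags & REQ_COPY_USER));
	return 0;
}

The effect visible in the diff is that the tail-bio update at map time goes away entirely; padding is now applied only when the scatterlist is actually built, in blk_rq_map_sg() below.
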
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 0f58616bcd7f..b5c5c4a9e3f0 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -220,6 +220,15 @@ new_segment:
 		bvprv = bvec;
 	} /* segments in rq */
 
+
+	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
+	    (rq->data_len & q->dma_pad_mask)) {
+		unsigned int pad_len = (q->dma_pad_mask & ~rq->data_len) + 1;
+
+		sg->length += pad_len;
+		rq->extra_len += pad_len;
+	}
+
 	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
 		if (rq->cmd_flags & REQ_RW)
 			memset(q->dma_drain_buffer, 0, q->dma_drain_size);
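
The padding arithmetic that moved here is compact but worth unpacking: q->dma_pad_mask is an alignment mask (alignment minus one), and (mask & ~len) + 1 is the number of bytes needed to round len up to the next alignment boundary, because mask & ~len equals mask minus the low bits of len. A standalone sketch that checks this identity over a range of lengths; the 512-byte alignment (mask 0x1ff) is a hypothetical example value, not taken from the patch.

#include <assert.h>
#include <stdio.h>

/* pad computed exactly as in blk_rq_map_sg() above:
 * (mask & ~len) + 1 == alignment - (len % alignment) for unaligned len. */
static unsigned int pad_to_mask(unsigned int len, unsigned int mask)
{
	return (mask & ~len) + 1;
}

int main(void)
{
	const unsigned int mask = 0x1ff;	/* hypothetical 512-byte DMA alignment */
	unsigned int len;

	for (len = 1; len < 4096; len++) {
		if ((len & mask) == 0)
			continue;	/* already aligned: the kernel's guard skips padding */

		unsigned int pad = pad_to_mask(len, mask);

		assert(((len + pad) & mask) == 0);	/* lands on the next boundary */
		assert(pad <= mask);			/* never adds a full extra block */
	}

	printf("len=510, mask=0x3 -> pad=%u (510 + 2 = 512)\n", pad_to_mask(510, 0x3));
	return 0;
}

Note that the padding now extends the last scatterlist entry (sg->length += pad_len) rather than the tail bio, and it is gated on REQ_COPY_USER: per the comment removed from blk-map.c above, only a copied buffer is known to be page aligned with enough room past the data for the extra bytes.
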