Diffstat (limited to 'block/blk-map.c')
-rw-r--r--  block/blk-map.c  46
1 file changed, 22 insertions, 24 deletions
diff --git a/block/blk-map.c b/block/blk-map.c
index c07d9c8317f4..3c942bd6422a 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -5,6 +5,7 @@
 #include <linux/module.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <scsi/sg.h>		/* for struct sg_iovec */
 
 #include "blk.h"
 
@@ -140,25 +141,8 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 		ubuf += ret;
 	}
 
-	/*
-	 * __blk_rq_map_user() copies the buffers if starting address
-	 * or length isn't aligned to dma_pad_mask. As the copied
-	 * buffer is always page aligned, we know that there's enough
-	 * room for padding. Extend the last bio and update
-	 * rq->data_len accordingly.
-	 *
-	 * On unmap, bio_uncopy_user() will use unmodified
-	 * bio_map_data pointed to by bio->bi_private.
-	 */
-	if (len & q->dma_pad_mask) {
-		unsigned int pad_len = (q->dma_pad_mask & ~len) + 1;
-		struct bio *tail = rq->biotail;
-
-		tail->bi_io_vec[tail->bi_vcnt - 1].bv_len += pad_len;
-		tail->bi_size += pad_len;
-
-		rq->extra_len += pad_len;
-	}
+	if (!bio_flagged(bio, BIO_USER_MAPPED))
+		rq->cmd_flags |= REQ_COPY_USER;
 
 	rq->buffer = rq->data = NULL;
 	return 0;
@@ -194,15 +178,26 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 			struct sg_iovec *iov, int iov_count, unsigned int len)
 {
 	struct bio *bio;
+	int i, read = rq_data_dir(rq) == READ;
+	int unaligned = 0;
 
 	if (!iov || iov_count <= 0)
 		return -EINVAL;
 
-	/* we don't allow misaligned data like bio_map_user() does. If the
-	 * user is using sg, they're expected to know the alignment constraints
-	 * and respect them accordingly */
-	bio = bio_map_user_iov(q, NULL, iov, iov_count,
-				rq_data_dir(rq) == READ);
+	for (i = 0; i < iov_count; i++) {
+		unsigned long uaddr = (unsigned long)iov[i].iov_base;
+
+		if (uaddr & queue_dma_alignment(q)) {
+			unaligned = 1;
+			break;
+		}
+	}
+
+	if (unaligned || (q->dma_pad_mask & len))
+		bio = bio_copy_user_iov(q, iov, iov_count, read);
+	else
+		bio = bio_map_user_iov(q, NULL, iov, iov_count, read);
+
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
 
@@ -212,6 +207,9 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 		return -EINVAL;
 	}
 
+	if (!bio_flagged(bio, BIO_USER_MAPPED))
+		rq->cmd_flags |= REQ_COPY_USER;
+
 	bio_get(bio);
 	blk_rq_bio_prep(q, rq, bio);
 	rq->buffer = rq->data = NULL;
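
Illustration (not part of the commit): a minimal user-space sketch of the check the reworked blk_rq_map_user_iov() performs. The mask values and the struct below are assumptions standing in for queue_dma_alignment(q), q->dma_pad_mask and struct sg_iovec; in the kernel, a positive result means the buffers are bounced through bio_copy_user_iov() and the request is marked REQ_COPY_USER instead of being mapped directly with bio_map_user_iov().

/*
 * Standalone sketch of the alignment test added above.  DMA_ALIGN_MASK and
 * PAD_MASK are assumed example values, not taken from the patch; the kernel
 * reads them from the request_queue via queue_dma_alignment() and
 * q->dma_pad_mask.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct fake_iovec {			/* stand-in for struct sg_iovec */
	void	*iov_base;
	size_t	 iov_len;
};

#define DMA_ALIGN_MASK	0x1ffUL		/* pretend the queue wants 512-byte alignment */
#define PAD_MASK	0x1ffUL		/* pretend transfers are padded to 512 bytes */

/* Return 1 if the iovec would have to be copied (bounced), 0 if it could be mapped. */
static int must_copy(const struct fake_iovec *iov, int iov_count, unsigned int len)
{
	int i;

	for (i = 0; i < iov_count; i++) {
		uintptr_t uaddr = (uintptr_t)iov[i].iov_base;

		if (uaddr & DMA_ALIGN_MASK)	/* segment does not start on a DMA boundary */
			return 1;
	}
	return (PAD_MASK & len) != 0;		/* total length would need padding */
}

int main(void)
{
	static char buf[1024] __attribute__((aligned(512)));
	struct fake_iovec aligned    = { buf, sizeof(buf) };
	struct fake_iovec misaligned = { buf + 1, 511 };

	printf("aligned base, 512-multiple length -> %s\n",
	       must_copy(&aligned, 1, 1024) ? "copy" : "map");
	printf("misaligned base                   -> %s\n",
	       must_copy(&misaligned, 1, 511) ? "copy" : "map");
	return 0;
}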