 block/blk-map.c | 27 ++++++++++++++-------------
 1 file changed, 14 insertions(+), 13 deletions(-)
diff --git a/block/blk-map.c b/block/blk-map.c
index 09f7fd0bcb73..c07d9c8317f4 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -19,7 +19,6 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq,
 		rq->biotail->bi_next = bio;
 		rq->biotail = bio;
 
-		rq->raw_data_len += bio->bi_size;
 		rq->data_len += bio->bi_size;
 	}
 	return 0;
@@ -44,6 +43,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 			     void __user *ubuf, unsigned int len)
 {
 	unsigned long uaddr;
+	unsigned int alignment;
 	struct bio *bio, *orig_bio;
 	int reading, ret;
 
@@ -54,8 +54,8 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 	 * direct dma. else, set up kernel bounce buffers
 	 */
 	uaddr = (unsigned long) ubuf;
-	if (!(uaddr & queue_dma_alignment(q)) &&
-	    !(len & queue_dma_alignment(q)))
+	alignment = queue_dma_alignment(q) | q->dma_pad_mask;
+	if (!(uaddr & alignment) && !(len & alignment))
 		bio = bio_map_user(q, NULL, uaddr, len, reading);
 	else
 		bio = bio_copy_user(q, uaddr, len, reading);
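
The hunk above can OR the two constraints together because queue_dma_alignment(q) and q->dma_pad_mask are both low-bit masks, so the combined mask is the stricter of the two. A minimal userspace sketch of the same decision, using hypothetical mask and buffer values in place of the real queue fields:

	#include <stdio.h>

	int main(void)
	{
		unsigned long dma_align = 0x3;	/* hypothetical: 4-byte DMA alignment mask */
		unsigned long pad_mask = 0x1ff;	/* hypothetical: pad to 512 bytes */
		unsigned long alignment = dma_align | pad_mask;
		unsigned long uaddr = 0x1000;	/* user buffer address */
		unsigned long len = 1024;	/* transfer length */

		/* Map the user pages directly only when both the address
		 * and the length satisfy the combined mask; otherwise
		 * bounce through a kernel copy, as bio_copy_user() does. */
		if (!(uaddr & alignment) && !(len & alignment))
			printf("direct map (bio_map_user path)\n");
		else
			printf("bounce copy (bio_copy_user path)\n");
		return 0;
	}
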
@@ -142,20 +142,22 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 
 	/*
 	 * __blk_rq_map_user() copies the buffers if starting address
-	 * or length isn't aligned.  As the copied buffer is always
-	 * page aligned, we know that there's enough room for padding.
-	 * Extend the last bio and update rq->data_len accordingly.
+	 * or length isn't aligned to dma_pad_mask.  As the copied
+	 * buffer is always page aligned, we know that there's enough
+	 * room for padding.  Extend the last bio and update
+	 * rq->data_len accordingly.
 	 *
 	 * On unmap, bio_uncopy_user() will use unmodified
 	 * bio_map_data pointed to by bio->bi_private.
 	 */
-	if (len & queue_dma_alignment(q)) {
-		unsigned int pad_len = (queue_dma_alignment(q) & ~len) + 1;
-		struct bio *bio = rq->biotail;
+	if (len & q->dma_pad_mask) {
+		unsigned int pad_len = (q->dma_pad_mask & ~len) + 1;
+		struct bio *tail = rq->biotail;
 
-		bio->bi_io_vec[bio->bi_vcnt - 1].bv_len += pad_len;
-		bio->bi_size += pad_len;
-		rq->data_len += pad_len;
+		tail->bi_io_vec[tail->bi_vcnt - 1].bv_len += pad_len;
+		tail->bi_size += pad_len;
+
+		rq->extra_len += pad_len;
 	}
 
 	rq->buffer = rq->data = NULL;
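
The pad_len expression in the hunk above rounds len up to the next multiple of (q->dma_pad_mask + 1): with the mask being 2^n - 1, (mask & ~len) + 1 is exactly the distance from len to the next 2^n boundary. A short standalone check of that arithmetic, with a hypothetical 512-byte pad mask:

	#include <assert.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int dma_pad_mask = 511;	/* hypothetical: pad to 512 bytes */
		unsigned int len = 1100;		/* 1100 & 511 == 76, so padding is needed */

		if (len & dma_pad_mask) {
			unsigned int pad_len = (dma_pad_mask & ~len) + 1;

			/* 511 & ~1100 == 435, so pad_len == 436 and the
			 * padded length 1536 is a multiple of 512. */
			assert((len + pad_len) % (dma_pad_mask + 1) == 0);
			printf("pad_len = %u, padded len = %u\n", pad_len, len + pad_len);
		}
		return 0;
	}

Note that the padding now accumulates in rq->extra_len rather than rq->data_len, so the padded bytes are no longer counted as part of the request's data length.
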
@@ -215,7 +217,6 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 	rq->buffer = rq->data = NULL;
 	return 0;
 }
-EXPORT_SYMBOL(blk_rq_map_user_iov);
 
 /**
  * blk_rq_unmap_user - unmap a request with user data