path: root/block/blk-map.c
author     Ingo Molnar <mingo@elte.hu>   2008-10-28 11:26:12 -0400
committer  Ingo Molnar <mingo@elte.hu>   2008-10-28 11:26:12 -0400
commit     7a9787e1eba95a166265e6a260cf30af04ef0a99
tree       e730a4565e0318140d2fbd2f0415d18a339d7336   /block/blk-map.c
parent     41b9eb264c8407655db57b60b4457fe1b2ec9977
parent     0173a3265b228da319ceb9c1ec6a5682fd1b2d92
Merge commit 'v2.6.28-rc2' into x86/pci-ioapic-boot-irq-quirks
Diffstat (limited to 'block/blk-map.c')
-rw-r--r--   block/blk-map.c   72
1 file changed, 39 insertions(+), 33 deletions(-)
diff --git a/block/blk-map.c b/block/blk-map.c
index ddd96fb11a7d..4849fa36161e 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -41,10 +41,10 @@ static int __blk_rq_unmap_user(struct bio *bio)
 }
 
 static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
-                             void __user *ubuf, unsigned int len)
+                             struct rq_map_data *map_data, void __user *ubuf,
+                             unsigned int len, int null_mapped, gfp_t gfp_mask)
 {
         unsigned long uaddr;
-        unsigned int alignment;
         struct bio *bio, *orig_bio;
         int reading, ret;
 
@@ -55,15 +55,17 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
          * direct dma. else, set up kernel bounce buffers
          */
         uaddr = (unsigned long) ubuf;
-        alignment = queue_dma_alignment(q) | q->dma_pad_mask;
-        if (!(uaddr & alignment) && !(len & alignment))
-                bio = bio_map_user(q, NULL, uaddr, len, reading);
+        if (blk_rq_aligned(q, ubuf, len) && !map_data)
+                bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
         else
-                bio = bio_copy_user(q, uaddr, len, reading);
+                bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
 
         if (IS_ERR(bio))
                 return PTR_ERR(bio);
 
+        if (null_mapped)
+                bio->bi_flags |= (1 << BIO_NULL_MAPPED);
+
         orig_bio = bio;
         blk_queue_bounce(q, &bio);
 
@@ -85,17 +87,19 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 }
 
 /**
- * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
+ * blk_rq_map_user - map user data to a request, for REQ_TYPE_BLOCK_PC usage
  * @q: request queue where request should be inserted
  * @rq: request structure to fill
+ * @map_data: pointer to the rq_map_data holding pages (if necessary)
  * @ubuf: the user buffer
  * @len: length of user data
+ * @gfp_mask: memory allocation flags
  *
  * Description:
- *    Data will be mapped directly for zero copy io, if possible. Otherwise
+ *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
  *    a kernel bounce buffer is used.
  *
- *    A matching blk_rq_unmap_user() must be issued at the end of io, while
+ *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
  *    still in process context.
  *
  *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
@@ -105,16 +109,22 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
  *    unmapping.
  */
 int blk_rq_map_user(struct request_queue *q, struct request *rq,
-                    void __user *ubuf, unsigned long len)
+                    struct rq_map_data *map_data, void __user *ubuf,
+                    unsigned long len, gfp_t gfp_mask)
 {
         unsigned long bytes_read = 0;
         struct bio *bio = NULL;
-        int ret;
+        int ret, null_mapped = 0;
 
         if (len > (q->max_hw_sectors << 9))
                 return -EINVAL;
-        if (!len || !ubuf)
+        if (!len)
                 return -EINVAL;
+        if (!ubuf) {
+                if (!map_data || rq_data_dir(rq) != READ)
+                        return -EINVAL;
+                null_mapped = 1;
+        }
 
         while (bytes_read != len) {
                 unsigned long map_len, end, start;
@@ -132,7 +142,8 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
                 if (end - start > BIO_MAX_PAGES)
                         map_len -= PAGE_SIZE;
 
-                ret = __blk_rq_map_user(q, rq, ubuf, map_len);
+                ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
+                                        null_mapped, gfp_mask);
                 if (ret < 0)
                         goto unmap_rq;
                 if (!bio)
@@ -154,18 +165,20 @@ unmap_rq:
 EXPORT_SYMBOL(blk_rq_map_user);
 
 /**
- * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
+ * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
  * @q: request queue where request should be inserted
  * @rq: request to map data to
+ * @map_data: pointer to the rq_map_data holding pages (if necessary)
  * @iov: pointer to the iovec
  * @iov_count: number of elements in the iovec
  * @len: I/O byte count
+ * @gfp_mask: memory allocation flags
  *
  * Description:
- *    Data will be mapped directly for zero copy io, if possible. Otherwise
+ *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
  *    a kernel bounce buffer is used.
  *
- *    A matching blk_rq_unmap_user() must be issued at the end of io, while
+ *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
  *    still in process context.
  *
  *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
@@ -175,7 +188,8 @@ EXPORT_SYMBOL(blk_rq_map_user);
  *    unmapping.
  */
 int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
-                        struct sg_iovec *iov, int iov_count, unsigned int len)
+                        struct rq_map_data *map_data, struct sg_iovec *iov,
+                        int iov_count, unsigned int len, gfp_t gfp_mask)
 {
         struct bio *bio;
         int i, read = rq_data_dir(rq) == READ;
@@ -193,10 +207,11 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
                 }
         }
 
-        if (unaligned || (q->dma_pad_mask & len))
-                bio = bio_copy_user_iov(q, iov, iov_count, read);
+        if (unaligned || (q->dma_pad_mask & len) || map_data)
+                bio = bio_copy_user_iov(q, map_data, iov, iov_count, read,
+                                        gfp_mask);
         else
-                bio = bio_map_user_iov(q, NULL, iov, iov_count, read);
+                bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask);
 
         if (IS_ERR(bio))
                 return PTR_ERR(bio);
@@ -216,6 +231,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
         rq->buffer = rq->data = NULL;
         return 0;
 }
+EXPORT_SYMBOL(blk_rq_map_user_iov);
 
 /**
  * blk_rq_unmap_user - unmap a request with user data
@@ -224,7 +240,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
  * Description:
  *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
  *    supply the original rq->bio from the blk_rq_map_user() return, since
- *    the io completion may have changed rq->bio.
+ *    the I/O completion may have changed rq->bio.
  */
 int blk_rq_unmap_user(struct bio *bio)
 {
@@ -250,7 +266,7 @@ int blk_rq_unmap_user(struct bio *bio)
 EXPORT_SYMBOL(blk_rq_unmap_user);
 
 /**
- * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage
+ * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
  * @q: request queue where request should be inserted
  * @rq: request to fill
  * @kbuf: the kernel buffer
@@ -264,26 +280,16 @@ EXPORT_SYMBOL(blk_rq_unmap_user);
 int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
                     unsigned int len, gfp_t gfp_mask)
 {
-        unsigned long kaddr;
-        unsigned int alignment;
         int reading = rq_data_dir(rq) == READ;
         int do_copy = 0;
         struct bio *bio;
-        unsigned long stack_mask = ~(THREAD_SIZE - 1);
 
         if (len > (q->max_hw_sectors << 9))
                 return -EINVAL;
         if (!len || !kbuf)
                 return -EINVAL;
 
-        kaddr = (unsigned long)kbuf;
-        alignment = queue_dma_alignment(q) | q->dma_pad_mask;
-        do_copy = ((kaddr & alignment) || (len & alignment));
-
-        if (!((kaddr & stack_mask) ^
-              ((unsigned long)current->stack & stack_mask)))
-                do_copy = 1;
-
+        do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
         if (do_copy)
                 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
         else
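
For reference, a minimal caller sketch against the post-merge prototypes shown in this diff. It is not part of the commit: the function example_pc_read() and its surrounding driver are hypothetical, the SCSI CDB setup is omitted, and map_data is passed as NULL so the zero-copy vs. bounce-buffer choice is left to blk_rq_aligned().

/*
 * Hypothetical caller sketch (not from this commit): issue a
 * REQ_TYPE_BLOCK_PC read using the post-merge blk_rq_map_user()
 * prototype above.  map_data is NULL, gfp_mask is GFP_KERNEL because
 * we are in process context.
 */
#include <linux/blkdev.h>
#include <linux/bio.h>

static int example_pc_read(struct request_queue *q, struct gendisk *disk,
                           void __user *ubuf, unsigned int len)
{
        struct request *rq;
        struct bio *bio;
        int ret;

        rq = blk_get_request(q, READ, GFP_KERNEL);
        if (!rq)
                return -ENOMEM;
        rq->cmd_type = REQ_TYPE_BLOCK_PC;
        /* Filling in rq->cmd[] with the actual CDB is omitted here. */

        /* Map the user buffer; a copy (bounce) bio may be built internally. */
        ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
        if (ret)
                goto out;

        /* Save the original bio: completion may change rq->bio. */
        bio = rq->bio;

        ret = blk_execute_rq(q, disk, rq, 0);

        /* Unmap with the saved bio, still in process context. */
        if (blk_rq_unmap_user(bio) && !ret)
                ret = -EFAULT;
out:
        blk_put_request(rq);
        return ret;
}

The new arguments are visible here: gfp_mask lets the caller's allocation context reach the bio allocations, while a non-NULL map_data would instead force the copy path onto pages the caller supplies.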