author     James Bottomley <jejb@titanic.(none)>  2005-08-28 11:43:07 -0400
committer  James Bottomley <jejb@titanic.(none)>  2005-08-28 11:43:07 -0400
commit     31151ba2cef171344beac254e65bd7e00138bb0d
tree       a8f9cd3c0395656d974125c0ca7ed635aacddeee
parent     3d52acb34247816c453f94596e6c7fc4499b76dc
parent     73747aed04d3b3fb694961d025f81863b99c6898
fix mismerge in ll_rw_blk.c
-rw-r--r--  drivers/block/ll_rw_blk.c   | 192
-rw-r--r--  drivers/block/scsi_ioctl.c  |  60
-rw-r--r--  drivers/cdrom/cdrom.c       |  15
-rw-r--r--  drivers/ide/ide-disk.c      |   2
-rw-r--r--  fs/bio.c                    | 227
-rw-r--r--  include/linux/bio.h         |   6
-rw-r--r--  include/linux/blkdev.h      |  10
7 files changed, 404 insertions(+), 108 deletions(-)
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index 3c818544475e..0c7599563b65 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -284,6 +284,7 @@ static inline void rq_init(request_queue_t *q, struct request *rq)
 	rq->special = NULL;
 	rq->data_len = 0;
 	rq->data = NULL;
+	rq->nr_phys_segments = 0;
 	rq->sense = NULL;
 	rq->end_io = NULL;
 	rq->end_io_data = NULL;
@@ -2115,7 +2116,7 @@ EXPORT_SYMBOL(blk_insert_request);
 /**
  * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
  * @q: request queue where request should be inserted
- * @rw: READ or WRITE data
+ * @rq: request structure to fill
  * @ubuf: the user buffer
  * @len: length of user data
  *
@@ -2132,21 +2133,19 @@ EXPORT_SYMBOL(blk_insert_request);
  * original bio must be passed back in to blk_rq_unmap_user() for proper
  * unmapping.
  */
-struct request *blk_rq_map_user(request_queue_t *q, int rw, void __user *ubuf,
-				unsigned int len)
+int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
+		    unsigned int len)
 {
 	unsigned long uaddr;
-	struct request *rq;
 	struct bio *bio;
+	int reading;
 
 	if (len > (q->max_sectors << 9))
-		return ERR_PTR(-EINVAL);
-	if ((!len && ubuf) || (len && !ubuf))
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
+	if (!len || !ubuf)
+		return -EINVAL;
 
-	rq = blk_get_request(q, rw, __GFP_WAIT);
-	if (!rq)
-		return ERR_PTR(-ENOMEM);
+	reading = rq_data_dir(rq) == READ;
 
 	/*
 	 * if alignment requirement is satisfied, map in user pages for
@@ -2154,9 +2153,9 @@ struct request *blk_rq_map_user(request_queue_t *q, int rw, void __user *ubuf,
 	 */
 	uaddr = (unsigned long) ubuf;
 	if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
-		bio = bio_map_user(q, NULL, uaddr, len, rw == READ);
+		bio = bio_map_user(q, NULL, uaddr, len, reading);
 	else
-		bio = bio_copy_user(q, uaddr, len, rw == READ);
+		bio = bio_copy_user(q, uaddr, len, reading);
 
 	if (!IS_ERR(bio)) {
 		rq->bio = rq->biotail = bio;
@@ -2164,28 +2163,70 @@ struct request *blk_rq_map_user(request_queue_t *q, int rw, void __user *ubuf,
 
 		rq->buffer = rq->data = NULL;
 		rq->data_len = len;
-		return rq;
+		return 0;
 	}
 
 	/*
 	 * bio is the err-ptr
 	 */
-	blk_put_request(rq);
-	return (struct request *) bio;
+	return PTR_ERR(bio);
 }
 
 EXPORT_SYMBOL(blk_rq_map_user);
 
 /**
+ * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
+ * @q: request queue where request should be inserted
+ * @rq: request to map data to
+ * @iov: pointer to the iovec
+ * @iov_count: number of elements in the iovec
+ *
+ * Description:
+ *    Data will be mapped directly for zero copy io, if possible. Otherwise
+ *    a kernel bounce buffer is used.
+ *
+ *    A matching blk_rq_unmap_user() must be issued at the end of io, while
+ *    still in process context.
+ *
+ *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
+ *    before being submitted to the device, as pages mapped may be out of
+ *    reach. It's the callers responsibility to make sure this happens. The
+ *    original bio must be passed back in to blk_rq_unmap_user() for proper
+ *    unmapping.
+ */
+int blk_rq_map_user_iov(request_queue_t *q, struct request *rq,
+			struct sg_iovec *iov, int iov_count)
+{
+	struct bio *bio;
+
+	if (!iov || iov_count <= 0)
+		return -EINVAL;
+
+	/* we don't allow misaligned data like bio_map_user() does. If the
+	 * user is using sg, they're expected to know the alignment constraints
+	 * and respect them accordingly */
+	bio = bio_map_user_iov(q, NULL, iov, iov_count, rq_data_dir(rq)== READ);
+	if (IS_ERR(bio))
+		return PTR_ERR(bio);
+
+	rq->bio = rq->biotail = bio;
+	blk_rq_bio_prep(q, rq, bio);
+	rq->buffer = rq->data = NULL;
+	rq->data_len = bio->bi_size;
+	return 0;
+}
+
+EXPORT_SYMBOL(blk_rq_map_user_iov);
+
+/**
  * blk_rq_unmap_user - unmap a request with user data
- * @rq: request to be unmapped
- * @bio: bio for the request
+ * @bio: bio to be unmapped
  * @ulen: length of user buffer
  *
  * Description:
- *    Unmap a request previously mapped by blk_rq_map_user().
+ *    Unmap a bio previously mapped by blk_rq_map_user().
  */
-int blk_rq_unmap_user(struct request *rq, struct bio *bio, unsigned int ulen)
+int blk_rq_unmap_user(struct bio *bio, unsigned int ulen)
 {
 	int ret = 0;
 
@@ -2196,31 +2237,89 @@ int blk_rq_unmap_user(struct request *rq, struct bio *bio, unsigned int ulen)
 		ret = bio_uncopy_user(bio);
 	}
 
-	blk_put_request(rq);
-	return ret;
+	return 0;
 }
 
 EXPORT_SYMBOL(blk_rq_unmap_user);
 
 /**
+ * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage
+ * @q: request queue where request should be inserted
+ * @rq: request to fill
+ * @kbuf: the kernel buffer
+ * @len: length of user data
+ * @gfp_mask: memory allocation flags
+ */
+int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
+		    unsigned int len, unsigned int gfp_mask)
+{
+	struct bio *bio;
+
+	if (len > (q->max_sectors << 9))
+		return -EINVAL;
+	if (!len || !kbuf)
+		return -EINVAL;
+
+	bio = bio_map_kern(q, kbuf, len, gfp_mask);
+	if (IS_ERR(bio))
+		return PTR_ERR(bio);
+
+	if (rq_data_dir(rq) == WRITE)
+		bio->bi_rw |= (1 << BIO_RW);
+
+	rq->bio = rq->biotail = bio;
+	blk_rq_bio_prep(q, rq, bio);
+
+	rq->buffer = rq->data = NULL;
+	rq->data_len = len;
+	return 0;
+}
+
+EXPORT_SYMBOL(blk_rq_map_kern);
+
+/**
+ * blk_execute_rq_nowait - insert a request into queue for execution
+ * @q: queue to insert the request in
+ * @bd_disk: matching gendisk
+ * @rq: request to insert
+ * @at_head: insert request at head or tail of queue
+ * @done: I/O completion handler
+ *
+ * Description:
+ *    Insert a fully prepared request at the back of the io scheduler queue
+ *    for execution. Don't wait for completion.
+ */
+void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
+			   struct request *rq, int at_head,
+			   void (*done)(struct request *))
+{
+	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
+
+	rq->rq_disk = bd_disk;
+	rq->flags |= REQ_NOMERGE;
+	rq->end_io = done;
+	elv_add_request(q, rq, where, 1);
+	generic_unplug_device(q);
+}
+
+/**
  * blk_execute_rq - insert a request into queue for execution
  * @q: queue to insert the request in
  * @bd_disk: matching gendisk
  * @rq: request to insert
+ * @at_head: insert request at head or tail of queue
 *
 * Description:
 *    Insert a fully prepared request at the back of the io scheduler queue
- *    for execution.
+ *    for execution and wait for completion.
 */
 int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk,
-		   struct request *rq)
+		   struct request *rq, int at_head)
 {
 	DECLARE_COMPLETION(wait);
 	char sense[SCSI_SENSE_BUFFERSIZE];
 	int err = 0;
 
-	rq->rq_disk = bd_disk;
-
 	/*
 	 * we need an extra reference to the request, so we can look at
 	 * it after io completion
@@ -2233,11 +2332,8 @@ int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk,
 		rq->sense_len = 0;
 	}
 
-	rq->flags |= REQ_NOMERGE;
 	rq->waiting = &wait;
-	rq->end_io = blk_end_sync_rq;
-	elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1);
-	generic_unplug_device(q);
+	blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
 	wait_for_completion(&wait);
 	rq->waiting = NULL;
 
@@ -2277,6 +2373,44 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
 
 EXPORT_SYMBOL(blkdev_issue_flush);
 
+/**
+ * blkdev_scsi_issue_flush_fn - issue flush for SCSI devices
+ * @q: device queue
+ * @disk: gendisk
+ * @error_sector: error offset
+ *
+ * Description:
+ *    Devices understanding the SCSI command set, can use this function as
+ *    a helper for issuing a cache flush. Note: driver is required to store
+ *    the error offset (in case of error flushing) in ->sector of struct
+ *    request.
+ */
+int blkdev_scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
+			       sector_t *error_sector)
+{
+	struct request *rq = blk_get_request(q, WRITE, __GFP_WAIT);
+	int ret;
+
+	rq->flags |= REQ_BLOCK_PC | REQ_SOFTBARRIER;
+	rq->sector = 0;
+	memset(rq->cmd, 0, sizeof(rq->cmd));
+	rq->cmd[0] = 0x35;
+	rq->cmd_len = 12;
+	rq->data = NULL;
+	rq->data_len = 0;
+	rq->timeout = 60 * HZ;
+
+	ret = blk_execute_rq(q, disk, rq, 0);
+
+	if (ret && error_sector)
+		*error_sector = rq->sector;
+
+	blk_put_request(rq);
+	return ret;
+}
+
+EXPORT_SYMBOL(blkdev_scsi_issue_flush_fn);
+
 static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
 {
 	int rw = rq_data_dir(rq);
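
The hunks above change blk_rq_map_user() to fill a caller-supplied request instead of allocating one, make blk_rq_unmap_user() take the bio, add blk_rq_map_kern() and blk_rq_map_user_iov(), and give blk_execute_rq() an at_head argument on top of the new blk_execute_rq_nowait(). A minimal sketch of the resulting calling convention follows; it is not part of the patch, the helper name pc_read_user() is hypothetical, and CDB setup is omitted.

/*
 * Sketch only, not part of this commit: the caller now allocates the
 * request itself, maps the user buffer into it, executes, then unmaps
 * the bio and frees the request. pc_read_user() is a hypothetical name.
 */
static int pc_read_user(request_queue_t *q, struct gendisk *disk,
			void __user *ubuf, unsigned int len)
{
	struct request *rq;
	struct bio *bio;
	int ret;

	rq = blk_get_request(q, READ, __GFP_WAIT);	/* caller owns rq */
	if (!rq)
		return -ENOMEM;

	rq->flags |= REQ_BLOCK_PC;
	/* rq->cmd[] / rq->cmd_len setup omitted in this sketch */

	ret = blk_rq_map_user(q, rq, ubuf, len);	/* 0 or -errno now */
	if (ret)
		goto out;

	bio = rq->bio;					/* keep for unmapping */

	ret = blk_execute_rq(q, disk, rq, 0);		/* new at_head argument */

	if (blk_rq_unmap_user(bio, len))		/* takes the bio, not rq */
		ret = -EFAULT;
out:
	blk_put_request(rq);
	return ret;
}
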
diff --git a/drivers/block/scsi_ioctl.c b/drivers/block/scsi_ioctl.c
index 681871ca5d60..abb2df249fd3 100644
--- a/drivers/block/scsi_ioctl.c
+++ b/drivers/block/scsi_ioctl.c
@@ -216,7 +216,7 @@ static int sg_io(struct file *file, request_queue_t *q,
 			struct gendisk *bd_disk, struct sg_io_hdr *hdr)
 {
 	unsigned long start_time;
-	int reading, writing;
+	int writing = 0, ret = 0;
 	struct request *rq;
 	struct bio *bio;
 	char sense[SCSI_SENSE_BUFFERSIZE];
@@ -231,38 +231,48 @@ static int sg_io(struct file *file, request_queue_t *q,
 	if (verify_command(file, cmd))
 		return -EPERM;
 
-	/*
-	 * we'll do that later
-	 */
-	if (hdr->iovec_count)
-		return -EOPNOTSUPP;
-
 	if (hdr->dxfer_len > (q->max_sectors << 9))
 		return -EIO;
 
-	reading = writing = 0;
-	if (hdr->dxfer_len) {
+	if (hdr->dxfer_len)
 		switch (hdr->dxfer_direction) {
 		default:
 			return -EINVAL;
 		case SG_DXFER_TO_FROM_DEV:
-			reading = 1;
-			/* fall through */
 		case SG_DXFER_TO_DEV:
 			writing = 1;
 			break;
 		case SG_DXFER_FROM_DEV:
-			reading = 1;
 			break;
 		}
 
-		rq = blk_rq_map_user(q, writing ? WRITE : READ, hdr->dxferp,
-				     hdr->dxfer_len);
+	rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
+	if (!rq)
+		return -ENOMEM;
+
+	if (hdr->iovec_count) {
+		const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
+		struct sg_iovec *iov;
+
+		iov = kmalloc(size, GFP_KERNEL);
+		if (!iov) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		if (copy_from_user(iov, hdr->dxferp, size)) {
+			kfree(iov);
+			ret = -EFAULT;
+			goto out;
+		}
+
+		ret = blk_rq_map_user_iov(q, rq, iov, hdr->iovec_count);
+		kfree(iov);
+	} else if (hdr->dxfer_len)
+		ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len);
 
-		if (IS_ERR(rq))
-			return PTR_ERR(rq);
-	} else
-		rq = blk_get_request(q, READ, __GFP_WAIT);
+	if (ret)
+		goto out;
 
 	/*
 	 * fill in request structure
@@ -298,7 +308,7 @@ static int sg_io(struct file *file, request_queue_t *q,
 	 * (if he doesn't check that is his problem).
 	 * N.B. a non-zero SCSI status is _not_ necessarily an error.
 	 */
-	blk_execute_rq(q, bd_disk, rq);
+	blk_execute_rq(q, bd_disk, rq, 0);
 
 	/* write to all output members */
 	hdr->status = 0xff & rq->errors;
@@ -320,12 +330,14 @@ static int sg_io(struct file *file, request_queue_t *q,
 		hdr->sb_len_wr = len;
 	}
 
-	if (blk_rq_unmap_user(rq, bio, hdr->dxfer_len))
-		return -EFAULT;
+	if (blk_rq_unmap_user(bio, hdr->dxfer_len))
+		ret = -EFAULT;
 
 	/* may not have succeeded, but output values written to control
 	 * structure (struct sg_io_hdr). */
-	return 0;
+out:
+	blk_put_request(rq);
+	return ret;
 }
 
 #define OMAX_SB_LEN 16	/* For backward compatibility */
@@ -408,7 +420,7 @@ static int sg_scsi_ioctl(struct file *file, request_queue_t *q,
 	rq->data_len = bytes;
 	rq->flags |= REQ_BLOCK_PC;
 
-	blk_execute_rq(q, bd_disk, rq);
+	blk_execute_rq(q, bd_disk, rq, 0);
 	err = rq->errors & 0xff;	/* only 8 bit SCSI status */
 	if (err) {
 		if (rq->sense_len && rq->sense) {
@@ -561,7 +573,7 @@ int scsi_cmd_ioctl(struct file *file, struct gendisk *bd_disk, unsigned int cmd,
 		rq->cmd[0] = GPCMD_START_STOP_UNIT;
 		rq->cmd[4] = 0x02 + (close != 0);
 		rq->cmd_len = 6;
-		err = blk_execute_rq(q, bd_disk, rq);
+		err = blk_execute_rq(q, bd_disk, rq, 0);
 		blk_put_request(rq);
 		break;
 	default:
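
With this change sg_io() always allocates the request up front and, when hdr->iovec_count is non-zero, copies the user's sg_iovec table into the kernel before handing it to blk_rq_map_user_iov(). The sketch below isolates that iovec path; it is not part of the patch and map_user_iovec() is a hypothetical helper name.

/*
 * Sketch only, not part of this commit: copy a user iovec table and map
 * every segment into one bio on the request. map_user_iovec() is a
 * hypothetical helper name.
 */
static int map_user_iovec(request_queue_t *q, struct request *rq,
			  void __user *uiov, int iov_count)
{
	const int size = sizeof(struct sg_iovec) * iov_count;
	struct sg_iovec *iov;
	int ret;

	iov = kmalloc(size, GFP_KERNEL);
	if (!iov)
		return -ENOMEM;

	if (copy_from_user(iov, uiov, size)) {
		kfree(iov);
		return -EFAULT;
	}

	/* each segment must satisfy queue_dma_alignment(q) */
	ret = blk_rq_map_user_iov(q, rq, iov, iov_count);
	kfree(iov);
	return ret;
}
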
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index beaa561f2ed8..153960348414 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2097,6 +2097,10 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
 	if (!q)
 		return -ENXIO;
 
+	rq = blk_get_request(q, READ, GFP_KERNEL);
+	if (!rq)
+		return -ENOMEM;
+
 	cdi->last_sense = 0;
 
 	while (nframes) {
@@ -2108,9 +2112,9 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
 
 		len = nr * CD_FRAMESIZE_RAW;
 
-		rq = blk_rq_map_user(q, READ, ubuf, len);
-		if (IS_ERR(rq))
-			return PTR_ERR(rq);
+		ret = blk_rq_map_user(q, rq, ubuf, len);
+		if (ret)
+			break;
 
 		memset(rq->cmd, 0, sizeof(rq->cmd));
 		rq->cmd[0] = GPCMD_READ_CD;
@@ -2132,13 +2136,13 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
 		if (rq->bio)
 			blk_queue_bounce(q, &rq->bio);
 
-		if (blk_execute_rq(q, cdi->disk, rq)) {
+		if (blk_execute_rq(q, cdi->disk, rq, 0)) {
 			struct request_sense *s = rq->sense;
 			ret = -EIO;
 			cdi->last_sense = s->sense_key;
 		}
 
-		if (blk_rq_unmap_user(rq, bio, len))
+		if (blk_rq_unmap_user(bio, len))
 			ret = -EFAULT;
 
 		if (ret)
@@ -2149,6 +2153,7 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
 		ubuf += len;
 	}
 
+	blk_put_request(rq);
 	return ret;
 }
 
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index c9d3a00a3c0c..234f5de3e929 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -754,7 +754,7 @@ static int idedisk_issue_flush(request_queue_t *q, struct gendisk *disk,
 
 	idedisk_prepare_flush(q, rq);
 
-	ret = blk_execute_rq(q, disk, rq);
+	ret = blk_execute_rq(q, disk, rq, 0);
 
 	/*
 	 * if we failed and caller wants error offset, get it
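
blk_execute_rq(), used by this flush path, is now a synchronous wrapper around blk_execute_rq_nowait(). The sketch below shows the asynchronous variant; it is not part of the patch, the names are hypothetical, and it assumes the caller can see blk_execute_rq_nowait(), which this commit neither exports nor declares in blkdev.h.

/*
 * Sketch only, not part of this commit: issue a prepared request without
 * sleeping by supplying a completion handler to blk_execute_rq_nowait().
 * The handler name and the choice to free the request from it are
 * hypothetical.
 */
static void flush_rq_done(struct request *rq)
{
	if (rq->errors)
		printk(KERN_ERR "cache flush failed\n");
	blk_put_request(rq);
}

static void issue_flush_nowait(request_queue_t *q, struct gendisk *disk,
			       struct request *rq)
{
	rq->flags |= REQ_BLOCK_PC | REQ_SOFTBARRIER;
	/* queued at the tail; flush_rq_done() runs on completion */
	blk_execute_rq_nowait(q, disk, rq, 0, flush_rq_done);
}
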
diff --git a/fs/bio.c b/fs/bio.c
index 1f2d4649b188..e49cf7dd2e92 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -25,6 +25,7 @@
 #include <linux/module.h>
 #include <linux/mempool.h>
 #include <linux/workqueue.h>
+#include <scsi/sg.h>		/* for struct sg_iovec */
 
 #define BIO_POOL_SIZE 256
 
@@ -546,22 +547,34 @@ out_bmd:
 	return ERR_PTR(ret);
 }
 
-static struct bio *__bio_map_user(request_queue_t *q, struct block_device *bdev,
-				  unsigned long uaddr, unsigned int len,
-				  int write_to_vm)
+static struct bio *__bio_map_user_iov(request_queue_t *q,
+				      struct block_device *bdev,
+				      struct sg_iovec *iov, int iov_count,
+				      int write_to_vm)
 {
-	unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	unsigned long start = uaddr >> PAGE_SHIFT;
-	const int nr_pages = end - start;
-	int ret, offset, i;
+	int i, j;
+	int nr_pages = 0;
 	struct page **pages;
 	struct bio *bio;
+	int cur_page = 0;
+	int ret, offset;
 
-	/*
-	 * transfer and buffer must be aligned to at least hardsector
-	 * size for now, in the future we can relax this restriction
-	 */
-	if ((uaddr & queue_dma_alignment(q)) || (len & queue_dma_alignment(q)))
+	for (i = 0; i < iov_count; i++) {
+		unsigned long uaddr = (unsigned long)iov[i].iov_base;
+		unsigned long len = iov[i].iov_len;
+		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+		unsigned long start = uaddr >> PAGE_SHIFT;
+
+		nr_pages += end - start;
+		/*
+		 * transfer and buffer must be aligned to at least hardsector
+		 * size for now, in the future we can relax this restriction
+		 */
+		if ((uaddr & queue_dma_alignment(q)) || (len & queue_dma_alignment(q)))
+			return ERR_PTR(-EINVAL);
+	}
+
+	if (!nr_pages)
 		return ERR_PTR(-EINVAL);
 
 	bio = bio_alloc(GFP_KERNEL, nr_pages);
@@ -573,42 +586,54 @@ static struct bio *__bio_map_user(request_queue_t *q, struct block_device *bdev,
 	if (!pages)
 		goto out;
 
-	down_read(&current->mm->mmap_sem);
-	ret = get_user_pages(current, current->mm, uaddr, nr_pages,
-			write_to_vm, 0, pages, NULL);
-	up_read(&current->mm->mmap_sem);
-
-	if (ret < nr_pages)
-		goto out;
-
-	bio->bi_bdev = bdev;
-
-	offset = uaddr & ~PAGE_MASK;
-	for (i = 0; i < nr_pages; i++) {
-		unsigned int bytes = PAGE_SIZE - offset;
-
-		if (len <= 0)
-			break;
-
-		if (bytes > len)
-			bytes = len;
+	memset(pages, 0, nr_pages * sizeof(struct page *));
+
+	for (i = 0; i < iov_count; i++) {
+		unsigned long uaddr = (unsigned long)iov[i].iov_base;
+		unsigned long len = iov[i].iov_len;
+		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+		unsigned long start = uaddr >> PAGE_SHIFT;
+		const int local_nr_pages = end - start;
+		const int page_limit = cur_page + local_nr_pages;
+
+		down_read(&current->mm->mmap_sem);
+		ret = get_user_pages(current, current->mm, uaddr,
+				     local_nr_pages,
+				     write_to_vm, 0, &pages[cur_page], NULL);
+		up_read(&current->mm->mmap_sem);
+
+		if (ret < local_nr_pages)
+			goto out_unmap;
+
+
+		offset = uaddr & ~PAGE_MASK;
+		for (j = cur_page; j < page_limit; j++) {
+			unsigned int bytes = PAGE_SIZE - offset;
+
+			if (len <= 0)
+				break;
+
+			if (bytes > len)
+				bytes = len;
+
+			/*
+			 * sorry...
+			 */
+			if (__bio_add_page(q, bio, pages[j], bytes, offset) < bytes)
+				break;
+
+			len -= bytes;
+			offset = 0;
+		}
 
-		/*
-		 * sorry...
-		 */
-		if (__bio_add_page(q, bio, pages[i], bytes, offset) < bytes)
-			break;
-
-		len -= bytes;
-		offset = 0;
+		cur_page = j;
+		/*
+		 * release the pages we didn't map into the bio, if any
+		 */
+		while (j < page_limit)
+			page_cache_release(pages[j++]);
 	}
 
-	/*
-	 * release the pages we didn't map into the bio, if any
-	 */
-	while (i < nr_pages)
-		page_cache_release(pages[i++]);
-
 	kfree(pages);
 
 	/*
@@ -617,9 +642,17 @@ static struct bio *__bio_map_user(request_queue_t *q, struct block_device *bdev,
 	if (!write_to_vm)
 		bio->bi_rw |= (1 << BIO_RW);
 
+	bio->bi_bdev = bdev;
 	bio->bi_flags |= (1 << BIO_USER_MAPPED);
 	return bio;
-out:
+
+ out_unmap:
+	for (i = 0; i < nr_pages; i++) {
+		if(!pages[i])
+			break;
+		page_cache_release(pages[i]);
+	}
+ out:
 	kfree(pages);
 	bio_put(bio);
 	return ERR_PTR(ret);
@@ -639,9 +672,33 @@ out:
 struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
 			 unsigned long uaddr, unsigned int len, int write_to_vm)
 {
+	struct sg_iovec iov;
+
+	iov.iov_base = (__user void *)uaddr;
+	iov.iov_len = len;
+
+	return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm);
+}
+
+/**
+ * bio_map_user_iov - map user sg_iovec table into bio
+ * @q: the request_queue_t for the bio
+ * @bdev: destination block device
+ * @iov: the iovec.
+ * @iov_count: number of elements in the iovec
+ * @write_to_vm: bool indicating writing to pages or not
+ *
+ * Map the user space address into a bio suitable for io to a block
+ * device. Returns an error pointer in case of error.
+ */
+struct bio *bio_map_user_iov(request_queue_t *q, struct block_device *bdev,
+			     struct sg_iovec *iov, int iov_count,
+			     int write_to_vm)
+{
 	struct bio *bio;
+	int len = 0, i;
 
-	bio = __bio_map_user(q, bdev, uaddr, len, write_to_vm);
+	bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm);
 
 	if (IS_ERR(bio))
 		return bio;
@@ -654,6 +711,9 @@ struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
 	 */
 	bio_get(bio);
 
+	for (i = 0; i < iov_count; i++)
+		len += iov[i].iov_len;
+
 	if (bio->bi_size == len)
 		return bio;
 
@@ -698,6 +758,82 @@ void bio_unmap_user(struct bio *bio)
 	bio_put(bio);
 }
 
+static int bio_map_kern_endio(struct bio *bio, unsigned int bytes_done, int err)
+{
+	if (bio->bi_size)
+		return 1;
+
+	bio_put(bio);
+	return 0;
+}
+
+
+static struct bio *__bio_map_kern(request_queue_t *q, void *data,
+				  unsigned int len, unsigned int gfp_mask)
+{
+	unsigned long kaddr = (unsigned long)data;
+	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	unsigned long start = kaddr >> PAGE_SHIFT;
+	const int nr_pages = end - start;
+	int offset, i;
+	struct bio *bio;
+
+	bio = bio_alloc(gfp_mask, nr_pages);
+	if (!bio)
+		return ERR_PTR(-ENOMEM);
+
+	offset = offset_in_page(kaddr);
+	for (i = 0; i < nr_pages; i++) {
+		unsigned int bytes = PAGE_SIZE - offset;
+
+		if (len <= 0)
+			break;
+
+		if (bytes > len)
+			bytes = len;
+
+		if (__bio_add_page(q, bio, virt_to_page(data), bytes,
+				    offset) < bytes)
+			break;
+
+		data += bytes;
+		len -= bytes;
+		offset = 0;
+	}
+
+	bio->bi_end_io = bio_map_kern_endio;
+	return bio;
+}
+
+/**
+ * bio_map_kern - map kernel address into bio
+ * @q: the request_queue_t for the bio
+ * @data: pointer to buffer to map
+ * @len: length in bytes
+ * @gfp_mask: allocation flags for bio allocation
+ *
+ * Map the kernel address into a bio suitable for io to a block
+ * device. Returns an error pointer in case of error.
+ */
+struct bio *bio_map_kern(request_queue_t *q, void *data, unsigned int len,
+			 unsigned int gfp_mask)
+{
+	struct bio *bio;
+
+	bio = __bio_map_kern(q, data, len, gfp_mask);
+	if (IS_ERR(bio))
+		return bio;
+
+	if (bio->bi_size == len)
+		return bio;
+
+	/*
+	 * Don't support partial mappings.
+	 */
+	bio_put(bio);
+	return ERR_PTR(-EINVAL);
+}
+
 /*
  * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
  * for performing direct-IO in BIOs.
@@ -1085,6 +1221,7 @@ EXPORT_SYMBOL(bio_add_page);
 EXPORT_SYMBOL(bio_get_nr_vecs);
 EXPORT_SYMBOL(bio_map_user);
 EXPORT_SYMBOL(bio_unmap_user);
+EXPORT_SYMBOL(bio_map_kern);
 EXPORT_SYMBOL(bio_pair_release);
 EXPORT_SYMBOL(bio_split);
 EXPORT_SYMBOL(bio_split_pool);
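
fs/bio.c gains bio_map_kern(), the kernel-buffer counterpart of bio_map_user(); blk_rq_map_kern() above uses it to attach a kmalloc'ed buffer to a request. A hedged sketch of a caller follows; it is not part of the patch, and the helper name and command bytes are hypothetical.

/*
 * Sketch only, not part of this commit: issue a packet command on a
 * kernel buffer via blk_rq_map_kern(). send_kern_pc() is a hypothetical
 * helper name and the CDB is supplied by the (hypothetical) caller.
 */
static int send_kern_pc(request_queue_t *q, struct gendisk *disk,
			unsigned char *cdb, unsigned int cdb_len,
			void *buf, unsigned int buflen)
{
	struct request *rq;
	int ret;

	rq = blk_get_request(q, READ, __GFP_WAIT);
	if (!rq)
		return -ENOMEM;

	rq->flags |= REQ_BLOCK_PC;
	memcpy(rq->cmd, cdb, cdb_len);
	rq->cmd_len = cdb_len;

	ret = blk_rq_map_kern(q, rq, buf, buflen, GFP_KERNEL);
	if (!ret)
		ret = blk_execute_rq(q, disk, rq, 0);

	blk_put_request(rq);
	return ret;
}
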
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 36ef29fa0d8b..4fda06da56c1 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -295,7 +295,13 @@ extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
 extern int bio_get_nr_vecs(struct block_device *);
 extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
 				unsigned long, unsigned int, int);
+struct sg_iovec;
+extern struct bio *bio_map_user_iov(struct request_queue *,
+				    struct block_device *,
+				    struct sg_iovec *, int, int);
 extern void bio_unmap_user(struct bio *);
+extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
+				unsigned int);
 extern void bio_set_pages_dirty(struct bio *bio);
 extern void bio_check_pages_dirty(struct bio *bio);
 extern struct bio *bio_copy_user(struct request_queue *, unsigned long, unsigned int, int);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 19bd8e7e11bf..aefa26fbae8a 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -563,10 +563,12 @@ extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(request_queue_t *q);
 extern void blk_run_queue(request_queue_t *);
 extern void blk_queue_activity_fn(request_queue_t *, activity_fn *, void *);
-extern struct request *blk_rq_map_user(request_queue_t *, int, void __user *, unsigned int);
-extern int blk_rq_unmap_user(struct request *, struct bio *, unsigned int);
-extern int blk_execute_rq(request_queue_t *, struct gendisk *, struct request *);
-
+extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned int);
+extern int blk_rq_unmap_user(struct bio *, unsigned int);
+extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned int, unsigned int);
+extern int blk_rq_map_user_iov(request_queue_t *, struct request *, struct sg_iovec *, int);
+extern int blk_execute_rq(request_queue_t *, struct gendisk *,
+			  struct request *, int);
 static inline request_queue_t *bdev_get_queue(struct block_device *bdev)
 {
 	return bdev->bd_disk->queue;