-rw-r--r--  block/ll_rw_blk.c       | 166
-rw-r--r--  block/scsi_ioctl.c      |  53
-rw-r--r--  drivers/cdrom/cdrom.c   |   6
-rw-r--r--  fs/bio.c                |  18
-rw-r--r--  include/linux/blkdev.h  |   7
5 files changed, 160 insertions, 90 deletions
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 9eaee6640535..0f82e12f7b67 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
| @@ -2322,6 +2322,84 @@ void blk_insert_request(request_queue_t *q, struct request *rq, | |||
| 2322 | 2322 | ||
| 2323 | EXPORT_SYMBOL(blk_insert_request); | 2323 | EXPORT_SYMBOL(blk_insert_request); |
| 2324 | 2324 | ||
| 2325 | static int __blk_rq_unmap_user(struct bio *bio) | ||
| 2326 | { | ||
| 2327 | int ret = 0; | ||
| 2328 | |||
| 2329 | if (bio) { | ||
| 2330 | if (bio_flagged(bio, BIO_USER_MAPPED)) | ||
| 2331 | bio_unmap_user(bio); | ||
| 2332 | else | ||
| 2333 | ret = bio_uncopy_user(bio); | ||
| 2334 | } | ||
| 2335 | |||
| 2336 | return ret; | ||
| 2337 | } | ||
| 2338 | |||
| 2339 | static int __blk_rq_map_user(request_queue_t *q, struct request *rq, | ||
| 2340 | void __user *ubuf, unsigned int len) | ||
| 2341 | { | ||
| 2342 | unsigned long uaddr; | ||
| 2343 | struct bio *bio, *orig_bio; | ||
| 2344 | int reading, ret; | ||
| 2345 | |||
| 2346 | reading = rq_data_dir(rq) == READ; | ||
| 2347 | |||
| 2348 | /* | ||
| 2349 | * if alignment requirement is satisfied, map in user pages for | ||
| 2350 | * direct dma. else, set up kernel bounce buffers | ||
| 2351 | */ | ||
| 2352 | uaddr = (unsigned long) ubuf; | ||
| 2353 | if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q))) | ||
| 2354 | bio = bio_map_user(q, NULL, uaddr, len, reading); | ||
| 2355 | else | ||
| 2356 | bio = bio_copy_user(q, uaddr, len, reading); | ||
| 2357 | |||
| 2358 | if (IS_ERR(bio)) { | ||
| 2359 | return PTR_ERR(bio); | ||
| 2360 | } | ||
| 2361 | |||
| 2362 | orig_bio = bio; | ||
| 2363 | blk_queue_bounce(q, &bio); | ||
| 2364 | /* | ||
| 2365 | * We link the bounce buffer in and could have to traverse it | ||
| 2366 | * later so we have to get a ref to prevent it from being freed | ||
| 2367 | */ | ||
| 2368 | bio_get(bio); | ||
| 2369 | |||
| 2370 | /* | ||
| 2371 | * for most (all? don't know of any) queues we could | ||
| 2372 | * skip grabbing the queue lock here. only drivers with | ||
| 2373 | * funky private ->back_merge_fn() function could be | ||
| 2374 | * problematic. | ||
| 2375 | */ | ||
| 2376 | spin_lock_irq(q->queue_lock); | ||
| 2377 | if (!rq->bio) | ||
| 2378 | blk_rq_bio_prep(q, rq, bio); | ||
| 2379 | else if (!q->back_merge_fn(q, rq, bio)) { | ||
| 2380 | ret = -EINVAL; | ||
| 2381 | spin_unlock_irq(q->queue_lock); | ||
| 2382 | goto unmap_bio; | ||
| 2383 | } else { | ||
| 2384 | rq->biotail->bi_next = bio; | ||
| 2385 | rq->biotail = bio; | ||
| 2386 | |||
| 2387 | rq->nr_sectors += bio_sectors(bio); | ||
| 2388 | rq->hard_nr_sectors = rq->nr_sectors; | ||
| 2389 | rq->data_len += bio->bi_size; | ||
| 2390 | } | ||
| 2391 | spin_unlock_irq(q->queue_lock); | ||
| 2392 | |||
| 2393 | return bio->bi_size; | ||
| 2394 | |||
| 2395 | unmap_bio: | ||
| 2396 | /* if it was bounced we must call the end io function */ | ||
| 2397 | bio_endio(bio, bio->bi_size, 0); | ||
| 2398 | __blk_rq_unmap_user(orig_bio); | ||
| 2399 | bio_put(bio); | ||
| 2400 | return ret; | ||
| 2401 | } | ||
| 2402 | |||
| 2325 | /** | 2403 | /** |
| 2326 | * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage | 2404 | * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage |
| 2327 | * @q: request queue where request should be inserted | 2405 | * @q: request queue where request should be inserted |
| @@ -2343,42 +2421,44 @@ EXPORT_SYMBOL(blk_insert_request); | |||
| 2343 | * unmapping. | 2421 | * unmapping. |
| 2344 | */ | 2422 | */ |
| 2345 | int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf, | 2423 | int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf, |
| 2346 | unsigned int len) | 2424 | unsigned long len) |
| 2347 | { | 2425 | { |
| 2348 | unsigned long uaddr; | 2426 | unsigned long bytes_read = 0; |
| 2349 | struct bio *bio; | 2427 | int ret; |
| 2350 | int reading; | ||
| 2351 | 2428 | ||
| 2352 | if (len > (q->max_hw_sectors << 9)) | 2429 | if (len > (q->max_hw_sectors << 9)) |
| 2353 | return -EINVAL; | 2430 | return -EINVAL; |
| 2354 | if (!len || !ubuf) | 2431 | if (!len || !ubuf) |
| 2355 | return -EINVAL; | 2432 | return -EINVAL; |
| 2356 | 2433 | ||
| 2357 | reading = rq_data_dir(rq) == READ; | 2434 | while (bytes_read != len) { |
| 2435 | unsigned long map_len, end, start; | ||
| 2358 | 2436 | ||
| 2359 | /* | 2437 | map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE); |
| 2360 | * if alignment requirement is satisfied, map in user pages for | 2438 | end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1) |
| 2361 | * direct dma. else, set up kernel bounce buffers | 2439 | >> PAGE_SHIFT; |
| 2362 | */ | 2440 | start = (unsigned long)ubuf >> PAGE_SHIFT; |
| 2363 | uaddr = (unsigned long) ubuf; | ||
| 2364 | if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q))) | ||
| 2365 | bio = bio_map_user(q, NULL, uaddr, len, reading); | ||
| 2366 | else | ||
| 2367 | bio = bio_copy_user(q, uaddr, len, reading); | ||
| 2368 | 2441 | ||
| 2369 | if (!IS_ERR(bio)) { | 2442 | /* |
| 2370 | rq->bio = rq->biotail = bio; | 2443 | * A bad offset could cause us to require BIO_MAX_PAGES + 1 |
| 2371 | blk_rq_bio_prep(q, rq, bio); | 2444 | * pages. If this happens we just lower the requested |
| 2445 | * mapping len by a page so that we can fit | ||
| 2446 | */ | ||
| 2447 | if (end - start > BIO_MAX_PAGES) | ||
| 2448 | map_len -= PAGE_SIZE; | ||
| 2372 | 2449 | ||
| 2373 | rq->buffer = rq->data = NULL; | 2450 | ret = __blk_rq_map_user(q, rq, ubuf, map_len); |
| 2374 | rq->data_len = len; | 2451 | if (ret < 0) |
| 2375 | return 0; | 2452 | goto unmap_rq; |
| 2453 | bytes_read += ret; | ||
| 2454 | ubuf += ret; | ||
| 2376 | } | 2455 | } |
| 2377 | 2456 | ||
| 2378 | /* | 2457 | rq->buffer = rq->data = NULL; |
| 2379 | * bio is the err-ptr | 2458 | return 0; |
| 2380 | */ | 2459 | unmap_rq: |
| 2381 | return PTR_ERR(bio); | 2460 | blk_rq_unmap_user(rq); |
| 2461 | return ret; | ||
| 2382 | } | 2462 | } |
| 2383 | 2463 | ||
| 2384 | EXPORT_SYMBOL(blk_rq_map_user); | 2464 | EXPORT_SYMBOL(blk_rq_map_user); |
| @@ -2404,7 +2484,7 @@ EXPORT_SYMBOL(blk_rq_map_user); | |||
| 2404 | * unmapping. | 2484 | * unmapping. |
| 2405 | */ | 2485 | */ |
| 2406 | int blk_rq_map_user_iov(request_queue_t *q, struct request *rq, | 2486 | int blk_rq_map_user_iov(request_queue_t *q, struct request *rq, |
| 2407 | struct sg_iovec *iov, int iov_count) | 2487 | struct sg_iovec *iov, int iov_count, unsigned int len) |
| 2408 | { | 2488 | { |
| 2409 | struct bio *bio; | 2489 | struct bio *bio; |
| 2410 | 2490 | ||
| @@ -2418,10 +2498,15 @@ int blk_rq_map_user_iov(request_queue_t *q, struct request *rq, | |||
| 2418 | if (IS_ERR(bio)) | 2498 | if (IS_ERR(bio)) |
| 2419 | return PTR_ERR(bio); | 2499 | return PTR_ERR(bio); |
| 2420 | 2500 | ||
| 2421 | rq->bio = rq->biotail = bio; | 2501 | if (bio->bi_size != len) { |
| 2502 | bio_endio(bio, bio->bi_size, 0); | ||
| 2503 | bio_unmap_user(bio); | ||
| 2504 | return -EINVAL; | ||
| 2505 | } | ||
| 2506 | |||
| 2507 | bio_get(bio); | ||
| 2422 | blk_rq_bio_prep(q, rq, bio); | 2508 | blk_rq_bio_prep(q, rq, bio); |
| 2423 | rq->buffer = rq->data = NULL; | 2509 | rq->buffer = rq->data = NULL; |
| 2424 | rq->data_len = bio->bi_size; | ||
| 2425 | return 0; | 2510 | return 0; |
| 2426 | } | 2511 | } |
| 2427 | 2512 | ||
| @@ -2429,23 +2514,26 @@ EXPORT_SYMBOL(blk_rq_map_user_iov); | |||
| 2429 | 2514 | ||
| 2430 | /** | 2515 | /** |
| 2431 | * blk_rq_unmap_user - unmap a request with user data | 2516 | * blk_rq_unmap_user - unmap a request with user data |
| 2432 | * @bio: bio to be unmapped | 2517 | * @rq: rq to be unmapped |
| 2433 | * @ulen: length of user buffer | ||
| 2434 | * | 2518 | * |
| 2435 | * Description: | 2519 | * Description: |
| 2436 | * Unmap a bio previously mapped by blk_rq_map_user(). | 2520 | * Unmap a rq previously mapped by blk_rq_map_user(). |
| 2521 | * rq->bio must be set to the original head of the request. | ||
| 2437 | */ | 2522 | */ |
| 2438 | int blk_rq_unmap_user(struct bio *bio, unsigned int ulen) | 2523 | int blk_rq_unmap_user(struct request *rq) |
| 2439 | { | 2524 | { |
| 2440 | int ret = 0; | 2525 | struct bio *bio, *mapped_bio; |
| 2441 | 2526 | ||
| 2442 | if (bio) { | 2527 | while ((bio = rq->bio)) { |
| 2443 | if (bio_flagged(bio, BIO_USER_MAPPED)) | 2528 | if (bio_flagged(bio, BIO_BOUNCED)) |
| 2444 | bio_unmap_user(bio); | 2529 | mapped_bio = bio->bi_private; |
| 2445 | else | 2530 | else |
| 2446 | ret = bio_uncopy_user(bio); | 2531 | mapped_bio = bio; |
| 2447 | } | ||
| 2448 | 2532 | ||
| 2533 | __blk_rq_unmap_user(mapped_bio); | ||
| 2534 | rq->bio = bio->bi_next; | ||
| 2535 | bio_put(bio); | ||
| 2536 | } | ||
| 2449 | return 0; | 2537 | return 0; |
| 2450 | } | 2538 | } |
| 2451 | 2539 | ||
| @@ -2476,11 +2564,8 @@ int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf, | |||
| 2476 | if (rq_data_dir(rq) == WRITE) | 2564 | if (rq_data_dir(rq) == WRITE) |
| 2477 | bio->bi_rw |= (1 << BIO_RW); | 2565 | bio->bi_rw |= (1 << BIO_RW); |
| 2478 | 2566 | ||
| 2479 | rq->bio = rq->biotail = bio; | ||
| 2480 | blk_rq_bio_prep(q, rq, bio); | 2567 | blk_rq_bio_prep(q, rq, bio); |
| 2481 | |||
| 2482 | rq->buffer = rq->data = NULL; | 2568 | rq->buffer = rq->data = NULL; |
| 2483 | rq->data_len = len; | ||
| 2484 | return 0; | 2569 | return 0; |
| 2485 | } | 2570 | } |
| 2486 | 2571 | ||
| @@ -3495,6 +3580,7 @@ void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio) | |||
| 3495 | rq->hard_cur_sectors = rq->current_nr_sectors; | 3580 | rq->hard_cur_sectors = rq->current_nr_sectors; |
| 3496 | rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio); | 3581 | rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio); |
| 3497 | rq->buffer = bio_data(bio); | 3582 | rq->buffer = bio_data(bio); |
| 3583 | rq->data_len = bio->bi_size; | ||
| 3498 | 3584 | ||
| 3499 | rq->bio = rq->biotail = bio; | 3585 | rq->bio = rq->biotail = bio; |
| 3500 | } | 3586 | } |
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index e55a75621437..5493c2fbbab1 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
| @@ -226,7 +226,6 @@ static int sg_io(struct file *file, request_queue_t *q, | |||
| 226 | unsigned long start_time; | 226 | unsigned long start_time; |
| 227 | int writing = 0, ret = 0; | 227 | int writing = 0, ret = 0; |
| 228 | struct request *rq; | 228 | struct request *rq; |
| 229 | struct bio *bio; | ||
| 230 | char sense[SCSI_SENSE_BUFFERSIZE]; | 229 | char sense[SCSI_SENSE_BUFFERSIZE]; |
| 231 | unsigned char cmd[BLK_MAX_CDB]; | 230 | unsigned char cmd[BLK_MAX_CDB]; |
| 232 | 231 | ||
| @@ -258,30 +257,6 @@ static int sg_io(struct file *file, request_queue_t *q, | |||
| 258 | if (!rq) | 257 | if (!rq) |
| 259 | return -ENOMEM; | 258 | return -ENOMEM; |
| 260 | 259 | ||
| 261 | if (hdr->iovec_count) { | ||
| 262 | const int size = sizeof(struct sg_iovec) * hdr->iovec_count; | ||
| 263 | struct sg_iovec *iov; | ||
| 264 | |||
| 265 | iov = kmalloc(size, GFP_KERNEL); | ||
| 266 | if (!iov) { | ||
| 267 | ret = -ENOMEM; | ||
| 268 | goto out; | ||
| 269 | } | ||
| 270 | |||
| 271 | if (copy_from_user(iov, hdr->dxferp, size)) { | ||
| 272 | kfree(iov); | ||
| 273 | ret = -EFAULT; | ||
| 274 | goto out; | ||
| 275 | } | ||
| 276 | |||
| 277 | ret = blk_rq_map_user_iov(q, rq, iov, hdr->iovec_count); | ||
| 278 | kfree(iov); | ||
| 279 | } else if (hdr->dxfer_len) | ||
| 280 | ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len); | ||
| 281 | |||
| 282 | if (ret) | ||
| 283 | goto out; | ||
| 284 | |||
| 285 | /* | 260 | /* |
| 286 | * fill in request structure | 261 | * fill in request structure |
| 287 | */ | 262 | */ |
| @@ -294,7 +269,6 @@ static int sg_io(struct file *file, request_queue_t *q, | |||
| 294 | rq->sense_len = 0; | 269 | rq->sense_len = 0; |
| 295 | 270 | ||
| 296 | rq->cmd_type = REQ_TYPE_BLOCK_PC; | 271 | rq->cmd_type = REQ_TYPE_BLOCK_PC; |
| 297 | bio = rq->bio; | ||
| 298 | 272 | ||
| 299 | /* | 273 | /* |
| 300 | * bounce this after holding a reference to the original bio, it's | 274 | * bounce this after holding a reference to the original bio, it's |
| @@ -309,6 +283,31 @@ static int sg_io(struct file *file, request_queue_t *q, | |||
| 309 | if (!rq->timeout) | 283 | if (!rq->timeout) |
| 310 | rq->timeout = BLK_DEFAULT_TIMEOUT; | 284 | rq->timeout = BLK_DEFAULT_TIMEOUT; |
| 311 | 285 | ||
| 286 | if (hdr->iovec_count) { | ||
| 287 | const int size = sizeof(struct sg_iovec) * hdr->iovec_count; | ||
| 288 | struct sg_iovec *iov; | ||
| 289 | |||
| 290 | iov = kmalloc(size, GFP_KERNEL); | ||
| 291 | if (!iov) { | ||
| 292 | ret = -ENOMEM; | ||
| 293 | goto out; | ||
| 294 | } | ||
| 295 | |||
| 296 | if (copy_from_user(iov, hdr->dxferp, size)) { | ||
| 297 | kfree(iov); | ||
| 298 | ret = -EFAULT; | ||
| 299 | goto out; | ||
| 300 | } | ||
| 301 | |||
| 302 | ret = blk_rq_map_user_iov(q, rq, iov, hdr->iovec_count, | ||
| 303 | hdr->dxfer_len); | ||
| 304 | kfree(iov); | ||
| 305 | } else if (hdr->dxfer_len) | ||
| 306 | ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len); | ||
| 307 | |||
| 308 | if (ret) | ||
| 309 | goto out; | ||
| 310 | |||
| 312 | rq->retries = 0; | 311 | rq->retries = 0; |
| 313 | 312 | ||
| 314 | start_time = jiffies; | 313 | start_time = jiffies; |
| @@ -339,7 +338,7 @@ static int sg_io(struct file *file, request_queue_t *q, | |||
| 339 | hdr->sb_len_wr = len; | 338 | hdr->sb_len_wr = len; |
| 340 | } | 339 | } |
| 341 | 340 | ||
| 342 | if (blk_rq_unmap_user(bio, hdr->dxfer_len)) | 341 | if (blk_rq_unmap_user(rq)) |
| 343 | ret = -EFAULT; | 342 | ret = -EFAULT; |
| 344 | 343 | ||
| 345 | /* may not have succeeded, but output values written to control | 344 | /* may not have succeeded, but output values written to control |
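From user space, the invariant that the new length argument enforces is that dxfer_len must equal the sum of the iov_len fields when iovec_count is set; a mismatched iovec total now fails with -EINVAL instead of issuing a partial transfer. A small illustrative SG_IO snippet, with the device, CDB and buffer setup assumed rather than taken from this patch:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <scsi/sg.h>

	/* Issue a read-style SG_IO command with a scattered user buffer. */
	static int sg_io_iov(int fd, struct sg_iovec *iov, int iov_count,
			     unsigned int total_len, unsigned char *cdb,
			     unsigned char cdb_len, unsigned char *sense)
	{
		struct sg_io_hdr hdr;

		memset(&hdr, 0, sizeof(hdr));
		hdr.interface_id = 'S';
		hdr.dxfer_direction = SG_DXFER_FROM_DEV;
		hdr.cmdp = cdb;
		hdr.cmd_len = cdb_len;
		hdr.iovec_count = iov_count;
		hdr.dxferp = iov;	   /* array of sg_iovec, not the data itself */
		hdr.dxfer_len = total_len; /* must equal the sum of iov_len fields */
		hdr.sbp = sense;
		hdr.mx_sb_len = 32;
		hdr.timeout = 60000;	   /* milliseconds */

		return ioctl(fd, SG_IO, &hdr);
	}
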
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 7ea0f48f8fa6..2df5cf4ec743 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
| @@ -2133,16 +2133,14 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf, | |||
| 2133 | rq->timeout = 60 * HZ; | 2133 | rq->timeout = 60 * HZ; |
| 2134 | bio = rq->bio; | 2134 | bio = rq->bio; |
| 2135 | 2135 | ||
| 2136 | if (rq->bio) | ||
| 2137 | blk_queue_bounce(q, &rq->bio); | ||
| 2138 | |||
| 2139 | if (blk_execute_rq(q, cdi->disk, rq, 0)) { | 2136 | if (blk_execute_rq(q, cdi->disk, rq, 0)) { |
| 2140 | struct request_sense *s = rq->sense; | 2137 | struct request_sense *s = rq->sense; |
| 2141 | ret = -EIO; | 2138 | ret = -EIO; |
| 2142 | cdi->last_sense = s->sense_key; | 2139 | cdi->last_sense = s->sense_key; |
| 2143 | } | 2140 | } |
| 2144 | 2141 | ||
| 2145 | if (blk_rq_unmap_user(bio, len)) | 2142 | rq->bio = bio; |
| 2143 | if (blk_rq_unmap_user(rq)) | ||
| 2146 | ret = -EFAULT; | 2144 | ret = -EFAULT; |
| 2147 | 2145 | ||
| 2148 | if (ret) | 2146 | if (ret) |
diff --git a/fs/bio.c b/fs/bio.c
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -560,10 +560,8 @@ struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr, | |||
| 560 | break; | 560 | break; |
| 561 | } | 561 | } |
| 562 | 562 | ||
| 563 | if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) { | 563 | if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) |
| 564 | ret = -EINVAL; | ||
| 565 | break; | 564 | break; |
| 566 | } | ||
| 567 | 565 | ||
| 568 | len -= bytes; | 566 | len -= bytes; |
| 569 | } | 567 | } |
| @@ -750,7 +748,6 @@ struct bio *bio_map_user_iov(request_queue_t *q, struct block_device *bdev, | |||
| 750 | int write_to_vm) | 748 | int write_to_vm) |
| 751 | { | 749 | { |
| 752 | struct bio *bio; | 750 | struct bio *bio; |
| 753 | int len = 0, i; | ||
| 754 | 751 | ||
| 755 | bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm); | 752 | bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm); |
| 756 | 753 | ||
| @@ -765,18 +762,7 @@ struct bio *bio_map_user_iov(request_queue_t *q, struct block_device *bdev, | |||
| 765 | */ | 762 | */ |
| 766 | bio_get(bio); | 763 | bio_get(bio); |
| 767 | 764 | ||
| 768 | for (i = 0; i < iov_count; i++) | 765 | return bio; |
| 769 | len += iov[i].iov_len; | ||
| 770 | |||
| 771 | if (bio->bi_size == len) | ||
| 772 | return bio; | ||
| 773 | |||
| 774 | /* | ||
| 775 | * don't support partial mappings | ||
| 776 | */ | ||
| 777 | bio_endio(bio, bio->bi_size, 0); | ||
| 778 | bio_unmap_user(bio); | ||
| 779 | return ERR_PTR(-EINVAL); | ||
| 780 | } | 766 | } |
| 781 | 767 | ||
| 782 | static void __bio_unmap_user(struct bio *bio) | 768 | static void __bio_unmap_user(struct bio *bio) |
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 7bfcde2d5578..e1c7286165ff 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
| @@ -678,10 +678,11 @@ extern void __blk_stop_queue(request_queue_t *q); | |||
| 678 | extern void blk_run_queue(request_queue_t *); | 678 | extern void blk_run_queue(request_queue_t *); |
| 679 | extern void blk_start_queueing(request_queue_t *); | 679 | extern void blk_start_queueing(request_queue_t *); |
| 680 | extern void blk_queue_activity_fn(request_queue_t *, activity_fn *, void *); | 680 | extern void blk_queue_activity_fn(request_queue_t *, activity_fn *, void *); |
| 681 | extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned int); | 681 | extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned long); |
| 682 | extern int blk_rq_unmap_user(struct bio *, unsigned int); | 682 | extern int blk_rq_unmap_user(struct request *); |
| 683 | extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned int, gfp_t); | 683 | extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned int, gfp_t); |
| 684 | extern int blk_rq_map_user_iov(request_queue_t *, struct request *, struct sg_iovec *, int); | 684 | extern int blk_rq_map_user_iov(request_queue_t *, struct request *, |
| 685 | struct sg_iovec *, int, unsigned int); | ||
| 685 | extern int blk_execute_rq(request_queue_t *, struct gendisk *, | 686 | extern int blk_execute_rq(request_queue_t *, struct gendisk *, |
| 686 | struct request *, int); | 687 | struct request *, int); |
| 687 | extern void blk_execute_rq_nowait(request_queue_t *, struct gendisk *, | 688 | extern void blk_execute_rq_nowait(request_queue_t *, struct gendisk *, |
