Diffstat (limited to 'block/ll_rw_blk.c')
 -rw-r--r--   block/ll_rw_blk.c   166
 1 files changed, 126 insertions, 40 deletions
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index eb4cf6df7374..cc6e95f8e5d9 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -2322,6 +2322,84 @@ void blk_insert_request(request_queue_t *q, struct request *rq,
 
 EXPORT_SYMBOL(blk_insert_request);
 
+static int __blk_rq_unmap_user(struct bio *bio)
+{
+        int ret = 0;
+
+        if (bio) {
+                if (bio_flagged(bio, BIO_USER_MAPPED))
+                        bio_unmap_user(bio);
+                else
+                        ret = bio_uncopy_user(bio);
+        }
+
+        return ret;
+}
+
+static int __blk_rq_map_user(request_queue_t *q, struct request *rq,
+                             void __user *ubuf, unsigned int len)
+{
+        unsigned long uaddr;
+        struct bio *bio, *orig_bio;
+        int reading, ret;
+
+        reading = rq_data_dir(rq) == READ;
+
+        /*
+         * if alignment requirement is satisfied, map in user pages for
+         * direct dma. else, set up kernel bounce buffers
+         */
+        uaddr = (unsigned long) ubuf;
+        if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
+                bio = bio_map_user(q, NULL, uaddr, len, reading);
+        else
+                bio = bio_copy_user(q, uaddr, len, reading);
+
+        if (IS_ERR(bio)) {
+                return PTR_ERR(bio);
+        }
+
+        orig_bio = bio;
+        blk_queue_bounce(q, &bio);
+        /*
+         * We link the bounce buffer in and could have to traverse it
+         * later so we have to get a ref to prevent it from being freed
+         */
+        bio_get(bio);
+
+        /*
+         * for most (all? don't know of any) queues we could
+         * skip grabbing the queue lock here. only drivers with
+         * funky private ->back_merge_fn() function could be
+         * problematic.
+         */
+        spin_lock_irq(q->queue_lock);
+        if (!rq->bio)
+                blk_rq_bio_prep(q, rq, bio);
+        else if (!q->back_merge_fn(q, rq, bio)) {
+                ret = -EINVAL;
+                spin_unlock_irq(q->queue_lock);
+                goto unmap_bio;
+        } else {
+                rq->biotail->bi_next = bio;
+                rq->biotail = bio;
+
+                rq->nr_sectors += bio_sectors(bio);
+                rq->hard_nr_sectors = rq->nr_sectors;
+                rq->data_len += bio->bi_size;
+        }
+        spin_unlock_irq(q->queue_lock);
+
+        return bio->bi_size;
+
+unmap_bio:
+        /* if it was bounced we must call the end io function */
+        bio_endio(bio, bio->bi_size, 0);
+        __blk_rq_unmap_user(orig_bio);
+        bio_put(bio);
+        return ret;
+}
+
 /**
  * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
  * @q:		request queue where request should be inserted
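The __blk_rq_map_user() helper added above decides between a direct bio_map_user() mapping and a bio_copy_user() bounce copy purely from the queue's DMA alignment mask. A minimal userspace sketch of that test follows; can_map_directly() is an invented name, and the 511 mask is only an assumed default for queue_dma_alignment(), not something this patch dictates.

#include <stdio.h>

/*
 * Stand-in for the check in __blk_rq_map_user(): direct DMA mapping is
 * only attempted when both the user address and the length satisfy the
 * queue's DMA alignment mask; otherwise a kernel bounce buffer is used.
 */
static int can_map_directly(unsigned long uaddr, unsigned long len,
                            unsigned long align_mask)
{
        return !(uaddr & align_mask) && !(len & align_mask);
}

int main(void)
{
        unsigned long mask = 511;       /* assumed queue_dma_alignment() value */

        printf("%d\n", can_map_directly(0x10000, 4096, mask)); /* 1: bio_map_user path */
        printf("%d\n", can_map_directly(0x10003, 4096, mask)); /* 0: bio_copy_user path */
        return 0;
}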
@@ -2343,42 +2421,44 @@ EXPORT_SYMBOL(blk_insert_request);
  * unmapping.
  */
 int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
-                    unsigned int len)
+                    unsigned long len)
 {
-        unsigned long uaddr;
-        struct bio *bio;
-        int reading;
+        unsigned long bytes_read = 0;
+        int ret;
 
         if (len > (q->max_hw_sectors << 9))
                 return -EINVAL;
         if (!len || !ubuf)
                 return -EINVAL;
 
-        reading = rq_data_dir(rq) == READ;
+        while (bytes_read != len) {
+                unsigned long map_len, end, start;
 
-        /*
-         * if alignment requirement is satisfied, map in user pages for
-         * direct dma. else, set up kernel bounce buffers
-         */
-        uaddr = (unsigned long) ubuf;
-        if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
-                bio = bio_map_user(q, NULL, uaddr, len, reading);
-        else
-                bio = bio_copy_user(q, uaddr, len, reading);
+                map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
+                end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
+                                                                >> PAGE_SHIFT;
+                start = (unsigned long)ubuf >> PAGE_SHIFT;
 
-        if (!IS_ERR(bio)) {
-                rq->bio = rq->biotail = bio;
-                blk_rq_bio_prep(q, rq, bio);
+                /*
+                 * A bad offset could cause us to require BIO_MAX_PAGES + 1
+                 * pages. If this happens we just lower the requested
+                 * mapping len by a page so that we can fit
+                 */
+                if (end - start > BIO_MAX_PAGES)
+                        map_len -= PAGE_SIZE;
 
-                rq->buffer = rq->data = NULL;
-                rq->data_len = len;
-                return 0;
+                ret = __blk_rq_map_user(q, rq, ubuf, map_len);
+                if (ret < 0)
+                        goto unmap_rq;
+                bytes_read += ret;
+                ubuf += ret;
         }
 
-        /*
-         * bio is the err-ptr
-         */
-        return PTR_ERR(bio);
+        rq->buffer = rq->data = NULL;
+        return 0;
+unmap_rq:
+        blk_rq_unmap_user(rq);
+        return ret;
 }
 
 EXPORT_SYMBOL(blk_rq_map_user);
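The rewritten blk_rq_map_user() above maps the buffer in BIO_MAX_SIZE chunks and, when a misaligned start would push a chunk to BIO_MAX_PAGES + 1 pages, shrinks it by one page. A small userspace sketch of that arithmetic, assuming 4K pages and 256 pages per bio (the kernel takes these values from its own headers):

#include <stdio.h>

#define PAGE_SIZE       4096UL
#define PAGE_SHIFT      12
#define BIO_MAX_PAGES   256UL
#define BIO_MAX_SIZE    (BIO_MAX_PAGES << PAGE_SHIFT)

/* Mirror of the chunk-length computation in the blk_rq_map_user() loop. */
static unsigned long chunk_len(unsigned long ubuf, unsigned long remaining)
{
        unsigned long map_len, end, start;

        map_len = remaining < BIO_MAX_SIZE ? remaining : BIO_MAX_SIZE;
        end = (ubuf + map_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
        start = ubuf >> PAGE_SHIFT;

        /*
         * An unaligned start can make the chunk span BIO_MAX_PAGES + 1
         * pages; shave one page off so a single bio can still hold it.
         */
        if (end - start > BIO_MAX_PAGES)
                map_len -= PAGE_SIZE;

        return map_len;
}

int main(void)
{
        /* page-aligned buffer: a full BIO_MAX_SIZE chunk fits */
        printf("%lu\n", chunk_len(0x100000, 2 * BIO_MAX_SIZE));
        /* misaligned buffer: one page is trimmed from the first chunk */
        printf("%lu\n", chunk_len(0x100200, 2 * BIO_MAX_SIZE));
        return 0;
}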
@@ -2404,7 +2484,7 @@ EXPORT_SYMBOL(blk_rq_map_user);
  * unmapping.
  */
 int blk_rq_map_user_iov(request_queue_t *q, struct request *rq,
-                        struct sg_iovec *iov, int iov_count)
+                        struct sg_iovec *iov, int iov_count, unsigned int len)
 {
         struct bio *bio;
 
@@ -2418,10 +2498,15 @@ int blk_rq_map_user_iov(request_queue_t *q, struct request *rq,
         if (IS_ERR(bio))
                 return PTR_ERR(bio);
 
-        rq->bio = rq->biotail = bio;
+        if (bio->bi_size != len) {
+                bio_endio(bio, bio->bi_size, 0);
+                bio_unmap_user(bio);
+                return -EINVAL;
+        }
+
+        bio_get(bio);
         blk_rq_bio_prep(q, rq, bio);
         rq->buffer = rq->data = NULL;
-        rq->data_len = bio->bi_size;
         return 0;
 }
 
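blk_rq_map_user_iov() now takes the expected byte count and fails the mapping unless the resulting bio covers exactly that many bytes. A hypothetical caller would derive that count by summing the iovec, sketched here with the plain userspace struct iovec instead of sg_iovec:

#include <stdio.h>
#include <sys/uio.h>

/* Sum the segment lengths; this total would be handed in as 'len'. */
static unsigned long total_iov_len(const struct iovec *iov, int iov_count)
{
        unsigned long len = 0;
        int i;

        for (i = 0; i < iov_count; i++)
                len += iov[i].iov_len;
        return len;
}

int main(void)
{
        char a[512], b[1024];
        struct iovec iov[2] = {
                { .iov_base = a, .iov_len = sizeof(a) },
                { .iov_base = b, .iov_len = sizeof(b) },
        };

        printf("%lu\n", total_iov_len(iov, 2));         /* prints 1536 */
        return 0;
}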
@@ -2429,23 +2514,26 @@ EXPORT_SYMBOL(blk_rq_map_user_iov);
 
 /**
  * blk_rq_unmap_user - unmap a request with user data
- * @bio:	bio to be unmapped
- * @ulen:	length of user buffer
+ * @rq:		rq to be unmapped
  *
  * Description:
- *    Unmap a bio previously mapped by blk_rq_map_user().
+ *    Unmap a rq previously mapped by blk_rq_map_user().
+ *    rq->bio must be set to the original head of the request.
  */
-int blk_rq_unmap_user(struct bio *bio, unsigned int ulen)
+int blk_rq_unmap_user(struct request *rq)
 {
-        int ret = 0;
+        struct bio *bio, *mapped_bio;
 
-        if (bio) {
-                if (bio_flagged(bio, BIO_USER_MAPPED))
-                        bio_unmap_user(bio);
+        while ((bio = rq->bio)) {
+                if (bio_flagged(bio, BIO_BOUNCED))
+                        mapped_bio = bio->bi_private;
                 else
-                        ret = bio_uncopy_user(bio);
-        }
+                        mapped_bio = bio;
 
+                __blk_rq_unmap_user(mapped_bio);
+                rq->bio = bio->bi_next;
+                bio_put(bio);
+        }
         return 0;
 }
 
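The new blk_rq_unmap_user() walks the rq->bio chain, resolving a bounced bio back to the original through bi_private before unmapping, and advances rq->bio before dropping each reference. The same pop-the-head-then-release shape on an ordinary linked list, with invented node and field names, looks like this:

#include <stdio.h>
#include <stdlib.h>

struct node {
        struct node *next;
        int id;
};

/* Pop-and-release loop in the same shape as the blk_rq_unmap_user() walk. */
static void release_all(struct node **head)
{
        struct node *n;

        while ((n = *head)) {
                /* unlink first so the freed node is never read again */
                *head = n->next;
                printf("released node %d\n", n->id);
                free(n);
        }
}

int main(void)
{
        struct node *head = NULL;
        int i;

        for (i = 0; i < 3; i++) {
                struct node *n = malloc(sizeof(*n));

                if (!n)
                        break;
                n->id = i;
                n->next = head;
                head = n;
        }
        release_all(&head);
        return 0;
}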
@@ -2476,11 +2564,8 @@ int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
         if (rq_data_dir(rq) == WRITE)
                 bio->bi_rw |= (1 << BIO_RW);
 
-        rq->bio = rq->biotail = bio;
         blk_rq_bio_prep(q, rq, bio);
-
         rq->buffer = rq->data = NULL;
-        rq->data_len = len;
         return 0;
 }
 
@@ -3495,6 +3580,7 @@ void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio)
         rq->hard_cur_sectors = rq->current_nr_sectors;
         rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
         rq->buffer = bio_data(bio);
+        rq->data_len = bio->bi_size;
 
         rq->bio = rq->biotail = bio;
 }
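With blk_rq_bio_prep() now setting rq->data_len itself, the callers above no longer fill it in by hand: the byte count comes straight from bi_size, while the sector counts are the same value in 512-byte units. A trivial standalone illustration of that relationship (the 8192-byte figure is arbitrary):

#include <stdio.h>

int main(void)
{
        unsigned int bi_size = 8192;                    /* bytes carried by the bio */
        unsigned int nr_sectors = bi_size >> 9;         /* bio_sectors(): 512-byte units */

        printf("data_len=%u bytes, nr_sectors=%u\n", bi_size, nr_sectors);
        return 0;
}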