Diffstat (limited to 'block/ll_rw_blk.c')
-rw-r--r--  block/ll_rw_blk.c | 67 +++++++++++++++++++++++++++++++------------------------------------
1 file changed, 31 insertions(+), 36 deletions(-)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 79807dbc306e..e07c079e07e6 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1405,8 +1405,7 @@ static inline int ll_new_hw_segment(request_queue_t *q,
 	return 1;
 }
 
-static int ll_back_merge_fn(request_queue_t *q, struct request *req,
-			    struct bio *bio)
+int ll_back_merge_fn(request_queue_t *q, struct request *req, struct bio *bio)
 {
 	unsigned short max_sectors;
 	int len;
@@ -1442,6 +1441,7 @@ static int ll_back_merge_fn(request_queue_t *q, struct request *req,
 
 	return ll_new_hw_segment(q, req, bio);
 }
+EXPORT_SYMBOL(ll_back_merge_fn);
 
 static int ll_front_merge_fn(request_queue_t *q, struct request *req,
 			     struct bio *bio)
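With the queue-level hooks removed, ll_back_merge_fn() is exported so code outside the block core can still ask whether a bio may be appended to an existing request. A minimal sketch of such a caller, modeled on the __blk_rq_map_user() path later in this diff; the helper name sketch_append_bio and its surrounding context are illustrative, not part of the commit:

#include <linux/blkdev.h>
#include <linux/bio.h>

static int sketch_append_bio(request_queue_t *q, struct request *rq,
			     struct bio *bio)
{
	/* ll_back_merge_fn() checks the queue's segment and size limits */
	if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;

	/* same bookkeeping the request-mapping path in this diff performs */
	rq->biotail->bi_next = bio;
	rq->biotail = bio;
	rq->data_len += bio->bi_size;
	return 0;
}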
@@ -1912,9 +1912,6 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 	}
 
 	q->request_fn		= rfn;
-	q->back_merge_fn	= ll_back_merge_fn;
-	q->front_merge_fn	= ll_front_merge_fn;
-	q->merge_requests_fn	= ll_merge_requests_fn;
 	q->prep_rq_fn		= NULL;
 	q->unplug_fn		= generic_unplug_device;
 	q->queue_flags		= (1 << QUEUE_FLAG_CLUSTER);
@@ -2350,40 +2347,29 @@ static int __blk_rq_map_user(request_queue_t *q, struct request *rq,
 	else
 		bio = bio_copy_user(q, uaddr, len, reading);
 
-	if (IS_ERR(bio)) {
+	if (IS_ERR(bio))
 		return PTR_ERR(bio);
-	}
 
 	orig_bio = bio;
 	blk_queue_bounce(q, &bio);
+
 	/*
 	 * We link the bounce buffer in and could have to traverse it
 	 * later so we have to get a ref to prevent it from being freed
 	 */
 	bio_get(bio);
 
-	/*
-	 * for most (all? don't know of any) queues we could
-	 * skip grabbing the queue lock here. only drivers with
-	 * funky private ->back_merge_fn() function could be
-	 * problematic.
-	 */
-	spin_lock_irq(q->queue_lock);
 	if (!rq->bio)
 		blk_rq_bio_prep(q, rq, bio);
-	else if (!q->back_merge_fn(q, rq, bio)) {
+	else if (!ll_back_merge_fn(q, rq, bio)) {
 		ret = -EINVAL;
-		spin_unlock_irq(q->queue_lock);
 		goto unmap_bio;
 	} else {
 		rq->biotail->bi_next = bio;
 		rq->biotail = bio;
 
-		rq->nr_sectors += bio_sectors(bio);
-		rq->hard_nr_sectors = rq->nr_sectors;
 		rq->data_len += bio->bi_size;
 	}
-	spin_unlock_irq(q->queue_lock);
 
 	return bio->bi_size;
 
@@ -2419,6 +2405,7 @@ int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
 		    unsigned long len)
 {
 	unsigned long bytes_read = 0;
+	struct bio *bio = NULL;
 	int ret;
 
 	if (len > (q->max_hw_sectors << 9))
@@ -2445,6 +2432,8 @@ int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
 		ret = __blk_rq_map_user(q, rq, ubuf, map_len);
 		if (ret < 0)
 			goto unmap_rq;
+		if (!bio)
+			bio = rq->bio;
 		bytes_read += ret;
 		ubuf += ret;
 	}
@@ -2452,7 +2441,7 @@ int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
 	rq->buffer = rq->data = NULL;
 	return 0;
 unmap_rq:
-	blk_rq_unmap_user(rq);
+	blk_rq_unmap_user(bio);
 	return ret;
 }
 
@@ -2509,27 +2498,33 @@ EXPORT_SYMBOL(blk_rq_map_user_iov);
 
 /**
  * blk_rq_unmap_user - unmap a request with user data
- * @rq:	rq to be unmapped
+ * @bio:	start of bio list
  *
  * Description:
- *    Unmap a rq previously mapped by blk_rq_map_user().
- *    rq->bio must be set to the original head of the request.
+ *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
+ *    supply the original rq->bio from the blk_rq_map_user() return, since
+ *    the io completion may have changed rq->bio.
  */
-int blk_rq_unmap_user(struct request *rq)
+int blk_rq_unmap_user(struct bio *bio)
 {
-	struct bio *bio, *mapped_bio;
+	struct bio *mapped_bio;
+	int ret = 0, ret2;
 
-	while ((bio = rq->bio)) {
-		if (bio_flagged(bio, BIO_BOUNCED))
+	while (bio) {
+		mapped_bio = bio;
+		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
 			mapped_bio = bio->bi_private;
-		else
-			mapped_bio = bio;
 
-		__blk_rq_unmap_user(mapped_bio);
-		rq->bio = bio->bi_next;
-		bio_put(bio);
+		ret2 = __blk_rq_unmap_user(mapped_bio);
+		if (ret2 && !ret)
+			ret = ret2;
+
+		mapped_bio = bio;
+		bio = bio->bi_next;
+		bio_put(mapped_bio);
 	}
-	return 0;
+
+	return ret;
 }
 
 EXPORT_SYMBOL(blk_rq_unmap_user);
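From the caller's side, the new convention is: map, remember rq->bio, submit, then unmap with the remembered pointer. A minimal sketch assuming a synchronous submit via blk_execute_rq(); the function name and error handling are illustrative, mirroring how sg_io() uses this API rather than anything this commit adds:

#include <linux/blkdev.h>
#include <linux/bio.h>

static int sketch_map_and_unmap(request_queue_t *q, struct request *rq,
				void __user *ubuf, unsigned long len)
{
	struct bio *bio;
	int err, ret;

	err = blk_rq_map_user(q, rq, ubuf, len);
	if (err)
		return err;

	/*
	 * Remember the head of the bio list now: completion may change
	 * rq->bio, so it cannot be trusted after the I/O has run.
	 */
	bio = rq->bio;

	err = blk_execute_rq(q, rq->rq_disk, rq, 0);

	/* unmap with the saved head, per the new blk_rq_unmap_user() */
	ret = blk_rq_unmap_user(bio);
	return err ? err : ret;
}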
@@ -2822,7 +2817,7 @@ static int attempt_merge(request_queue_t *q, struct request *req,
 	 * will have updated segment counts, update sector
 	 * counts here.
 	 */
-	if (!q->merge_requests_fn(q, req, next))
+	if (!ll_merge_requests_fn(q, req, next))
 		return 0;
 
 	/*
@@ -2939,7 +2934,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 		case ELEVATOR_BACK_MERGE:
 			BUG_ON(!rq_mergeable(req));
 
-			if (!q->back_merge_fn(q, req, bio))
+			if (!ll_back_merge_fn(q, req, bio))
 				break;
 
 			blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
@@ -2956,7 +2951,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 		case ELEVATOR_FRONT_MERGE:
 			BUG_ON(!rq_mergeable(req));
 
-			if (!q->front_merge_fn(q, req, bio))
+			if (!ll_front_merge_fn(q, req, bio))
 				break;
 
 			blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);