author		Linus Torvalds <torvalds@woody.osdl.org>	2006-12-21 03:03:38 -0500
committer	Linus Torvalds <torvalds@woody.osdl.org>	2006-12-21 03:03:38 -0500
commit		4604096768d3be37ee1a05aee424aceed3e1b56f (patch)
tree		56010e180bb32be7e57971e4bb617c28d0d09099 /block
parent		8df8bb4adf7e4abb48d29dc16c29eda40a64afed (diff)
parent		126ec9a676f601818dc3a85af0552b146410d888 (diff)
Merge branch 'for-linus' of git://brick.kernel.dk/data/git/linux-2.6-block
* 'for-linus' of git://brick.kernel.dk/data/git/linux-2.6-block:
  [PATCH] block: document io scheduler allow_merge_fn hook
  [PATCH] cfq-iosched: don't allow sync merges across queues
  [PATCH] Fixup blk_rq_unmap_user() API
  [PATCH] __blk_rq_unmap_user() fails to return error
  [PATCH] __blk_rq_map_user() doesn't need to grab the queue_lock
  [PATCH] Remove queue merging hooks
  [PATCH] ->nr_sectors and ->hard_nr_sectors are not used for BLOCK_PC requests
  [PATCH] cciss: fix XFER_READ/XFER_WRITE in do_cciss_request
  [PATCH] cciss: set default raid level when reading geometry fails
Diffstat (limited to 'block')
-rw-r--r--	block/cfq-iosched.c	33
-rw-r--r--	block/elevator.c	26
-rw-r--r--	block/ll_rw_blk.c	67
-rw-r--r--	block/scsi_ioctl.c	3
4 files changed, 87 insertions(+), 42 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 533a2938ff..9fc5eafa6c 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -568,6 +568,38 @@ cfq_merged_requests(request_queue_t *q, struct request *rq,
 	cfq_remove_request(next);
 }
 
+static int cfq_allow_merge(request_queue_t *q, struct request *rq,
+			   struct bio *bio)
+{
+	struct cfq_data *cfqd = q->elevator->elevator_data;
+	const int rw = bio_data_dir(bio);
+	struct cfq_queue *cfqq;
+	pid_t key;
+
+	/*
+	 * If bio is async or a write, always allow merge
+	 */
+	if (!bio_sync(bio) || rw == WRITE)
+		return 1;
+
+	/*
+	 * bio is sync. if request is not, disallow.
+	 */
+	if (!rq_is_sync(rq))
+		return 0;
+
+	/*
+	 * Ok, both bio and request are sync. Allow merge if they are
+	 * from the same queue.
+	 */
+	key = cfq_queue_pid(current, rw, 1);
+	cfqq = cfq_find_cfq_hash(cfqd, key, current->ioprio);
+	if (cfqq != RQ_CFQQ(rq))
+		return 0;
+
+	return 1;
+}
+
 static inline void
 __cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
@@ -2125,6 +2157,7 @@ static struct elevator_type iosched_cfq = {
 		.elevator_merge_fn = cfq_merge,
 		.elevator_merged_fn = cfq_merged_request,
 		.elevator_merge_req_fn = cfq_merged_requests,
+		.elevator_allow_merge_fn = cfq_allow_merge,
 		.elevator_dispatch_fn = cfq_dispatch_requests,
 		.elevator_add_req_fn = cfq_insert_request,
 		.elevator_activate_req_fn = cfq_activate_request,
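
For reference, the hook added above is optional: elv_rq_merge_ok() (see the elevator.c change below) only calls it after the generic merge checks pass, and a scheduler that leaves elevator_allow_merge_fn unset keeps the old behaviour of allowing every otherwise-valid merge. The following is a minimal sketch of how another io scheduler might wire up the hook; the scheduler name and the merge policy are purely illustrative, only the .elevator_allow_merge_fn signature comes from this commit:

/*
 * Illustrative only: a hypothetical io scheduler vetoing merges.
 * Return 1 to allow bio to be merged into rq, 0 to refuse.
 */
static int example_allow_merge(request_queue_t *q, struct request *rq,
			       struct bio *bio)
{
	/* e.g. only merge when bio and rq have the same data direction */
	return bio_data_dir(bio) == rq_data_dir(rq);
}

static struct elevator_type iosched_example = {
	.ops = {
		/* ...the scheduler's other elevator_*_fn hooks... */
		.elevator_allow_merge_fn = example_allow_merge,
	},
	.elevator_name = "example",
	.elevator_owner = THIS_MODULE,
};
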
diff --git a/block/elevator.c b/block/elevator.c
index c0063f345c..62c7a3069d 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -51,6 +51,21 @@ static const int elv_hash_shift = 6;
 #define ELV_ON_HASH(rq)	(!hlist_unhashed(&(rq)->hash))
 
 /*
+ * Query io scheduler to see if the current process issuing bio may be
+ * merged with rq.
+ */
+static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
+{
+	request_queue_t *q = rq->q;
+	elevator_t *e = q->elevator;
+
+	if (e->ops->elevator_allow_merge_fn)
+		return e->ops->elevator_allow_merge_fn(q, rq, bio);
+
+	return 1;
+}
+
+/*
  * can we safely merge with this request?
  */
 inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
@@ -65,12 +80,15 @@ inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
 		return 0;
 
 	/*
-	 * same device and no special stuff set, merge is ok
+	 * must be same device and not a special request
 	 */
-	if (rq->rq_disk == bio->bi_bdev->bd_disk && !rq->special)
-		return 1;
+	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
+		return 0;
 
-	return 0;
+	if (!elv_iosched_allow_merge(rq, bio))
+		return 0;
+
+	return 1;
 }
 EXPORT_SYMBOL(elv_rq_merge_ok);
 
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 79807dbc30..e07c079e07 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1405,8 +1405,7 @@ static inline int ll_new_hw_segment(request_queue_t *q,
 	return 1;
 }
 
-static int ll_back_merge_fn(request_queue_t *q, struct request *req,
-			    struct bio *bio)
+int ll_back_merge_fn(request_queue_t *q, struct request *req, struct bio *bio)
 {
 	unsigned short max_sectors;
 	int len;
@@ -1442,6 +1441,7 @@ static int ll_back_merge_fn(request_queue_t *q, struct request *req,
 
 	return ll_new_hw_segment(q, req, bio);
 }
+EXPORT_SYMBOL(ll_back_merge_fn);
 
 static int ll_front_merge_fn(request_queue_t *q, struct request *req,
 			     struct bio *bio)
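
Since ll_back_merge_fn() is now exported, code that builds a request one bio at a time can check the queue's merge limits directly instead of going through the removed q->back_merge_fn pointer. The sketch below mirrors how __blk_rq_map_user() does it later in this diff; the helper name and return codes are illustrative and not part of this commit:

/*
 * Illustrative helper: append bio to rq, validating the back merge
 * against the queue limits via the exported ll_back_merge_fn(),
 * the same way __blk_rq_map_user() does after this series.
 */
static int example_append_bio(request_queue_t *q, struct request *rq,
			      struct bio *bio)
{
	if (!rq->bio) {
		/* first bio simply starts the request */
		blk_rq_bio_prep(q, rq, bio);
		return 0;
	}

	if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;	/* would exceed segment/sector limits */

	rq->biotail->bi_next = bio;
	rq->biotail = bio;
	rq->data_len += bio->bi_size;
	return 0;
}
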
@@ -1912,9 +1912,6 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 	}
 
 	q->request_fn		= rfn;
-	q->back_merge_fn	= ll_back_merge_fn;
-	q->front_merge_fn	= ll_front_merge_fn;
-	q->merge_requests_fn	= ll_merge_requests_fn;
 	q->prep_rq_fn		= NULL;
 	q->unplug_fn		= generic_unplug_device;
 	q->queue_flags		= (1 << QUEUE_FLAG_CLUSTER);
@@ -2350,40 +2347,29 @@ static int __blk_rq_map_user(request_queue_t *q, struct request *rq,
 	else
 		bio = bio_copy_user(q, uaddr, len, reading);
 
-	if (IS_ERR(bio)) {
+	if (IS_ERR(bio))
 		return PTR_ERR(bio);
-	}
 
 	orig_bio = bio;
 	blk_queue_bounce(q, &bio);
+
 	/*
 	 * We link the bounce buffer in and could have to traverse it
 	 * later so we have to get a ref to prevent it from being freed
 	 */
 	bio_get(bio);
 
-	/*
-	 * for most (all? don't know of any) queues we could
-	 * skip grabbing the queue lock here. only drivers with
-	 * funky private ->back_merge_fn() function could be
-	 * problematic.
-	 */
-	spin_lock_irq(q->queue_lock);
 	if (!rq->bio)
 		blk_rq_bio_prep(q, rq, bio);
-	else if (!q->back_merge_fn(q, rq, bio)) {
+	else if (!ll_back_merge_fn(q, rq, bio)) {
 		ret = -EINVAL;
-		spin_unlock_irq(q->queue_lock);
 		goto unmap_bio;
 	} else {
 		rq->biotail->bi_next = bio;
 		rq->biotail = bio;
 
-		rq->nr_sectors += bio_sectors(bio);
-		rq->hard_nr_sectors = rq->nr_sectors;
 		rq->data_len += bio->bi_size;
 	}
-	spin_unlock_irq(q->queue_lock);
 
 	return bio->bi_size;
 
@@ -2419,6 +2405,7 @@ int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
 		    unsigned long len)
 {
 	unsigned long bytes_read = 0;
+	struct bio *bio = NULL;
 	int ret;
 
 	if (len > (q->max_hw_sectors << 9))
@@ -2445,6 +2432,8 @@ int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
 		ret = __blk_rq_map_user(q, rq, ubuf, map_len);
 		if (ret < 0)
 			goto unmap_rq;
+		if (!bio)
+			bio = rq->bio;
 		bytes_read += ret;
 		ubuf += ret;
 	}
@@ -2452,7 +2441,7 @@ int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
 	rq->buffer = rq->data = NULL;
 	return 0;
 unmap_rq:
-	blk_rq_unmap_user(rq);
+	blk_rq_unmap_user(bio);
 	return ret;
 }
 
@@ -2509,27 +2498,33 @@ EXPORT_SYMBOL(blk_rq_map_user_iov);
 
 /**
  * blk_rq_unmap_user - unmap a request with user data
- * @rq:		rq to be unmapped
+ * @bio:	start of bio list
  *
  * Description:
- *    Unmap a rq previously mapped by blk_rq_map_user().
- *    rq->bio must be set to the original head of the request.
+ *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
+ *    supply the original rq->bio from the blk_rq_map_user() return, since
+ *    the io completion may have changed rq->bio.
  */
-int blk_rq_unmap_user(struct request *rq)
+int blk_rq_unmap_user(struct bio *bio)
 {
-	struct bio *bio, *mapped_bio;
+	struct bio *mapped_bio;
+	int ret = 0, ret2;
 
-	while ((bio = rq->bio)) {
-		if (bio_flagged(bio, BIO_BOUNCED))
+	while (bio) {
+		mapped_bio = bio;
+		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
 			mapped_bio = bio->bi_private;
-		else
-			mapped_bio = bio;
 
-		__blk_rq_unmap_user(mapped_bio);
-		rq->bio = bio->bi_next;
-		bio_put(bio);
+		ret2 = __blk_rq_unmap_user(mapped_bio);
+		if (ret2 && !ret)
+			ret = ret2;
+
+		mapped_bio = bio;
+		bio = bio->bi_next;
+		bio_put(mapped_bio);
 	}
-	return 0;
+
+	return ret;
 }
 
 EXPORT_SYMBOL(blk_rq_unmap_user);
@@ -2822,7 +2817,7 @@ static int attempt_merge(request_queue_t *q, struct request *req,
 	 * will have updated segment counts, update sector
 	 * counts here.
 	 */
-	if (!q->merge_requests_fn(q, req, next))
+	if (!ll_merge_requests_fn(q, req, next))
 		return 0;
 
 	/*
@@ -2939,7 +2934,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 		case ELEVATOR_BACK_MERGE:
 			BUG_ON(!rq_mergeable(req));
 
-			if (!q->back_merge_fn(q, req, bio))
+			if (!ll_back_merge_fn(q, req, bio))
 				break;
 
 			blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
@@ -2956,7 +2951,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 		case ELEVATOR_FRONT_MERGE:
 			BUG_ON(!rq_mergeable(req));
 
-			if (!q->front_merge_fn(q, req, bio))
+			if (!ll_front_merge_fn(q, req, bio))
 				break;
 
 			blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index f322b6a441..2528a0c0de 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -333,8 +333,7 @@ static int sg_io(struct file *file, request_queue_t *q,
 		hdr->sb_len_wr = len;
 	}
 
-	rq->bio = bio;
-	if (blk_rq_unmap_user(rq))
+	if (blk_rq_unmap_user(bio))
 		ret = -EFAULT;
 
 	/* may not have succeeded, but output values written to control
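
Taken together, the ll_rw_blk.c and scsi_ioctl.c changes define the new calling convention for mapped user data: blk_rq_map_user() still returns 0 on success, but the caller must remember rq->bio right after mapping and later pass that bio list to blk_rq_unmap_user(), because io completion may have changed rq->bio. A minimal sketch of a caller following the new convention; everything except the blk_* calls is illustrative and not from this commit:

/*
 * Illustrative caller of the reworked map/unmap API.
 */
static int example_user_io(request_queue_t *q, struct request *rq,
			   void __user *ubuf, unsigned long len)
{
	struct bio *bio;
	int ret;

	ret = blk_rq_map_user(q, rq, ubuf, len);
	if (ret)
		return ret;

	/*
	 * Save the mapped bio list now; completion may advance rq->bio,
	 * and blk_rq_unmap_user() now takes the bio list, not the rq.
	 */
	bio = rq->bio;

	blk_execute_rq(q, rq->rq_disk, rq, 0);	/* issue and wait */

	if (blk_rq_unmap_user(bio))
		ret = -EFAULT;

	return ret;
}
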