Diffstat:
 Documentation/block/biodoc.txt |  7
 block/cfq-iosched.c            | 33
 block/elevator.c               | 26
 block/ll_rw_blk.c              | 67
 block/scsi_ioctl.c             |  3
 drivers/block/cciss.c          |  3
 drivers/cdrom/cdrom.c          |  3
 drivers/scsi/scsi_lib.c        |  4
 include/linux/blkdev.h         | 14
 include/linux/elevator.h       |  3
 10 files changed, 107 insertions(+), 56 deletions(-)
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index c6c9a9c10d7f..3adaace328a6 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -946,6 +946,13 @@ elevator_merged_fn called when a request in the scheduler has been
 			scheduler for example, to reposition the request
 			if its sorting order has changed.
 
+elevator_allow_merge_fn called whenever the block layer determines
+			that a bio can be merged into an existing
+			request safely. The io scheduler may still
+			want to stop a merge at this point if it
+			results in some sort of conflict internally,
+			this hook allows it to do that.
+
 elevator_dispatch_fn	fills the dispatch queue with ready requests.
 			I/O schedulers are free to postpone requests by
 			not filling the dispatch queue unless @force
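
For illustration only, not part of this patch: a minimal sketch of what an io scheduler's veto hook could look like. The "foo" names are hypothetical; the signature is the elevator_allow_merge_fn type this series adds to include/linux/elevator.h, and bio_sync()/rq_is_sync() are the same helpers the cfq implementation below uses.

	/*
	 * Hypothetical hook (illustration only): veto merges where the bio
	 * and the request disagree on sync vs async, permit everything else.
	 */
	static int foo_allow_merge(request_queue_t *q, struct request *rq,
				   struct bio *bio)
	{
		if (!bio_sync(bio) != !rq_is_sync(rq))
			return 0;	/* returning 0 stops the merge */

		return 1;		/* returning 1 permits it */
	}

	static struct elevator_type iosched_foo = {
		.ops = {
			.elevator_allow_merge_fn =	foo_allow_merge,
			/* ... remaining hooks elided ... */
		},
	};
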
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 533a2938ffd6..9fc5eafa6c0e 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -568,6 +568,38 @@ cfq_merged_requests(request_queue_t *q, struct request *rq,
 	cfq_remove_request(next);
 }
 
+static int cfq_allow_merge(request_queue_t *q, struct request *rq,
+			   struct bio *bio)
+{
+	struct cfq_data *cfqd = q->elevator->elevator_data;
+	const int rw = bio_data_dir(bio);
+	struct cfq_queue *cfqq;
+	pid_t key;
+
+	/*
+	 * If bio is async or a write, always allow merge
+	 */
+	if (!bio_sync(bio) || rw == WRITE)
+		return 1;
+
+	/*
+	 * bio is sync. if request is not, disallow.
+	 */
+	if (!rq_is_sync(rq))
+		return 0;
+
+	/*
+	 * Ok, both bio and request are sync. Allow merge if they are
+	 * from the same queue.
+	 */
+	key = cfq_queue_pid(current, rw, 1);
+	cfqq = cfq_find_cfq_hash(cfqd, key, current->ioprio);
+	if (cfqq != RQ_CFQQ(rq))
+		return 0;
+
+	return 1;
+}
+
 static inline void
 __cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
@@ -2125,6 +2157,7 @@ static struct elevator_type iosched_cfq = {
 		.elevator_merge_fn =		cfq_merge,
 		.elevator_merged_fn =		cfq_merged_request,
 		.elevator_merge_req_fn =	cfq_merged_requests,
+		.elevator_allow_merge_fn =	cfq_allow_merge,
 		.elevator_dispatch_fn =		cfq_dispatch_requests,
 		.elevator_add_req_fn =		cfq_insert_request,
 		.elevator_activate_req_fn =	cfq_activate_request,
diff --git a/block/elevator.c b/block/elevator.c
index c0063f345c5d..62c7a3069d3a 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -51,6 +51,21 @@ static const int elv_hash_shift = 6;
 #define ELV_ON_HASH(rq)	(!hlist_unhashed(&(rq)->hash))
 
 /*
+ * Query io scheduler to see if the current process issuing bio may be
+ * merged with rq.
+ */
+static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
+{
+	request_queue_t *q = rq->q;
+	elevator_t *e = q->elevator;
+
+	if (e->ops->elevator_allow_merge_fn)
+		return e->ops->elevator_allow_merge_fn(q, rq, bio);
+
+	return 1;
+}
+
+/*
  * can we safely merge with this request?
  */
 inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
@@ -65,12 +80,15 @@ inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
 		return 0;
 
 	/*
-	 * same device and no special stuff set, merge is ok
+	 * must be same device and not a special request
 	 */
-	if (rq->rq_disk == bio->bi_bdev->bd_disk && !rq->special)
-		return 1;
+	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
+		return 0;
 
-	return 0;
+	if (!elv_iosched_allow_merge(rq, bio))
+		return 0;
+
+	return 1;
 }
 EXPORT_SYMBOL(elv_rq_merge_ok);
 
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 79807dbc306e..e07c079e07e6 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1405,8 +1405,7 @@ static inline int ll_new_hw_segment(request_queue_t *q,
 	return 1;
 }
 
-static int ll_back_merge_fn(request_queue_t *q, struct request *req,
-			    struct bio *bio)
+int ll_back_merge_fn(request_queue_t *q, struct request *req, struct bio *bio)
 {
 	unsigned short max_sectors;
 	int len;
@@ -1442,6 +1441,7 @@ static int ll_back_merge_fn(request_queue_t *q, struct request *req,
 
 	return ll_new_hw_segment(q, req, bio);
 }
+EXPORT_SYMBOL(ll_back_merge_fn);
 
 static int ll_front_merge_fn(request_queue_t *q, struct request *req,
 			     struct bio *bio)
@@ -1912,9 +1912,6 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 	}
 
 	q->request_fn		= rfn;
-	q->back_merge_fn	= ll_back_merge_fn;
-	q->front_merge_fn	= ll_front_merge_fn;
-	q->merge_requests_fn	= ll_merge_requests_fn;
 	q->prep_rq_fn		= NULL;
 	q->unplug_fn		= generic_unplug_device;
 	q->queue_flags		= (1 << QUEUE_FLAG_CLUSTER);
@@ -2350,40 +2347,29 @@ static int __blk_rq_map_user(request_queue_t *q, struct request *rq,
 	else
 		bio = bio_copy_user(q, uaddr, len, reading);
 
-	if (IS_ERR(bio)) {
+	if (IS_ERR(bio))
 		return PTR_ERR(bio);
-	}
 
 	orig_bio = bio;
 	blk_queue_bounce(q, &bio);
+
 	/*
 	 * We link the bounce buffer in and could have to traverse it
 	 * later so we have to get a ref to prevent it from being freed
 	 */
 	bio_get(bio);
 
-	/*
-	 * for most (all? don't know of any) queues we could
-	 * skip grabbing the queue lock here. only drivers with
-	 * funky private ->back_merge_fn() function could be
-	 * problematic.
-	 */
-	spin_lock_irq(q->queue_lock);
 	if (!rq->bio)
 		blk_rq_bio_prep(q, rq, bio);
-	else if (!q->back_merge_fn(q, rq, bio)) {
+	else if (!ll_back_merge_fn(q, rq, bio)) {
 		ret = -EINVAL;
-		spin_unlock_irq(q->queue_lock);
 		goto unmap_bio;
 	} else {
 		rq->biotail->bi_next = bio;
 		rq->biotail = bio;
 
-		rq->nr_sectors += bio_sectors(bio);
-		rq->hard_nr_sectors = rq->nr_sectors;
 		rq->data_len += bio->bi_size;
 	}
-	spin_unlock_irq(q->queue_lock);
 
 	return bio->bi_size;
 
@@ -2419,6 +2405,7 @@ int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
 		    unsigned long len)
 {
 	unsigned long bytes_read = 0;
+	struct bio *bio = NULL;
 	int ret;
 
 	if (len > (q->max_hw_sectors << 9))
@@ -2445,6 +2432,8 @@ int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
 		ret = __blk_rq_map_user(q, rq, ubuf, map_len);
 		if (ret < 0)
 			goto unmap_rq;
+		if (!bio)
+			bio = rq->bio;
 		bytes_read += ret;
 		ubuf += ret;
 	}
@@ -2452,7 +2441,7 @@ int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
 	rq->buffer = rq->data = NULL;
 	return 0;
 unmap_rq:
-	blk_rq_unmap_user(rq);
+	blk_rq_unmap_user(bio);
 	return ret;
 }
 
@@ -2509,27 +2498,33 @@ EXPORT_SYMBOL(blk_rq_map_user_iov);
 
 /**
  * blk_rq_unmap_user - unmap a request with user data
- * @rq:		rq to be unmapped
+ * @bio:	start of bio list
  *
  * Description:
- *    Unmap a rq previously mapped by blk_rq_map_user().
- *    rq->bio must be set to the original head of the request.
+ *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
+ *    supply the original rq->bio from the blk_rq_map_user() return, since
+ *    the io completion may have changed rq->bio.
  */
-int blk_rq_unmap_user(struct request *rq)
+int blk_rq_unmap_user(struct bio *bio)
 {
-	struct bio *bio, *mapped_bio;
+	struct bio *mapped_bio;
+	int ret = 0, ret2;
 
-	while ((bio = rq->bio)) {
-		if (bio_flagged(bio, BIO_BOUNCED))
+	while (bio) {
+		mapped_bio = bio;
+		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
 			mapped_bio = bio->bi_private;
-		else
-			mapped_bio = bio;
 
-		__blk_rq_unmap_user(mapped_bio);
-		rq->bio = bio->bi_next;
-		bio_put(bio);
+		ret2 = __blk_rq_unmap_user(mapped_bio);
+		if (ret2 && !ret)
+			ret = ret2;
+
+		mapped_bio = bio;
+		bio = bio->bi_next;
+		bio_put(mapped_bio);
 	}
-	return 0;
+
+	return ret;
 }
 
 EXPORT_SYMBOL(blk_rq_unmap_user);
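
The rewritten kernel-doc implies a new calling convention. For illustration only (not part of this patch; the queue, disk and buffer names are assumed): a caller now saves rq->bio immediately after mapping and passes that saved head, not the request, to blk_rq_unmap_user() — the same pattern the scsi_ioctl.c and cdrom.c updates below adopt.

	struct bio *bio;
	int ret;

	ret = blk_rq_map_user(q, rq, ubuf, len);
	if (ret)
		return ret;

	bio = rq->bio;			/* save the original bio list head */
	blk_execute_rq(q, disk, rq, 0);	/* completion may change rq->bio */

	if (blk_rq_unmap_user(bio))	/* unmap via the saved head */
		ret = -EFAULT;
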
@@ -2822,7 +2817,7 @@ static int attempt_merge(request_queue_t *q, struct request *req,
 	 * will have updated segment counts, update sector
 	 * counts here.
 	 */
-	if (!q->merge_requests_fn(q, req, next))
+	if (!ll_merge_requests_fn(q, req, next))
 		return 0;
 
 	/*
@@ -2939,7 +2934,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 		case ELEVATOR_BACK_MERGE:
 			BUG_ON(!rq_mergeable(req));
 
-			if (!q->back_merge_fn(q, req, bio))
+			if (!ll_back_merge_fn(q, req, bio))
 				break;
 
 			blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
@@ -2956,7 +2951,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 		case ELEVATOR_FRONT_MERGE:
 			BUG_ON(!rq_mergeable(req));
 
-			if (!q->front_merge_fn(q, req, bio))
+			if (!ll_front_merge_fn(q, req, bio))
 				break;
 
 			blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index f322b6a441d8..2528a0c0dec8 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -333,8 +333,7 @@ static int sg_io(struct file *file, request_queue_t *q,
 		hdr->sb_len_wr = len;
 	}
 
-	rq->bio = bio;
-	if (blk_rq_unmap_user(rq))
+	if (blk_rq_unmap_user(bio))
 		ret = -EFAULT;
 
 	/* may not have succeeded, but output values written to control
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index d719a5d8f435..9d2ddb209343 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1907,6 +1907,7 @@ static void cciss_geometry_inquiry(int ctlr, int logvol,
1907 "does not support reading geometry\n"); 1907 "does not support reading geometry\n");
1908 drv->heads = 255; 1908 drv->heads = 255;
1909 drv->sectors = 32; // Sectors per track 1909 drv->sectors = 32; // Sectors per track
1910 drv->raid_level = RAID_UNKNOWN;
1910 } else { 1911 } else {
1911 drv->heads = inq_buff->data_byte[6]; 1912 drv->heads = inq_buff->data_byte[6];
1912 drv->sectors = inq_buff->data_byte[7]; 1913 drv->sectors = inq_buff->data_byte[7];
@@ -2491,7 +2492,7 @@ static void do_cciss_request(request_queue_t *q)
 	c->Request.Type.Type = TYPE_CMD;	// It is a command.
 	c->Request.Type.Attribute = ATTR_SIMPLE;
 	c->Request.Type.Direction =
-	    (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
+	    (rq_data_dir(creq) == READ) ? XFER_READ : XFER_WRITE;
 	c->Request.Timeout = 0;	// Don't time out
 	c->Request.CDB[0] =
 	    (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index e4a2f8f3a1d7..66d028d30439 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2139,8 +2139,7 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
 			cdi->last_sense = s->sense_key;
 	}
 
-	rq->bio = bio;
-	if (blk_rq_unmap_user(rq))
+	if (blk_rq_unmap_user(bio))
 		ret = -EFAULT;
 
 	if (ret)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 1748e27501cd..f02f48a882a9 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -265,13 +265,11 @@ static int scsi_merge_bio(struct request *rq, struct bio *bio)
 
 	if (!rq->bio)
 		blk_rq_bio_prep(q, rq, bio);
-	else if (!q->back_merge_fn(q, rq, bio))
+	else if (!ll_back_merge_fn(q, rq, bio))
 		return -EINVAL;
 	else {
 		rq->biotail->bi_next = bio;
 		rq->biotail = bio;
-		rq->hard_nr_sectors += bio_sectors(bio);
-		rq->nr_sectors = rq->hard_nr_sectors;
 	}
 
 	return 0;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index ea330d7b46c0..36a6eacefe20 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -331,10 +331,6 @@ struct request_pm_state
 
 #include <linux/elevator.h>
 
-typedef int (merge_request_fn) (request_queue_t *, struct request *,
-				struct bio *);
-typedef int (merge_requests_fn) (request_queue_t *, struct request *,
-				 struct request *);
 typedef void (request_fn_proc) (request_queue_t *q);
 typedef int (make_request_fn) (request_queue_t *q, struct bio *bio);
 typedef int (prep_rq_fn) (request_queue_t *, struct request *);
@@ -376,9 +372,6 @@ struct request_queue
 	struct request_list	rq;
 
 	request_fn_proc		*request_fn;
-	merge_request_fn	*back_merge_fn;
-	merge_request_fn	*front_merge_fn;
-	merge_requests_fn	*merge_requests_fn;
 	make_request_fn		*make_request_fn;
 	prep_rq_fn		*prep_rq_fn;
 	unplug_fn		*unplug_fn;
@@ -649,6 +642,11 @@ extern int sg_scsi_ioctl(struct file *, struct request_queue *,
 		struct gendisk *, struct scsi_ioctl_command __user *);
 
 /*
+ * Temporary export, until SCSI gets fixed up.
+ */
+extern int ll_back_merge_fn(request_queue_t *, struct request *, struct bio *);
+
+/*
  * A queue has just exitted congestion. Note this in the global counter of
  * congested queues, and wake up anyone who was waiting for requests to be
  * put back.
@@ -674,7 +672,7 @@ extern void __blk_stop_queue(request_queue_t *q);
 extern void blk_run_queue(request_queue_t *);
 extern void blk_start_queueing(request_queue_t *);
 extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned long);
-extern int blk_rq_unmap_user(struct request *);
+extern int blk_rq_unmap_user(struct bio *);
 extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned int, gfp_t);
 extern int blk_rq_map_user_iov(request_queue_t *, struct request *,
 		struct sg_iovec *, int, unsigned int);
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index a24931d24404..e88fcbc77f8f 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -12,6 +12,8 @@ typedef void (elevator_merge_req_fn) (request_queue_t *, struct request *, struc
 
 typedef void (elevator_merged_fn) (request_queue_t *, struct request *, int);
 
+typedef int (elevator_allow_merge_fn) (request_queue_t *, struct request *, struct bio *);
+
 typedef int (elevator_dispatch_fn) (request_queue_t *, int);
 
 typedef void (elevator_add_req_fn) (request_queue_t *, struct request *);
@@ -33,6 +35,7 @@ struct elevator_ops
 	elevator_merge_fn *elevator_merge_fn;
 	elevator_merged_fn *elevator_merged_fn;
 	elevator_merge_req_fn *elevator_merge_req_fn;
+	elevator_allow_merge_fn *elevator_allow_merge_fn;
 
 	elevator_dispatch_fn *elevator_dispatch_fn;
 	elevator_add_req_fn *elevator_add_req_fn;