author		Jens Axboe <axboe@suse.de>	2005-10-20 10:42:29 -0400
committer	Jens Axboe <axboe@nelson.home.kernel.dk>	2005-10-28 02:45:08 -0400
commit		b4878f245ec8e168cdd1f170f823a750b7dd4af5 (patch)
tree		de784c2a7e1174e4843807998f0356bf92ee78be /drivers/block/deadline-iosched.c
parent		d9ebb192aa13a026edc6faff137dcb14f2c91731 (diff)
[PATCH] 02/05: update ioscheds to use generic dispatch queue
This patch updates all four ioscheds to use the generic dispatch
queue. There's one behavior change in as-iosched.

* In as-iosched, when force dispatching (ELEVATOR_INSERT_BACK),
  batch_data_dir is reset to REQ_SYNC and changed_batch and
  new_batch are cleared to zero. This prevents AS from doing an
  incorrect update_write_batch after the force-dispatched
  requests are finished.

* In cfq-iosched, cfqd->rq_in_driver currently counts the number
  of activated (removed) requests to determine whether
  queue-kicking is needed and whether cfq_max_depth has been
  reached. With the generic dispatch queue, I think counting the
  number of dispatched requests is more appropriate.

* cfq_max_depth can be lowered to 1 again.

Original from Tejun Heo, modified version applied.
Signed-off-by: Jens Axboe <axboe@suse.de>
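
For readers new to the series: the old interface had each scheduler keep
its own dispatch list and hand requests to the driver one at a time via
->elevator_next_req_fn. After this series the dispatch list lives in
q->queue_head and the generic layer asks the scheduler to refill it
through ->elevator_dispatch_fn. A minimal sketch of the caller side,
assuming only the hook semantics visible in this patch (the function
name below is hypothetical, not the actual elevator.c code):

	/* hypothetical generic-layer caller, for illustration only */
	static struct request *sketch_next_request(request_queue_t *q)
	{
		elevator_t *e = q->elevator;

		while (list_empty(&q->queue_head)) {
			/* returns nonzero if it moved work to q->queue_head */
			if (!e->ops->elevator_dispatch_fn(q, 0))
				return NULL;
		}
		return list_entry_rq(q->queue_head.next);
	}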
Diffstat (limited to 'drivers/block/deadline-iosched.c')
 drivers/block/deadline-iosched.c | 95 ++++------------
 1 file changed, 17 insertions(+), 78 deletions(-)
diff --git a/drivers/block/deadline-iosched.c b/drivers/block/deadline-iosched.c
index 52a3ae5289a0..07de4d24ddba 100644
--- a/drivers/block/deadline-iosched.c
+++ b/drivers/block/deadline-iosched.c
@@ -50,7 +50,6 @@ struct deadline_data {
 	 * next in sort order. read, write or both are NULL
 	 */
 	struct deadline_rq *next_drq[2];
-	struct list_head *dispatch;	/* driver dispatch queue */
 	struct list_head *hash;		/* request hash */
 	unsigned int batching;		/* number of sequential requests made */
 	sector_t last_sector;		/* head position */
@@ -239,10 +238,9 @@ deadline_del_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
 		dd->next_drq[data_dir] = rb_entry_drq(rbnext);
 	}

-	if (ON_RB(&drq->rb_node)) {
-		rb_erase(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
-		RB_CLEAR(&drq->rb_node);
-	}
+	BUG_ON(!ON_RB(&drq->rb_node));
+	rb_erase(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
+	RB_CLEAR(&drq->rb_node);
 }

 static struct request *
@@ -286,7 +284,7 @@ deadline_find_first_drq(struct deadline_data *dd, int data_dir)
 /*
  * add drq to rbtree and fifo
  */
-static inline void
+static void
 deadline_add_request(struct request_queue *q, struct request *rq)
 {
 	struct deadline_data *dd = q->elevator->elevator_data;
@@ -315,14 +313,11 @@ deadline_add_request(struct request_queue *q, struct request *rq)
 static void deadline_remove_request(request_queue_t *q, struct request *rq)
 {
 	struct deadline_rq *drq = RQ_DATA(rq);
+	struct deadline_data *dd = q->elevator->elevator_data;

-	if (drq) {
-		struct deadline_data *dd = q->elevator->elevator_data;
-
-		list_del_init(&drq->fifo);
-		deadline_remove_merge_hints(q, drq);
-		deadline_del_drq_rb(dd, drq);
-	}
+	list_del_init(&drq->fifo);
+	deadline_remove_merge_hints(q, drq);
+	deadline_del_drq_rb(dd, drq);
 }

 static int
@@ -452,7 +447,7 @@ deadline_move_to_dispatch(struct deadline_data *dd, struct deadline_rq *drq)
 	request_queue_t *q = drq->request->q;

 	deadline_remove_request(q, drq->request);
-	list_add_tail(&drq->request->queuelist, dd->dispatch);
+	elv_dispatch_add_tail(q, drq->request);
 }

 /*
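
elv_dispatch_add_tail() is the new generic-layer helper replacing the
open-coded list_add_tail() onto dd->dispatch. Since deadline_init_queue()
pointed dd->dispatch at &q->queue_head (see the init hunk below), the net
effect for deadline is unchanged; a sketch of what the call amounts to
here, with the real helper's extra bookkeeping deliberately omitted as an
assumption:

	/* hypothetical reduction of elv_dispatch_add_tail() for this case */
	static void sketch_dispatch_add_tail(request_queue_t *q,
					     struct request *rq)
	{
		/* the dispatch list the scheduler used to own now lives in q */
		list_add_tail(&rq->queuelist, &q->queue_head);
	}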
@@ -502,8 +497,9 @@ static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
  * deadline_dispatch_requests selects the best request according to
  * read/write expire, fifo_batch, etc
  */
-static int deadline_dispatch_requests(struct deadline_data *dd)
+static int deadline_dispatch_requests(request_queue_t *q, int force)
 {
+	struct deadline_data *dd = q->elevator->elevator_data;
 	const int reads = !list_empty(&dd->fifo_list[READ]);
 	const int writes = !list_empty(&dd->fifo_list[WRITE]);
 	struct deadline_rq *drq;
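
Note the new signature: the dispatch hook now takes the queue plus a
force flag instead of the scheduler's private data. The flag lets the
generic layer drain the scheduler completely, the role the removed
ELEVATOR_INSERT_BACK loop below played inside deadline itself; deadline
keeps no anticipation state, so it can ignore it. A sketch of the drain
idiom as the generic layer might use it (the helper name is made up):

	/* hypothetical generic-layer drain; mirrors the removed
	 * "while (deadline_dispatch_requests(dd)) ;" loop */
	static void sketch_drain_elevator(request_queue_t *q)
	{
		while (q->elevator->ops->elevator_dispatch_fn(q, 1))
			;	/* each call moves requests to q->queue_head */
	}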
@@ -597,65 +593,12 @@ dispatch_request:
 	return 1;
 }

-static struct request *deadline_next_request(request_queue_t *q)
-{
-	struct deadline_data *dd = q->elevator->elevator_data;
-	struct request *rq;
-
-	/*
-	 * if there are still requests on the dispatch queue, grab the first one
-	 */
-	if (!list_empty(dd->dispatch)) {
-dispatch:
-		rq = list_entry_rq(dd->dispatch->next);
-		return rq;
-	}
-
-	if (deadline_dispatch_requests(dd))
-		goto dispatch;
-
-	return NULL;
-}
-
-static void
-deadline_insert_request(request_queue_t *q, struct request *rq, int where)
-{
-	struct deadline_data *dd = q->elevator->elevator_data;
-
-	/* barriers must flush the reorder queue */
-	if (unlikely(rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)
-			&& where == ELEVATOR_INSERT_SORT))
-		where = ELEVATOR_INSERT_BACK;
-
-	switch (where) {
-	case ELEVATOR_INSERT_BACK:
-		while (deadline_dispatch_requests(dd))
-			;
-		list_add_tail(&rq->queuelist, dd->dispatch);
-		break;
-	case ELEVATOR_INSERT_FRONT:
-		list_add(&rq->queuelist, dd->dispatch);
-		break;
-	case ELEVATOR_INSERT_SORT:
-		BUG_ON(!blk_fs_request(rq));
-		deadline_add_request(q, rq);
-		break;
-	default:
-		printk("%s: bad insert point %d\n", __FUNCTION__, where);
-		return;
-	}
-}
-
 static int deadline_queue_empty(request_queue_t *q)
 {
 	struct deadline_data *dd = q->elevator->elevator_data;

-	if (!list_empty(&dd->fifo_list[WRITE])
-		|| !list_empty(&dd->fifo_list[READ])
-		|| !list_empty(dd->dispatch))
-		return 0;
-
-	return 1;
+	return list_empty(&dd->fifo_list[WRITE])
+		&& list_empty(&dd->fifo_list[READ]);
 }

 static struct request *
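
deadline_queue_empty() also stops looking at dd->dispatch: requests
already on the dispatch list are the generic layer's business now.
Presumably the generic wrapper checks q->queue_head first and only then
consults the scheduler, roughly like this (a sketch of the expected
elv_queue_empty() logic, not verified against this tree's elevator.c):

	/* hypothetical sketch of the generic-layer emptiness check */
	static int sketch_queue_empty(request_queue_t *q)
	{
		if (!list_empty(&q->queue_head))
			return 0;	/* dispatched work still pending */
		if (q->elevator->ops->elevator_queue_empty_fn)
			return q->elevator->ops->elevator_queue_empty_fn(q);
		return 1;
	}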
@@ -733,7 +676,6 @@ static int deadline_init_queue(request_queue_t *q, elevator_t *e)
 	INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
 	dd->sort_list[READ] = RB_ROOT;
 	dd->sort_list[WRITE] = RB_ROOT;
-	dd->dispatch = &q->queue_head;
 	dd->fifo_expire[READ] = read_expire;
 	dd->fifo_expire[WRITE] = write_expire;
 	dd->writes_starved = writes_starved;
@@ -748,10 +690,8 @@ static void deadline_put_request(request_queue_t *q, struct request *rq)
 	struct deadline_data *dd = q->elevator->elevator_data;
 	struct deadline_rq *drq = RQ_DATA(rq);

-	if (drq) {
-		mempool_free(drq, dd->drq_pool);
-		rq->elevator_private = NULL;
-	}
+	mempool_free(drq, dd->drq_pool);
+	rq->elevator_private = NULL;
 }

 static int
@@ -917,9 +857,8 @@ static struct elevator_type iosched_deadline = {
 	.elevator_merge_fn =		deadline_merge,
 	.elevator_merged_fn =		deadline_merged_request,
 	.elevator_merge_req_fn =	deadline_merged_requests,
-	.elevator_next_req_fn =		deadline_next_request,
-	.elevator_add_req_fn =		deadline_insert_request,
-	.elevator_remove_req_fn =	deadline_remove_request,
+	.elevator_dispatch_fn =		deadline_dispatch_requests,
+	.elevator_add_req_fn =		deadline_add_request,
 	.elevator_queue_empty_fn =	deadline_queue_empty,
 	.elevator_former_req_fn =	deadline_former_request,
 	.elevator_latter_req_fn =	deadline_latter_request,