author    Jens Axboe <jens.axboe@oracle.com>    2007-04-25 06:44:27 -0400
committer Jens Axboe <axboe@nelson.home.kernel.dk>    2007-04-30 03:01:21 -0400
commit    6d048f5310aa2dda2b5acd947eab3598c25e269f (patch)
tree      4f0dbcd21b82dd015a908139fb4de3601b3d834a /block
parent    1e3335de05da3dfbe48b8caa03db1834a2133256 (diff)
cfq-iosched: development update
- Implement logic for detecting cooperating processes, so we
choose the best available queue whenever possible (a sketch of
the closeness test follows this list).
- Improve residual slice time accounting (a sketch of that
bookkeeping follows the notes below).
- Remove dead code: we no longer see async requests coming in on
sync queues. That path was removed a long time ago. This means
we can also remove the distinction between cfq_cfqq_sync()
and cfq_cfqq_class_sync(), as they are now identical, and we
can kill the on_dispatch array, making it a plain counter.
- Allow a process to go into the current list if it hasn't been
serviced in this scheduler tick yet.
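
The closeness test behind the cooperator detection is simple: a queue is
considered "close" if its next pending request lies within the issuing
process's mean seek distance of the last dispatched position. A minimal
stand-alone sketch of just that test (simplified stand-in types such as
struct queue_ctx; the real code in the diff below operates on
struct cfq_queue and struct cfq_io_context):

        /* Simplified stand-ins for the kernel types the patch touches. */
        typedef unsigned long long sector_t;

        struct queue_ctx {
                sector_t next_rq_sector; /* head request position, like cfqq->next_rq */
                int sync;                /* cfq_cfqq_sync() */
        };

        /* Absolute distance from the last dispatched sector (cfq_dist_from_last). */
        static sector_t dist_from_last(sector_t last_position, sector_t sector)
        {
                if (sector >= last_position)
                        return sector - last_position;
                return last_position - sector;
        }

        /*
         * The cfq_rq_close() test: "close" means within the process's mean
         * seek distance, and only once enough seek samples exist (the
         * patch's sample_valid() requires more than 80).
         */
        static int rq_close(sector_t last_position, sector_t sector,
                            unsigned int seek_samples, sector_t seek_mean)
        {
                if (seek_samples <= 80)
                        return 0;
                return dist_from_last(last_position, sector) <= seek_mean;
        }

        /*
         * Scan a candidate array for a cooperating queue, the way
         * __cfq_close_cooperator() walks cur_rr and the same-priority rr_list.
         */
        static struct queue_ctx *find_cooperator(struct queue_ctx *cur,
                                                 struct queue_ctx **list, int n,
                                                 sector_t last_position,
                                                 unsigned int seek_samples,
                                                 sector_t seek_mean)
        {
                int i;

                for (i = 0; i < n; i++) {
                        struct queue_ctx *q = list[i];

                        if (q == cur || !q->sync)
                                continue;
                        if (rq_close(last_position, q->next_rq_sector,
                                     seek_samples, seek_mean))
                                return q;
                }
                return NULL;
        }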
Possible future improvements include caching the cfqq lookup
in cfq_close_cooperator(), so we don't have to look it up twice.
cfq_get_best_queue() should then just reuse that decision instead
of making it again.
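
On the residual slice point: the reworked cfq_set_active_queue() now skips
a queue whose accumulated deficit already exceeds a full slice, repaying
one slice worth of the debt and requeueing it at the tail instead. A small
sketch of just that bookkeeping (simplified names, not the kernel API):

        /* slice_resid goes negative when the queue overran earlier slices. */
        struct q {
                long slice_resid;
        };

        /* 1 if the queue may be activated now, 0 if it sits this round out. */
        static int may_run_this_round(struct q *q, long slice)
        {
                if (q->slice_resid > -slice)
                        return 1;               /* debt smaller than one slice */

                q->slice_resid += slice;        /* repay one slice of the debt */
                return 0;                       /* caller requeues it at the tail */
        }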
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block')
-rw-r--r--    block/cfq-iosched.c    381
1 file changed, 261 insertions(+), 120 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index bfb396774cbb..28236f2cd908 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -56,13 +56,7 @@ static struct completion *ioc_gone;
 #define ASYNC (0)
 #define SYNC (1)
 
-#define cfq_cfqq_dispatched(cfqq) \
-        ((cfqq)->on_dispatch[ASYNC] + (cfqq)->on_dispatch[SYNC])
-
-#define cfq_cfqq_class_sync(cfqq) ((cfqq)->key != CFQ_KEY_ASYNC)
-
-#define cfq_cfqq_sync(cfqq) \
-        (cfq_cfqq_class_sync(cfqq) || (cfqq)->on_dispatch[SYNC])
+#define cfq_cfqq_sync(cfqq) ((cfqq)->key != CFQ_KEY_ASYNC)
 
 #define sample_valid(samples) ((samples) > 80)
 
@@ -79,6 +73,7 @@ struct cfq_data {
         struct list_head busy_rr;
         struct list_head cur_rr;
         struct list_head idle_rr;
+        unsigned long cur_rr_tick;
         unsigned int busy_queues;
 
         /*
@@ -98,11 +93,12 @@ struct cfq_data {
         struct cfq_queue *active_queue;
         struct cfq_io_context *active_cic;
         int cur_prio, cur_end_prio;
+        unsigned long prio_time;
         unsigned int dispatch_slice;
 
         struct timer_list idle_class_timer;
 
-        sector_t last_sector;
+        sector_t last_position;
         unsigned long last_end_request;
 
         /*
@@ -117,6 +113,9 @@ struct cfq_data {
         unsigned int cfq_slice_idle;
 
         struct list_head cic_list;
+
+        sector_t new_seek_mean;
+        u64 new_seek_total;
 };
 
 /*
@@ -133,6 +132,8 @@ struct cfq_queue {
         unsigned int key;
         /* member of the rr/busy/cur/idle cfqd list */
         struct list_head cfq_list;
+        /* in what tick we were last serviced */
+        unsigned long rr_tick;
         /* sorted list of pending requests */
         struct rb_root sort_list;
         /* if fifo isn't expired, next request to serve */
@@ -148,10 +149,11 @@ struct cfq_queue {
 
         unsigned long slice_end;
         unsigned long service_last;
+        unsigned long slice_start;
         long slice_resid;
 
-        /* number of requests that are on the dispatch list */
-        int on_dispatch[2];
+        /* number of requests that are on the dispatch list or inside driver */
+        int dispatched;
 
         /* io prio of this group */
         unsigned short ioprio, org_ioprio;
@@ -159,6 +161,8 @@ struct cfq_queue {
 
         /* various state flags, see below */
         unsigned int flags;
+
+        sector_t last_request_pos;
 };
 
 enum cfqq_state_flags {
@@ -259,6 +263,8 @@ cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
          * easily introduce oscillations.
          */
         cfqq->slice_resid = 0;
+
+        cfqq->slice_start = jiffies;
 }
 
 /*
@@ -307,7 +313,7 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
         s1 = rq1->sector;
         s2 = rq2->sector;
 
-        last = cfqd->last_sector;
+        last = cfqd->last_position;
 
         /*
          * by definition, 1KiB is 2 sectors
@@ -398,39 +404,42 @@ cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
         return cfq_choose_req(cfqd, next, prev);
 }
 
-static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
+/*
+ * This function finds out where to insert a BE queue in the service hierarchy
+ */
+static void cfq_resort_be_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+                                int preempted)
 {
-        struct cfq_data *cfqd = cfqq->cfqd;
         struct list_head *list, *n;
         struct cfq_queue *__cfqq;
+        int add_tail = 0;
 
         /*
-         * Resorting requires the cfqq to be on the RR list already.
+         * if cfqq has requests in flight, don't allow it to be
+         * found in cfq_set_active_queue before it has finished them.
+         * this is done to increase fairness between a process that
+         * has lots of io pending vs one that only generates one
+         * sporadically or synchronously
          */
-        if (!cfq_cfqq_on_rr(cfqq))
-                return;
-
-        list_del(&cfqq->cfq_list);
-
-        if (cfq_class_rt(cfqq))
+        if (cfqq->dispatched)
+                list = &cfqd->busy_rr;
+        else if (cfqq->ioprio == (cfqd->cur_prio + 1) &&
+                 cfq_cfqq_sync(cfqq) &&
+                 (time_before(cfqd->prio_time, cfqq->service_last) ||
+                  cfq_cfqq_queue_new(cfqq) || preempted)) {
                 list = &cfqd->cur_rr;
-        else if (cfq_class_idle(cfqq))
-                list = &cfqd->idle_rr;
-        else {
+                add_tail = 1;
+        } else
+                list = &cfqd->rr_list[cfqq->ioprio];
+
+        if (!cfq_cfqq_sync(cfqq) || add_tail) {
                 /*
-                 * if cfqq has requests in flight, don't allow it to be
-                 * found in cfq_set_active_queue before it has finished them.
-                 * this is done to increase fairness between a process that
-                 * has lots of io pending vs one that only generates one
-                 * sporadically or synchronously
+                 * async queue always goes to the end. this wont be overly
+                 * unfair to writes, as the sort of the sync queue wont be
+                 * allowed to pass the async queue again.
                  */
-                if (cfq_cfqq_dispatched(cfqq))
-                        list = &cfqd->busy_rr;
-                else
-                        list = &cfqd->rr_list[cfqq->ioprio];
-        }
-
-        if (preempted || cfq_cfqq_queue_new(cfqq)) {
+                list_add_tail(&cfqq->cfq_list, list);
+        } else if (preempted || cfq_cfqq_queue_new(cfqq)) {
                 /*
                  * If this queue was preempted or is new (never been serviced),
                  * let it be added first for fairness but beind other new
@@ -444,14 +453,7 @@ static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
 
                         n = n->next;
                 }
-                list_add_tail(&cfqq->cfq_list, n);
-        } else if (!cfq_cfqq_class_sync(cfqq)) {
-                /*
-                 * async queue always goes to the end. this wont be overly
-                 * unfair to writes, as the sort of the sync queue wont be
-                 * allowed to pass the async queue again.
-                 */
-                list_add_tail(&cfqq->cfq_list, list);
+                list_add(&cfqq->cfq_list, n);
         } else {
                 /*
                  * sort by last service, but don't cross a new or async
@@ -461,17 +463,54 @@ static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
                  */
                 n = list;
                 while ((n = n->prev) != list) {
-                        struct cfq_queue *__cfqq = list_entry_cfqq(n);
+                        struct cfq_queue *__c = list_entry_cfqq(n);
 
-                        if (!cfq_cfqq_class_sync(cfqq) || !__cfqq->service_last)
+                        if (!cfq_cfqq_sync(__c) || !__c->service_last)
                                 break;
-                        if (time_before(__cfqq->service_last, cfqq->service_last))
+                        if (time_before(__c->service_last, cfqq->service_last))
                                 break;
                 }
                 list_add(&cfqq->cfq_list, n);
         }
 }
 
+static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
+{
+        struct cfq_data *cfqd = cfqq->cfqd;
+        struct list_head *n;
+
+        /*
+         * Resorting requires the cfqq to be on the RR list already.
+         */
+        if (!cfq_cfqq_on_rr(cfqq))
+                return;
+
+        list_del(&cfqq->cfq_list);
+
+        if (cfq_class_rt(cfqq)) {
+                /*
+                 * At to the front of the current list, but behind other
+                 * RT queues.
+                 */
+                n = &cfqd->cur_rr;
+                while (n->next != &cfqd->cur_rr)
+                        if (!cfq_class_rt(cfqq))
+                                break;
+
+                list_add(&cfqq->cfq_list, n);
+        } else if (cfq_class_idle(cfqq)) {
+                /*
+                 * IDLE goes to the tail of the idle list
+                 */
+                list_add_tail(&cfqq->cfq_list, &cfqd->idle_rr);
+        } else {
+                /*
+                 * So we get here, ergo the queue is a regular best-effort queue
+                 */
+                cfq_resort_be_queue(cfqd, cfqq, preempted);
+        }
+}
+
 /*
  * add to busy list of queues for service, trying to be fair in ordering
  * the pending list according to last request service
@@ -579,6 +618,8 @@ static void cfq_activate_request(request_queue_t *q, struct request *rq)
          */
         if (!cfqd->hw_tag && cfqd->rq_in_driver > 4)
                 cfqd->hw_tag = 1;
+
+        cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors;
 }
 
 static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
@@ -684,6 +725,7 @@ __cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
                 cfq_clear_cfqq_must_alloc_slice(cfqq);
                 cfq_clear_cfqq_fifo_expire(cfqq);
                 cfq_mark_cfqq_slice_new(cfqq);
+                cfqq->rr_tick = cfqd->cur_rr_tick;
         }
 
         cfqd->active_queue = cfqq;
@@ -786,10 +828,46 @@ static int cfq_get_next_prio_level(struct cfq_data *cfqd)
                 cfqd->cur_end_prio = 0;
         }
 
+        cfqd->cur_rr_tick++;
+        cfqd->prio_time = jiffies;
         return prio;
 }
 
-static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
+static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
+                                          struct request *rq)
+{
+        if (rq->sector >= cfqd->last_position)
+                return rq->sector - cfqd->last_position;
+        else
+                return cfqd->last_position - rq->sector;
+}
+
+static struct cfq_queue *cfq_get_best_queue(struct cfq_data *cfqd)
+{
+        struct cfq_queue *cfqq = NULL, *__cfqq;
+        sector_t best = -1, dist;
+
+        list_for_each_entry(__cfqq, &cfqd->cur_rr, cfq_list) {
+                if (!__cfqq->next_rq || !cfq_cfqq_sync(__cfqq))
+                        continue;
+
+                dist = cfq_dist_from_last(cfqd, __cfqq->next_rq);
+                if (dist < best) {
+                        best = dist;
+                        cfqq = __cfqq;
+                }
+        }
+
+        /*
+         * Only async queue(s) available, grab first entry
+         */
+        if (!cfqq)
+                cfqq = list_entry_cfqq(cfqd->cur_rr.next);
+
+        return cfqq;
+}
+
+static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
 {
         struct cfq_queue *cfqq = NULL;
 
@@ -799,7 +877,7 @@ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
                  * empty, get next prio level and grab first entry then if any
                  * are spliced
                  */
-                cfqq = list_entry_cfqq(cfqd->cur_rr.next);
+                cfqq = cfq_get_best_queue(cfqd);
         } else if (!list_empty(&cfqd->busy_rr)) {
                 /*
                  * If no new queues are available, check if the busy list has
@@ -820,49 +898,128 @@ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
                         mod_timer(&cfqd->idle_class_timer, end);
         }
 
+        return cfqq;
+}
+
+static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
+{
+        struct cfq_queue *cfqq;
+
+        do {
+                long prio;
+
+                cfqq = cfq_get_next_queue(cfqd);
+                if (!cfqq)
+                        break;
+
+                prio = cfq_prio_to_slice(cfqd, cfqq);
+                if (cfqq->slice_resid > -prio)
+                        break;
+
+                cfqq->slice_resid += prio;
+                list_del_init(&cfqq->cfq_list);
+                list_add_tail(&cfqq->cfq_list, &cfqd->rr_list[cfqq->ioprio]);
+                cfqq = NULL;
+        } while (1);
+
         __cfq_set_active_queue(cfqd, cfqq);
         return cfqq;
 }
 
-#define CIC_SEEKY(cic) ((cic)->seek_mean > (128 * 1024))
+static inline int cfq_rq_close(struct cfq_data *cfqd, struct request *rq)
+{
+        struct cfq_io_context *cic = cfqd->active_cic;
+
+        if (!sample_valid(cic->seek_samples))
+                return 0;
+
+        return cfq_dist_from_last(cfqd, rq) <= cic->seek_mean;
+}
+
+static struct cfq_queue *__cfq_close_cooperator(struct cfq_data *cfqd,
+                                                struct cfq_queue *cur_cfqq,
+                                                struct list_head *list)
+{
+        struct cfq_queue *cfqq;
+
+        list_for_each_entry(cfqq, list, cfq_list) {
+                if (cfqq == cur_cfqq || !cfq_cfqq_sync(cfqq))
+                        continue;
+
+                BUG_ON(!cfqq->next_rq);
+
+                if (cfq_rq_close(cfqd, cfqq->next_rq))
+                        return cfqq;
+        }
+
+        return NULL;
+}
+
+static int cfq_close_cooperator(struct cfq_data *cfqd,
+                                struct cfq_queue *cur_cfqq)
+{
+        struct cfq_queue *cfqq;
+
+        if (!cfqd->busy_queues)
+                return 0;
+
+        /*
+         * check cur_rr and same-prio rr_list for candidates
+         */
+        cfqq = __cfq_close_cooperator(cfqd, cur_cfqq, &cfqd->cur_rr);
+        if (cfqq)
+                return 1;
+
+        cfqq = __cfq_close_cooperator(cfqd, cur_cfqq, &cfqd->rr_list[cur_cfqq->ioprio]);
+        if (cfqq && (cfqq->rr_tick == cfqd->cur_rr_tick))
+                cfqq = NULL;
+
+        return cfqq != NULL;
+}
+
+#define CIC_SEEKY(cic) ((cic)->seek_mean > (8 * 1024))
 
-static int cfq_arm_slice_timer(struct cfq_data *cfqd)
+static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 {
         struct cfq_queue *cfqq = cfqd->active_queue;
         struct cfq_io_context *cic;
         unsigned long sl;
 
         WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
+        WARN_ON(cfq_cfqq_slice_new(cfqq));
 
         /*
          * idle is disabled, either manually or by past process history
          */
-        if (!cfqd->cfq_slice_idle)
-                return 0;
-        if (!cfq_cfqq_idle_window(cfqq))
-                return 0;
+        if (!cfqd->cfq_slice_idle || !cfq_cfqq_idle_window(cfqq))
+                return;
+
         /*
          * task has exited, don't wait
          */
         cic = cfqd->active_cic;
         if (!cic || !cic->ioc->task)
-                return 0;
+                return;
+
+        /*
+         * See if this prio level has a good candidate
+         */
+        if (cfq_close_cooperator(cfqd, cfqq))
+                return;
 
         cfq_mark_cfqq_must_dispatch(cfqq);
         cfq_mark_cfqq_wait_request(cfqq);
 
-        sl = min(cfqq->slice_end - 1, (unsigned long) cfqd->cfq_slice_idle);
-
         /*
          * we don't want to idle for seeks, but we do want to allow
          * fair distribution of slice time for a process doing back-to-back
         * seeks. so allow a little bit of time for him to submit a new rq
          */
+        sl = cfqd->cfq_slice_idle;
         if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
                 sl = min(sl, msecs_to_jiffies(2));
 
         mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
-        return 1;
 }
 
 static void cfq_dispatch_insert(request_queue_t *q, struct request *rq)
@@ -870,7 +1027,7 @@ static void cfq_dispatch_insert(request_queue_t *q, struct request *rq)
         struct cfq_queue *cfqq = RQ_CFQQ(rq);
 
         cfq_remove_request(rq);
-        cfqq->on_dispatch[rq_is_sync(rq)]++;
+        cfqq->dispatched++;
         elv_dispatch_sort(q, rq);
 }
 
@@ -891,13 +1048,13 @@ static inline struct request *cfq_check_fifo(struct cfq_queue *cfqq)
         if (list_empty(&cfqq->fifo))
                 return NULL;
 
-        fifo = cfq_cfqq_class_sync(cfqq);
+        fifo = cfq_cfqq_sync(cfqq);
         rq = rq_entry_fifo(cfqq->fifo.next);
 
-        if (time_after(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo]))
-                return rq;
+        if (time_before(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo]))
+                return NULL;
 
-        return NULL;
+        return rq;
 }
 
 static inline int
@@ -922,23 +1079,26 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
                 goto new_queue;
 
         /*
-         * slice has expired
+         * The active queue has run out of time, expire it and select new.
         */
-        if (!cfq_cfqq_must_dispatch(cfqq) && cfq_slice_used(cfqq))
+        if (cfq_slice_used(cfqq))
                 goto expire;
 
         /*
-         * if queue has requests, dispatch one. if not, check if
-         * enough slice is left to wait for one
+         * The active queue has requests and isn't expired, allow it to
+         * dispatch.
          */
         if (!RB_EMPTY_ROOT(&cfqq->sort_list))
                 goto keep_queue;
-        else if (cfq_cfqq_slice_new(cfqq) || cfq_cfqq_dispatched(cfqq)) {
+
+        /*
+         * No requests pending. If the active queue still has requests in
+         * flight or is idling for a new request, allow either of these
+         * conditions to happen (or time out) before selecting a new queue.
+         */
+        if (cfqq->dispatched || timer_pending(&cfqd->idle_slice_timer)) {
                 cfqq = NULL;
                 goto keep_queue;
-        } else if (cfq_cfqq_class_sync(cfqq)) {
-                if (cfq_arm_slice_timer(cfqd))
-                        return NULL;
         }
 
 expire:
@@ -1039,7 +1199,7 @@ static int
 cfq_dispatch_requests(request_queue_t *q, int force)
 {
         struct cfq_data *cfqd = q->elevator->elevator_data;
-        struct cfq_queue *cfqq, *prev_cfqq;
+        struct cfq_queue *cfqq;
         int dispatched;
 
         if (!cfqd->busy_queues)
@@ -1049,23 +1209,19 @@ cfq_dispatch_requests(request_queue_t *q, int force)
                 return cfq_forced_dispatch(cfqd);
 
         dispatched = 0;
-        prev_cfqq = NULL;
         while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
                 int max_dispatch;
 
                 if (cfqd->busy_queues > 1) {
                         /*
-                         * Don't repeat dispatch from the previous queue.
-                         */
-                        if (prev_cfqq == cfqq)
-                                break;
-
-                        /*
                          * So we have dispatched before in this round, if the
                          * next queue has idling enabled (must be sync), don't
-                         * allow it service until the previous have continued.
+                         * allow it service until the previous have completed.
                          */
-                        if (cfqd->rq_in_driver && cfq_cfqq_idle_window(cfqq))
+                        if (cfqd->rq_in_driver && cfq_cfqq_idle_window(cfqq) &&
+                            dispatched)
+                                break;
+                        if (cfqq->dispatched >= cfqd->cfq_quantum)
                                 break;
                 }
 
@@ -1078,7 +1234,6 @@ cfq_dispatch_requests(request_queue_t *q, int force)
                         max_dispatch = 1;
 
                 dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
-                prev_cfqq = cfqq;
         }
 
         return dispatched;
@@ -1520,7 +1675,8 @@ cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
 }
 
 static void
-cfq_update_io_seektime(struct cfq_io_context *cic, struct request *rq)
+cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
+                       struct request *rq)
 {
         sector_t sdist;
         u64 total;
@@ -1530,6 +1686,11 @@ cfq_update_io_seektime(struct cfq_io_context *cic, struct request *rq)
         else
                 sdist = cic->last_request_pos - rq->sector;
 
+        if (!cic->seek_samples) {
+                cfqd->new_seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
+                cfqd->new_seek_mean = cfqd->new_seek_total / 256;
+        }
+
         /*
          * Don't allow the seek distance to get too large from the
          * odd fragment, pagein, etc
@@ -1580,13 +1741,16 @@ static int
 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
                    struct request *rq)
 {
-        struct cfq_queue *cfqq = cfqd->active_queue;
-        sector_t dist;
+        struct cfq_queue *cfqq;
 
-        if (cfq_class_idle(new_cfqq))
+        cfqq = cfqd->active_queue;
+        if (!cfqq)
                 return 0;
 
-        if (!cfqq)
+        if (cfq_slice_used(cfqq))
+                return 1;
+
+        if (cfq_class_idle(new_cfqq))
                 return 0;
 
         if (cfq_class_idle(cfqq))
@@ -1613,12 +1777,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
          * if this request is as-good as one we would expect from the
          * current cfqq, let it preempt
          */
-        if (rq->sector > cfqd->last_sector)
-                dist = rq->sector - cfqd->last_sector;
-        else
-                dist = cfqd->last_sector - rq->sector;
-
-        if (dist <= cfqd->active_cic->seek_mean)
+        if (cfq_rq_close(cfqd, rq))
                 return 1;
 
         return 0;
@@ -1656,28 +1815,12 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
         if (rq_is_meta(rq))
                 cfqq->meta_pending++;
 
-        /*
-         * we never wait for an async request and we don't allow preemption
-         * of an async request. so just return early
-         */
-        if (!rq_is_sync(rq)) {
-                /*
-                 * sync process issued an async request, if it's waiting
-                 * then expire it and kick rq handling.
-                 */
-                if (cic == cfqd->active_cic &&
-                    del_timer(&cfqd->idle_slice_timer)) {
-                        cfq_slice_expired(cfqd, 0, 0);
-                        blk_start_queueing(cfqd->queue);
-                }
-                return;
-        }
-
         cfq_update_io_thinktime(cfqd, cic);
-        cfq_update_io_seektime(cic, rq);
+        cfq_update_io_seektime(cfqd, cic, rq);
         cfq_update_idle_window(cfqd, cfqq, cic);
 
         cic->last_request_pos = rq->sector + rq->nr_sectors;
+        cfqq->last_request_pos = cic->last_request_pos;
 
         if (cfqq == cfqd->active_queue) {
                 /*
@@ -1726,13 +1869,11 @@ static void cfq_completed_request(request_queue_t *q, struct request *rq)
         now = jiffies;
 
         WARN_ON(!cfqd->rq_in_driver);
-        WARN_ON(!cfqq->on_dispatch[sync]);
+        WARN_ON(!cfqq->dispatched);
         cfqd->rq_in_driver--;
-        cfqq->on_dispatch[sync]--;
+        cfqq->dispatched--;
         cfqq->service_last = now;
 
-        cfqd->last_sector = rq->hard_sector + rq->hard_nr_sectors;
-
         if (!cfq_class_idle(cfqq))
                 cfqd->last_end_request = now;
 
@@ -1752,11 +1893,12 @@ static void cfq_completed_request(request_queue_t *q, struct request *rq)
         }
         if (cfq_slice_used(cfqq))
                 cfq_slice_expired(cfqd, 0, 1);
-        else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list)) {
-                if (!cfq_arm_slice_timer(cfqd))
-                        cfq_schedule_dispatch(cfqd);
-        }
+        else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list))
+                cfq_arm_slice_timer(cfqd);
         }
+
+        if (!cfqd->rq_in_driver)
+                cfq_schedule_dispatch(cfqd);
 }
 
 /*
@@ -2101,7 +2243,6 @@ fail:
 /*
  * sysfs parts below -->
  */
-
 static ssize_t
 cfq_var_show(unsigned int var, char *page)
 {