author     Jens Axboe <jens.axboe@oracle.com>        2007-04-17 06:47:55 -0400
committer  Jens Axboe <axboe@nelson.home.kernel.dk>  2007-04-30 03:01:21 -0400
commit     1afba0451c83cbff622a08f2d86fbb2e680dfd5f (patch)
tree       7da7b97114b73d6d9788cf8663cd3aa28433c0dd /block/cfq-iosched.c
parent     6d048f5310aa2dda2b5acd947eab3598c25e269f (diff)
cfq-iosched: minor updates
- Move the queue_new flag clear to when the queue is selected.
- Only select the non-first queue in cfq_get_best_queue() if there's
  a substantial difference between the best and the first (see the
  sketch after the diffstat).
- Get rid of ->busy_rr.
- Only select a close cooperator if the current queue is known to take
  a while to "think".
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--  block/cfq-iosched.c  81
1 file changed, 18 insertions(+), 63 deletions(-)
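To make the cfq_get_best_queue() change concrete before reading the diff, here is a minimal userspace sketch of the new fairness heuristic: the seek-wise closest queue only wins over the head-of-list queue when it is substantially (at least 4x) closer; otherwise round-robin order is preserved. The queue type and pick_queue() helper below are hypothetical simplifications for illustration, not the kernel's actual structures.

#include <stddef.h>

/* Hypothetical, simplified stand-in for struct cfq_queue. */
struct queue {
        struct queue *next;            /* singly linked for brevity */
        unsigned long long dist;       /* seek distance from last request */
};

/*
 * Pick the seek-wise closest queue, but fall back to the first
 * (round-robin fair) entry unless the best is substantially closer:
 * as in the patch, the first entry must be at least 4x farther away
 * than the best before the best is allowed to jump the queue.
 */
static struct queue *pick_queue(struct queue *head)
{
        struct queue *q, *best_q = NULL;
        unsigned long long best = -1ULL, first = -1ULL;

        for (q = head; q; q = q->next) {
                if (first == -1ULL)
                        first = q->dist;
                if (q->dist < best) {
                        best = q->dist;
                        best_q = q;
                }
        }

        /* Not substantially closer: keep head-of-list order for fairness. */
        if (best_q && best && first != best && (first / best) < 4)
                best_q = head;

        return best_q;
}

The (first / best) < 4 integer division is taken from the patch itself; everything around it is scaffolding.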
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 28236f2cd908..9d6f04103f01 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -70,7 +70,6 @@ struct cfq_data {
          * rr list of queues with requests and the count of them
          */
         struct list_head rr_list[CFQ_PRIO_LISTS];
-        struct list_head busy_rr;
         struct list_head cur_rr;
         struct list_head idle_rr;
         unsigned long cur_rr_tick;
@@ -410,59 +409,18 @@ cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 static void cfq_resort_be_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                                 int preempted)
 {
-        struct list_head *list, *n;
-        struct cfq_queue *__cfqq;
-        int add_tail = 0;
-
-        /*
-         * if cfqq has requests in flight, don't allow it to be
-         * found in cfq_set_active_queue before it has finished them.
-         * this is done to increase fairness between a process that
-         * has lots of io pending vs one that only generates one
-         * sporadically or synchronously
-         */
-        if (cfqq->dispatched)
-                list = &cfqd->busy_rr;
-        else if (cfqq->ioprio == (cfqd->cur_prio + 1) &&
-                 cfq_cfqq_sync(cfqq) &&
-                 (time_before(cfqd->prio_time, cfqq->service_last) ||
-                  cfq_cfqq_queue_new(cfqq) || preempted)) {
-                list = &cfqd->cur_rr;
-                add_tail = 1;
-        } else
-                list = &cfqd->rr_list[cfqq->ioprio];
-
-        if (!cfq_cfqq_sync(cfqq) || add_tail) {
-                /*
-                 * async queue always goes to the end. this wont be overly
-                 * unfair to writes, as the sort of the sync queue wont be
-                 * allowed to pass the async queue again.
-                 */
-                list_add_tail(&cfqq->cfq_list, list);
-        } else if (preempted || cfq_cfqq_queue_new(cfqq)) {
-                /*
-                 * If this queue was preempted or is new (never been serviced),
-                 * let it be added first for fairness but beind other new
-                 * queues.
-                 */
-                n = list;
-                while (n->next != list) {
-                        __cfqq = list_entry_cfqq(n->next);
-                        if (!cfq_cfqq_queue_new(__cfqq))
-                                break;
-
-                        n = n->next;
-                }
-                list_add(&cfqq->cfq_list, n);
-        } else {
+        if (!cfq_cfqq_sync(cfqq))
+                list_add_tail(&cfqq->cfq_list, &cfqd->rr_list[cfqq->ioprio]);
+        else {
+                struct list_head *n = &cfqd->rr_list[cfqq->ioprio];
+
                 /*
                  * sort by last service, but don't cross a new or async
-                 * queue. we don't cross a new queue because it hasn't been
-                 * service before, and we don't cross an async queue because
-                 * it gets added to the end on expire.
+                 * queue. we don't cross a new queue because it hasn't
+                 * been service before, and we don't cross an async
+                 * queue because it gets added to the end on expire.
                  */
-                n = list;
-                while ((n = n->prev) != list) {
+                while ((n = n->prev) != &cfqd->rr_list[cfqq->ioprio]) {
                         struct cfq_queue *__c = list_entry_cfqq(n);
 
                         if (!cfq_cfqq_sync(__c) || !__c->service_last)
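The surviving else branch above boils down to one pattern: walk the priority list backwards and insert the sync queue in order of last service time, stopping early so it never crosses an async or never-serviced queue. Below is a hedged, self-contained userspace sketch of that insertion pattern; list_head mimics the kernel's intrusive list, and the queue fields are simplified stand-ins (the comparison against service_last is an assumption based on the "sort by last service" comment, since the tail of the loop falls outside this hunk).

#include <stddef.h>

/* Minimal circular doubly linked list, like the kernel's list_head. */
struct list_head {
        struct list_head *prev, *next;
};

/* Simplified stand-in for struct cfq_queue. */
struct queue {
        struct list_head list;
        int sync;                      /* cfq_cfqq_sync() stand-in */
        unsigned long service_last;    /* 0 == never serviced */
};

#define queue_entry(ptr) \
        ((struct queue *)((char *)(ptr) - offsetof(struct queue, list)))

/* Insert entry right after pos (kernel list_add() semantics). */
static void list_add(struct list_head *entry, struct list_head *pos)
{
        entry->prev = pos;
        entry->next = pos->next;
        pos->next->prev = entry;
        pos->next = entry;
}

/*
 * Re-insert a sync queue sorted by service_last (oldest first),
 * walking backwards from the tail and refusing to cross an async
 * queue or one that has never been serviced.
 */
static void resort_sync_queue(struct queue *q, struct list_head *head)
{
        struct list_head *n = head;

        while ((n = n->prev) != head) {
                struct queue *c = queue_entry(n);

                if (!c->sync || !c->service_last)
                        break;          /* don't cross async/new queues */
                if (c->service_last <= q->service_last)
                        break;          /* found the sorted position */
        }
        list_add(&q->list, n);
}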
@@ -725,6 +683,7 @@ __cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
         cfq_clear_cfqq_must_alloc_slice(cfqq);
         cfq_clear_cfqq_fifo_expire(cfqq);
         cfq_mark_cfqq_slice_new(cfqq);
+        cfq_clear_cfqq_queue_new(cfqq);
         cfqq->rr_tick = cfqd->cur_rr_tick;
 }
 
@@ -743,7 +702,6 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 
         cfq_clear_cfqq_must_dispatch(cfqq);
         cfq_clear_cfqq_wait_request(cfqq);
-        cfq_clear_cfqq_queue_new(cfqq);
 
         /*
          * store what was left of this slice, if the queue idled out
@@ -845,13 +803,15 @@ static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
 static struct cfq_queue *cfq_get_best_queue(struct cfq_data *cfqd)
 {
         struct cfq_queue *cfqq = NULL, *__cfqq;
-        sector_t best = -1, dist;
+        sector_t best = -1, first = -1, dist;
 
         list_for_each_entry(__cfqq, &cfqd->cur_rr, cfq_list) {
                 if (!__cfqq->next_rq || !cfq_cfqq_sync(__cfqq))
                         continue;
 
                 dist = cfq_dist_from_last(cfqd, __cfqq->next_rq);
+                if (first == -1)
+                        first = dist;
                 if (dist < best) {
                         best = dist;
                         cfqq = __cfqq;
@@ -859,9 +819,11 @@ static struct cfq_queue *cfq_get_best_queue(struct cfq_data *cfqd)
         }
 
         /*
-         * Only async queue(s) available, grab first entry
+         * Only async queue(s) available, grab first entry. Do the same
+         * if the difference between the first and best isn't more than
+         * twice, to obey fairness.
          */
-        if (!cfqq)
+        if (!cfqq || (best && first != best && ((first / best) < 4)))
                 cfqq = list_entry_cfqq(cfqd->cur_rr.next);
 
         return cfqq;
@@ -878,12 +840,6 @@ static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
                  * are spliced
                  */
                 cfqq = cfq_get_best_queue(cfqd);
-        } else if (!list_empty(&cfqd->busy_rr)) {
-                /*
-                 * If no new queues are available, check if the busy list has
-                 * some before falling back to idle io.
-                 */
-                cfqq = list_entry_cfqq(cfqd->busy_rr.next);
         } else if (!list_empty(&cfqd->idle_rr)) {
                 /*
                  * if we have idle queues and no rt or be queues had pending
@@ -1004,7 +960,8 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
         /*
          * See if this prio level has a good candidate
          */
-        if (cfq_close_cooperator(cfqd, cfqq))
+        if (cfq_close_cooperator(cfqd, cfqq) &&
+            (sample_valid(cic->ttime_samples) && cic->ttime_mean > 2))
                 return;
 
         cfq_mark_cfqq_must_dispatch(cfqq);
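The hunk above implements the fourth bullet of the commit message: finding a close cooperator is only half the decision, since switching to it only pays off when the current process demonstrably takes a while between requests; a fast thinker will issue its next request before any idle window expires. A minimal sketch of that gate follows, with a hypothetical think_stats struct standing in for the kernel's per-io-context cic->ttime_samples/ttime_mean bookkeeping (sample_valid() mirrors the cfq macro of this era, which trusts a mean built from more than 80 samples).

/* Hypothetical stand-in for the per-process think-time statistics
 * that cfq keeps in its io context (cic->ttime_*). */
struct think_stats {
        unsigned int samples;          /* think-time samples collected */
        unsigned int mean;             /* mean think time, in jiffies */
};

/* Mirrors cfq's sample_valid(): only trust a mean built from
 * enough observations. */
static int sample_valid(unsigned int samples)
{
        return samples > 80;
}

/*
 * Prefer a close cooperator over the current queue only when the
 * current process is a known slow thinker (mean think time above
 * two jiffies, per the patch); otherwise its next request arrives
 * soon anyway and switching just costs a seek.
 */
static int prefer_close_cooperator(const struct think_stats *ts,
                                   int has_close_cooperator)
{
        return has_close_cooperator &&
               sample_valid(ts->samples) && ts->mean > 2;
}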
@@ -1184,7 +1141,6 @@ cfq_forced_dispatch(struct cfq_data *cfqd)
         for (i = 0; i < CFQ_PRIO_LISTS; i++)
                 dispatched += cfq_forced_dispatch_cfqqs(&cfqd->rr_list[i]);
 
-        dispatched += cfq_forced_dispatch_cfqqs(&cfqd->busy_rr);
         dispatched += cfq_forced_dispatch_cfqqs(&cfqd->cur_rr);
         dispatched += cfq_forced_dispatch_cfqqs(&cfqd->idle_rr);
 
@@ -2174,7 +2130,6 @@ static void *cfq_init_queue(request_queue_t *q)
         for (i = 0; i < CFQ_PRIO_LISTS; i++)
                 INIT_LIST_HEAD(&cfqd->rr_list[i]);
 
-        INIT_LIST_HEAD(&cfqd->busy_rr);
         INIT_LIST_HEAD(&cfqd->cur_rr);
         INIT_LIST_HEAD(&cfqd->idle_rr);
         INIT_LIST_HEAD(&cfqd->cic_list);