author     Jens Axboe <jens.axboe@oracle.com>      2007-01-18 19:56:49 -0500
committer  Jens Axboe <axboe@carl.home.kernel.dk>  2007-02-11 17:14:45 -0500
commit     c5b680f3b7593f2b066c683df799d19f807fb23d
tree       657df453cdf7b872f5ea713e66f2e090048a6c1f /block/cfq-iosched.c
parent     44f7c16065c83060cbb9dd9b367141682a6e2b8e
cfq-iosched: account for slice over/under time
If a slice uses less than it is entitled to (or perhaps more), include
that in the decision on how much time to give it the next time it
gets serviced.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
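
As an aside for readers unfamiliar with the mechanism, the bookkeeping this patch introduces can be sketched in a few lines of plain user-space C. This is a minimal sketch of the idea only, not the kernel code: struct queue, set_slice(), expire_slice() and the time values are made up for the example; the real patch stores the residual in cfqq->slice_resid and folds it into the next slice in cfq_set_prio_slice().

#include <stdio.h>

struct queue {
        long slice_end;   /* absolute time the current slice ends */
        long slice_resid; /* signed leftover from the previous slice */
};

/* Start a new slice: base allotment plus a one-shot correction. */
static void set_slice(struct queue *q, long now, long slice_len)
{
        q->slice_end = now + slice_len + q->slice_resid;
        /* carry the residual for one slice only, to avoid oscillation */
        q->slice_resid = 0;
}

/* Expire the current slice and remember how far it over/under-ran. */
static void expire_slice(struct queue *q, long now)
{
        /* positive: the queue idled out early; negative: it overran */
        q->slice_resid = q->slice_end - now;
}

int main(void)
{
        struct queue q = { 0, 0 };
        long now = 0;
        const long slice_len = 100;

        set_slice(&q, now, slice_len);   /* slice ends at t=100 */
        now = 130;                       /* the queue ran 30 too long */
        expire_slice(&q, now);           /* slice_resid = -30 */

        set_slice(&q, now, slice_len);   /* next slice ends at t=200, not t=230 */
        printf("next slice_end = %ld, resid = %ld\n", q.slice_end, q.slice_resid);
        return 0;
}

Compiled and run, the second set_slice() ends at t=200 rather than t=230: the queue that overran its previous slice gets a correspondingly shorter one next time, and the correction is applied exactly once, which is the one-shot behaviour the new comment in cfq_set_prio_slice() describes.
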
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--  block/cfq-iosched.c | 32 ++++++++++++--------------------
1 file changed, 12 insertions(+), 20 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index d44402a4c5cd..039b38cf805c 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -147,8 +147,8 @@ struct cfq_queue {
         struct list_head fifo;
 
         unsigned long slice_end;
-        unsigned long slice_left;
         unsigned long service_last;
+        long slice_resid;
 
         /* number of requests that are on the dispatch list */
         int on_dispatch[2];
@@ -251,6 +251,14 @@ static inline void
 cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
         cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
+        cfqq->slice_end += cfqq->slice_resid;
+
+        /*
+         * Don't carry over residual for more than one slice, we only want
+         * to slightly correct the fairness. Carrying over forever would
+         * easily introduce oscillations.
+         */
+        cfqq->slice_resid = 0;
 }
 
 /*
@@ -667,7 +675,6 @@ __cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
                 del_timer(&cfqd->idle_class_timer);
 
                 cfqq->slice_end = 0;
-                cfqq->slice_left = 0;
                 cfq_clear_cfqq_must_alloc_slice(cfqq);
                 cfq_clear_cfqq_fifo_expire(cfqq);
                 cfq_mark_cfqq_slice_new(cfqq);
@@ -683,8 +690,6 @@ static void
 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                     int preempted)
 {
-        unsigned long now = jiffies;
-
         if (cfq_cfqq_wait_request(cfqq))
                 del_timer(&cfqd->idle_slice_timer);
 
@@ -699,10 +704,8 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
          * store what was left of this slice, if the queue idled out
          * or was preempted
          */
-        if (cfq_slice_used(cfqq))
-                cfqq->slice_left = cfqq->slice_end - now;
-        else
-                cfqq->slice_left = 0;
+        if (!cfq_cfqq_slice_new(cfqq))
+                cfqq->slice_resid = cfqq->slice_end - jiffies;
 
         cfq_resort_rr_list(cfqq, preempted);
 
@@ -1364,10 +1367,7 @@ retry:
         hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
         atomic_set(&cfqq->ref, 0);
         cfqq->cfqd = cfqd;
-        /*
-         * set ->slice_left to allow preemption for a new process
-         */
-        cfqq->slice_left = 2 * cfqd->cfq_slice_idle;
+
         cfq_mark_cfqq_idle_window(cfqq);
         cfq_mark_cfqq_prio_changed(cfqq);
         cfq_mark_cfqq_queue_new(cfqq);
@@ -1586,11 +1586,6 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
         if (!cfq_cfqq_wait_request(new_cfqq))
                 return 0;
         /*
-         * if it doesn't have slice left, forget it
-         */
-        if (new_cfqq->slice_left < cfqd->cfq_slice_idle)
-                return 0;
-        /*
          * if the new request is sync, but the currently running queue is
          * not, let the sync request have priority.
          */
@@ -1614,9 +1609,6 @@ static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
         cfq_slice_expired(cfqd, 1);
 
-        if (!cfqq->slice_left)
-                cfqq->slice_left = cfq_prio_to_slice(cfqd, cfqq) / 2;
-
         /*
          * Put the new queue at the front of the of the current list,
          * so we know that it will be selected next.