path: root/block/cfq-iosched.c
author		Jens Axboe <jens.axboe@oracle.com>	2007-02-05 05:56:25 -0500
committer	Jens Axboe <axboe@carl.home.kernel.dk>	2007-02-11 17:14:45 -0500
commit		99f9628aba4d8fb3b8d955c9efded0d0a1995fad
tree		e1f0547190c0872bdb99a31d6750d739739d2ffe /block/cfq-iosched.c
parent		b0b8d74941b7bc67edec26e4c114d27827edfd09
[PATCH] cfq-iosched: use last service point as the fairness criteria
Right now we use slice_start, which gives async queues an unfair advantage. Change that to service_last, and base the resorter on that.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block/cfq-iosched.c')
 block/cfq-iosched.c | 48 ++++++++++++++++++++++++++++++++++--------------
 1 file changed, 34 insertions(+), 14 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 975cdfc8d614..7a8ef0f09699 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -146,9 +146,9 @@ struct cfq_queue {
 	/* fifo list of requests in sort_list */
 	struct list_head fifo;
 
-	unsigned long slice_start;
 	unsigned long slice_end;
 	unsigned long slice_left;
+	unsigned long service_last;
 
 	/* number of requests that are on the dispatch list */
 	int on_dispatch[2];
@@ -355,7 +355,8 @@ cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
 {
 	struct cfq_data *cfqd = cfqq->cfqd;
-	struct list_head *list;
+	struct list_head *list, *n;
+	struct cfq_queue *__cfqq;
 
 	/*
 	 * Resorting requires the cfqq to be on the RR list already.
@@ -383,15 +384,13 @@ static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
 		list = &cfqd->rr_list[cfqq->ioprio];
 	}
 
-	/*
-	 * If this queue was preempted or is new (never been serviced), let
-	 * it be added first for fairness but beind other new queues.
-	 * Otherwise, just add to the back of the list.
-	 */
 	if (preempted || cfq_cfqq_queue_new(cfqq)) {
-		struct list_head *n = list;
-		struct cfq_queue *__cfqq;
-
+		/*
+		 * If this queue was preempted or is new (never been serviced),
+		 * let it be added first for fairness but beind other new
+		 * queues.
+		 */
+		n = list;
 		while (n->next != list) {
 			__cfqq = list_entry_cfqq(n->next);
 			if (!cfq_cfqq_queue_new(__cfqq))
@@ -399,11 +398,32 @@ static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
 
 			n = n->next;
 		}
+		list_add_tail(&cfqq->cfq_list, n);
+	} else if (!cfq_cfqq_class_sync(cfqq)) {
+		/*
+		 * async queue always goes to the end. this wont be overly
+		 * unfair to writes, as the sort of the sync queue wont be
+		 * allowed to pass the async queue again.
+		 */
+		list_add_tail(&cfqq->cfq_list, list);
+	} else {
+		/*
+		 * sort by last service, but don't cross a new or async
+		 * queue. we don't cross a new queue because it hasn't been
+		 * service before, and we don't cross an async queue because
+		 * it gets added to the end on expire.
+		 */
+		n = list;
+		while ((n = n->prev) != list) {
+			struct cfq_queue *__cfqq = list_entry_cfqq(n);
 
-		list = n;
+			if (!cfq_cfqq_class_sync(cfqq) || !__cfqq->service_last)
+				break;
+			if (time_before(__cfqq->service_last, cfqq->service_last))
+				break;
+		}
+		list_add(&cfqq->cfq_list, n);
 	}
 
-	list_add_tail(&cfqq->cfq_list, list);
 }
 
 /*
@@ -608,7 +628,6 @@ __cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	 */
 	del_timer(&cfqd->idle_class_timer);
 
-	cfqq->slice_start = jiffies;
 	cfqq->slice_end = 0;
 	cfqq->slice_left = 0;
 	cfq_clear_cfqq_must_alloc_slice(cfqq);
@@ -1688,6 +1707,7 @@ static void cfq_completed_request(request_queue_t *q, struct request *rq)
 	WARN_ON(!cfqq->on_dispatch[sync]);
 	cfqd->rq_in_driver--;
 	cfqq->on_dispatch[sync]--;
+	cfqq->service_last = now;
 
 	if (!cfq_class_idle(cfqq))
 		cfqd->last_end_request = now;
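
For readers following the new resort logic, below is a minimal user-space sketch of the ordering rule the patch introduces: async queues always go to the tail, while sync queues are inserted by walking the round-robin list backwards, stopping before any queue with an earlier service_last and never crossing a new or async queue. All names (struct queue, resort, insert_after) are illustrative only and are not part of the kernel code.

/*
 * Minimal model of the service_last-based resort, not the kernel code.
 */
#include <stdio.h>

struct queue {
	unsigned long service_last;	/* time the queue last completed a request */
	int sync;			/* async queues always go to the back */
	struct queue *prev, *next;
};

static void list_init(struct queue *head)
{
	head->prev = head->next = head;
}

/* link q right after pos in the circular list */
static void insert_after(struct queue *pos, struct queue *q)
{
	q->prev = pos;
	q->next = pos->next;
	pos->next->prev = q;
	pos->next = q;
}

/* re-insert q into the round-robin list, ordered by last service time */
static void resort(struct queue *head, struct queue *q)
{
	struct queue *n = head;

	if (!q->sync) {
		/* async: straight to the back, like list_add_tail() */
		insert_after(head->prev, q);
		return;
	}
	/*
	 * sync: walk backwards and stop before a queue that was serviced
	 * earlier, or before a never-serviced/async queue (don't cross it)
	 */
	while ((n = n->prev) != head) {
		if (!n->sync || !n->service_last)
			break;
		if (n->service_last < q->service_last)
			break;
	}
	insert_after(n, q);
}

int main(void)
{
	struct queue head, a = { 10, 1 }, b = { 30, 1 }, c = { 20, 1 };
	struct queue *n;

	list_init(&head);
	resort(&head, &a);
	resort(&head, &b);
	resort(&head, &c);

	/* prints 10, 20, 30: the earliest-serviced queue sits at the front */
	for (n = head.next; n != &head; n = n->next)
		printf("service_last=%lu\n", n->service_last);
	return 0;
}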