author     Jens Axboe <jens.axboe@oracle.com>          2007-04-18 14:13:32 -0400
committer  Jens Axboe <axboe@nelson.home.kernel.dk>    2007-04-30 03:01:22 -0400
commit     67060e37994444ee9c0bd2413c8baa6cc58e7adb (patch)
tree       bdb012bf9a3527c73cc73fc43069c0abeb767b75 /block/cfq-iosched.c
parent     0c534e0a463e2eeafc97ba25ab23c14f3cdf2bdb (diff)
cfq-iosched: sort IDLE queues into the rbtree
Same treatment as the RT conversion, just put the sorted idle branch at the end of the tree.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
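The key change is in cfq_service_tree_add(): the rbtree insertion now orders queues by class before it orders them by service time, so RT queues sort to the front, IDLE queues sort to the back, and same-class queues fall in by rb_key. The following standalone sketch models that ordering outside the kernel; the queue struct, the enum and the compare_queues() helper are invented for illustration and merely stand in for cfq_class_rt(), cfq_class_idle() and the rb_key comparison in the patch below.

/*
 * Standalone sketch (not kernel code): models the sort order used when a
 * queue is inserted into the cfq service tree after this patch.  RT queues
 * sort before everything, IDLE queues sort after everything, and queues of
 * the same class are ordered by rb_key (next service time).
 */
#include <stdio.h>

enum ioprio_class { CLASS_RT, CLASS_BE, CLASS_IDLE };  /* invented names */

struct queue {
        enum ioprio_class cls;
        unsigned long rb_key;
};

/* < 0 means 'a' sorts left of 'b' in the tree, > 0 means right */
static int compare_queues(const struct queue *a, const struct queue *b)
{
        int a_rt = (a->cls == CLASS_RT), b_rt = (b->cls == CLASS_RT);
        int a_idle = (a->cls == CLASS_IDLE), b_idle = (b->cls == CLASS_IDLE);

        if (a_rt != b_rt)
                return b_rt - a_rt;             /* RT sorts left of non-RT */
        if (a_idle != b_idle)
                return a_idle - b_idle;         /* IDLE sorts right of non-IDLE */
        return (a->rb_key < b->rb_key) ? -1 : 1; /* ties go right, like left = 0 */
}

int main(void)
{
        struct queue rt   = { CLASS_RT,   100 };
        struct queue be   = { CLASS_BE,    10 };
        struct queue idle = { CLASS_IDLE,   1 };

        printf("rt vs be:   %d\n", compare_queues(&rt, &be));   /* negative: RT first */
        printf("idle vs be: %d\n", compare_queues(&idle, &be)); /* positive: IDLE last */
        printf("be vs be:   %d\n", compare_queues(&be, &be));   /* positive: ties go right */
        return 0;
}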
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--  block/cfq-iosched.c  67
1 file changed, 31 insertions, 36 deletions
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 81c057eadfcc..6a6a5f7930d8 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -92,7 +92,6 @@ struct cfq_data {
          */
         struct cfq_rb_root service_tree;
         struct list_head cur_rr;
-        struct list_head idle_rr;
         unsigned int busy_queues;
 
         /*
@@ -467,25 +466,33 @@ static void cfq_service_tree_add(struct cfq_data *cfqd,
 
         while (*p) {
                 struct cfq_queue *__cfqq;
+                struct rb_node **n;
 
                 parent = *p;
                 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
 
                 /*
                  * sort RT queues first, we always want to give
-                 * preference to them. after that, sort on the next
-                 * service time.
+                 * preference to them. IDLE queues goes to the back.
+                 * after that, sort on the next service time.
                  */
                 if (cfq_class_rt(cfqq) > cfq_class_rt(__cfqq))
-                        p = &(*p)->rb_left;
+                        n = &(*p)->rb_left;
                 else if (cfq_class_rt(cfqq) < cfq_class_rt(__cfqq))
-                        p = &(*p)->rb_right;
+                        n = &(*p)->rb_right;
+                else if (cfq_class_idle(cfqq) < cfq_class_idle(__cfqq))
+                        n = &(*p)->rb_left;
+                else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq))
+                        n = &(*p)->rb_right;
                 else if (rb_key < __cfqq->rb_key)
-                        p = &(*p)->rb_left;
-                else {
-                        p = &(*p)->rb_right;
+                        n = &(*p)->rb_left;
+                else
+                        n = &(*p)->rb_right;
+
+                if (n == &(*p)->rb_right)
                         left = 0;
-                }
+
+                p = n;
         }
 
         if (left)
@@ -506,19 +513,7 @@ static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
         if (!cfq_cfqq_on_rr(cfqq))
                 return;
 
-        list_del_init(&cfqq->cfq_list);
-
-        if (cfq_class_idle(cfqq)) {
-                /*
-                 * IDLE goes to the tail of the idle list
-                 */
-                list_add_tail(&cfqq->cfq_list, &cfqd->idle_rr);
-        } else {
-                /*
-                 * RT and BE queues, sort into the rbtree
-                 */
-                cfq_service_tree_add(cfqd, cfqq);
-        }
+        cfq_service_tree_add(cfqd, cfqq);
 }
 
 /*
@@ -797,20 +792,22 @@ static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
                 cfqq = list_entry_cfqq(cfqd->cur_rr.next);
         } else if (!RB_EMPTY_ROOT(&cfqd->service_tree.rb)) {
                 struct rb_node *n = cfq_rb_first(&cfqd->service_tree);
+                unsigned long end;
 
                 cfqq = rb_entry(n, struct cfq_queue, rb_node);
-        } else if (!list_empty(&cfqd->idle_rr)) {
-                /*
-                 * if we have idle queues and no rt or be queues had pending
-                 * requests, either allow immediate service if the grace period
-                 * has passed or arm the idle grace timer
-                 */
-                unsigned long end = cfqd->last_end_request + CFQ_IDLE_GRACE;
-
-                if (time_after_eq(jiffies, end))
-                        cfqq = list_entry_cfqq(cfqd->idle_rr.next);
-                else
-                        mod_timer(&cfqd->idle_class_timer, end);
+                if (cfq_class_idle(cfqq)) {
+                        /*
+                         * if we have idle queues and no rt or be queues had
+                         * pending requests, either allow immediate service if
+                         * the grace period has passed or arm the idle grace
+                         * timer
+                         */
+                        end = cfqd->last_end_request + CFQ_IDLE_GRACE;
+                        if (time_before(jiffies, end)) {
+                                mod_timer(&cfqd->idle_class_timer, end);
+                                cfqq = NULL;
+                        }
+                }
         }
 
         return cfqq;
@@ -1074,7 +1071,6 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
         }
 
         dispatched += cfq_forced_dispatch_cfqqs(&cfqd->cur_rr);
-        dispatched += cfq_forced_dispatch_cfqqs(&cfqd->idle_rr);
 
         cfq_slice_expired(cfqd, 0, 0);
 
@@ -2047,7 +2043,6 @@ static void *cfq_init_queue(request_queue_t *q)
 
         cfqd->service_tree = CFQ_RB_ROOT;
         INIT_LIST_HEAD(&cfqd->cur_rr);
-        INIT_LIST_HEAD(&cfqd->idle_rr);
         INIT_LIST_HEAD(&cfqd->cic_list);
 
         cfqd->cfq_hash = kmalloc_node(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL, q->node);
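The cfq_get_next_queue() hunk above keeps the old idle-class grace period even though the idle_rr list is gone: when the queue at the front of the service tree is idle-class, it is only handed out once CFQ_IDLE_GRACE jiffies have passed since the last request completion; otherwise idle_class_timer is armed and nothing is dispatched. The sketch below is a standalone model of that decision, not the kernel code; the pick_queue() helper, the queue struct and the IDLE_GRACE value are invented for illustration.

/*
 * Standalone sketch (not kernel code): models the idle-class grace check in
 * cfq_get_next_queue() after this patch.  'now' and 'last_end' stand in for
 * jiffies and cfqd->last_end_request; the kernel arms idle_class_timer where
 * this sketch simply returns NULL.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define IDLE_GRACE 100UL        /* stands in for CFQ_IDLE_GRACE; value invented */

struct queue {
        bool idle_class;        /* would be cfq_class_idle(cfqq) in the kernel */
};

static struct queue *pick_queue(struct queue *first, unsigned long now,
                                unsigned long last_end)
{
        if (first && first->idle_class) {
                unsigned long end = last_end + IDLE_GRACE;

                if (now < end)          /* kernel: time_before(jiffies, end) */
                        return NULL;    /* wait; kernel arms the timer here */
        }
        return first;
}

int main(void)
{
        struct queue idle = { .idle_class = true };

        printf("within grace:  %s\n", pick_queue(&idle,  50, 0) ? "serve" : "wait");
        printf("grace expired: %s\n", pick_queue(&idle, 150, 0) ? "serve" : "wait");
        return 0;
}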