diff options
author     Jens Axboe <jens.axboe@oracle.com>          2007-04-18 14:01:57 -0400
committer  Jens Axboe <axboe@nelson.home.kernel.dk>    2007-04-30 03:01:22 -0400
commit     0c534e0a463e2eeafc97ba25ab23c14f3cdf2bdb (patch)
tree       20f3b12b05a853e9e52eaabead16a195d519501b /block/cfq-iosched.c
parent     cc09e2990fdd96d25fdbb9db6bc9b4c82d9e4a3c (diff)
cfq-iosched: sort RT queues into the rbtree
Currently CFQ does a linked insert into the current list for RT
queues. We can just factor the class into the rb insertion,
and then we don't have to treat RT queues in a special way. It's
faster, too.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--  block/cfq-iosched.c | 27
1 file changed, 12 insertions(+), 15 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 55c476baa692..81c057eadfcc 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -471,7 +471,16 @@ static void cfq_service_tree_add(struct cfq_data *cfqd,
 		parent = *p;
 		__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
 
-		if (rb_key < __cfqq->rb_key)
+		/*
+		 * sort RT queues first, we always want to give
+		 * preference to them. after that, sort on the next
+		 * service time.
+		 */
+		if (cfq_class_rt(cfqq) > cfq_class_rt(__cfqq))
+			p = &(*p)->rb_left;
+		else if (cfq_class_rt(cfqq) < cfq_class_rt(__cfqq))
+			p = &(*p)->rb_right;
+		else if (rb_key < __cfqq->rb_key)
 			p = &(*p)->rb_left;
 		else {
 			p = &(*p)->rb_right;
@@ -490,7 +499,6 @@ static void cfq_service_tree_add(struct cfq_data *cfqd,
 static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
 {
 	struct cfq_data *cfqd = cfqq->cfqd;
-	struct list_head *n;
 
 	/*
 	 * Resorting requires the cfqq to be on the RR list already.
@@ -500,25 +508,14 @@ static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
 
 	list_del_init(&cfqq->cfq_list);
 
-	if (cfq_class_rt(cfqq)) {
-		/*
-		 * At to the front of the current list, but behind other
-		 * RT queues.
-		 */
-		n = &cfqd->cur_rr;
-		while (n->next != &cfqd->cur_rr)
-			if (!cfq_class_rt(cfqq))
-				break;
-
-		list_add(&cfqq->cfq_list, n);
-	} else if (cfq_class_idle(cfqq)) {
+	if (cfq_class_idle(cfqq)) {
 		/*
 		 * IDLE goes to the tail of the idle list
 		 */
 		list_add_tail(&cfqq->cfq_list, &cfqd->idle_rr);
 	} else {
 		/*
-		 * So we get here, ergo the queue is a regular best-effort queue
+		 * RT and BE queues, sort into the rbtree
 		 */
 		cfq_service_tree_add(cfqd, cfqq);
 	}