author    Jens Axboe <jens.axboe@oracle.com>  2007-04-26 06:53:50 -0400
committer Jens Axboe <axboe@nelson.home.kernel.dk>  2007-04-30 03:01:21 -0400
commit    cc09e2990fdd96d25fdbb9db6bc9b4c82d9e4a3c (patch)
tree      89c538c6182335592a981ded03fc120b616aef47
parent    d9e7620e60bc6648c3dcabbc8d1a320b69c846f9 (diff)
[PATCH] cfq-iosched: speed up rbtree handling
For cases where the rbtree is mainly used for sorting and min retrieval,
a nice speedup of the rbtree code is to maintain a cache of the leftmost
node in the tree. Also spotted in the CFS CPU scheduler code.

Improved by Alan D. Brunelle <Alan.Brunelle@hp.com> by updating the
leftmost hint in cfq_rb_first() if it isn't set, instead of only
updating it on insert.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
-rw-r--r--  block/cfq-iosched.c | 62
1 file changed, 48 insertions(+), 14 deletions(-)
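The pattern the message describes is easy to see outside the kernel. Below is a minimal, self-contained userspace sketch of the same idea, assuming a plain unbalanced binary search tree in place of the kernel rbtree; all names in it (cached_root, tree_first, tree_insert, tree_pop_min) are hypothetical, not kernel API.

#include <stdio.h>

/* Hypothetical miniature of cfq_rb_root: a binary search tree plus a
 * cached pointer to its leftmost (minimum) node. */
struct node {
	int key;
	struct node *left, *right, *parent;
};

struct cached_root {
	struct node *root;
	struct node *leftmost;	/* NULL means "hint not set" */
};

/* Mirrors cfq_rb_first(): lazily refresh the hint, then return it. */
static struct node *tree_first(struct cached_root *t)
{
	if (!t->leftmost) {
		struct node *n = t->root;

		while (n && n->left)
			n = n->left;
		t->leftmost = n;
	}
	return t->leftmost;
}

/* Mirrors cfq_service_tree_add(): if the descent never goes right,
 * the new node is the new minimum, so the hint updates for free. */
static void tree_insert(struct cached_root *t, struct node *n)
{
	struct node **p = &t->root, *parent = NULL;
	int leftmost = 1;

	n->left = n->right = NULL;
	while (*p) {
		parent = *p;
		if (n->key < parent->key)
			p = &parent->left;
		else {
			p = &parent->right;
			leftmost = 0;
		}
	}
	n->parent = parent;
	*p = n;
	if (leftmost)
		t->leftmost = n;
}

/* Mirrors cfq_rb_erase() for the minimum: unlink it and merely clear
 * the hint; the next tree_first() call repopulates it. */
static struct node *tree_pop_min(struct cached_root *t)
{
	struct node *n = tree_first(t);

	if (n) {
		/* the minimum has no left child: splice in its right subtree */
		struct node **link = n->parent ? &n->parent->left : &t->root;

		*link = n->right;
		if (n->right)
			n->right->parent = n->parent;
		t->leftmost = NULL;
	}
	return n;
}

int main(void)
{
	struct node nodes[] = {
		{ .key = 5 }, { .key = 2 }, { .key = 8 }, { .key = 1 },
	};
	struct cached_root t = { NULL, NULL };
	struct node *n;
	size_t i;

	for (i = 0; i < sizeof(nodes) / sizeof(nodes[0]); i++)
		tree_insert(&t, &nodes[i]);

	while ((n = tree_pop_min(&t)) != NULL)	/* first pop is O(1) */
		printf("%d ", n->key);		/* prints: 1 2 5 8 */
	printf("\n");
	return 0;
}

The win the commit is after is on the retrieval side: while the hint is warm, what would be an O(log n) rb_first() walk becomes a single pointer read.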
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 4838c2b16f2c..55c476baa692 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -70,6 +70,18 @@ static struct completion *ioc_gone;
 #define sample_valid(samples)	((samples) > 80)
 
 /*
+ * Most of our rbtree usage is for sorting with min extraction, so
+ * if we cache the leftmost node we don't have to walk down the tree
+ * to find it. Idea borrowed from Ingo Molnars CFS scheduler. We should
+ * move this into the elevator for the rq sorting as well.
+ */
+struct cfq_rb_root {
+	struct rb_root rb;
+	struct rb_node *left;
+};
+#define CFQ_RB_ROOT	(struct cfq_rb_root) { RB_ROOT, NULL, }
+
+/*
  * Per block device queue structure
  */
 struct cfq_data {
@@ -78,7 +90,7 @@ struct cfq_data {
 	/*
 	 * rr list of queues with requests and the count of them
 	 */
-	struct rb_root service_tree;
+	struct cfq_rb_root service_tree;
 	struct list_head cur_rr;
 	struct list_head idle_rr;
 	unsigned int busy_queues;
@@ -378,6 +390,23 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
 	}
 }
 
+static struct rb_node *cfq_rb_first(struct cfq_rb_root *root)
+{
+	if (!root->left)
+		root->left = rb_first(&root->rb);
+
+	return root->left;
+}
+
+static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
+{
+	if (root->left == n)
+		root->left = NULL;
+
+	rb_erase(n, &root->rb);
+	RB_CLEAR_NODE(n);
+}
+
 /*
  * would be nice to take fifo expire time into account as well
  */
@@ -417,10 +446,10 @@ static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
 static void cfq_service_tree_add(struct cfq_data *cfqd,
 				 struct cfq_queue *cfqq)
 {
-	struct rb_node **p = &cfqd->service_tree.rb_node;
+	struct rb_node **p = &cfqd->service_tree.rb.rb_node;
 	struct rb_node *parent = NULL;
-	struct cfq_queue *__cfqq;
 	unsigned long rb_key;
+	int left = 1;
 
 	rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
 	rb_key += cfqq->slice_resid;
@@ -433,22 +462,29 @@ static void cfq_service_tree_add(struct cfq_data *cfqd,
 		if (rb_key == cfqq->rb_key)
 			return;
 
-		rb_erase(&cfqq->rb_node, &cfqd->service_tree);
+		cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
 	}
 
 	while (*p) {
+		struct cfq_queue *__cfqq;
+
 		parent = *p;
 		__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
 
 		if (rb_key < __cfqq->rb_key)
 			p = &(*p)->rb_left;
-		else
+		else {
 			p = &(*p)->rb_right;
+			left = 0;
+		}
 	}
 
+	if (left)
+		cfqd->service_tree.left = &cfqq->rb_node;
+
 	cfqq->rb_key = rb_key;
 	rb_link_node(&cfqq->rb_node, parent, p);
-	rb_insert_color(&cfqq->rb_node, &cfqd->service_tree);
+	rb_insert_color(&cfqq->rb_node, &cfqd->service_tree.rb);
 }
 
 static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
@@ -509,10 +545,8 @@ cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	cfq_clear_cfqq_on_rr(cfqq);
 	list_del_init(&cfqq->cfq_list);
 
-	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
-		rb_erase(&cfqq->rb_node, &cfqd->service_tree);
-		RB_CLEAR_NODE(&cfqq->rb_node);
-	}
+	if (!RB_EMPTY_NODE(&cfqq->rb_node))
+		cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
 
 	BUG_ON(!cfqd->busy_queues);
 	cfqd->busy_queues--;
@@ -764,8 +798,8 @@ static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
 		 * if current list is non-empty, grab first entry.
 		 */
 		cfqq = list_entry_cfqq(cfqd->cur_rr.next);
-	} else if (!RB_EMPTY_ROOT(&cfqd->service_tree)) {
-		struct rb_node *n = rb_first(&cfqd->service_tree);
+	} else if (!RB_EMPTY_ROOT(&cfqd->service_tree.rb)) {
+		struct rb_node *n = cfq_rb_first(&cfqd->service_tree);
 
 		cfqq = rb_entry(n, struct cfq_queue, rb_node);
 	} else if (!list_empty(&cfqd->idle_rr)) {
@@ -1036,7 +1070,7 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
 	int dispatched = 0;
 	struct rb_node *n;
 
-	while ((n = rb_first(&cfqd->service_tree)) != NULL) {
+	while ((n = cfq_rb_first(&cfqd->service_tree)) != NULL) {
 		struct cfq_queue *cfqq = rb_entry(n, struct cfq_queue, rb_node);
 
 		dispatched += __cfq_forced_dispatch_cfqq(cfqq);
@@ -2014,7 +2048,7 @@ static void *cfq_init_queue(request_queue_t *q)
 
 	memset(cfqd, 0, sizeof(*cfqd));
 
-	cfqd->service_tree = RB_ROOT;
+	cfqd->service_tree = CFQ_RB_ROOT;
 	INIT_LIST_HEAD(&cfqd->cur_rr);
 	INIT_LIST_HEAD(&cfqd->idle_rr);
 	INIT_LIST_HEAD(&cfqd->cic_list);
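A closing note on the two helpers this patch adds: cfq_rb_erase() never recomputes the hint, it only clears it when the cached node itself is removed, and cfq_rb_first() refills it on demand; that lazy refresh is the Alan D. Brunelle improvement called out in the commit message. On the insert side, the new node can only be the new minimum if the descent in cfq_service_tree_add() never followed an rb_right link, which is exactly what the local left flag tracks.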