author    Vivek Goyal <vgoyal@redhat.com>        2009-12-03 12:59:41 -0500
committer Jens Axboe <jens.axboe@oracle.com>     2009-12-03 13:28:51 -0500
commit    1fa8f6d68b5c8ca0a608fd8d296c5f07ac788cd6 (patch)
tree      1e6d9e0ed1bd61f7f8f71a7191815be2ab65aac3 /block/cfq-iosched.c
parent    f04a64246344ad50e4b4b4186174a0912d07f30b (diff)
blkio: Introduce the root service tree for cfq groups
o So far we just had one cfq_group in cfq_data. To create space for more
  than one cfq_group, we need a service tree of groups, on which all groups
  can be queued whenever they have active cfq queues backlogged in them.

Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--   block/cfq-iosched.c   137

1 file changed, 134 insertions(+), 3 deletions(-)
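
Before the hunks: the change is easiest to read as a two-level scheduling
hierarchy, with a single root rbtree holding cfq_groups and, inside each
group, the existing per-class service trees of queues. A minimal userspace
sketch of that relationship follows; the field names mirror the patch, but
the rbtree machinery is elided, so this is an illustration rather than
kernel code.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct cfq_rb_root_sketch {
    	void *rb_root;			/* rbtree of entities */
    	void *leftmost;			/* cached minimum for O(1) lookup */
    	unsigned count;
    	uint64_t min_vdisktime;		/* new in this patch */
    };

    struct cfq_group_sketch {
    	uint64_t vdisktime;		/* key on the group service tree */
    	bool on_st;			/* currently linked on that tree? */
    	int nr_cfqq;			/* backlogged queues keeping it there */
    };

    int main(void)
    {
    	struct cfq_group_sketch g = { .vdisktime = 0, .on_st = false, .nr_cfqq = 0 };

    	/* A group joins the root tree when its first queue goes busy... */
    	g.nr_cfqq++;
    	g.on_st = true;
    	/* ...and leaves once its last busy queue is gone. */
    	g.nr_cfqq--;
    	if (!g.nr_cfqq)
    		g.on_st = false;

    	printf("on service tree: %s\n", g.on_st ? "yes" : "no");
    	return 0;
    }
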
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 7f5646ac9f5d..e1f822ac4690 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -66,6 +66,7 @@ static DEFINE_SPINLOCK(ioc_gone_lock);
 #define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
 
 #define sample_valid(samples)	((samples) > 80)
+#define rb_entry_cfqg(node)	rb_entry((node), struct cfq_group, rb_node)
 
 /*
  * Most of our rbtree usage is for sorting with min extraction, so
@@ -77,8 +78,9 @@ struct cfq_rb_root {
 	struct rb_root rb;
 	struct rb_node *left;
 	unsigned count;
+	u64 min_vdisktime;
 };
-#define CFQ_RB_ROOT	(struct cfq_rb_root) { RB_ROOT, NULL, 0, }
+#define CFQ_RB_ROOT	(struct cfq_rb_root) { RB_ROOT, NULL, 0, 0, }
 
 /*
  * Per process-grouping structure
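
A detail worth flagging here: the new min_vdisktime is the reference point
for ordering groups. cfqg_key(), introduced later in the patch, compares
groups by the signed difference cfqg->vdisktime - st->min_vdisktime rather
than by raw u64 values, which keeps the ordering sensible even if the
virtual disk-time clock ever wraps. A small standalone check of that
property (plain C, not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    /* Same idea as cfqg_key(): compare positions relative to the tree
     * minimum, so u64 wraparound does not reorder entities. */
    static int64_t key(uint64_t vdisktime, uint64_t min_vdisktime)
    {
    	return (int64_t)(vdisktime - min_vdisktime);
    }

    int main(void)
    {
    	uint64_t min = UINT64_MAX - 5;	/* clock close to wrapping */
    	uint64_t a = UINT64_MAX - 2;	/* queued before the wrap */
    	uint64_t b = 3;			/* queued after the wrap */

    	/* b is logically later than a even though b < a as raw u64:
    	 * prints key(a)=3 key(b)=9 */
    	printf("key(a)=%lld key(b)=%lld\n",
    	       (long long)key(a, min), (long long)key(b, min));
    	return 0;
    }
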
@@ -156,6 +158,16 @@ enum wl_type_t {
 
 /* This is per cgroup per device grouping structure */
 struct cfq_group {
+	/* group service_tree member */
+	struct rb_node rb_node;
+
+	/* group service_tree key */
+	u64 vdisktime;
+	bool on_st;
+
+	/* number of cfqq currently on this group */
+	int nr_cfqq;
+
 	/*
 	 * rr lists of queues with requests, one rr for each priority class.
 	 * Counts are embedded in the cfq_rb_root
@@ -169,6 +181,8 @@ struct cfq_group {
  */
 struct cfq_data {
 	struct request_queue *queue;
+	/* Root service tree for cfq_groups */
+	struct cfq_rb_root grp_service_tree;
 	struct cfq_group root_group;
 
 	/*
@@ -251,6 +265,9 @@ static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
 					    enum wl_type_t type,
 					    struct cfq_data *cfqd)
 {
+	if (!cfqg)
+		return NULL;
+
 	if (prio == IDLE_WORKLOAD)
 		return &cfqg->service_tree_idle;
 
@@ -589,6 +606,17 @@ static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
 	return NULL;
 }
 
+static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
+{
+	if (!root->left)
+		root->left = rb_first(&root->rb);
+
+	if (root->left)
+		return rb_entry_cfqg(root->left);
+
+	return NULL;
+}
+
 static void rb_erase_init(struct rb_node *n, struct rb_root *root)
 {
 	rb_erase(n, root);
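
cfq_rb_first_group() reuses the pattern cfq_rb_first() already applies to
queues: the leftmost (minimum-key) node is cached in root->left, so the hot
"who runs next" lookup is O(1), and a full rb_first() walk only happens
after an erase invalidated the cache. The same memoization in miniature,
on a hypothetical array-backed container rather than the kernel rbtree:

    #include <stddef.h>
    #include <stdio.h>

    struct cache_demo {
    	int vals[8];
    	size_t n;
    	int *left;	/* cached pointer to the minimum, or NULL */
    };

    static int *first(struct cache_demo *c)
    {
    	if (!c->left) {		/* cache miss: one full scan */
    		size_t i;
    		for (i = 0; i < c->n; i++)
    			if (!c->left || c->vals[i] < *c->left)
    				c->left = &c->vals[i];
    	}
    	return c->left;		/* cache hit: O(1) */
    }

    int main(void)
    {
    	struct cache_demo c = { .vals = {7, 3, 9}, .n = 3, .left = NULL };

    	printf("min = %d\n", *first(&c));	/* scans once */
    	printf("min = %d\n", *first(&c));	/* served from the cache */
    	return 0;
    }
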
@@ -640,6 +668,83 @@ static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
 			 cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
 }
 
+static inline s64
+cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
+{
+	return cfqg->vdisktime - st->min_vdisktime;
+}
+
+static void
+__cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
+{
+	struct rb_node **node = &st->rb.rb_node;
+	struct rb_node *parent = NULL;
+	struct cfq_group *__cfqg;
+	s64 key = cfqg_key(st, cfqg);
+	int left = 1;
+
+	while (*node != NULL) {
+		parent = *node;
+		__cfqg = rb_entry_cfqg(parent);
+
+		if (key < cfqg_key(st, __cfqg))
+			node = &parent->rb_left;
+		else {
+			node = &parent->rb_right;
+			left = 0;
+		}
+	}
+
+	if (left)
+		st->left = &cfqg->rb_node;
+
+	rb_link_node(&cfqg->rb_node, parent, node);
+	rb_insert_color(&cfqg->rb_node, &st->rb);
+}
+
+static void
+cfq_group_service_tree_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
+{
+	struct cfq_rb_root *st = &cfqd->grp_service_tree;
+	struct cfq_group *__cfqg;
+	struct rb_node *n;
+
+	cfqg->nr_cfqq++;
+	if (cfqg->on_st)
+		return;
+
+	/*
+	 * Currently put the group at the end. Later implement something
+	 * so that groups get lesser vtime based on their weights, so that
+	 * a group does not lose everything if it was not continuously
+	 * backlogged.
+	 */
+	n = rb_last(&st->rb);
+	if (n) {
+		__cfqg = rb_entry_cfqg(n);
+		cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
+	} else
+		cfqg->vdisktime = st->min_vdisktime;
+
+	__cfq_group_service_tree_add(st, cfqg);
+	cfqg->on_st = true;
+}
+
+static void
+cfq_group_service_tree_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
+{
+	struct cfq_rb_root *st = &cfqd->grp_service_tree;
+
+	BUG_ON(cfqg->nr_cfqq < 1);
+	cfqg->nr_cfqq--;
+	/* If there are other cfq queues under this group, don't delete it */
+	if (cfqg->nr_cfqq)
+		return;
+
+	cfqg->on_st = false;
+	if (!RB_EMPTY_NODE(&cfqg->rb_node))
+		cfq_rb_erase(&cfqg->rb_node, st);
+}
+
 /*
  * The cfqd->service_trees holds all pending cfq_queue's that have
  * requests waiting to be processed. It is sorted in the order that
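
Two things happen in the add path above. In __cfq_group_service_tree_add(),
the leftmost cache is kept coherent for free: if the descent ever goes
right, the new node cannot be the minimum, so the left flag is cleared and
the cache is left alone. And cfq_group_service_tree_add() enqueues a newly
backlogged group at the tail of the virtual-time order (last group's
vdisktime plus CFQ_IDLE_DELAY), so a group that just woke up cannot jump
ahead of groups that stayed busy. A sketch of both ideas on a plain
unbalanced BST; this is a userspace stand-in for the kernel rbtree, and the
IDLE_DELAY value here is invented:

    #include <stdint.h>
    #include <stdio.h>

    #define IDLE_DELAY 8	/* stand-in for CFQ_IDLE_DELAY, value invented */

    struct node {
    	uint64_t vdisktime;
    	struct node *l, *r;
    };

    struct tree {
    	struct node *root;
    	struct node *leftmost;		/* cached minimum */
    	uint64_t last_vdisktime;	/* stand-in for rb_last() */
    };

    static void insert(struct tree *t, struct node *n)
    {
    	struct node **p = &t->root;
    	int left = 1;			/* did we only ever descend left? */

    	while (*p) {
    		if (n->vdisktime < (*p)->vdisktime)
    			p = &(*p)->l;
    		else {
    			p = &(*p)->r;
    			left = 0;	/* went right once: not the new min */
    		}
    	}
    	*p = n;
    	if (left)
    		t->leftmost = n;	/* cache stays coherent for free */
    }

    /* Newly backlogged entities start just after the current tail. */
    static void add_group(struct tree *t, struct node *n)
    {
    	n->vdisktime = t->last_vdisktime + IDLE_DELAY;
    	t->last_vdisktime = n->vdisktime;
    	insert(t, n);
    }

    int main(void)
    {
    	struct tree t = {0};
    	struct node a = {0}, b = {0};

    	add_group(&t, &a);
    	add_group(&t, &b);
    	printf("next to serve: vdisktime=%llu\n",	/* prints 8 (group a) */
    	       (unsigned long long)t.leftmost->vdisktime);
    	return 0;
    }
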
@@ -722,6 +827,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	rb_link_node(&cfqq->rb_node, parent, p);
 	rb_insert_color(&cfqq->rb_node, &service_tree->rb);
 	service_tree->count++;
+	cfq_group_service_tree_add(cfqd, cfqq->cfqg);
 }
 
 static struct cfq_queue *
@@ -832,6 +938,7 @@ static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 		cfqq->p_root = NULL;
 	}
 
+	cfq_group_service_tree_del(cfqd, cfqq->cfqg);
 	BUG_ON(!cfqd->busy_queues);
 	cfqd->busy_queues--;
 }
@@ -1108,6 +1215,9 @@ static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
 	if (!cfqd->rq_queued)
 		return NULL;
 
+	/* There is nothing to dispatch */
+	if (!service_tree)
+		return NULL;
 	if (RB_EMPTY_ROOT(&service_tree->rb))
 		return NULL;
 	return cfq_rb_first(service_tree);
@@ -1477,6 +1587,12 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
 	unsigned count;
 	struct cfq_rb_root *st;
 
+	if (!cfqg) {
+		cfqd->serving_prio = IDLE_WORKLOAD;
+		cfqd->workload_expires = jiffies + 1;
+		return;
+	}
+
 	/* Choose next priority. RT > BE > IDLE */
 	if (cfq_busy_queues_wl(RT_WORKLOAD, cfqd))
 		cfqd->serving_prio = RT_WORKLOAD;
@@ -1535,10 +1651,21 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
 	cfqd->noidle_tree_requires_idle = false;
 }
 
+static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
+{
+	struct cfq_rb_root *st = &cfqd->grp_service_tree;
+
+	if (RB_EMPTY_ROOT(&st->rb))
+		return NULL;
+	return cfq_rb_first_group(st);
+}
+
 static void cfq_choose_cfqg(struct cfq_data *cfqd)
 {
-	cfqd->serving_group = &cfqd->root_group;
-	choose_service_tree(cfqd, &cfqd->root_group);
+	struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
+
+	cfqd->serving_group = cfqg;
+	choose_service_tree(cfqd, cfqg);
 }
 
 /*
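
With this hunk, each dispatch round becomes a two-level pick: the leftmost
group on grp_service_tree first, then a workload tree within that group.
Note that cfq_choose_cfqg() can now hand choose_service_tree() a NULL group
when the tree is empty, which the earlier choose_service_tree() hunk absorbs
by parking in IDLE_WORKLOAD for a jiffy. A compressed, runnable view of the
control flow; the types and helpers below are hypothetical stand-ins, not
the kernel's:

    #include <stdio.h>

    struct group { const char *name; int busy_queues; };

    /* Stand-in for cfq_get_next_cfqg(): first backlogged group or NULL
     * (pretend array order is vdisktime order). */
    static struct group *next_group(struct group *gs, int n)
    {
    	int i;
    	for (i = 0; i < n; i++)
    		if (gs[i].busy_queues)
    			return &gs[i];
    	return NULL;
    }

    static void choose(struct group *g)
    {
    	if (!g) {	/* nothing backlogged: idle briefly, like the patch */
    		printf("serving: IDLE (no group)\n");
    		return;
    	}
    	printf("serving group %s\n", g->name);
    }

    int main(void)
    {
    	struct group gs[] = { { "root", 0 }, { "grpA", 2 } };

    	choose(next_group(gs, 2));	/* picks grpA */
    	gs[1].busy_queues = 0;
    	choose(next_group(gs, 2));	/* nothing left: idle */
    	return 0;
    }
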
@@ -3014,10 +3141,14 @@ static void *cfq_init_queue(struct request_queue *q)
 	if (!cfqd)
 		return NULL;
 
+	/* Init root service tree */
+	cfqd->grp_service_tree = CFQ_RB_ROOT;
+
 	/* Init root group */
 	cfqg = &cfqd->root_group;
 	for_each_cfqg_st(cfqg, i, j, st)
 		*st = CFQ_RB_ROOT;
+	RB_CLEAR_NODE(&cfqg->rb_node);
 
 	/*
 	 * Not strictly needed (since RB_ROOT just clears the node and we
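
One C note on the init hunk: CFQ_RB_ROOT expands to a compound literal, so
cfqd->grp_service_tree = CFQ_RB_ROOT is a whole-struct assignment that sets
every field, including the new min_vdisktime (which is why the macro gained
a fourth initializer). The idiom in isolation, with a hypothetical struct of
the same shape:

    #include <stdint.h>
    #include <stdio.h>

    struct demo_rb_root {
    	void *rb;
    	void *left;
    	unsigned count;
    	uint64_t min_vdisktime;
    };

    /* Same shape as CFQ_RB_ROOT: a compound literal usable as a value. */
    #define DEMO_RB_ROOT	(struct demo_rb_root) { NULL, NULL, 0, 0, }

    int main(void)
    {
    	struct demo_rb_root st;

    	st = DEMO_RB_ROOT;	/* whole-struct assignment, every field set */
    	printf("count=%u min_vdisktime=%llu\n",
    	       st.count, (unsigned long long)st.min_vdisktime);
    	return 0;
    }
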