author    Paul Turner <pjt@google.com>  2010-11-15 18:47:05 -0500
committer Ingo Molnar <mingo@elte.hu>   2010-11-18 07:27:48 -0500
commit    67e86250f8ea7b8f7da53ac25ea73c6bd71f5cd9 (patch)
tree      9e429bdc568172f00e81aa4dcb57796ed0d404dd
parent    e33078baa4d30ad1d0e46d1f62b9e5a63a3e6ee3 (diff)
sched: Introduce hierarchical order on shares update list
Avoid duplicate shares update calls by ensuring children always appear before
parents in rq->leaf_cfs_rq_list.

This allows us to do a single in-order traversal for update_shares().

Since we always enqueue in bottom-up order this reduces to 2 cases:

1) Our parent is already in the list, e.g.

     root
       \
        b
       / \
      c   d* (root->b->c already enqueued)

   Since d's parent is enqueued we push it to the head of the list,
   implicitly ahead of b.

2) Our parent does not appear in the list (or we have no parent).

   In this case we enqueue to the tail of the list; if our parent is
   subsequently enqueued (bottom-up) it will appear to our right by the
   same rule.

Signed-off-by: Paul Turner <pjt@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20101115234938.022488865@google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
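To make the two cases concrete, here is a minimal userspace sketch of the
insertion rule. This is a hypothetical illustration, not the patched code:
the hand-rolled list helpers and struct toy_cfs_rq stand in for the kernel's
list_head and cfs_rq.

#include <stdio.h>
#include <stddef.h>

/* Minimal doubly linked list, standing in for the kernel's list_head. */
struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

static void list_add(struct list_head *n, struct list_head *head)
{	/* insert at the front, like list_add_rcu() */
	n->next = head->next; n->prev = head;
	head->next->prev = n; head->next = n;
}

static void list_add_tail(struct list_head *n, struct list_head *head)
{	/* insert at the back, like list_add_tail_rcu() */
	n->prev = head->prev; n->next = head;
	head->prev->next = n; head->prev = n;
}

/* Toy stand-in for a per-cpu cfs_rq: a name, a parent, and on_list. */
struct toy_cfs_rq {
	const char *name;
	struct toy_cfs_rq *parent;
	int on_list;
	struct list_head leaf;
};

/* The patch's rule: head if our parent is already listed, else tail. */
static void toy_add_leaf(struct toy_cfs_rq *q, struct list_head *leaf_list)
{
	if (q->on_list)
		return;
	if (q->parent && q->parent->on_list)
		list_add(&q->leaf, leaf_list);		/* case 1 */
	else
		list_add_tail(&q->leaf, leaf_list);	/* case 2 */
	q->on_list = 1;
}

int main(void)
{
	struct list_head leaf_list;
	struct toy_cfs_rq root = { "root", NULL, 0 };
	struct toy_cfs_rq b = { "b", &root, 0 };
	struct toy_cfs_rq c = { "c", &b, 0 };
	struct toy_cfs_rq d = { "d", &b, 0 };

	list_init(&leaf_list);

	/* Bottom-up enqueue of root->b->c, then d (case 1 from above). */
	toy_add_leaf(&c, &leaf_list);
	toy_add_leaf(&b, &leaf_list);
	toy_add_leaf(&root, &leaf_list);
	toy_add_leaf(&d, &leaf_list);

	for (struct list_head *p = leaf_list.next; p != &leaf_list; p = p->next) {
		struct toy_cfs_rq *q = (struct toy_cfs_rq *)
			((char *)p - offsetof(struct toy_cfs_rq, leaf));
		printf("%s ", q->name);
	}
	printf("\n");
	return 0;
}

Enqueued bottom-up over the example tree (c, b, root, then d), this prints
"d c b root": every child precedes its parent, which is exactly the invariant
update_shares() relies on for its single in-order traversal.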
-rw-r--r--  kernel/sched_fair.c  26
1 file changed, 16 insertions(+), 10 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index a543a5b202a4..b320753aa6c9 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -146,8 +146,20 @@ static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
 {
 	if (!cfs_rq->on_list) {
-		list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
-				&rq_of(cfs_rq)->leaf_cfs_rq_list);
+		/*
+		 * Ensure we either appear before our parent (if already
+		 * enqueued) or force our parent to appear after us when it is
+		 * enqueued.  The fact that we always enqueue bottom-up
+		 * reduces this to two cases.
+		 */
+		if (cfs_rq->tg->parent &&
+		    cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
+			list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
+				&rq_of(cfs_rq)->leaf_cfs_rq_list);
+		} else {
+			list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
+				&rq_of(cfs_rq)->leaf_cfs_rq_list);
+		}
 
 		cfs_rq->on_list = 1;
 	}
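The mechanics behind the two cases: list_add_rcu() inserts at the head of the
list and list_add_tail_rcu() at the tail, so a cfs_rq whose parent is already
listed is placed ahead of it (case 1), while one with no listed parent goes to
the back, where a parent enqueued later lands behind it by the same rule
(case 2).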
@@ -2016,7 +2028,7 @@ out:
 /*
  * update tg->load_weight by folding this cpu's load_avg
  */
-static int tg_shares_up(struct task_group *tg, int cpu)
+static int update_shares_cpu(struct task_group *tg, int cpu)
 {
 	struct cfs_rq *cfs_rq;
 	unsigned long flags;
@@ -2056,14 +2068,8 @@ static void update_shares(int cpu)
 	struct rq *rq = cpu_rq(cpu);
 
 	rcu_read_lock();
-	for_each_leaf_cfs_rq(rq, cfs_rq) {
-		struct task_group *tg = cfs_rq->tg;
-
-		do {
-			tg_shares_up(tg, cpu);
-			tg = tg->parent;
-		} while (tg);
-	}
+	for_each_leaf_cfs_rq(rq, cfs_rq)
+		update_shares_cpu(cfs_rq->tg, cpu);
 	rcu_read_unlock();
 }
 
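A toy model of why the single pass suffices: with children guaranteed to
precede parents, each group's aggregate is complete by the time the walk
reaches it, so the old per-leaf do/while climb to the root becomes redundant.
The arrays below are hypothetical stand-ins, not the kernel's actual
load_weight accounting:

#include <stdio.h>

int main(void)
{
	/* Groups listed child-before-parent, as leaf_cfs_rq_list now
	 * guarantees: 0=d, 1=c, 2=b, 3=root; parent[i] indexes i's parent. */
	const char *name[] = { "d", "c", "b", "root" };
	int parent[] = { 2, 2, 3, -1 };
	long load[] = { 3, 5, 2, 1 };	/* each group's own load */

	/* One in-order pass: every child has already folded its load into
	 * its parent by the time the parent itself is visited. */
	for (int i = 0; i < 4; i++) {
		if (parent[i] >= 0)
			load[parent[i]] += load[i];
		printf("update %-4s aggregate=%ld\n", name[i], load[i]);
	}
	return 0;
}

Each group is visited exactly once, in contrast to the removed loop, which
re-walked every ancestor chain from every leaf.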