path: root/kernel/sched_fair.c
author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2010-11-15 18:47:02 -0500
committer Ingo Molnar <mingo@elte.hu>              2010-11-18 07:27:47 -0500
commit    9e3081ca61147b29f52fddb4f7c6b6b82ea5eb7a (patch)
tree      74ec7a4f3e544ed64afb6cb7abe6322ca700d19a /kernel/sched_fair.c
parent    3d4b47b4b040c9d77dd68104cfc1055d89a55afd (diff)
sched: Make tg_shares_up() walk on-demand
Make tg_shares_up() use the active cgroup list. This means we cannot do a strict bottom-up walk of the hierarchy, but assuming it is a very wide tree with a small number of active groups it should be a win.

Signed-off-by: Paul Turner <pjt@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20101115234937.754159484@google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
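To make the claimed trade-off concrete, here is a small stand-alone C sketch (not part of the patch; tree depth, fanout and the number of active groups are invented purely for illustration) comparing a walk that visits every task group against an on-demand walk that only climbs from the active leaves to the root:

/*
 * Illustration only: cost of a full hierarchy walk vs. an on-demand walk
 * on a wide tree with few active groups.  All numbers are made up.
 */
#include <stdio.h>

#define DEPTH          2        /* levels below the root task group       */
#define FANOUT         100      /* children per group: a very wide tree   */
#define ACTIVE_LEAVES  4        /* leaf groups with runnable tasks on cpu */

int main(void)
{
        unsigned long total = 1, level = 1, full_walk, on_demand;
        int i;

        /* total groups in a full tree of the given depth and fanout */
        for (i = 0; i < DEPTH; i++) {
                level *= FANOUT;
                total += level;
        }

        /* a strict bottom-up walk visits every group, active or not */
        full_walk = total;

        /* the on-demand walk climbs from each active leaf to the root */
        on_demand = ACTIVE_LEAVES * (DEPTH + 1);

        printf("full walk visits:      %lu groups\n", full_walk);
        printf("on-demand walk visits: %lu groups\n", on_demand);
        return 0;
}

With a fanout of 100 over two levels the full walk touches 10101 groups, while the on-demand walk performs at most 12 visits; that is the trade-off the patch is betting on.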
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--  kernel/sched_fair.c  58
1 file changed, 58 insertions(+), 0 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 0560e72bd732..46ff6587dc16 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -2004,6 +2004,60 @@ out:
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
+/*
+ * update tg->load_weight by folding this cpu's load_avg
+ */
+static int tg_shares_up(struct task_group *tg, int cpu)
+{
+        struct cfs_rq *cfs_rq;
+        unsigned long flags;
+        struct rq *rq;
+        long load_avg;
+
+        if (!tg->se[cpu])
+                return 0;
+
+        rq = cpu_rq(cpu);
+        cfs_rq = tg->cfs_rq[cpu];
+
+        raw_spin_lock_irqsave(&rq->lock, flags);
+
+        update_rq_clock(rq);
+        update_cfs_load(cfs_rq, 1);
+
+        load_avg = div64_u64(cfs_rq->load_avg, cfs_rq->load_period+1);
+        load_avg -= cfs_rq->load_contribution;
+        atomic_add(load_avg, &tg->load_weight);
+        cfs_rq->load_contribution += load_avg;
+
+        /*
+         * We need to update shares after updating tg->load_weight in
+         * order to adjust the weight of groups with long running tasks.
+         */
+        update_cfs_shares(cfs_rq);
+
+        raw_spin_unlock_irqrestore(&rq->lock, flags);
+
+        return 0;
+}
+
+static void update_shares(int cpu)
+{
+        struct cfs_rq *cfs_rq;
+        struct rq *rq = cpu_rq(cpu);
+
+        rcu_read_lock();
+        for_each_leaf_cfs_rq(rq, cfs_rq) {
+                struct task_group *tg = cfs_rq->tg;
+
+                do {
+                        tg_shares_up(tg, cpu);
+                        tg = tg->parent;
+                } while (tg);
+        }
+        rcu_read_unlock();
+}
+
 static unsigned long
 load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
                   unsigned long max_load_move,
@@ -2051,6 +2105,10 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
         return max_load_move - rem_load_move;
 }
 #else
+static inline void update_shares(int cpu)
+{
+}
+
 static unsigned long
 load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
                   unsigned long max_load_move,
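The arithmetic in tg_shares_up() follows a per-cpu delta-accumulation pattern: each cpu folds its current average into the group-wide tg->load_weight and records what it contributed, so the next fold only publishes the difference. A minimal user-space sketch of that pattern (illustrative names and values, not the kernel's data structures):

/*
 * Illustration only: each cpu remembers what it last contributed to a
 * shared sum and publishes only the delta, mirroring the
 * load_avg/load_contribution/load_weight updates in tg_shares_up().
 */
#include <stdio.h>

struct shared_sum {
        long total;             /* plays the role of tg->load_weight   */
};

struct per_cpu_state {
        long contribution;      /* plays the role of load_contribution */
};

static void fold(struct shared_sum *sum, struct per_cpu_state *pc, long avg)
{
        long delta = avg - pc->contribution;

        sum->total += delta;            /* atomic_add() in the kernel */
        pc->contribution += delta;      /* now equal to avg           */
}

int main(void)
{
        struct shared_sum tg = { 0 };
        struct per_cpu_state cpu0 = { 0 }, cpu1 = { 0 };

        fold(&tg, &cpu0, 100);          /* cpu0 contributes 100        */
        fold(&tg, &cpu1, 300);          /* cpu1 contributes 300        */
        fold(&tg, &cpu0, 50);           /* cpu0 drops to 50, delta -50 */

        printf("shared total: %ld\n", tg.total);        /* prints 350 */
        return 0;
}

Because only the delta is published, repeated folds with an unchanged per-cpu average leave the shared total untouched.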