Diffstat (limited to 'kernel/sched_fair.c')
 kernel/sched_fair.c | 58 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 58 insertions(+), 0 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 0560e72bd732..46ff6587dc16 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -2004,6 +2004,60 @@ out:
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
+/*
+ * update tg->load_weight by folding this cpu's load_avg
+ */
+static int tg_shares_up(struct task_group *tg, int cpu)
+{
+	struct cfs_rq *cfs_rq;
+	unsigned long flags;
+	struct rq *rq;
+	long load_avg;
+
+	if (!tg->se[cpu])
+		return 0;
+
+	rq = cpu_rq(cpu);
+	cfs_rq = tg->cfs_rq[cpu];
+
+	raw_spin_lock_irqsave(&rq->lock, flags);
+
+	update_rq_clock(rq);
+	update_cfs_load(cfs_rq, 1);
+
+	load_avg = div64_u64(cfs_rq->load_avg, cfs_rq->load_period+1);
+	load_avg -= cfs_rq->load_contribution;
+	atomic_add(load_avg, &tg->load_weight);
+	cfs_rq->load_contribution += load_avg;
+
+	/*
+	 * We need to update shares after updating tg->load_weight in
+	 * order to adjust the weight of groups with long running tasks.
+	 */
+	update_cfs_shares(cfs_rq);
+
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
+
+	return 0;
+}
+
+static void update_shares(int cpu)
+{
+	struct cfs_rq *cfs_rq;
+	struct rq *rq = cpu_rq(cpu);
+
+	rcu_read_lock();
+	for_each_leaf_cfs_rq(rq, cfs_rq) {
+		struct task_group *tg = cfs_rq->tg;
+
+		do {
+			tg_shares_up(tg, cpu);
+			tg = tg->parent;
+		} while (tg);
+	}
+	rcu_read_unlock();
+}
+
 static unsigned long
 load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		  unsigned long max_load_move,
@@ -2051,6 +2105,10 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 	return max_load_move - rem_load_move;
 }
 #else
+static inline void update_shares(int cpu)
+{
+}
+
 static unsigned long
 load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		  unsigned long max_load_move,
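
The delta-folding idiom used by tg_shares_up() above can be seen in isolation in the following minimal, self-contained user-space sketch. It is an illustration only, not kernel code: each CPU computes the average load it has observed, remembers how much of that it has already contributed to a shared group-wide counter, and atomically adds only the difference. All names here (group_load, struct cpu_state, fold_cpu_load) are hypothetical and chosen for the example.

/*
 * Illustrative sketch only -- not part of the patch above.
 * Build with: cc -std=c11 fold.c
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_long group_load = 0;	/* analogous to tg->load_weight */

struct cpu_state {
	unsigned long long load_sum;	/* analogous to cfs_rq->load_avg */
	unsigned long long periods;	/* analogous to cfs_rq->load_period */
	long contribution;		/* analogous to cfs_rq->load_contribution */
};

/*
 * Fold one CPU's current average load into the shared total by adding
 * only the change since the last fold, mirroring tg_shares_up().
 */
static void fold_cpu_load(struct cpu_state *cs)
{
	long avg = (long)(cs->load_sum / (cs->periods + 1));
	long delta = avg - cs->contribution;

	atomic_fetch_add(&group_load, delta);
	cs->contribution += delta;
}

int main(void)
{
	struct cpu_state cpu0 = { .load_sum = 3000, .periods = 2 };

	fold_cpu_load(&cpu0);	/* avg = 1000, delta = 1000 */
	cpu0.load_sum = 8000;
	cpu0.periods = 4;
	fold_cpu_load(&cpu0);	/* avg = 1600, delta =  600 */

	printf("group load: %ld\n", atomic_load(&group_load));	/* prints 1600 */
	return 0;
}

The point of adding the delta rather than the raw average is that each CPU can refresh its view of the shared counter repeatedly without double-counting its own earlier contribution, which is what lets update_shares() walk every leaf cfs_rq and its parent groups on each invocation.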