-rw-r--r--	kernel/sched.c	32
-rw-r--r--	kernel/sched_fair.c	40
2 files changed, 35 insertions(+), 37 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index b0e7ad796d3b..474f341d6f91 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1568,38 +1568,6 @@ static unsigned long cpu_avg_load_per_task(int cpu)
 	return rq->avg_load_per_task;
 }
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
-
-/*
- * Compute the cpu's hierarchical load factor for each task group.
- * This needs to be done in a top-down fashion because the load of a child
- * group is a fraction of its parents load.
- */
-static int tg_load_down(struct task_group *tg, void *data)
-{
-	unsigned long load;
-	long cpu = (long)data;
-
-	if (!tg->parent) {
-		load = cpu_rq(cpu)->load.weight;
-	} else {
-		load = tg->parent->cfs_rq[cpu]->h_load;
-		load *= tg->se[cpu]->load.weight;
-		load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
-	}
-
-	tg->cfs_rq[cpu]->h_load = load;
-
-	return 0;
-}
-
-static void update_h_load(long cpu)
-{
-	walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
-}
-
-#endif
-
 #ifdef CONFIG_PREEMPT
 
 static void double_rq_lock(struct rq *rq1, struct rq *rq2);
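
The block removed above distributes a runqueue's total weight down the group hierarchy: the root group's h_load is the raw runqueue weight, and each child receives the fraction of its parent's h_load given by the child's share of the parent's queue (the "+ 1" guards against dividing by an empty queue). A minimal userspace sketch of the same recurrence may help; all toy_* names and the sample weights here are hypothetical stand-ins for the kernel's task_group and tg_load_down(), not kernel code:

#include <stdio.h>

/* Toy stand-in for one task group's per-cpu state (hypothetical names). */
struct toy_group {
	struct toy_group *parent;
	unsigned long se_weight;   /* group's weight in its parent's queue */
	unsigned long cfs_weight;  /* total weight queued inside this group */
	unsigned long h_load;      /* computed hierarchical load */
};

/* Same recurrence as tg_load_down(): a child's h_load is the fraction
 * of its parent's h_load given by its share of the parent's queue.
 * The "+ 1" avoids dividing by zero for an empty parent queue. */
static void toy_load_down(struct toy_group *tg, unsigned long rq_weight)
{
	if (!tg->parent) {
		tg->h_load = rq_weight;
	} else {
		unsigned long load = tg->parent->h_load;
		load *= tg->se_weight;
		load /= tg->parent->cfs_weight + 1;
		tg->h_load = load;
	}
}

int main(void)
{
	/* The runqueue holds 3072 units of weight; one child group is
	 * enqueued with se_weight 1024 out of the root's 3072 total. */
	struct toy_group root  = { .parent = NULL, .cfs_weight = 3072 };
	struct toy_group child = { .parent = &root, .se_weight = 1024,
				   .cfs_weight = 2048 };

	/* Top-down: a parent must be computed before its children. */
	toy_load_down(&root, 3072);
	toy_load_down(&child, 3072);

	/* child gets 3072 * 1024 / (3072 + 1) == 1023 */
	printf("root h_load = %lu, child h_load = %lu\n",
	       root.h_load, child.h_load);
	return 0;
}

Run parents-before-children, every group ends up with its effective contribution to the cpu's load, which is what load_balance_fair() scales against in the second file below.
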
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 6cdff849fc19..180bcf1efa79 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -2232,11 +2232,43 @@ static void update_shares(int cpu)
 	struct rq *rq = cpu_rq(cpu);
 
 	rcu_read_lock();
+	/*
+	 * Iterates the task_group tree in a bottom up fashion, see
+	 * list_add_leaf_cfs_rq() for details.
+	 */
 	for_each_leaf_cfs_rq(rq, cfs_rq)
 		update_shares_cpu(cfs_rq->tg, cpu);
 	rcu_read_unlock();
 }
 
+/*
+ * Compute the cpu's hierarchical load factor for each task group.
+ * This needs to be done in a top-down fashion because the load of a child
+ * group is a fraction of its parents load.
+ */
+static int tg_load_down(struct task_group *tg, void *data)
+{
+	unsigned long load;
+	long cpu = (long)data;
+
+	if (!tg->parent) {
+		load = cpu_rq(cpu)->load.weight;
+	} else {
+		load = tg->parent->cfs_rq[cpu]->h_load;
+		load *= tg->se[cpu]->load.weight;
+		load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
+	}
+
+	tg->cfs_rq[cpu]->h_load = load;
+
+	return 0;
+}
+
+static void update_h_load(long cpu)
+{
+	walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
+}
+
 static unsigned long
 load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		  unsigned long max_load_move,
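
update_h_load() leans on walk_tg_tree() to invoke tg_load_down() parent-before-child (tg_nop is the no-op callback for the return path). A minimal sketch of such a pre-order walk, assuming a hypothetical toy_node tree in place of the kernel's task_group hierarchy and omitting the "up" callback for brevity:

#include <stdio.h>

/* Hypothetical toy tree node; the real walk_tg_tree() iterates
 * struct task_group and also takes an "up" callback. */
struct toy_node {
	const char *name;
	struct toy_node *child;    /* first child */
	struct toy_node *sibling;  /* next sibling */
};

/* Pre-order walk: the callback sees every parent before its children,
 * which is exactly the ordering a top-down computation like
 * tg_load_down() requires. */
static void toy_walk_down(struct toy_node *n, void (*down)(struct toy_node *))
{
	if (!n)
		return;
	down(n);
	toy_walk_down(n->child, down);
	toy_walk_down(n->sibling, down);
}

static void print_node(struct toy_node *n)
{
	printf("visit %s\n", n->name);
}

int main(void)
{
	struct toy_node b = { .name = "B" };
	struct toy_node c = { .name = "C" };
	struct toy_node a = { .name = "A", .child = &b };

	b.sibling = &c;                 /* A has children B and C */
	toy_walk_down(&a, print_node);  /* prints A, B, C */
	return 0;
}
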
@@ -2244,14 +2276,12 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		  int *all_pinned)
 {
 	long rem_load_move = max_load_move;
-	int busiest_cpu = cpu_of(busiest);
-	struct task_group *tg;
+	struct cfs_rq *busiest_cfs_rq;
 
 	rcu_read_lock();
-	update_h_load(busiest_cpu);
+	update_h_load(cpu_of(busiest));
 
-	list_for_each_entry_rcu(tg, &task_groups, list) {
-		struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[busiest_cpu];
+	for_each_leaf_cfs_rq(busiest, busiest_cfs_rq) {
 		unsigned long busiest_h_load = busiest_cfs_rq->h_load;
 		unsigned long busiest_weight = busiest_cfs_rq->load.weight;
 		u64 rem_load, moved_load;
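
The replaced loop walked every task group in the system and indexed its per-cpu cfs_rq; the new loop visits only the busiest runqueue's leaf list, i.e. the cfs_rqs that actually have entities enqueued. A rough userspace sketch of that leaf-list shape, with all toy_* names hypothetical rather than the kernel's:

#include <stdio.h>

/* Hypothetical miniature of the leaf-list idea: each runqueue keeps a
 * list of only those cfs_rqs that currently have entities enqueued, so
 * balancing skips the (possibly large) set of idle task groups. */
struct toy_cfs_rq {
	const char *name;
	unsigned long h_load;
	struct toy_cfs_rq *next_leaf;  /* link in the per-rq leaf list */
};

struct toy_rq {
	struct toy_cfs_rq *leaf_list;  /* only busy queues live here */
};

#define for_each_toy_leaf(rq, cfs) \
	for ((cfs) = (rq)->leaf_list; (cfs); (cfs) = (cfs)->next_leaf)

int main(void)
{
	struct toy_cfs_rq grp_b = { "B", 1023, NULL };
	struct toy_cfs_rq root  = { "root", 3072, &grp_b };
	struct toy_rq rq = { .leaf_list = &root };
	struct toy_cfs_rq *cfs;

	/* Mirrors the new loop shape in load_balance_fair(): visit each
	 * busy cfs_rq once, using its precomputed h_load. */
	for_each_toy_leaf(&rq, cfs)
		printf("balance %s, h_load=%lu\n", cfs->name, cfs->h_load);
	return 0;
}

Idle groups never appear on the leaf list, so the balancing cost scales with the number of busy groups on that cpu rather than with the total number of groups in the system.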