Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--	kernel/sched/fair.c	58
1 file changed, 27 insertions(+), 31 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index bb456f44b7b1..765d87acdf05 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4171,47 +4171,48 @@ static void update_blocked_averages(int cpu)
 }
 
 /*
- * Compute the cpu's hierarchical load factor for each task group.
+ * Compute the hierarchical load factor for cfs_rq and all its ascendants.
  * This needs to be done in a top-down fashion because the load of a child
  * group is a fraction of its parents load.
  */
-static int tg_load_down(struct task_group *tg, void *data)
+static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
 {
-	unsigned long load;
-	long cpu = (long)data;
-
-	if (!tg->parent) {
-		load = cpu_rq(cpu)->avg.load_avg_contrib;
-	} else {
-		load = tg->parent->cfs_rq[cpu]->h_load;
-		load = div64_ul(load * tg->se[cpu]->avg.load_avg_contrib,
-				tg->parent->cfs_rq[cpu]->runnable_load_avg + 1);
-	}
-
-	tg->cfs_rq[cpu]->h_load = load;
-
-	return 0;
-}
-
-static void update_h_load(long cpu)
-{
-	struct rq *rq = cpu_rq(cpu);
+	struct rq *rq = rq_of(cfs_rq);
+	struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
 	unsigned long now = jiffies;
+	unsigned long load;
 
-	if (rq->h_load_throttle == now)
+	if (cfs_rq->last_h_load_update == now)
 		return;
 
-	rq->h_load_throttle = now;
+	cfs_rq->h_load_next = NULL;
+	for_each_sched_entity(se) {
+		cfs_rq = cfs_rq_of(se);
+		cfs_rq->h_load_next = se;
+		if (cfs_rq->last_h_load_update == now)
+			break;
+	}
 
-	rcu_read_lock();
-	walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
-	rcu_read_unlock();
+	if (!se) {
+		cfs_rq->h_load = rq->avg.load_avg_contrib;
+		cfs_rq->last_h_load_update = now;
+	}
+
+	while ((se = cfs_rq->h_load_next) != NULL) {
+		load = cfs_rq->h_load;
+		load = div64_ul(load * se->avg.load_avg_contrib,
+				cfs_rq->runnable_load_avg + 1);
+		cfs_rq = group_cfs_rq(se);
+		cfs_rq->h_load = load;
+		cfs_rq->last_h_load_update = now;
+	}
 }
 
 static unsigned long task_h_load(struct task_struct *p)
 {
 	struct cfs_rq *cfs_rq = task_cfs_rq(p);
 
+	update_cfs_rq_h_load(cfs_rq);
 	return div64_ul(p->se.avg.load_avg_contrib * cfs_rq->h_load,
 			cfs_rq->runnable_load_avg + 1);
 }
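The rewritten helper makes the hierarchical load lazy: instead of walking every task group on the CPU via walk_tg_tree() and tg_load_down(), update_cfs_rq_h_load() first walks up from the queried cfs_rq, linking the path into h_load_next until it reaches an ancestor whose value is already fresh this jiffy (or the root, which is seeded from rq->avg.load_avg_contrib), and then walks back down that chain, scaling each parent's h_load by the child entity's share of the parent's runnable load. Below is a minimal stand-alone C model of that two-pass scheme; struct group and its fields (contrib, runnable, stamp) are hypothetical stand-ins that collapse the kernel's cfs_rq and its owning sched_entity into a single node, so this is a sketch of the idea rather than kernel code.

	/* Toy user-space model of the lazy h_load refresh above. All names
	 * here are hypothetical; one node stands for a cfs_rq plus the
	 * sched_entity that represents it inside its parent. */
	#include <stdio.h>
	#include <stddef.h>

	struct group {
		struct group *parent;      /* NULL at the root runqueue          */
		struct group *h_load_next; /* child on the path being refreshed  */
		unsigned long contrib;     /* load contribution within parent    */
		unsigned long runnable;    /* runnable load summed at this level */
		unsigned long h_load;      /* cached hierarchical load           */
		unsigned long stamp;       /* "jiffies" of the last refresh      */
	};

	static void update_h_load(struct group *g, unsigned long now)
	{
		struct group *p, *child = NULL;

		if (g->stamp == now)
			return;	/* cached value is still fresh this tick */

		/* Pass 1: walk up, recording the downward path in h_load_next,
		 * stopping early at an ancestor already refreshed this tick. */
		for (p = g; p; child = p, p = p->parent) {
			p->h_load_next = child;
			if (p->stamp == now)
				break;
		}

		if (!p) {
			/* Ran past the root: seed it. The kernel uses the
			 * rq-wide rq->avg.load_avg_contrib; here the root's
			 * own contrib field stands in for it. */
			p = child;
			p->h_load = p->contrib;
			p->stamp = now;
		}

		/* Pass 2: walk back down, scaling the parent's h_load by the
		 * child's share of the parent's runnable load (+1 avoids /0). */
		while ((child = p->h_load_next) != NULL) {
			child->h_load = p->h_load * child->contrib /
					(p->runnable + 1);
			child->stamp = now;
			p = child;
		}
	}

	int main(void)
	{
		struct group root = { .contrib = 2048, .runnable = 2048 };
		struct group tg = { .parent = &root, .contrib = 1024,
				    .runnable = 512 };

		update_h_load(&tg, 1);
		/* root seeded to 2048, then 2048 * 1024 / 2049 = 1023 */
		printf("tg h_load = %lu\n", tg.h_load);
		return 0;
	}

Because every node visited is stamped with now, repeated queries within the same jiffy take the early-return fast path and do no work; that throttling is what allows the eager per-CPU update_h_load() call to be dropped from load_balance() in the last hunk.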
@@ -4220,10 +4221,6 @@ static inline void update_blocked_averages(int cpu)
 {
 }
 
-static inline void update_h_load(long cpu)
-{
-}
-
 static unsigned long task_h_load(struct task_struct *p)
 {
 	return p->se.avg.load_avg_contrib;
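With task_h_load() now doing the refresh itself, the empty stub of update_h_load() in the non-group-scheduling branch has no callers left and is removed; in that configuration task_h_load() simply returns the task's own load_avg_contrib.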
@@ -5108,7 +5105,6 @@ redo:
 	env.src_rq = busiest;
 	env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
 
-	update_h_load(env.src_cpu);
 more_balance:
 	local_irq_save(flags);
 	double_rq_lock(env.dst_rq, busiest);
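The up-front refresh in load_balance() goes away with it: h_load is now computed on demand the first time task_h_load() is consulted for a task on the busiest runqueue, throttled to once per jiffy per cfs_rq by last_h_load_update.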