Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 15 +++++++++++---
 1 file changed, 13 insertions(+), 2 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index e3199df426e3..4d78aef4559d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4119,12 +4119,23 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
 			continue;
 
 		rq = cpu_rq(i);
-		wl = weighted_cpuload(i) * SCHED_LOAD_SCALE;
-		wl /= power;
+		wl = weighted_cpuload(i);
 
+		/*
+		 * When comparing with imbalance, use weighted_cpuload()
+		 * which is not scaled with the cpu power.
+		 */
 		if (capacity && rq->nr_running == 1 && wl > imbalance)
 			continue;
 
+		/*
+		 * For the load comparisons with the other cpu's, consider
+		 * the weighted_cpuload() scaled with the cpu power, so that
+		 * the load can be moved away from the cpu that is potentially
+		 * running at a lower capacity.
+		 */
+		wl = (wl * SCHED_LOAD_SCALE) / power;
+
		if (wl > max_load) {
			max_load = wl;
			busiest = rq;