about summary refs log tree commit diff stats
path: root/kernel/sched_fair.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--  kernel/sched_fair.c | 15
1 file changed, 13 insertions(+), 2 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index b45abbe55067..ff7692ccda89 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -2792,12 +2792,23 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
 			continue;
 
 		rq = cpu_rq(i);
-		wl = weighted_cpuload(i) * SCHED_LOAD_SCALE;
-		wl /= power;
+		wl = weighted_cpuload(i);
 
+		/*
+		 * When comparing with imbalance, use weighted_cpuload()
+		 * which is not scaled with the cpu power.
+		 */
 		if (capacity && rq->nr_running == 1 && wl > imbalance)
 			continue;
 
+		/*
+		 * For the load comparisons with the other cpu's, consider
+		 * the weighted_cpuload() scaled with the cpu power, so that
+		 * the load can be moved away from the cpu that is potentially
+		 * running at a lower capacity.
+		 */
+		wl = (wl * SCHED_LOAD_SCALE) / power;
+
 		if (wl > max_load) {
 			max_load = wl;
 			busiest = rq;