 kernel/sched_fair.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 7f00772e57c9..60f9d407c5ec 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -3127,6 +3127,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	if (!sds.busiest || sds.busiest_nr_running == 0)
 		goto out_balanced;
 
+	sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
+
 	/*
 	 * If the busiest group is imbalanced the below checks don't
 	 * work because they assumes all things are equal, which typically
@@ -3151,7 +3153,6 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	 * Don't pull any tasks if this group is already above the domain
 	 * average load.
 	 */
-	sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
 	if (sds.this_load >= sds.avg_load)
 		goto out_balanced;
 
