Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--  kernel/sched_fair.c  28
1 file changed, 13 insertions(+), 15 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index cd18600a8a63..03496ebc4553 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -3113,19 +3113,9 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	 */
 	update_sd_lb_stats(sd, this_cpu, idle, cpus, balance, &sds);
 
-	/* Cases where imbalance does not exist from POV of this_cpu */
-	/* 1) this_cpu is not the appropriate cpu to perform load balancing
-	 *    at this level.
-	 * 2) There is no busy sibling group to pull from.
-	 * 3) This group is the busiest group.
-	 * 4) This group is more busy than the avg busieness at this
-	 *    sched_domain.
-	 * 5) The imbalance is within the specified limit.
-	 *
-	 * Note: when doing newidle balance, if the local group has excess
-	 * capacity (i.e. nr_running < group_capacity) and the busiest group
-	 * does not have any capacity, we force a load balance to pull tasks
-	 * to the local group. In this case, we skip past checks 3, 4 and 5.
+	/*
+	 * this_cpu is not the appropriate cpu to perform load balancing at
+	 * this level.
 	 */
 	if (!(*balance))
 		goto ret;
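The first hunk keeps only the part of the old laundry-list comment that actually belongs to the !(*balance) test: update_sd_lb_stats() fills in the statistics and, as a side effect, clears *balance when this_cpu is not the cpu that should balance at this domain level. Below is a minimal free-standing sketch of that out-parameter convention; compute_stats and struct stats are hypothetical stand-ins for update_sd_lb_stats() and struct sd_lb_stats, and the policy shown is purely illustrative.

#include <stdbool.h>
#include <stdio.h>

struct stats {
	unsigned long max_load;	/* would hold the per-group figures */
};

/*
 * Hypothetical stand-in for update_sd_lb_stats(): fills *st and
 * clears *balance when the calling cpu should not balance at this
 * level (illustrative policy: only cpu 0 may).
 */
static void compute_stats(int this_cpu, struct stats *st, bool *balance)
{
	st->max_load = 0;
	*balance = (this_cpu == 0);
}

int main(void)
{
	struct stats st;
	bool balance;

	compute_stats(1, &st, &balance);
	if (!balance) {		/* mirrors the 'goto ret' bail-out */
		puts("not the appropriate cpu at this level");
		return 0;
	}
	puts("proceed to pick a busiest group");
	return 0;
}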
@@ -3134,19 +3124,27 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	    check_asym_packing(sd, &sds, this_cpu, imbalance))
 		return sds.busiest;
 
+	/* There is no busy sibling group to pull tasks from */
 	if (!sds.busiest || sds.busiest_nr_running == 0)
 		goto out_balanced;
 
 	/* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
 	if (idle == CPU_NEWLY_IDLE && sds.this_has_capacity &&
 			!sds.busiest_has_capacity)
 		goto force_balance;
 
+	/*
+	 * If the local group is more busy than the selected busiest group
+	 * don't try and pull any tasks.
+	 */
 	if (sds.this_load >= sds.max_load)
 		goto out_balanced;
 
+	/*
+	 * Don't pull any tasks if this group is already above the domain
+	 * average load.
+	 */
 	sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
-
 	if (sds.this_load >= sds.avg_load)
 		goto out_balanced;
 
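The second hunk's new comments are easier to follow with the fixed-point convention in mind: load figures are scaled by SCHED_LOAD_SCALE (1024 in kernels of this vintage), so sds.avg_load is the domain's load per unit of cpu_power, in the same scale as sds.this_load. Below is a self-contained sketch of the resulting check-by-check ladder with made-up numbers; struct lb_stats and should_balance() are illustrative stand-ins for struct sd_lb_stats and find_busiest_group(), which returns a group rather than a bool.

#include <stdbool.h>
#include <stdio.h>

#define SCHED_LOAD_SCALE 1024UL	/* fixed-point unit */

struct lb_stats {
	unsigned long this_load;	/* load of the local group */
	unsigned long max_load;		/* load of the busiest group */
	unsigned long total_load;	/* summed load of all groups */
	unsigned long total_pwr;	/* summed cpu_power of all groups */
	unsigned int busiest_nr_running;
	bool have_busiest;		/* a candidate busiest group exists */
	bool this_has_capacity;
	bool busiest_has_capacity;
};

/* Mirrors the comment-per-check ladder the patch creates. */
static bool should_balance(const struct lb_stats *sds, bool newly_idle)
{
	unsigned long avg_load;

	/* There is no busy sibling group to pull tasks from. */
	if (!sds->have_busiest || sds->busiest_nr_running == 0)
		return false;

	/* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized. */
	if (newly_idle && sds->this_has_capacity &&
			!sds->busiest_has_capacity)
		return true;

	/* The local group is busier than the busiest group: nothing to pull. */
	if (sds->this_load >= sds->max_load)
		return false;

	/* Domain average load, scaled by SCHED_LOAD_SCALE. */
	avg_load = (SCHED_LOAD_SCALE * sds->total_load) / sds->total_pwr;

	/* Already above the domain average: don't pull. */
	return sds->this_load < avg_load;
}

int main(void)
{
	/* Made-up numbers: two groups, local group lightly loaded. */
	struct lb_stats sds = {
		.this_load = 512, .max_load = 1536,
		.total_load = 2048, .total_pwr = 2048,
		.busiest_nr_running = 2, .have_busiest = true,
		.this_has_capacity = true, .busiest_has_capacity = true,
	};

	printf("should balance: %s\n",
	       should_balance(&sds, false) ? "yes" : "no");
	return 0;
}

With these numbers avg_load works out to (1024 * 2048) / 2048 = 1024, so the local group at 512 sits below both the busiest group's 1536 and the domain average, and the sketch reports that pulling tasks is worthwhile.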