 kernel/sched/fair.c | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1cfe5a25086d..8993dfa2e82b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1030,7 +1030,7 @@ struct numa_stats {

 	/* Approximate capacity in terms of runnable tasks on a node */
 	unsigned long task_capacity;
-	int has_capacity;
+	int has_free_capacity;
 };

 /*
@@ -1056,8 +1056,8 @@ static void update_numa_stats(struct numa_stats *ns, int nid)
 	 * the @ns structure is NULL'ed and task_numa_compare() will
 	 * not find this node attractive.
 	 *
-	 * We'll either bail at !has_capacity, or we'll detect a huge imbalance
-	 * and bail there.
+	 * We'll either bail at !has_free_capacity, or we'll detect a huge
+	 * imbalance and bail there.
 	 */
 	if (!cpus)
 		return;
@@ -1065,7 +1065,7 @@ static void update_numa_stats(struct numa_stats *ns, int nid)
 	ns->load = (ns->load * SCHED_POWER_SCALE) / ns->compute_capacity;
 	ns->task_capacity =
 		DIV_ROUND_CLOSEST(ns->compute_capacity, SCHED_POWER_SCALE);
-	ns->has_capacity = (ns->nr_running < ns->task_capacity);
+	ns->has_free_capacity = (ns->nr_running < ns->task_capacity);
 }

 struct task_numa_env {
@@ -1196,8 +1196,8 @@ static void task_numa_compare(struct task_numa_env *env,

 	if (!cur) {
 		/* Is there capacity at our destination? */
-		if (env->src_stats.has_capacity &&
-		    !env->dst_stats.has_capacity)
+		if (env->src_stats.has_free_capacity &&
+		    !env->dst_stats.has_free_capacity)
 			goto unlock;

 		goto balance;
@@ -1302,8 +1302,8 @@ static int task_numa_migrate(struct task_struct *p)
 	groupimp = group_weight(p, env.dst_nid) - groupweight;
 	update_numa_stats(&env.dst_stats, env.dst_nid);

-	/* If the preferred nid has capacity, try to use it. */
-	if (env.dst_stats.has_capacity)
+	/* If the preferred nid has free capacity, try to use it. */
+	if (env.dst_stats.has_free_capacity)
 		task_numa_find_cpu(&env, taskimp, groupimp);

 	/* No space available on the preferred nid. Look elsewhere. */
@@ -5538,7 +5538,7 @@ struct sg_lb_stats {
 	unsigned int idle_cpus;
 	unsigned int group_weight;
 	int group_imb; /* Is there an imbalance in the group ? */
-	int group_has_capacity; /* Is there extra capacity in the group? */
+	int group_has_free_capacity;
 #ifdef CONFIG_NUMA_BALANCING
 	unsigned int nr_numa_running;
 	unsigned int nr_preferred_running;
@@ -5905,7 +5905,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 	sgs->group_capacity = sg_capacity(env, group);

 	if (sgs->group_capacity > sgs->sum_nr_running)
-		sgs->group_has_capacity = 1;
+		sgs->group_has_free_capacity = 1;
 }

 /**
@@ -6029,7 +6029,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
 	 * with a large weight task outweighs the tasks on the system).
 	 */
 	if (prefer_sibling && sds->local &&
-	    sds->local_stat.group_has_capacity)
+	    sds->local_stat.group_has_free_capacity)
 		sgs->group_capacity = min(sgs->group_capacity, 1U);

 	if (update_sd_pick_busiest(env, sds, sg, sgs)) {
@@ -6289,8 +6289,8 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
 		goto force_balance;

 	/* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
-	if (env->idle == CPU_NEWLY_IDLE && local->group_has_capacity &&
-	    !busiest->group_has_capacity)
+	if (env->idle == CPU_NEWLY_IDLE && local->group_has_free_capacity &&
+	    !busiest->group_has_free_capacity)
 		goto force_balance;

 	/*