aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorNicolas Pitre <nicolas.pitre@linaro.org>2014-05-26 18:19:35 -0400
committerIngo Molnar <mingo@kernel.org>2014-06-05 05:52:22 -0400
commit1b6a7495d343fcfe22ff3a8285544bb8e40f1920 (patch)
tree20eab364c41565be199b892fb90309806f652953 /kernel
parent5ef20ca181ec592e4684a45f4d5f1385f6055534 (diff)
sched/fair: Change "has_capacity" to "has_free_capacity"
The capacity of a CPU/group should be some intrinsic value that doesn't change with task placement. It is like a container whose capacity is stable regardless of the amount of liquid in it (its "utilization")... unless the container itself is crushed, that is, but that's another story. Therefore let's rename "has_capacity" to "has_free_capacity" in order to better convey the wanted meaning. Signed-off-by: Nicolas Pitre <nico@linaro.org> Signed-off-by: Peter Zijlstra <peterz@infradead.org> Cc: Vincent Guittot <vincent.guittot@linaro.org> Cc: Daniel Lezcano <daniel.lezcano@linaro.org> Cc: Morten Rasmussen <morten.rasmussen@arm.com> Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net> Cc: linaro-kernel@lists.linaro.org Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: linux-kernel@vger.kernel.org Link: http://lkml.kernel.org/n/tip-djzkk027jm0e8x8jxy70opzh@git.kernel.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/sched/fair.c26
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1cfe5a25086d..8993dfa2e82b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1030,7 +1030,7 @@ struct numa_stats {
1030 1030
1031 /* Approximate capacity in terms of runnable tasks on a node */ 1031 /* Approximate capacity in terms of runnable tasks on a node */
1032 unsigned long task_capacity; 1032 unsigned long task_capacity;
1033 int has_capacity; 1033 int has_free_capacity;
1034}; 1034};
1035 1035
1036/* 1036/*
@@ -1056,8 +1056,8 @@ static void update_numa_stats(struct numa_stats *ns, int nid)
1056 * the @ns structure is NULL'ed and task_numa_compare() will 1056 * the @ns structure is NULL'ed and task_numa_compare() will
1057 * not find this node attractive. 1057 * not find this node attractive.
1058 * 1058 *
1059 * We'll either bail at !has_capacity, or we'll detect a huge imbalance 1059 * We'll either bail at !has_free_capacity, or we'll detect a huge
1060 * and bail there. 1060 * imbalance and bail there.
1061 */ 1061 */
1062 if (!cpus) 1062 if (!cpus)
1063 return; 1063 return;
@@ -1065,7 +1065,7 @@ static void update_numa_stats(struct numa_stats *ns, int nid)
1065 ns->load = (ns->load * SCHED_POWER_SCALE) / ns->compute_capacity; 1065 ns->load = (ns->load * SCHED_POWER_SCALE) / ns->compute_capacity;
1066 ns->task_capacity = 1066 ns->task_capacity =
1067 DIV_ROUND_CLOSEST(ns->compute_capacity, SCHED_POWER_SCALE); 1067 DIV_ROUND_CLOSEST(ns->compute_capacity, SCHED_POWER_SCALE);
1068 ns->has_capacity = (ns->nr_running < ns->task_capacity); 1068 ns->has_free_capacity = (ns->nr_running < ns->task_capacity);
1069} 1069}
1070 1070
1071struct task_numa_env { 1071struct task_numa_env {
@@ -1196,8 +1196,8 @@ static void task_numa_compare(struct task_numa_env *env,
1196 1196
1197 if (!cur) { 1197 if (!cur) {
1198 /* Is there capacity at our destination? */ 1198 /* Is there capacity at our destination? */
1199 if (env->src_stats.has_capacity && 1199 if (env->src_stats.has_free_capacity &&
1200 !env->dst_stats.has_capacity) 1200 !env->dst_stats.has_free_capacity)
1201 goto unlock; 1201 goto unlock;
1202 1202
1203 goto balance; 1203 goto balance;
@@ -1302,8 +1302,8 @@ static int task_numa_migrate(struct task_struct *p)
1302 groupimp = group_weight(p, env.dst_nid) - groupweight; 1302 groupimp = group_weight(p, env.dst_nid) - groupweight;
1303 update_numa_stats(&env.dst_stats, env.dst_nid); 1303 update_numa_stats(&env.dst_stats, env.dst_nid);
1304 1304
1305 /* If the preferred nid has capacity, try to use it. */ 1305 /* If the preferred nid has free capacity, try to use it. */
1306 if (env.dst_stats.has_capacity) 1306 if (env.dst_stats.has_free_capacity)
1307 task_numa_find_cpu(&env, taskimp, groupimp); 1307 task_numa_find_cpu(&env, taskimp, groupimp);
1308 1308
1309 /* No space available on the preferred nid. Look elsewhere. */ 1309 /* No space available on the preferred nid. Look elsewhere. */
@@ -5538,7 +5538,7 @@ struct sg_lb_stats {
5538 unsigned int idle_cpus; 5538 unsigned int idle_cpus;
5539 unsigned int group_weight; 5539 unsigned int group_weight;
5540 int group_imb; /* Is there an imbalance in the group ? */ 5540 int group_imb; /* Is there an imbalance in the group ? */
5541 int group_has_capacity; /* Is there extra capacity in the group? */ 5541 int group_has_free_capacity;
5542#ifdef CONFIG_NUMA_BALANCING 5542#ifdef CONFIG_NUMA_BALANCING
5543 unsigned int nr_numa_running; 5543 unsigned int nr_numa_running;
5544 unsigned int nr_preferred_running; 5544 unsigned int nr_preferred_running;
@@ -5905,7 +5905,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
5905 sgs->group_capacity = sg_capacity(env, group); 5905 sgs->group_capacity = sg_capacity(env, group);
5906 5906
5907 if (sgs->group_capacity > sgs->sum_nr_running) 5907 if (sgs->group_capacity > sgs->sum_nr_running)
5908 sgs->group_has_capacity = 1; 5908 sgs->group_has_free_capacity = 1;
5909} 5909}
5910 5910
5911/** 5911/**
@@ -6029,7 +6029,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
6029 * with a large weight task outweighs the tasks on the system). 6029 * with a large weight task outweighs the tasks on the system).
6030 */ 6030 */
6031 if (prefer_sibling && sds->local && 6031 if (prefer_sibling && sds->local &&
6032 sds->local_stat.group_has_capacity) 6032 sds->local_stat.group_has_free_capacity)
6033 sgs->group_capacity = min(sgs->group_capacity, 1U); 6033 sgs->group_capacity = min(sgs->group_capacity, 1U);
6034 6034
6035 if (update_sd_pick_busiest(env, sds, sg, sgs)) { 6035 if (update_sd_pick_busiest(env, sds, sg, sgs)) {
@@ -6289,8 +6289,8 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
6289 goto force_balance; 6289 goto force_balance;
6290 6290
6291 /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */ 6291 /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
6292 if (env->idle == CPU_NEWLY_IDLE && local->group_has_capacity && 6292 if (env->idle == CPU_NEWLY_IDLE && local->group_has_free_capacity &&
6293 !busiest->group_has_capacity) 6293 !busiest->group_has_free_capacity)
6294 goto force_balance; 6294 goto force_balance;
6295 6295
6296 /* 6296 /*