author    Nicolas Pitre <nicolas.pitre@linaro.org>  2014-05-26 18:19:36 -0400
committer Ingo Molnar <mingo@kernel.org>            2014-06-05 05:52:25 -0400
commit    0fedc6c8e34f4ce0b37b1f25c3619b4a8faa244c (patch)
tree      0e654ecf3069258fe0fe3982bf974f44326077d8 /kernel
parent    1b6a7495d343fcfe22ff3a8285544bb8e40f1920 (diff)
sched/fair: Disambiguate existing/remaining "capacity" usage
We have "power" (which should actually become "capacity") and "capacity"
which is a scaled down "capacity factor" in terms of unitary tasks.
Let's use "capacity_factor" to make room for proper usage of "capacity"
later.
Signed-off-by: Nicolas Pitre <nico@linaro.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Daniel Lezcano <daniel.lezcano@linaro.org>
Cc: Morten Rasmussen <morten.rasmussen@arm.com>
Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
Cc: linaro-kernel@lists.linaro.org
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-kernel@vger.kernel.org
Link: http://lkml.kernel.org/n/tip-gk1co8sqdev3763opqm6ovml@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
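
To make the naming concrete before the diff: below is a minimal standalone
sketch (not kernel code; the constant and the helper are local stand-ins for
the kernel's SCHED_POWER_SCALE and DIV_ROUND_CLOSEST()) of the relationship
the message describes, where "power" lives on a fixed-point scale and the
"capacity factor" is that value scaled down to roughly the number of unitary
tasks a group can host.

#include <stdio.h>

#define SCHED_POWER_SCALE 1024U  /* fixed-point unit: one full-power cpu */

/* round-to-nearest division, like the kernel's DIV_ROUND_CLOSEST() */
static unsigned int div_round_closest(unsigned int n, unsigned int d)
{
        return (n + d / 2) / d;
}

int main(void)
{
        unsigned int group_power = 2048;        /* e.g. two full-power cpus */
        unsigned int capacity_factor =
                div_round_closest(group_power, SCHED_POWER_SCALE);

        /* prints 2: room for about two unitary tasks */
        printf("capacity_factor = %u\n", capacity_factor);
        return 0;
}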
Diffstat (limited to 'kernel')
 kernel/sched/fair.c | 42 +++++++++++++++++++++----------------------
 1 file changed, 21 insertions(+), 21 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 8993dfa2e82b..e401e446e87c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5534,7 +5534,7 @@ struct sg_lb_stats {
         unsigned long load_per_task;
         unsigned long group_power;
         unsigned int sum_nr_running; /* Nr tasks running in the group */
-        unsigned int group_capacity;
+        unsigned int group_capacity_factor;
         unsigned int idle_cpus;
         unsigned int group_weight;
         int group_imb; /* Is there an imbalance in the group ? */
@@ -5829,15 +5829,15 @@ static inline int sg_imbalanced(struct sched_group *group)
 }
 
 /*
- * Compute the group capacity.
+ * Compute the group capacity factor.
  *
  * Avoid the issue where N*frac(smt_power) >= 1 creates 'phantom' cores by
  * first dividing out the smt factor and computing the actual number of cores
  * and limit power unit capacity with that.
  */
-static inline int sg_capacity(struct lb_env *env, struct sched_group *group)
+static inline int sg_capacity_factor(struct lb_env *env, struct sched_group *group)
 {
-        unsigned int capacity, smt, cpus;
+        unsigned int capacity_factor, smt, cpus;
         unsigned int power, power_orig;
 
         power = group->sgp->power;
@@ -5846,13 +5846,13 @@ static inline int sg_capacity(struct lb_env *env, struct sched_group *group)
 
         /* smt := ceil(cpus / power), assumes: 1 < smt_power < 2 */
         smt = DIV_ROUND_UP(SCHED_POWER_SCALE * cpus, power_orig);
-        capacity = cpus / smt; /* cores */
+        capacity_factor = cpus / smt; /* cores */
 
-        capacity = min_t(unsigned, capacity, DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE));
-        if (!capacity)
-                capacity = fix_small_capacity(env->sd, group);
+        capacity_factor = min_t(unsigned, capacity_factor, DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE));
+        if (!capacity_factor)
+                capacity_factor = fix_small_capacity(env->sd, group);
 
-        return capacity;
+        return capacity_factor;
 }
 
 /**
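
The 'phantom' cores issue the comment above guards against is easiest to see
with numbers. A worked sketch, assuming an SMT gain of about 1.15 so that each
of a core's two hardware threads contributes roughly 589 of a 1178 per-core
power_orig (the gain value is an assumption here, not taken from this patch):

#include <assert.h>
#include <stdio.h>

#define SCHED_POWER_SCALE 1024U
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
#define DIV_ROUND_CLOSEST(n, d) (((n) + (d) / 2) / (d))

int main(void)
{
        /* 4 SMT-2 cores: 8 threads, each worth ~589 (assumed 1178/core) */
        unsigned int cpus = 8;
        unsigned int power_orig = 8 * 589;      /* 4712 */
        unsigned int naive, smt, capacity_factor;

        /* naive rounding: 4712/1024 -> 4.6 -> 5 "cores", one of them phantom */
        naive = DIV_ROUND_CLOSEST(power_orig, SCHED_POWER_SCALE);
        assert(naive == 5);

        /* the function above divides the smt factor out first */
        smt = DIV_ROUND_UP(SCHED_POWER_SCALE * cpus, power_orig);   /* = 2 */
        capacity_factor = cpus / smt;                   /* = 4 real cores */
        assert(smt == 2 && capacity_factor == 4);

        printf("naive=%u, capacity_factor=%u\n", naive, capacity_factor);
        return 0;
}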
@@ -5902,9 +5902,9 @@ static inline void update_sg_lb_stats(struct lb_env *env,
         sgs->group_weight = group->group_weight;
 
         sgs->group_imb = sg_imbalanced(group);
-        sgs->group_capacity = sg_capacity(env, group);
+        sgs->group_capacity_factor = sg_capacity_factor(env, group);
 
-        if (sgs->group_capacity > sgs->sum_nr_running)
+        if (sgs->group_capacity_factor > sgs->sum_nr_running)
                 sgs->group_has_free_capacity = 1;
 }
 
@@ -5929,7 +5929,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
         if (sgs->avg_load <= sds->busiest_stat.avg_load)
                 return false;
 
-        if (sgs->sum_nr_running > sgs->group_capacity)
+        if (sgs->sum_nr_running > sgs->group_capacity_factor)
                 return true;
 
         if (sgs->group_imb)
@@ -6020,17 +6020,17 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
 
                 /*
                  * In case the child domain prefers tasks go to siblings
-                 * first, lower the sg capacity to one so that we'll try
+                 * first, lower the sg capacity factor to one so that we'll try
                  * and move all the excess tasks away. We lower the capacity
                  * of a group only if the local group has the capacity to fit
-                 * these excess tasks, i.e. nr_running < group_capacity. The
+                 * these excess tasks, i.e. nr_running < group_capacity_factor. The
                  * extra check prevents the case where you always pull from the
                  * heaviest group when it is already under-utilized (possible
                  * with a large weight task outweighs the tasks on the system).
                  */
                 if (prefer_sibling && sds->local &&
                     sds->local_stat.group_has_free_capacity)
-                        sgs->group_capacity = min(sgs->group_capacity, 1U);
+                        sgs->group_capacity_factor = min(sgs->group_capacity_factor, 1U);
 
                 if (update_sd_pick_busiest(env, sds, sg, sgs)) {
                         sds->busiest = sg;
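
A sketch of what the clamp above achieves, with made-up numbers: a sibling
group running exactly as many tasks as its capacity factor would otherwise
look balanced; clamping the factor to 1 makes every task beyond the first
count as excess, so the group can be picked as busiest and drained toward the
local group, which was just checked to have free capacity.

#include <stdio.h>

int main(void)
{
        /* hypothetical sibling group: factor 2, exactly 2 running tasks */
        unsigned int group_capacity_factor = 2;
        unsigned int sum_nr_running = 2;
        int prefer_sibling = 1, local_has_free_capacity = 1;

        if (prefer_sibling && local_has_free_capacity &&
            group_capacity_factor > 1U)
                group_capacity_factor = 1U;     /* min(factor, 1U) */

        /* 2 > 1: the group now counts as overloaded and can be picked */
        printf("overloaded = %d\n", sum_nr_running > group_capacity_factor);
        return 0;
}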
@@ -6204,7 +6204,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
                  * have to drop below capacity to reach cpu-load equilibrium.
                  */
                 load_above_capacity =
-                        (busiest->sum_nr_running - busiest->group_capacity);
+                        (busiest->sum_nr_running - busiest->group_capacity_factor);
 
                 load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
                 load_above_capacity /= busiest->group_power;
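
The computation in this hunk, worked through with illustrative numbers (both
scale constants are 1024 in this era of the code): the number of excess tasks
times one nice-0 task's load, normalized by the group's power.

#include <stdio.h>

#define SCHED_LOAD_SCALE  1024UL        /* load of one nice-0 task */
#define SCHED_POWER_SCALE 1024UL

int main(void)
{
        /* hypothetical busiest group: 3 tasks, factor 2, two full cpus */
        unsigned long sum_nr_running = 3, group_capacity_factor = 2;
        unsigned long group_power = 2 * SCHED_POWER_SCALE;      /* 2048 */
        unsigned long load_above_capacity;

        load_above_capacity = sum_nr_running - group_capacity_factor;
        load_above_capacity *= SCHED_LOAD_SCALE * SCHED_POWER_SCALE;
        load_above_capacity /= group_power;

        /* 1 * 1024 * 1024 / 2048 = 512 load units */
        printf("load_above_capacity = %lu\n", load_above_capacity);
        return 0;
}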
@@ -6348,7 +6348,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
         int i;
 
         for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
-                unsigned long power, capacity, wl;
+                unsigned long power, capacity_factor, wl;
                 enum fbq_type rt;
 
                 rq = cpu_rq(i);
@@ -6377,9 +6377,9 @@ static struct rq *find_busiest_queue(struct lb_env *env,
                         continue;
 
                 power = power_of(i);
-                capacity = DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE);
-                if (!capacity)
-                        capacity = fix_small_capacity(env->sd, group);
+                capacity_factor = DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE);
+                if (!capacity_factor)
+                        capacity_factor = fix_small_capacity(env->sd, group);
 
                 wl = weighted_cpuload(i);
 
@@ -6387,7 +6387,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
                  * When comparing with imbalance, use weighted_cpuload()
                  * which is not scaled with the cpu power.
                  */
-                if (capacity && rq->nr_running == 1 && wl > env->imbalance)
+                if (capacity_factor && rq->nr_running == 1 && wl > env->imbalance)
                         continue;
 
                 /*
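
The renamed test above is about unit consistency: env->imbalance is expressed
in raw load units, so it is compared against weighted_cpuload() rather than a
power-scaled figure. A small sketch with made-up numbers: a cpu whose lone
task carries more raw load than the imbalance is skipped, since pulling that
task would move more load than the balancer wants.

#include <stdio.h>

int main(void)
{
        /* made-up numbers for the skip test above */
        unsigned long wl = 1024;        /* raw weighted_cpuload() */
        unsigned long imbalance = 600;  /* env->imbalance, same raw units */
        unsigned int capacity_factor = 1, nr_running = 1;

        if (capacity_factor && nr_running == 1 && wl > imbalance)
                printf("skip: moving the lone task would overshoot\n");
        return 0;
}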