author     Nicolas Pitre <nicolas.pitre@linaro.org>    2014-05-26 18:19:39 -0400
committer  Ingo Molnar <mingo@kernel.org>              2014-06-05 05:52:30 -0400
commit     ca8ce3d0b144c318a5a9ce99649053e9029061ea (patch)
tree       c16f890097b570d2703c1295831470c17937ee10 /kernel
parent     ced549fa5fc1fdaf7fac93894dc673092eb3dc20 (diff)
sched: Final power vs. capacity cleanups
It is better not to think about compute capacity as being equivalent
to "CPU power". The upcoming "power-aware" scheduler work may create
confusion with the notion of energy consumption if "power" is used too
liberally.

This contains the architecture-visible changes. Incidentally, only ARM
takes advantage of the available pow^H^H^Hcapacity scaling hooks and
therefore those changes outside kernel/sched/ are confined to one
ARM-specific file. The default arch_scale_smt_power() hook is not
overridden by anyone.
Replacements are as follows:
arch_scale_freq_power --> arch_scale_freq_capacity
arch_scale_smt_power --> arch_scale_smt_capacity
SCHED_POWER_SCALE --> SCHED_CAPACITY_SCALE
SCHED_POWER_SHIFT --> SCHED_CAPACITY_SHIFT
The local usage of "power" in arch/arm/kernel/topology.c is also changed
to "capacity" as appropriate.
Signed-off-by: Nicolas Pitre <nico@linaro.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Daniel Lezcano <daniel.lezcano@linaro.org>
Cc: Morten Rasmussen <morten.rasmussen@arm.com>
Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
Cc: linaro-kernel@lists.linaro.org
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Grant Likely <grant.likely@linaro.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mark Brown <broonie@linaro.org>
Cc: Rob Herring <robh+dt@kernel.org>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
Cc: devicetree@vger.kernel.org
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Link: http://lkml.kernel.org/n/tip-48zba9qbznvglwelgq2cfygh@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/core.c |  6
-rw-r--r--  kernel/sched/fair.c | 59
2 files changed, 33 insertions, 32 deletions
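
Background for the hunks below, as a sketch rather than kernel code:
SCHED_CAPACITY_SCALE is a fixed-point "one" (1 << SCHED_CAPACITY_SHIFT,
i.e. 1024 at this point in the tree), and update_cpu_capacity() keeps
multiplying such factors together and shifting right to stay in that
fixed-point domain. The standalone program below models that arithmetic
with made-up factor values; only the two macros mirror the kernel
definitions.

/*
 * Rough userspace model of how update_cpu_capacity() combines scaling
 * factors.  The factor values are invented; in the kernel they come from
 * arch_scale_smt_capacity(), arch_scale_freq_capacity() and
 * scale_rt_capacity().
 */
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10
#define SCHED_CAPACITY_SCALE	(1UL << SCHED_CAPACITY_SHIFT)

int main(void)
{
	unsigned long capacity = SCHED_CAPACITY_SCALE;
	unsigned long freq_factor = 717;	/* CPU running at roughly 70% speed */
	unsigned long rt_factor   = 973;	/* ~5% of time taken by RT/IRQ work */

	capacity = (capacity * freq_factor) >> SCHED_CAPACITY_SHIFT;
	capacity = (capacity * rt_factor)   >> SCHED_CAPACITY_SHIFT;

	/* Prints 681, i.e. about 66% of SCHED_CAPACITY_SCALE. */
	printf("capacity = %lu\n", capacity);
	return 0;
}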
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 07bc78a50329..7ba4f5413a10 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5249,7 +5249,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 		cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));

 		printk(KERN_CONT " %s", str);
-		if (group->sgc->capacity != SCHED_POWER_SCALE) {
+		if (group->sgc->capacity != SCHED_CAPACITY_SCALE) {
 			printk(KERN_CONT " (cpu_capacity = %d)",
 				group->sgc->capacity);
 		}
@@ -5715,7 +5715,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 		 * domains and no possible iteration will get us here, we won't
 		 * die on a /0 trap.
 		 */
-		sg->sgc->capacity = SCHED_POWER_SCALE * cpumask_weight(sg_span);
+		sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
 		sg->sgc->capacity_orig = sg->sgc->capacity;

 		/*
@@ -6921,7 +6921,7 @@ void __init sched_init(void)
 #ifdef CONFIG_SMP
 		rq->sd = NULL;
 		rq->rd = NULL;
-		rq->cpu_capacity = SCHED_POWER_SCALE;
+		rq->cpu_capacity = SCHED_CAPACITY_SCALE;
 		rq->post_schedule = 0;
 		rq->active_balance = 0;
 		rq->next_balance = jiffies;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 58684f684fa8..dc7d6527a282 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1062,9 +1062,9 @@ static void update_numa_stats(struct numa_stats *ns, int nid)
 	if (!cpus)
 		return;

-	ns->load = (ns->load * SCHED_POWER_SCALE) / ns->compute_capacity;
+	ns->load = (ns->load * SCHED_CAPACITY_SCALE) / ns->compute_capacity;
 	ns->task_capacity =
-		DIV_ROUND_CLOSEST(ns->compute_capacity, SCHED_POWER_SCALE);
+		DIV_ROUND_CLOSEST(ns->compute_capacity, SCHED_CAPACITY_SCALE);
 	ns->has_free_capacity = (ns->nr_running < ns->task_capacity);
 }

@@ -4370,7 +4370,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 	}

 	/* Adjust by relative CPU capacity of the group */
-	avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgc->capacity;
+	avg_load = (avg_load * SCHED_CAPACITY_SCALE) / group->sgc->capacity;

 	if (local_group) {
 		this_load = avg_load;
@@ -5609,10 +5609,10 @@ static inline int get_sd_load_idx(struct sched_domain *sd,

 static unsigned long default_scale_capacity(struct sched_domain *sd, int cpu)
 {
-	return SCHED_POWER_SCALE;
+	return SCHED_CAPACITY_SCALE;
 }

-unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
+unsigned long __weak arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
 {
 	return default_scale_capacity(sd, cpu);
 }
@@ -5627,7 +5627,7 @@ static unsigned long default_scale_smt_capacity(struct sched_domain *sd, int cpu
 	return smt_gain;
 }

-unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
+unsigned long __weak arch_scale_smt_capacity(struct sched_domain *sd, int cpu)
 {
 	return default_scale_smt_capacity(sd, cpu);
 }
@@ -5658,10 +5658,10 @@ static unsigned long scale_rt_capacity(int cpu)
 		available = total - avg;
 	}

-	if (unlikely((s64)total < SCHED_POWER_SCALE))
-		total = SCHED_POWER_SCALE;
+	if (unlikely((s64)total < SCHED_CAPACITY_SCALE))
+		total = SCHED_CAPACITY_SCALE;

-	total >>= SCHED_POWER_SHIFT;
+	total >>= SCHED_CAPACITY_SHIFT;

 	return div_u64(available, total);
 }
@@ -5669,29 +5669,29 @@ static unsigned long scale_rt_capacity(int cpu)
 static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 {
 	unsigned long weight = sd->span_weight;
-	unsigned long capacity = SCHED_POWER_SCALE;
+	unsigned long capacity = SCHED_CAPACITY_SCALE;
 	struct sched_group *sdg = sd->groups;

 	if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
 		if (sched_feat(ARCH_POWER))
-			capacity *= arch_scale_smt_power(sd, cpu);
+			capacity *= arch_scale_smt_capacity(sd, cpu);
 		else
 			capacity *= default_scale_smt_capacity(sd, cpu);

-		capacity >>= SCHED_POWER_SHIFT;
+		capacity >>= SCHED_CAPACITY_SHIFT;
 	}

 	sdg->sgc->capacity_orig = capacity;

 	if (sched_feat(ARCH_POWER))
-		capacity *= arch_scale_freq_power(sd, cpu);
+		capacity *= arch_scale_freq_capacity(sd, cpu);
 	else
 		capacity *= default_scale_capacity(sd, cpu);

-	capacity >>= SCHED_POWER_SHIFT;
+	capacity >>= SCHED_CAPACITY_SHIFT;

 	capacity *= scale_rt_capacity(cpu);
-	capacity >>= SCHED_POWER_SHIFT;
+	capacity >>= SCHED_CAPACITY_SHIFT;

 	if (!capacity)
 		capacity = 1;
@@ -5780,7 +5780,7 @@ static inline int
 fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
 {
 	/*
-	 * Only siblings can have significantly less than SCHED_POWER_SCALE
+	 * Only siblings can have significantly less than SCHED_CAPACITY_SCALE
 	 */
 	if (!(sd->flags & SD_SHARE_CPUPOWER))
 		return 0;
@@ -5845,11 +5845,11 @@ static inline int sg_capacity_factor(struct lb_env *env, struct sched_group *gro
 	cpus = group->group_weight;

 	/* smt := ceil(cpus / capacity), assumes: 1 < smt_capacity < 2 */
-	smt = DIV_ROUND_UP(SCHED_POWER_SCALE * cpus, capacity_orig);
+	smt = DIV_ROUND_UP(SCHED_CAPACITY_SCALE * cpus, capacity_orig);
 	capacity_factor = cpus / smt; /* cores */

 	capacity_factor = min_t(unsigned,
-		capacity_factor, DIV_ROUND_CLOSEST(capacity, SCHED_POWER_SCALE));
+		capacity_factor, DIV_ROUND_CLOSEST(capacity, SCHED_CAPACITY_SCALE));
 	if (!capacity_factor)
 		capacity_factor = fix_small_capacity(env->sd, group);

@@ -5895,7 +5895,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,

 	/* Adjust by relative CPU capacity of the group */
 	sgs->group_capacity = group->sgc->capacity;
-	sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / sgs->group_capacity;
+	sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) / sgs->group_capacity;

 	if (sgs->sum_nr_running)
 		sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
@@ -6089,7 +6089,7 @@ static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)

 	env->imbalance = DIV_ROUND_CLOSEST(
 		sds->busiest_stat.avg_load * sds->busiest_stat.group_capacity,
-		SCHED_POWER_SCALE);
+		SCHED_CAPACITY_SCALE);

 	return 1;
 }
@@ -6118,7 +6118,7 @@ void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
 		imbn = 1;

 	scaled_busy_load_per_task =
-		(busiest->load_per_task * SCHED_POWER_SCALE) /
+		(busiest->load_per_task * SCHED_CAPACITY_SCALE) /
 		busiest->group_capacity;

 	if (busiest->avg_load + scaled_busy_load_per_task >=
@@ -6137,7 +6137,7 @@ void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
 			min(busiest->load_per_task, busiest->avg_load);
 	capa_now += local->group_capacity *
 			min(local->load_per_task, local->avg_load);
-	capa_now /= SCHED_POWER_SCALE;
+	capa_now /= SCHED_CAPACITY_SCALE;

 	/* Amount of load we'd subtract */
 	if (busiest->avg_load > scaled_busy_load_per_task) {
@@ -6148,16 +6148,16 @@ void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)

 	/* Amount of load we'd add */
 	if (busiest->avg_load * busiest->group_capacity <
-	    busiest->load_per_task * SCHED_POWER_SCALE) {
+	    busiest->load_per_task * SCHED_CAPACITY_SCALE) {
 		tmp = (busiest->avg_load * busiest->group_capacity) /
 		      local->group_capacity;
 	} else {
-		tmp = (busiest->load_per_task * SCHED_POWER_SCALE) /
+		tmp = (busiest->load_per_task * SCHED_CAPACITY_SCALE) /
 		      local->group_capacity;
 	}
 	capa_move += local->group_capacity *
 		     min(local->load_per_task, local->avg_load + tmp);
-	capa_move /= SCHED_POWER_SCALE;
+	capa_move /= SCHED_CAPACITY_SCALE;

 	/* Move if we gain throughput */
 	if (capa_move > capa_now)
@@ -6207,7 +6207,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
 		load_above_capacity =
 			(busiest->sum_nr_running - busiest->group_capacity_factor);

-		load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
+		load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_CAPACITY_SCALE);
 		load_above_capacity /= busiest->group_capacity;
 	}

@@ -6225,7 +6225,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
 	env->imbalance = min(
 		max_pull * busiest->group_capacity,
 		(sds->avg_load - local->avg_load) * local->group_capacity
-	) / SCHED_POWER_SCALE;
+	) / SCHED_CAPACITY_SCALE;

 	/*
 	 * if *imbalance is less than the average load per runnable task
@@ -6279,7 +6279,8 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
 	if (!sds.busiest || busiest->sum_nr_running == 0)
 		goto out_balanced;

-	sds.avg_load = (SCHED_POWER_SCALE * sds.total_load) / sds.total_capacity;
+	sds.avg_load = (SCHED_CAPACITY_SCALE * sds.total_load)
+						/ sds.total_capacity;

 	/*
 	 * If the busiest group is imbalanced the below checks don't
@@ -6378,7 +6379,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
 			continue;

 		capacity = capacity_of(i);
-		capacity_factor = DIV_ROUND_CLOSEST(capacity, SCHED_POWER_SCALE);
+		capacity_factor = DIV_ROUND_CLOSEST(capacity, SCHED_CAPACITY_SCALE);
 		if (!capacity_factor)
 			capacity_factor = fix_small_capacity(env->sd, group);
