author     Nicolas Pitre <nicolas.pitre@linaro.org>   2014-05-26 18:19:38 -0400
committer  Ingo Molnar <mingo@kernel.org>             2014-06-05 05:52:29 -0400
commit     ced549fa5fc1fdaf7fac93894dc673092eb3dc20 (patch)
tree       83b391620a9b1d269de50e06f8d03714b59298d6
parent     63b2ca30bdb3dbf60bc7ac5f46713c0d32308261 (diff)
sched: Remove remaining dubious usage of "power"
It is better not to think about compute capacity as being equivalent to
"CPU power".  The upcoming "power aware" scheduler work may create
confusion with the notion of energy consumption if "power" is used too
liberally.

This is the remaining "power" -> "capacity" rename for local symbols.
Those symbols visible to the rest of the kernel are not included yet.

Signed-off-by: Nicolas Pitre <nico@linaro.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Daniel Lezcano <daniel.lezcano@linaro.org>
Cc: Morten Rasmussen <morten.rasmussen@arm.com>
Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
Cc: linaro-kernel@lists.linaro.org
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-kernel@vger.kernel.org
Link: http://lkml.kernel.org/n/tip-yyyhohzhkwnaotr3lx8zd5aa@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--  kernel/sched/core.c   |   6
-rw-r--r--  kernel/sched/fair.c   | 102
-rw-r--r--  kernel/sched/sched.h  |   2
3 files changed, 55 insertions(+), 55 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2e1fb0902200..07bc78a50329 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5764,7 +5764,7 @@ static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
 /*
  * build_sched_groups will build a circular linked list of the groups
  * covered by the given span, and will set each group's ->cpumask correctly,
- * and ->cpu_power to 0.
+ * and ->cpu_capacity to 0.
  *
  * Assumes the sched_domain tree is fully constructed
  */
@@ -6471,7 +6471,7 @@ static int build_sched_domains(const struct cpumask *cpu_map,
                 }
         }
 
-        /* Calculate CPU power for physical packages and nodes */
+        /* Calculate CPU capacity for physical packages and nodes */
         for (i = nr_cpumask_bits-1; i >= 0; i--) {
                 if (!cpumask_test_cpu(i, cpu_map))
                         continue;
@@ -6921,7 +6921,7 @@ void __init sched_init(void)
 #ifdef CONFIG_SMP
                 rq->sd = NULL;
                 rq->rd = NULL;
-                rq->cpu_power = SCHED_POWER_SCALE;
+                rq->cpu_capacity = SCHED_POWER_SCALE;
                 rq->post_schedule = 0;
                 rq->active_balance = 0;
                 rq->next_balance = jiffies;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 36bd4d23fca8..58684f684fa8 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1017,7 +1017,7 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
 static unsigned long weighted_cpuload(const int cpu);
 static unsigned long source_load(int cpu, int type);
 static unsigned long target_load(int cpu, int type);
-static unsigned long power_of(int cpu);
+static unsigned long capacity_of(int cpu);
 static long effective_load(struct task_group *tg, int cpu, long wl, long wg);
 
 /* Cached statistics for all CPUs within a node */
@@ -1046,7 +1046,7 @@ static void update_numa_stats(struct numa_stats *ns, int nid)
 
                 ns->nr_running += rq->nr_running;
                 ns->load += weighted_cpuload(cpu);
-                ns->compute_capacity += power_of(cpu);
+                ns->compute_capacity += capacity_of(cpu);
 
                 cpus++;
         }
@@ -1214,7 +1214,7 @@ balance:
         orig_dst_load = env->dst_stats.load;
         orig_src_load = env->src_stats.load;
 
-        /* XXX missing power terms */
+        /* XXX missing capacity terms */
         load = task_h_load(env->p);
         dst_load = orig_dst_load + load;
         src_load = orig_src_load - load;
@@ -4043,9 +4043,9 @@ static unsigned long target_load(int cpu, int type)
         return max(rq->cpu_load[type-1], total);
 }
 
-static unsigned long power_of(int cpu)
+static unsigned long capacity_of(int cpu)
 {
-        return cpu_rq(cpu)->cpu_power;
+        return cpu_rq(cpu)->cpu_capacity;
 }
 
 static unsigned long cpu_avg_load_per_task(int cpu)
@@ -4288,12 +4288,12 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
                 s64 this_eff_load, prev_eff_load;
 
                 this_eff_load = 100;
-                this_eff_load *= power_of(prev_cpu);
+                this_eff_load *= capacity_of(prev_cpu);
                 this_eff_load *= this_load +
                         effective_load(tg, this_cpu, weight, weight);
 
                 prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
-                prev_eff_load *= power_of(this_cpu);
+                prev_eff_load *= capacity_of(this_cpu);
                 prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
 
                 balanced = this_eff_load <= prev_eff_load;
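For illustration only, not part of the patch: the test in this hunk weighs each side's load by the *other* CPU's capacity, which compares load/capacity ratios without a division. A minimal standalone sketch of that comparison, with a hypothetical helper name, the effective_load() adjustments folded into the load arguments, and all numbers invented:

#include <stdbool.h>
#include <stdio.h>

/*
 * Hedged sketch of the wake_affine() balance test: each side's load is
 * scaled by the other CPU's capacity, so the higher-capacity CPU is
 * allowed to carry proportionally more load.  All values are made up.
 */
static bool wake_affine_balanced(unsigned long this_load, unsigned long this_capacity,
                                 unsigned long prev_load, unsigned long prev_capacity,
                                 unsigned int imbalance_pct)
{
        unsigned long long this_eff_load = 100ULL * prev_capacity * this_load;
        unsigned long long prev_eff_load =
                (100ULL + (imbalance_pct - 100) / 2) * this_capacity * prev_load;

        return this_eff_load <= prev_eff_load;
}

int main(void)
{
        /* this_cpu has twice the capacity, so waking the task there still balances */
        printf("balanced: %d\n",
               wake_affine_balanced(600, 2048, 500, 1024, 125));
        return 0;
}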
@@ -4950,14 +4950,14 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preemp
  *
  *   W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0               (3)
  *
- * P_i is the cpu power (or compute capacity) of cpu i, typically it is the
+ * C_i is the compute capacity of cpu i, typically it is the
  * fraction of 'recent' time available for SCHED_OTHER task execution. But it
  * can also include other factors [XXX].
  *
  * To achieve this balance we define a measure of imbalance which follows
  * directly from (1):
  *
- *   imb_i,j = max{ avg(W/P), W_i/P_i } - min{ avg(W/P), W_j/P_j }    (4)
+ *   imb_i,j = max{ avg(W/C), W_i/C_i } - min{ avg(W/C), W_j/C_j }    (4)
  *
  * We them move tasks around to minimize the imbalance. In the continuous
  * function space it is obvious this converges, in the discrete case we get
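As a rough worked example of imbalance measure (4) with the renamed symbols, using invented weights and capacities (not taken from the kernel):

#include <stdio.h>

/* Toy evaluation of imb_i,j from the comment above; all values are invented. */
int main(void)
{
        double W_i = 2048.0, C_i = 1024.0;      /* runnable weight / capacity of cpu i */
        double W_j =  512.0, C_j = 1024.0;      /* runnable weight / capacity of cpu j */
        double ratio_i = W_i / C_i;             /* 2.00 */
        double ratio_j = W_j / C_j;             /* 0.50 */
        double avg = (ratio_i + ratio_j) / 2.0; /* 1.25 */
        double imb = (ratio_i > avg ? ratio_i : avg) -
                     (ratio_j < avg ? ratio_j : avg);

        printf("imb_i,j = %.2f\n", imb);        /* 2.00 - 0.50 = 1.50 */
        return 0;
}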
@@ -5607,17 +5607,17 @@ static inline int get_sd_load_idx(struct sched_domain *sd,
         return load_idx;
 }
 
-static unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
+static unsigned long default_scale_capacity(struct sched_domain *sd, int cpu)
 {
         return SCHED_POWER_SCALE;
 }
 
 unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
 {
-        return default_scale_freq_power(sd, cpu);
+        return default_scale_capacity(sd, cpu);
 }
 
-static unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
+static unsigned long default_scale_smt_capacity(struct sched_domain *sd, int cpu)
 {
         unsigned long weight = sd->span_weight;
         unsigned long smt_gain = sd->smt_gain;
@@ -5629,10 +5629,10 @@ static unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
 
 unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
 {
-        return default_scale_smt_power(sd, cpu);
+        return default_scale_smt_capacity(sd, cpu);
 }
 
-static unsigned long scale_rt_power(int cpu)
+static unsigned long scale_rt_capacity(int cpu)
 {
         struct rq *rq = cpu_rq(cpu);
         u64 total, available, age_stamp, avg;
@@ -5652,7 +5652,7 @@ static unsigned long scale_rt_power(int cpu)
         total = sched_avg_period() + delta;
 
         if (unlikely(total < avg)) {
-                /* Ensures that power won't end up being negative */
+                /* Ensures that capacity won't end up being negative */
                 available = 0;
         } else {
                 available = total - avg;
@@ -5666,38 +5666,38 @@ static unsigned long scale_rt_power(int cpu)
         return div_u64(available, total);
 }
 
-static void update_cpu_power(struct sched_domain *sd, int cpu)
+static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 {
         unsigned long weight = sd->span_weight;
-        unsigned long power = SCHED_POWER_SCALE;
+        unsigned long capacity = SCHED_POWER_SCALE;
         struct sched_group *sdg = sd->groups;
 
         if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
                 if (sched_feat(ARCH_POWER))
-                        power *= arch_scale_smt_power(sd, cpu);
+                        capacity *= arch_scale_smt_power(sd, cpu);
                 else
-                        power *= default_scale_smt_power(sd, cpu);
+                        capacity *= default_scale_smt_capacity(sd, cpu);
 
-                power >>= SCHED_POWER_SHIFT;
+                capacity >>= SCHED_POWER_SHIFT;
         }
 
-        sdg->sgc->capacity_orig = power;
+        sdg->sgc->capacity_orig = capacity;
 
         if (sched_feat(ARCH_POWER))
-                power *= arch_scale_freq_power(sd, cpu);
+                capacity *= arch_scale_freq_power(sd, cpu);
         else
-                power *= default_scale_freq_power(sd, cpu);
+                capacity *= default_scale_capacity(sd, cpu);
 
-        power >>= SCHED_POWER_SHIFT;
+        capacity >>= SCHED_POWER_SHIFT;
 
-        power *= scale_rt_power(cpu);
-        power >>= SCHED_POWER_SHIFT;
+        capacity *= scale_rt_capacity(cpu);
+        capacity >>= SCHED_POWER_SHIFT;
 
-        if (!power)
-                power = 1;
+        if (!capacity)
+                capacity = 1;
 
-        cpu_rq(cpu)->cpu_power = power;
-        sdg->sgc->capacity = power;
+        cpu_rq(cpu)->cpu_capacity = capacity;
+        sdg->sgc->capacity = capacity;
 }
 
 void update_group_capacity(struct sched_domain *sd, int cpu)
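The renamed update_cpu_capacity() above is a chain of fixed-point scalings: capacity starts at SCHED_POWER_SCALE and each factor is applied as a multiply followed by a shift. A hedged sketch of just that arithmetic, with stand-in constants and made-up factor values (none of this is the kernel's code):

#include <stdio.h>

#define SCALE_SHIFT 10                          /* stand-in for SCHED_POWER_SHIFT */
#define SCALE       (1UL << SCALE_SHIFT)        /* stand-in for SCHED_POWER_SCALE */

/* Apply a chain of fixed-point factors the way update_cpu_capacity() does. */
static unsigned long scale_capacity(unsigned long smt_factor,
                                    unsigned long freq_factor,
                                    unsigned long rt_factor)
{
        unsigned long capacity = SCALE;

        capacity = (capacity * smt_factor)  >> SCALE_SHIFT;
        capacity = (capacity * freq_factor) >> SCALE_SHIFT;
        capacity = (capacity * rt_factor)   >> SCALE_SHIFT;

        return capacity ? capacity : 1;         /* never report zero capacity */
}

int main(void)
{
        /* e.g. ~1.15x SMT gain, full frequency, RT work eating ~25% of the time */
        printf("capacity = %lu\n", scale_capacity(1178, SCALE, 768));
        return 0;
}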
@@ -5712,7 +5712,7 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
         sdg->sgc->next_update = jiffies + interval;
 
         if (!child) {
-                update_cpu_power(sd, cpu);
+                update_cpu_capacity(sd, cpu);
                 return;
         }
 
@@ -5733,8 +5733,8 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
                          * gets here before we've attached the domains to the
                          * runqueues.
                          *
-                         * Use power_of(), which is set irrespective of domains
-                         * in update_cpu_power().
+                         * Use capacity_of(), which is set irrespective of domains
+                         * in update_cpu_capacity().
                          *
                          * This avoids capacity/capacity_orig from being 0 and
                          * causing divide-by-zero issues on boot.
@@ -5742,8 +5742,8 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
                          * Runtime updates will correct capacity_orig.
                          */
                         if (unlikely(!rq->sd)) {
-                                capacity_orig += power_of(cpu);
-                                capacity += power_of(cpu);
+                                capacity_orig += capacity_of(cpu);
+                                capacity += capacity_of(cpu);
                                 continue;
                         }
 
@@ -5831,7 +5831,7 @@ static inline int sg_imbalanced(struct sched_group *group)
 /*
  * Compute the group capacity factor.
  *
- * Avoid the issue where N*frac(smt_power) >= 1 creates 'phantom' cores by
+ * Avoid the issue where N*frac(smt_capacity) >= 1 creates 'phantom' cores by
  * first dividing out the smt factor and computing the actual number of cores
  * and limit unit capacity with that.
  */
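A quick arithmetic illustration of the "phantom core" issue this comment describes, with hypothetical numbers: if SMT scaling leaves each core with about 1.15 capacity units, seven such cores sum to roughly 8.05 units, and rounding the sum reports eight cores, one of them phantom.

#include <stdio.h>

#define SCALE 1024UL    /* stand-in for SCHED_POWER_SCALE */

int main(void)
{
        unsigned long per_core = 1178;  /* ~1.15 * SCALE after SMT scaling */
        int cores = 7;
        unsigned long group_capacity = cores * per_core;

        /* Rounding the summed capacity invents an eighth, "phantom" core. */
        printf("rounded capacity units: %lu\n",
               (group_capacity + SCALE / 2) / SCALE);
        /* Dividing out the SMT factor first keeps the real core count. */
        printf("actual cores:           %d\n", cores);
        return 0;
}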
@@ -6129,7 +6129,7 @@ void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
 
         /*
          * OK, we don't have enough imbalance to justify moving tasks,
-         * however we may be able to increase total CPU power used by
+         * however we may be able to increase total CPU capacity used by
          * moving them.
          */
 
@@ -6190,7 +6190,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
         /*
          * In the presence of smp nice balancing, certain scenarios can have
          * max load less than avg load(as we skip the groups at or below
-         * its cpu_power, while calculating max_load..)
+         * its cpu_capacity, while calculating max_load..)
          */
         if (busiest->avg_load <= sds->avg_load ||
             local->avg_load >= sds->avg_load) {
@@ -6345,11 +6345,11 @@ static struct rq *find_busiest_queue(struct lb_env *env,
                                      struct sched_group *group)
 {
         struct rq *busiest = NULL, *rq;
-        unsigned long busiest_load = 0, busiest_power = 1;
+        unsigned long busiest_load = 0, busiest_capacity = 1;
         int i;
 
         for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
-                unsigned long power, capacity_factor, wl;
+                unsigned long capacity, capacity_factor, wl;
                 enum fbq_type rt;
 
                 rq = cpu_rq(i);
@@ -6377,8 +6377,8 @@ static struct rq *find_busiest_queue(struct lb_env *env,
                 if (rt > env->fbq_type)
                         continue;
 
-                power = power_of(i);
-                capacity_factor = DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE);
+                capacity = capacity_of(i);
+                capacity_factor = DIV_ROUND_CLOSEST(capacity, SCHED_POWER_SCALE);
                 if (!capacity_factor)
                         capacity_factor = fix_small_capacity(env->sd, group);
 
@@ -6386,25 +6386,25 @@ static struct rq *find_busiest_queue(struct lb_env *env,
 
                 /*
                  * When comparing with imbalance, use weighted_cpuload()
-                 * which is not scaled with the cpu power.
+                 * which is not scaled with the cpu capacity.
                  */
                 if (capacity_factor && rq->nr_running == 1 && wl > env->imbalance)
                         continue;
 
                 /*
                  * For the load comparisons with the other cpu's, consider
-                 * the weighted_cpuload() scaled with the cpu power, so that
-                 * the load can be moved away from the cpu that is potentially
-                 * running at a lower capacity.
+                 * the weighted_cpuload() scaled with the cpu capacity, so
+                 * that the load can be moved away from the cpu that is
+                 * potentially running at a lower capacity.
                  *
-                 * Thus we're looking for max(wl_i / power_i), crosswise
+                 * Thus we're looking for max(wl_i / capacity_i), crosswise
                  * multiplication to rid ourselves of the division works out
-                 * to: wl_i * power_j > wl_j * power_i; where j is our
-                 * previous maximum.
+                 * to: wl_i * capacity_j > wl_j * capacity_i; where j is
+                 * our previous maximum.
                  */
-                if (wl * busiest_power > busiest_load * power) {
+                if (wl * busiest_capacity > busiest_load * capacity) {
                         busiest_load = wl;
-                        busiest_power = power;
+                        busiest_capacity = capacity;
                         busiest = rq;
                 }
         }
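The cross-multiplication at the end of this hunk picks the busiest runqueue by the largest wl/capacity ratio without ever dividing. A self-contained sketch of that selection loop, using a made-up fake_rq struct and invented sample values rather than real runqueues:

#include <stdio.h>

struct fake_rq {
        unsigned long wl;       /* stands in for weighted_cpuload() */
        unsigned long capacity; /* stands in for capacity_of()      */
};

/* Return the index of the rq with the largest wl/capacity, without dividing. */
static int pick_busiest(const struct fake_rq *rq, int nr)
{
        unsigned long busiest_load = 0, busiest_capacity = 1;
        int i, busiest = -1;

        for (i = 0; i < nr; i++) {
                /* wl_i / capacity_i > wl_j / capacity_j, cross-multiplied */
                if (rq[i].wl * busiest_capacity > busiest_load * rq[i].capacity) {
                        busiest_load = rq[i].wl;
                        busiest_capacity = rq[i].capacity;
                        busiest = i;
                }
        }
        return busiest;
}

int main(void)
{
        struct fake_rq rqs[] = {
                { .wl = 1536, .capacity = 2048 },       /* 0.75 of its capacity */
                { .wl = 1024, .capacity = 1024 },       /* 1.00 of its capacity */
                { .wl =  512, .capacity = 1024 },       /* 0.50 of its capacity */
        };

        printf("busiest: %d\n", pick_busiest(rqs, 3));  /* prints 1 */
        return 0;
}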
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index a5b957d53c92..956b8ca24893 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -567,7 +567,7 @@ struct rq {
         struct root_domain *rd;
         struct sched_domain *sd;
 
-        unsigned long cpu_power;
+        unsigned long cpu_capacity;
 
         unsigned char idle_balance;
         /* For active balancing */