author    Nicolas Pitre <nicolas.pitre@linaro.org>  2014-05-27 13:50:41 -0400
committer Ingo Molnar <mingo@kernel.org>            2014-06-05 05:52:32 -0400
commit    5d4dfddd4f02b028d6ddaaa04d75d3b0cad1c9ae
tree      03bd39cc1c5a904eef49340d46b2265769b5f557
parent    ca8ce3d0b144c318a5a9ce99649053e9029061ea
sched: Rename capacity related flags
It is better not to think about compute capacity as being equivalent
to "CPU power". The upcoming "power aware" scheduler work may create
confusion with the notion of energy consumption if "power" is used too
liberally.

Let's rename the following feature flags since they do relate to capacity:

	SD_SHARE_CPUPOWER -> SD_SHARE_CPUCAPACITY
	ARCH_POWER        -> ARCH_CAPACITY
	NONTASK_POWER     -> NONTASK_CAPACITY

Signed-off-by: Nicolas Pitre <nico@linaro.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Daniel Lezcano <daniel.lezcano@linaro.org>
Cc: Morten Rasmussen <morten.rasmussen@arm.com>
Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
Cc: linaro-kernel@lists.linaro.org
Cc: Andy Fleming <afleming@freescale.com>
Cc: Anton Blanchard <anton@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Grant Likely <grant.likely@linaro.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Preeti U Murthy <preeti@linux.vnet.ibm.com>
Cc: Rob Herring <robh+dt@kernel.org>
Cc: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Cc: Toshi Kani <toshi.kani@hp.com>
Cc: Vasant Hegde <hegdevasant@linux.vnet.ibm.com>
Cc: devicetree@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Cc: linuxppc-dev@lists.ozlabs.org
Link: http://lkml.kernel.org/n/tip-e93lpnxb87owfievqatey6b5@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
 arch/powerpc/kernel/smp.c |  2 +-
 include/linux/sched.h     |  4 ++--
 kernel/sched/core.c       | 14 +++++++-------
 kernel/sched/fair.c       |  8 ++++----
 kernel/sched/features.h   |  8 ++++----
 5 files changed, 18 insertions(+), 18 deletions(-)
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 10ffffef0414..c51d16379cba 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -770,7 +770,7 @@ int setup_profiling_timer(unsigned int multiplier)
 /* cpumask of CPUs with asymetric SMT dependancy */
 static const int powerpc_smt_flags(void)
 {
-	int flags = SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES;
+	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
 
 	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
 		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
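Context for this hunk: powerpc_smt_flags() is a per-topology-level callback whose return value becomes the flags of the SMT sched domain. The wiring itself is not part of this hunk, so here is a hedged standalone sketch of the mechanism; the names mirror the kernel's sched_domain_topology_level but this is an illustration, not the kernel definition.

#include <stdio.h>

/* Illustrative model: each topology level may supply a callback that
 * returns its SD_* flags, as powerpc_smt_flags() does above. */
typedef int (*sd_flags_fn)(void);

struct topology_level {
	sd_flags_fn sd_flags;	/* may be NULL for plain levels */
	const char *name;
};

#define SD_SHARE_CPUCAPACITY   0x0080
#define SD_SHARE_PKG_RESOURCES 0x0200

static int smt_flags(void)
{
	return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
}

static const struct topology_level topology[] = {
	{ smt_flags, "SMT" },	/* hardware threads of one core */
	{ NULL,      "DIE" },	/* no extra flags at this level */
};

int main(void)
{
	for (size_t i = 0; i < sizeof(topology) / sizeof(topology[0]); i++)
		printf("%s: flags=0x%04x\n", topology[i].name,
		       topology[i].sd_flags ? topology[i].sd_flags() : 0);
	return 0;
}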
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 322110affe63..ce93768a3312 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -869,7 +869,7 @@ enum cpu_idle_type {
 #define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
 #define SD_BALANCE_WAKE		0x0010	/* Balance on wakeup */
 #define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
-#define SD_SHARE_CPUPOWER	0x0080	/* Domain members share cpu power */
+#define SD_SHARE_CPUCAPACITY	0x0080	/* Domain members share cpu power */
 #define SD_SHARE_POWERDOMAIN	0x0100	/* Domain members share power domain */
 #define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
 #define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
@@ -881,7 +881,7 @@ enum cpu_idle_type {
 #ifdef CONFIG_SCHED_SMT
 static inline const int cpu_smt_flags(void)
 {
-	return SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES;
+	return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
 }
 #endif
 
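Each SD_* value above is a distinct bit, so domain properties compose with | and are queried with &, exactly as cpu_smt_flags() composes the renamed flag. A minimal standalone illustration, with the flag values copied from the hunk above:

#include <stdio.h>

#define SD_WAKE_AFFINE         0x0020
#define SD_SHARE_CPUCAPACITY   0x0080
#define SD_SHARE_PKG_RESOURCES 0x0200

int main(void)
{
	/* An SMT-sibling domain: shares core capacity and package resources. */
	unsigned int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;

	if (flags & SD_SHARE_CPUCAPACITY)
		printf("members contend for one core's capacity\n");
	if (!(flags & SD_WAKE_AFFINE))
		printf("wake-affine balancing not set here\n");
	return 0;
}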
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7ba4f5413a10..5976ca579d3e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -872,7 +872,7 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
 	rq->clock_task += delta;
 
 #if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
-	if ((irq_delta + steal) && sched_feat(NONTASK_POWER))
+	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
 		sched_rt_avg_update(rq, irq_delta + steal);
 #endif
 }
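What NONTASK_CAPACITY gates here: time eaten by IRQ handling or stolen by a hypervisor is charged into the runqueue's rolling rt_avg, which the fair scheduler later uses to shrink the capacity it believes the CPU has for tasks. A hedged userspace model of just this accounting step; the aging of the average and the exact names are simplified, not the kernel's implementation:

#include <stdio.h>

struct rq_model {
	unsigned long long rt_avg;	/* accumulated non-task time, ns */
};

/* Simplified stand-in for sched_rt_avg_update(); the kernel also
 * decays this average over time. */
static void sched_rt_avg_update_model(struct rq_model *rq,
				      unsigned long long delta_ns)
{
	rq->rt_avg += delta_ns;
}

int main(void)
{
	struct rq_model rq = { 0 };
	unsigned long long irq_delta = 300000, steal = 150000;	/* ns */
	int nontask_capacity = 1;	/* models sched_feat(NONTASK_CAPACITY) */

	if ((irq_delta + steal) && nontask_capacity)
		sched_rt_avg_update_model(&rq, irq_delta + steal);

	printf("non-task time charged: %llu ns\n", rq.rt_avg);
	return 0;
}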
@@ -5309,7 +5309,7 @@ static int sd_degenerate(struct sched_domain *sd)
 			 SD_BALANCE_NEWIDLE |
 			 SD_BALANCE_FORK |
 			 SD_BALANCE_EXEC |
-			 SD_SHARE_CPUPOWER |
+			 SD_SHARE_CPUCAPACITY |
 			 SD_SHARE_PKG_RESOURCES |
 			 SD_SHARE_POWERDOMAIN)) {
 		if (sd->groups != sd->groups->next)
@@ -5340,7 +5340,7 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
 				SD_BALANCE_NEWIDLE |
 				SD_BALANCE_FORK |
 				SD_BALANCE_EXEC |
-				SD_SHARE_CPUPOWER |
+				SD_SHARE_CPUCAPACITY |
 				SD_SHARE_PKG_RESOURCES |
 				SD_PREFER_SIBLING |
 				SD_SHARE_POWERDOMAIN);
@@ -5947,7 +5947,7 @@ static int sched_domains_curr_level;
 /*
  * SD_flags allowed in topology descriptions.
  *
- * SD_SHARE_CPUPOWER      - describes SMT topologies
+ * SD_SHARE_CPUCAPACITY   - describes SMT topologies
  * SD_SHARE_PKG_RESOURCES - describes shared caches
  * SD_NUMA                - describes NUMA topologies
  * SD_SHARE_POWERDOMAIN   - describes shared power domain
@@ -5956,7 +5956,7 @@ static int sched_domains_curr_level;
  * SD_ASYM_PACKING        - describes SMT quirks
  */
 #define TOPOLOGY_SD_FLAGS		\
-	(SD_SHARE_CPUPOWER |		\
+	(SD_SHARE_CPUCAPACITY |		\
 	 SD_SHARE_PKG_RESOURCES |	\
 	 SD_NUMA |			\
 	 SD_ASYM_PACKING |		\
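TOPOLOGY_SD_FLAGS acts as a whitelist: topology levels may only describe hardware properties, not balancing behaviour, and sd_init() complains when a level's flags callback strays outside it. A hedged standalone sketch of that validation; the flag values are illustrative and the kernel's exact recovery path may differ:

#include <stdio.h>

#define SD_SHARE_CPUCAPACITY   0x0080
#define SD_SHARE_POWERDOMAIN   0x0100
#define SD_SHARE_PKG_RESOURCES 0x0200
#define SD_SERIALIZE           0x0400
#define SD_ASYM_PACKING        0x0800
#define SD_NUMA                0x4000

#define TOPOLOGY_SD_FLAGS		\
	(SD_SHARE_CPUCAPACITY |		\
	 SD_SHARE_PKG_RESOURCES |	\
	 SD_NUMA |			\
	 SD_ASYM_PACKING |		\
	 SD_SHARE_POWERDOMAIN)

int main(void)
{
	/* A topology callback that (wrongly) sets a behavioural flag too. */
	int sd_flags = SD_SHARE_CPUCAPACITY | SD_SERIALIZE;

	if (sd_flags & ~TOPOLOGY_SD_FLAGS) {
		printf("wrong sd_flags in topology description\n");
		sd_flags &= TOPOLOGY_SD_FLAGS;	/* keep only topology bits */
	}
	printf("effective flags: 0x%04x\n", sd_flags);
	return 0;
}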
@@ -6002,7 +6002,7 @@ sd_init(struct sched_domain_topology_level *tl, int cpu)
 					| 1*SD_BALANCE_FORK
 					| 0*SD_BALANCE_WAKE
 					| 1*SD_WAKE_AFFINE
-					| 0*SD_SHARE_CPUPOWER
+					| 0*SD_SHARE_CPUCAPACITY
 					| 0*SD_SHARE_PKG_RESOURCES
 					| 0*SD_SERIALIZE
 					| 0*SD_PREFER_SIBLING
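The 1*FLAG / 0*FLAG idiom in this hunk keeps every known flag visible in one place while a leading 0 or 1 switches each on or off; the compiler folds the whole expression to a constant, so it costs nothing at run time. A tiny standalone demonstration with illustrative flag values:

#include <stdio.h>

#define SD_BALANCE_FORK      0x0008
#define SD_WAKE_AFFINE       0x0020
#define SD_SHARE_CPUCAPACITY 0x0080

int main(void)
{
	/* Same shape as sd_init(): all flags listed, some multiplied by 0. */
	int flags = 1*SD_BALANCE_FORK
		  | 1*SD_WAKE_AFFINE
		  | 0*SD_SHARE_CPUCAPACITY;

	printf("flags = 0x%04x\n", flags);	/* prints 0x0028 */
	return 0;
}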
@@ -6024,7 +6024,7 @@ sd_init(struct sched_domain_topology_level *tl, int cpu)
 	 * Convert topological properties into behaviour.
 	 */
 
-	if (sd->flags & SD_SHARE_CPUPOWER) {
+	if (sd->flags & SD_SHARE_CPUCAPACITY) {
 		sd->imbalance_pct = 110;
 		sd->smt_gain = 1178; /* ~15% */
 
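Worked numbers for the two constants set in this branch: imbalance_pct = 110 means load balancing between SMT siblings tolerates roughly a 10% imbalance before acting, and smt_gain = 1178 is about 1.15 x 1024 (SCHED_CAPACITY_SCALE), i.e. a core running two hardware threads is rated ~15% faster than a single thread. A standalone check of the arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int scale = 1024;	/* SCHED_CAPACITY_SCALE: one full CPU */
	unsigned int smt_gain = 1178;	/* 1178/1024 ~= 1.15 */
	unsigned int siblings = 2;	/* 2-way SMT */

	/* ~15% throughput gain from running both hardware threads. */
	printf("gain over one thread: %u%%\n", (100 * smt_gain) / scale - 100);
	/* That gain is split across the siblings sharing the core. */
	printf("capacity per sibling: %u\n", smt_gain / siblings);	/* 589 */
	return 0;
}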
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index dc7d6527a282..d3c731222199 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5672,8 +5672,8 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 	unsigned long capacity = SCHED_CAPACITY_SCALE;
 	struct sched_group *sdg = sd->groups;
 
-	if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
-		if (sched_feat(ARCH_POWER))
+	if ((sd->flags & SD_SHARE_CPUCAPACITY) && weight > 1) {
+		if (sched_feat(ARCH_CAPACITY))
 			capacity *= arch_scale_smt_capacity(sd, cpu);
 		else
 			capacity *= default_scale_smt_capacity(sd, cpu);
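Following the SMT branch above with concrete numbers: the default helper divides smt_gain across the siblings, and update_cpu_capacity() multiplies then shifts right by SCHED_CAPACITY_SHIFT to return to capacity units. A hedged standalone model, with the helper reduced to its arithmetic:

#include <stdio.h>

#define SCHED_CAPACITY_SHIFT 10
#define SCHED_CAPACITY_SCALE (1UL << SCHED_CAPACITY_SHIFT)	/* 1024 */

/* Simplified stand-in for default_scale_smt_capacity():
 * split smt_gain across the SMT siblings (the domain's span weight). */
static unsigned long scale_smt_model(unsigned long smt_gain,
				     unsigned long weight)
{
	return smt_gain / weight;
}

int main(void)
{
	unsigned long capacity = SCHED_CAPACITY_SCALE;

	/* Two SMT siblings sharing a core rated at smt_gain = 1178. */
	capacity *= scale_smt_model(1178, 2);	/* 1024 * 589 */
	capacity >>= SCHED_CAPACITY_SHIFT;	/* back to capacity units */

	printf("per-thread capacity: %lu of %lu\n",
	       capacity, SCHED_CAPACITY_SCALE);	/* 589 of 1024 */
	return 0;
}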
@@ -5683,7 +5683,7 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 
 	sdg->sgc->capacity_orig = capacity;
 
-	if (sched_feat(ARCH_POWER))
+	if (sched_feat(ARCH_CAPACITY))
 		capacity *= arch_scale_freq_capacity(sd, cpu);
 	else
 		capacity *= default_scale_capacity(sd, cpu);
@@ -5782,7 +5782,7 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
 	/*
 	 * Only siblings can have significantly less than SCHED_CAPACITY_SCALE
 	 */
-	if (!(sd->flags & SD_SHARE_CPUPOWER))
+	if (!(sd->flags & SD_SHARE_CPUCAPACITY))
 		return 0;
 
 	/*
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 5716929a2e3a..90284d117fe6 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -37,18 +37,18 @@ SCHED_FEAT(CACHE_HOT_BUDDY, true)
 SCHED_FEAT(WAKEUP_PREEMPTION, true)
 
 /*
- * Use arch dependent cpu power functions
+ * Use arch dependent cpu capacity functions
  */
-SCHED_FEAT(ARCH_POWER, true)
+SCHED_FEAT(ARCH_CAPACITY, true)
 
 SCHED_FEAT(HRTICK, false)
 SCHED_FEAT(DOUBLE_TICK, false)
 SCHED_FEAT(LB_BIAS, true)
 
 /*
- * Decrement CPU power based on time not spent running tasks
+ * Decrement CPU capacity based on time not spent running tasks
  */
-SCHED_FEAT(NONTASK_POWER, true)
+SCHED_FEAT(NONTASK_CAPACITY, true)
 
 /*
  * Queue remote wakeups on the target CPU and process them
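How these SCHED_FEAT() lines become testable bits: features.h is an X-macro file that kernel/sched/core.c includes more than once with different SCHED_FEAT() definitions, first building an enum of bit indices, then folding the defaults into one mask; sched_feat(x) tests a single bit. A hedged standalone model with the feature list inlined instead of #included:

#include <stdio.h>

/* Inlined stand-in for #include "features.h"; mechanism matches. */
#define SCHED_FEAT_LIST(F)	\
	F(ARCH_CAPACITY, 1)	\
	F(HRTICK, 0)		\
	F(NONTASK_CAPACITY, 1)

/* First pass: give each feature a bit index. */
#define F_ENUM(name, enabled) __FEAT_##name,
enum { SCHED_FEAT_LIST(F_ENUM) __FEAT_NR };

/* Second pass: fold the defaults into one bitmask. */
#define F_MASK(name, enabled) ((enabled) << __FEAT_##name) |
static unsigned int sched_features = SCHED_FEAT_LIST(F_MASK) 0;

#define sched_feat(x) (sched_features & (1U << __FEAT_##x))

int main(void)
{
	printf("ARCH_CAPACITY: %s\n", sched_feat(ARCH_CAPACITY) ? "on" : "off");
	printf("HRTICK:        %s\n", sched_feat(HRTICK) ? "on" : "off");
	return 0;
}

With CONFIG_SCHED_DEBUG the kernel additionally exposes these bits for runtime toggling under /sys/kernel/debug/sched_features, which is why the renamed feature names are user visible.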