author		Juri Lelli <juri.lelli@arm.com>		2017-12-04 05:23:23 -0500
committer	Ingo Molnar <mingo@kernel.org>		2018-01-10 06:53:34 -0500
commit		7673c8a4c75d1cac2cd47156b9768f462683a09d
tree		6df27e380ce881974238d7c62feb9c1ff04857c4
parent		0fa7d181f1a60149061632266bb432b4b61acdac
sched/cpufreq: Remove arch_scale_freq_capacity()'s 'sd' parameter
The 'sd' parameter is never used in arch_scale_freq_capacity() (and it is
hard to see how information coming from scheduling domains could help with
frequency-invariance scaling).

Remove it, also in anticipation of moving arch_scale_freq_capacity()
outside of CONFIG_SMP.
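To make the change concrete: arch_scale_freq_capacity() has a default in
kernel/sched/sched.h that architectures can override via a #define in their
asm/topology.h, and after this patch the hook takes only a CPU number. Below
is a minimal, self-contained userspace sketch of that override pattern. The
4-entry freq_scale array and its values are purely illustrative (the kernel
uses DECLARE_PER_CPU(unsigned long, freq_scale)); the #define wiring mirrors
what arm/arm64 typically do, and is not part of this patch.

#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024UL

/*
 * Stand-in for the per-CPU freq_scale values maintained by arch_topology;
 * the values here are illustrative only.
 */
static unsigned long freq_scale[4] = { 1024, 1024, 512, 512 };

/* The architecture's implementation, cf. topology_get_freq_scale() in the
 * arch_topology.h hunk below. */
static unsigned long topology_get_freq_scale(int cpu)
{
	return freq_scale[cpu];
}

/*
 * An architecture (e.g. arm/arm64 in its asm/topology.h) typically overrides
 * the hook like this, which is why the default below compiles out.
 */
#define arch_scale_freq_capacity topology_get_freq_scale

/* Default fallback, as in kernel/sched/sched.h, used when nothing overrides it. */
#ifndef arch_scale_freq_capacity
static unsigned long arch_scale_freq_capacity(int cpu)
{
	return SCHED_CAPACITY_SCALE;	/* 1024: no frequency scaling applied */
}
#endif

int main(void)
{
	for (int cpu = 0; cpu < 4; cpu++)
		printf("cpu%d freq capacity = %lu\n", cpu, arch_scale_freq_capacity(cpu));
	return 0;
}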
Signed-off-by: Juri Lelli <juri.lelli@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: alessio.balsini@arm.com
Cc: bristot@redhat.com
Cc: claudio@evidence.eu.com
Cc: dietmar.eggemann@arm.com
Cc: joelaf@google.com
Cc: juri.lelli@redhat.com
Cc: luca.abeni@santannapisa.it
Cc: mathieu.poirier@linaro.org
Cc: morten.rasmussen@arm.com
Cc: patrick.bellasi@arm.com
Cc: rjw@rjwysocki.net
Cc: rostedt@goodmis.org
Cc: tkjos@android.com
Cc: tommaso.cucinotta@santannapisa.it
Cc: vincent.guittot@linaro.org
Cc: viresh.kumar@linaro.org
Link: http://lkml.kernel.org/r/20171204102325.5110-7-juri.lelli@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
 include/linux/arch_topology.h | 2 +-
 kernel/sched/fair.c           | 2 +-
 kernel/sched/sched.h          | 4 ++--
 3 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index 304511267c82..2b709416de05 100644
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -27,7 +27,7 @@ void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity);
 DECLARE_PER_CPU(unsigned long, freq_scale);
 
 static inline
-unsigned long topology_get_freq_scale(struct sched_domain *sd, int cpu)
+unsigned long topology_get_freq_scale(int cpu)
 {
 	return per_cpu(freq_scale, cpu);
 }
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9fec992410f7..14859757bff0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3120,7 +3120,7 @@ accumulate_sum(u64 delta, int cpu, struct sched_avg *sa,
 	u32 contrib = (u32)delta; /* p == 0 -> delta < 1024 */
 	u64 periods;
 
-	scale_freq = arch_scale_freq_capacity(NULL, cpu);
+	scale_freq = arch_scale_freq_capacity(cpu);
 	scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
 
 	delta += sa->period_contrib;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c5197338ac47..b7100192ecd3 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1675,7 +1675,7 @@ extern void sched_avg_update(struct rq *rq);
 
 #ifndef arch_scale_freq_capacity
 static __always_inline
-unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
+unsigned long arch_scale_freq_capacity(int cpu)
 {
 	return SCHED_CAPACITY_SCALE;
 }
@@ -1694,7 +1694,7 @@ unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
 
 static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
 {
-	rq->rt_avg += rt_delta * arch_scale_freq_capacity(NULL, cpu_of(rq));
+	rq->rt_avg += rt_delta * arch_scale_freq_capacity(cpu_of(rq));
 	sched_avg_update(rq);
 }
 #else
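As background on what the changed call sites do with this value: both the
PELT accumulation in accumulate_sum() and the rt_avg update above scale a
time delta by the frequency capacity, so time run at a lower clock counts
proportionally less. A standalone sketch of that fixed-point arithmetic
follows; it is plain userspace C with illustrative values, and cap_scale()
here only mirrors the kernel's shift-by-SCHED_CAPACITY_SHIFT convention
rather than being the kernel's own helper.

#include <stdio.h>
#include <stdint.h>

#define SCHED_CAPACITY_SHIFT	10
#define SCHED_CAPACITY_SCALE	(1UL << SCHED_CAPACITY_SHIFT)	/* 1024 */

/* Scale a value by a capacity factor expressed against 1024. */
static uint64_t cap_scale(uint64_t val, uint64_t scale)
{
	return (val * scale) >> SCHED_CAPACITY_SHIFT;
}

int main(void)
{
	uint64_t contrib = 1024;	/* one full segment of running time */
	uint64_t scale_freq = 512;	/* CPU running at half its maximum frequency */

	/* (1024 * 512) >> 10 = 512: half-speed time contributes half the load. */
	printf("scaled contrib = %llu\n",
	       (unsigned long long)cap_scale(contrib, scale_freq));
	return 0;
}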