summary refs log tree commit diff stats
path: root/kernel/sched
diff options
context:
space:
mode:
author: Vincent Guittot <vincent.guittot@linaro.org> 2019-06-17 11:00:17 -0400
committer: Ingo Molnar <mingo@kernel.org> 2019-06-24 13:23:39 -0400
commit: 8ec59c0f5f4966f89f4e3e3cab81710c7fa959d0 (patch)
treeff6a6d519b089e759a168302f917ebe8d28a8046 /kernel/sched
parentd2abae71ebcc409828b24ce9da402548ecdf1311 (diff)
sched/topology: Remove unused 'sd' parameter from arch_scale_cpu_capacity()
The 'struct sched_domain *sd' parameter to arch_scale_cpu_capacity() is
unused since commit:

  765d0af19f5f ("sched/topology: Remove the ::smt_gain field from 'struct sched_domain'")

Remove it.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Viresh Kumar <viresh.kumar@linaro.org>
Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: gregkh@linuxfoundation.org
Cc: linux@armlinux.org.uk
Cc: quentin.perret@arm.com
Cc: rafael@kernel.org
Link: https://lkml.kernel.org/r/1560783617-5827-1-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r--kernel/sched/cpufreq_schedutil.c2
-rw-r--r--kernel/sched/deadline.c2
-rw-r--r--kernel/sched/fair.c6
-rw-r--r--kernel/sched/pelt.c2
-rw-r--r--kernel/sched/pelt.h2
-rw-r--r--kernel/sched/sched.h2
-rw-r--r--kernel/sched/topology.c8
7 files changed, 12 insertions, 12 deletions
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 962cf343f798..7c4ce69067c4 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -276,7 +276,7 @@ static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
276{ 276{
277 struct rq *rq = cpu_rq(sg_cpu->cpu); 277 struct rq *rq = cpu_rq(sg_cpu->cpu);
278 unsigned long util = cpu_util_cfs(rq); 278 unsigned long util = cpu_util_cfs(rq);
279 unsigned long max = arch_scale_cpu_capacity(NULL, sg_cpu->cpu); 279 unsigned long max = arch_scale_cpu_capacity(sg_cpu->cpu);
280 280
281 sg_cpu->max = max; 281 sg_cpu->max = max;
282 sg_cpu->bw_dl = cpu_bw_dl(rq); 282 sg_cpu->bw_dl = cpu_bw_dl(rq);
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index c1ef30861068..8b5bb2ac16e2 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1195,7 +1195,7 @@ static void update_curr_dl(struct rq *rq)
1195 &curr->dl); 1195 &curr->dl);
1196 } else { 1196 } else {
1197 unsigned long scale_freq = arch_scale_freq_capacity(cpu); 1197 unsigned long scale_freq = arch_scale_freq_capacity(cpu);
1198 unsigned long scale_cpu = arch_scale_cpu_capacity(NULL, cpu); 1198 unsigned long scale_cpu = arch_scale_cpu_capacity(cpu);
1199 1199
1200 scaled_delta_exec = cap_scale(delta_exec, scale_freq); 1200 scaled_delta_exec = cap_scale(delta_exec, scale_freq);
1201 scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu); 1201 scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3c11dcdedcbc..4f8754157763 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -764,7 +764,7 @@ void post_init_entity_util_avg(struct task_struct *p)
764 struct sched_entity *se = &p->se; 764 struct sched_entity *se = &p->se;
765 struct cfs_rq *cfs_rq = cfs_rq_of(se); 765 struct cfs_rq *cfs_rq = cfs_rq_of(se);
766 struct sched_avg *sa = &se->avg; 766 struct sched_avg *sa = &se->avg;
767 long cpu_scale = arch_scale_cpu_capacity(NULL, cpu_of(rq_of(cfs_rq))); 767 long cpu_scale = arch_scale_cpu_capacity(cpu_of(rq_of(cfs_rq)));
768 long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2; 768 long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2;
769 769
770 if (cap > 0) { 770 if (cap > 0) {
@@ -7646,7 +7646,7 @@ static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
7646static unsigned long scale_rt_capacity(struct sched_domain *sd, int cpu) 7646static unsigned long scale_rt_capacity(struct sched_domain *sd, int cpu)
7647{ 7647{
7648 struct rq *rq = cpu_rq(cpu); 7648 struct rq *rq = cpu_rq(cpu);
7649 unsigned long max = arch_scale_cpu_capacity(sd, cpu); 7649 unsigned long max = arch_scale_cpu_capacity(cpu);
7650 unsigned long used, free; 7650 unsigned long used, free;
7651 unsigned long irq; 7651 unsigned long irq;
7652 7652
@@ -7671,7 +7671,7 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
7671 unsigned long capacity = scale_rt_capacity(sd, cpu); 7671 unsigned long capacity = scale_rt_capacity(sd, cpu);
7672 struct sched_group *sdg = sd->groups; 7672 struct sched_group *sdg = sd->groups;
7673 7673
7674 cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(sd, cpu); 7674 cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(cpu);
7675 7675
7676 if (!capacity) 7676 if (!capacity)
7677 capacity = 1; 7677 capacity = 1;
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
index befce29bd882..42ea66b07b1d 100644
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -366,7 +366,7 @@ int update_irq_load_avg(struct rq *rq, u64 running)
366 * reflect the real amount of computation 366 * reflect the real amount of computation
367 */ 367 */
368 running = cap_scale(running, arch_scale_freq_capacity(cpu_of(rq))); 368 running = cap_scale(running, arch_scale_freq_capacity(cpu_of(rq)));
369 running = cap_scale(running, arch_scale_cpu_capacity(NULL, cpu_of(rq))); 369 running = cap_scale(running, arch_scale_cpu_capacity(cpu_of(rq)));
370 370
371 /* 371 /*
372 * We know the time that has been used by interrupt since last update 372 * We know the time that has been used by interrupt since last update
diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
index 7489d5f56960..afff644da065 100644
--- a/kernel/sched/pelt.h
+++ b/kernel/sched/pelt.h
@@ -79,7 +79,7 @@ static inline void update_rq_clock_pelt(struct rq *rq, s64 delta)
79 * Scale the elapsed time to reflect the real amount of 79 * Scale the elapsed time to reflect the real amount of
80 * computation 80 * computation
81 */ 81 */
82 delta = cap_scale(delta, arch_scale_cpu_capacity(NULL, cpu_of(rq))); 82 delta = cap_scale(delta, arch_scale_cpu_capacity(cpu_of(rq)));
83 delta = cap_scale(delta, arch_scale_freq_capacity(cpu_of(rq))); 83 delta = cap_scale(delta, arch_scale_freq_capacity(cpu_of(rq)));
84 84
85 rq->clock_pelt += delta; 85 rq->clock_pelt += delta;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b08dee29ef5e..e58ab597ec88 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2248,7 +2248,7 @@ unsigned long schedutil_freq_util(int cpu, unsigned long util_cfs,
2248 2248
2249static inline unsigned long schedutil_energy_util(int cpu, unsigned long cfs) 2249static inline unsigned long schedutil_energy_util(int cpu, unsigned long cfs)
2250{ 2250{
2251 unsigned long max = arch_scale_cpu_capacity(NULL, cpu); 2251 unsigned long max = arch_scale_cpu_capacity(cpu);
2252 2252
2253 return schedutil_freq_util(cpu, cfs, max, ENERGY_UTIL); 2253 return schedutil_freq_util(cpu, cfs, max, ENERGY_UTIL);
2254} 2254}
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 63184cf0d0d7..f751ce0b783e 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1874,10 +1874,10 @@ static struct sched_domain_topology_level
1874 unsigned long cap; 1874 unsigned long cap;
1875 1875
1876 /* Is there any asymmetry? */ 1876 /* Is there any asymmetry? */
1877 cap = arch_scale_cpu_capacity(NULL, cpumask_first(cpu_map)); 1877 cap = arch_scale_cpu_capacity(cpumask_first(cpu_map));
1878 1878
1879 for_each_cpu(i, cpu_map) { 1879 for_each_cpu(i, cpu_map) {
1880 if (arch_scale_cpu_capacity(NULL, i) != cap) { 1880 if (arch_scale_cpu_capacity(i) != cap) {
1881 asym = true; 1881 asym = true;
1882 break; 1882 break;
1883 } 1883 }
@@ -1892,7 +1892,7 @@ static struct sched_domain_topology_level
1892 * to everyone. 1892 * to everyone.
1893 */ 1893 */
1894 for_each_cpu(i, cpu_map) { 1894 for_each_cpu(i, cpu_map) {
1895 unsigned long max_capacity = arch_scale_cpu_capacity(NULL, i); 1895 unsigned long max_capacity = arch_scale_cpu_capacity(i);
1896 int tl_id = 0; 1896 int tl_id = 0;
1897 1897
1898 for_each_sd_topology(tl) { 1898 for_each_sd_topology(tl) {
@@ -1902,7 +1902,7 @@ static struct sched_domain_topology_level
1902 for_each_cpu_and(j, tl->mask(i), cpu_map) { 1902 for_each_cpu_and(j, tl->mask(i), cpu_map) {
1903 unsigned long capacity; 1903 unsigned long capacity;
1904 1904
1905 capacity = arch_scale_cpu_capacity(NULL, j); 1905 capacity = arch_scale_cpu_capacity(j);
1906 1906
1907 if (capacity <= max_capacity) 1907 if (capacity <= max_capacity)
1908 continue; 1908 continue;