author     Peter Zijlstra <peterz@infradead.org>  2016-05-09 04:38:41 -0400
committer  Ingo Molnar <mingo@kernel.org>         2016-09-30 05:03:26 -0400
commit     1b568f0aabf280555125bc7cefc08321ff0ebaba (patch)
tree       5ca491fb2b8a7ff46af700bc64c2d83cea68eabb
parent     10e2f1acd0106c05229f94c70a344ce3a2c8008b (diff)
sched/core: Optimize SCHED_SMT
Avoid pointless SCHED_SMT code when running on !SMT hardware.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--  kernel/sched/core.c  | 19
-rw-r--r--  kernel/sched/fair.c  |  8
-rw-r--r--  kernel/sched/sched.h | 23
3 files changed, 43 insertions(+), 7 deletions(-)
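The patch keys every SMT-only wakeup path off a single jump-label static key, sched_smt_present: it defaults to off, is enabled exactly once from sched_init_smp() after CPU enumeration, and is tested with static_branch_likely()/static_branch_unlikely() in the hot paths below. What follows is a minimal userspace sketch of that control flow, assuming plain C with an ordinary boolean standing in for the static key and a hypothetical smt_siblings_of_cpu0() helper in place of cpumask_weight(cpu_smt_mask(0)); it illustrates the pattern, it is not kernel code.

/*
 * Sketch of the sched_smt_present pattern (assumptions: userspace C,
 * bool instead of a jump-label key, hypothetical SMT probe).
 */
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for DEFINE_STATIC_KEY_FALSE(sched_smt_present): off by default. */
static bool sched_smt_present;

/* Hypothetical probe: number of hardware threads sharing CPU0's core. */
static int smt_siblings_of_cpu0(void)
{
	return 2;			/* pretend we run on 2-way SMT hardware */
}

/* Mirrors sched_init_smt(): flip the key once, after CPU enumeration. */
static void sched_init_smt(void)
{
	if (smt_siblings_of_cpu0() > 1)
		sched_smt_present = true;	/* static_branch_enable() in the kernel */
}

/* Mirrors select_idle_core()/select_idle_smt(): bail out early on !SMT. */
static int select_idle_core(int target)
{
	if (!sched_smt_present)		/* static_branch_likely() in the kernel */
		return -1;
	/* ... otherwise scan the SMT siblings of 'target' for an idle core ... */
	return target;
}

int main(void)
{
	sched_init_smt();
	printf("select_idle_core(3) -> %d\n", select_idle_core(3));
	return 0;
}

In the kernel the check is not a real load-and-test: jump labels are patched at runtime, so on !SMT hardware the disabled key costs only a straight-line jump to the early return, which is what makes keeping these checks in the wakeup fast path cheap.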
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 75ecd4f29199..94115453c1c4 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7412,6 +7412,22 @@ int sched_cpu_dying(unsigned int cpu)
 }
 #endif
 
+#ifdef CONFIG_SCHED_SMT
+DEFINE_STATIC_KEY_FALSE(sched_smt_present);
+
+static void sched_init_smt(void)
+{
+	/*
+	 * We've enumerated all CPUs and will assume that if any CPU
+	 * has SMT siblings, CPU0 will too.
+	 */
+	if (cpumask_weight(cpu_smt_mask(0)) > 1)
+		static_branch_enable(&sched_smt_present);
+}
+#else
+static inline void sched_init_smt(void) { }
+#endif
+
 void __init sched_init_smp(void)
 {
 	cpumask_var_t non_isolated_cpus;
@@ -7441,6 +7457,9 @@ void __init sched_init_smp(void)
 
 	init_sched_rt_class();
 	init_sched_dl_class();
+
+	sched_init_smt();
+
 	sched_smp_initialized = true;
 }
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6b41589c41e4..87caf2bd26f0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5357,7 +5357,7 @@ static inline bool test_idle_cores(int cpu, bool def)
  * Since SMT siblings share all cache levels, inspecting this limited remote
  * state should be fairly cheap.
  */
-void update_idle_core(struct rq *rq)
+void __update_idle_core(struct rq *rq)
 {
 	int core = cpu_of(rq);
 	int cpu;
@@ -5389,6 +5389,9 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
 	struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
 	int core, cpu, wrap;
 
+	if (!static_branch_likely(&sched_smt_present))
+		return -1;
+
 	if (!test_idle_cores(target, false))
 		return -1;
 
@@ -5422,6 +5425,9 @@ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int t
 {
 	int cpu;
 
+	if (!static_branch_likely(&sched_smt_present))
+		return -1;
+
 	for_each_cpu(cpu, cpu_smt_mask(target)) {
 		if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
 			continue;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c917dcad82ad..01b5189235f2 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -36,12 +36,6 @@ extern void cpu_load_update_active(struct rq *this_rq);
 static inline void cpu_load_update_active(struct rq *this_rq) { }
 #endif
 
-#ifdef CONFIG_SCHED_SMT
-extern void update_idle_core(struct rq *rq);
-#else
-static inline void update_idle_core(struct rq *rq) { }
-#endif
-
 /*
  * Helpers for converting nanosecond timing to jiffy resolution
  */
@@ -730,6 +724,23 @@ static inline int cpu_of(struct rq *rq)
 #endif
 }
 
+
+#ifdef CONFIG_SCHED_SMT
+
+extern struct static_key_false sched_smt_present;
+
+extern void __update_idle_core(struct rq *rq);
+
+static inline void update_idle_core(struct rq *rq)
+{
+	if (static_branch_unlikely(&sched_smt_present))
+		__update_idle_core(rq);
+}
+
+#else
+static inline void update_idle_core(struct rq *rq) { }
+#endif
+
 DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 
 #define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))