about summary refs log tree commit diff stats
path: root/kernel/sched/core.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--	kernel/sched/core.c	24
1 file changed, 15 insertions(+), 9 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f12225f26b70..6fedf3a98581 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5738,15 +5738,10 @@ int sched_cpu_activate(unsigned int cpu)
 
 #ifdef CONFIG_SCHED_SMT
 	/*
-	 * The sched_smt_present static key needs to be evaluated on every
-	 * hotplug event because at boot time SMT might be disabled when
-	 * the number of booted CPUs is limited.
-	 *
-	 * If then later a sibling gets hotplugged, then the key would stay
-	 * off and SMT scheduling would never be functional.
+	 * When going up, increment the number of cores with SMT present.
 	 */
-	if (cpumask_weight(cpu_smt_mask(cpu)) > 1)
-		static_branch_enable_cpuslocked(&sched_smt_present);
+	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+		static_branch_inc_cpuslocked(&sched_smt_present);
 #endif
 	set_cpu_active(cpu, true);
 
@@ -5790,6 +5785,14 @@ int sched_cpu_deactivate(unsigned int cpu)
 	 */
 	synchronize_rcu_mult(call_rcu, call_rcu_sched);
 
+#ifdef CONFIG_SCHED_SMT
+	/*
+	 * When going down, decrement the number of cores with SMT present.
+	 */
+	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+		static_branch_dec_cpuslocked(&sched_smt_present);
+#endif
+
 	if (!sched_smp_initialized)
 		return 0;
 
@@ -5851,11 +5854,14 @@ void __init sched_init_smp(void)
 	/*
 	 * There's no userspace yet to cause hotplug operations; hence all the
 	 * CPU masks are stable and all blatant races in the below code cannot
-	 * happen.
+	 * happen. The hotplug lock is nevertheless taken to satisfy lockdep,
+	 * but there won't be any contention on it.
 	 */
+	cpus_read_lock();
 	mutex_lock(&sched_domains_mutex);
 	sched_init_domains(cpu_active_mask);
 	mutex_unlock(&sched_domains_mutex);
+	cpus_read_unlock();
 
 	/* Move init over to a non-isolated CPU */
 	if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0)