summaryrefslogtreecommitdiffstats
path: root/kernel/sched
diff options
context:
space:
mode:
authorThomas Gleixner <tglx@linutronix.de>2016-03-10 06:54:10 -0500
committerThomas Gleixner <tglx@linutronix.de>2016-05-06 08:58:23 -0400
commite26fbffd32c28107d9d268b432706ccf84fb6411 (patch)
tree3d1cbee297a4fc3496b36f25b1ce3a39bec2dd4c /kernel/sched
parent9cf7243d5d83d27aca47f842107bfa02b5f11d16 (diff)
sched: Allow hotplug notifiers to be setup early
Prevent the SMP scheduler related notifiers to be executed before the smp scheduler is initialized and install them early. This is a preparatory change for further consolidation of the hotplug notifier maze. Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Acked-by: Peter Zijlstra <peterz@infradead.org> Cc: rt@linutronix.de Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/sched')
-rw-r--r--kernel/sched/core.c59
1 file changed, 36 insertions, 23 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4df9aaae27a2..328502c9af00 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5195,6 +5195,8 @@ out:
5195 5195
5196#ifdef CONFIG_SMP 5196#ifdef CONFIG_SMP
5197 5197
5198static bool sched_smp_initialized __read_mostly;
5199
5198#ifdef CONFIG_NUMA_BALANCING 5200#ifdef CONFIG_NUMA_BALANCING
5199/* Migrate current task p to target_cpu */ 5201/* Migrate current task p to target_cpu */
5200int migrate_task_to(struct task_struct *p, int target_cpu) 5202int migrate_task_to(struct task_struct *p, int target_cpu)
@@ -5513,25 +5515,6 @@ int sched_cpu_starting(unsigned int cpu)
5513 return 0; 5515 return 0;
5514} 5516}
5515 5517
5516static int __init migration_init(void)
5517{
5518 void *cpu = (void *)(long)smp_processor_id();
5519 int err;
5520
5521 /* Initialize migration for the boot CPU */
5522 err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
5523 BUG_ON(err == NOTIFY_BAD);
5524 migration_call(&migration_notifier, CPU_ONLINE, cpu);
5525 register_cpu_notifier(&migration_notifier);
5526
5527 /* Register cpu active notifiers */
5528 cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
5529 cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
5530
5531 return 0;
5532}
5533early_initcall(migration_init);
5534
5535static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */ 5518static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */
5536 5519
5537#ifdef CONFIG_SCHED_DEBUG 5520#ifdef CONFIG_SCHED_DEBUG
@@ -6711,6 +6694,9 @@ static int sched_domains_numa_masks_update(struct notifier_block *nfb,
6711{ 6694{
6712 int cpu = (long)hcpu; 6695 int cpu = (long)hcpu;
6713 6696
6697 if (!sched_smp_initialized)
6698 return NOTIFY_DONE;
6699
6714 switch (action & ~CPU_TASKS_FROZEN) { 6700 switch (action & ~CPU_TASKS_FROZEN) {
6715 case CPU_ONLINE: 6701 case CPU_ONLINE:
6716 sched_domains_numa_masks_set(cpu); 6702 sched_domains_numa_masks_set(cpu);
@@ -7129,6 +7115,9 @@ static int num_cpus_frozen; /* used to mark begin/end of suspend/resume */
7129static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action, 7115static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
7130 void *hcpu) 7116 void *hcpu)
7131{ 7117{
7118 if (!sched_smp_initialized)
7119 return NOTIFY_DONE;
7120
7132 switch (action) { 7121 switch (action) {
7133 case CPU_ONLINE_FROZEN: 7122 case CPU_ONLINE_FROZEN:
7134 case CPU_DOWN_FAILED_FROZEN: 7123 case CPU_DOWN_FAILED_FROZEN:
@@ -7169,6 +7158,9 @@ static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
7169 bool overflow; 7158 bool overflow;
7170 int cpus; 7159 int cpus;
7171 7160
7161 if (!sched_smp_initialized)
7162 return NOTIFY_DONE;
7163
7172 switch (action) { 7164 switch (action) {
7173 case CPU_DOWN_PREPARE: 7165 case CPU_DOWN_PREPARE:
7174 rcu_read_lock_sched(); 7166 rcu_read_lock_sched();
@@ -7216,10 +7208,6 @@ void __init sched_init_smp(void)
7216 cpumask_set_cpu(smp_processor_id(), non_isolated_cpus); 7208 cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
7217 mutex_unlock(&sched_domains_mutex); 7209 mutex_unlock(&sched_domains_mutex);
7218 7210
7219 hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE);
7220 hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
7221 hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
7222
7223 init_hrtick(); 7211 init_hrtick();
7224 7212
7225 /* Move init over to a non-isolated CPU */ 7213 /* Move init over to a non-isolated CPU */
@@ -7230,7 +7218,32 @@ void __init sched_init_smp(void)
7230 7218
7231 init_sched_rt_class(); 7219 init_sched_rt_class();
7232 init_sched_dl_class(); 7220 init_sched_dl_class();
7221 sched_smp_initialized = true;
7233} 7222}
7223
7224static int __init migration_init(void)
7225{
7226 void *cpu = (void *)(long)smp_processor_id();
7227 int err;
7228
7229 /* Initialize migration for the boot CPU */
7230 err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
7231 BUG_ON(err == NOTIFY_BAD);
7232 migration_call(&migration_notifier, CPU_ONLINE, cpu);
7233 register_cpu_notifier(&migration_notifier);
7234
7235 /* Register cpu active notifiers */
7236 cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
7237 cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
7238
7239 hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE);
7240 hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
7241 hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
7242
7243 return 0;
7244}
7245early_initcall(migration_init);
7246
7234#else 7247#else
7235void __init sched_init_smp(void) 7248void __init sched_init_smp(void)
7236{ 7249{