Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 38 ++++++++++++++++++++++++++++----------
 1 file changed, 28 insertions(+), 10 deletions(-)
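
This patch converts kernel/sched.c from the global lock_cpu_hotplug()/unlock_cpu_hotplug() interface to a scheduler-private sched_hotcpu_mutex. The mutex is acquired and released from the scheduler's own hotplug notifier through the new CPU_LOCK_ACQUIRE and CPU_LOCK_RELEASE events, and the notifier switch statements gain the *_FROZEN variants of the existing events so the same handlers also run for hotplug transitions performed with tasks frozen (suspend/resume).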
diff --git a/kernel/sched.c b/kernel/sched.c
index 66bd7ff23f18..799d23b4e35d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -305,6 +305,7 @@ struct rq {
 };
 
 static DEFINE_PER_CPU(struct rq, runqueues) ____cacheline_aligned_in_smp;
+static DEFINE_MUTEX(sched_hotcpu_mutex);
 
 static inline int cpu_of(struct rq *rq)
 {
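
This first hunk adds the mutex itself. DEFINE_MUTEX() defines a statically initialized mutex at file scope, so no runtime init call is needed; every path below that used to serialize against CPU hotplug via lock_cpu_hotplug() now brackets its critical section with this mutex instead. A minimal sketch of the converted pattern:

	static DEFINE_MUTEX(sched_hotcpu_mutex);

	/* was: lock_cpu_hotplug(); ... unlock_cpu_hotplug(); */
	mutex_lock(&sched_hotcpu_mutex);
	/* ... scheduler state that must not race with CPU hotplug ... */
	mutex_unlock(&sched_hotcpu_mutex);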
@@ -4520,13 +4521,13 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
 	struct task_struct *p;
 	int retval;
 
-	lock_cpu_hotplug();
+	mutex_lock(&sched_hotcpu_mutex);
 	read_lock(&tasklist_lock);
 
 	p = find_process_by_pid(pid);
 	if (!p) {
 		read_unlock(&tasklist_lock);
-		unlock_cpu_hotplug();
+		mutex_unlock(&sched_hotcpu_mutex);
 		return -ESRCH;
 	}
 
@@ -4553,7 +4554,7 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
 
 out_unlock:
 	put_task_struct(p);
-	unlock_cpu_hotplug();
+	mutex_unlock(&sched_hotcpu_mutex);
 	return retval;
 }
 
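
Condensing the two sched_setaffinity() hunks: the locks nest in a fixed order, sched_hotcpu_mutex outside tasklist_lock, and are dropped in reverse order on the early -ESRCH path, while the success path holds the mutex to the end and releases its task reference via put_task_struct() at out_unlock. A condensed sketch of the resulting shape:

	mutex_lock(&sched_hotcpu_mutex);	/* exclude CPU hotplug first */
	read_lock(&tasklist_lock);		/* then pin the task list */
	p = find_process_by_pid(pid);
	if (!p) {
		read_unlock(&tasklist_lock);	/* unwind in reverse order */
		mutex_unlock(&sched_hotcpu_mutex);
		return -ESRCH;
	}
	/* ... apply the new mask, then: */
out_unlock:
	put_task_struct(p);
	mutex_unlock(&sched_hotcpu_mutex);
	return retval;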
@@ -4610,7 +4611,7 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask)
 	struct task_struct *p;
 	int retval;
 
-	lock_cpu_hotplug();
+	mutex_lock(&sched_hotcpu_mutex);
 	read_lock(&tasklist_lock);
 
 	retval = -ESRCH;
@@ -4626,7 +4627,7 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask)
 
 out_unlock:
 	read_unlock(&tasklist_lock);
-	unlock_cpu_hotplug();
+	mutex_unlock(&sched_hotcpu_mutex);
 	if (retval)
 		return retval;
 
@@ -5388,7 +5389,12 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 	struct rq *rq;
 
 	switch (action) {
+	case CPU_LOCK_ACQUIRE:
+		mutex_lock(&sched_hotcpu_mutex);
+		break;
+
 	case CPU_UP_PREPARE:
+	case CPU_UP_PREPARE_FROZEN:
 		p = kthread_create(migration_thread, hcpu, "migration/%d",cpu);
 		if (IS_ERR(p))
 			return NOTIFY_BAD;
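
CPU_LOCK_ACQUIRE and CPU_LOCK_RELEASE are new notifier events; migration_call() maps them straight onto the mutex. The caller side is not part of this diff, but the idea is presumably that the hotplug core in kernel/cpu.c raises these events around each transition, so every subsystem can take its own lock from its notifier instead of sharing one global hotplug lock. A hypothetical sketch of that caller side (an assumption, not shown here):

	/* Hypothetical sketch of kernel/cpu.c (not in this diff): */
	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_ACQUIRE, hcpu);
	/* ... CPU_UP_PREPARE, __cpu_up(), CPU_ONLINE, or the down path ... */
	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_RELEASE, hcpu);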
@@ -5402,12 +5408,14 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		break;
 
 	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
 		/* Strictly unneccessary, as first user will wake it. */
 		wake_up_process(cpu_rq(cpu)->migration_thread);
 		break;
 
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_UP_CANCELED:
+	case CPU_UP_CANCELED_FROZEN:
 		if (!cpu_rq(cpu)->migration_thread)
 			break;
 		/* Unbind it from offline cpu so it can run. Fall thru. */
@@ -5418,6 +5426,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		break;
 
 	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
 		migrate_live_tasks(cpu);
 		rq = cpu_rq(cpu);
 		kthread_stop(rq->migration_thread);
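
The *_FROZEN event variants distinguish hotplug transitions performed while user tasks are frozen (the suspend/resume path) from ordinary runtime hotplug. To the best of my recollection they are built by OR-ing a flag bit into the base event in include/linux/notifier.h, so a notifier that treats both cases identically could also mask the bit off instead of listing every variant; the values below are an assumption, not part of this diff:

	/* Assumed definitions from include/linux/notifier.h: */
	#define CPU_TASKS_FROZEN	0x0010
	#define CPU_ONLINE_FROZEN	(CPU_ONLINE | CPU_TASKS_FROZEN)
	#define CPU_UP_PREPARE_FROZEN	(CPU_UP_PREPARE | CPU_TASKS_FROZEN)
	#define CPU_DEAD_FROZEN		(CPU_DEAD | CPU_TASKS_FROZEN)

	/* Equivalent handling with the flag masked off: */
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		/* runs for both CPU_UP_PREPARE and CPU_UP_PREPARE_FROZEN */
		break;
	}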
@@ -5433,7 +5442,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		BUG_ON(rq->nr_running != 0);
 
 		/* No need to migrate the tasks: it was best-effort if
-		 * they didn't do lock_cpu_hotplug(). Just wake up
+		 * they didn't take sched_hotcpu_mutex. Just wake up
 		 * the requestors. */
 		spin_lock_irq(&rq->lock);
 		while (!list_empty(&rq->migration_queue)) {
@@ -5447,6 +5456,9 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		spin_unlock_irq(&rq->lock);
 		break;
 #endif
+	case CPU_LOCK_RELEASE:
+		mutex_unlock(&sched_hotcpu_mutex);
+		break;
 	}
 	return NOTIFY_OK;
 }
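
Note that the CPU_LOCK_RELEASE case sits after the #endif, matching CPU_LOCK_ACQUIRE at the top of the switch: the acquire/release pair stays balanced even on !CONFIG_HOTPLUG_CPU builds, where the CPU_UP_CANCELED and CPU_DEAD cases compile away.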
@@ -6822,10 +6834,10 @@ int arch_reinit_sched_domains(void)
 {
 	int err;
 
-	lock_cpu_hotplug();
+	mutex_lock(&sched_hotcpu_mutex);
 	detach_destroy_domains(&cpu_online_map);
 	err = arch_init_sched_domains(&cpu_online_map);
-	unlock_cpu_hotplug();
+	mutex_unlock(&sched_hotcpu_mutex);
 
 	return err;
 }
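
Holding sched_hotcpu_mutex across the detach_destroy_domains()/arch_init_sched_domains() pair keeps cpu_online_map stable for the whole rebuild (the hotplug path blocks on the same mutex via CPU_LOCK_ACQUIRE), so the new sched domains cannot be computed against a CPU map that changes midway.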
@@ -6904,14 +6916,20 @@ static int update_sched_domains(struct notifier_block *nfb,
 {
 	switch (action) {
 	case CPU_UP_PREPARE:
+	case CPU_UP_PREPARE_FROZEN:
 	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
 		detach_destroy_domains(&cpu_online_map);
 		return NOTIFY_OK;
 
 	case CPU_UP_CANCELED:
+	case CPU_UP_CANCELED_FROZEN:
 	case CPU_DOWN_FAILED:
+	case CPU_DOWN_FAILED_FROZEN:
 	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
 	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
 		/*
 		 * Fall through and re-initialise the domains.
 		 */
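
update_sched_domains() spells out each _FROZEN variant next to its base event rather than masking with CPU_TASKS_FROZEN (see the sketch after the migration_call() hunks); the two styles are equivalent here because every frozen event takes the same branch as its runtime twin.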
@@ -6930,12 +6948,12 @@ void __init sched_init_smp(void)
 {
 	cpumask_t non_isolated_cpus;
 
-	lock_cpu_hotplug();
+	mutex_lock(&sched_hotcpu_mutex);
 	arch_init_sched_domains(&cpu_online_map);
 	cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map);
 	if (cpus_empty(non_isolated_cpus))
 		cpu_set(smp_processor_id(), non_isolated_cpus);
-	unlock_cpu_hotplug();
+	mutex_unlock(&sched_hotcpu_mutex);
 	/* XXX: Theoretical race here - CPU may be hotplugged now */
 	hotcpu_notifier(update_sched_domains, 0);
 