author		Gautham R Shenoy <ego@in.ibm.com>	2007-05-09 05:34:04 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-09 15:30:51 -0400
commit		5be9361cdff17fc76fa0c3e262ead94158555f16 (patch)
tree		c0fd48bc0a39a2ba8a71677467f3d4e164b40f65 /kernel
parent		baaca49f415b25fdbe2a8f3c22b39929e450fbfd (diff)
Eliminate lock_cpu_hotplug in kernel/sched.c
Eliminate lock_cpu_hotplug from kernel/sched.c and use sched_hotcpu_mutex
instead to postpone a hotplug event.
In the migration_call hotcpu callback function, take sched_hotcpu_mutex
while handling the CPU_LOCK_ACQUIRE event and release it while handling
the CPU_LOCK_RELEASE event.
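For context: the parent commit (baaca49f) defined the CPU_LOCK_ACQUIRE and
CPU_LOCK_RELEASE notifier events, which the hotplug core in kernel/cpu.c
sends to every hotcpu notifier before and after the actual plug/unplug work.
A minimal sketch of that bracketing, assuming the 2.6.21-era raw-notifier
API; do_unplug_work() is a hypothetical stand-in for the real tear-down
steps, not a kernel function:

	/* Sketch only, not verbatim kernel/cpu.c.  Each registered hotcpu
	 * notifier (e.g. migration_call() below) may take its subsystem
	 * mutex on CPU_LOCK_ACQUIRE and must drop it on CPU_LOCK_RELEASE,
	 * so the whole unplug runs with sched_hotcpu_mutex held and any
	 * concurrent mutex_lock(&sched_hotcpu_mutex) caller is postponed.
	 */
	static int _cpu_down(unsigned int cpu)
	{
		void *hcpu = (void *)(long)cpu;
		int err;

		raw_notifier_call_chain(&cpu_chain, CPU_LOCK_ACQUIRE, hcpu);
		err = do_unplug_work(cpu);	/* hypothetical helper */
		raw_notifier_call_chain(&cpu_chain, CPU_LOCK_RELEASE, hcpu);

		return err;
	}

With that bracketing in place, taking sched_hotcpu_mutex in the scheduler
paths below is enough to postpone a concurrent hotplug operation.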
[akpm@linux-foundation.org: fix deadlock]
Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c	28
1 file changed, 18 insertions(+), 10 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 66bd7ff23f18..fe1a9c2b855a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -305,6 +305,7 @@ struct rq {
 };
 
 static DEFINE_PER_CPU(struct rq, runqueues) ____cacheline_aligned_in_smp;
+static DEFINE_MUTEX(sched_hotcpu_mutex);
 
 static inline int cpu_of(struct rq *rq)
 {
@@ -4520,13 +4521,13 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
 	struct task_struct *p;
 	int retval;
 
-	lock_cpu_hotplug();
+	mutex_lock(&sched_hotcpu_mutex);
 	read_lock(&tasklist_lock);
 
 	p = find_process_by_pid(pid);
 	if (!p) {
 		read_unlock(&tasklist_lock);
-		unlock_cpu_hotplug();
+		mutex_unlock(&sched_hotcpu_mutex);
 		return -ESRCH;
 	}
 
@@ -4553,7 +4554,7 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
 
 out_unlock:
 	put_task_struct(p);
-	unlock_cpu_hotplug();
+	mutex_unlock(&sched_hotcpu_mutex);
 	return retval;
 }
 
@@ -4610,7 +4611,7 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask)
 	struct task_struct *p;
 	int retval;
 
-	lock_cpu_hotplug();
+	mutex_lock(&sched_hotcpu_mutex);
 	read_lock(&tasklist_lock);
 
 	retval = -ESRCH;
@@ -4626,7 +4627,7 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask)
 
 out_unlock:
 	read_unlock(&tasklist_lock);
-	unlock_cpu_hotplug();
+	mutex_unlock(&sched_hotcpu_mutex);
 	if (retval)
 		return retval;
 
@@ -5388,6 +5389,10 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 	struct rq *rq;
 
 	switch (action) {
+	case CPU_LOCK_ACQUIRE:
+		mutex_lock(&sched_hotcpu_mutex);
+		break;
+
 	case CPU_UP_PREPARE:
 		p = kthread_create(migration_thread, hcpu, "migration/%d", cpu);
 		if (IS_ERR(p))
@@ -5433,7 +5438,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		BUG_ON(rq->nr_running != 0);
 
 		/* No need to migrate the tasks: it was best-effort if
-		 * they didn't do lock_cpu_hotplug(). Just wake up
+		 * they didn't take sched_hotcpu_mutex. Just wake up
 		 * the requestors. */
 		spin_lock_irq(&rq->lock);
 		while (!list_empty(&rq->migration_queue)) {
@@ -5447,6 +5452,9 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		spin_unlock_irq(&rq->lock);
 		break;
 #endif
+	case CPU_LOCK_RELEASE:
+		mutex_unlock(&sched_hotcpu_mutex);
+		break;
 	}
 	return NOTIFY_OK;
 }
@@ -6822,10 +6830,10 @@ int arch_reinit_sched_domains(void)
 {
 	int err;
 
-	lock_cpu_hotplug();
+	mutex_lock(&sched_hotcpu_mutex);
 	detach_destroy_domains(&cpu_online_map);
 	err = arch_init_sched_domains(&cpu_online_map);
-	unlock_cpu_hotplug();
+	mutex_unlock(&sched_hotcpu_mutex);
 
 	return err;
 }
@@ -6930,12 +6938,12 @@ void __init sched_init_smp(void)
 {
 	cpumask_t non_isolated_cpus;
 
-	lock_cpu_hotplug();
+	mutex_lock(&sched_hotcpu_mutex);
 	arch_init_sched_domains(&cpu_online_map);
 	cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map);
 	if (cpus_empty(non_isolated_cpus))
 		cpu_set(smp_processor_id(), non_isolated_cpus);
-	unlock_cpu_hotplug();
+	mutex_unlock(&sched_hotcpu_mutex);
 	/* XXX: Theoretical race here - CPU may be hotplugged now */
 	hotcpu_notifier(update_sched_domains, 0);
 