| author | Gautham R Shenoy <ego@in.ibm.com> | 2008-01-25 15:08:02 -0500 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2008-01-25 15:08:02 -0500 |
| commit | 95402b3829010fe1e208f44e4a158ccade88969a (patch) | |
| tree | 3b9895b47623b4673e3c11121980e5171af76bbe /kernel/sched.c | |
| parent | 86ef5c9a8edd78e6bf92879f32329d89b2d55b5a (diff) | |
cpu-hotplug: replace per-subsystem mutexes with get_online_cpus()
This patch converts the known per-subsystem mutexes to
get_online_cpus()/put_online_cpus(). It also eliminates the
CPU_LOCK_ACQUIRE and CPU_LOCK_RELEASE hotplug notification events.
Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
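
The conversion has the same shape in every affected subsystem, so it is worth seeing in isolation. The sketch below is illustrative rather than lifted from the patch: the my_subsys_* names are hypothetical, while get_online_cpus()/put_online_cpus() and the removed CPU_LOCK_* notifier events are the real interfaces involved.

```c
/*
 * Illustrative before/after of the conversion. The my_subsys_* names
 * are hypothetical; the real kernel/sched.c call sites are shown in
 * the diff below.
 */

/* Before: each subsystem kept its own hotplug mutex, which the CPU
 * hotplug core locked and unlocked on its behalf via the
 * CPU_LOCK_ACQUIRE and CPU_LOCK_RELEASE notifier events. */
static DEFINE_MUTEX(my_subsys_hotcpu_mutex);

static void my_subsys_walk_cpus_old(void)
{
	mutex_lock(&my_subsys_hotcpu_mutex);
	/* ... iterate cpu_online_map without racing cpu_up()/cpu_down() ... */
	mutex_unlock(&my_subsys_hotcpu_mutex);
}

/* After: readers take the global refcounted hotplug lock directly, and
 * the CPU_LOCK_* cases disappear from the subsystem's notifier. */
static void my_subsys_walk_cpus_new(void)
{
	get_online_cpus();	/* CPU hotplug is blocked while held */
	/* ... iterate cpu_online_map without racing cpu_up()/cpu_down() ... */
	put_online_cpus();
}
```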
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 25
1 file changed, 9 insertions(+), 16 deletions(-)
```diff
diff --git a/kernel/sched.c b/kernel/sched.c
index 672aa68bfeac..c0e2db683e29 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -439,7 +439,6 @@ struct rq {
 };
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
-static DEFINE_MUTEX(sched_hotcpu_mutex);
 
 static inline void check_preempt_curr(struct rq *rq, struct task_struct *p)
 {
@@ -4546,13 +4545,13 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
 	struct task_struct *p;
 	int retval;
 
-	mutex_lock(&sched_hotcpu_mutex);
+	get_online_cpus();
 	read_lock(&tasklist_lock);
 
 	p = find_process_by_pid(pid);
 	if (!p) {
 		read_unlock(&tasklist_lock);
-		mutex_unlock(&sched_hotcpu_mutex);
+		put_online_cpus();
 		return -ESRCH;
 	}
 
@@ -4592,7 +4591,7 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
 	}
 out_unlock:
 	put_task_struct(p);
-	mutex_unlock(&sched_hotcpu_mutex);
+	put_online_cpus();
 	return retval;
 }
 
@@ -4649,7 +4648,7 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask)
 	struct task_struct *p;
 	int retval;
 
-	mutex_lock(&sched_hotcpu_mutex);
+	get_online_cpus();
 	read_lock(&tasklist_lock);
 
 	retval = -ESRCH;
@@ -4665,7 +4664,7 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask)
 
 out_unlock:
 	read_unlock(&tasklist_lock);
-	mutex_unlock(&sched_hotcpu_mutex);
+	put_online_cpus();
 
 	return retval;
 }
@@ -5625,9 +5624,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 	struct rq *rq;
 
 	switch (action) {
-	case CPU_LOCK_ACQUIRE:
-		mutex_lock(&sched_hotcpu_mutex);
-		break;
 
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
@@ -5697,9 +5693,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		spin_unlock_irq(&rq->lock);
 		break;
 #endif
-	case CPU_LOCK_RELEASE:
-		mutex_unlock(&sched_hotcpu_mutex);
-		break;
 	}
 	return NOTIFY_OK;
 }
@@ -6655,10 +6648,10 @@ static int arch_reinit_sched_domains(void)
 {
 	int err;
 
-	mutex_lock(&sched_hotcpu_mutex);
+	get_online_cpus();
 	detach_destroy_domains(&cpu_online_map);
 	err = arch_init_sched_domains(&cpu_online_map);
-	mutex_unlock(&sched_hotcpu_mutex);
+	put_online_cpus();
 
 	return err;
 }
@@ -6769,12 +6762,12 @@ void __init sched_init_smp(void)
 {
 	cpumask_t non_isolated_cpus;
 
-	mutex_lock(&sched_hotcpu_mutex);
+	get_online_cpus();
 	arch_init_sched_domains(&cpu_online_map);
 	cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map);
 	if (cpus_empty(non_isolated_cpus))
 		cpu_set(smp_processor_id(), non_isolated_cpus);
-	mutex_unlock(&sched_hotcpu_mutex);
+	put_online_cpus();
 	/* XXX: Theoretical race here - CPU may be hotplugged now */
 	hotcpu_notifier(update_sched_domains, 0);
 
```
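
Why this is safe: get_online_cpus()/put_online_cpus() were introduced by the parent commit (86ef5c9a8edd) as a refcounted reader-side lock, so readers such as sched_setaffinity() only exclude hotplug writers, not one another, as sched_hotcpu_mutex did. A minimal sketch of that reader side, assuming the 2.6.25-era kernel/cpu.c design (field and variable names are approximations from memory, not part of this diff):

```c
/*
 * Simplified sketch of the refcount-based reader side, modeled on the
 * 2.6.25-era kernel/cpu.c. Names and details are approximations.
 */
static struct {
	struct task_struct *active_writer;	/* task inside cpu_up()/cpu_down(), if any */
	struct mutex lock;			/* protects refcount */
	int refcount;				/* number of active readers */
} cpu_hotplug;

void get_online_cpus(void)
{
	might_sleep();
	/* The hotplug writer may call reader-side code itself; let it
	 * recurse instead of deadlocking against its own exclusion. */
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	cpu_hotplug.refcount++;
	mutex_unlock(&cpu_hotplug.lock);
}

void put_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	/* A pending writer sleeps until the last reader drops out. */
	if (!--cpu_hotplug.refcount && cpu_hotplug.active_writer)
		wake_up_process(cpu_hotplug.active_writer);
	mutex_unlock(&cpu_hotplug.lock);
}
```

The mutex is held only long enough to bump the refcount, so the scheduler's read sections above block hotplug for their duration without serializing against each other, and without the CPU_LOCK_ACQUIRE/CPU_LOCK_RELEASE notifier machinery that previously held a mutex across an entire hotplug operation.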