author     Gautham R Shenoy <ego@in.ibm.com>    2008-01-25 15:08:02 -0500
committer  Ingo Molnar <mingo@elte.hu>          2008-01-25 15:08:02 -0500
commit     95402b3829010fe1e208f44e4a158ccade88969a (patch)
tree       3b9895b47623b4673e3c11121980e5171af76bbe /kernel
parent     86ef5c9a8edd78e6bf92879f32329d89b2d55b5a (diff)
cpu-hotplug: replace per-subsystem mutexes with get_online_cpus()
This patch converts the known per-subsystem hotplug mutexes to
get_online_cpus()/put_online_cpus(). It also eliminates the
CPU_LOCK_ACQUIRE and CPU_LOCK_RELEASE hotplug notification events.
Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
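In practice, the conversion means that a subsystem which used to take its own
mutex from the CPU_LOCK_ACQUIRE/CPU_LOCK_RELEASE notifier events now brackets
its hotplug-sensitive region directly. A minimal sketch of the resulting
pattern (the helper name below is illustrative only, not part of this patch):

    #include <linux/cpu.h>

    /* Hypothetical example: walk the online CPUs without racing with hotplug. */
    static void example_walk_online_cpus(void)
    {
    	int cpu;

    	get_online_cpus();	/* pin the current set of online CPUs */
    	for_each_online_cpu(cpu) {
    		/* per-CPU work that must not race with cpu_up()/cpu_down() */
    	}
    	put_online_cpus();	/* allow CPU hotplug to proceed again */
    }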
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/cpu.c        |  4
-rw-r--r--   kernel/sched.c      | 25
-rw-r--r--   kernel/workqueue.c  | 35
3 files changed, 24 insertions(+), 40 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index b0c4152995f8..e0d3a4f56ecb 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -218,7 +218,6 @@ static int _cpu_down(unsigned int cpu, int tasks_frozen)
 		return -EINVAL;
 
 	cpu_hotplug_begin();
-	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_ACQUIRE, hcpu);
 	err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
 					hcpu, -1, &nr_calls);
 	if (err == NOTIFY_BAD) {
@@ -271,7 +270,6 @@ out_thread:
 out_allowed:
 	set_cpus_allowed(current, old_allowed);
 out_release:
-	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_RELEASE, hcpu);
 	cpu_hotplug_done();
 	return err;
 }
@@ -302,7 +300,6 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
 		return -EINVAL;
 
 	cpu_hotplug_begin();
-	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_ACQUIRE, hcpu);
 	ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE | mod, hcpu,
 					-1, &nr_calls);
 	if (ret == NOTIFY_BAD) {
@@ -326,7 +323,6 @@ out_notify:
 	if (ret != 0)
 		__raw_notifier_call_chain(&cpu_chain,
 				CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
-	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_RELEASE, hcpu);
 	cpu_hotplug_done();
 
 	return ret;
diff --git a/kernel/sched.c b/kernel/sched.c
index 672aa68bfeac..c0e2db683e29 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -439,7 +439,6 @@ struct rq {
 };
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
-static DEFINE_MUTEX(sched_hotcpu_mutex);
 
 static inline void check_preempt_curr(struct rq *rq, struct task_struct *p)
 {
@@ -4546,13 +4545,13 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
 	struct task_struct *p;
 	int retval;
 
-	mutex_lock(&sched_hotcpu_mutex);
+	get_online_cpus();
 	read_lock(&tasklist_lock);
 
 	p = find_process_by_pid(pid);
 	if (!p) {
 		read_unlock(&tasklist_lock);
-		mutex_unlock(&sched_hotcpu_mutex);
+		put_online_cpus();
 		return -ESRCH;
 	}
 
@@ -4592,7 +4591,7 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
 	}
 out_unlock:
 	put_task_struct(p);
-	mutex_unlock(&sched_hotcpu_mutex);
+	put_online_cpus();
 	return retval;
 }
 
@@ -4649,7 +4648,7 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask)
 	struct task_struct *p;
 	int retval;
 
-	mutex_lock(&sched_hotcpu_mutex);
+	get_online_cpus();
 	read_lock(&tasklist_lock);
 
 	retval = -ESRCH;
@@ -4665,7 +4664,7 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask)
 
 out_unlock:
 	read_unlock(&tasklist_lock);
-	mutex_unlock(&sched_hotcpu_mutex);
+	put_online_cpus();
 
 	return retval;
 }
@@ -5625,9 +5624,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 	struct rq *rq;
 
 	switch (action) {
-	case CPU_LOCK_ACQUIRE:
-		mutex_lock(&sched_hotcpu_mutex);
-		break;
 
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
@@ -5697,9 +5693,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		spin_unlock_irq(&rq->lock);
 		break;
 #endif
-	case CPU_LOCK_RELEASE:
-		mutex_unlock(&sched_hotcpu_mutex);
-		break;
 	}
 	return NOTIFY_OK;
 }
@@ -6655,10 +6648,10 @@ static int arch_reinit_sched_domains(void)
 {
 	int err;
 
-	mutex_lock(&sched_hotcpu_mutex);
+	get_online_cpus();
 	detach_destroy_domains(&cpu_online_map);
 	err = arch_init_sched_domains(&cpu_online_map);
-	mutex_unlock(&sched_hotcpu_mutex);
+	put_online_cpus();
 
 	return err;
 }
@@ -6769,12 +6762,12 @@ void __init sched_init_smp(void)
 {
 	cpumask_t non_isolated_cpus;
 
-	mutex_lock(&sched_hotcpu_mutex);
+	get_online_cpus();
 	arch_init_sched_domains(&cpu_online_map);
 	cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map);
 	if (cpus_empty(non_isolated_cpus))
 		cpu_set(smp_processor_id(), non_isolated_cpus);
-	mutex_unlock(&sched_hotcpu_mutex);
+	put_online_cpus();
 	/* XXX: Theoretical race here - CPU may be hotplugged now */
 	hotcpu_notifier(update_sched_domains, 0);
 
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 8db0b597509e..52db48e7f6e7 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -67,9 +67,8 @@ struct workqueue_struct {
 #endif
 };
 
-/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
-   threads to each one as cpus come/go. */
-static DEFINE_MUTEX(workqueue_mutex);
+/* Serializes the accesses to the list of workqueues. */
+static DEFINE_SPINLOCK(workqueue_lock);
 static LIST_HEAD(workqueues);
 
 static int singlethread_cpu __read_mostly;
@@ -592,8 +591,6 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
  * Returns zero on success.
  * Returns -ve errno on failure.
  *
- * Appears to be racy against CPU hotplug.
- *
  * schedule_on_each_cpu() is very slow.
  */
 int schedule_on_each_cpu(work_func_t func)
@@ -605,7 +602,7 @@ int schedule_on_each_cpu(work_func_t func)
 	if (!works)
 		return -ENOMEM;
 
-	preempt_disable();		/* CPU hotplug */
+	get_online_cpus();
 	for_each_online_cpu(cpu) {
 		struct work_struct *work = per_cpu_ptr(works, cpu);
 
@@ -613,8 +610,8 @@ int schedule_on_each_cpu(work_func_t func)
 		set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
 		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
 	}
-	preempt_enable();
 	flush_workqueue(keventd_wq);
+	put_online_cpus();
 	free_percpu(works);
 	return 0;
 }
@@ -750,8 +747,10 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
 		err = create_workqueue_thread(cwq, singlethread_cpu);
 		start_workqueue_thread(cwq, -1);
 	} else {
-		mutex_lock(&workqueue_mutex);
+		get_online_cpus();
+		spin_lock(&workqueue_lock);
 		list_add(&wq->list, &workqueues);
+		spin_unlock(&workqueue_lock);
 
 		for_each_possible_cpu(cpu) {
 			cwq = init_cpu_workqueue(wq, cpu);
@@ -760,7 +759,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
 			err = create_workqueue_thread(cwq, cpu);
 			start_workqueue_thread(cwq, cpu);
 		}
-		mutex_unlock(&workqueue_mutex);
+		put_online_cpus();
 	}
 
 	if (err) {
@@ -775,7 +774,7 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
 {
 	/*
 	 * Our caller is either destroy_workqueue() or CPU_DEAD,
-	 * workqueue_mutex protects cwq->thread
+	 * get_online_cpus() protects cwq->thread.
 	 */
 	if (cwq->thread == NULL)
 		return;
@@ -810,9 +809,11 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	struct cpu_workqueue_struct *cwq;
 	int cpu;
 
-	mutex_lock(&workqueue_mutex);
+	get_online_cpus();
+	spin_lock(&workqueue_lock);
 	list_del(&wq->list);
-	mutex_unlock(&workqueue_mutex);
+	spin_unlock(&workqueue_lock);
+	put_online_cpus();
 
 	for_each_cpu_mask(cpu, *cpu_map) {
 		cwq = per_cpu_ptr(wq->cpu_wq, cpu);
@@ -835,13 +836,6 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 	action &= ~CPU_TASKS_FROZEN;
 
 	switch (action) {
-	case CPU_LOCK_ACQUIRE:
-		mutex_lock(&workqueue_mutex);
-		return NOTIFY_OK;
-
-	case CPU_LOCK_RELEASE:
-		mutex_unlock(&workqueue_mutex);
-		return NOTIFY_OK;
 
 	case CPU_UP_PREPARE:
 		cpu_set(cpu, cpu_populated_map);
@@ -854,7 +848,8 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 		case CPU_UP_PREPARE:
 			if (!create_workqueue_thread(cwq, cpu))
 				break;
-			printk(KERN_ERR "workqueue for %i failed\n", cpu);
+			printk(KERN_ERR "workqueue [%s] for %i failed\n",
+				wq->name, cpu);
 			return NOTIFY_BAD;
 
 		case CPU_ONLINE: