author		Gautham R Shenoy <ego@in.ibm.com>	2008-01-25 15:08:02 -0500
committer	Ingo Molnar <mingo@elte.hu>		2008-01-25 15:08:02 -0500
commit		95402b3829010fe1e208f44e4a158ccade88969a
tree		3b9895b47623b4673e3c11121980e5171af76bbe /kernel/workqueue.c
parent		86ef5c9a8edd78e6bf92879f32329d89b2d55b5a
cpu-hotplug: replace per-subsystem mutexes with get_online_cpus()
This patch converts the known per-subsystem mutexes to get_online_cpus()/put_online_cpus(). It also eliminates the CPU_LOCK_ACQUIRE and CPU_LOCK_RELEASE hotplug notification events.

Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	35
1 file changed, 15 insertions(+), 20 deletions(-)
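For illustration only (not part of the commit): the per-subsystem conversion the message describes follows roughly the before/after pattern sketched below. All foo_* names are hypothetical; get_online_cpus(), put_online_cpus(), DEFINE_MUTEX() and for_each_online_cpu() are the real kernel APIs of this era.

/*
 * Hedged before/after sketch of the conversion; foo_mutex,
 * foo_walk_cpus_old/_new and foo_do_cpu() are made-up names,
 * not taken from this patch.
 */
#include <linux/cpu.h>		/* get_online_cpus(), put_online_cpus() */
#include <linux/mutex.h>

static void foo_do_cpu(int cpu);	/* hypothetical per-cpu work */

/* Before: a private mutex, also taken by the subsystem's hotplug
 * notifier on CPU_LOCK_ACQUIRE and dropped on CPU_LOCK_RELEASE. */
static DEFINE_MUTEX(foo_mutex);

static void foo_walk_cpus_old(void)
{
	int cpu;

	mutex_lock(&foo_mutex);		/* keeps the online map stable */
	for_each_online_cpu(cpu)
		foo_do_cpu(cpu);
	mutex_unlock(&foo_mutex);
}

/* After: the refcounted global hotplug lock; the private mutex and
 * the two notifier events become unnecessary. */
static void foo_walk_cpus_new(void)
{
	int cpu;

	get_online_cpus();		/* no CPU can come or go in here */
	for_each_online_cpu(cpu)
		foo_do_cpu(cpu);
	put_online_cpus();
}

get_online_cpus() is reference counted: any number of readers may hold it concurrently, and cpu_up()/cpu_down() wait until the count drops to zero, so a subsystem no longer needs a mutex of its own just to keep the set of online CPUs stable.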
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 8db0b597509e..52db48e7f6e7 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -67,9 +67,8 @@ struct workqueue_struct {
 #endif
 };
 
-/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
-   threads to each one as cpus come/go. */
-static DEFINE_MUTEX(workqueue_mutex);
+/* Serializes the accesses to the list of workqueues. */
+static DEFINE_SPINLOCK(workqueue_lock);
 static LIST_HEAD(workqueues);
 
 static int singlethread_cpu __read_mostly;
@@ -592,8 +591,6 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
  * Returns zero on success.
  * Returns -ve errno on failure.
  *
- * Appears to be racy against CPU hotplug.
- *
  * schedule_on_each_cpu() is very slow.
  */
 int schedule_on_each_cpu(work_func_t func)
@@ -605,7 +602,7 @@ int schedule_on_each_cpu(work_func_t func)
 	if (!works)
 		return -ENOMEM;
 
-	preempt_disable();		/* CPU hotplug */
+	get_online_cpus();
 	for_each_online_cpu(cpu) {
 		struct work_struct *work = per_cpu_ptr(works, cpu);
 
@@ -613,8 +610,8 @@ int schedule_on_each_cpu(work_func_t func)
 		set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
 		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
 	}
-	preempt_enable();
 	flush_workqueue(keventd_wq);
+	put_online_cpus();
 	free_percpu(works);
 	return 0;
 }
@@ -750,8 +747,10 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
 		err = create_workqueue_thread(cwq, singlethread_cpu);
 		start_workqueue_thread(cwq, -1);
 	} else {
-		mutex_lock(&workqueue_mutex);
+		get_online_cpus();
+		spin_lock(&workqueue_lock);
 		list_add(&wq->list, &workqueues);
+		spin_unlock(&workqueue_lock);
 
 		for_each_possible_cpu(cpu) {
 			cwq = init_cpu_workqueue(wq, cpu);
@@ -760,7 +759,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
 			err = create_workqueue_thread(cwq, cpu);
 			start_workqueue_thread(cwq, cpu);
 		}
-		mutex_unlock(&workqueue_mutex);
+		put_online_cpus();
 	}
 
 	if (err) {
@@ -775,7 +774,7 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
 {
 	/*
 	 * Our caller is either destroy_workqueue() or CPU_DEAD,
-	 * workqueue_mutex protects cwq->thread
+	 * get_online_cpus() protects cwq->thread.
 	 */
 	if (cwq->thread == NULL)
 		return;
@@ -810,9 +809,11 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	struct cpu_workqueue_struct *cwq;
 	int cpu;
 
-	mutex_lock(&workqueue_mutex);
+	get_online_cpus();
+	spin_lock(&workqueue_lock);
 	list_del(&wq->list);
-	mutex_unlock(&workqueue_mutex);
+	spin_unlock(&workqueue_lock);
+	put_online_cpus();
 
 	for_each_cpu_mask(cpu, *cpu_map) {
 		cwq = per_cpu_ptr(wq->cpu_wq, cpu);
@@ -835,13 +836,6 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 	action &= ~CPU_TASKS_FROZEN;
 
 	switch (action) {
-	case CPU_LOCK_ACQUIRE:
-		mutex_lock(&workqueue_mutex);
-		return NOTIFY_OK;
-
-	case CPU_LOCK_RELEASE:
-		mutex_unlock(&workqueue_mutex);
-		return NOTIFY_OK;
 
 	case CPU_UP_PREPARE:
 		cpu_set(cpu, cpu_populated_map);
@@ -854,7 +848,8 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 		case CPU_UP_PREPARE:
 			if (!create_workqueue_thread(cwq, cpu))
 				break;
-			printk(KERN_ERR "workqueue for %i failed\n", cpu);
+			printk(KERN_ERR "workqueue [%s] for %i failed\n",
+				wq->name, cpu);
 			return NOTIFY_BAD;
 
 		case CPU_ONLINE: