path: root/kernel/workqueue.c
author	Tejun Heo <tj@kernel.org>	2009-11-17 17:06:20 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-11-17 20:40:33 -0500
commit	9398180097e359646d46083c3e079a54e20bee82 (patch)
tree	633933ddd86e3dba7c0970eea5768867849842c9	/kernel/workqueue.c
parent	e13193319d3a5545c82ed4b724bffd16f87873e3 (diff)
workqueue: fix race condition in schedule_on_each_cpu()
Commit 65a64464349883891e21e74af16c05d6e1eeb4e9 ("HWPOISON: Allow
schedule_on_each_cpu() from keventd"), which allows schedule_on_each_cpu()
to be called from keventd, added a race condition.  schedule_on_each_cpu()
may race with cpu hotplug and end up executing the function twice on a cpu.

Fix it by moving direct execution into the section protected with
get/put_online_cpus().  While at it, update the code so that direct
execution is done after works have been scheduled for all other cpus, and
drop the unnecessary cpu != orig test from the flush loop.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Andi Kleen <ak@linux.intel.com>
Acked-by: Oleg Nesterov <oleg@redhat.com>
Cc: Ingo Molnar <mingo@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
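For reference, the control flow of schedule_on_each_cpu() after this patch is
sketched below.  The declarations and the per-cpu allocation sit above the
hunk and are reconstructed from context rather than shown in the diff, so
treat them as a paraphrase, not a verbatim copy of the tree.

/*
 * Condensed sketch of schedule_on_each_cpu() after this patch.  The body
 * follows the hunk below; the prologue is reconstructed from context.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	int orig = -1;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	get_online_cpus();	/* hold off cpu hotplug for the whole sequence */

	/*
	 * When running in keventd, remember the current cpu so the work
	 * item for it can be called directly instead of being queued on
	 * the workqueue we are already running on.
	 */
	if (current_is_keventd())
		orig = raw_smp_processor_id();

	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		if (cpu != orig)
			schedule_work_on(cpu, work);
	}
	/* direct execution happens only after all other cpus are queued */
	if (orig >= 0)
		func(per_cpu_ptr(works, orig));

	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(works, cpu));

	put_online_cpus();
	free_percpu(works);
	return 0;
}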
Diffstat (limited to 'kernel/workqueue.c')
 kernel/workqueue.c | 28 +++++++++++++---------------
 1 file changed, 13 insertions(+), 15 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 12328147132c..67e526b6ae81 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -692,31 +692,29 @@ int schedule_on_each_cpu(work_func_t func)
 	if (!works)
 		return -ENOMEM;
 
+	get_online_cpus();
+
 	/*
-	 * when running in keventd don't schedule a work item on itself.
-	 * Can just call directly because the work queue is already bound.
-	 * This also is faster.
-	 * Make this a generic parameter for other workqueues?
+	 * When running in keventd don't schedule a work item on
+	 * itself.  Can just call directly because the work queue is
+	 * already bound.  This also is faster.
 	 */
-	if (current_is_keventd()) {
+	if (current_is_keventd())
 		orig = raw_smp_processor_id();
-		INIT_WORK(per_cpu_ptr(works, orig), func);
-		func(per_cpu_ptr(works, orig));
-	}
 
-	get_online_cpus();
 	for_each_online_cpu(cpu) {
 		struct work_struct *work = per_cpu_ptr(works, cpu);
 
-		if (cpu == orig)
-			continue;
 		INIT_WORK(work, func);
-		schedule_work_on(cpu, work);
-	}
-	for_each_online_cpu(cpu) {
 		if (cpu != orig)
-			flush_work(per_cpu_ptr(works, cpu));
+			schedule_work_on(cpu, work);
 	}
+	if (orig >= 0)
+		func(per_cpu_ptr(works, orig));
+
+	for_each_online_cpu(cpu)
+		flush_work(per_cpu_ptr(works, cpu));
+
 	put_online_cpus();
 	free_percpu(works);
 	return 0;
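For context, a minimal caller could look like the sketch below.  The work
function note_cpu and the initcall wrapper are illustrative names, not part
of this patch, and the sketch assumes built-in code rather than a module,
since schedule_on_each_cpu() is not necessarily exported to modules.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

/* Illustrative work function; runs once per online cpu. */
static void note_cpu(struct work_struct *work)
{
	pr_info("schedule_on_each_cpu: ran on cpu %d\n", raw_smp_processor_id());
}

static int __init note_all_cpus_init(void)
{
	/*
	 * Runs note_cpu() once on every online cpu and waits for all of
	 * them to finish; returns 0 on success or -ENOMEM if the per-cpu
	 * work items cannot be allocated.
	 */
	return schedule_on_each_cpu(note_cpu);
}
late_initcall(note_all_cpus_init);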