aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/workqueue.c
diff options
context:
space:
mode:
authorIngo Molnar <mingo@elte.hu>2006-12-18 14:05:09 -0500
committerLinus Torvalds <torvalds@woody.osdl.org>2006-12-21 03:20:01 -0500
commit9bfb18392ef586467277fa25d8f3a7a93611f6df (patch)
treee0bb6aab4983a50f54b247ac3db323dbb8d025a2 /kernel/workqueue.c
parent5ccac88eeb5659c716af8e695e2943509c80d172 (diff)
[PATCH] workqueue: fix schedule_on_each_cpu()
fix the schedule_on_each_cpu() implementation: __queue_work() is now stricter, hence set the work-pending bit before passing in the new work.

(found in the -rt tree, using Peter Zijlstra's files-lock scalability patchset)

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--kernel/workqueue.c8
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 742cbbe49bd..180a8ce1153 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -637,9 +637,11 @@ int schedule_on_each_cpu(work_func_t func)
 
 	mutex_lock(&workqueue_mutex);
 	for_each_online_cpu(cpu) {
-		INIT_WORK(per_cpu_ptr(works, cpu), func);
-		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
-				per_cpu_ptr(works, cpu));
+		struct work_struct *work = per_cpu_ptr(works, cpu);
+
+		INIT_WORK(work, func);
+		set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
+		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
 	}
 	mutex_unlock(&workqueue_mutex);
 	flush_workqueue(keventd_wq);