author		Tejun Heo <tj@kernel.org>	2012-08-08 12:38:42 -0400
committer	Tejun Heo <tj@kernel.org>	2012-08-13 19:27:55 -0400
commit		1265057fa02c7bed3b6d9ddc8a2048065a370364 (patch)
tree		b10e631ca6157103fcc71188e972b06e18c3570f /kernel/workqueue.c
parent		41f63c5359d14ca995172b8f6eaffd93f60fec54 (diff)
workqueue: fix CPU binding of flush_delayed_work[_sync]()
delayed_work encodes the workqueue to use and the last CPU in
delayed_work->work.data while it's on timer. The target CPU is
implicitly recorded as the CPU the timer is queued on and
delayed_work_timer_fn() queues delayed_work->work to the CPU it is
running on.

Unfortunately, this leaves flush_delayed_work[_sync]() no way to find
out which CPU the delayed_work was queued for when they try to
re-queue after killing the timer. Currently, it chooses the local CPU
flush is running on. This can unexpectedly move a delayed_work queued
on a specific CPU to another CPU and lead to subtle errors.

There isn't much point in trying to save several bytes in struct
delayed_work, which is already close to a hundred bytes on 64bit with
all debug options turned off. This patch adds delayed_work->cpu to
remember the CPU it's queued for.

Note that if the timer is migrated during CPU down, the work item
could be queued to the downed global_cwq after this change. As a
detached global_cwq behaves like an unbound one, this doesn't change
much for the delayed_work.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
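To make the failure mode concrete, below is a minimal, hypothetical
module-style sketch of the affected usage pattern; my_work_fn,
my_dwork, and my_example are illustrative names and are not part of
this patch. A delayed work is pinned to CPU 1, then flushed from some
other CPU; before this change, flush_delayed_work() would re-queue the
work on whatever CPU the flush happened to run on.

/*
 * Hypothetical sketch, not part of the patch: a delayed work queued
 * on a specific CPU, then flushed from elsewhere.
 */
#include <linux/printk.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

static void my_work_fn(struct work_struct *work)
{
	/* Expected to run on the CPU passed to queue_delayed_work_on(). */
	pr_info("delayed work ran on CPU %d\n", smp_processor_id());
}

static DECLARE_DELAYED_WORK(my_dwork, my_work_fn);

static void my_example(void)
{
	/* Pin the delayed work to CPU 1 with a one-second delay. */
	queue_delayed_work_on(1, system_wq, &my_dwork, HZ);

	/*
	 * Flushing kills the pending timer and re-queues the work
	 * immediately. With dwork->cpu recorded, it still lands on
	 * CPU 1; previously it landed on the flushing CPU.
	 */
	flush_delayed_work(&my_dwork);
}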
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	7
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 41ae2c0979fe..11723c5b2b20 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1319,7 +1319,7 @@ void delayed_work_timer_fn(unsigned long __data)
 	struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);
 
 	local_irq_disable();
-	__queue_work(WORK_CPU_UNBOUND, cwq->wq, &dwork->work);
+	__queue_work(dwork->cpu, cwq->wq, &dwork->work);
 	local_irq_enable();
 }
 EXPORT_SYMBOL_GPL(delayed_work_timer_fn);
@@ -1356,6 +1356,7 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
 
 	set_work_cwq(work, get_cwq(lcpu, wq), 0);
 
+	dwork->cpu = cpu;
 	timer->expires = jiffies + delay;
 
 	if (unlikely(cpu != WORK_CPU_UNBOUND))
@@ -2997,7 +2998,7 @@ bool flush_delayed_work(struct delayed_work *dwork)
 {
 	local_irq_disable();
 	if (del_timer_sync(&dwork->timer))
-		__queue_work(WORK_CPU_UNBOUND,
+		__queue_work(dwork->cpu,
 			     get_work_cwq(&dwork->work)->wq, &dwork->work);
 	local_irq_enable();
 	return flush_work(&dwork->work);
@@ -3020,7 +3021,7 @@ bool flush_delayed_work_sync(struct delayed_work *dwork)
 {
 	local_irq_disable();
 	if (del_timer_sync(&dwork->timer))
-		__queue_work(WORK_CPU_UNBOUND,
+		__queue_work(dwork->cpu,
 			     get_work_cwq(&dwork->work)->wq, &dwork->work);
 	local_irq_enable();
 	return flush_work_sync(&dwork->work);