author     Stephen Boyd <sboyd@codeaurora.org>   2012-04-20 20:28:50 -0400
committer  Tejun Heo <tj@kernel.org>             2012-04-23 14:06:42 -0400
commit     0976dfc1d0cd80a4e9dfaf87bd8744612bde475a
tree       0af2d79070fcb0a510187eb780db796c5702df20 /kernel/workqueue.c
parent     f5b2552b4ebbeadcadde1532d7bbd3f850719046
workqueue: Catch more locking problems with flush_work()
If a workqueue is flushed with flush_work() lockdep checking can
be circumvented. For example:

  static DEFINE_MUTEX(mutex);

  static void my_work(struct work_struct *w)
  {
          mutex_lock(&mutex);
          mutex_unlock(&mutex);
  }

  static DECLARE_WORK(work, my_work);

  static int __init start_test_module(void)
  {
          schedule_work(&work);
          return 0;
  }
  module_init(start_test_module);

  static void __exit stop_test_module(void)
  {
          mutex_lock(&mutex);
          flush_work(&work);
          mutex_unlock(&mutex);
  }
  module_exit(stop_test_module);

would not always print a warning when flush_work() was called.
In this trivial example nothing could go wrong since we are
guaranteed module_init() and module_exit() don't run concurrently,
but if the work item is scheduled asynchronously we could have a
scenario where the work item is running just at the time flush_work()
is called, resulting in a classic ABBA locking problem.

Add a lockdep hint by acquiring and releasing the work item
lockdep_map in flush_work() so that we always catch this potential
deadlock scenario.

Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
Reviewed-by: Yong Zhang <yong.zhang0@gmail.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
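The annotation works because it makes the flushing path visible to lockdep
in the same terms as the worker path. Below is an illustrative sketch only,
not kernel source: worker_side() and flushing_side() are hypothetical
helpers, and 'mutex', 'work' and my_work() refer to the example module
above. It assumes a CONFIG_LOCKDEP build, where struct work_struct carries
a lockdep_map and lock_map_acquire()/lock_map_release() are the usual
lockdep annotation helpers.

  #include <linux/workqueue.h>
  #include <linux/mutex.h>
  #include <linux/lockdep.h>

  /*
   * Worker side: roughly what the workqueue code does around the work
   * function, so lockdep records the order  work->lockdep_map -> mutex.
   */
  static void worker_side(struct work_struct *w)
  {
          lock_map_acquire(&w->lockdep_map);
          my_work(w);                     /* takes and drops 'mutex' */
          lock_map_release(&w->lockdep_map);
  }

  /*
   * Flushing side: with this patch flush_work() acquires and releases the
   * same lockdep_map, so lockdep records  mutex -> work->lockdep_map, the
   * opposite order, and reports the potential ABBA deadlock even when the
   * work item happens not to be running at that moment.
   */
  static void flushing_side(void)
  {
          mutex_lock(&mutex);
          flush_work(&work);
          mutex_unlock(&mutex);
  }

Because the acquire is followed immediately by a release, no lock is
actually held; the pair only records the dependency, which is why the
warning now fires on every flush_work() call made under the mutex rather
than only on unlucky timing.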
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c  3
1 file changed, 3 insertions, 0 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 66ec08de6dac..211eadb23323 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2509,6 +2509,9 @@ bool flush_work(struct work_struct *work)
 {
         struct wq_barrier barr;

+        lock_map_acquire(&work->lockdep_map);
+        lock_map_release(&work->lockdep_map);
+
         if (start_flush_work(work, &barr, true)) {
                 wait_for_completion(&barr.done);
                 destroy_work_on_stack(&barr.work);