author		Lai Jiangshan <laijs@cn.fujitsu.com>	2013-07-24 06:31:42 -0400
committer	Tejun Heo <tj@kernel.org>	2013-07-24 12:24:25 -0400
commit		c2fda509667b0fda4372a237f5a59ea4570b1627 (patch)
tree		27ba4e2f32cdd58e2f6f2cfa694f25afe7020e35 /kernel
parent		ad81f0545ef01ea651886dddac4bef6cec930092 (diff)
workqueue: allow work_on_cpu() to be called recursively
If @fn calls work_on_cpu() again, lockdep will complain:
> [ INFO: possible recursive locking detected ]
> 3.11.0-rc1-lockdep-fix-a #6 Not tainted
> ---------------------------------------------
> kworker/0:1/142 is trying to acquire lock:
> ((&wfc.work)){+.+.+.}, at: [<ffffffff81077100>] flush_work+0x0/0xb0
>
> but task is already holding lock:
> ((&wfc.work)){+.+.+.}, at: [<ffffffff81075dd9>] process_one_work+0x169/0x610
>
> other info that might help us debug this:
> Possible unsafe locking scenario:
>
> CPU0
> ----
> lock((&wfc.work));
> lock((&wfc.work));
>
> *** DEADLOCK ***
This is a false-positive lockdep report.  In this situation, the
two "wfc"s of the two nested work_on_cpu() calls are different
objects, both on the stack, so flush_work() can't actually deadlock.
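To make the scenario concrete, here is a minimal sketch of the
recursion described above; outer_fn() and inner_fn() are hypothetical
names for illustration, not code from this patch:

#include <linux/workqueue.h>

/* runs in a second kworker bound to the target CPU */
static long inner_fn(void *arg)
{
	return 0;
}

static long outer_fn(void *arg)
{
	/*
	 * Nested work_on_cpu(): its wfc lives on outer_fn()'s stack and
	 * is a different object from the caller's wfc, so no real
	 * deadlock is possible -- but both share one lockdep class.
	 */
	return work_on_cpu(0, inner_fn, NULL);
}

static long run_example(void)
{
	return work_on_cpu(0, outer_fn, NULL);	/* outer call */
}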
To fix this, we need to avoid the lockdep checking in this case,
thus we introduce an internal __flush_work() which skips the
lockdep annotation.
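The reason lockdep conflates the two distinct on-stack items is that
INIT_WORK_ONSTACK() registers every work item initialized at a given
source location against a single static lock_class_key, so all the
wfc.work instances created inside work_on_cpu() land in one lockdep
class.  An abridged sketch of the 3.11-era __INIT_WORK() from
include/linux/workqueue.h (the exact body varies by kernel version):

#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		static struct lock_class_key __key;			\
									\
		__init_work((_work), _onstack);				\
		lockdep_init_map(&(_work)->lockdep_map, #_work,		\
				 &__key, 0);				\
		/* ... list head and function pointer setup ... */	\
	} while (0)

work_on_cpu() therefore calls __flush_work() directly, bypassing the
lock_map_acquire()/lock_map_release() pair that flush_work() performs,
while flush_work() keeps the annotation for all other callers.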
tj: Minor comment adjustment.
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Reported-by: "Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com>
Reported-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/workqueue.c	32
1 file changed, 22 insertions(+), 10 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index f02c4a4a0c3c..55f5f0afcd0d 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2817,6 +2817,19 @@ already_gone:
 	return false;
 }
 
+static bool __flush_work(struct work_struct *work)
+{
+	struct wq_barrier barr;
+
+	if (start_flush_work(work, &barr)) {
+		wait_for_completion(&barr.done);
+		destroy_work_on_stack(&barr.work);
+		return true;
+	} else {
+		return false;
+	}
+}
+
 /**
  * flush_work - wait for a work to finish executing the last queueing instance
  * @work: the work to flush
@@ -2830,18 +2843,10 @@ already_gone:
  */
 bool flush_work(struct work_struct *work)
 {
-	struct wq_barrier barr;
-
 	lock_map_acquire(&work->lockdep_map);
 	lock_map_release(&work->lockdep_map);
 
-	if (start_flush_work(work, &barr)) {
-		wait_for_completion(&barr.done);
-		destroy_work_on_stack(&barr.work);
-		return true;
-	} else {
-		return false;
-	}
+	return __flush_work(work);
 }
 EXPORT_SYMBOL_GPL(flush_work);
 
@@ -4756,7 +4761,14 @@ long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
 
 	INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
 	schedule_work_on(cpu, &wfc.work);
-	flush_work(&wfc.work);
+
+	/*
+	 * The work item is on-stack and can't lead to deadlock through
+	 * flushing.  Use __flush_work() to avoid spurious lockdep warnings
+	 * when work_on_cpu()s are nested.
+	 */
+	__flush_work(&wfc.work);
+
 	return wfc.ret;
 }
 EXPORT_SYMBOL_GPL(work_on_cpu);