diff options
author | Johannes Berg <johannes.berg@intel.com> | 2018-08-22 05:49:04 -0400 |
---|---|---|
committer | Tejun Heo <tj@kernel.org> | 2018-08-22 11:31:38 -0400 |
commit | 87915adc3f0acdf03c776df42e308e5a155c19af (patch) | |
tree | cce568b654b2bb6e7d893655c9693b370dcd98bd /kernel | |
parent | d6e89786bed977f37f55ffca11e563f6d2b1e3b5 (diff) |
workqueue: re-add lockdep dependencies for flushing
In flush_work(), we need to create a lockdep dependency so that
the following scenario is appropriately tagged as a problem:
work_function()
{
mutex_lock(&mutex);
...
}
other_function()
{
mutex_lock(&mutex);
flush_work(&work); // or cancel_work_sync(&work);
}
This is a problem since the work might be running and be blocked
on trying to acquire the mutex.
Similarly, in flush_workqueue().
These were removed after cross-release partially caught these
problems, but now cross-release has been reverted anyway. IMHO the
removal was erroneous regardless, though, since lockdep should be
able to catch potential problems, not just actual ones, and
cross-release would only have caught the problem when actually
invoking wait_for_completion().
Fixes: fd1a5b04dfb8 ("workqueue: Remove now redundant lock acquisitions wrt. workqueue flushes")
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/workqueue.c | 8 |
1 file changed, 8 insertions(+), 0 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index aa520e715bbc..661184fcd503 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -2652,6 +2652,9 @@ void flush_workqueue(struct workqueue_struct *wq) | |||
2652 | if (WARN_ON(!wq_online)) | 2652 | if (WARN_ON(!wq_online)) |
2653 | return; | 2653 | return; |
2654 | 2654 | ||
2655 | lock_map_acquire(&wq->lockdep_map); | ||
2656 | lock_map_release(&wq->lockdep_map); | ||
2657 | |||
2655 | mutex_lock(&wq->mutex); | 2658 | mutex_lock(&wq->mutex); |
2656 | 2659 | ||
2657 | /* | 2660 | /* |
@@ -2905,6 +2908,11 @@ static bool __flush_work(struct work_struct *work, bool from_cancel) | |||
2905 | if (WARN_ON(!wq_online)) | 2908 | if (WARN_ON(!wq_online)) |
2906 | return false; | 2909 | return false; |
2907 | 2910 | ||
2911 | if (!from_cancel) { | ||
2912 | lock_map_acquire(&work->lockdep_map); | ||
2913 | lock_map_release(&work->lockdep_map); | ||
2914 | } | ||
2915 | |||
2908 | if (start_flush_work(work, &barr, from_cancel)) { | 2916 | if (start_flush_work(work, &barr, from_cancel)) { |
2909 | wait_for_completion(&barr.done); | 2917 | wait_for_completion(&barr.done); |
2910 | destroy_work_on_stack(&barr.work); | 2918 | destroy_work_on_stack(&barr.work); |