author     Peter Zijlstra <peterz@infradead.org>    2017-08-23 06:52:32 -0400
committer  Ingo Molnar <mingo@kernel.org>           2017-08-25 05:06:32 -0400
commit     a1d14934ea4b9db816a8dbfeab1c3e7204a0d871 (patch)
tree       f2d6a33fdf8eed83fbe4e2d04c7989700aab2138 /kernel/workqueue.c
parent     e91498589746065e3ae95d9a00b068e525eec34f (diff)
workqueue/lockdep: 'Fix' flush_work() annotation
The flush_work() annotation as introduced by commit:

  e159489baa71 ("workqueue: relax lockdep annotation on flush_work()")

hits on the lockdep problem with recursive read locks.

The situation as described is:

  Work W1:              Work W2:        Task:

  ARR(Q)                ARR(Q)          flush_workqueue(Q)
  A(W1)                 A(W2)             A(Q)
    flush_work(W2)                        R(Q)
      A(W2)
      R(W2)
      if (special)
        A(Q)
      else
        ARR(Q)
    R(Q)

where: A - acquire, ARR - acquire-read-recursive, R - release.

Where under 'special' conditions we want to trigger a lock recursion
deadlock, but otherwise allow the flush_work().

The allowing is done by using recursive read locks (ARR), but lockdep
is broken for recursive stuff.

However, there appears to be no need to acquire the lock if we're not
'special', so if we remove the 'else' clause things become much simpler
and no longer need the recursion thing at all.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Tejun Heo <tj@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: boqun.feng@gmail.com
Cc: byungchul.park@lge.com
Cc: david@fromorbit.com
Cc: johannes@sipsolutions.net
Cc: oleg@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
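For context, the 'special' case the annotation is meant to catch looks roughly like the following. This is a minimal, hypothetical module sketch, not part of the patch; demo_wq, w1/w2 and demo_init are illustrative names. A work item on an ordered (max_active == 1) workqueue flushes another item queued behind it on the same workqueue, so the flush can never complete; with the stronger annotation below, lockdep reports the recursive acquisition of the workqueue's lockdep_map instead of the system silently hanging.

#include <linux/module.h>
#include <linux/workqueue.h>

/* Hypothetical names for illustration only. */
static struct workqueue_struct *demo_wq;
static struct work_struct w1, w2;

static void w2_fn(struct work_struct *work)
{
	/* Never runs: w1 never finishes. */
}

static void w1_fn(struct work_struct *work)
{
	/*
	 * w2 sits behind w1 on the same ordered workqueue and cannot
	 * start until w1 returns, so this flush never completes.  With
	 * the patched annotation, start_flush_work() acquires the
	 * workqueue's lockdep_map that process_one_work() already
	 * holds, and lockdep reports the recursion.
	 */
	flush_work(&w2);
}

static int __init demo_init(void)
{
	/* Ordered workqueue: saved_max_active == 1, i.e. the 'special' case. */
	demo_wq = alloc_ordered_workqueue("demo_wq", 0);
	if (!demo_wq)
		return -ENOMEM;

	INIT_WORK(&w1, w1_fn);
	INIT_WORK(&w2, w2_fn);
	queue_work(demo_wq, &w1);
	queue_work(demo_wq, &w2);
	return 0;
}
module_init(demo_init);
MODULE_LICENSE("GPL");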
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c | 20 +++++++++++---------
1 file changed, 11 insertions(+), 9 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index f128b3becfe1..8ad214dc15a9 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2091,7 +2091,7 @@ __acquires(&pool->lock)
 
 	spin_unlock_irq(&pool->lock);
 
-	lock_map_acquire_read(&pwq->wq->lockdep_map);
+	lock_map_acquire(&pwq->wq->lockdep_map);
 	lock_map_acquire(&lockdep_map);
 	crossrelease_hist_start(XHLOCK_PROC);
 	trace_workqueue_execute_start(work);
@@ -2826,16 +2826,18 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
 	spin_unlock_irq(&pool->lock);
 
 	/*
-	 * If @max_active is 1 or rescuer is in use, flushing another work
-	 * item on the same workqueue may lead to deadlock. Make sure the
-	 * flusher is not running on the same workqueue by verifying write
-	 * access.
+	 * Force a lock recursion deadlock when using flush_work() inside a
+	 * single-threaded or rescuer equipped workqueue.
+	 *
+	 * For single threaded workqueues the deadlock happens when the work
+	 * is after the work issuing the flush_work(). For rescuer equipped
+	 * workqueues the deadlock happens when the rescuer stalls, blocking
+	 * forward progress.
 	 */
-	if (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)
+	if (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer) {
 		lock_map_acquire(&pwq->wq->lockdep_map);
-	else
-		lock_map_acquire_read(&pwq->wq->lockdep_map);
-	lock_map_release(&pwq->wq->lockdep_map);
+		lock_map_release(&pwq->wq->lockdep_map);
+	}
 
 	return true;
 already_gone:
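For readers unfamiliar with the annotation primitives used above: lock_map_acquire()/lock_map_release() operate on a struct lockdep_map rather than a real lock, so pairing the acquire+release in start_flush_work() with the acquire held around the work function in process_one_work() is enough for lockdep to see the Q-inside-Q recursion and report it. A minimal, hypothetical sketch of that pattern follows, assuming a lockdep-enabled kernel (CONFIG_DEBUG_LOCK_ALLOC); demo_map, demo_key and the demo_* functions are illustrative names, not kernel symbols.

#include <linux/lockdep.h>

/* Hypothetical pseudo-lock standing in for a workqueue's lockdep_map. */
static struct lock_class_key demo_key;
static struct lockdep_map demo_map =
	STATIC_LOCKDEP_MAP_INIT("demo_map", &demo_key);

/* Analogue of start_flush_work() on a single-threaded/rescuer workqueue. */
static void demo_flush(void)
{
	lock_map_acquire(&demo_map);	/* lockdep sees Q taken again ...  */
	lock_map_release(&demo_map);
}

/* Analogue of process_one_work() running a work item. */
static void demo_run_work(void)
{
	lock_map_acquire(&demo_map);	/* ... while Q is already held here */
	demo_flush();			/* triggers a recursive-locking report */
	lock_map_release(&demo_map);
}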