-rw-r--r--  include/linux/lockdep.h |  3
-rw-r--r--  kernel/workqueue.c      | 14
2 files changed, 15 insertions, 2 deletions
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 71c09b26c759..9f19430c7d07 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -522,12 +522,15 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # ifdef CONFIG_PROVE_LOCKING
 #  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
+#  define lock_map_acquire_read(l)	lock_acquire(l, 0, 0, 2, 2, NULL, _THIS_IP_)
 # else
 #  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_)
+#  define lock_map_acquire_read(l)	lock_acquire(l, 0, 0, 2, 1, NULL, _THIS_IP_)
 # endif
 # define lock_map_release(l)		lock_release(l, 1, _THIS_IP_)
 #else
 # define lock_map_acquire(l)		do { } while (0)
+# define lock_map_acquire_read(l)	do { } while (0)
 # define lock_map_release(l)		do { } while (0)
 #endif
 
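The new lock_map_acquire_read() passes read=2 to lock_acquire(), i.e. a recursive read acquisition of the lockdep pseudo-lock, while lock_map_acquire() remains a write acquisition: many read holders can coexist, but a write acquisition of the same map still conflicts with them. A rough usage sketch of that pattern, not part of this patch (my_flush_map, my_flush_key, my_run_item and my_flush_all are hypothetical names; CONFIG_LOCKDEP is assumed so the annotations are compiled in):

/*
 * Minimal sketch only; names are hypothetical and not from the patch.
 */
#include <linux/lockdep.h>

static struct lock_class_key my_flush_key;
static struct lockdep_map my_flush_map =
	STATIC_LOCKDEP_MAP_INIT("my_flush_map", &my_flush_key);

static void my_run_item(void (*fn)(void))
{
	/*
	 * Recursive-read acquisition: several runners may hold the map
	 * concurrently without lockdep complaining, but a write
	 * acquisition of the same map from inside fn() is still
	 * reported as a potential deadlock.
	 */
	lock_map_acquire_read(&my_flush_map);
	fn();
	lock_map_release(&my_flush_map);
}

static void my_flush_all(void)
{
	/* Write acquisition: conflicts with any runner holding the map. */
	lock_map_acquire(&my_flush_map);
	lock_map_release(&my_flush_map);
	/* ... wait for all queued items here ... */
}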
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 8ee6ec82f88a..930c2390b77e 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1840,7 +1840,7 @@ __acquires(&gcwq->lock)
 	spin_unlock_irq(&gcwq->lock);
 
 	work_clear_pending(work);
-	lock_map_acquire(&cwq->wq->lockdep_map);
+	lock_map_acquire_read(&cwq->wq->lockdep_map);
 	lock_map_acquire(&lockdep_map);
 	trace_workqueue_execute_start(work);
 	f(work);
@@ -2384,8 +2384,18 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
 	insert_wq_barrier(cwq, barr, work, worker);
 	spin_unlock_irq(&gcwq->lock);
 
-	lock_map_acquire(&cwq->wq->lockdep_map);
+	/*
+	 * If @max_active is 1 or rescuer is in use, flushing another work
+	 * item on the same workqueue may lead to deadlock.  Make sure the
+	 * flusher is not running on the same workqueue by verifying write
+	 * access.
+	 */
+	if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER)
+		lock_map_acquire(&cwq->wq->lockdep_map);
+	else
+		lock_map_acquire_read(&cwq->wq->lockdep_map);
 	lock_map_release(&cwq->wq->lockdep_map);
+
 	return true;
 already_gone:
 	spin_unlock_irq(&gcwq->lock);
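On the flush side, the comment added above states the rule: with max_active == 1 or a rescuer (WQ_RESCUER), flushing another work item on the same workqueue can genuinely deadlock, so start_flush_work() keeps the write acquisition in those cases and downgrades to a read acquisition otherwise, matching the read acquisition process_one_work() now takes. A hedged sketch of the kind of deadlock the remaining write acquisition is meant to keep visible to lockdep (the module, names and setup below are hypothetical, not from the patch):

/*
 * Hypothetical reproducer: with max_active == 1, work_b cannot start
 * until work_a finishes, yet work_a waits on work_b.  The write
 * acquisition in start_flush_work() keeps this case visible to lockdep
 * even though ordinary flushes are now read acquisitions.
 */
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;
static struct work_struct work_a, work_b;

static void work_b_fn(struct work_struct *work)
{
}

static void work_a_fn(struct work_struct *work)
{
	queue_work(my_wq, &work_b);
	flush_work(&work_b);	/* deadlocks: work_b needs the slot work_a occupies */
}

static int __init my_init(void)
{
	my_wq = alloc_workqueue("my_wq", 0, 1);	/* max_active == 1 */
	if (!my_wq)
		return -ENOMEM;
	INIT_WORK(&work_a, work_a_fn);
	INIT_WORK(&work_b, work_b_fn);
	queue_work(my_wq, &work_a);
	return 0;
}
module_init(my_init);
MODULE_LICENSE("GPL");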