author		Tejun Heo <tj@kernel.org>	2011-01-09 17:32:15 -0500
committer	Tejun Heo <tj@kernel.org>	2011-01-11 09:33:01 -0500
commit		e159489baa717dbae70f9903770a6a4990865887 (patch)
tree		6e2ae803ff6ebed558ebbe03bf3ae5bda1dd6ebc
parent		0c21e3aaf6ae85bee804a325aa29c325209180fd (diff)
workqueue: relax lockdep annotation on flush_work()
Currently, the lockdep annotation in flush_work() requires exclusive access
to the workqueue the target work is queued on, and triggers a warning if a
work tries to flush another work on the same workqueue; however, this is no
longer accurate, as workqueues can now execute multiple works concurrently.

This patch adds lock_map_acquire_read() and makes process_one_work() hold
read access to the workqueue while executing a work.  start_flush_work()
checks for write access if the concurrency level is one or the workqueue
has a rescuer (as only one execution resource - the rescuer - is guaranteed
to be available under memory pressure), and read access otherwise.

This better represents what's going on and removes spurious lockdep
warnings which are triggered by the fake dependency chain created through
flush_work().

* Peter pointed out that flushing another work from a WQ_MEM_RECLAIM wq
  breaks the forward progress guarantee under memory pressure.  The
  condition check has been updated accordingly.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: "Rafael J. Wysocki" <rjw@sisk.pl>
Tested-by: "Rafael J. Wysocki" <rjw@sisk.pl>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: stable@kernel.org
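As an illustration (a minimal, hypothetical module sketch; none of these
names come from the patch), this is the pattern that used to trigger the
spurious warning - one work item flushing a sibling on the same concurrent
workqueue:

	#include <linux/module.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *example_wq;	/* hypothetical */
	static struct work_struct work_a, work_b;

	static void work_b_fn(struct work_struct *work)
	{
		/* unrelated processing */
	}

	static void work_a_fn(struct work_struct *work)
	{
		/*
		 * Safe with max_active > 1, since work_b can run on another
		 * execution context; but the old exclusive annotation made
		 * lockdep see example_wq's lockdep_map taken recursively
		 * and report a false self-deadlock here.
		 */
		flush_work(&work_b);
	}

	static int __init example_init(void)
	{
		example_wq = alloc_workqueue("example", 0, 4);	/* max_active = 4 */
		if (!example_wq)
			return -ENOMEM;
		INIT_WORK(&work_a, work_a_fn);
		INIT_WORK(&work_b, work_b_fn);
		queue_work(example_wq, &work_b);
		queue_work(example_wq, &work_a);
		return 0;
	}
	module_init(example_init);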
-rw-r--r--	include/linux/lockdep.h	 3
-rw-r--r--	kernel/workqueue.c	14
2 files changed, 15 insertions(+), 2 deletions(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 71c09b26c759..9f19430c7d07 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -522,12 +522,15 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # ifdef CONFIG_PROVE_LOCKING
 #  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
+#  define lock_map_acquire_read(l)	lock_acquire(l, 0, 0, 2, 2, NULL, _THIS_IP_)
 # else
 #  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_)
+#  define lock_map_acquire_read(l)	lock_acquire(l, 0, 0, 2, 1, NULL, _THIS_IP_)
 # endif
 # define lock_map_release(l)		lock_release(l, 1, _THIS_IP_)
 #else
 # define lock_map_acquire(l)		do { } while (0)
+# define lock_map_acquire_read(l)	do { } while (0)
 # define lock_map_release(l)		do { } while (0)
 #endif
 
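For orientation (argument names paraphrased; treat this as an assumption
about the lockdep API of this era rather than a quote), the macros expand
onto lock_acquire() as:

	/* lock_acquire(map, subclass, trylock, read, check, nest_lock, ip) */
	lock_acquire(l, 0, 0, 2, 2, NULL, _THIS_IP_);

lock_map_acquire_read() passes read=2, a recursive-read acquisition that
may nest with other readers of the same map, while lock_map_acquire()
passes read=0, an exclusive acquisition.  The check argument selects the
validation depth, which is why the !CONFIG_PROVE_LOCKING variants pass 1
instead of 2.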
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 8ee6ec82f88a..930c2390b77e 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1840,7 +1840,7 @@ __acquires(&gcwq->lock)
 	spin_unlock_irq(&gcwq->lock);
 
 	work_clear_pending(work);
-	lock_map_acquire(&cwq->wq->lockdep_map);
+	lock_map_acquire_read(&cwq->wq->lockdep_map);
 	lock_map_acquire(&lockdep_map);
 	trace_workqueue_execute_start(work);
 	f(work);
@@ -2384,8 +2384,18 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
 	insert_wq_barrier(cwq, barr, work, worker);
 	spin_unlock_irq(&gcwq->lock);
 
-	lock_map_acquire(&cwq->wq->lockdep_map);
+	/*
+	 * If @max_active is 1 or rescuer is in use, flushing another work
+	 * item on the same workqueue may lead to deadlock.  Make sure the
+	 * flusher is not running on the same workqueue by verifying write
+	 * access.
+	 */
+	if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER)
+		lock_map_acquire(&cwq->wq->lockdep_map);
+	else
+		lock_map_acquire_read(&cwq->wq->lockdep_map);
 	lock_map_release(&cwq->wq->lockdep_map);
+
 	return true;
already_gone:
 	spin_unlock_irq(&gcwq->lock);
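The write-acquire branch above still catches the genuinely dangerous case:
with a single execution context, a work item flushing a sibling on the same
workqueue waits on the very context it occupies.  A hedged sketch of that
pattern (hypothetical names, not from the patch):

	static struct workqueue_struct *ordered_wq;	/* max_active == 1 */
	static struct work_struct first_work, second_work;

	static void first_fn(struct work_struct *work)
	{
		/*
		 * second_work cannot start until first_fn() returns, so
		 * this flush deadlocks for real - and the write-write
		 * lockdep annotation still reports it.
		 */
		flush_work(&second_work);
	}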