path: root/kernel
author	Linus Torvalds <torvalds@linux-foundation.org>	2011-01-21 16:38:57 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-01-21 16:38:57 -0500
commit	5bf7a6503ff416214b9a53569677dbf07657e6fd
tree	0dc8e8dbc30a23d466a327bef05e78c571f2b8a3 /kernel
parent	0f5c2ac58f22fd41deaeeb45ee752d4ae55f0d01
parent	42c025f3de9042d9c9abd9a6f6205d1a0f4bcadf
Merge branch 'fixes-2.6.38' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq

* 'fixes-2.6.38' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  workqueue: note the nested NOT_RUNNING test in worker_clr_flags() isn't a noop
  workqueue: relax lockdep annotation on flush_work()
Diffstat (limited to 'kernel')
 kernel/workqueue.c | 20 +++++++++++++++++---
 1 file changed, 17 insertions(+), 3 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 8ee6ec82f88a..11869faa6819 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -768,7 +768,11 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
 
 	worker->flags &= ~flags;
 
-	/* if transitioning out of NOT_RUNNING, increment nr_running */
+	/*
+	 * If transitioning out of NOT_RUNNING, increment nr_running.  Note
+	 * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is mask
+	 * of multiple flags, not a single flag.
+	 */
 	if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
 		if (!(worker->flags & WORKER_NOT_RUNNING))
 			atomic_inc(get_gcwq_nr_running(gcwq->cpu));
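For context on this first hunk: WORKER_NOT_RUNNING is a mask covering several worker flags (in this era it includes, among others, WORKER_PREP and WORKER_CPU_INTENSIVE), so clearing one flag in the mask only re-enables nr_running accounting once no other flag in the mask remains set; that is why the nested test is not redundant. A minimal userspace sketch of that check, with made-up flag values standing in for the kernel's:

	#include <assert.h>
	#include <stdio.h>

	/* Illustrative stand-ins; the real flag values live in kernel/workqueue.c. */
	#define WORKER_PREP		0x01
	#define WORKER_CPU_INTENSIVE	0x02
	#define WORKER_NOT_RUNNING	(WORKER_PREP | WORKER_CPU_INTENSIVE)

	static int nr_running;

	/* Models worker_clr_flags(): bump nr_running only when the *last*
	 * NOT_RUNNING flag is cleared, not on every clear. */
	static void clr_flags(unsigned int *worker_flags, unsigned int flags)
	{
		unsigned int oflags = *worker_flags;

		*worker_flags &= ~flags;

		if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
			if (!(*worker_flags & WORKER_NOT_RUNNING))
				nr_running++;
	}

	int main(void)
	{
		unsigned int flags = WORKER_PREP | WORKER_CPU_INTENSIVE;

		clr_flags(&flags, WORKER_PREP);		/* CPU_INTENSIVE still set */
		assert(nr_running == 0);
		clr_flags(&flags, WORKER_CPU_INTENSIVE);	/* mask now fully clear */
		assert(nr_running == 1);
		printf("nr_running = %d\n", nr_running);
		return 0;
	}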
@@ -1840,7 +1844,7 @@ __acquires(&gcwq->lock)
 	spin_unlock_irq(&gcwq->lock);
 
 	work_clear_pending(work);
-	lock_map_acquire(&cwq->wq->lockdep_map);
+	lock_map_acquire_read(&cwq->wq->lockdep_map);
 	lock_map_acquire(&lockdep_map);
 	trace_workqueue_execute_start(work);
 	f(work);
@@ -2384,8 +2388,18 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
 	insert_wq_barrier(cwq, barr, work, worker);
 	spin_unlock_irq(&gcwq->lock);
 
-	lock_map_acquire(&cwq->wq->lockdep_map);
+	/*
+	 * If @max_active is 1 or rescuer is in use, flushing another work
+	 * item on the same workqueue may lead to deadlock.  Make sure the
+	 * flusher is not running on the same workqueue by verifying write
+	 * access.
+	 */
+	if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER)
+		lock_map_acquire(&cwq->wq->lockdep_map);
+	else
+		lock_map_acquire_read(&cwq->wq->lockdep_map);
 	lock_map_release(&cwq->wq->lockdep_map);
+
 	return true;
 already_gone:
 	spin_unlock_irq(&gcwq->lock);
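The effect of the last two hunks: work execution now takes wq->lockdep_map as a read acquisition, and flush_work() only keeps the stronger write acquisition for workqueues that can genuinely deadlock when flushed from another work item on the same queue (a single in-flight item, or a rescuer in use). A rough userspace model of that distinction, using a toy "map" type rather than the real lockdep API, so the difference between read and write acquisition is visible:

	#include <stdbool.h>
	#include <stdio.h>

	/* Toy stand-in for a lockdep map: only tracks whether it is already
	 * held in the current context.  Real lockdep builds a dependency
	 * graph; this just reproduces the recursion warning in miniature. */
	struct toy_map {
		const char *name;
		int held;
	};

	/* A write acquisition of a map that is already held looks like a
	 * self-deadlock; a read acquisition of an already-held map does not. */
	static void toy_acquire(struct toy_map *map, bool read)
	{
		if (map->held && !read)
			printf("lockdep-style warning: recursive locking on %s\n",
			       map->name);
		map->held++;
	}

	static void toy_release(struct toy_map *map)
	{
		map->held--;
	}

	/* Models the start_flush_work() change: only workqueues that can
	 * really deadlock (one in-flight item, or rescuer in use) keep the
	 * stronger write acquisition. */
	static void flush_on(struct toy_map *wq, bool single_slot)
	{
		toy_acquire(wq, !single_slot);
		toy_release(wq);
	}

	int main(void)
	{
		struct toy_map wq = { "wq->lockdep_map", 0 };

		toy_acquire(&wq, true);	/* process_one_work(): read acquire */
		flush_on(&wq, false);	/* ordinary workqueue: no warning */
		flush_on(&wq, true);	/* single-slot workqueue: still warns */
		toy_release(&wq);
		return 0;
	}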