author     Linus Torvalds <torvalds@linux-foundation.org>	2011-01-21 16:38:57 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>	2011-01-21 16:38:57 -0500
commit     5bf7a6503ff416214b9a53569677dbf07657e6fd (patch)
tree       0dc8e8dbc30a23d466a327bef05e78c571f2b8a3
parent     0f5c2ac58f22fd41deaeeb45ee752d4ae55f0d01 (diff)
parent     42c025f3de9042d9c9abd9a6f6205d1a0f4bcadf (diff)
Merge branch 'fixes-2.6.38' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
* 'fixes-2.6.38' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
workqueue: note the nested NOT_RUNNING test in worker_clr_flags() isn't a noop
workqueue: relax lockdep annotation on flush_work()
-rw-r--r--  include/linux/lockdep.h |  3
-rw-r--r--  kernel/workqueue.c      | 20
2 files changed, 20 insertions(+), 3 deletions(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index f638fd78d106..4aef1dda6406 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -514,12 +514,15 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # ifdef CONFIG_PROVE_LOCKING
 #  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
+#  define lock_map_acquire_read(l)	lock_acquire(l, 0, 0, 2, 2, NULL, _THIS_IP_)
 # else
 #  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_)
+#  define lock_map_acquire_read(l)	lock_acquire(l, 0, 0, 2, 1, NULL, _THIS_IP_)
 # endif
 # define lock_map_release(l)		lock_release(l, 1, _THIS_IP_)
 #else
 # define lock_map_acquire(l)		do { } while (0)
+# define lock_map_acquire_read(l)	do { } while (0)
 # define lock_map_release(l)		do { } while (0)
 #endif
 
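
Background note (not part of the patch): lock_map_acquire_read() differs from lock_map_acquire() only in the fourth argument passed to lock_acquire(), which tells lockdep whether the pseudo-lock is taken exclusively or as a recursive read. For reference, the hook's prototype from include/linux/lockdep.h of this era, with the argument meanings summarized as a sketch:

void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
		  int trylock, int read, int check,
		  struct lockdep_map *nest_lock, unsigned long ip);

/*
 * read  == 0: exclusive (write) acquisition, used by lock_map_acquire()
 * read  == 2: recursive read acquisition, used by lock_map_acquire_read();
 *             read acquirers do not conflict with one another
 * check == 2: full validation under CONFIG_PROVE_LOCKING
 * check == 1: lighter checking otherwise (matching the two #ifdef branches)
 */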
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 8ee6ec82f88a..11869faa6819 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -768,7 +768,11 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
 
 	worker->flags &= ~flags;
 
-	/* if transitioning out of NOT_RUNNING, increment nr_running */
+	/*
+	 * If transitioning out of NOT_RUNNING, increment nr_running.  Note
+	 * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is mask
+	 * of multiple flags, not a single flag.
+	 */
 	if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
 		if (!(worker->flags & WORKER_NOT_RUNNING))
 			atomic_inc(get_gcwq_nr_running(gcwq->cpu));
@@ -1840,7 +1844,7 @@ __acquires(&gcwq->lock)
 	spin_unlock_irq(&gcwq->lock);
 
 	work_clear_pending(work);
-	lock_map_acquire(&cwq->wq->lockdep_map);
+	lock_map_acquire_read(&cwq->wq->lockdep_map);
 	lock_map_acquire(&lockdep_map);
 	trace_workqueue_execute_start(work);
 	f(work);
@@ -2384,8 +2388,18 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
 	insert_wq_barrier(cwq, barr, work, worker);
 	spin_unlock_irq(&gcwq->lock);
 
-	lock_map_acquire(&cwq->wq->lockdep_map);
+	/*
+	 * If @max_active is 1 or rescuer is in use, flushing another work
+	 * item on the same workqueue may lead to deadlock.  Make sure the
+	 * flusher is not running on the same workqueue by verifying write
+	 * access.
+	 */
+	if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER)
+		lock_map_acquire(&cwq->wq->lockdep_map);
+	else
+		lock_map_acquire_read(&cwq->wq->lockdep_map);
 	lock_map_release(&cwq->wq->lockdep_map);
+
 	return true;
 already_gone:
 	spin_unlock_irq(&gcwq->lock);
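
Background note on the worker_clr_flags() hunk (not part of the patch): WORKER_NOT_RUNNING is a mask built from several worker flags (in this kernel it includes WORKER_PREP and WORKER_UNBOUND, among others), which is what makes the nested test meaningful. A minimal sketch, assuming a worker that has two of those bits set and clears only one:

	/* Sketch only; flag values chosen for illustration. */
	oflags        = WORKER_PREP | WORKER_UNBOUND;	/* two NOT_RUNNING bits set */
	flags         = WORKER_PREP;			/* clearing just one of them */
	worker->flags = oflags & ~flags;		/* WORKER_UNBOUND remains set */

	/* Outer test: a NOT_RUNNING bit is being cleared and one was set before. */
	if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
		/* Nested test: another NOT_RUNNING bit is still set, so this is
		 * false and nr_running is correctly left alone. */
		if (!(worker->flags & WORKER_NOT_RUNNING))
			atomic_inc(get_gcwq_nr_running(gcwq->cpu));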
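
Background note on the process_one_work()/start_flush_work() hunks (not part of the patch): a running work item now only read-acquires its workqueue's lockdep map, and start_flush_work() also read-acquires it unless max_active is 1 or the workqueue has a rescuer, the two cases where flushing another item on the same workqueue can genuinely deadlock. A minimal sketch of the pattern this stops flagging as a false positive; the workqueue and work items named below are hypothetical:

	/* Sketch only; my_wq, work_a and work_b are made-up names. */
	static struct workqueue_struct *my_wq;		/* e.g. alloc_workqueue("my_wq", 0, 0) */
	static struct work_struct work_a, work_b;	/* both queued on my_wq */

	static void work_a_fn(struct work_struct *unused)
	{
		/*
		 * Flushing another work item on the same workqueue from inside a
		 * running work item.  With max_active > 1 and no WQ_RESCUER this
		 * cannot deadlock (another worker can pick up work_b), and after
		 * this change both sides only read-acquire my_wq->lockdep_map, so
		 * lockdep no longer complains.  With max_active == 1 or WQ_RESCUER
		 * the flush still write-acquires the map and the real deadlock
		 * scenario is still reported.
		 */
		flush_work(&work_b);
	}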