Diffstat (limited to 'kernel/workqueue.c')
 kernel/workqueue.c | 53 +++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 43 insertions(+), 10 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index ca937b0c3a96..ab3c0dc8c7ed 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2091,8 +2091,30 @@ __acquires(&pool->lock)
 
 	spin_unlock_irq(&pool->lock);
 
-	lock_map_acquire_read(&pwq->wq->lockdep_map);
+	lock_map_acquire(&pwq->wq->lockdep_map);
 	lock_map_acquire(&lockdep_map);
+	/*
+	 * Strictly speaking we should mark the invariant state without holding
+	 * any locks, that is, before these two lock_map_acquire()'s.
+	 *
+	 * However, that would result in:
+	 *
+	 *	A(W1)
+	 *	WFC(C)
+	 *		A(W1)
+	 *		C(C)
+	 *
+	 * Which would create W1->C->W1 dependencies, even though there is no
+	 * actual deadlock possible. There are two solutions, using a
+	 * read-recursive acquire on the work(queue) 'locks', but this will then
+	 * hit the lockdep limitation on recursive locks, or simply discard
+	 * these locks.
+	 *
+	 * AFAICT there is no possible deadlock scenario between the
+	 * flush_work() and complete() primitives (except for single-threaded
+	 * workqueues), so hiding them isn't a problem.
+	 */
+	lockdep_invariant_state(true);
 	trace_workqueue_execute_start(work);
 	worker->current_func(work);
 	/*
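
In the shorthand of the new comment, A() is a lockdep acquire, WFC() is wait_for_completion(), and C() is complete(); W1 stands for the workqueue's lockdep_map and C for the flush barrier's completion. A minimal, hypothetical sketch of the pattern that would produce the false W1->C->W1 cycle, assuming both work items are queued on the same multi-threaded workqueue (the names below are illustrative, not from the patch):

#include <linux/workqueue.h>

static void other_work_fn(struct work_struct *work) { }
static DECLARE_WORK(other_work, other_work_fn);

static void flushing_work_fn(struct work_struct *work)
{
	/* The worker already holds wq->lockdep_map here: A(W1). */
	flush_work(&other_work);	/* WFC(C); another worker does A(W1), then C(C) */
}
static DECLARE_WORK(flushing_work, flushing_work_fn);

No deadlock is possible because a second worker can always pick up other_work, which is why the patch hides these acquires from the crossrelease history via lockdep_invariant_state(true).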
@@ -2474,7 +2496,16 @@ static void insert_wq_barrier(struct pool_workqueue *pwq,
 	 */
 	INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
 	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
-	init_completion(&barr->done);
+
+	/*
+	 * Explicitly init the crosslock for wq_barrier::done, make its lock
+	 * key a subkey of the corresponding work. As a result we won't
+	 * build a dependency between wq_barrier::done and unrelated work.
+	 */
+	lockdep_init_map_crosslock((struct lockdep_map *)&barr->done.map,
+				   "(complete)wq_barr::done",
+				   target->lockdep_map.key, 1);
+	__init_completion(&barr->done);
 	barr->task = current;
 
 	/*
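
For reference, the barrier that flush_work() waits on is defined earlier in kernel/workqueue.c roughly as follows (quoted from memory of the surrounding source, not from this patch); wq_barrier_func() just fires the completion:

struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
	struct task_struct	*task;	/* purely informational */
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);

	complete(&barr->done);
}

The split matters because init_completion() would install a fresh lockdep map keyed to its single call site, collapsing every barrier into one lockdep class; lockdep_init_map_crosslock() keys the crosslock to the flushed work's own class instead, and __init_completion() then initializes the completion without touching that map.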
@@ -2815,16 +2846,18 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
 	spin_unlock_irq(&pool->lock);
 
 	/*
-	 * If @max_active is 1 or rescuer is in use, flushing another work
-	 * item on the same workqueue may lead to deadlock. Make sure the
-	 * flusher is not running on the same workqueue by verifying write
-	 * access.
+	 * Force a lock recursion deadlock when using flush_work() inside a
+	 * single-threaded or rescuer equipped workqueue.
+	 *
+	 * For single threaded workqueues the deadlock happens when the work
+	 * is after the work issuing the flush_work(). For rescuer equipped
+	 * workqueues the deadlock happens when the rescuer stalls, blocking
+	 * forward progress.
 	 */
-	if (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)
-		lock_map_acquire(&pwq->wq->lockdep_map);
-	else
-		lock_map_acquire_read(&pwq->wq->lockdep_map);
-	lock_map_release(&pwq->wq->lockdep_map);
+	if (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer) {
+		lock_map_acquire(&pwq->wq->lockdep_map);
+		lock_map_release(&pwq->wq->lockdep_map);
+	}
 
 	return true;
 already_gone:
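
The remaining acquire/release pair is there to catch the cases that can deadlock for real. A hypothetical sketch, assuming both work items are queued on the same workqueue created with alloc_ordered_workqueue() (names illustrative, not from the patch):

#include <linux/workqueue.h>

static void later_work_fn(struct work_struct *work) { }
static DECLARE_WORK(later_work, later_work_fn);

static void current_work_fn(struct work_struct *work)
{
	/*
	 * On an ordered workqueue, later_work cannot start until this
	 * function returns, yet this function waits for later_work: a real
	 * deadlock. process_one_work() holds wq->lockdep_map while calling
	 * us, so the lock_map_acquire() in start_flush_work() reports it
	 * as lock recursion.
	 */
	flush_work(&later_work);
}
static DECLARE_WORK(current_work, current_work_fn);

The rescuer case is analogous: if the rescuer thread itself ends up blocked in flush_work(), it can stall the very forward progress it exists to guarantee, and the same recursion report fires.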