Diffstat (limited to 'kernel/workqueue.c')
 kernel/workqueue.c | 23 +++++------------------
 1 file changed, 5 insertions(+), 18 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index a2dccfe1acec..1070b21ba4aa 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2491,15 +2491,8 @@ static void insert_wq_barrier(struct pool_workqueue *pwq,
 	INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
 	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
 
-	/*
-	 * Explicitly init the crosslock for wq_barrier::done, make its lock
-	 * key a subkey of the corresponding work. As a result we won't
-	 * build a dependency between wq_barrier::done and unrelated work.
-	 */
-	lockdep_init_map_crosslock((struct lockdep_map *)&barr->done.map,
-				   "(complete)wq_barr::done",
-				   target->lockdep_map.key, 1);
-	__init_completion(&barr->done);
+	init_completion_map(&barr->done, &target->lockdep_map);
+
 	barr->task = current;
 
 	/*
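
Everything crossrelease-specific collapses into one call: init_completion_map() classes the barrier completion under the flushed work's lockdep_map, so waiting on it is visible to lockdep as an acquisition of that map. A minimal sketch of the idea only, not the kernel's actual definition (the CONFIG_LOCKDEP guard and the done.map member are assumptions carried over from the removed lines above):

/* Sketch only: give the completion the caller's lock class, so a
 * wait_for_completion() on it is recorded like taking (m)'s map. */
#ifdef CONFIG_LOCKDEP
# define init_completion_map(x, m)					\
	do {								\
		lockdep_init_map((struct lockdep_map *)&(x)->map,	\
				 "(completion)" #x, (m)->key, 0);	\
		__init_completion(x);					\
	} while (0)
#else
# define init_completion_map(x, m) __init_completion(x)
#endif
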
@@ -2605,16 +2598,13 @@ void flush_workqueue(struct workqueue_struct *wq)
 	struct wq_flusher this_flusher = {
 		.list = LIST_HEAD_INIT(this_flusher.list),
 		.flush_color = -1,
-		.done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
+		.done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map),
 	};
 	int next_color;
 
 	if (WARN_ON(!wq_online))
 		return;
 
-	lock_map_acquire(&wq->lockdep_map);
-	lock_map_release(&wq->lockdep_map);
-
 	mutex_lock(&wq->mutex);
 
 	/*
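
With this_flusher.done initialized against wq->lockdep_map, the wait_for_completion() later in flush_workqueue() records the dependency at the point the flush actually blocks, which is what the deleted acquire/release pair only approximated. The canonical bug this keeps catching, as a hedged sketch (my_wq and my_work_fn are hypothetical names):

#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;	/* hypothetical */

static void my_work_fn(struct work_struct *work)
{
	/*
	 * Self-flush deadlock: this work runs with my_wq's lockdep_map
	 * held (process_one_work() acquires it around the callback),
	 * and flush_workqueue() now waits on a completion classed under
	 * that same map, so lockdep flags the recursion instead of the
	 * system just hanging.
	 */
	flush_workqueue(my_wq);
}
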
@@ -2877,9 +2867,6 @@ bool flush_work(struct work_struct *work)
 	if (WARN_ON(!wq_online))
 		return false;
 
-	lock_map_acquire(&work->lockdep_map);
-	lock_map_release(&work->lockdep_map);
-
 	if (start_flush_work(work, &barr)) {
 		wait_for_completion(&barr.done);
 		destroy_work_on_stack(&barr.work);
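
Same story for flush_work(): the barrier completion set up by insert_wq_barrier() above already carries the target work's lockdep_map, so the explicit acquire/release pair is redundant. The lock-inversion pattern it continues to detect, sketched with hypothetical my_lock / my_work:

#include <linux/mutex.h>
#include <linux/workqueue.h>

static DEFINE_MUTEX(my_lock);		/* hypothetical */
static struct work_struct my_work;	/* hypothetical */

static void my_work_fn(struct work_struct *work)
{
	mutex_lock(&my_lock);		/* the work item needs my_lock */
	mutex_unlock(&my_lock);
}

static void buggy_flush(void)
{
	/* assumes INIT_WORK(&my_work, my_work_fn) was done at init */
	mutex_lock(&my_lock);
	/*
	 * Deadlocks if my_work is pending: we hold my_lock and wait for
	 * a work that takes my_lock. The wait on barr.done (classed
	 * under my_work's lockdep_map) gives lockdep the
	 * my_lock -> work -> my_lock cycle.
	 */
	flush_work(&my_work);
	mutex_unlock(&my_lock);
}
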
@@ -4640,7 +4627,7 @@ static void rebind_workers(struct worker_pool *pool)
 		 * concurrency management. Note that when or whether
 		 * @worker clears REBOUND doesn't affect correctness.
 		 *
-		 * ACCESS_ONCE() is necessary because @worker->flags may be
+		 * WRITE_ONCE() is necessary because @worker->flags may be
 		 * tested without holding any lock in
 		 * wq_worker_waking_up(). Without it, NOT_RUNNING test may
 		 * fail incorrectly leading to premature concurrency
@@ -4649,7 +4636,7 @@ static void rebind_workers(struct worker_pool *pool)
 		WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
 		worker_flags |= WORKER_REBOUND;
 		worker_flags &= ~WORKER_UNBOUND;
-		ACCESS_ONCE(worker->flags) = worker_flags;
+		WRITE_ONCE(worker->flags, worker_flags);
 	}
 
 	spin_unlock_irq(&pool->lock);
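
The last two hunks are the mechanical ACCESS_ONCE() -> WRITE_ONCE() conversion: ACCESS_ONCE(x) = v and WRITE_ONCE(x, v) both emit a volatile store the compiler cannot tear or elide, but WRITE_ONCE() also handles non-scalar types and names the store side explicitly. A sketch of the writer/reader pairing assumed here (the reader body is illustrative of wq_worker_waking_up(), not the exact code):

/* Writer, under pool->lock: publish the recomputed flags in one store. */
WRITE_ONCE(worker->flags, worker_flags);

/* Lockless reader, e.g. wq_worker_waking_up(): the matching READ_ONCE()
 * keeps the compiler from tearing or refetching the flags word. */
if (!(READ_ONCE(worker->flags) & WORKER_NOT_RUNNING))
	atomic_inc(&worker->pool->nr_running);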