path: root/kernel/workqueue.c
author	Linus Torvalds <torvalds@linux-foundation.org>	2017-11-13 15:38:26 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-11-13 15:38:26 -0500
commit	8e9a2dba8686187d8c8179e5b86640e653963889 (patch)
tree	a4ba543649219cbb28d91aab65b785d763f5d069	/kernel/workqueue.c
parent	6098850e7e6978f95a958f79a645a653228d0002 (diff)
parent	450cbdd0125cfa5d7bbf9e2a6b6961cc48d29730 (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull core locking updates from Ingo Molnar:
 "The main changes in this cycle are:

   - Another attempt at enabling cross-release lockdep dependency
     tracking (automatically part of CONFIG_PROVE_LOCKING=y), this time
     with better performance and fewer false positives. (Byungchul Park)

   - Introduce lockdep_assert_irqs_enabled()/disabled() and convert
     open-coded equivalents to lockdep variants. (Frederic Weisbecker)

   - Add down_read_killable() and use it in the VFS's iterate_dir()
     method. (Kirill Tkhai)

   - Convert remaining uses of ACCESS_ONCE() to READ_ONCE()/WRITE_ONCE().
     Most of the conversion was Coccinelle driven. (Mark Rutland,
     Paul E. McKenney)

   - Get rid of lockless_dereference(), by strengthening Alpha atomics,
     strengthening READ_ONCE() with smp_read_barrier_depends() and thus
     being able to convert users of lockless_dereference() to
     READ_ONCE(). (Will Deacon)

   - Various micro-optimizations:
       - better PV qspinlocks (Waiman Long)
       - better x86 barriers (Michael S. Tsirkin)
       - better x86 refcounts (Kees Cook)

   - ... plus other fixes and enhancements. (Borislav Petkov, Juergen
     Gross, Miguel Bernal Marin)"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (70 commits)
  locking/x86: Use LOCK ADD for smp_mb() instead of MFENCE
  rcu: Use lockdep to assert IRQs are disabled/enabled
  netpoll: Use lockdep to assert IRQs are disabled/enabled
  timers/posix-cpu-timers: Use lockdep to assert IRQs are disabled/enabled
  sched/clock, sched/cputime: Use lockdep to assert IRQs are disabled/enabled
  irq_work: Use lockdep to assert IRQs are disabled/enabled
  irq/timings: Use lockdep to assert IRQs are disabled/enabled
  perf/core: Use lockdep to assert IRQs are disabled/enabled
  x86: Use lockdep to assert IRQs are disabled/enabled
  smp/core: Use lockdep to assert IRQs are disabled/enabled
  timers/hrtimer: Use lockdep to assert IRQs are disabled/enabled
  timers/nohz: Use lockdep to assert IRQs are disabled/enabled
  workqueue: Use lockdep to assert IRQs are disabled/enabled
  irq/softirqs: Use lockdep to assert IRQs are disabled/enabled
  locking/lockdep: Add IRQs disabled/enabled assertion APIs: lockdep_assert_irqs_enabled()/disabled()
  locking/pvqspinlock: Implement hybrid PV queued/unfair locks
  locking/rwlocks: Fix comments
  x86/paravirt: Set up the virt_spin_lock_key after static keys get initialized
  block, locking/lockdep: Assign a lock_class per gendisk used for wait_for_completion()
  workqueue: Remove now redundant lock acquisitions wrt. workqueue flushes
  ...
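For context (not part of the commit message): the IRQ-state assertion API added by this series is used as in the minimal sketch below. Only lockdep_assert_irqs_disabled()/enabled() come from the series; check_ctx() is a made-up example function. Unlike the open-coded WARN_ON_ONCE(!irqs_disabled()) it replaces, the lockdep variant compiles away when CONFIG_PROVE_LOCKING is off.

	/* Sketch only; check_ctx() is a made-up example function. */
	#include <linux/bug.h>
	#include <linux/irqflags.h>
	#include <linux/lockdep.h>

	static void check_ctx(void)
	{
		/* Old style: unconditional runtime check. */
		WARN_ON_ONCE(!irqs_disabled());

		/* New style: checked only when lockdep IRQ tracking is enabled. */
		lockdep_assert_irqs_disabled();
	}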
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	25
1 file changed, 6 insertions(+), 19 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index a2dccfe1acec..13f67b5a0a0c 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1376,7 +1376,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
 	 * queued or lose PENDING. Grabbing PENDING and queueing should
 	 * happen with IRQ disabled.
 	 */
-	WARN_ON_ONCE(!irqs_disabled());
+	lockdep_assert_irqs_disabled();
 
 	debug_work_activate(work);
 
@@ -2491,15 +2491,8 @@ static void insert_wq_barrier(struct pool_workqueue *pwq,
 	INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
 	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
 
-	/*
-	 * Explicitly init the crosslock for wq_barrier::done, make its lock
-	 * key a subkey of the corresponding work. As a result we won't
-	 * build a dependency between wq_barrier::done and unrelated work.
-	 */
-	lockdep_init_map_crosslock((struct lockdep_map *)&barr->done.map,
-				   "(complete)wq_barr::done",
-				   target->lockdep_map.key, 1);
-	__init_completion(&barr->done);
+	init_completion_map(&barr->done, &target->lockdep_map);
+
 	barr->task = current;
 
 	/*
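The hunk above replaces the open-coded crosslock initialization with init_completion_map(), which ties a completion's lockdep map to an existing one so that wait_for_completion() dependencies are attributed to that map. A rough sketch of the idiom, with made-up names (demo_obj, demo_wait); the bare lockdep_map member is assumed to exist on the object, as it does on work_struct under CONFIG_LOCKDEP:

	/* Sketch only, not from the commit; demo_obj/demo_wait() are made up. */
	#include <linux/completion.h>
	#include <linux/lockdep.h>

	struct demo_obj {
		struct lockdep_map	lockdep_map;	/* assumed to exist on the object */
	};

	static void demo_wait(struct demo_obj *obj)
	{
		struct completion done;

		/* Make waits on 'done' show up under obj's lockdep map. */
		init_completion_map(&done, &obj->lockdep_map);

		/* ... hand &done to whoever will complete() it ... */
		wait_for_completion(&done);
	}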
@@ -2605,16 +2598,13 @@ void flush_workqueue(struct workqueue_struct *wq)
 	struct wq_flusher this_flusher = {
 		.list = LIST_HEAD_INIT(this_flusher.list),
 		.flush_color = -1,
-		.done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
+		.done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map),
 	};
 	int next_color;
 
 	if (WARN_ON(!wq_online))
 		return;
 
-	lock_map_acquire(&wq->lockdep_map);
-	lock_map_release(&wq->lockdep_map);
-
 	mutex_lock(&wq->mutex);
 
 	/*
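The dropped lock_map_acquire()/lock_map_release() pair was a "fake lock" annotation telling lockdep that a flush depends on the workqueue; with the on-stack completion now initialized against wq->lockdep_map (the initializer change above), wait_for_completion() records that dependency itself under cross-release tracking, making the annotation redundant. For reference, the removed idiom looks roughly like this sketch (demo_flush()/flush_target are made-up names):

	/* Sketch of the annotation idiom removed above; names are made up. */
	#include <linux/lockdep.h>

	struct flush_target {
		struct lockdep_map	lockdep_map;	/* assumed, as in workqueue_struct */
	};

	static void demo_flush(struct flush_target *t)
	{
		/* Acquire+release with no real lock: records "this path waits on t". */
		lock_map_acquire(&t->lockdep_map);
		lock_map_release(&t->lockdep_map);

		/* ... the actual flush/wait happens here ... */
	}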
@@ -2877,9 +2867,6 @@ bool flush_work(struct work_struct *work)
 	if (WARN_ON(!wq_online))
 		return false;
 
-	lock_map_acquire(&work->lockdep_map);
-	lock_map_release(&work->lockdep_map);
-
 	if (start_flush_work(work, &barr)) {
 		wait_for_completion(&barr.done);
 		destroy_work_on_stack(&barr.work);
@@ -4640,7 +4627,7 @@ static void rebind_workers(struct worker_pool *pool)
 		 * concurrency management. Note that when or whether
 		 * @worker clears REBOUND doesn't affect correctness.
 		 *
-		 * ACCESS_ONCE() is necessary because @worker->flags may be
+		 * WRITE_ONCE() is necessary because @worker->flags may be
 		 * tested without holding any lock in
 		 * wq_worker_waking_up(). Without it, NOT_RUNNING test may
 		 * fail incorrectly leading to premature concurrency
@@ -4649,7 +4636,7 @@ static void rebind_workers(struct worker_pool *pool)
 		WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
 		worker_flags |= WORKER_REBOUND;
 		worker_flags &= ~WORKER_UNBOUND;
-		ACCESS_ONCE(worker->flags) = worker_flags;
+		WRITE_ONCE(worker->flags, worker_flags);
 	}
 
 	spin_unlock_irq(&pool->lock);
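For context on the ACCESS_ONCE() conversion in the last two hunks: the WRITE_ONCE() in rebind_workers() pairs with a lockless flag test elsewhere (wq_worker_waking_up()). The general pattern after this series looks roughly like the sketch below; demo_flags and the function names are made up for illustration:

	/* Sketch of the READ_ONCE()/WRITE_ONCE() pairing; names are made up. */
	#include <linux/compiler.h>
	#include <linux/types.h>

	static unsigned int demo_flags;

	static void demo_update(unsigned int new_flags)
	{
		/* Was: ACCESS_ONCE(demo_flags) = new_flags; */
		WRITE_ONCE(demo_flags, new_flags);
	}

	static bool demo_test_lockless(unsigned int mask)
	{
		/* Lockless reader pairs with the WRITE_ONCE() above. */
		return READ_ONCE(demo_flags) & mask;
	}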