about summary refs log tree commit diff stats
path: root/kernel/workqueue.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c  54
1 files changed, 18 insertions, 36 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index a2dccfe1acec..7368b57842ea 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1376,7 +1376,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
1376 * queued or lose PENDING. Grabbing PENDING and queueing should 1376 * queued or lose PENDING. Grabbing PENDING and queueing should
1377 * happen with IRQ disabled. 1377 * happen with IRQ disabled.
1378 */ 1378 */
1379 WARN_ON_ONCE(!irqs_disabled()); 1379 lockdep_assert_irqs_disabled();
1380 1380
1381 debug_work_activate(work); 1381 debug_work_activate(work);
1382 1382
@@ -1493,9 +1493,9 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq,
1493} 1493}
1494EXPORT_SYMBOL(queue_work_on); 1494EXPORT_SYMBOL(queue_work_on);
1495 1495
1496void delayed_work_timer_fn(unsigned long __data) 1496void delayed_work_timer_fn(struct timer_list *t)
1497{ 1497{
1498 struct delayed_work *dwork = (struct delayed_work *)__data; 1498 struct delayed_work *dwork = from_timer(dwork, t, timer);
1499 1499
1500 /* should have been called from irqsafe timer with irq already off */ 1500 /* should have been called from irqsafe timer with irq already off */
1501 __queue_work(dwork->cpu, dwork->wq, &dwork->work); 1501 __queue_work(dwork->cpu, dwork->wq, &dwork->work);
@@ -1509,8 +1509,7 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
1509 struct work_struct *work = &dwork->work; 1509 struct work_struct *work = &dwork->work;
1510 1510
1511 WARN_ON_ONCE(!wq); 1511 WARN_ON_ONCE(!wq);
1512 WARN_ON_ONCE(timer->function != delayed_work_timer_fn || 1512 WARN_ON_ONCE(timer->function != (TIMER_FUNC_TYPE)delayed_work_timer_fn);
1513 timer->data != (unsigned long)dwork);
1514 WARN_ON_ONCE(timer_pending(timer)); 1513 WARN_ON_ONCE(timer_pending(timer));
1515 WARN_ON_ONCE(!list_empty(&work->entry)); 1514 WARN_ON_ONCE(!list_empty(&work->entry));
1516 1515
@@ -1833,9 +1832,9 @@ static void destroy_worker(struct worker *worker)
1833 wake_up_process(worker->task); 1832 wake_up_process(worker->task);
1834} 1833}
1835 1834
1836static void idle_worker_timeout(unsigned long __pool) 1835static void idle_worker_timeout(struct timer_list *t)
1837{ 1836{
1838 struct worker_pool *pool = (void *)__pool; 1837 struct worker_pool *pool = from_timer(pool, t, idle_timer);
1839 1838
1840 spin_lock_irq(&pool->lock); 1839 spin_lock_irq(&pool->lock);
1841 1840
@@ -1881,9 +1880,9 @@ static void send_mayday(struct work_struct *work)
1881 } 1880 }
1882} 1881}
1883 1882
1884static void pool_mayday_timeout(unsigned long __pool) 1883static void pool_mayday_timeout(struct timer_list *t)
1885{ 1884{
1886 struct worker_pool *pool = (void *)__pool; 1885 struct worker_pool *pool = from_timer(pool, t, mayday_timer);
1887 struct work_struct *work; 1886 struct work_struct *work;
1888 1887
1889 spin_lock_irq(&pool->lock); 1888 spin_lock_irq(&pool->lock);
@@ -2491,15 +2490,8 @@ static void insert_wq_barrier(struct pool_workqueue *pwq,
2491 INIT_WORK_ONSTACK(&barr->work, wq_barrier_func); 2490 INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
2492 __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work)); 2491 __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
2493 2492
2494 /* 2493 init_completion_map(&barr->done, &target->lockdep_map);
2495 * Explicitly init the crosslock for wq_barrier::done, make its lock 2494
2496 * key a subkey of the corresponding work. As a result we won't
2497 * build a dependency between wq_barrier::done and unrelated work.
2498 */
2499 lockdep_init_map_crosslock((struct lockdep_map *)&barr->done.map,
2500 "(complete)wq_barr::done",
2501 target->lockdep_map.key, 1);
2502 __init_completion(&barr->done);
2503 barr->task = current; 2495 barr->task = current;
2504 2496
2505 /* 2497 /*
@@ -2605,16 +2597,13 @@ void flush_workqueue(struct workqueue_struct *wq)
2605 struct wq_flusher this_flusher = { 2597 struct wq_flusher this_flusher = {
2606 .list = LIST_HEAD_INIT(this_flusher.list), 2598 .list = LIST_HEAD_INIT(this_flusher.list),
2607 .flush_color = -1, 2599 .flush_color = -1,
2608 .done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done), 2600 .done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map),
2609 }; 2601 };
2610 int next_color; 2602 int next_color;
2611 2603
2612 if (WARN_ON(!wq_online)) 2604 if (WARN_ON(!wq_online))
2613 return; 2605 return;
2614 2606
2615 lock_map_acquire(&wq->lockdep_map);
2616 lock_map_release(&wq->lockdep_map);
2617
2618 mutex_lock(&wq->mutex); 2607 mutex_lock(&wq->mutex);
2619 2608
2620 /* 2609 /*
@@ -2877,9 +2866,6 @@ bool flush_work(struct work_struct *work)
2877 if (WARN_ON(!wq_online)) 2866 if (WARN_ON(!wq_online))
2878 return false; 2867 return false;
2879 2868
2880 lock_map_acquire(&work->lockdep_map);
2881 lock_map_release(&work->lockdep_map);
2882
2883 if (start_flush_work(work, &barr)) { 2869 if (start_flush_work(work, &barr)) {
2884 wait_for_completion(&barr.done); 2870 wait_for_completion(&barr.done);
2885 destroy_work_on_stack(&barr.work); 2871 destroy_work_on_stack(&barr.work);
@@ -3236,11 +3222,9 @@ static int init_worker_pool(struct worker_pool *pool)
3236 INIT_LIST_HEAD(&pool->idle_list); 3222 INIT_LIST_HEAD(&pool->idle_list);
3237 hash_init(pool->busy_hash); 3223 hash_init(pool->busy_hash);
3238 3224
3239 setup_deferrable_timer(&pool->idle_timer, idle_worker_timeout, 3225 timer_setup(&pool->idle_timer, idle_worker_timeout, TIMER_DEFERRABLE);
3240 (unsigned long)pool);
3241 3226
3242 setup_timer(&pool->mayday_timer, pool_mayday_timeout, 3227 timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0);
3243 (unsigned long)pool);
3244 3228
3245 mutex_init(&pool->attach_mutex); 3229 mutex_init(&pool->attach_mutex);
3246 INIT_LIST_HEAD(&pool->workers); 3230 INIT_LIST_HEAD(&pool->workers);
@@ -4640,7 +4624,7 @@ static void rebind_workers(struct worker_pool *pool)
4640 * concurrency management. Note that when or whether 4624 * concurrency management. Note that when or whether
4641 * @worker clears REBOUND doesn't affect correctness. 4625 * @worker clears REBOUND doesn't affect correctness.
4642 * 4626 *
4643 * ACCESS_ONCE() is necessary because @worker->flags may be 4627 * WRITE_ONCE() is necessary because @worker->flags may be
4644 * tested without holding any lock in 4628 * tested without holding any lock in
4645 * wq_worker_waking_up(). Without it, NOT_RUNNING test may 4629 * wq_worker_waking_up(). Without it, NOT_RUNNING test may
4646 * fail incorrectly leading to premature concurrency 4630 * fail incorrectly leading to premature concurrency
@@ -4649,7 +4633,7 @@ static void rebind_workers(struct worker_pool *pool)
4649 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND)); 4633 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
4650 worker_flags |= WORKER_REBOUND; 4634 worker_flags |= WORKER_REBOUND;
4651 worker_flags &= ~WORKER_UNBOUND; 4635 worker_flags &= ~WORKER_UNBOUND;
4652 ACCESS_ONCE(worker->flags) = worker_flags; 4636 WRITE_ONCE(worker->flags, worker_flags);
4653 } 4637 }
4654 4638
4655 spin_unlock_irq(&pool->lock); 4639 spin_unlock_irq(&pool->lock);
@@ -5383,11 +5367,8 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq) { }
5383 */ 5367 */
5384#ifdef CONFIG_WQ_WATCHDOG 5368#ifdef CONFIG_WQ_WATCHDOG
5385 5369
5386static void wq_watchdog_timer_fn(unsigned long data);
5387
5388static unsigned long wq_watchdog_thresh = 30; 5370static unsigned long wq_watchdog_thresh = 30;
5389static struct timer_list wq_watchdog_timer = 5371static struct timer_list wq_watchdog_timer;
5390 TIMER_DEFERRED_INITIALIZER(wq_watchdog_timer_fn, 0, 0);
5391 5372
5392static unsigned long wq_watchdog_touched = INITIAL_JIFFIES; 5373static unsigned long wq_watchdog_touched = INITIAL_JIFFIES;
5393static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES; 5374static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES;
@@ -5401,7 +5382,7 @@ static void wq_watchdog_reset_touched(void)
5401 per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies; 5382 per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
5402} 5383}
5403 5384
5404static void wq_watchdog_timer_fn(unsigned long data) 5385static void wq_watchdog_timer_fn(struct timer_list *unused)
5405{ 5386{
5406 unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ; 5387 unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ;
5407 bool lockup_detected = false; 5388 bool lockup_detected = false;
@@ -5503,6 +5484,7 @@ module_param_cb(watchdog_thresh, &wq_watchdog_thresh_ops, &wq_watchdog_thresh,
5503 5484
5504static void wq_watchdog_init(void) 5485static void wq_watchdog_init(void)
5505{ 5486{
5487 timer_setup(&wq_watchdog_timer, wq_watchdog_timer_fn, TIMER_DEFERRABLE);
5506 wq_watchdog_set_thresh(wq_watchdog_thresh); 5488 wq_watchdog_set_thresh(wq_watchdog_thresh);
5507} 5489}
5508 5490