Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c | 85
1 file changed, 46 insertions(+), 39 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index fd9a28a13afd..c6e1bdb469ee 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -530,7 +530,7 @@ static int work_next_color(int color)
 static inline void set_work_data(struct work_struct *work, unsigned long data,
                                  unsigned long flags)
 {
-        BUG_ON(!work_pending(work));
+        WARN_ON_ONCE(!work_pending(work));
         atomic_long_set(&work->data, data | flags | work_static(work));
 }
 
@@ -785,7 +785,8 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task,
         pool = worker->pool;
 
         /* this can only happen on the local cpu */
-        BUG_ON(cpu != raw_smp_processor_id());
+        if (WARN_ON_ONCE(cpu != raw_smp_processor_id()))
+                return NULL;
 
         /*
          * The counterpart of the following dec_and_test, implied mb,
@@ -1458,9 +1459,10 @@ static void worker_enter_idle(struct worker *worker)
 {
         struct worker_pool *pool = worker->pool;
 
-        BUG_ON(worker->flags & WORKER_IDLE);
-        BUG_ON(!list_empty(&worker->entry) &&
-               (worker->hentry.next || worker->hentry.pprev));
+        if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
+            WARN_ON_ONCE(!list_empty(&worker->entry) &&
+                         (worker->hentry.next || worker->hentry.pprev)))
+                return;
 
         /* can't use worker_set_flags(), also called from start_worker() */
         worker->flags |= WORKER_IDLE;
@@ -1497,7 +1499,8 @@ static void worker_leave_idle(struct worker *worker)
 {
         struct worker_pool *pool = worker->pool;
 
-        BUG_ON(!(worker->flags & WORKER_IDLE));
+        if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
+                return;
         worker_clr_flags(worker, WORKER_IDLE);
         pool->nr_idle--;
         list_del_init(&worker->entry);
@@ -1793,8 +1796,9 @@ static void destroy_worker(struct worker *worker)
         int id = worker->id;
 
         /* sanity check frenzy */
-        BUG_ON(worker->current_work);
-        BUG_ON(!list_empty(&worker->scheduled));
+        if (WARN_ON(worker->current_work) ||
+            WARN_ON(!list_empty(&worker->scheduled)))
+                return;
 
         if (worker->flags & WORKER_STARTED)
                 pool->nr_workers--;
@@ -1923,7 +1927,8 @@ restart:
         del_timer_sync(&pool->mayday_timer);
         spin_lock_irq(&pool->lock);
         start_worker(worker);
-        BUG_ON(need_to_create_worker(pool));
+        if (WARN_ON_ONCE(need_to_create_worker(pool)))
+                goto restart;
         return true;
 }
 
@@ -2256,7 +2261,7 @@ recheck:
          * preparing to process a work or actually processing it.
          * Make sure nobody diddled with it while I was sleeping.
          */
-        BUG_ON(!list_empty(&worker->scheduled));
+        WARN_ON_ONCE(!list_empty(&worker->scheduled));
 
         /*
          * When control reaches this point, we're guaranteed to have
@@ -2364,7 +2369,7 @@ repeat:
                  * Slurp in all works issued via this workqueue and
                  * process'em.
                  */
-                BUG_ON(!list_empty(&rescuer->scheduled));
+                WARN_ON_ONCE(!list_empty(&rescuer->scheduled));
                 list_for_each_entry_safe(work, n, &pool->worklist, entry)
                         if (get_work_pwq(work) == pwq)
                                 move_linked_works(work, scheduled, &n);
@@ -2499,7 +2504,7 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
         unsigned int cpu;
 
         if (flush_color >= 0) {
-                BUG_ON(atomic_read(&wq->nr_pwqs_to_flush));
+                WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush));
                 atomic_set(&wq->nr_pwqs_to_flush, 1);
         }
 
@@ -2510,7 +2515,7 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
                 spin_lock_irq(&pool->lock);
 
                 if (flush_color >= 0) {
-                        BUG_ON(pwq->flush_color != -1);
+                        WARN_ON_ONCE(pwq->flush_color != -1);
 
                         if (pwq->nr_in_flight[flush_color]) {
                                 pwq->flush_color = flush_color;
@@ -2520,7 +2525,7 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
                 }
 
                 if (work_color >= 0) {
-                        BUG_ON(work_color != work_next_color(pwq->work_color));
+                        WARN_ON_ONCE(work_color != work_next_color(pwq->work_color));
                         pwq->work_color = work_color;
                 }
 
@@ -2568,13 +2573,13 @@ void flush_workqueue(struct workqueue_struct *wq)
                  * becomes our flush_color and work_color is advanced
                  * by one.
                  */
-                BUG_ON(!list_empty(&wq->flusher_overflow));
+                WARN_ON_ONCE(!list_empty(&wq->flusher_overflow));
                 this_flusher.flush_color = wq->work_color;
                 wq->work_color = next_color;
 
                 if (!wq->first_flusher) {
                         /* no flush in progress, become the first flusher */
-                        BUG_ON(wq->flush_color != this_flusher.flush_color);
+                        WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
 
                         wq->first_flusher = &this_flusher;
 
@@ -2587,7 +2592,7 @@ void flush_workqueue(struct workqueue_struct *wq)
                         }
                 } else {
                         /* wait in queue */
-                        BUG_ON(wq->flush_color == this_flusher.flush_color);
+                        WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color);
                         list_add_tail(&this_flusher.list, &wq->flusher_queue);
                         flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
                 }
@@ -2621,8 +2626,8 @@ void flush_workqueue(struct workqueue_struct *wq)
 
         wq->first_flusher = NULL;
 
-        BUG_ON(!list_empty(&this_flusher.list));
-        BUG_ON(wq->flush_color != this_flusher.flush_color);
+        WARN_ON_ONCE(!list_empty(&this_flusher.list));
+        WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
 
         while (true) {
                 struct wq_flusher *next, *tmp;
@@ -2635,8 +2640,8 @@ void flush_workqueue(struct workqueue_struct *wq)
                         complete(&next->done);
                 }
 
-                BUG_ON(!list_empty(&wq->flusher_overflow) &&
-                       wq->flush_color != work_next_color(wq->work_color));
+                WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) &&
+                             wq->flush_color != work_next_color(wq->work_color));
 
                 /* this flush_color is finished, advance by one */
                 wq->flush_color = work_next_color(wq->flush_color);
@@ -2660,7 +2665,7 @@ void flush_workqueue(struct workqueue_struct *wq)
                 }
 
                 if (list_empty(&wq->flusher_queue)) {
-                        BUG_ON(wq->flush_color != wq->work_color);
+                        WARN_ON_ONCE(wq->flush_color != wq->work_color);
                         break;
                 }
 
@@ -2668,8 +2673,8 @@ void flush_workqueue(struct workqueue_struct *wq)
                  * Need to flush more colors. Make the next flusher
                  * the new first flusher and arm pwqs.
                  */
-                BUG_ON(wq->flush_color == wq->work_color);
-                BUG_ON(wq->flush_color != next->flush_color);
+                WARN_ON_ONCE(wq->flush_color == wq->work_color);
+                WARN_ON_ONCE(wq->flush_color != next->flush_color);
 
                 list_del_init(&next->list);
                 wq->first_flusher = next;
@@ -3263,6 +3268,19 @@ void destroy_workqueue(struct workqueue_struct *wq)
         /* drain it before proceeding with destruction */
         drain_workqueue(wq);
 
+        /* sanity checks */
+        for_each_pwq_cpu(cpu, wq) {
+                struct pool_workqueue *pwq = get_pwq(cpu, wq);
+                int i;
+
+                for (i = 0; i < WORK_NR_COLORS; i++)
+                        if (WARN_ON(pwq->nr_in_flight[i]))
+                                return;
+                if (WARN_ON(pwq->nr_active) ||
+                    WARN_ON(!list_empty(&pwq->delayed_works)))
+                        return;
+        }
+
         /*
          * wq list is used to freeze wq, remove from list after
          * flushing is complete in case freeze races us.
@@ -3271,17 +3289,6 @@ void destroy_workqueue(struct workqueue_struct *wq)
         list_del(&wq->list);
         spin_unlock(&workqueue_lock);
 
-        /* sanity check */
-        for_each_pwq_cpu(cpu, wq) {
-                struct pool_workqueue *pwq = get_pwq(cpu, wq);
-                int i;
-
-                for (i = 0; i < WORK_NR_COLORS; i++)
-                        BUG_ON(pwq->nr_in_flight[i]);
-                BUG_ON(pwq->nr_active);
-                BUG_ON(!list_empty(&pwq->delayed_works));
-        }
-
         if (wq->flags & WQ_RESCUER) {
                 kthread_stop(wq->rescuer->task);
                 free_mayday_mask(wq->mayday_mask);
@@ -3424,7 +3431,7 @@ static void wq_unbind_fn(struct work_struct *work)
         int i;
 
         for_each_std_worker_pool(pool, cpu) {
-                BUG_ON(cpu != smp_processor_id());
+                WARN_ON_ONCE(cpu != smp_processor_id());
 
                 mutex_lock(&pool->assoc_mutex);
                 spin_lock_irq(&pool->lock);
@@ -3594,7 +3601,7 @@ void freeze_workqueues_begin(void)
 
         spin_lock(&workqueue_lock);
 
-        BUG_ON(workqueue_freezing);
+        WARN_ON_ONCE(workqueue_freezing);
         workqueue_freezing = true;
 
         for_each_wq_cpu(cpu) {
@@ -3642,7 +3649,7 @@ bool freeze_workqueues_busy(void)
 
         spin_lock(&workqueue_lock);
 
-        BUG_ON(!workqueue_freezing);
+        WARN_ON_ONCE(!workqueue_freezing);
 
         for_each_wq_cpu(cpu) {
                 struct workqueue_struct *wq;
@@ -3656,7 +3663,7 @@ bool freeze_workqueues_busy(void)
                 if (!pwq || !(wq->flags & WQ_FREEZABLE))
                         continue;
 
-                BUG_ON(pwq->nr_active < 0);
+                WARN_ON_ONCE(pwq->nr_active < 0);
                 if (pwq->nr_active) {
                         busy = true;
                         goto out_unlock;
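
The pattern applied throughout the diff above is to replace hard BUG_ON() assertions with WARN_ON[_ONCE]() checks followed by a graceful bail-out (return or goto). The snippet below is not part of the patch; it is a standalone userspace sketch with hypothetical stand-in macros (not the kernel's real definitions) that contrasts the two behaviours: a failed BUG_ON() takes the whole context down, while WARN_ON_ONCE() logs the violation at most once per call site, reports whether the condition fired, and lets the caller return early instead of crashing.

/*
 * Userspace approximation only; the real BUG_ON()/WARN_ON_ONCE() live in
 * include/asm-generic/bug.h and behave differently in detail.
 */
#include <stdio.h>
#include <stdlib.h>

/* Stand-in: a failed BUG_ON() aborts the whole program. */
#define BUG_ON(cond)                                            \
        do {                                                    \
                if (cond) {                                     \
                        fprintf(stderr, "BUG at %s:%d\n",       \
                                __FILE__, __LINE__);            \
                        abort();                                \
                }                                               \
        } while (0)

/* Stand-in: warn once per call site and report whether the check fired. */
#define WARN_ON_ONCE(cond)                                      \
        ({                                                      \
                static int __warned;                            \
                int __ret = !!(cond);                           \
                if (__ret && !__warned) {                       \
                        __warned = 1;                           \
                        fprintf(stderr, "WARNING at %s:%d\n",   \
                                __FILE__, __LINE__);            \
                }                                               \
                __ret;                                          \
        })

/* New-style sanity check: complain, then bail out instead of crashing. */
static void leave_idle(int worker_is_idle)
{
        if (WARN_ON_ONCE(!worker_is_idle))
                return;
        /* ... normal processing would continue here ... */
}

int main(void)
{
        leave_idle(0);  /* first violation: prints one warning */
        leave_idle(0);  /* repeat violation: suppressed by _ONCE */
        leave_idle(1);  /* sane input: no warning, runs normally */
        /* BUG_ON(1); would abort the whole program here */
        return 0;
}

Built with gcc (the ({ ... }) statement expression is a GNU extension), this should print a single warning for the first bad call, stay silent on the repeat, and exit normally, which is why the converted checks above become recoverable rather than fatal.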