Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	28
1 file changed, 20 insertions(+), 8 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 042d221d33cc..fbc6576a83c3 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -739,8 +739,10 @@ void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
 {
 	struct worker *worker = kthread_data(task);
 
-	if (!(worker->flags & WORKER_NOT_RUNNING))
+	if (!(worker->flags & WORKER_NOT_RUNNING)) {
+		WARN_ON_ONCE(worker->pool->gcwq->cpu != cpu);
 		atomic_inc(get_pool_nr_running(worker->pool));
+	}
 }
 
 /**
@@ -1361,8 +1363,19 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
 
 	WARN_ON_ONCE(timer->function != delayed_work_timer_fn ||
 		     timer->data != (unsigned long)dwork);
-	BUG_ON(timer_pending(timer));
-	BUG_ON(!list_empty(&work->entry));
+	WARN_ON_ONCE(timer_pending(timer));
+	WARN_ON_ONCE(!list_empty(&work->entry));
+
+	/*
+	 * If @delay is 0, queue @dwork->work immediately.  This is for
+	 * both optimization and correctness.  The earliest @timer can
+	 * expire is on the closest next tick and delayed_work users depend
+	 * on that there's no such delay when @delay is 0.
+	 */
+	if (!delay) {
+		__queue_work(cpu, wq, &dwork->work);
+		return;
+	}
 
 	timer_stats_timer_set_start_info(&dwork->timer);
 
@@ -1417,9 +1430,6 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 	bool ret = false;
 	unsigned long flags;
 
-	if (!delay)
-		return queue_work_on(cpu, wq, &dwork->work);
-
 	/* read the comment in __queue_work() */
 	local_irq_save(flags);
 
@@ -2407,8 +2417,10 @@ static int rescuer_thread(void *__wq)
 repeat:
 	set_current_state(TASK_INTERRUPTIBLE);
 
-	if (kthread_should_stop())
+	if (kthread_should_stop()) {
+		__set_current_state(TASK_RUNNING);
 		return 0;
+	}
 
 	/*
 	 * See whether any cpu is asking for help.  Unbounded
@@ -3475,7 +3487,7 @@ unsigned int work_busy(struct work_struct *work)
 	unsigned int ret = 0;
 
 	if (!gcwq)
-		return false;
+		return 0;
 
 	spin_lock_irqsave(&gcwq->lock, flags);
 
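For context, a minimal usage sketch (not part of the patch) of what the new zero-delay fast path in __queue_delayed_work() means for callers: a delayed work queued with @delay == 0 is now handed straight to __queue_work() instead of waiting for the timer's next tick, so it behaves like a plain queue_work(). The work function and work item names below are hypothetical.

```c
#include <linux/workqueue.h>
#include <linux/printk.h>

/* hypothetical work function, for illustration only */
static void example_work_fn(struct work_struct *work)
{
	pr_info("example_work_fn: ran without waiting for the next tick\n");
}

static DECLARE_DELAYED_WORK(example_dwork, example_work_fn);

static void example_queue(void)
{
	/*
	 * With the fast path above, a zero delay is queued through
	 * __queue_work() immediately rather than arming the timer.
	 */
	queue_delayed_work(system_wq, &example_dwork, 0);
}
```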
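Likewise, a hedged sketch (not from the patch) of the generic kthread pattern the rescuer_thread() hunk follows: after set_current_state(TASK_INTERRUPTIBLE), any path that returns instead of calling schedule() has to restore TASK_RUNNING, otherwise the thread exits while still marked as sleeping. The thread function below is hypothetical.

```c
#include <linux/kthread.h>
#include <linux/sched.h>

/* hypothetical kthread main loop, for illustration only */
static int example_thread_fn(void *data)
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop()) {
			/* undo the state change before bailing out */
			__set_current_state(TASK_RUNNING);
			break;
		}

		/* nothing to do yet; sleep until woken or stopped */
		schedule();
	}

	return 0;
}
```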