author     Linus Torvalds <torvalds@linux-foundation.org>   2013-03-18 21:47:07 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2013-03-18 21:47:07 -0400
commit     b63dc123b20e54a306ff1bfa191e511c506ee331 (patch)
tree       ab7b2b9259bbb1bf9c139e71b94cdfd27c69a9f8 /kernel/workqueue.c
parent     35f8c769aa5f3d9a81d50e9bdcbfd4151e72a0c9 (diff)
parent     eb2834285cf172856cd12f66892fc7467935ebed (diff)
Merge branch 'for-3.9-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
Pull workqueue fix from Tejun Heo:
"Lai's patch to fix highly unlikely but still possible workqueue stall
during CPU hotunplug."
* 'for-3.9-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
workqueue: fix possible pool stall bug in wq_unbind_fn()
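In short (see the diff below): wq_unbind_fn() used to close its per-pool loop right after dropping pool->lock and assoc_mutex, and only then, outside the loop, call schedule() and zero nr_running for each pool. Nothing explicitly woke the now-unbound workers, so if a busy worker then blocked, pending work items could sit on the worklist for a long time. The fix pulls schedule() and the nr_running reset inside the per-pool loop and adds an explicit wake_up_worker() call, kicking off unbound chain execution of whatever is already queued. A condensed sketch of the reworked loop tail, using identifiers from the diff (not standalone, compilable code):

	for_each_std_worker_pool(pool, cpu) {
		/* ... claim assoc_mutex, flag workers WORKER_UNBOUND, drop pool->lock ... */

		/* cross rq->lock so sched callbacks on other CPUs see WORKER_UNBOUND */
		schedule();

		/* concurrency management is now off: a non-empty worklist always wants a worker */
		atomic_set(&pool->nr_running, 0);

		/* the new part: wake an idle worker so pending items start running now */
		spin_lock_irq(&pool->lock);
		wake_up_worker(pool);
		spin_unlock_irq(&pool->lock);
	}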
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c  44
1 file changed, 25 insertions(+), 19 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 55fac5b991b7..b48cd597145d 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3447,28 +3447,34 @@ static void wq_unbind_fn(struct work_struct *work)
 
 		spin_unlock_irq(&pool->lock);
 		mutex_unlock(&pool->assoc_mutex);
-	}
 
-	/*
-	 * Call schedule() so that we cross rq->lock and thus can guarantee
-	 * sched callbacks see the %WORKER_UNBOUND flag. This is necessary
-	 * as scheduler callbacks may be invoked from other cpus.
-	 */
-	schedule();
+		/*
+		 * Call schedule() so that we cross rq->lock and thus can
+		 * guarantee sched callbacks see the %WORKER_UNBOUND flag.
+		 * This is necessary as scheduler callbacks may be invoked
+		 * from other cpus.
+		 */
+		schedule();
 
-	/*
-	 * Sched callbacks are disabled now. Zap nr_running. After this,
-	 * nr_running stays zero and need_more_worker() and keep_working()
-	 * are always true as long as the worklist is not empty. Pools on
-	 * @cpu now behave as unbound (in terms of concurrency management)
-	 * pools which are served by workers tied to the CPU.
-	 *
-	 * On return from this function, the current worker would trigger
-	 * unbound chain execution of pending work items if other workers
-	 * didn't already.
-	 */
-	for_each_std_worker_pool(pool, cpu)
+		/*
+		 * Sched callbacks are disabled now. Zap nr_running.
+		 * After this, nr_running stays zero and need_more_worker()
+		 * and keep_working() are always true as long as the
+		 * worklist is not empty. This pool now behaves as an
+		 * unbound (in terms of concurrency management) pool which
+		 * are served by workers tied to the pool.
+		 */
 		atomic_set(&pool->nr_running, 0);
+
+		/*
+		 * With concurrency management just turned off, a busy
+		 * worker blocking could lead to lengthy stalls. Kick off
+		 * unbound chain execution of currently pending work items.
+		 */
+		spin_lock_irq(&pool->lock);
+		wake_up_worker(pool);
+		spin_unlock_irq(&pool->lock);
+	}
 }
 
 /*
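
Two details of the new hunk are worth spelling out. The nr_running reset happens before the wake-up: as the comment in the hunk says, with nr_running pinned at zero, need_more_worker() and keep_working() stay true as long as the worklist is non-empty, so a single wake-up is enough to start a chain in which workers keep pulling pending items. Roughly (a paraphrase of the condition that comment describes, not the actual workqueue.c source):

	/* paraphrase: with nr_running forced to 0, "need another worker?"
	 * reduces to "is anything still queued on this pool?" */
	static bool need_more_worker_paraphrase(struct worker_pool *pool)
	{
		return !list_empty(&pool->worklist) && !atomic_read(&pool->nr_running);
	}

And the wake-up itself is done with pool->lock held, which appears to be the locking context wake_up_worker() expects elsewhere in workqueue.c.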
