diff options
author | Tejun Heo <tj@kernel.org> | 2012-07-17 15:39:26 -0400 |
---|---|---|
committer | Tejun Heo <tj@kernel.org> | 2012-07-17 15:39:26 -0400 |
commit | f2d5a0ee06c1813f985bb9386f3ccc0d0315720f (patch) | |
tree | 4207975fe000f95931b0c6876657db5b13f92b73 /kernel/workqueue.c | |
parent | 6575820221f7a4dd6eadecf7bf83cdd154335eda (diff) |
workqueue: drop CPU_DYING notifier operation
Workqueue used CPU_DYING notification to mark GCWQ_DISASSOCIATED.
This was necessary because workqueue's CPU_DOWN_PREPARE happened
before other DOWN_PREPARE notifiers and workqueue needed to stay
associated across the rest of DOWN_PREPARE.
After the previous patch, workqueue's DOWN_PREPARE happens after
others and can set GCWQ_DISASSOCIATED directly. Drop CPU_DYING and
let the trustee set GCWQ_DISASSOCIATED after disabling concurrency
management.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: "Rafael J. Wysocki" <rjw@sisk.pl>
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r-- | kernel/workqueue.c | 29 |
1 file changed, 13 insertions, 16 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index f59b7fd26e26..1405fb98c0b1 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -1250,11 +1250,11 @@ static void worker_leave_idle(struct worker *worker) | |||
1250 | * verbatim as it's best effort and blocking and gcwq may be | 1250 | * verbatim as it's best effort and blocking and gcwq may be |
1251 | * [dis]associated in the meantime. | 1251 | * [dis]associated in the meantime. |
1252 | * | 1252 | * |
1253 | * This function tries set_cpus_allowed() and locks gcwq and verifies | 1253 | * This function tries set_cpus_allowed() and locks gcwq and verifies the |
1254 | * the binding against GCWQ_DISASSOCIATED which is set during | 1254 | * binding against %GCWQ_DISASSOCIATED which is set during |
1255 | * CPU_DYING and cleared during CPU_ONLINE, so if the worker enters | 1255 | * %CPU_DOWN_PREPARE and cleared during %CPU_ONLINE, so if the worker |
1256 | * idle state or fetches works without dropping lock, it can guarantee | 1256 | * enters idle state or fetches works without dropping lock, it can |
1257 | * the scheduling requirement described in the first paragraph. | 1257 | * guarantee the scheduling requirement described in the first paragraph. |
1258 | * | 1258 | * |
1259 | * CONTEXT: | 1259 | * CONTEXT: |
1260 | * Might sleep. Called without any lock but returns with gcwq->lock | 1260 | * Might sleep. Called without any lock but returns with gcwq->lock |
@@ -3349,6 +3349,12 @@ static int __cpuinit trustee_thread(void *__gcwq) | |||
3349 | rc = trustee_wait_event(!gcwq_is_managing_workers(gcwq)); | 3349 | rc = trustee_wait_event(!gcwq_is_managing_workers(gcwq)); |
3350 | BUG_ON(rc < 0); | 3350 | BUG_ON(rc < 0); |
3351 | 3351 | ||
3352 | /* | ||
3353 | * We've claimed all manager positions. Make all workers unbound | ||
3354 | * and set DISASSOCIATED. Before this, all workers except for the | ||
3355 | * ones which are still executing works from before the last CPU | ||
3356 | * down must be on the cpu. After this, they may become diasporas. | ||
3357 | */ | ||
3352 | for_each_worker_pool(pool, gcwq) { | 3358 | for_each_worker_pool(pool, gcwq) { |
3353 | pool->flags |= POOL_MANAGING_WORKERS; | 3359 | pool->flags |= POOL_MANAGING_WORKERS; |
3354 | 3360 | ||
@@ -3359,6 +3365,8 @@ static int __cpuinit trustee_thread(void *__gcwq) | |||
3359 | for_each_busy_worker(worker, i, pos, gcwq) | 3365 | for_each_busy_worker(worker, i, pos, gcwq) |
3360 | worker->flags |= WORKER_ROGUE; | 3366 | worker->flags |= WORKER_ROGUE; |
3361 | 3367 | ||
3368 | gcwq->flags |= GCWQ_DISASSOCIATED; | ||
3369 | |||
3362 | /* | 3370 | /* |
3363 | * Call schedule() so that we cross rq->lock and thus can | 3371 | * Call schedule() so that we cross rq->lock and thus can |
3364 | * guarantee sched callbacks see the rogue flag. This is | 3372 | * guarantee sched callbacks see the rogue flag. This is |
@@ -3582,16 +3590,6 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, | |||
3582 | } | 3590 | } |
3583 | break; | 3591 | break; |
3584 | 3592 | ||
3585 | case CPU_DYING: | ||
3586 | /* | ||
3587 | * Before this, the trustee and all workers except for | ||
3588 | * the ones which are still executing works from | ||
3589 | * before the last CPU down must be on the cpu. After | ||
3590 | * this, they'll all be diasporas. | ||
3591 | */ | ||
3592 | gcwq->flags |= GCWQ_DISASSOCIATED; | ||
3593 | break; | ||
3594 | |||
3595 | case CPU_POST_DEAD: | 3593 | case CPU_POST_DEAD: |
3596 | gcwq->trustee_state = TRUSTEE_BUTCHER; | 3594 | gcwq->trustee_state = TRUSTEE_BUTCHER; |
3597 | /* fall through */ | 3595 | /* fall through */ |
@@ -3672,7 +3670,6 @@ static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb, | |||
3672 | { | 3670 | { |
3673 | switch (action & ~CPU_TASKS_FROZEN) { | 3671 | switch (action & ~CPU_TASKS_FROZEN) { |
3674 | case CPU_DOWN_PREPARE: | 3672 | case CPU_DOWN_PREPARE: |
3675 | case CPU_DYING: | ||
3676 | case CPU_POST_DEAD: | 3673 | case CPU_POST_DEAD: |
3677 | return workqueue_cpu_callback(nfb, action, hcpu); | 3674 | return workqueue_cpu_callback(nfb, action, hcpu); |
3678 | } | 3675 | } |