author		Tejun Heo <tj@kernel.org>	2012-07-17 15:39:27 -0400
committer	Tejun Heo <tj@kernel.org>	2012-07-17 15:39:27 -0400
commit		bc2ae0f5bb2f39e6db06a62f9d353e4601a332a1 (patch)
tree		3f1aa1f72566ac67234799fdd811ba63297de33c /kernel
parent		6037315269d62bf967286ae2670fdd6b6acedab9 (diff)
workqueue: drop @bind from create_worker()
Currently, create_worker()'s callers are responsible for deciding
whether the newly created worker should be bound to the associated
CPU, and create_worker() sets WORKER_UNBOUND only for workers of the
unbound global_cwq.  Creation during normal operation always goes
through maybe_create_worker() with @bind true; for workers created
during hotplug, @bind is false.
The normal operation path is planned to be used even while the CPU is
going through hotplug operations or is offline, so this static
decision won't work.
Drop @bind from create_worker() and decide whether to bind by looking
at GCWQ_DISASSOCIATED.  create_worker() will also set WORKER_UNBOUND
automatically if disassociated.  To avoid flipping GCWQ_DISASSOCIATED
while create_worker() is in progress, the flag may now be changed
only while holding all manager_mutexes on the global_cwq.
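
To make the locking rule concrete, here is a minimal sketch of how a
hotplug path could flip the flag safely.  gcwq_claim_management() and
gcwq_release_management() are hypothetical helper names used only for
illustration; they are not part of this patch:

	/* hypothetical helpers -- a sketch of the locking rule, not this patch */
	static void gcwq_claim_management(struct global_cwq *gcwq)
	{
		struct worker_pool *pool;

		/* holding every pool's manager_mutex excludes create_worker() */
		for_each_worker_pool(pool, gcwq)
			mutex_lock_nested(&pool->manager_mutex, pool - gcwq->pools);
	}

	static void gcwq_release_management(struct global_cwq *gcwq)
	{
		struct worker_pool *pool;

		for_each_worker_pool(pool, gcwq)
			mutex_unlock(&pool->manager_mutex);
	}

	static void gcwq_set_disassociated(struct global_cwq *gcwq, bool on)
	{
		/* with all managerships claimed, flipping the flag is safe */
		gcwq_claim_management(gcwq);
		spin_lock_irq(&gcwq->lock);
		if (on)
			gcwq->flags |= GCWQ_DISASSOCIATED;
		else
			gcwq->flags &= ~GCWQ_DISASSOCIATED;
		spin_unlock_irq(&gcwq->lock);
		gcwq_release_management(gcwq);
	}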
This requires that GCWQ_DISASSOCIATED is not cleared behind the
trustee's back.  CPU_ONLINE no longer clears DISASSOCIATED before
flushing the trustee; instead, the trustee clears DISASSOCIATED
before rebinding the remaining workers when asked to release.  For
cases where the trustee isn't around, CPU_ONLINE clears DISASSOCIATED
itself after the trustee flush step.  Also, first_idle now has
UNBOUND set on creation, which CPU_ONLINE explicitly clears while
binding it.  These convolutions will soon be removed by further
simplification of the CPU hotplug path.
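
Condensing the CPU_UP_PREPARE and CPU_ONLINE callbacks into one view,
the first_idle round trip described above works roughly as in this
simplified excerpt (the real code stages the workers through a local
new_workers[] array and handles creation failures):

	/* CPU_UP_PREPARE: the gcwq is still DISASSOCIATED, so these
	 * workers come back with WORKER_UNBOUND set by create_worker() */
	for_each_worker_pool(pool, gcwq)
		pool->first_idle = create_worker(pool);

	/* CPU_ONLINE: bind each first_idle by hand and clear UNBOUND */
	for_each_worker_pool(pool, gcwq) {
		kthread_bind(pool->first_idle->task, cpu);
		spin_lock_irq(&gcwq->lock);
		pool->flags |= POOL_MANAGE_WORKERS;
		pool->first_idle->flags &= ~WORKER_UNBOUND;
		start_worker(pool->first_idle);
		pool->first_idle = NULL;
		spin_unlock_irq(&gcwq->lock);
	}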
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: "Rafael J. Wysocki" <rjw@sisk.pl>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/workqueue.c	64
1 file changed, 45 insertions(+), 19 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index f7a00697d150..e1d05e51a80a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -45,7 +45,22 @@
 #include "workqueue_sched.h"
 
 enum {
-	/* global_cwq flags */
+	/*
+	 * global_cwq flags
+	 *
+	 * A bound gcwq is either associated or disassociated with its CPU.
+	 * While associated (!DISASSOCIATED), all workers are bound to the
+	 * CPU and none has %WORKER_UNBOUND set and concurrency management
+	 * is in effect.
+	 *
+	 * While DISASSOCIATED, the cpu may be offline and all workers have
+	 * %WORKER_UNBOUND set and concurrency management disabled, and may
+	 * be executing on any CPU.  The gcwq behaves as an unbound one.
+	 *
+	 * Note that DISASSOCIATED can be flipped only while holding
+	 * managership of all pools on the gcwq to avoid changing binding
+	 * state while create_worker() is in progress.
+	 */
 	GCWQ_DISASSOCIATED	= 1 << 0,	/* cpu can't serve workers */
 	GCWQ_FREEZING		= 1 << 1,	/* freeze in progress */
 
@@ -1334,7 +1349,6 @@ static struct worker *alloc_worker(void)
 /**
  * create_worker - create a new workqueue worker
  * @pool: pool the new worker will belong to
- * @bind: whether to set affinity to @cpu or not
  *
  * Create a new worker which is bound to @pool.  The returned worker
  * can be started by calling start_worker() or destroyed using
@@ -1346,10 +1360,9 @@ static struct worker *alloc_worker(void)
  * RETURNS:
  * Pointer to the newly created worker.
  */
-static struct worker *create_worker(struct worker_pool *pool, bool bind)
+static struct worker *create_worker(struct worker_pool *pool)
 {
 	struct global_cwq *gcwq = pool->gcwq;
-	bool on_unbound_cpu = gcwq->cpu == WORK_CPU_UNBOUND;
 	const char *pri = worker_pool_pri(pool) ? "H" : "";
 	struct worker *worker = NULL;
 	int id = -1;
@@ -1370,7 +1383,7 @@ static struct worker *create_worker(struct worker_pool *pool, bool bind)
 	worker->pool = pool;
 	worker->id = id;
 
-	if (!on_unbound_cpu)
+	if (gcwq->cpu != WORK_CPU_UNBOUND)
 		worker->task = kthread_create_on_node(worker_thread,
 					worker, cpu_to_node(gcwq->cpu),
 					"kworker/%u:%d%s", gcwq->cpu, id, pri);
@@ -1384,15 +1397,19 @@ static struct worker *create_worker(struct worker_pool *pool, bool bind)
 		set_user_nice(worker->task, HIGHPRI_NICE_LEVEL);
 
 	/*
-	 * An unbound worker will become a regular one if CPU comes online
-	 * later on.  Make sure every worker has PF_THREAD_BOUND set.
+	 * Determine CPU binding of the new worker depending on
+	 * %GCWQ_DISASSOCIATED.  The caller is responsible for ensuring the
+	 * flag remains stable across this function.  See the comments
+	 * above the flag definition for details.
+	 *
+	 * As an unbound worker may later become a regular one if CPU comes
+	 * online, make sure every worker has %PF_THREAD_BOUND set.
 	 */
-	if (bind && !on_unbound_cpu)
+	if (!(gcwq->flags & GCWQ_DISASSOCIATED)) {
 		kthread_bind(worker->task, gcwq->cpu);
-	else {
+	} else {
 		worker->task->flags |= PF_THREAD_BOUND;
-		if (on_unbound_cpu)
-			worker->flags |= WORKER_UNBOUND;
+		worker->flags |= WORKER_UNBOUND;
 	}
 
 	return worker;
@@ -1568,7 +1585,7 @@ restart:
 	while (true) {
 		struct worker *worker;
 
-		worker = create_worker(pool, true);
+		worker = create_worker(pool);
 		if (worker) {
 			del_timer_sync(&pool->mayday_timer);
 			spin_lock_irq(&gcwq->lock);
@@ -3420,12 +3437,10 @@ static int __cpuinit trustee_thread(void *__gcwq)
 
 		if (need_to_create_worker(pool)) {
 			spin_unlock_irq(&gcwq->lock);
-			worker = create_worker(pool, false);
+			worker = create_worker(pool);
 			spin_lock_irq(&gcwq->lock);
-			if (worker) {
-				worker->flags |= WORKER_UNBOUND;
+			if (worker)
 				start_worker(worker);
-			}
 		}
 	}
 
@@ -3463,6 +3478,10 @@ static int __cpuinit trustee_thread(void *__gcwq)
 	for_each_worker_pool(pool, gcwq)
 		WARN_ON(!list_empty(&pool->idle_list));
 
+	/* if we're reassociating, clear DISASSOCIATED */
+	if (gcwq->trustee_state == TRUSTEE_RELEASE)
+		gcwq->flags &= ~GCWQ_DISASSOCIATED;
+
 	for_each_busy_worker(worker, i, pos, gcwq) {
 		struct work_struct *rebind_work = &worker->rebind_work;
 
@@ -3546,7 +3565,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 		i = 0;
 		for_each_worker_pool(pool, gcwq) {
 			BUG_ON(pool->first_idle);
-			new_workers[i] = create_worker(pool, false);
+			new_workers[i] = create_worker(pool);
 			if (!new_workers[i++])
 				goto err_destroy;
 		}
@@ -3584,7 +3603,6 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 
 	case CPU_DOWN_FAILED:
 	case CPU_ONLINE:
-		gcwq->flags &= ~GCWQ_DISASSOCIATED;
 		if (gcwq->trustee_state != TRUSTEE_DONE) {
 			gcwq->trustee_state = TRUSTEE_RELEASE;
 			wake_up_process(gcwq->trustee);
@@ -3592,6 +3610,13 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 		}
 
 		/*
+		 * Either DISASSOCIATED is already cleared or no worker is
+		 * left on the gcwq.  Safe to clear DISASSOCIATED without
+		 * claiming managers.
+		 */
+		gcwq->flags &= ~GCWQ_DISASSOCIATED;
+
+		/*
 		 * Trustee is done and there might be no worker left.
 		 * Put the first_idle in and request a real manager to
 		 * take a look.
@@ -3601,6 +3626,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 			kthread_bind(pool->first_idle->task, cpu);
 			spin_lock_irq(&gcwq->lock);
 			pool->flags |= POOL_MANAGE_WORKERS;
+			pool->first_idle->flags &= ~WORKER_UNBOUND;
 			start_worker(pool->first_idle);
 			pool->first_idle = NULL;
 		}
@@ -3899,7 +3925,7 @@ static int __init init_workqueues(void)
 		for_each_worker_pool(pool, gcwq) {
 			struct worker *worker;
 
-			worker = create_worker(pool, true);
+			worker = create_worker(pool);
 			BUG_ON(!worker);
 			spin_lock_irq(&gcwq->lock);
 			start_worker(worker);
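
With @bind gone, every call site reduces to the same shape; a short
sketch mirroring the maybe_create_worker() hunk above:

	worker = create_worker(pool);	/* binding decided by GCWQ_DISASSOCIATED */
	if (worker) {
		spin_lock_irq(&gcwq->lock);
		start_worker(worker);
		spin_unlock_irq(&gcwq->lock);
	}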