author	Tejun Heo <tj@kernel.org>	2012-07-17 15:39:27 -0400
committer	Tejun Heo <tj@kernel.org>	2012-07-17 15:39:27 -0400
commit	403c821d452c03be4ced571ac91339a9d3631b17 (patch)
tree	022cf4ff47b9652ca550498dc896672c1cec8d05 /kernel
parent	f2d5a0ee06c1813f985bb9386f3ccc0d0315720f (diff)
workqueue: ROGUE workers are UNBOUND workers
Currently, WORKER_UNBOUND is used to mark workers for the unbound
global_cwq and WORKER_ROGUE is used to mark workers for disassociated
per-cpu global_cwqs.  Both make the marked worker skip concurrency
management; the only place they differ is in worker_enter_idle(), where
WORKER_ROGUE is used to skip scheduling the idle timer, which can easily
be replaced with a trustee state test.
This patch replaces WORKER_ROGUE with WORKER_UNBOUND and drops
WORKER_ROGUE.  This prepares for removing the trustee and handling
disassociated global_cwqs as unbound.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: "Rafael J. Wysocki" <rjw@sisk.pl>
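As a concrete illustration of the flag semantics the message describes, here is a minimal, self-contained userspace model (not kernel code; only the flag values mirror kernel/workqueue.c): both WORKER_ROGUE and WORKER_UNBOUND are members of WORKER_NOT_RUNNING, so a worker carrying either flag is treated identically by concurrency management.

#include <stdio.h>

/* Flag values as defined in kernel/workqueue.c before this patch. */
enum {
	WORKER_PREP		= 1 << 3,
	WORKER_ROGUE		= 1 << 4,	/* dropped by this patch */
	WORKER_REBIND		= 1 << 5,
	WORKER_CPU_INTENSIVE	= 1 << 6,
	WORKER_UNBOUND		= 1 << 7,

	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_ROGUE | WORKER_REBIND |
				  WORKER_CPU_INTENSIVE | WORKER_UNBOUND,
};

/* A worker is counted as running (and thus concurrency-managed) only
 * when none of the NOT_RUNNING flags are set. */
static int concurrency_managed(unsigned int flags)
{
	return !(flags & WORKER_NOT_RUNNING);
}

int main(void)
{
	printf("rogue:   %d\n", concurrency_managed(WORKER_ROGUE));   /* 0 */
	printf("unbound: %d\n", concurrency_managed(WORKER_UNBOUND)); /* 0 */
	printf("normal:  %d\n", concurrency_managed(0));              /* 1 */
	return 0;
}

Since the two flags are indistinguishable to this test, folding ROGUE into UNBOUND only needs a substitute for the one remaining difference, the idle-timer check in worker_enter_idle().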
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/workqueue.c	46
1 file changed, 21 insertions, 25 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 1405fb98c0b1..af512927c607 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -58,13 +58,12 @@ enum {
 	WORKER_DIE		= 1 << 1,	/* die die die */
 	WORKER_IDLE		= 1 << 2,	/* is idle */
 	WORKER_PREP		= 1 << 3,	/* preparing to run works */
-	WORKER_ROGUE		= 1 << 4,	/* not bound to any cpu */
 	WORKER_REBIND		= 1 << 5,	/* mom is home, come back */
 	WORKER_CPU_INTENSIVE	= 1 << 6,	/* cpu intensive */
 	WORKER_UNBOUND		= 1 << 7,	/* worker is unbound */
 
-	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_ROGUE | WORKER_REBIND |
-				  WORKER_CPU_INTENSIVE | WORKER_UNBOUND,
+	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_REBIND | WORKER_UNBOUND |
+				  WORKER_CPU_INTENSIVE,
 
 	/* gcwq->trustee_state */
 	TRUSTEE_START		= 0,		/* start */
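How the mask is consumed: a condensed paraphrase (not a verbatim quote; the wakeup path and lockdep assertions are omitted) of worker_set_flags() from this era's kernel/workqueue.c. Entering the NOT_RUNNING set for the first time removes the worker from the pool's nr_running count, which is what "skip concurrency management" means in practice.

/* Condensed paraphrase of worker_set_flags(); details omitted. */
static void worker_set_flags(struct worker *worker, unsigned int flags)
{
	/* First transition into NOT_RUNNING: the worker no longer counts
	 * toward nr_running, so the pool may wake another worker to keep
	 * the CPU saturated. */
	if ((flags & WORKER_NOT_RUNNING) &&
	    !(worker->flags & WORKER_NOT_RUNNING))
		atomic_dec(get_pool_nr_running(worker->pool));

	worker->flags |= flags;
}

Because the patch only moves WORKER_UNBOUND within the mask and deletes WORKER_ROGUE from it, this accounting is unchanged for every flag combination the kernel can still produce.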
@@ -1198,7 +1197,7 @@ static void worker_enter_idle(struct worker *worker)
 	/* idle_list is LIFO */
 	list_add(&worker->entry, &pool->idle_list);
 
-	if (likely(!(worker->flags & WORKER_ROGUE))) {
+	if (likely(gcwq->trustee_state != TRUSTEE_DONE)) {
 		if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
 			mod_timer(&pool->idle_timer,
 				  jiffies + IDLE_WORKER_TIMEOUT);
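The guarded block arms the idle reaper. For context, a condensed paraphrase of the too_many_workers() heuristic from the same file (the ratio constant is defined elsewhere in workqueue.c):

/* Condensed paraphrase of too_many_workers(): keep a small cushion of
 * idle workers and reap the rest after IDLE_WORKER_TIMEOUT. */
static bool too_many_workers(struct worker_pool *pool)
{
	bool managing = pool->flags & POOL_MANAGING_WORKERS;
	int nr_idle = pool->nr_idle + managing;	/* manager counts as idle */
	int nr_busy = pool->nr_workers - nr_idle;

	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO < nr_busy;
}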
@@ -1207,7 +1206,7 @@ static void worker_enter_idle(struct worker *worker)
 
 	/*
 	 * Sanity check nr_running.  Because trustee releases gcwq->lock
-	 * between setting %WORKER_ROGUE and zapping nr_running, the
+	 * between setting %WORKER_UNBOUND and zapping nr_running, the
 	 * warning may trigger spuriously.  Check iff trustee is idle.
 	 */
 	WARN_ON_ONCE(gcwq->trustee_state == TRUSTEE_DONE &&
@@ -1301,10 +1300,10 @@ __acquires(&gcwq->lock)
 }
 
 /*
- * Function for worker->rebind_work used to rebind rogue busy workers
- * to the associated cpu which is coming back online.  This is
- * scheduled by cpu up but can race with other cpu hotplug operations
- * and may be executed twice without intervening cpu down.
+ * Function for worker->rebind_work used to rebind unbound busy workers to
+ * the associated cpu which is coming back online.  This is scheduled by
+ * cpu up but can race with other cpu hotplug operations and may be
+ * executed twice without intervening cpu down.
  */
 static void worker_rebind_fn(struct work_struct *work)
 {
@@ -1385,9 +1384,8 @@ static struct worker *create_worker(struct worker_pool *pool, bool bind)
 		set_user_nice(worker->task, HIGHPRI_NICE_LEVEL);
 
 	/*
-	 * A rogue worker will become a regular one if CPU comes
-	 * online later on.  Make sure every worker has
-	 * PF_THREAD_BOUND set.
+	 * An unbound worker will become a regular one if CPU comes online
+	 * later on.  Make sure every worker has PF_THREAD_BOUND set.
 	 */
 	if (bind && !on_unbound_cpu)
 		kthread_bind(worker->task, gcwq->cpu);
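For context, the branch the last two lines above open into, paraphrased from this era's create_worker(): workers for an online, associated CPU are pinned with kthread_bind(), while workers for an unbound or offline CPU get PF_THREAD_BOUND set by hand and are flagged WORKER_UNBOUND.

	/* Paraphrase of the code following the context above. */
	if (bind && !on_unbound_cpu)
		kthread_bind(worker->task, gcwq->cpu);
	else {
		worker->task->flags |= PF_THREAD_BOUND;
		if (on_unbound_cpu)
			worker->flags |= WORKER_UNBOUND;
	}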
@@ -3215,11 +3213,10 @@ EXPORT_SYMBOL_GPL(work_busy);
  * gcwqs serve mix of short, long and very long running works making
  * blocked draining impractical.
  *
- * This is solved by allowing a gcwq to be detached from CPU, running
- * it with unbound (rogue) workers and allowing it to be reattached
- * later if the cpu comes back online.  A separate thread is created
- * to govern a gcwq in such state and is called the trustee of the
- * gcwq.
+ * This is solved by allowing a gcwq to be detached from CPU, running it
+ * with unbound workers and allowing it to be reattached later if the cpu
+ * comes back online.  A separate thread is created to govern a gcwq in
+ * such state and is called the trustee of the gcwq.
  *
  * Trustee states and their descriptions.
  *
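The state table referenced above sits outside this hunk. Paraphrased from memory of the same enum block that hunk 1 touches (only TRUSTEE_START and TRUSTEE_DONE are confirmed by the diff itself), the trustee states of this era are:

	/* gcwq->trustee_state, paraphrase of kernel/workqueue.c */
	TRUSTEE_START		= 0,		/* start */
	TRUSTEE_IN_CHARGE,			/* trustee in charge of gcwq */
	TRUSTEE_BUTCHER,			/* butcher workers */
	TRUSTEE_RELEASE,			/* release workers */
	TRUSTEE_DONE,				/* trustee is done */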
@@ -3359,19 +3356,18 @@ static int __cpuinit trustee_thread(void *__gcwq)
 		pool->flags |= POOL_MANAGING_WORKERS;
 
 		list_for_each_entry(worker, &pool->idle_list, entry)
-			worker->flags |= WORKER_ROGUE;
+			worker->flags |= WORKER_UNBOUND;
 	}
 
 	for_each_busy_worker(worker, i, pos, gcwq)
-		worker->flags |= WORKER_ROGUE;
+		worker->flags |= WORKER_UNBOUND;
 
 	gcwq->flags |= GCWQ_DISASSOCIATED;
 
 	/*
-	 * Call schedule() so that we cross rq->lock and thus can
-	 * guarantee sched callbacks see the rogue flag.  This is
-	 * necessary as scheduler callbacks may be invoked from other
-	 * cpus.
+	 * Call schedule() so that we cross rq->lock and thus can guarantee
+	 * sched callbacks see the unbound flag.  This is necessary as
+	 * scheduler callbacks may be invoked from other cpus.
 	 */
 	spin_unlock_irq(&gcwq->lock);
 	schedule();
@@ -3439,7 +3435,7 @@ static int __cpuinit trustee_thread(void *__gcwq)
 			worker = create_worker(pool, false);
 			spin_lock_irq(&gcwq->lock);
 			if (worker) {
-				worker->flags |= WORKER_ROGUE;
+				worker->flags |= WORKER_UNBOUND;
 				start_worker(worker);
 			}
 		}
@@ -3488,7 +3484,7 @@ static int __cpuinit trustee_thread(void *__gcwq)
 		 * rebinding is scheduled.
 		 */
 		worker->flags |= WORKER_REBIND;
-		worker->flags &= ~WORKER_ROGUE;
+		worker->flags &= ~WORKER_UNBOUND;
 
 		/* queue rebind_work, wq doesn't matter, use the default one */
 		if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,