author    Tejun Heo <tj@kernel.org>    2013-01-24 14:01:33 -0500
committer Tejun Heo <tj@kernel.org>    2013-01-24 14:01:33 -0500
commit    2464757086b4de0591738d5e30f069d068d70ec0 (patch)
tree      2e7994351d92c24fc20fdb38108a64342bef0daf /kernel/workqueue.c
parent    e34cdddb03bdfe98f20c58934fd4c45019f13ae5 (diff)
workqueue: make GCWQ_DISASSOCIATED a pool flag
Make GCWQ_DISASSOCIATED a pool flag, POOL_DISASSOCIATED. This patch
doesn't change locking - DISASSOCIATED on both pools of a CPU is set
or cleared together while holding gcwq->lock. It shouldn't cause any
functional difference.
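As a sketch of that invariant, the following hypothetical helper (for illustration only, not part of the patch) shows how both pools of a CPU flip together; in the patch itself, gcwq_unbind_fn() and workqueue_cpu_up_callback() below do this inline after taking the locks through gcwq_claim_assoc_and_lock():

```c
/*
 * Illustrative sketch only -- not part of the patch.  The real paths,
 * gcwq_unbind_fn() and workqueue_cpu_up_callback(), flip the flag inline
 * after taking assoc_mutex and gcwq->lock via gcwq_claim_assoc_and_lock().
 */
static void set_pools_disassociated(struct global_cwq *gcwq, bool on)
{
        struct worker_pool *pool;

        /*
         * Both pools of the CPU change together under gcwq->lock,
         * preserving the old gcwq-wide DISASSOCIATED semantics.
         */
        for_each_worker_pool(pool, gcwq) {
                if (on)
                        pool->flags |= POOL_DISASSOCIATED;
                else
                        pool->flags &= ~POOL_DISASSOCIATED;
        }
}
```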
This is part of an effort to remove global_cwq and make worker_pool
the top level abstraction, which in turn will help implement worker
pools with user-specified attributes.
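For orientation, here is an abridged view of struct worker_pool as it stands after this patch (fields condensed to those appearing in the diff below; the flags field is where POOL_DISASSOCIATED now lives, and the gcwq back-pointer is what the series ultimately removes):

```c
struct worker_pool {
        struct global_cwq       *gcwq;          /* owning gcwq; to go away later in the series */
        unsigned int            flags;          /* X: POOL_* flags, now incl. POOL_DISASSOCIATED */

        struct timer_list       idle_timer;     /* L: worker idle timeout */
        struct timer_list       mayday_timer;   /* L: SOS timer for workers */

        struct mutex            assoc_mutex;    /* protect POOL_DISASSOCIATED */
        struct ida              worker_ida;     /* L: for worker IDs */

        /* worklist, idle/busy worker lists, etc. omitted */
};
```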
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--    kernel/workqueue.c    66
1 file changed, 35 insertions(+), 31 deletions(-)
```diff
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 634251572fdd..1b8af92cc2c9 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -48,26 +48,28 @@
 enum {
         /*
          * global_cwq flags
+         */
+        GCWQ_FREEZING           = 1 << 1,       /* freeze in progress */
+
+        /*
+         * worker_pool flags
          *
-         * A bound gcwq is either associated or disassociated with its CPU.
+         * A bound pool is either associated or disassociated with its CPU.
          * While associated (!DISASSOCIATED), all workers are bound to the
          * CPU and none has %WORKER_UNBOUND set and concurrency management
          * is in effect.
          *
          * While DISASSOCIATED, the cpu may be offline and all workers have
          * %WORKER_UNBOUND set and concurrency management disabled, and may
-         * be executing on any CPU.  The gcwq behaves as an unbound one.
+         * be executing on any CPU.  The pool behaves as an unbound one.
          *
          * Note that DISASSOCIATED can be flipped only while holding
-         * assoc_mutex of all pools on the gcwq to avoid changing binding
-         * state while create_worker() is in progress.
+         * assoc_mutex to avoid changing binding state while
+         * create_worker() is in progress.
          */
-        GCWQ_DISASSOCIATED      = 1 << 0,       /* cpu can't serve workers */
-        GCWQ_FREEZING           = 1 << 1,       /* freeze in progress */
-
-        /* pool flags */
         POOL_MANAGE_WORKERS     = 1 << 0,       /* need to manage workers */
         POOL_MANAGING_WORKERS   = 1 << 1,       /* managing workers */
+        POOL_DISASSOCIATED      = 1 << 2,       /* cpu can't serve workers */
 
         /* worker flags */
         WORKER_STARTED          = 1 << 0,       /* started */
@@ -115,7 +117,7 @@ enum {
  * X: During normal operation, modification requires gcwq->lock and
  *    should be done only from local cpu.  Either disabling preemption
  *    on local cpu or grabbing gcwq->lock is enough for read access.
- *    If GCWQ_DISASSOCIATED is set, it's identical to L.
+ *    If POOL_DISASSOCIATED is set, it's identical to L.
  *
  * F: wq->flush_mutex protected.
  *
@@ -138,7 +140,7 @@ struct worker_pool {
         struct timer_list       idle_timer;     /* L: worker idle timeout */
         struct timer_list       mayday_timer;   /* L: SOS timer for workers */
 
-        struct mutex            assoc_mutex;    /* protect GCWQ_DISASSOCIATED */
+        struct mutex            assoc_mutex;    /* protect POOL_DISASSOCIATED */
         struct ida              worker_ida;     /* L: for worker IDs */
 };
 
@@ -439,9 +441,9 @@ static DEFINE_PER_CPU(struct global_cwq, global_cwq);
 static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, pool_nr_running[NR_STD_WORKER_POOLS]);
 
 /*
- * Global cpu workqueue and nr_running counter for unbound gcwq.  The
- * gcwq is always online, has GCWQ_DISASSOCIATED set, and all its
- * workers have WORKER_UNBOUND set.
+ * Global cpu workqueue and nr_running counter for unbound gcwq.  The pools
+ * for online CPUs have POOL_DISASSOCIATED set, and all their workers have
+ * WORKER_UNBOUND set.
  */
 static struct global_cwq unbound_global_cwq;
 static atomic_t unbound_pool_nr_running[NR_STD_WORKER_POOLS] = {
@@ -1474,7 +1476,6 @@ EXPORT_SYMBOL_GPL(mod_delayed_work);
 static void worker_enter_idle(struct worker *worker)
 {
         struct worker_pool *pool = worker->pool;
-        struct global_cwq *gcwq = pool->gcwq;
 
         BUG_ON(worker->flags & WORKER_IDLE);
         BUG_ON(!list_empty(&worker->entry) &&
@@ -1497,7 +1498,7 @@ static void worker_enter_idle(struct worker *worker)
          * nr_running, the warning may trigger spuriously.  Check iff
          * unbind is not in progress.
          */
-        WARN_ON_ONCE(!(gcwq->flags & GCWQ_DISASSOCIATED) &&
+        WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
                      pool->nr_workers == pool->nr_idle &&
                      atomic_read(get_pool_nr_running(pool)));
 }
@@ -1538,7 +1539,7 @@ static void worker_leave_idle(struct worker *worker)
  * [dis]associated in the meantime.
  *
  * This function tries set_cpus_allowed() and locks gcwq and verifies the
- * binding against %GCWQ_DISASSOCIATED which is set during
+ * binding against %POOL_DISASSOCIATED which is set during
  * %CPU_DOWN_PREPARE and cleared during %CPU_ONLINE, so if the worker
  * enters idle state or fetches works without dropping lock, it can
  * guarantee the scheduling requirement described in the first paragraph.
@@ -1554,7 +1555,8 @@ static void worker_leave_idle(struct worker *worker)
 static bool worker_maybe_bind_and_lock(struct worker *worker)
 __acquires(&gcwq->lock)
 {
-        struct global_cwq *gcwq = worker->pool->gcwq;
+        struct worker_pool *pool = worker->pool;
+        struct global_cwq *gcwq = pool->gcwq;
         struct task_struct *task = worker->task;
 
         while (true) {
@@ -1562,13 +1564,13 @@ __acquires(&gcwq->lock)
                  * The following call may fail, succeed or succeed
                  * without actually migrating the task to the cpu if
                  * it races with cpu hotunplug operation.  Verify
-                 * against GCWQ_DISASSOCIATED.
+                 * against POOL_DISASSOCIATED.
                  */
-                if (!(gcwq->flags & GCWQ_DISASSOCIATED))
+                if (!(pool->flags & POOL_DISASSOCIATED))
                         set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu));
 
                 spin_lock_irq(&gcwq->lock);
-                if (gcwq->flags & GCWQ_DISASSOCIATED)
+                if (pool->flags & POOL_DISASSOCIATED)
                         return false;
                 if (task_cpu(task) == gcwq->cpu &&
                     cpumask_equal(&current->cpus_allowed,
@@ -1766,14 +1768,14 @@ static struct worker *create_worker(struct worker_pool *pool)
 
         /*
          * Determine CPU binding of the new worker depending on
-         * %GCWQ_DISASSOCIATED.  The caller is responsible for ensuring the
+         * %POOL_DISASSOCIATED.  The caller is responsible for ensuring the
          * flag remains stable across this function.  See the comments
          * above the flag definition for details.
          *
          * As an unbound worker may later become a regular one if CPU comes
          * online, make sure every worker has %PF_THREAD_BOUND set.
          */
-        if (!(gcwq->flags & GCWQ_DISASSOCIATED)) {
+        if (!(pool->flags & POOL_DISASSOCIATED)) {
                 kthread_bind(worker->task, gcwq->cpu);
         } else {
                 worker->task->flags |= PF_THREAD_BOUND;
@@ -2134,10 +2136,10 @@ __acquires(&gcwq->lock)
         /*
          * Ensure we're on the correct CPU.  DISASSOCIATED test is
          * necessary to avoid spurious warnings from rescuers servicing the
-         * unbound or a disassociated gcwq.
+         * unbound or a disassociated pool.
          */
         WARN_ON_ONCE(!(worker->flags & WORKER_UNBOUND) &&
-                     !(gcwq->flags & GCWQ_DISASSOCIATED) &&
+                     !(pool->flags & POOL_DISASSOCIATED) &&
                      raw_smp_processor_id() != gcwq->cpu);
 
         /*
@@ -3472,7 +3474,7 @@ EXPORT_SYMBOL_GPL(work_busy);
  * gcwqs serve mix of short, long and very long running works making
  * blocked draining impractical.
  *
- * This is solved by allowing a gcwq to be disassociated from the CPU
+ * This is solved by allowing the pools to be disassociated from the CPU
  * running as an unbound one and allowing it to be reattached later if the
  * cpu comes back online.
  */
@@ -3522,7 +3524,8 @@ static void gcwq_unbind_fn(struct work_struct *work)
         for_each_busy_worker(worker, i, pos, gcwq)
                 worker->flags |= WORKER_UNBOUND;
 
-        gcwq->flags |= GCWQ_DISASSOCIATED;
+        for_each_worker_pool(pool, gcwq)
+                pool->flags |= POOL_DISASSOCIATED;
 
         gcwq_release_assoc_and_unlock(gcwq);
 
@@ -3581,7 +3584,8 @@ static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,
         case CPU_DOWN_FAILED:
         case CPU_ONLINE:
                 gcwq_claim_assoc_and_lock(gcwq);
-                gcwq->flags &= ~GCWQ_DISASSOCIATED;
+                for_each_worker_pool(pool, gcwq)
+                        pool->flags &= ~POOL_DISASSOCIATED;
                 rebind_workers(gcwq);
                 gcwq_release_assoc_and_unlock(gcwq);
                 break;
@@ -3806,12 +3810,12 @@ static int __init init_workqueues(void)
 
                 spin_lock_init(&gcwq->lock);
                 gcwq->cpu = cpu;
-                gcwq->flags |= GCWQ_DISASSOCIATED;
 
                 hash_init(gcwq->busy_hash);
 
                 for_each_worker_pool(pool, gcwq) {
                         pool->gcwq = gcwq;
+                        pool->flags |= POOL_DISASSOCIATED;
                         INIT_LIST_HEAD(&pool->worklist);
                         INIT_LIST_HEAD(&pool->idle_list);
 
@@ -3832,12 +3836,12 @@ static int __init init_workqueues(void)
                 struct global_cwq *gcwq = get_gcwq(cpu);
                 struct worker_pool *pool;
 
-                if (cpu != WORK_CPU_UNBOUND)
-                        gcwq->flags &= ~GCWQ_DISASSOCIATED;
-
                 for_each_worker_pool(pool, gcwq) {
                         struct worker *worker;
 
+                        if (cpu != WORK_CPU_UNBOUND)
+                                pool->flags &= ~POOL_DISASSOCIATED;
+
                         worker = create_worker(pool);
                         BUG_ON(!worker);
                         spin_lock_irq(&gcwq->lock);
```