author    Tejun Heo <tj@kernel.org>    2013-01-24 14:01:34 -0500
committer Tejun Heo <tj@kernel.org>    2013-01-24 14:01:34 -0500
commit    706026c2141113886f61e1ad2738c9a7723ec69c (patch)
tree      c61ffa31567cf6b7536a3209503d498f22c6ace6 /kernel/workqueue.c
parent    e6e380ed92555533740d5f670640f6f1868132de (diff)
workqueue: post global_cwq removal cleanups
Remove remaining references to gcwq.

* __next_gcwq_cpu() steals __next_wq_cpu() name.  The original
  __next_wq_cpu() became __next_cwq_cpu().

* s/for_each_gcwq_cpu/for_each_wq_cpu/
  s/for_each_online_gcwq_cpu/for_each_online_wq_cpu/

* s/gcwq_mayday_timeout/pool_mayday_timeout/

* s/gcwq_unbind_fn/wq_unbind_fn/

* Drop references to gcwq in comments.

This patch doesn't introduce any functional changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
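For reference, a minimal sketch of how the renamed iterators read to a caller inside kernel/workqueue.c after this patch. The helper example_walk_wq() is hypothetical and not part of the change; for_each_wq_cpu(), for_each_cwq_cpu() and get_cwq() are the names used in the diff below.

	/* Hypothetical illustration only -- not part of this patch. */
	static void example_walk_wq(struct workqueue_struct *wq)
	{
		unsigned int cpu;

		/* walk possible CPUs plus WORK_CPU_UNBOUND */
		for_each_wq_cpu(cpu) {
			/* per-CPU (and unbound) bookkeeping would go here */
		}

		/*
		 * Walk only the CPUs carrying cwqs for @wq: possible CPUs
		 * for bound workqueues, WORK_CPU_UNBOUND for WQ_UNBOUND ones.
		 */
		for_each_cwq_cpu(cpu, wq) {
			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

			/* operate on cwq here */
			(void)cwq;
		}
	}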
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c  104
1 file changed, 52 insertions(+), 52 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index db8d4b7471ac..577de1073f24 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -246,8 +246,8 @@ EXPORT_SYMBOL_GPL(system_freezable_wq);
 #define for_each_busy_worker(worker, i, pos, pool)			\
 	hash_for_each(pool->busy_hash, i, pos, worker, hentry)
 
-static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
-				  unsigned int sw)
+static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
+				unsigned int sw)
 {
 	if (cpu < nr_cpu_ids) {
 		if (sw & 1) {
@@ -261,39 +261,39 @@ static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
 	return WORK_CPU_NONE;
 }
 
-static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
-				struct workqueue_struct *wq)
+static inline int __next_cwq_cpu(int cpu, const struct cpumask *mask,
+				 struct workqueue_struct *wq)
 {
-	return __next_gcwq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
+	return __next_wq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
 }
 
 /*
  * CPU iterators
  *
- * An extra gcwq is defined for an invalid cpu number
+ * An extra cpu number is defined using an invalid cpu number
  * (WORK_CPU_UNBOUND) to host workqueues which are not bound to any
- * specific CPU. The following iterators are similar to
- * for_each_*_cpu() iterators but also considers the unbound gcwq.
+ * specific CPU. The following iterators are similar to for_each_*_cpu()
+ * iterators but also considers the unbound CPU.
  *
- * for_each_gcwq_cpu()		: possible CPUs + WORK_CPU_UNBOUND
- * for_each_online_gcwq_cpu()	: online CPUs + WORK_CPU_UNBOUND
+ * for_each_wq_cpu()		: possible CPUs + WORK_CPU_UNBOUND
+ * for_each_online_wq_cpu()	: online CPUs + WORK_CPU_UNBOUND
  * for_each_cwq_cpu()		: possible CPUs for bound workqueues,
  *				  WORK_CPU_UNBOUND for unbound workqueues
  */
-#define for_each_gcwq_cpu(cpu)						\
-	for ((cpu) = __next_gcwq_cpu(-1, cpu_possible_mask, 3);		\
+#define for_each_wq_cpu(cpu)						\
+	for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, 3);		\
 	     (cpu) < WORK_CPU_NONE;					\
-	     (cpu) = __next_gcwq_cpu((cpu), cpu_possible_mask, 3))
+	     (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, 3))
 
-#define for_each_online_gcwq_cpu(cpu)					\
-	for ((cpu) = __next_gcwq_cpu(-1, cpu_online_mask, 3);		\
+#define for_each_online_wq_cpu(cpu)					\
+	for ((cpu) = __next_wq_cpu(-1, cpu_online_mask, 3);		\
 	     (cpu) < WORK_CPU_NONE;					\
-	     (cpu) = __next_gcwq_cpu((cpu), cpu_online_mask, 3))
+	     (cpu) = __next_wq_cpu((cpu), cpu_online_mask, 3))
 
 #define for_each_cwq_cpu(cpu, wq)					\
-	for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, (wq));	\
+	for ((cpu) = __next_cwq_cpu(-1, cpu_possible_mask, (wq));	\
 	     (cpu) < WORK_CPU_NONE;					\
-	     (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq)))
+	     (cpu) = __next_cwq_cpu((cpu), cpu_possible_mask, (wq)))
 
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
 
@@ -655,7 +655,7 @@ static bool __need_more_worker(struct worker_pool *pool)
  * running workers.
  *
  * Note that, because unbound workers never contribute to nr_running, this
- * function will always return %true for unbound gcwq as long as the
+ * function will always return %true for unbound pools as long as the
  * worklist isn't empty.
  */
 static bool need_more_worker(struct worker_pool *pool)
@@ -1129,14 +1129,14 @@ fail:
 }
 
 /**
- * insert_work - insert a work into gcwq
+ * insert_work - insert a work into a pool
  * @cwq: cwq @work belongs to
  * @work: work to insert
  * @head: insertion point
  * @extra_flags: extra WORK_STRUCT_* flags to set
  *
- * Insert @work which belongs to @cwq into @gcwq after @head.
- * @extra_flags is or'd to work_struct flags.
+ * Insert @work which belongs to @cwq after @head. @extra_flags is or'd to
+ * work_struct flags.
  *
  * CONTEXT:
  * spin_lock_irq(pool->lock).
@@ -1179,7 +1179,7 @@ static bool is_chained_work(struct workqueue_struct *wq)
 	unsigned long flags;
 	unsigned int cpu;
 
-	for_each_gcwq_cpu(cpu) {
+	for_each_wq_cpu(cpu) {
 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 		struct worker_pool *pool = cwq->pool;
 		struct worker *worker;
@@ -1533,7 +1533,7 @@ static void worker_enter_idle(struct worker *worker)
 		mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
 
 	/*
-	 * Sanity check nr_running. Because gcwq_unbind_fn() releases
+	 * Sanity check nr_running. Because wq_unbind_fn() releases
 	 * pool->lock between setting %WORKER_UNBOUND and zapping
 	 * nr_running, the warning may trigger spuriously. Check iff
 	 * unbind is not in progress.
@@ -1563,7 +1563,7 @@ static void worker_leave_idle(struct worker *worker)
 }
 
 /**
- * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock gcwq
+ * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock pool
  * @worker: self
  *
  * Works which are scheduled while the cpu is online must at least be
@@ -1575,10 +1575,10 @@ static void worker_leave_idle(struct worker *worker)
  * themselves to the target cpu and may race with cpu going down or
  * coming online. kthread_bind() can't be used because it may put the
  * worker to already dead cpu and set_cpus_allowed_ptr() can't be used
- * verbatim as it's best effort and blocking and gcwq may be
+ * verbatim as it's best effort and blocking and pool may be
  * [dis]associated in the meantime.
  *
- * This function tries set_cpus_allowed() and locks gcwq and verifies the
+ * This function tries set_cpus_allowed() and locks pool and verifies the
  * binding against %POOL_DISASSOCIATED which is set during
  * %CPU_DOWN_PREPARE and cleared during %CPU_ONLINE, so if the worker
  * enters idle state or fetches works without dropping lock, it can
@@ -1589,7 +1589,7 @@ static void worker_leave_idle(struct worker *worker)
  * held.
  *
  * RETURNS:
- * %true if the associated gcwq is online (@worker is successfully
+ * %true if the associated pool is online (@worker is successfully
  * bound), %false if offline.
  */
 static bool worker_maybe_bind_and_lock(struct worker *worker)
@@ -1826,7 +1826,7 @@ fail:
  * start_worker - start a newly created worker
  * @worker: worker to start
  *
- * Make the gcwq aware of @worker and start it.
+ * Make the pool aware of @worker and start it.
  *
  * CONTEXT:
  * spin_lock_irq(pool->lock).
@@ -1843,7 +1843,7 @@ static void start_worker(struct worker *worker)
  * destroy_worker - destroy a workqueue worker
  * @worker: worker to be destroyed
  *
- * Destroy @worker and adjust @gcwq stats accordingly.
+ * Destroy @worker and adjust @pool stats accordingly.
  *
  * CONTEXT:
  * spin_lock_irq(pool->lock) which is released and regrabbed.
@@ -1919,7 +1919,7 @@ static bool send_mayday(struct work_struct *work)
 	return true;
 }
 
-static void gcwq_mayday_timeout(unsigned long __pool)
+static void pool_mayday_timeout(unsigned long __pool)
 {
 	struct worker_pool *pool = (void *)__pool;
 	struct work_struct *work;
@@ -2047,9 +2047,9 @@ static bool maybe_destroy_workers(struct worker_pool *pool)
  * manage_workers - manage worker pool
  * @worker: self
  *
- * Assume the manager role and manage gcwq worker pool @worker belongs
+ * Assume the manager role and manage the worker pool @worker belongs
  * to. At any given time, there can be only zero or one manager per
- * gcwq. The exclusion is handled automatically by this function.
+ * pool. The exclusion is handled automatically by this function.
  *
  * The caller can safely start processing works on false return. On
  * true return, it's guaranteed that need_to_create_worker() is false
@@ -2092,11 +2092,11 @@ static bool manage_workers(struct worker *worker)
 		 * CPU hotplug could have happened while we were waiting
 		 * for assoc_mutex. Hotplug itself can't handle us
 		 * because manager isn't either on idle or busy list, and
-		 * @gcwq's state and ours could have deviated.
+		 * @pool's state and ours could have deviated.
 		 *
 		 * As hotplug is now excluded via assoc_mutex, we can
 		 * simply try to bind. It will succeed or fail depending
-		 * on @gcwq's current state. Try it and adjust
+		 * on @pool's current state. Try it and adjust
 		 * %WORKER_UNBOUND accordingly.
 		 */
 		if (worker_maybe_bind_and_lock(worker))
@@ -2271,8 +2271,8 @@ static void process_scheduled_works(struct worker *worker)
  * worker_thread - the worker thread function
  * @__worker: self
  *
- * The gcwq worker thread function. There's a single dynamic pool of
- * these per each cpu. These workers process all works regardless of
+ * The worker thread function. There are NR_CPU_WORKER_POOLS dynamic pools
+ * of these per each cpu. These workers process all works regardless of
  * their specific target workqueue. The only exception is works which
  * belong to workqueues with a rescuer which will be explained in
  * rescuer_thread().
@@ -2368,14 +2368,14 @@ sleep:
  * Workqueue rescuer thread function. There's one rescuer for each
  * workqueue which has WQ_RESCUER set.
  *
- * Regular work processing on a gcwq may block trying to create a new
+ * Regular work processing on a pool may block trying to create a new
  * worker which uses GFP_KERNEL allocation which has slight chance of
  * developing into deadlock if some works currently on the same queue
  * need to be processed to satisfy the GFP_KERNEL allocation. This is
  * the problem rescuer solves.
  *
- * When such condition is possible, the gcwq summons rescuers of all
- * workqueues which have works queued on the gcwq and let them process
+ * When such condition is possible, the pool summons rescuers of all
+ * workqueues which have works queued on the pool and let them process
  * those works so that forward progress can be guaranteed.
  *
  * This should happen rarely.
@@ -3476,7 +3476,7 @@ EXPORT_SYMBOL_GPL(work_busy);
  *
  * There are two challenges in supporting CPU hotplug. Firstly, there
  * are a lot of assumptions on strong associations among work, cwq and
- * gcwq which make migrating pending and scheduled works very
+ * pool which make migrating pending and scheduled works very
  * difficult to implement without impacting hot paths. Secondly,
  * worker pools serve mix of short, long and very long running works making
  * blocked draining impractical.
@@ -3486,7 +3486,7 @@ EXPORT_SYMBOL_GPL(work_busy);
  * cpu comes back online.
  */
 
-static void gcwq_unbind_fn(struct work_struct *work)
+static void wq_unbind_fn(struct work_struct *work)
 {
 	int cpu = smp_processor_id();
 	struct worker_pool *pool;
@@ -3601,7 +3601,7 @@ static int __cpuinit workqueue_cpu_down_callback(struct notifier_block *nfb,
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_DOWN_PREPARE:
 		/* unbinding should happen on the local CPU */
-		INIT_WORK_ONSTACK(&unbind_work, gcwq_unbind_fn);
+		INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
 		queue_work_on(cpu, system_highpri_wq, &unbind_work);
 		flush_work(&unbind_work);
 		break;
@@ -3654,7 +3654,7 @@ EXPORT_SYMBOL_GPL(work_on_cpu);
  *
  * Start freezing workqueues. After this function returns, all freezable
  * workqueues will queue new works to their frozen_works list instead of
- * gcwq->worklist.
+ * pool->worklist.
  *
  * CONTEXT:
  * Grabs and releases workqueue_lock and pool->lock's.
@@ -3668,7 +3668,7 @@ void freeze_workqueues_begin(void)
 	BUG_ON(workqueue_freezing);
 	workqueue_freezing = true;
 
-	for_each_gcwq_cpu(cpu) {
+	for_each_wq_cpu(cpu) {
 		struct worker_pool *pool;
 		struct workqueue_struct *wq;
 
@@ -3715,7 +3715,7 @@ bool freeze_workqueues_busy(void)
 
 	BUG_ON(!workqueue_freezing);
 
-	for_each_gcwq_cpu(cpu) {
+	for_each_wq_cpu(cpu) {
 		struct workqueue_struct *wq;
 		/*
 		 * nr_active is monotonically decreasing. It's safe
@@ -3743,7 +3743,7 @@ out_unlock:
  * thaw_workqueues - thaw workqueues
  *
  * Thaw workqueues. Normal queueing is restored and all collected
- * frozen works are transferred to their respective gcwq worklists.
+ * frozen works are transferred to their respective pool worklists.
  *
  * CONTEXT:
  * Grabs and releases workqueue_lock and pool->lock's.
@@ -3757,7 +3757,7 @@ void thaw_workqueues(void)
 	if (!workqueue_freezing)
 		goto out_unlock;
 
-	for_each_gcwq_cpu(cpu) {
+	for_each_wq_cpu(cpu) {
 		struct worker_pool *pool;
 		struct workqueue_struct *wq;
 
@@ -3801,8 +3801,8 @@ static int __init init_workqueues(void)
 	cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
 	hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
 
-	/* initialize gcwqs */
-	for_each_gcwq_cpu(cpu) {
+	/* initialize CPU pools */
+	for_each_wq_cpu(cpu) {
 		struct worker_pool *pool;
 
 		for_each_std_worker_pool(pool, cpu) {
@@ -3817,7 +3817,7 @@ static int __init init_workqueues(void)
 			pool->idle_timer.function = idle_worker_timeout;
 			pool->idle_timer.data = (unsigned long)pool;
 
-			setup_timer(&pool->mayday_timer, gcwq_mayday_timeout,
+			setup_timer(&pool->mayday_timer, pool_mayday_timeout,
 				    (unsigned long)pool);
 
 			mutex_init(&pool->assoc_mutex);
@@ -3829,7 +3829,7 @@ static int __init init_workqueues(void)
 	}
 
 	/* create the initial worker */
-	for_each_online_gcwq_cpu(cpu) {
+	for_each_online_wq_cpu(cpu) {
 		struct worker_pool *pool;
 
 		for_each_std_worker_pool(pool, cpu) {