author    Tejun Heo <tj@kernel.org>    2013-03-12 14:30:03 -0400
committer Tejun Heo <tj@kernel.org>    2013-03-12 14:30:03 -0400
commit    f02ae73aaa4f285199683862ac59972877a11c5d (patch)
tree      ca517312e1ed5bfaff9b18ebefcd8faf064ad040  /kernel/workqueue.c
parent    7a62c2c87e3bc174fe4b9e9720e148427510fcfb (diff)
workqueue: drop "std" from cpu_std_worker_pools and for_each_std_worker_pool()
All per-cpu pools are standard, so there's no need to use both "cpu"
and "std" and for_each_std_worker_pool() is confusing in that it can
be used only for per-cpu pools.

* s/cpu_std_worker_pools/cpu_worker_pools/
* s/for_each_std_worker_pool()/for_each_cpu_worker_pool()/

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
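The renamed macro's pointer-walk over a fixed-size per-CPU array is easy to see in miniature. Below is a minimal userspace sketch, not kernel code: per_cpu() is mocked as a plain 2D-array lookup, and NR_CPUS, struct worker_pool, and its fields are simplified stand-ins; only the shape of for_each_cpu_worker_pool() mirrors the patched macro.

    /* Userspace sketch of the for_each_cpu_worker_pool() pattern.
     * per_cpu() is mocked as a 2D-array lookup; the real kernel macro
     * resolves a per-CPU section offset instead. */
    #include <stdio.h>

    #define NR_CPUS                 4
    #define NR_STD_WORKER_POOLS     2       /* normal and highpri */

    struct worker_pool {
            int cpu;
            int nr_workers;
    };

    /* stand-in for DEFINE_PER_CPU_SHARED_ALIGNED(...) */
    static struct worker_pool cpu_worker_pools[NR_CPUS][NR_STD_WORKER_POOLS];

    /* mock of the kernel's per_cpu() accessor */
    #define per_cpu(var, cpu)       (var[cpu])

    /* walks both standard pools of one CPU, as in the patch */
    #define for_each_cpu_worker_pool(pool, cpu)                          \
            for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];            \
                 (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
                 (pool)++)

    int main(void)
    {
            struct worker_pool *pool;
            int cpu;

            /* bind each pool to its CPU, as init_workqueues() does */
            for (cpu = 0; cpu < NR_CPUS; cpu++)
                    for_each_cpu_worker_pool(pool, cpu)
                            pool->cpu = cpu;

            for_each_cpu_worker_pool(pool, 1)
                    printf("cpu %d pool index %ld\n", pool->cpu,
                           (long)(pool - cpu_worker_pools[1]));
            return 0;
    }

The loop bound is still NR_STD_WORKER_POOLS after the rename, which is consistent with the commit message: the pools themselves remain "standard"; only the "std" in the per-cpu identifiers was redundant.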
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c  22
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 7642bb7b70ee..2c5073214774 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -252,9 +252,9 @@ EXPORT_SYMBOL_GPL(system_freezable_wq);
 			   lockdep_is_held(&workqueue_lock),		\
 			   "sched RCU or workqueue lock should be held")
 
-#define for_each_std_worker_pool(pool, cpu)				\
-	for ((pool) = &per_cpu(cpu_std_worker_pools, cpu)[0];		\
-	     (pool) < &per_cpu(cpu_std_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
+#define for_each_cpu_worker_pool(pool, cpu)				\
+	for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];		\
+	     (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
 	     (pool)++)
 
 #define for_each_busy_worker(worker, i, pool)				\
@@ -420,7 +420,7 @@ static bool workqueue_freezing; /* W: have wqs started freezing? */
  * POOL_DISASSOCIATED set, and their workers have WORKER_UNBOUND set.
  */
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
-				     cpu_std_worker_pools);
+				     cpu_worker_pools);
 
 /*
  * idr of all pools.  Modifications are protected by workqueue_lock.  Read
@@ -3342,7 +3342,7 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
 			struct pool_workqueue *pwq =
 				per_cpu_ptr(wq->cpu_pwqs, cpu);
 			struct worker_pool *cpu_pools =
-				per_cpu(cpu_std_worker_pools, cpu);
+				per_cpu(cpu_worker_pools, cpu);
 
 			pwq->pool = &cpu_pools[highpri];
 			list_add_tail_rcu(&pwq->pwqs_node, &wq->pwqs);
@@ -3694,7 +3694,7 @@ static void wq_unbind_fn(struct work_struct *work)
 	struct worker *worker;
 	int i;
 
-	for_each_std_worker_pool(pool, cpu) {
+	for_each_cpu_worker_pool(pool, cpu) {
 		WARN_ON_ONCE(cpu != smp_processor_id());
 
 		mutex_lock(&pool->assoc_mutex);
@@ -3737,7 +3737,7 @@ static void wq_unbind_fn(struct work_struct *work)
 	 * unbound chain execution of pending work items if other workers
 	 * didn't already.
 	 */
-	for_each_std_worker_pool(pool, cpu)
+	for_each_cpu_worker_pool(pool, cpu)
 		atomic_set(&pool->nr_running, 0);
 }
 
@@ -3754,7 +3754,7 @@ static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,
 
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_UP_PREPARE:
-		for_each_std_worker_pool(pool, cpu) {
+		for_each_cpu_worker_pool(pool, cpu) {
 			struct worker *worker;
 
 			if (pool->nr_workers)
@@ -3772,7 +3772,7 @@ static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,
 
 	case CPU_DOWN_FAILED:
 	case CPU_ONLINE:
-		for_each_std_worker_pool(pool, cpu) {
+		for_each_cpu_worker_pool(pool, cpu) {
 			mutex_lock(&pool->assoc_mutex);
 			spin_lock_irq(&pool->lock);
 
@@ -4012,7 +4012,7 @@ static int __init init_workqueues(void)
 		struct worker_pool *pool;
 
 		i = 0;
-		for_each_std_worker_pool(pool, cpu) {
+		for_each_cpu_worker_pool(pool, cpu) {
 			BUG_ON(init_worker_pool(pool));
 			pool->cpu = cpu;
 			cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
@@ -4027,7 +4027,7 @@ static int __init init_workqueues(void)
 	for_each_online_cpu(cpu) {
 		struct worker_pool *pool;
 
-		for_each_std_worker_pool(pool, cpu) {
+		for_each_cpu_worker_pool(pool, cpu) {
 			struct worker *worker;
 
 			pool->flags &= ~POOL_DISASSOCIATED;