author		Tejun Heo <tj@kernel.org>	2013-01-24 14:01:34 -0500
committer	Tejun Heo <tj@kernel.org>	2013-01-24 14:01:34 -0500
commit		e6e380ed92555533740d5f670640f6f1868132de (patch)
tree		fd24f4293e1c6fa9ab728c59ddc25d26146fd98e /kernel/workqueue.c
parent		a60dc39c016a65bfdbd05c43b3707962d5ed04c7 (diff)
workqueue: rename nr_running variables
Rename per-cpu and unbound nr_running variables such that they match
the pool variables.

This patch doesn't introduce any functional changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
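After the rename, each pool array has a like-named counter array:
cpu_std_worker_pools pairs with cpu_std_pool_nr_running, and
unbound_std_worker_pools pairs with unbound_std_pool_nr_running. As a
rough illustration of the lookup performed by get_pool_nr_running()
(second hunk below), here is a minimal userspace C model. It is a
sketch, not kernel code: NCPUS and the direct (cpu, idx) parameters are
stand-ins, and the real accessor takes a struct worker_pool * and
derives the CPU and priority index itself.

    #include <stdatomic.h>
    #include <stdio.h>

    #define NR_STD_WORKER_POOLS 2    /* normal and highpri, as in this era */
    #define NCPUS 4                  /* stand-in for the number of CPUs */
    #define WORK_CPU_UNBOUND NCPUS   /* stand-in sentinel for "no CPU" */

    /* Model of DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t [NR_STD_WORKER_POOLS],
     * cpu_std_pool_nr_running): one counter array per CPU, plus one shared
     * array for the unbound pools. */
    static atomic_int cpu_std_pool_nr_running[NCPUS][NR_STD_WORKER_POOLS];
    static atomic_int unbound_std_pool_nr_running[NR_STD_WORKER_POOLS];

    /* Mirrors the branch in get_pool_nr_running() after the rename. */
    static atomic_int *get_pool_nr_running(unsigned int cpu, int idx)
    {
        if (cpu != WORK_CPU_UNBOUND)
            return &cpu_std_pool_nr_running[cpu][idx];
        else
            return &unbound_std_pool_nr_running[idx];
    }

    int main(void)
    {
        atomic_fetch_add(get_pool_nr_running(1, 0), 1);  /* bound, CPU 1 */
        printf("cpu1/normal: %d, unbound/normal: %d\n",
               atomic_load(get_pool_nr_running(1, 0)),
               atomic_load(get_pool_nr_running(WORK_CPU_UNBOUND, 0)));
        return 0;
    }

The rename does not change the underlying design: bound pools keep
per-CPU counters for concurrency management, while the unbound pools'
counters stay at zero (note the "always 0" initializer in the first
hunk), since unbound workers are excluded from concurrency management.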
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	9
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 224580f7459c..db8d4b7471ac 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -423,14 +423,15 @@ static bool workqueue_freezing;		/* W: have wqs started freezing? */
  */
 static DEFINE_PER_CPU(struct worker_pool [NR_STD_WORKER_POOLS],
 		      cpu_std_worker_pools);
-static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, pool_nr_running[NR_STD_WORKER_POOLS]);
+static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t [NR_STD_WORKER_POOLS],
+				     cpu_std_pool_nr_running);
 
 /*
  * Standard worker pools and nr_running counter for unbound CPU.  The pools
  * have POOL_DISASSOCIATED set, and all workers have WORKER_UNBOUND set.
  */
 static struct worker_pool unbound_std_worker_pools[NR_STD_WORKER_POOLS];
-static atomic_t unbound_pool_nr_running[NR_STD_WORKER_POOLS] = {
+static atomic_t unbound_std_pool_nr_running[NR_STD_WORKER_POOLS] = {
 	[0 ... NR_STD_WORKER_POOLS - 1] = ATOMIC_INIT(0),	/* always 0 */
 };
 
@@ -488,9 +489,9 @@ static atomic_t *get_pool_nr_running(struct worker_pool *pool)
 	int idx = std_worker_pool_pri(pool);
 
 	if (cpu != WORK_CPU_UNBOUND)
-		return &per_cpu(pool_nr_running, cpu)[idx];
+		return &per_cpu(cpu_std_pool_nr_running, cpu)[idx];
 	else
-		return &unbound_pool_nr_running[idx];
+		return &unbound_std_pool_nr_running[idx];
 }
 
 static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,