author		Tejun Heo <tj@kernel.org>	2013-01-24 14:01:33 -0500
committer	Tejun Heo <tj@kernel.org>	2013-01-24 14:01:33 -0500
commit		e34cdddb03bdfe98f20c58934fd4c45019f13ae5 (patch)
tree		3c98a24a407e1f2794e06a48961a2b9da8e208ae /kernel
parent		e2905b29122173b72b612c962b138e3fa07476b8 (diff)
workqueue: use std_ prefix for the standard per-cpu pools
There are currently two worker pools per cpu (including the unbound cpu)
and they are the only pools in use.  A new class of pools is scheduled to
be added, and some pool-related APIs will be added in between.  Call the
existing pools the standard pools and prefix them with std_.  Do this
early so that the new APIs can use the std_ prefix from the beginning.

This patch doesn't introduce any functional difference.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/workqueue.c	22
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index fe0745f54fcd..634251572fdd 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -80,7 +80,7 @@ enum {
 	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_UNBOUND |
 				  WORKER_CPU_INTENSIVE,
 
-	NR_WORKER_POOLS		= 2,		/* # worker pools per gcwq */
+	NR_STD_WORKER_POOLS	= 2,		/* # standard pools per cpu */
 
 	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */
 
@@ -156,7 +156,7 @@ struct global_cwq {
 	DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
 						/* L: hash of busy workers */
 
-	struct worker_pool	pools[NR_WORKER_POOLS];
+	struct worker_pool	pools[NR_STD_WORKER_POOLS];
 						/* normal and highpri pools */
 } ____cacheline_aligned_in_smp;
 
@@ -255,7 +255,7 @@ EXPORT_SYMBOL_GPL(system_freezable_wq);
 
 #define for_each_worker_pool(pool, gcwq)				\
 	for ((pool) = &(gcwq)->pools[0];				\
-	     (pool) < &(gcwq)->pools[NR_WORKER_POOLS]; (pool)++)
+	     (pool) < &(gcwq)->pools[NR_STD_WORKER_POOLS]; (pool)++)
 
 #define for_each_busy_worker(worker, i, pos, gcwq)			\
 	hash_for_each(gcwq->busy_hash, i, pos, worker, hentry)
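
The for_each_worker_pool() macro above walks the fixed-size pools[] array by
pointer rather than by index.  A minimal userspace sketch of the same idiom,
with illustrative struct contents (not the kernel's actual fields):

	#include <stdio.h>

	#define NR_STD_WORKER_POOLS 2

	struct worker_pool { int nr_workers; };
	struct gcwq { struct worker_pool pools[NR_STD_WORKER_POOLS]; };

	/* Walk &pools[0] .. &pools[NR_STD_WORKER_POOLS - 1] by pointer. */
	#define for_each_worker_pool(pool, gcwq)			\
		for ((pool) = &(gcwq)->pools[0];			\
		     (pool) < &(gcwq)->pools[NR_STD_WORKER_POOLS]; (pool)++)

	int main(void)
	{
		struct gcwq g = { .pools = { { 4 }, { 2 } } };
		struct worker_pool *pool;

		for_each_worker_pool(pool, &g)
			printf("pool %ld: %d workers\n",
			       (long)(pool - g.pools), pool->nr_workers);
		return 0;
	}

Because the loop bound is the one-past-the-end address, the macro needs no
separate counter and keeps working unchanged if NR_STD_WORKER_POOLS grows.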
@@ -436,7 +436,7 @@ static bool workqueue_freezing; /* W: have wqs started freezing? */
  * try_to_wake_up().  Put it in a separate cacheline.
  */
 static DEFINE_PER_CPU(struct global_cwq, global_cwq);
-static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, pool_nr_running[NR_WORKER_POOLS]);
+static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, pool_nr_running[NR_STD_WORKER_POOLS]);
 
 /*
  * Global cpu workqueue and nr_running counter for unbound gcwq.  The
@@ -444,14 +444,14 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, pool_nr_running[NR_WORKER_POOLS])
  * workers have WORKER_UNBOUND set.
  */
 static struct global_cwq unbound_global_cwq;
-static atomic_t unbound_pool_nr_running[NR_WORKER_POOLS] = {
-	[0 ... NR_WORKER_POOLS - 1]	= ATOMIC_INIT(0),	/* always 0 */
+static atomic_t unbound_pool_nr_running[NR_STD_WORKER_POOLS] = {
+	[0 ... NR_STD_WORKER_POOLS - 1]	= ATOMIC_INIT(0),	/* always 0 */
 };
 
 static int worker_thread(void *__worker);
 static unsigned int work_cpu(struct work_struct *work);
 
-static int worker_pool_pri(struct worker_pool *pool)
+static int std_worker_pool_pri(struct worker_pool *pool)
 {
 	return pool - pool->gcwq->pools;
 }
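
std_worker_pool_pri() recovers a pool's priority from its address alone:
subtracting the base of the containing pools[] array yields 0 for the normal
pool and 1 for the highpri pool.  The [0 ... NR_STD_WORKER_POOLS - 1]
initializer above is GCC's designated-range extension, which fills the whole
array explicitly.  A standalone sketch of both idioms (field layout here is
illustrative):

	#include <stdio.h>

	#define NR_STD_WORKER_POOLS 2

	struct gcwq;
	struct worker_pool { struct gcwq *gcwq; };
	struct gcwq { struct worker_pool pools[NR_STD_WORKER_POOLS]; };

	/* GCC designated-range initializer, as in unbound_pool_nr_running. */
	static int nr_running[NR_STD_WORKER_POOLS] = {
		[0 ... NR_STD_WORKER_POOLS - 1] = 0,
	};

	/* Position in pools[] is the priority: 0 = normal, 1 = highpri. */
	static int std_worker_pool_pri(struct worker_pool *pool)
	{
		return pool - pool->gcwq->pools;
	}

	int main(void)
	{
		struct gcwq g = { .pools = { { &g }, { &g } } };

		printf("pri(pools[0]) = %d\n", std_worker_pool_pri(&g.pools[0]));
		printf("pri(pools[1]) = %d\n", std_worker_pool_pri(&g.pools[1]));
		printf("nr_running[1] = %d\n", nr_running[1]);
		return 0;
	}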
@@ -467,7 +467,7 @@ static struct global_cwq *get_gcwq(unsigned int cpu)
 static atomic_t *get_pool_nr_running(struct worker_pool *pool)
 {
 	int cpu = pool->gcwq->cpu;
-	int idx = worker_pool_pri(pool);
+	int idx = std_worker_pool_pri(pool);
 
 	if (cpu != WORK_CPU_UNBOUND)
 		return &per_cpu(pool_nr_running, cpu)[idx];
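
get_pool_nr_running() then uses that index to pick the right counter: per-cpu
pools get a slot in that CPU's pool_nr_running array, while unbound pools all
map to the permanently-zero unbound array.  A simplified userspace sketch of
the dispatch; NR_CPUS is illustrative, and WORK_CPU_UNBOUND follows the
kernel's convention of an out-of-range cpu number as the sentinel:

	#include <stdio.h>

	#define NR_STD_WORKER_POOLS	2
	#define NR_CPUS			4
	#define WORK_CPU_UNBOUND	NR_CPUS	/* out-of-range sentinel */

	/* Stand-ins for the kernel's per-cpu and unbound counter arrays. */
	static int pool_nr_running[NR_CPUS][NR_STD_WORKER_POOLS];
	static int unbound_pool_nr_running[NR_STD_WORKER_POOLS]; /* always 0 */

	static int *get_pool_nr_running(int cpu, int idx)
	{
		if (cpu != WORK_CPU_UNBOUND)
			return &pool_nr_running[cpu][idx];
		else
			return &unbound_pool_nr_running[idx];
	}

	int main(void)
	{
		*get_pool_nr_running(1, 0) += 1;  /* bump cpu 1, normal pool */

		printf("cpu1 normal: %d, unbound normal: %d\n",
		       *get_pool_nr_running(1, 0),
		       *get_pool_nr_running(WORK_CPU_UNBOUND, 0));
		return 0;
	}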
@@ -1688,7 +1688,7 @@ static void rebind_workers(struct global_cwq *gcwq)
 	 * wq doesn't really matter but let's keep @worker->pool
 	 * and @cwq->pool consistent for sanity.
 	 */
-	if (worker_pool_pri(worker->pool))
+	if (std_worker_pool_pri(worker->pool))
 		wq = system_highpri_wq;
 	else
 		wq = system_wq;
@@ -1731,7 +1731,7 @@ static struct worker *alloc_worker(void)
 static struct worker *create_worker(struct worker_pool *pool)
 {
 	struct global_cwq *gcwq = pool->gcwq;
-	const char *pri = worker_pool_pri(pool) ? "H" : "";
+	const char *pri = std_worker_pool_pri(pool) ? "H" : "";
 	struct worker *worker = NULL;
 	int id = -1;
 
@@ -1761,7 +1761,7 @@ static struct worker *create_worker(struct worker_pool *pool)
 	if (IS_ERR(worker->task))
 		goto fail;
 
-	if (worker_pool_pri(pool))
+	if (std_worker_pool_pri(pool))
 		set_user_nice(worker->task, HIGHPRI_NICE_LEVEL);
 
 	/*
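
The last three hunks show the rename at its call sites: a nonzero
std_worker_pool_pri() marks a highpri pool, whose workers get an "H" name
suffix and a raised scheduling priority (HIGHPRI_NICE_LEVEL was -20 in
kernels of this era).  A hedged userspace sketch of that selection, with the
name format borrowed from the kernel's kworker threads:

	#include <stdio.h>

	#define HIGHPRI_NICE_LEVEL	-20	/* kernel value at the time */

	/* Mirror create_worker(): index 0 is normal, nonzero is highpri. */
	static void describe_worker(int cpu, int id, int pool_pri)
	{
		const char *pri = pool_pri ? "H" : "";
		int nice = pool_pri ? HIGHPRI_NICE_LEVEL : 0;

		printf("kworker/%d:%d%s nice=%d\n", cpu, id, pri, nice);
	}

	int main(void)
	{
		describe_worker(0, 0, 0);	/* normal:  kworker/0:0  */
		describe_worker(0, 1, 1);	/* highpri: kworker/0:1H */
		return 0;
	}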