author	Tejun Heo <tj@kernel.org>	2013-01-24 14:01:33 -0500
committer	Tejun Heo <tj@kernel.org>	2013-01-24 14:01:33 -0500
commit	ec22ca5eab0bd225588c69ccd06b16504cb05adf (patch)
tree	3282a2b587235879c3f2d286896a003900ab6563 /kernel/workqueue.c
parent	c9e7cf273fa1876dee8effdb201a6f65eefab3a7 (diff)
workqueue: move global_cwq->cpu to worker_pool
Move gcwq->cpu to pool->cpu. This introduces a couple places where gcwq->pools[0].cpu is used. These will soon go away as gcwq is further reduced.

This is part of an effort to remove global_cwq and make worker_pool the top level abstraction, which in turn will help implementing worker pools with user-specified attributes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
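As a quick orientation before the diff, here is a minimal standalone sketch of the shape of this change. The struct definitions are simplified and the helper names pool_cpu()/gcwq_cpu() are illustrative only, not part of the patch: each worker_pool now carries its own cpu field, accesses go through pool->cpu instead of pool->gcwq->cpu, and the few call sites that only have the gcwq at hand read pools[0].cpu, which works because all pools of a gcwq share the same CPU.

struct global_cwq;				/* forward declaration */

struct worker_pool {
	struct global_cwq	*gcwq;		/* owning gcwq, still present for now */
	unsigned int		cpu;		/* associated CPU, moved here from global_cwq */
	/* ... other fields elided ... */
};

struct global_cwq {
	/* unsigned int cpu;  -- removed by this patch */
	struct worker_pool	pools[2];	/* stands in for NR_STD_WORKER_POOLS */
};

/* Before: callers chased pool->gcwq->cpu; after: they simply read pool->cpu. */
static unsigned int pool_cpu(struct worker_pool *pool)		/* illustrative helper */
{
	return pool->cpu;
}

/* Code that only has the gcwq temporarily goes through pools[0],
 * since every pool of a gcwq is associated with the same CPU. */
static unsigned int gcwq_cpu(struct global_cwq *gcwq)		/* illustrative helper */
{
	return gcwq->pools[0].cpu;
}

Both helpers are hypothetical; in the patch itself the accesses are written inline at each call site, as the diff below shows.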
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	42
1 file changed, 21 insertions, 21 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 99c30116d291..366132bd226f 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -124,6 +124,7 @@ enum {
 
 struct worker_pool {
 	struct global_cwq	*gcwq;		/* I: the owning gcwq */
+	unsigned int		cpu;		/* I: the associated cpu */
 	int			id;		/* I: pool ID */
 	unsigned int		flags;		/* X: flags */
 
@@ -152,7 +153,6 @@ struct worker_pool {
  */
 struct global_cwq {
 	spinlock_t		lock;		/* the gcwq lock */
-	unsigned int		cpu;		/* I: the associated cpu */
 
 	struct worker_pool	pools[NR_STD_WORKER_POOLS];
 						/* normal and highpri pools */
@@ -489,7 +489,7 @@ static struct worker_pool *worker_pool_by_id(int pool_id)
 
 static atomic_t *get_pool_nr_running(struct worker_pool *pool)
 {
-	int cpu = pool->gcwq->cpu;
+	int cpu = pool->cpu;
 	int idx = std_worker_pool_pri(pool);
 
 	if (cpu != WORK_CPU_UNBOUND)
@@ -764,7 +764,7 @@ void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
 	struct worker *worker = kthread_data(task);
 
 	if (!(worker->flags & WORKER_NOT_RUNNING)) {
-		WARN_ON_ONCE(worker->pool->gcwq->cpu != cpu);
+		WARN_ON_ONCE(worker->pool->cpu != cpu);
 		atomic_inc(get_pool_nr_running(worker->pool));
 	}
 }
@@ -1278,7 +1278,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 	}
 
 	/* gcwq determined, get cwq and queue */
-	cwq = get_cwq(gcwq->cpu, wq);
+	cwq = get_cwq(gcwq->pools[0].cpu, wq);
 	trace_workqueue_queue_work(req_cpu, cwq, work);
 
 	if (WARN_ON(!list_empty(&work->entry))) {
@@ -1385,20 +1385,20 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
 
 	/*
 	 * This stores cwq for the moment, for the timer_fn.  Note that the
-	 * work's gcwq is preserved to allow reentrance detection for
+	 * work's pool is preserved to allow reentrance detection for
 	 * delayed works.
 	 */
 	if (!(wq->flags & WQ_UNBOUND)) {
-		struct global_cwq *gcwq = get_work_gcwq(work);
+		struct worker_pool *pool = get_work_pool(work);
 
 		/*
-		 * If we cannot get the last gcwq from @work directly,
+		 * If we cannot get the last pool from @work directly,
 		 * select the last CPU such that it avoids unnecessarily
 		 * triggering non-reentrancy check in __queue_work().
 		 */
 		lcpu = cpu;
-		if (gcwq)
-			lcpu = gcwq->cpu;
+		if (pool)
+			lcpu = pool->cpu;
 		if (lcpu == WORK_CPU_UNBOUND)
 			lcpu = raw_smp_processor_id();
 	} else {
@@ -1619,14 +1619,14 @@ __acquires(&gcwq->lock)
 	 * against POOL_DISASSOCIATED.
 	 */
 	if (!(pool->flags & POOL_DISASSOCIATED))
-		set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu));
+		set_cpus_allowed_ptr(task, get_cpu_mask(pool->cpu));
 
 	spin_lock_irq(&gcwq->lock);
 	if (pool->flags & POOL_DISASSOCIATED)
 		return false;
-	if (task_cpu(task) == gcwq->cpu &&
+	if (task_cpu(task) == pool->cpu &&
 	    cpumask_equal(&current->cpus_allowed,
-			  get_cpu_mask(gcwq->cpu)))
+			  get_cpu_mask(pool->cpu)))
 		return true;
 	spin_unlock_irq(&gcwq->lock);
 
@@ -1747,7 +1747,7 @@ static void rebind_workers(struct global_cwq *gcwq)
 		else
 			wq = system_wq;
 
-		insert_work(get_cwq(gcwq->cpu, wq), rebind_work,
+		insert_work(get_cwq(pool->cpu, wq), rebind_work,
 			    worker->scheduled.next,
 			    work_color_to_flags(WORK_NO_COLOR));
 	}
@@ -1806,10 +1806,10 @@ static struct worker *create_worker(struct worker_pool *pool)
 	worker->pool = pool;
 	worker->id = id;
 
-	if (gcwq->cpu != WORK_CPU_UNBOUND)
+	if (pool->cpu != WORK_CPU_UNBOUND)
 		worker->task = kthread_create_on_node(worker_thread,
-					worker, cpu_to_node(gcwq->cpu),
-					"kworker/%u:%d%s", gcwq->cpu, id, pri);
+					worker, cpu_to_node(pool->cpu),
+					"kworker/%u:%d%s", pool->cpu, id, pri);
 	else
 		worker->task = kthread_create(worker_thread, worker,
 					      "kworker/u:%d%s", id, pri);
@@ -1829,7 +1829,7 @@ static struct worker *create_worker(struct worker_pool *pool)
 	 * online, make sure every worker has %PF_THREAD_BOUND set.
 	 */
 	if (!(pool->flags & POOL_DISASSOCIATED)) {
-		kthread_bind(worker->task, gcwq->cpu);
+		kthread_bind(worker->task, pool->cpu);
 	} else {
 		worker->task->flags |= PF_THREAD_BOUND;
 		worker->flags |= WORKER_UNBOUND;
@@ -1936,7 +1936,7 @@ static bool send_mayday(struct work_struct *work)
 		return false;
 
 	/* mayday mayday mayday */
-	cpu = cwq->pool->gcwq->cpu;
+	cpu = cwq->pool->cpu;
 	/* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */
 	if (cpu == WORK_CPU_UNBOUND)
 		cpu = 0;
@@ -2193,7 +2193,7 @@ __acquires(&gcwq->lock)
 	 */
 	WARN_ON_ONCE(!(worker->flags & WORKER_UNBOUND) &&
 		     !(pool->flags & POOL_DISASSOCIATED) &&
-		     raw_smp_processor_id() != gcwq->cpu);
+		     raw_smp_processor_id() != pool->cpu);
 
 	/*
 	 * A single work shouldn't be executed concurrently by
@@ -3553,7 +3553,7 @@ static void gcwq_unbind_fn(struct work_struct *work)
 	struct hlist_node *pos;
 	int i;
 
-	BUG_ON(gcwq->cpu != smp_processor_id());
+	BUG_ON(gcwq->pools[0].cpu != smp_processor_id());
 
 	gcwq_claim_assoc_and_lock(gcwq);
 
@@ -3860,10 +3860,10 @@ static int __init init_workqueues(void)
 		struct worker_pool *pool;
 
 		spin_lock_init(&gcwq->lock);
-		gcwq->cpu = cpu;
 
 		for_each_worker_pool(pool, gcwq) {
 			pool->gcwq = gcwq;
+			pool->cpu = cpu;
 			pool->flags |= POOL_DISASSOCIATED;
 			INIT_LIST_HEAD(&pool->worklist);
 			INIT_LIST_HEAD(&pool->idle_list);