author		Tejun Heo <tj@kernel.org>	2013-01-24 14:01:34 -0500
committer	Tejun Heo <tj@kernel.org>	2013-01-24 14:01:34 -0500
commit		38db41d984f17938631420ff78160dda7f182d24 (patch)
tree		4591d50ecb7fe9749dc5d48b735d3f43aa0b80a7 /kernel
parent		a1056305fa98c7e13b38718658a8b07a5d926460 (diff)
workqueue: replace for_each_worker_pool() with for_each_std_worker_pool()
for_each_std_worker_pool() takes @cpu instead of @gcwq.
This is part of an effort to remove global_cwq and make worker_pool
the top-level abstraction, which in turn will help implement worker
pools with user-specified attributes.
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
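The shape of the conversion at each call site below is mechanical: the
iterator now takes @cpu directly and performs the get_gcwq() lookup
itself, so callers can drop their local global_cwq variable. A minimal
before/after sketch of a caller, where process_pool() is a hypothetical
stand-in for whatever the loop body does:

	/* Before: the caller had to look up the global_cwq first. */
	struct global_cwq *gcwq = get_gcwq(cpu);
	struct worker_pool *pool;

	for_each_worker_pool(pool, gcwq)
		process_pool(pool);	/* process_pool() is hypothetical */

	/* After: iterate the standard worker pools of @cpu directly. */
	struct worker_pool *pool;

	for_each_std_worker_pool(pool, cpu)
		process_pool(pool);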
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/workqueue.c	39
1 file changed, 17 insertions, 22 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index b609bfba134b..bd639c185da1 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -250,9 +250,9 @@ EXPORT_SYMBOL_GPL(system_freezable_wq); | |||
250 | #define CREATE_TRACE_POINTS | 250 | #define CREATE_TRACE_POINTS |
251 | #include <trace/events/workqueue.h> | 251 | #include <trace/events/workqueue.h> |
252 | 252 | ||
253 | #define for_each_worker_pool(pool, gcwq) \ | 253 | #define for_each_std_worker_pool(pool, cpu) \ |
254 | for ((pool) = &(gcwq)->pools[0]; \ | 254 | for ((pool) = &get_gcwq((cpu))->pools[0]; \ |
255 | (pool) < &(gcwq)->pools[NR_STD_WORKER_POOLS]; (pool)++) | 255 | (pool) < &get_gcwq((cpu))->pools[NR_STD_WORKER_POOLS]; (pool)++) |
256 | 256 | ||
257 | #define for_each_busy_worker(worker, i, pos, pool) \ | 257 | #define for_each_busy_worker(worker, i, pos, pool) \ |
258 | hash_for_each(pool->busy_hash, i, pos, worker, hentry) | 258 | hash_for_each(pool->busy_hash, i, pos, worker, hentry) |
@@ -3500,14 +3500,14 @@ EXPORT_SYMBOL_GPL(work_busy); | |||
3500 | 3500 | ||
3501 | static void gcwq_unbind_fn(struct work_struct *work) | 3501 | static void gcwq_unbind_fn(struct work_struct *work) |
3502 | { | 3502 | { |
3503 | struct global_cwq *gcwq = get_gcwq(smp_processor_id()); | 3503 | int cpu = smp_processor_id(); |
3504 | struct worker_pool *pool; | 3504 | struct worker_pool *pool; |
3505 | struct worker *worker; | 3505 | struct worker *worker; |
3506 | struct hlist_node *pos; | 3506 | struct hlist_node *pos; |
3507 | int i; | 3507 | int i; |
3508 | 3508 | ||
3509 | for_each_worker_pool(pool, gcwq) { | 3509 | for_each_std_worker_pool(pool, cpu) { |
3510 | BUG_ON(pool->cpu != smp_processor_id()); | 3510 | BUG_ON(cpu != smp_processor_id()); |
3511 | 3511 | ||
3512 | mutex_lock(&pool->assoc_mutex); | 3512 | mutex_lock(&pool->assoc_mutex); |
3513 | spin_lock_irq(&pool->lock); | 3513 | spin_lock_irq(&pool->lock); |
@@ -3541,15 +3541,15 @@ static void gcwq_unbind_fn(struct work_struct *work) | |||
3541 | /* | 3541 | /* |
3542 | * Sched callbacks are disabled now. Zap nr_running. After this, | 3542 | * Sched callbacks are disabled now. Zap nr_running. After this, |
3543 | * nr_running stays zero and need_more_worker() and keep_working() | 3543 | * nr_running stays zero and need_more_worker() and keep_working() |
3544 | * are always true as long as the worklist is not empty. @gcwq now | 3544 | * are always true as long as the worklist is not empty. Pools on |
3545 | * behaves as unbound (in terms of concurrency management) gcwq | 3545 | * @cpu now behave as unbound (in terms of concurrency management) |
3546 | * which is served by workers tied to the CPU. | 3546 | * pools which are served by workers tied to the CPU. |
3547 | * | 3547 | * |
3548 | * On return from this function, the current worker would trigger | 3548 | * On return from this function, the current worker would trigger |
3549 | * unbound chain execution of pending work items if other workers | 3549 | * unbound chain execution of pending work items if other workers |
3550 | * didn't already. | 3550 | * didn't already. |
3551 | */ | 3551 | */ |
3552 | for_each_worker_pool(pool, gcwq) | 3552 | for_each_std_worker_pool(pool, cpu) |
3553 | atomic_set(get_pool_nr_running(pool), 0); | 3553 | atomic_set(get_pool_nr_running(pool), 0); |
3554 | } | 3554 | } |
3555 | 3555 | ||
@@ -3562,12 +3562,11 @@ static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb, | |||
3562 | void *hcpu) | 3562 | void *hcpu) |
3563 | { | 3563 | { |
3564 | unsigned int cpu = (unsigned long)hcpu; | 3564 | unsigned int cpu = (unsigned long)hcpu; |
3565 | struct global_cwq *gcwq = get_gcwq(cpu); | ||
3566 | struct worker_pool *pool; | 3565 | struct worker_pool *pool; |
3567 | 3566 | ||
3568 | switch (action & ~CPU_TASKS_FROZEN) { | 3567 | switch (action & ~CPU_TASKS_FROZEN) { |
3569 | case CPU_UP_PREPARE: | 3568 | case CPU_UP_PREPARE: |
3570 | for_each_worker_pool(pool, gcwq) { | 3569 | for_each_std_worker_pool(pool, cpu) { |
3571 | struct worker *worker; | 3570 | struct worker *worker; |
3572 | 3571 | ||
3573 | if (pool->nr_workers) | 3572 | if (pool->nr_workers) |
@@ -3585,7 +3584,7 @@ static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb, | |||
3585 | 3584 | ||
3586 | case CPU_DOWN_FAILED: | 3585 | case CPU_DOWN_FAILED: |
3587 | case CPU_ONLINE: | 3586 | case CPU_ONLINE: |
3588 | for_each_worker_pool(pool, gcwq) { | 3587 | for_each_std_worker_pool(pool, cpu) { |
3589 | mutex_lock(&pool->assoc_mutex); | 3588 | mutex_lock(&pool->assoc_mutex); |
3590 | spin_lock_irq(&pool->lock); | 3589 | spin_lock_irq(&pool->lock); |
3591 | 3590 | ||
@@ -3682,11 +3681,10 @@ void freeze_workqueues_begin(void) | |||
3682 | workqueue_freezing = true; | 3681 | workqueue_freezing = true; |
3683 | 3682 | ||
3684 | for_each_gcwq_cpu(cpu) { | 3683 | for_each_gcwq_cpu(cpu) { |
3685 | struct global_cwq *gcwq = get_gcwq(cpu); | ||
3686 | struct worker_pool *pool; | 3684 | struct worker_pool *pool; |
3687 | struct workqueue_struct *wq; | 3685 | struct workqueue_struct *wq; |
3688 | 3686 | ||
3689 | for_each_worker_pool(pool, gcwq) { | 3687 | for_each_std_worker_pool(pool, cpu) { |
3690 | spin_lock_irq(&pool->lock); | 3688 | spin_lock_irq(&pool->lock); |
3691 | 3689 | ||
3692 | WARN_ON_ONCE(pool->flags & POOL_FREEZING); | 3690 | WARN_ON_ONCE(pool->flags & POOL_FREEZING); |
@@ -3772,11 +3770,10 @@ void thaw_workqueues(void) | |||
3772 | goto out_unlock; | 3770 | goto out_unlock; |
3773 | 3771 | ||
3774 | for_each_gcwq_cpu(cpu) { | 3772 | for_each_gcwq_cpu(cpu) { |
3775 | struct global_cwq *gcwq = get_gcwq(cpu); | ||
3776 | struct worker_pool *pool; | 3773 | struct worker_pool *pool; |
3777 | struct workqueue_struct *wq; | 3774 | struct workqueue_struct *wq; |
3778 | 3775 | ||
3779 | for_each_worker_pool(pool, gcwq) { | 3776 | for_each_std_worker_pool(pool, cpu) { |
3780 | spin_lock_irq(&pool->lock); | 3777 | spin_lock_irq(&pool->lock); |
3781 | 3778 | ||
3782 | WARN_ON_ONCE(!(pool->flags & POOL_FREEZING)); | 3779 | WARN_ON_ONCE(!(pool->flags & POOL_FREEZING)); |
@@ -3818,11 +3815,10 @@ static int __init init_workqueues(void) | |||
3818 | 3815 | ||
3819 | /* initialize gcwqs */ | 3816 | /* initialize gcwqs */ |
3820 | for_each_gcwq_cpu(cpu) { | 3817 | for_each_gcwq_cpu(cpu) { |
3821 | struct global_cwq *gcwq = get_gcwq(cpu); | ||
3822 | struct worker_pool *pool; | 3818 | struct worker_pool *pool; |
3823 | 3819 | ||
3824 | for_each_worker_pool(pool, gcwq) { | 3820 | for_each_std_worker_pool(pool, cpu) { |
3825 | pool->gcwq = gcwq; | 3821 | pool->gcwq = get_gcwq(cpu); |
3826 | spin_lock_init(&pool->lock); | 3822 | spin_lock_init(&pool->lock); |
3827 | pool->cpu = cpu; | 3823 | pool->cpu = cpu; |
3828 | pool->flags |= POOL_DISASSOCIATED; | 3824 | pool->flags |= POOL_DISASSOCIATED; |
@@ -3847,10 +3843,9 @@ static int __init init_workqueues(void) | |||
3847 | 3843 | ||
3848 | /* create the initial worker */ | 3844 | /* create the initial worker */ |
3849 | for_each_online_gcwq_cpu(cpu) { | 3845 | for_each_online_gcwq_cpu(cpu) { |
3850 | struct global_cwq *gcwq = get_gcwq(cpu); | ||
3851 | struct worker_pool *pool; | 3846 | struct worker_pool *pool; |
3852 | 3847 | ||
3853 | for_each_worker_pool(pool, gcwq) { | 3848 | for_each_std_worker_pool(pool, cpu) { |
3854 | struct worker *worker; | 3849 | struct worker *worker; |
3855 | 3850 | ||
3856 | if (cpu != WORK_CPU_UNBOUND) | 3851 | if (cpu != WORK_CPU_UNBOUND) |