aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
author	Tejun Heo <tj@kernel.org>	2013-03-12 14:29:58 -0400
committer	Tejun Heo <tj@kernel.org>	2013-03-12 14:29:58 -0400
commit	171169695555831e8cc41dbc1783700868631ea5 (patch)
tree	62e774a7298eadb486a7148796b2e8cd627d6e77 /kernel
parent	49e3cf44df0663a521aa71e7667c52a9dbd0fce9 (diff)
workqueue: introduce for_each_pool()
With the scheduled unbound pools with custom attributes, there will be multiple unbound pools, so it wouldn't be able to use for_each_wq_cpu() + for_each_std_worker_pool() to iterate through all pools. Introduce for_each_pool() which iterates through all pools using worker_pool_idr and use it instead of for_each_wq_cpu() + for_each_std_worker_pool() combination in freeze_workqueues_begin(). Signed-off-by: Tejun Heo <tj@kernel.org> Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/workqueue.c	36
1 file changed, 21 insertions(+), 15 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 2db1532b09dc..55494e3f9f3b 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -295,6 +295,14 @@ static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
295 (cpu) = __next_wq_cpu((cpu), cpu_online_mask, 3)) 295 (cpu) = __next_wq_cpu((cpu), cpu_online_mask, 3))
296 296
297/** 297/**
298 * for_each_pool - iterate through all worker_pools in the system
299 * @pool: iteration cursor
300 * @id: integer used for iteration
301 */
302#define for_each_pool(pool, id) \
303 idr_for_each_entry(&worker_pool_idr, pool, id)
304
305/**
298 * for_each_pwq - iterate through all pool_workqueues of the specified workqueue 306 * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
299 * @pwq: iteration cursor 307 * @pwq: iteration cursor
300 * @wq: the target workqueue 308 * @wq: the target workqueue
@@ -3586,33 +3594,31 @@ EXPORT_SYMBOL_GPL(work_on_cpu);
3586 */ 3594 */
3587void freeze_workqueues_begin(void) 3595void freeze_workqueues_begin(void)
3588{ 3596{
3589 unsigned int cpu; 3597 struct worker_pool *pool;
3598 int id;
3590 3599
3591 spin_lock_irq(&workqueue_lock); 3600 spin_lock_irq(&workqueue_lock);
3592 3601
3593 WARN_ON_ONCE(workqueue_freezing); 3602 WARN_ON_ONCE(workqueue_freezing);
3594 workqueue_freezing = true; 3603 workqueue_freezing = true;
3595 3604
3596 for_each_wq_cpu(cpu) { 3605 for_each_pool(pool, id) {
3597 struct worker_pool *pool;
3598 struct workqueue_struct *wq; 3606 struct workqueue_struct *wq;
3599 3607
3600 for_each_std_worker_pool(pool, cpu) { 3608 spin_lock(&pool->lock);
3601 spin_lock(&pool->lock);
3602
3603 WARN_ON_ONCE(pool->flags & POOL_FREEZING);
3604 pool->flags |= POOL_FREEZING;
3605 3609
3606 list_for_each_entry(wq, &workqueues, list) { 3610 WARN_ON_ONCE(pool->flags & POOL_FREEZING);
3607 struct pool_workqueue *pwq = get_pwq(cpu, wq); 3611 pool->flags |= POOL_FREEZING;
3608 3612
3609 if (pwq && pwq->pool == pool && 3613 list_for_each_entry(wq, &workqueues, list) {
3610 (wq->flags & WQ_FREEZABLE)) 3614 struct pool_workqueue *pwq = get_pwq(pool->cpu, wq);
3611 pwq->max_active = 0;
3612 }
3613 3615
3614 spin_unlock(&pool->lock); 3616 if (pwq && pwq->pool == pool &&
3617 (wq->flags & WQ_FREEZABLE))
3618 pwq->max_active = 0;
3615 } 3619 }
3620
3621 spin_unlock(&pool->lock);
3616 } 3622 }
3617 3623
3618 spin_unlock_irq(&workqueue_lock); 3624 spin_unlock_irq(&workqueue_lock);