author	Tejun Heo <tj@kernel.org>	2013-01-24 14:01:33 -0500
committer	Tejun Heo <tj@kernel.org>	2013-01-24 14:01:33 -0500
commit	a1056305fa98c7e13b38718658a8b07a5d926460 (patch)
tree	d20ce512fdd0e3f07d972d62ecc9cb357c3db69e /kernel
parent	94cf58bb2907bd2702fce2266955e29ab5261f53 (diff)
workqueue: make freezing/thawing per-pool
Instead of holding locks from both pools and then processing the pools together, make freezing/thawing per-pool: grab the lock of one pool, process it, release it and then proceed to the next pool.

While this patch changes processing order across pools, order within each pool remains the same. As each pool is independent, this shouldn't break anything.

This is part of an effort to remove global_cwq and make worker_pool the top level abstraction, which in turn will help implement worker pools with user-specified attributes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
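As an illustration of the restructuring the message describes, here is a minimal, self-contained C sketch of the two locking patterns; pool_t, NPOOLS, and process_pool() are hypothetical stand-ins for the kernel's worker_pool machinery, not actual kernel API:

	#include <pthread.h>

	#define NPOOLS 2

	typedef struct {
		pthread_mutex_t lock;
		/* ... per-pool state ... */
	} pool_t;

	static pool_t pools[NPOOLS] = {
		{ PTHREAD_MUTEX_INITIALIZER },
		{ PTHREAD_MUTEX_INITIALIZER },
	};

	static void process_pool(pool_t *pool)
	{
		(void)pool;	/* freeze or thaw this pool's state */
	}

	/* old pattern: lock every pool up front, process together, unlock all */
	static void freeze_pools_together(void)
	{
		for (int i = 0; i < NPOOLS; i++)
			pthread_mutex_lock(&pools[i].lock);
		for (int i = 0; i < NPOOLS; i++)
			process_pool(&pools[i]);
		for (int i = 0; i < NPOOLS; i++)
			pthread_mutex_unlock(&pools[i].lock);
	}

	/* new pattern: grab one pool's lock, process it, release it, move on;
	 * at most one lock is held at a time, order within a pool is unchanged */
	static void freeze_pools_one_by_one(void)
	{
		for (int i = 0; i < NPOOLS; i++) {
			pthread_mutex_lock(&pools[i].lock);
			process_pool(&pools[i]);
			pthread_mutex_unlock(&pools[i].lock);
		}
	}

	int main(void)
	{
		freeze_pools_together();
		freeze_pools_one_by_one();
		return 0;
	}

Because each pool is independent, processing them one at a time only changes ordering across pools, never within one, which is why the change is safe.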
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/workqueue.c	46
1 file changed, 20 insertions(+), 26 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index fd400f8c9514..b609bfba134b 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3686,25 +3686,22 @@ void freeze_workqueues_begin(void)
 		struct worker_pool *pool;
 		struct workqueue_struct *wq;
 
-		local_irq_disable();
-
 		for_each_worker_pool(pool, gcwq) {
-			spin_lock_nested(&pool->lock, pool - gcwq->pools);
+			spin_lock_irq(&pool->lock);
 
 			WARN_ON_ONCE(pool->flags & POOL_FREEZING);
 			pool->flags |= POOL_FREEZING;
-		}
 
-		list_for_each_entry(wq, &workqueues, list) {
-			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+			list_for_each_entry(wq, &workqueues, list) {
+				struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 
-			if (cwq && wq->flags & WQ_FREEZABLE)
-				cwq->max_active = 0;
-		}
+				if (cwq && cwq->pool == pool &&
+				    (wq->flags & WQ_FREEZABLE))
+					cwq->max_active = 0;
+			}
 
-		for_each_worker_pool(pool, gcwq)
-			spin_unlock(&pool->lock);
-		local_irq_enable();
+			spin_unlock_irq(&pool->lock);
+		}
 	}
 
 	spin_unlock(&workqueue_lock);
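A detail worth noting in this hunk: once only one pool lock is held at a time, the explicit local_irq_disable()/local_irq_enable() bracketing and the spin_lock_nested() lockdep annotation become unnecessary, because spin_lock_irq() disables interrupts and takes the lock, and spin_unlock_irq() undoes both, around each pool's own critical section. A schematic of the equivalence (illustrative kernel-style fragment, not a standalone buildable unit):

	/* before: irqs off across all pools, one lockdep subclass per pool */
	local_irq_disable();
	for_each_worker_pool(pool, gcwq)
		spin_lock_nested(&pool->lock, pool - gcwq->pools);
	/* ... touch every pool ... */
	for_each_worker_pool(pool, gcwq)
		spin_unlock(&pool->lock);
	local_irq_enable();

	/* after: each iteration handles irqs itself */
	for_each_worker_pool(pool, gcwq) {
		spin_lock_irq(&pool->lock);	/* disable irqs + take the lock */
		/* ... touch only this pool ... */
		spin_unlock_irq(&pool->lock);	/* drop the lock + enable irqs */
	}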
@@ -3779,30 +3776,27 @@ void thaw_workqueues(void)
 		struct worker_pool *pool;
 		struct workqueue_struct *wq;
 
-		local_irq_disable();
-
 		for_each_worker_pool(pool, gcwq) {
-			spin_lock_nested(&pool->lock, pool - gcwq->pools);
+			spin_lock_irq(&pool->lock);
 
 			WARN_ON_ONCE(!(pool->flags & POOL_FREEZING));
 			pool->flags &= ~POOL_FREEZING;
-		}
 
-		list_for_each_entry(wq, &workqueues, list) {
-			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+			list_for_each_entry(wq, &workqueues, list) {
+				struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 
-			if (!cwq || !(wq->flags & WQ_FREEZABLE))
-				continue;
+				if (!cwq || cwq->pool != pool ||
+				    !(wq->flags & WQ_FREEZABLE))
+					continue;
 
-			/* restore max_active and repopulate worklist */
-			cwq_set_max_active(cwq, wq->saved_max_active);
-		}
+				/* restore max_active and repopulate worklist */
+				cwq_set_max_active(cwq, wq->saved_max_active);
+			}
 
-		for_each_worker_pool(pool, gcwq) {
 			wake_up_worker(pool);
-			spin_unlock(&pool->lock);
-		}
-		local_irq_enable();
+
+			spin_unlock_irq(&pool->lock);
+		}
 	}
 
 	workqueue_freezing = false;
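Both hunks also add a pool-membership test (cwq->pool == pool on freeze, cwq->pool != pool on thaw). Since the workqueue list is now walked once per pool instead of once per gcwq, each cwq may only be touched while the lock of the pool it actually belongs to is held; cwqs bound to the other pool of the same CPU are skipped and handled on that pool's iteration. Schematically (illustrative fragment based on the thaw hunk above):

	for_each_worker_pool(pool, gcwq) {
		spin_lock_irq(&pool->lock);
		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

			/* only touch cwqs bound to the pool whose lock we hold */
			if (!cwq || cwq->pool != pool || !(wq->flags & WQ_FREEZABLE))
				continue;
			cwq_set_max_active(cwq, wq->saved_max_active);
		}
		wake_up_worker(pool);
		spin_unlock_irq(&pool->lock);
	}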