author		Tejun Heo <tj@kernel.org>	2013-03-12 14:29:58 -0400
committer	Tejun Heo <tj@kernel.org>	2013-03-12 14:29:58 -0400
commit		24b8a84718ed28a51b452881612c267ba3f2b263 (patch)
tree		af47f74e181a6a431aee517f149015f769b7ed0a /kernel/workqueue.c
parent		171169695555831e8cc41dbc1783700868631ea5 (diff)
workqueue: restructure pool / pool_workqueue iterations in freeze/thaw functions
The three freeze/thaw related functions - freeze_workqueues_begin(), freeze_workqueues_busy() and thaw_workqueues() - need to iterate through all pool_workqueues of all freezable workqueues. They did so by first iterating pools and then visiting all pwqs (pool_workqueues) of all workqueues, processing each pwq whose pwq->pool matched the current pool. This is rather backwards and was done this way partly because workqueue didn't have fitting iteration helpers and partly to reduce the number of lock operations on pool->lock. Workqueue now has fitting iterators, and the locking overhead isn't anything to worry about - those locks are unlikely to be contended, and the same CPU visiting the same set of locks multiple times isn't expensive.

Restructure the three functions such that the flow better matches the logical steps and pwq iteration is done using for_each_pwq() inside workqueue iteration.

* freeze_workqueues_begin(): Setting of FREEZING is moved into a separate for_each_pool() iteration. pwq iteration for clearing max_active is updated as described above.

* freeze_workqueues_busy(): pwq iteration updated as described above.

* thaw_workqueues(): The single for_each_wq_cpu() iteration is broken into three discrete steps - clearing FREEZING, restoring max_active, and kicking workers. The first and last steps use for_each_pool() and the second step uses the pwq iteration described above.

This makes the code easier to understand and removes the use of for_each_wq_cpu() for walking pwqs, which can't support the multiple unbound pwqs that will be needed to implement unbound workqueues with custom attributes.

This patch doesn't introduce any visible behavior changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
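To make the new iteration shape concrete, here is a minimal userspace model of the freeze path. The structs, helper names, and pthread locking below are invented for the sketch and only mirror the pattern, not the kernel code itself: pool-wide state is set in one pass over pools, then each freezable workqueue's pwqs are walked with only the owning pool's lock held around each max_active update.

#include <pthread.h>
#include <stdio.h>

#define POOL_FREEZING 0x1
#define NPOOLS 2
#define NPWQS  4

struct pool {
	pthread_mutex_t lock;		/* models pool->lock */
	unsigned int flags;		/* models the POOL_FREEZING bit */
};

struct pwq {
	struct pool *pool;		/* pool this pwq is attached to */
	int max_active;
};

static struct pool pools[NPOOLS];
static struct pwq pwqs[NPWQS];		/* stand-in for one freezable wq's pwqs */

/* step 1: one pass over pools (models the for_each_pool() loop) */
static void set_freezing(void)
{
	for (int id = 0; id < NPOOLS; id++) {
		pthread_mutex_lock(&pools[id].lock);
		pools[id].flags |= POOL_FREEZING;
		pthread_mutex_unlock(&pools[id].lock);
	}
}

/* step 2: one pass over pwqs (models for_each_pwq()); the same pool's
 * lock may be taken several times, which is cheap when uncontended */
static void clear_max_active(void)
{
	for (int i = 0; i < NPWQS; i++) {
		pthread_mutex_lock(&pwqs[i].pool->lock);
		pwqs[i].max_active = 0;
		pthread_mutex_unlock(&pwqs[i].pool->lock);
	}
}

int main(void)
{
	for (int id = 0; id < NPOOLS; id++) {
		pthread_mutex_init(&pools[id].lock, NULL);
		pools[id].flags = 0;
	}
	for (int i = 0; i < NPWQS; i++) {
		pwqs[i].pool = &pools[i % NPOOLS];	/* pwqs spread over pools */
		pwqs[i].max_active = 16;
	}

	set_freezing();
	clear_max_active();

	for (int i = 0; i < NPWQS; i++)
		printf("pwq %d: max_active=%d, pool flags=0x%x\n",
		       i, pwqs[i].max_active, pwqs[i].pool->flags);
	return 0;
}

The thaw path in the patch below follows the same shape in reverse: a for_each_pool() pass to clear FREEZING, a per-pwq pass to restore max_active, and a final for_each_pool() pass to wake workers.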
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	87
1 file changed, 45 insertions(+), 42 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 55494e3f9f3b..8942cc74d83b 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3595,6 +3595,8 @@ EXPORT_SYMBOL_GPL(work_on_cpu);
 void freeze_workqueues_begin(void)
 {
 	struct worker_pool *pool;
+	struct workqueue_struct *wq;
+	struct pool_workqueue *pwq;
 	int id;
 
 	spin_lock_irq(&workqueue_lock);
@@ -3602,23 +3604,24 @@ void freeze_workqueues_begin(void)
 	WARN_ON_ONCE(workqueue_freezing);
 	workqueue_freezing = true;
 
+	/* set FREEZING */
 	for_each_pool(pool, id) {
-		struct workqueue_struct *wq;
-
 		spin_lock(&pool->lock);
-
 		WARN_ON_ONCE(pool->flags & POOL_FREEZING);
 		pool->flags |= POOL_FREEZING;
+		spin_unlock(&pool->lock);
+	}
 
-		list_for_each_entry(wq, &workqueues, list) {
-			struct pool_workqueue *pwq = get_pwq(pool->cpu, wq);
+	/* suppress further executions by setting max_active to zero */
+	list_for_each_entry(wq, &workqueues, list) {
+		if (!(wq->flags & WQ_FREEZABLE))
+			continue;
 
-			if (pwq && pwq->pool == pool &&
-			    (wq->flags & WQ_FREEZABLE))
-				pwq->max_active = 0;
+		for_each_pwq(pwq, wq) {
+			spin_lock(&pwq->pool->lock);
+			pwq->max_active = 0;
+			spin_unlock(&pwq->pool->lock);
 		}
-
-		spin_unlock(&pool->lock);
 	}
 
 	spin_unlock_irq(&workqueue_lock);
@@ -3639,25 +3642,22 @@ void freeze_workqueues_begin(void)
  */
 bool freeze_workqueues_busy(void)
 {
-	unsigned int cpu;
 	bool busy = false;
+	struct workqueue_struct *wq;
+	struct pool_workqueue *pwq;
 
 	spin_lock_irq(&workqueue_lock);
 
 	WARN_ON_ONCE(!workqueue_freezing);
 
-	for_each_wq_cpu(cpu) {
-		struct workqueue_struct *wq;
+	list_for_each_entry(wq, &workqueues, list) {
+		if (!(wq->flags & WQ_FREEZABLE))
+			continue;
 		/*
 		 * nr_active is monotonically decreasing.  It's safe
 		 * to peek without lock.
 		 */
-		list_for_each_entry(wq, &workqueues, list) {
-			struct pool_workqueue *pwq = get_pwq(cpu, wq);
-
-			if (!pwq || !(wq->flags & WQ_FREEZABLE))
-				continue;
-
+		for_each_pwq(pwq, wq) {
 			WARN_ON_ONCE(pwq->nr_active < 0);
 			if (pwq->nr_active) {
 				busy = true;
@@ -3681,40 +3681,43 @@ out_unlock:
  */
 void thaw_workqueues(void)
 {
-	unsigned int cpu;
+	struct workqueue_struct *wq;
+	struct pool_workqueue *pwq;
+	struct worker_pool *pool;
+	int id;
 
 	spin_lock_irq(&workqueue_lock);
 
 	if (!workqueue_freezing)
 		goto out_unlock;
 
-	for_each_wq_cpu(cpu) {
-		struct worker_pool *pool;
-		struct workqueue_struct *wq;
-
-		for_each_std_worker_pool(pool, cpu) {
-			spin_lock(&pool->lock);
-
-			WARN_ON_ONCE(!(pool->flags & POOL_FREEZING));
-			pool->flags &= ~POOL_FREEZING;
-
-			list_for_each_entry(wq, &workqueues, list) {
-				struct pool_workqueue *pwq = get_pwq(cpu, wq);
-
-				if (!pwq || pwq->pool != pool ||
-				    !(wq->flags & WQ_FREEZABLE))
-					continue;
-
-				/* restore max_active and repopulate worklist */
-				pwq_set_max_active(pwq, wq->saved_max_active);
-			}
+	/* clear FREEZING */
+	for_each_pool(pool, id) {
+		spin_lock(&pool->lock);
+		WARN_ON_ONCE(!(pool->flags & POOL_FREEZING));
+		pool->flags &= ~POOL_FREEZING;
+		spin_unlock(&pool->lock);
+	}
 
-			wake_up_worker(pool);
+	/* restore max_active and repopulate worklist */
+	list_for_each_entry(wq, &workqueues, list) {
+		if (!(wq->flags & WQ_FREEZABLE))
+			continue;
 
-			spin_unlock(&pool->lock);
+		for_each_pwq(pwq, wq) {
+			spin_lock(&pwq->pool->lock);
+			pwq_set_max_active(pwq, wq->saved_max_active);
+			spin_unlock(&pwq->pool->lock);
 		}
 	}
 
+	/* kick workers */
+	for_each_pool(pool, id) {
+		spin_lock(&pool->lock);
+		wake_up_worker(pool);
+		spin_unlock(&pool->lock);
+	}
+
 	workqueue_freezing = false;
 out_unlock:
 	spin_unlock_irq(&workqueue_lock);
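A side note on the unlocked nr_active check in freeze_workqueues_busy() above: once max_active has been forced to zero, nr_active can only decrease, so a racy read may overestimate the remaining work (harmlessly reporting "busy" once too often, which the caller simply retries) but can never miss it. A minimal userspace model of that argument follows; the names and types are invented for the sketch, and C11 atomics stand in for the plain, unlocked kernel read.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* models pwq->nr_active: once freezing starts it is only ever
 * decremented, so an unlocked read can overestimate but never
 * underestimate the amount of in-flight work */
static atomic_int nr_active = 3;

/* models freeze_workqueues_busy(): peek without taking any lock */
static bool freeze_busy(void)
{
	return atomic_load_explicit(&nr_active, memory_order_relaxed) > 0;
}

int main(void)
{
	while (freeze_busy()) {
		printf("still busy, waiting...\n");
		atomic_fetch_sub(&nr_active, 1);	/* a work item finishes */
	}
	printf("all work items quiesced\n");
	return 0;
}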