author		Lai Jiangshan <laijs@cn.fujitsu.com>	2014-05-22 07:01:16 -0400
committer	Tejun Heo <tj@kernel.org>	2014-05-22 11:35:51 -0400
commit		74b414ead1133972817d3ce7b934356150d03a7d
tree		8942db6fda58c213804b66f8825fd736366b1597
parent		1037de36edae30f1ddc0a2532decd50f92ac4901
workqueue: remove the confusing POOL_FREEZING
Currently, the global freezing state is propagated to worker_pools via
POOL_FREEZING and then to each workqueue; however, the middle step -
propagation through worker_pools - can be skipped as long as one or
more max_active adjustments happen for each workqueue after the update
to the global state is visible.  The global workqueue freezing state
and the max_active adjustments during workqueue creation and
[un]freezing are serialized with wq_pool_mutex, so it's trivial to
guarantee that max_actives stay in sync with the global freezing state.

POOL_FREEZING is thus unnecessary; it makes the code more confusing
and complicates freeze_workqueues_begin() and thaw_workqueues() by
requiring them to walk through all pools.

Remove POOL_FREEZING and use workqueue_freezing directly instead.
tj: Description and comment updates.
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
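The serialization argument above can be made concrete with a small
user-space model.  This is an illustrative sketch only, not kernel
code: pthread primitives stand in for wq_pool_mutex, and all names
here (pool_mutex, freezing, adjust_max_active, create_wq,
set_freezing) are invented for the example.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* stand-in for wq_pool_mutex */
static pthread_mutex_t pool_mutex = PTHREAD_MUTEX_INITIALIZER;
/* stand-in for workqueue_freezing; protected by pool_mutex */
static bool freezing;

struct wq {
	bool freezable;
	int saved_max_active;
	int max_active;
};

/* mirrors the new pwq_adjust_max_active() test: only the global flag
 * matters, no per-pool state; called with pool_mutex held */
static void adjust_max_active(struct wq *wq)
{
	wq->max_active = (wq->freezable && freezing) ? 0 : wq->saved_max_active;
}

/* creation adjusts under the same mutex, so a queue created mid-freeze
 * starts out frozen without ever consulting a per-pool flag */
static void create_wq(struct wq *wq, bool freezable, int max)
{
	pthread_mutex_lock(&pool_mutex);
	wq->freezable = freezable;
	wq->saved_max_active = max;
	adjust_max_active(wq);
	pthread_mutex_unlock(&pool_mutex);
}

/* flip the global flag, then adjust every queue at least once - the
 * "one or more adjustments after the update is visible" contract */
static void set_freezing(struct wq *wqs, int n, bool on)
{
	pthread_mutex_lock(&pool_mutex);
	freezing = on;
	for (int i = 0; i < n; i++)
		adjust_max_active(&wqs[i]);
	pthread_mutex_unlock(&pool_mutex);
}

int main(void)
{
	struct wq wqs[2];

	create_wq(&wqs[0], true, 4);
	set_freezing(wqs, 1, true);	/* freeze: wqs[0] throttled */
	create_wq(&wqs[1], true, 4);	/* created while frozen */
	printf("%d %d\n", wqs[0].max_active, wqs[1].max_active);  /* 0 0 */
	set_freezing(wqs, 2, false);	/* thaw */
	printf("%d %d\n", wqs[0].max_active, wqs[1].max_active);  /* 4 4 */
	return 0;
}

A queue created while frozen starts throttled and thaws together with
the rest, which is exactly why no per-pool flag is needed.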
-rw-r--r--	kernel/workqueue.c | 32 +++++++-------------------------
1 file changed, 7 insertions(+), 25 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index dd107b83f45f..bc3c18892b7d 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -69,7 +69,6 @@ enum {
 	 * worker_attach_to_pool() is in progress.
 	 */
 	POOL_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */
-	POOL_FREEZING		= 1 << 3,	/* freeze in progress */
 
 	/* worker flags */
 	WORKER_DIE		= 1 << 1,	/* die die die */
@@ -3533,9 +3532,6 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
 	if (!pool || init_worker_pool(pool) < 0)
 		goto fail;
 
-	if (workqueue_freezing)
-		pool->flags |= POOL_FREEZING;
-
 	lockdep_set_subclass(&pool->lock, 1);	/* see put_pwq() */
 	copy_workqueue_attrs(pool->attrs, attrs);
 
@@ -3642,7 +3638,12 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
 
 	spin_lock_irq(&pwq->pool->lock);
 
-	if (!freezable || !(pwq->pool->flags & POOL_FREEZING)) {
+	/*
+	 * During [un]freezing, the caller is responsible for ensuring that
+	 * this function is called at least once after @workqueue_freezing
+	 * is updated and visible.
+	 */
+	if (!freezable || !workqueue_freezing) {
 		pwq->max_active = wq->saved_max_active;
 
 		while (!list_empty(&pwq->delayed_works) &&
@@ -4751,24 +4752,14 @@ EXPORT_SYMBOL_GPL(work_on_cpu);
  */
 void freeze_workqueues_begin(void)
 {
-	struct worker_pool *pool;
 	struct workqueue_struct *wq;
 	struct pool_workqueue *pwq;
-	int pi;
 
 	mutex_lock(&wq_pool_mutex);
 
 	WARN_ON_ONCE(workqueue_freezing);
 	workqueue_freezing = true;
 
-	/* set FREEZING */
-	for_each_pool(pool, pi) {
-		spin_lock_irq(&pool->lock);
-		WARN_ON_ONCE(pool->flags & POOL_FREEZING);
-		pool->flags |= POOL_FREEZING;
-		spin_unlock_irq(&pool->lock);
-	}
-
 	list_for_each_entry(wq, &workqueues, list) {
 		mutex_lock(&wq->mutex);
 		for_each_pwq(pwq, wq)
@@ -4838,21 +4829,13 @@ void thaw_workqueues(void)
 {
 	struct workqueue_struct *wq;
 	struct pool_workqueue *pwq;
-	struct worker_pool *pool;
-	int pi;
 
 	mutex_lock(&wq_pool_mutex);
 
 	if (!workqueue_freezing)
 		goto out_unlock;
 
-	/* clear FREEZING */
-	for_each_pool(pool, pi) {
-		spin_lock_irq(&pool->lock);
-		WARN_ON_ONCE(!(pool->flags & POOL_FREEZING));
-		pool->flags &= ~POOL_FREEZING;
-		spin_unlock_irq(&pool->lock);
-	}
+	workqueue_freezing = false;
 
 	/* restore max_active and repopulate worklist */
 	list_for_each_entry(wq, &workqueues, list) {
@@ -4862,7 +4845,6 @@ void thaw_workqueues(void)
 		mutex_unlock(&wq->mutex);
 	}
 
-	workqueue_freezing = false;
 out_unlock:
 	mutex_unlock(&wq_pool_mutex);
 }