author		Tejun Heo <tj@kernel.org>	2013-03-12 14:30:00 -0400
committer	Tejun Heo <tj@kernel.org>	2013-03-12 14:30:00 -0400
commit		34a06bd6b6fa92ccd9d3e6866b6cb91264c3cd20 (patch)
tree		546c715508dd2d4a80c754b24e41e1d4d2899775 /kernel/workqueue.c
parent		fa1b54e69bc6c04674c9bb96a6cfa8b2c9f44771 (diff)
workqueue: replace POOL_MANAGING_WORKERS flag with worker_pool->manager_arb
POOL_MANAGING_WORKERS is used to synchronize the manager role.
Synchronizing among workers doesn't need blocking, which is why it was
implemented as a flag.  It got converted to a mutex a while back to add
a blocking wait from the CPU hotplug path - 6037315269 ("workqueue: use
mutex for global_cwq manager exclusion").  Later it turned out that
synchronization among workers and CPU hotplug need to be handled
separately.  Eventually, POOL_MANAGING_WORKERS was restored and
workqueue->manager_mutex morphed into workqueue->assoc_mutex -
552a37e936 ("workqueue: restore POOL_MANAGING_WORKERS") and b2eb83d123
("workqueue: rename manager_mutex to assoc_mutex").

Now, to support multiple unbound pools with custom attributes, we're
gonna need to be able to lock out managers from destroy_workqueue(),
making it again necessary to be able to block on the manager role.

This patch replaces POOL_MANAGING_WORKERS with
worker_pool->manager_arb.  It doesn't introduce any behavior changes.

v2: s/manager_mutex/manager_arb/

Signed-off-by: Tejun Heo <tj@kernel.org>
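The arbitration pattern is easy to model outside the kernel.  Below is a
minimal user-space sketch (plain pthreads; struct pool, try_manage() and
lock_out_managers() are hypothetical illustration names, not kernel
code): workers claim the manager role with a trylock so they never
sleep on it, while a teardown path can take the same mutex with a
blocking lock to keep all managers out.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct pool {
	pthread_mutex_t manager_arb;		/* manager arbitration */
};

/* Worker path: non-blocking, like mutex_trylock() in manage_workers(). */
static bool try_manage(struct pool *pool)
{
	if (pthread_mutex_trylock(&pool->manager_arb) != 0)
		return false;			/* someone else is managing */

	/* ... create/destroy workers here ... */

	pthread_mutex_unlock(&pool->manager_arb);
	return true;
}

/* Teardown path: blocks until no worker holds the manager role. */
static void lock_out_managers(struct pool *pool)
{
	pthread_mutex_lock(&pool->manager_arb);
	/* managers are now excluded; safe to tear the pool down */
}

int main(void)
{
	struct pool p;

	pthread_mutex_init(&p.manager_arb, NULL);
	printf("managed: %s\n", try_manage(&p) ? "yes" : "no");
	lock_out_managers(&p);
	pthread_mutex_unlock(&p.manager_arb);
	pthread_mutex_destroy(&p.manager_arb);
	return 0;
}

The same property is what lets too_many_workers() in the diff below
treat mutex_is_locked(&pool->manager_arb) as "a worker is currently
managing": the manager holds the mutex for its whole managing stint and
has left the idle list, so it is counted back in as idle.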
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	21
1 file changed, 10 insertions(+), 11 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 46381490f496..16f7f8d79d35 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -64,7 +64,6 @@ enum {
 	 * create_worker() is in progress.
 	 */
 	POOL_MANAGE_WORKERS	= 1 << 0,	/* need to manage workers */
-	POOL_MANAGING_WORKERS	= 1 << 1,	/* managing workers */
 	POOL_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */
 	POOL_FREEZING		= 1 << 3,	/* freeze in progress */
 
@@ -145,6 +144,7 @@ struct worker_pool {
 	DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
 						/* L: hash of busy workers */
 
+	struct mutex		manager_arb;	/* manager arbitration */
 	struct mutex		assoc_mutex;	/* protect POOL_DISASSOCIATED */
 	struct ida		worker_ida;	/* L: for worker IDs */
 
@@ -706,7 +706,7 @@ static bool need_to_manage_workers(struct worker_pool *pool)
 /* Do we have too many workers and should some go away? */
 static bool too_many_workers(struct worker_pool *pool)
 {
-	bool managing = pool->flags & POOL_MANAGING_WORKERS;
+	bool managing = mutex_is_locked(&pool->manager_arb);
 	int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
 	int nr_busy = pool->nr_workers - nr_idle;
 
@@ -2029,19 +2029,17 @@ static bool manage_workers(struct worker *worker)
 	struct worker_pool *pool = worker->pool;
 	bool ret = false;
 
-	if (pool->flags & POOL_MANAGING_WORKERS)
+	if (!mutex_trylock(&pool->manager_arb))
 		return ret;
 
-	pool->flags |= POOL_MANAGING_WORKERS;
-
 	/*
 	 * To simplify both worker management and CPU hotplug, hold off
 	 * management while hotplug is in progress.  CPU hotplug path can't
-	 * grab %POOL_MANAGING_WORKERS to achieve this because that can
-	 * lead to idle worker depletion (all become busy thinking someone
-	 * else is managing) which in turn can result in deadlock under
-	 * extreme circumstances.  Use @pool->assoc_mutex to synchronize
-	 * manager against CPU hotplug.
+	 * grab @pool->manager_arb to achieve this because that can lead to
+	 * idle worker depletion (all become busy thinking someone else is
+	 * managing) which in turn can result in deadlock under extreme
+	 * circumstances.  Use @pool->assoc_mutex to synchronize manager
+	 * against CPU hotplug.
 	 *
 	 * assoc_mutex would always be free unless CPU hotplug is in
 	 * progress.  trylock first without dropping @pool->lock.
@@ -2077,8 +2075,8 @@ static bool manage_workers(struct worker *worker)
 	ret |= maybe_destroy_workers(pool);
 	ret |= maybe_create_worker(pool);
 
-	pool->flags &= ~POOL_MANAGING_WORKERS;
 	mutex_unlock(&pool->assoc_mutex);
+	mutex_unlock(&pool->manager_arb);
 	return ret;
 }
 
@@ -3806,6 +3804,7 @@ static int __init init_workqueues(void)
 		setup_timer(&pool->mayday_timer, pool_mayday_timeout,
 			    (unsigned long)pool);
 
+		mutex_init(&pool->manager_arb);
 		mutex_init(&pool->assoc_mutex);
 		ida_init(&pool->worker_ida);
 