author	Ingo Molnar <mingo@kernel.org>	2017-11-07 04:32:44 -0500
committer	Ingo Molnar <mingo@kernel.org>	2017-11-07 04:32:44 -0500
commit	8c5db92a705d9e2c986adec475980d1120fa07b4
tree	9f0eea56889819707c0a1a8eb5b1fb2db3cdaf3d	/kernel/workqueue.c
parent	ca5d376e17072c1b60c3fee66f3be58ef018952d
parent	e4880bc5dfb1f02b152e62a894b5c6f3e995b3cf
Merge branch 'linus' into locking/core, to resolve conflicts
Conflicts:
	include/linux/compiler-clang.h
	include/linux/compiler-gcc.h
	include/linux/compiler-intel.h
	include/uapi/linux/stddef.h

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	37
1 file changed, 15 insertions, 22 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 160fdc6e839a..1070b21ba4aa 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -68,6 +68,7 @@ enum {
 	 * attach_mutex to avoid changing binding state while
 	 * worker_attach_to_pool() is in progress.
 	 */
+	POOL_MANAGER_ACTIVE	= 1 << 0,	/* being managed */
 	POOL_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */
 
 	/* worker flags */
@@ -165,7 +166,6 @@ struct worker_pool {
 						/* L: hash of busy workers */
 
 	/* see manage_workers() for details on the two manager mutexes */
-	struct mutex		manager_arb;	/* manager arbitration */
 	struct worker		*manager;	/* L: purely informational */
 	struct mutex		attach_mutex;	/* attach/detach exclusion */
 	struct list_head	workers;	/* A: attached workers */
@@ -299,6 +299,7 @@ static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
 
 static DEFINE_MUTEX(wq_pool_mutex);	/* protects pools and workqueues list */
 static DEFINE_SPINLOCK(wq_mayday_lock);	/* protects wq->maydays list */
+static DECLARE_WAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */
 
 static LIST_HEAD(workqueues);		/* PR: list of all workqueues */
 static bool workqueue_freezing;		/* PL: have wqs started freezing? */
@@ -801,7 +802,7 @@ static bool need_to_create_worker(struct worker_pool *pool)
 /* Do we have too many workers and should some go away? */
 static bool too_many_workers(struct worker_pool *pool)
 {
-	bool managing = mutex_is_locked(&pool->manager_arb);
+	bool managing = pool->flags & POOL_MANAGER_ACTIVE;
 	int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
 	int nr_busy = pool->nr_workers - nr_idle;
 
@@ -1980,24 +1981,17 @@ static bool manage_workers(struct worker *worker)
 {
 	struct worker_pool *pool = worker->pool;
 
-	/*
-	 * Anyone who successfully grabs manager_arb wins the arbitration
-	 * and becomes the manager.  mutex_trylock() on pool->manager_arb
-	 * failure while holding pool->lock reliably indicates that someone
-	 * else is managing the pool and the worker which failed trylock
-	 * can proceed to executing work items.  This means that anyone
-	 * grabbing manager_arb is responsible for actually performing
-	 * manager duties.  If manager_arb is grabbed and released without
-	 * actual management, the pool may stall indefinitely.
-	 */
-	if (!mutex_trylock(&pool->manager_arb))
+	if (pool->flags & POOL_MANAGER_ACTIVE)
 		return false;
+
+	pool->flags |= POOL_MANAGER_ACTIVE;
 	pool->manager = worker;
 
 	maybe_create_worker(pool);
 
 	pool->manager = NULL;
-	mutex_unlock(&pool->manager_arb);
+	pool->flags &= ~POOL_MANAGER_ACTIVE;
+	wake_up(&wq_manager_wait);
 	return true;
 }
 
@@ -3235,7 +3229,6 @@ static int init_worker_pool(struct worker_pool *pool)
 	setup_timer(&pool->mayday_timer, pool_mayday_timeout,
 		    (unsigned long)pool);
 
-	mutex_init(&pool->manager_arb);
 	mutex_init(&pool->attach_mutex);
 	INIT_LIST_HEAD(&pool->workers);
 
@@ -3305,13 +3298,15 @@ static void put_unbound_pool(struct worker_pool *pool)
 	hash_del(&pool->hash_node);
 
 	/*
-	 * Become the manager and destroy all workers.  Grabbing
-	 * manager_arb prevents @pool's workers from blocking on
-	 * attach_mutex.
+	 * Become the manager and destroy all workers.  This prevents
+	 * @pool's workers from blocking on attach_mutex.  We're the last
+	 * manager and @pool gets freed with the flag set.
 	 */
-	mutex_lock(&pool->manager_arb);
-
 	spin_lock_irq(&pool->lock);
+	wait_event_lock_irq(wq_manager_wait,
+			    !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock);
+	pool->flags |= POOL_MANAGER_ACTIVE;
+
 	while ((worker = first_idle_worker(pool)))
 		destroy_worker(worker);
 	WARN_ON(pool->nr_workers || pool->nr_idle);
@@ -3325,8 +3320,6 @@ static void put_unbound_pool(struct worker_pool *pool)
 	if (pool->detach_completion)
 		wait_for_completion(pool->detach_completion);
 
-	mutex_unlock(&pool->manager_arb);
-
 	/* shut down the timers */
 	del_timer_sync(&pool->idle_timer);
 	del_timer_sync(&pool->mayday_timer);
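
The arbitration pattern the diff switches to — a POOL_MANAGER_ACTIVE flag checked under pool->lock, with put_unbound_pool() sleeping on wq_manager_wait until the flag clears — can be sketched in userspace, with a pthread mutex and condition variable standing in for the pool spinlock and the wait queue. This is a rough analogue, not the kernel code; the names below (struct pool, manage(), destroy_pool()) are invented for illustration. Build with `cc sketch.c -lpthread`.

/*
 * Userspace sketch of manager arbitration via a flag + wait queue.
 * pthread_mutex plays pool->lock, pthread_cond plays wq_manager_wait.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct pool {
	pthread_mutex_t lock;		/* stand-in for pool->lock */
	pthread_cond_t manager_wait;	/* stand-in for wq_manager_wait */
	bool manager_active;		/* stand-in for POOL_MANAGER_ACTIVE */
};

/* Worker path: try to become the manager; bail out if someone already is. */
static bool manage(struct pool *p)
{
	pthread_mutex_lock(&p->lock);
	if (p->manager_active) {
		pthread_mutex_unlock(&p->lock);
		return false;	/* someone else manages; go run work items */
	}
	p->manager_active = true;
	pthread_mutex_unlock(&p->lock);

	/* ... adjust the worker population, like maybe_create_worker() ... */

	pthread_mutex_lock(&p->lock);
	p->manager_active = false;
	pthread_cond_broadcast(&p->manager_wait);	/* like wake_up(&wq_manager_wait) */
	pthread_mutex_unlock(&p->lock);
	return true;
}

/* Teardown path: wait until no manager is active, then claim the flag for good. */
static void destroy_pool(struct pool *p)
{
	pthread_mutex_lock(&p->lock);
	while (p->manager_active)	/* like wait_event_lock_irq() */
		pthread_cond_wait(&p->manager_wait, &p->lock);
	p->manager_active = true;	/* last manager; never cleared */
	/* ... destroy idle workers while still holding the lock ... */
	pthread_mutex_unlock(&p->lock);
}

int main(void)
{
	struct pool p = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.manager_wait = PTHREAD_COND_INITIALIZER,
		.manager_active = false,
	};

	printf("manage() -> %d\n", manage(&p));	/* 1: became manager */
	destroy_pool(&p);
	printf("manage() -> %d\n", manage(&p));	/* 0: pool is being torn down */
	return 0;
}

The flag-plus-waitqueue arrangement keeps the "try to become manager" check non-blocking under pool->lock, while the teardown path in put_unbound_pool() can still sleep until the current manager finishes and wakes it via wq_manager_wait.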