 kernel/workqueue.c | 41 ++++++++++++++++++++++---------------------
 1 file changed, 20 insertions(+), 21 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index ca7959be8aaa..91fe0a6118a0 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -66,7 +66,7 @@ enum {
 	 * be executing on any CPU. The pool behaves as an unbound one.
 	 *
 	 * Note that DISASSOCIATED should be flipped only while holding
-	 * attach_mutex to avoid changing binding state while
+	 * wq_pool_attach_mutex to avoid changing binding state while
 	 * worker_attach_to_pool() is in progress.
 	 */
 	POOL_MANAGER_ACTIVE	= 1 << 0,	/* being managed */
@@ -123,7 +123,7 @@ enum {
  * cpu or grabbing pool->lock is enough for read access. If
  * POOL_DISASSOCIATED is set, it's identical to L.
  *
- * A: pool->attach_mutex protected.
+ * A: wq_pool_attach_mutex protected.
  *
  * PL: wq_pool_mutex protected.
  *
@@ -166,7 +166,6 @@ struct worker_pool {
 						/* L: hash of busy workers */
 
 	struct worker		*manager;	/* L: purely informational */
-	struct mutex		attach_mutex;	/* attach/detach exclusion */
 	struct list_head	workers;	/* A: attached workers */
 	struct completion	*detach_completion; /* all workers detached */
 
@@ -297,6 +296,7 @@ static bool wq_numa_enabled; /* unbound NUMA affinity enabled */
 static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
 
 static DEFINE_MUTEX(wq_pool_mutex);	/* protects pools and workqueues list */
+static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
 static DEFINE_SPINLOCK(wq_mayday_lock);	/* protects wq->maydays list */
 static DECLARE_WAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */
 
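Because the new lock is defined with DEFINE_MUTEX(), it is fully initialized at compile time; that is why the mutex_init() call removed from init_worker_pool() further down has no replacement. A rough userspace analogue of the same trade-off, using pthreads rather than the kernel API (names are illustrative):

#include <pthread.h>
#include <stdio.h>

/* analogue of: static DEFINE_MUTEX(wq_pool_attach_mutex);
 * a file-scope mutex is fully initialized at compile time */
static pthread_mutex_t attach_mutex = PTHREAD_MUTEX_INITIALIZER;

/* the old scheme: one mutex per pool, initialized at runtime */
struct pool {
	pthread_mutex_t attach_mutex;
};

static void pool_init(struct pool *p)
{
	/* analogue of the mutex_init() this patch removes */
	pthread_mutex_init(&p->attach_mutex, NULL);
}

int main(void)
{
	struct pool p;

	pool_init(&p);
	pthread_mutex_lock(&p.attach_mutex);	/* old: per-pool lock */
	pthread_mutex_unlock(&p.attach_mutex);

	pthread_mutex_lock(&attach_mutex);	/* new: one global lock */
	pthread_mutex_unlock(&attach_mutex);

	puts("both locking schemes exercised");
	return 0;
}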
@@ -399,14 +399,14 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
  * @worker: iteration cursor
  * @pool: worker_pool to iterate workers of
  *
- * This must be called with @pool->attach_mutex.
+ * This must be called with wq_pool_attach_mutex.
  *
  * The if/else clause exists only for the lockdep assertion and can be
  * ignored.
  */
 #define for_each_pool_worker(worker, pool)				\
 	list_for_each_entry((worker), &(pool)->workers, node)		\
-		if (({ lockdep_assert_held(&pool->attach_mutex); false; })) { } \
+		if (({ lockdep_assert_held(&wq_pool_attach_mutex); false; })) { } \
 		else
 
 /**
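The if/else shape this macro keeps is a lockdep idiom: a GNU statement expression runs the assertion as a side effect and evaluates to false, so the empty branch is never taken and the caller's loop body binds to the trailing else. A minimal userspace sketch of the same idiom, where check_held() is a hypothetical stand-in for lockdep_assert_held():

#include <stdio.h>

/* hypothetical stand-in for lockdep_assert_held() */
static void check_held(const char *lock)
{
	printf("assert %s held\n", lock);
}

/*
 * Same shape as for_each_pool_worker(): the GNU statement
 * expression (gcc/clang extension) runs the check once per
 * iteration and yields 0, so the empty branch is never taken
 * and the caller's loop body binds to the trailing "else".
 */
#define for_each_checked(i, n)						\
	for ((i) = 0; (i) < (n); (i)++)					\
		if (({ check_held("wq_pool_attach_mutex"); 0; })) { }	\
		else

int main(void)
{
	int i;

	for_each_checked(i, 3)
		printf("visiting worker %d\n", i);
	return 0;
}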
@@ -1724,7 +1724,7 @@ static struct worker *alloc_worker(int node)
 static void worker_attach_to_pool(struct worker *worker,
 				   struct worker_pool *pool)
 {
-	mutex_lock(&pool->attach_mutex);
+	mutex_lock(&wq_pool_attach_mutex);
 
 	/*
 	 * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
@@ -1733,16 +1733,16 @@ static void worker_attach_to_pool(struct worker *worker,
 	set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
 
 	/*
-	 * The pool->attach_mutex ensures %POOL_DISASSOCIATED remains
-	 * stable across this function. See the comments above the
-	 * flag definition for details.
+	 * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains
+	 * stable across this function. See the comments above the flag
+	 * definition for details.
 	 */
 	if (pool->flags & POOL_DISASSOCIATED)
 		worker->flags |= WORKER_UNBOUND;
 
 	list_add_tail(&worker->node, &pool->workers);
 
-	mutex_unlock(&pool->attach_mutex);
+	mutex_unlock(&wq_pool_attach_mutex);
 }
 
 /**
@@ -1759,11 +1759,11 @@ static void worker_detach_from_pool(struct worker *worker,
 {
 	struct completion *detach_completion = NULL;
 
-	mutex_lock(&pool->attach_mutex);
+	mutex_lock(&wq_pool_attach_mutex);
 	list_del(&worker->node);
 	if (list_empty(&pool->workers))
 		detach_completion = pool->detach_completion;
-	mutex_unlock(&pool->attach_mutex);
+	mutex_unlock(&wq_pool_attach_mutex);
 
 	/* clear leftover flags without pool->lock after it is detached */
 	worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);
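The two hunks above are the attach/detach halves of one handshake: the last worker to leave pool->workers picks up pool->detach_completion under the mutex and completes it, which is what lets put_unbound_pool() (hunk below) sleep until every worker is gone. A minimal pthread sketch of that last-one-out pattern, a userspace analogue rather than the kernel API:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t attach_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  all_detached = PTHREAD_COND_INITIALIZER;
static int nr_workers = 3;

/* analogue of worker_detach_from_pool() */
static void *worker(void *arg)
{
	int last;

	pthread_mutex_lock(&attach_mutex);
	last = (--nr_workers == 0);	/* like list_empty(&pool->workers) */
	pthread_mutex_unlock(&attach_mutex);

	if (last)			/* like complete(detach_completion) */
		pthread_cond_signal(&all_detached);
	return NULL;
}

int main(void)
{
	pthread_t t[3];

	for (int i = 0; i < 3; i++)
		pthread_create(&t[i], NULL, worker, NULL);

	/* analogue of put_unbound_pool() waiting on detach_completion */
	pthread_mutex_lock(&attach_mutex);
	while (nr_workers > 0)
		pthread_cond_wait(&all_detached, &attach_mutex);
	pthread_mutex_unlock(&attach_mutex);

	printf("all workers detached\n");
	for (int i = 0; i < 3; i++)
		pthread_join(t[i], NULL);
	return 0;
}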
@@ -3271,7 +3271,6 @@ static int init_worker_pool(struct worker_pool *pool)
 
 	timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0);
 
-	mutex_init(&pool->attach_mutex);
 	INIT_LIST_HEAD(&pool->workers);
 
 	ida_init(&pool->worker_ida);
@@ -3354,10 +3353,10 @@ static void put_unbound_pool(struct worker_pool *pool)
 	WARN_ON(pool->nr_workers || pool->nr_idle);
 	spin_unlock_irq(&pool->lock);
 
-	mutex_lock(&pool->attach_mutex);
+	mutex_lock(&wq_pool_attach_mutex);
 	if (!list_empty(&pool->workers))
 		pool->detach_completion = &detach_completion;
-	mutex_unlock(&pool->attach_mutex);
+	mutex_unlock(&wq_pool_attach_mutex);
 
 	if (pool->detach_completion)
 		wait_for_completion(pool->detach_completion);
@@ -4600,7 +4599,7 @@ static void unbind_workers(int cpu)
 	struct worker *worker;
 
 	for_each_cpu_worker_pool(pool, cpu) {
-		mutex_lock(&pool->attach_mutex);
+		mutex_lock(&wq_pool_attach_mutex);
 		spin_lock_irq(&pool->lock);
 
 		/*
@@ -4616,7 +4615,7 @@ static void unbind_workers(int cpu)
 		pool->flags |= POOL_DISASSOCIATED;
 
 		spin_unlock_irq(&pool->lock);
-		mutex_unlock(&pool->attach_mutex);
+		mutex_unlock(&wq_pool_attach_mutex);
 
 		/*
 		 * Call schedule() so that we cross rq->lock and thus can
@@ -4657,7 +4656,7 @@ static void rebind_workers(struct worker_pool *pool)
 {
 	struct worker *worker;
 
-	lockdep_assert_held(&pool->attach_mutex);
+	lockdep_assert_held(&wq_pool_attach_mutex);
 
 	/*
 	 * Restore CPU affinity of all workers. As all idle workers should
@@ -4727,7 +4726,7 @@ static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
 	static cpumask_t cpumask;
 	struct worker *worker;
 
-	lockdep_assert_held(&pool->attach_mutex);
+	lockdep_assert_held(&wq_pool_attach_mutex);
 
 	/* is @cpu allowed for @pool? */
 	if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
@@ -4762,14 +4761,14 @@ int workqueue_online_cpu(unsigned int cpu)
 	mutex_lock(&wq_pool_mutex);
 
 	for_each_pool(pool, pi) {
-		mutex_lock(&pool->attach_mutex);
+		mutex_lock(&wq_pool_attach_mutex);
 
 		if (pool->cpu == cpu)
 			rebind_workers(pool);
 		else if (pool->cpu < 0)
 			restore_unbound_workers_cpumask(pool, cpu);
 
-		mutex_unlock(&pool->attach_mutex);
+		mutex_unlock(&wq_pool_attach_mutex);
 	}
 
 	/* update NUMA affinity of unbound workqueues */
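Note the lock ordering this last hunk establishes: workqueue_online_cpu() takes the new global lock nested inside wq_pool_mutex, and keeping one consistent acquisition order (wq_pool_mutex first, then wq_pool_attach_mutex) is what keeps collapsing the per-pool mutexes into a single shared one deadlock-free. A trivial pthread sketch of that ordering discipline, with illustrative names:

#include <pthread.h>
#include <stdio.h>

/* illustrative stand-ins for wq_pool_mutex and wq_pool_attach_mutex */
static pthread_mutex_t pool_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t pool_attach_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * Every path that needs both locks takes them in the same
 * order, mirroring workqueue_online_cpu(): pool_mutex first,
 * then pool_attach_mutex. Inverting the order on any one path
 * would permit an AB/BA deadlock.
 */
static void online_cpu(int cpu)
{
	pthread_mutex_lock(&pool_mutex);
	pthread_mutex_lock(&pool_attach_mutex);
	printf("rebinding workers for cpu %d\n", cpu);
	pthread_mutex_unlock(&pool_attach_mutex);
	pthread_mutex_unlock(&pool_mutex);
}

int main(void)
{
	online_cpu(0);
	return 0;
}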