author		Lai Jiangshan <laijs@cn.fujitsu.com>	2014-05-20 05:46:33 -0400
committer	Tejun Heo <tj@kernel.org>	2014-05-20 10:59:32 -0400
commit		4d757c5c81edba2052aae10d5b36dfcb9902b141 (patch)
tree		cf6462421d19834ab6bdd17d18c7916a5adb8fb4 /kernel/workqueue.c
parent		7cda9aae0596d871a8d7a6888d7b447c60e5ab30 (diff)
workqueue: narrow the protection range of manager_mutex
In create_worker(), pool->worker_ida now uses ida_simple_get()/ida_simple_put() and doesn't require external synchronization, so it doesn't need manager_mutex.

The struct worker allocation and the kthread allocation are not visible to anyone before the worker is attached, so they don't need manager_mutex either.

These operations all happen before the attaching operation which attaches the worker to the pool. Between attaching and starting the worker, the worker is already attached to the pool, so cpu hotplug handles cpu-binding for the worker correctly and manager_mutex is not needed after attaching.

The conclusion is that only the attaching operation needs manager_mutex, so we narrow the protection range of manager_mutex in create_worker().

Some comments about manager_mutex are removed, because we will later rename it to attach_mutex and add worker_attach_to_pool(), which will be self-explanatory.

tj: Minor description updates.

Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
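In code terms, manager_mutex moves from covering all of worker creation to covering only the attach step. Below is a condensed before/after sketch of create_worker(), paraphrasing the hunks that follow; allocation details and error paths are elided, so treat it as an outline rather than the literal source:

	/* Before: callers held manager_mutex across the whole function. */
	static struct worker *create_worker(struct worker_pool *pool)
	{
		lockdep_assert_held(&pool->manager_mutex);

		/* ... allocate ID, struct worker, and kthread ... */

		set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
		list_add_tail(&worker->node, &pool->workers);	/* attach to pool */
		return worker;
	}

	/* After: ida_simple_get() synchronizes itself and an unattached
	 * worker is invisible to others, so only the attach step needs
	 * the mutex. */
	static struct worker *create_worker(struct worker_pool *pool)
	{
		/* ... allocate ID, struct worker, and kthread -- lockless ... */

		mutex_lock(&pool->manager_mutex);
		set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
		list_add_tail(&worker->node, &pool->workers);	/* attach to pool */
		mutex_unlock(&pool->manager_mutex);
		return worker;
	}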
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	35
1 file changed, 5 insertions(+), 30 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 092f2098746d..d6b31ff60c52 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1725,8 +1725,6 @@ static struct worker *create_worker(struct worker_pool *pool)
 	int id = -1;
 	char id_buf[16];
 
-	lockdep_assert_held(&pool->manager_mutex);
-
 	/* ID is needed to determine kthread name */
 	id = ida_simple_get(&pool->worker_ida, 0, 0, GFP_KERNEL);
 	if (id < 0)
@@ -1755,6 +1753,8 @@ static struct worker *create_worker(struct worker_pool *pool)
 	/* prevent userland from meddling with cpumask of workqueue workers */
 	worker->task->flags |= PF_NO_SETAFFINITY;
 
+	mutex_lock(&pool->manager_mutex);
+
 	/*
 	 * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
 	 * online CPUs.  It'll be re-applied when any of the CPUs come up.
@@ -1762,7 +1762,7 @@ static struct worker *create_worker(struct worker_pool *pool)
 	set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
 
 	/*
-	 * The caller is responsible for ensuring %POOL_DISASSOCIATED
+	 * The pool->manager_mutex ensures %POOL_DISASSOCIATED
 	 * remains stable across this function.  See the comments above the
 	 * flag definition for details.
 	 */
@@ -1772,6 +1772,8 @@ static struct worker *create_worker(struct worker_pool *pool)
 	/* successful, attach the worker to the pool */
 	list_add_tail(&worker->node, &pool->workers);
 
+	mutex_unlock(&pool->manager_mutex);
+
 	return worker;
 
 fail:
@@ -1809,8 +1811,6 @@ static int create_and_start_worker(struct worker_pool *pool)
 {
 	struct worker *worker;
 
-	mutex_lock(&pool->manager_mutex);
-
 	worker = create_worker(pool);
 	if (worker) {
 		spin_lock_irq(&pool->lock);
@@ -1818,8 +1818,6 @@ static int create_and_start_worker(struct worker_pool *pool)
 		spin_unlock_irq(&pool->lock);
 	}
 
-	mutex_unlock(&pool->manager_mutex);
-
 	return worker ? 0 : -ENOMEM;
 }
 
@@ -2019,8 +2017,6 @@ static bool manage_workers(struct worker *worker)
 	bool ret = false;
 
 	/*
-	 * Managership is governed by two mutexes - manager_arb and
-	 * manager_mutex.  manager_arb handles arbitration of manager role.
 	 * Anyone who successfully grabs manager_arb wins the arbitration
 	 * and becomes the manager.  mutex_trylock() on pool->manager_arb
 	 * failure while holding pool->lock reliably indicates that someone
@@ -2029,33 +2025,12 @@ static bool manage_workers(struct worker *worker)
 	 * grabbing manager_arb is responsible for actually performing
 	 * manager duties.  If manager_arb is grabbed and released without
 	 * actual management, the pool may stall indefinitely.
-	 *
-	 * manager_mutex is used for exclusion of actual management
-	 * operations.  The holder of manager_mutex can be sure that none
-	 * of management operations, including creation and destruction of
-	 * workers, won't take place until the mutex is released.  Because
-	 * manager_mutex doesn't interfere with manager role arbitration,
-	 * it is guaranteed that the pool's management, while may be
-	 * delayed, won't be disturbed by someone else grabbing
-	 * manager_mutex.
 	 */
 	if (!mutex_trylock(&pool->manager_arb))
 		return ret;
 
-	/*
-	 * With manager arbitration won, manager_mutex would be free in
-	 * most cases.  trylock first without dropping @pool->lock.
-	 */
-	if (unlikely(!mutex_trylock(&pool->manager_mutex))) {
-		spin_unlock_irq(&pool->lock);
-		mutex_lock(&pool->manager_mutex);
-		spin_lock_irq(&pool->lock);
-		ret = true;
-	}
-
 	ret |= maybe_create_worker(pool);
 
-	mutex_unlock(&pool->manager_mutex);
 	mutex_unlock(&pool->manager_arb);
 	return ret;
 }
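For reference, after this patch manage_workers() reduces to the manager_arb arbitration alone. The following is a condensed reconstruction from the hunks above, not a verbatim copy; the pool local variable is assumed from surrounding code that this page does not show:

	static bool manage_workers(struct worker *worker)
	{
		struct worker_pool *pool = worker->pool;	/* assumed; not shown in this diff */
		bool ret = false;

		/* Whoever wins the trylock on manager_arb is the manager and
		 * is responsible for actually creating workers; a trylock
		 * failure while holding pool->lock means someone else is
		 * already managing. */
		if (!mutex_trylock(&pool->manager_arb))
			return ret;

		ret |= maybe_create_worker(pool);

		mutex_unlock(&pool->manager_arb);
		return ret;
	}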