author		Lai Jiangshan <laijs@cn.fujitsu.com>	2012-09-10 13:03:44 -0400
committer	Tejun Heo <tj@kernel.org>	2012-09-10 13:05:54 -0400
commit		ee378aa49b594da9bda6a2c768cc5b2ad585f911 (patch)
tree		33d73f93b93388e92fce1d4f4a5b3ae4100060ba /kernel/workqueue.c
parent		552a37e9360a293cd20e7f8ff1fb326a244c5f1e (diff)
workqueue: fix possible idle worker depletion across CPU hotplug
To simplify both normal and CPU hotplug paths, worker management is
prevented while CPU hotplug is in progress. This is achieved by having
CPU hotplug hold the same exclusion mechanism that workers use to
ensure there's only one manager per pool.

If someone else seems to be performing the manager role, workers
proceed to execute work items. CPU hotplug using the same mechanism
can lead to idle worker depletion because all workers could proceed to
execute work items while CPU hotplug is in progress, and CPU hotplug
itself wouldn't actually perform the worker management duty - it
doesn't guarantee that there's an idle worker left when it releases
management. This idle worker depletion can, under extreme
circumstances, break the forward-progress guarantee and thus lead to
deadlock.

This patch fixes the bug by using separate mechanisms for manager
exclusion among workers and for hotplug exclusion. For manager
exclusion, POOL_MANAGING_WORKERS, which was restored by the previous
patch, is used.

pool->manager_mutex is now only used for exclusion between the elected
manager and CPU hotplug. The elected manager won't proceed without
holding pool->manager_mutex. This ensures that the worker which won
the manager position can't skip managing while CPU hotplug is in
progress; it will block on manager_mutex and perform management after
CPU hotplug is complete.

Note that hotplug may happen while waiting for manager_mutex. A
manager is on neither the idle nor the busy list, so the hotplug code
can't unbind/rebind it. Make the manager handle its own un/rebinding.

tj: Updated comment and description.

Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
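[Editor's note: the two-level exclusion described above is easier to see
outside the kernel. Below is a minimal userspace sketch of the pattern
using pthreads - not the kernel code itself. struct pool,
pool_manage_workers(), hotplug_begin()/hotplug_end() and the plain bool
flag are illustrative stand-ins for worker_pool, manage_workers(), the
hotplug path and POOL_MANAGING_WORKERS; an ordinary mutex stands in for
gcwq->lock.]

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct pool {
	pthread_mutex_t lock;		/* stands in for gcwq->lock */
	pthread_mutex_t manager_mutex;	/* manager vs. CPU hotplug */
	bool managing;			/* stands in for POOL_MANAGING_WORKERS */
};

/* Called by a worker that noticed the pool may need management. */
static bool pool_manage_workers(struct pool *p)
{
	pthread_mutex_lock(&p->lock);
	if (p->managing) {
		/* Someone else is the manager; go execute work items. */
		pthread_mutex_unlock(&p->lock);
		return false;
	}
	p->managing = true;	/* we won the manager role */

	/*
	 * manager_mutex is free unless hotplug is in progress.  If the
	 * trylock fails, drop the pool lock and wait - the elected
	 * manager must not skip managing.
	 */
	if (pthread_mutex_trylock(&p->manager_mutex) != 0) {
		pthread_mutex_unlock(&p->lock);
		pthread_mutex_lock(&p->manager_mutex);
		pthread_mutex_lock(&p->lock);
		/* Hotplug may have run meanwhile; re-validate pool state. */
	}

	/* ... create or destroy workers as needed, under p->lock ... */

	p->managing = false;
	pthread_mutex_unlock(&p->lock);
	pthread_mutex_unlock(&p->manager_mutex);
	return true;
}

/* Hotplug excludes only the elected manager, never all the workers. */
static void hotplug_begin(struct pool *p) { pthread_mutex_lock(&p->manager_mutex); }
static void hotplug_end(struct pool *p)   { pthread_mutex_unlock(&p->manager_mutex); }

int main(void)
{
	struct pool p = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.manager_mutex = PTHREAD_MUTEX_INITIALIZER,
		.managing = false,
	};

	hotplug_begin(&p);	/* pretend hotplug runs and finishes */
	hotplug_end(&p);
	printf("managed: %d\n", pool_manage_workers(&p));
	return 0;
}

[The key property: hotplug_begin() only ever contends with the single
elected manager, so the other workers keep executing work items, and the
manager is guaranteed to perform its duty once hotplug finishes.]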
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	37
1 file changed, 36 insertions(+), 1 deletion(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 383548ed0b54..1e1373bcb3e3 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1825,10 +1825,45 @@ static bool manage_workers(struct worker *worker)
 	struct worker_pool *pool = worker->pool;
 	bool ret = false;
 
-	if (!mutex_trylock(&pool->manager_mutex))
+	if (pool->flags & POOL_MANAGING_WORKERS)
 		return ret;
 
 	pool->flags |= POOL_MANAGING_WORKERS;
+
+	/*
+	 * To simplify both worker management and CPU hotplug, hold off
+	 * management while hotplug is in progress.  CPU hotplug path can't
+	 * grab %POOL_MANAGING_WORKERS to achieve this because that can
+	 * lead to idle worker depletion (all become busy thinking someone
+	 * else is managing) which in turn can result in deadlock under
+	 * extreme circumstances.  Use @pool->manager_mutex to synchronize
+	 * manager against CPU hotplug.
+	 *
+	 * manager_mutex would always be free unless CPU hotplug is in
+	 * progress.  trylock first without dropping @gcwq->lock.
+	 */
+	if (unlikely(!mutex_trylock(&pool->manager_mutex))) {
+		spin_unlock_irq(&pool->gcwq->lock);
+		mutex_lock(&pool->manager_mutex);
+		/*
+		 * CPU hotplug could have happened while we were waiting
+		 * for manager_mutex.  Hotplug itself can't handle us
+		 * because manager isn't either on idle or busy list, and
+		 * @gcwq's state and ours could have deviated.
+		 *
+		 * As hotplug is now excluded via manager_mutex, we can
+		 * simply try to bind.  It will succeed or fail depending
+		 * on @gcwq's current state.  Try it and adjust
+		 * %WORKER_UNBOUND accordingly.
+		 */
+		if (worker_maybe_bind_and_lock(worker))
+			worker->flags &= ~WORKER_UNBOUND;
+		else
+			worker->flags |= WORKER_UNBOUND;
+
+		ret = true;
+	}
+
 	pool->flags &= ~POOL_MANAGE_WORKERS;
 
 	/*
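[Editor's note: the rebinding logic in the hunk above, in the same
userspace sketch style. The worker struct, unbound flag and maybe_bind()
helper below are hypothetical stand-ins for struct worker, WORKER_UNBOUND
and worker_maybe_bind_and_lock(); only the flag-adjustment logic mirrors
the patch.]

#include <stdbool.h>

struct worker {
	bool unbound;	/* stand-in for the WORKER_UNBOUND flag */
};

/*
 * Hypothetical stand-in for worker_maybe_bind_and_lock(): try to bind
 * the calling thread to its pool's CPU and report whether that worked.
 */
static bool maybe_bind(struct worker *worker)
{
	(void)worker;
	return true;	/* pretend the CPU is online and binding worked */
}

/*
 * A managing worker sits on neither the idle nor the busy list, so
 * hotplug cannot fix its binding; with manager_mutex held, the manager
 * fixes it itself, and the result is stable because hotplug is excluded.
 */
static void manager_rebind_self(struct worker *worker)
{
	if (maybe_bind(worker))
		worker->unbound = false;
	else
		worker->unbound = true;
}

int main(void)
{
	struct worker w = { .unbound = true };

	manager_rebind_self(&w);
	return w.unbound;	/* 0 if the (pretend) bind succeeded */
}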