author	Tejun Heo <tj@kernel.org>	2013-03-19 16:45:21 -0400
committer	Tejun Heo <tj@kernel.org>	2013-03-19 16:45:21 -0400
commit	822d8405d13931062d653e0c2cc0199ed801b072 (patch)
tree	388738869c771c58d20bc24d25729fabc0aab171 /kernel/workqueue.c
parent	14a40ffccd6163bbcd1d6f32b28a88ffe6149fc6 (diff)
workqueue: convert worker_pool->worker_ida to idr and implement for_each_pool_worker()
Convert worker_ida to an idr - worker_idr - and use it to implement
for_each_pool_worker(), which will be used to simplify worker rebinding
on CPU_ONLINE.

pool->worker_idr is protected by both pool->manager_mutex and
pool->lock so that it can be iterated while holding either lock.

* create_worker() allocates the ID without installing the worker
  pointer and installs the pointer later using idr_replace().  This is
  because the worker ID is needed when creating the actual task to name
  it, and the new worker shouldn't be visible to iterations before it
  is fully initialized.

* In destroy_worker(), ID removal is moved to before kthread_stop().
  This again guarantees that only fully working workers are visible to
  for_each_pool_worker().

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
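For illustration only, here is a minimal sketch (not part of this patch, and
not the actual rebind path added later) of how the new iterator might be used
on CPU_ONLINE: a hypothetical helper that walks all workers of a pool under
pool->manager_mutex and clears WORKER_UNBOUND under pool->lock.  It assumes
the workqueue.c-internal definitions of struct worker, struct worker_pool and
for_each_pool_worker() introduced in the diff below.

	/*
	 * Illustrative sketch only.  Shows for_each_pool_worker() being
	 * iterated with pool->manager_mutex held (and pool->lock taken for
	 * the flag update).
	 */
	static void example_rebind_pool_workers(struct worker_pool *pool)
	{
		struct worker *worker;
		int wi;

		lockdep_assert_held(&pool->manager_mutex);

		spin_lock_irq(&pool->lock);
		for_each_pool_worker(worker, wi, pool)
			worker->flags &= ~WORKER_UNBOUND;	/* hypothetical rebind step */
		spin_unlock_irq(&pool->lock);
	}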
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	63
1 file changed, 51 insertions(+), 12 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 39a591f65b08..384ff34c9aff 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -119,6 +119,9 @@ enum {
  *
  * F: wq->flush_mutex protected.
  *
+ * MG: pool->manager_mutex and pool->lock protected.  Writes require both
+ *     locks.  Reads can happen under either lock.
+ *
  * WQ: wq_mutex protected.
  *
  * WR: wq_mutex protected for writes.  Sched-RCU protected for reads.
@@ -156,7 +159,7 @@ struct worker_pool {
 	/* see manage_workers() for details on the two manager mutexes */
 	struct mutex		manager_arb;	/* manager arbitration */
 	struct mutex		manager_mutex;	/* manager exclusion */
-	struct ida		worker_ida;	/* L: for worker IDs */
+	struct idr		worker_idr;	/* MG: worker IDs and iteration */
 
 	struct workqueue_attrs	*attrs;		/* I: worker attributes */
 	struct hlist_node	hash_node;	/* WQ: unbound_pool_hash node */
@@ -299,6 +302,15 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to,
 			   lockdep_is_held(&pwq_lock),			\
 			   "sched RCU or pwq_lock should be held")
 
+#ifdef CONFIG_LOCKDEP
+#define assert_manager_or_pool_lock(pool)				\
+	WARN_ONCE(!lockdep_is_held(&(pool)->manager_mutex) &&		\
+		  !lockdep_is_held(&(pool)->lock),			\
+		  "pool->manager_mutex or ->lock should be held")
+#else
+#define assert_manager_or_pool_lock(pool)	do { } while (0)
+#endif
+
 #define for_each_cpu_worker_pool(pool, cpu)				\
 	for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];		\
 	     (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
@@ -325,6 +337,22 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to,
 		else
 
 /**
+ * for_each_pool_worker - iterate through all workers of a worker_pool
+ * @worker: iteration cursor
+ * @wi: integer used for iteration
+ * @pool: worker_pool to iterate workers of
+ *
+ * This must be called with either @pool->manager_mutex or ->lock held.
+ *
+ * The if/else clause exists only for the lockdep assertion and can be
+ * ignored.
+ */
+#define for_each_pool_worker(worker, wi, pool)				\
+	idr_for_each_entry(&(pool)->worker_idr, (worker), (wi))	\
+		if (({ assert_manager_or_pool_lock((pool)); false; })) { } \
+		else
+
+/**
  * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
  * @pwq: iteration cursor
  * @wq: the target workqueue
@@ -1723,14 +1751,19 @@ static struct worker *create_worker(struct worker_pool *pool)
 
 	lockdep_assert_held(&pool->manager_mutex);
 
+	/*
+	 * ID is needed to determine kthread name.  Allocate ID first
+	 * without installing the pointer.
+	 */
+	idr_preload(GFP_KERNEL);
 	spin_lock_irq(&pool->lock);
-	while (ida_get_new(&pool->worker_ida, &id)) {
-		spin_unlock_irq(&pool->lock);
-		if (!ida_pre_get(&pool->worker_ida, GFP_KERNEL))
-			goto fail;
-		spin_lock_irq(&pool->lock);
-	}
+
+	id = idr_alloc(&pool->worker_idr, NULL, 0, 0, GFP_NOWAIT);
+
 	spin_unlock_irq(&pool->lock);
+	idr_preload_end();
+	if (id < 0)
+		goto fail;
 
 	worker = alloc_worker();
 	if (!worker)
@@ -1768,11 +1801,17 @@ static struct worker *create_worker(struct worker_pool *pool)
 	if (pool->flags & POOL_DISASSOCIATED)
 		worker->flags |= WORKER_UNBOUND;
 
+	/* successful, commit the pointer to idr */
+	spin_lock_irq(&pool->lock);
+	idr_replace(&pool->worker_idr, worker, worker->id);
+	spin_unlock_irq(&pool->lock);
+
 	return worker;
+
 fail:
 	if (id >= 0) {
 		spin_lock_irq(&pool->lock);
-		ida_remove(&pool->worker_ida, id);
+		idr_remove(&pool->worker_idr, id);
 		spin_unlock_irq(&pool->lock);
 	}
 	kfree(worker);
@@ -1832,7 +1871,6 @@ static int create_and_start_worker(struct worker_pool *pool)
 static void destroy_worker(struct worker *worker)
 {
 	struct worker_pool *pool = worker->pool;
-	int id = worker->id;
 
 	lockdep_assert_held(&pool->manager_mutex);
 	lockdep_assert_held(&pool->lock);
@@ -1850,13 +1888,14 @@ static void destroy_worker(struct worker *worker)
 	list_del_init(&worker->entry);
 	worker->flags |= WORKER_DIE;
 
+	idr_remove(&pool->worker_idr, worker->id);
+
 	spin_unlock_irq(&pool->lock);
 
 	kthread_stop(worker->task);
 	kfree(worker);
 
 	spin_lock_irq(&pool->lock);
-	ida_remove(&pool->worker_ida, id);
 }
 
 static void idle_worker_timeout(unsigned long __pool)
@@ -3482,7 +3521,7 @@ static int init_worker_pool(struct worker_pool *pool)
 
 	mutex_init(&pool->manager_arb);
 	mutex_init(&pool->manager_mutex);
-	ida_init(&pool->worker_ida);
+	idr_init(&pool->worker_idr);
 
 	INIT_HLIST_NODE(&pool->hash_node);
 	pool->refcnt = 1;
@@ -3498,7 +3537,7 @@ static void rcu_free_pool(struct rcu_head *rcu)
 {
 	struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu);
 
-	ida_destroy(&pool->worker_ida);
+	idr_destroy(&pool->worker_idr);
 	free_workqueue_attrs(pool->attrs);
 	kfree(pool);
 }
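As an aside, the allocate-first/install-later sequence in create_worker()
above is a general idr pattern.  The following is a minimal, self-contained
sketch of it against the idr API used in this patch; example_register_object,
struct my_object and the lock parameter are made up for illustration and are
not part of this patch.

	#include <linux/idr.h>
	#include <linux/spinlock.h>
	#include <linux/gfp.h>

	/* Hypothetical example type -- not from this patch. */
	struct my_object {
		int id;
		/* ... */
	};

	/*
	 * Two-step pattern: reserve an ID with a NULL pointer so that
	 * concurrent idr_for_each_entry() users skip the slot, then publish
	 * the object with idr_replace() once it is fully initialized.
	 */
	static int example_register_object(struct idr *idr, spinlock_t *lock,
					   struct my_object *obj)
	{
		int id;

		idr_preload(GFP_KERNEL);	/* preallocate outside the lock */
		spin_lock_irq(lock);
		id = idr_alloc(idr, NULL, 0, 0, GFP_NOWAIT);
		spin_unlock_irq(lock);
		idr_preload_end();
		if (id < 0)
			return id;

		obj->id = id;			/* e.g. needed to name a kthread */
		/* ... finish initializing obj ... */

		spin_lock_irq(lock);
		idr_replace(idr, obj, id);	/* now visible to iteration */
		spin_unlock_irq(lock);
		return 0;
	}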