author     Lai Jiangshan <laijs@cn.fujitsu.com>   2013-03-25 19:57:18 -0400
committer  Tejun Heo <tj@kernel.org>              2013-03-25 19:57:18 -0400
commit     b09f4fd39c0e562aff3682773f4c451d6125048c (patch)
tree       f96b59d02c0136f82c0d280bf6d075d6d0f48f9d /kernel/workqueue.c
parent     87fc741e94cf64445c698486982b30afa0811eca (diff)
workqueue: protect wq->pwqs and iteration with wq->mutex
We're expanding wq->mutex to cover all fields specific to each workqueue, with the end goal of replacing pwq_lock, which will make locking simpler and easier to understand.

init_and_link_pwq() and pwq_unbound_release_workfn() already grab wq->mutex when adding or removing a pwq from the wq->pwqs list. This patch makes it official that the list is wq->mutex protected for writes and updates readers accordingly. Explicit IRQ toggles for sched-RCU read-locking in flush_workqueue_prep_pwqs() and drain_workqueue() are removed, as the surrounding wq->mutex provides sufficient synchronization.

Also, assert_rcu_or_pwq_lock() is renamed to assert_rcu_or_wq_mutex() and now checks for wq->mutex too.

pwq_lock locking and its assertion are not removed by this patch, and a couple of for_each_pwq() iterations are still protected by it. They'll be removed by future patches.

tj: Rebased on top of the current dev branch. Updated description. Folded in the assert_rcu_or_wq_mutex() renaming from a later patch along with the associated comment updates.

Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
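To make the convention above concrete, here is a minimal sketch of what a reader and a writer of wq->pwqs look like once this patch is applied. It reuses identifiers that exist in kernel/workqueue.c (for_each_pwq(), struct workqueue_struct, the pwqs_node linkage), but show_pwqs_example() and link_pwq_example() are hypothetical helpers written only for illustration; they are not part of the patch and would only compile inside kernel/workqueue.c where those internals are visible.

/* Hypothetical illustration of the post-patch locking rules for wq->pwqs. */

/* Reader: a sched-RCU read section is enough to walk the list; holding
 * wq->mutex would equally satisfy assert_rcu_or_wq_mutex(). */
static void show_pwqs_example(struct workqueue_struct *wq)
{
	struct pool_workqueue *pwq;

	rcu_read_lock_sched();
	for_each_pwq(pwq, wq)
		/* racy debug read; pool->lock would be needed for a stable value */
		pr_info("pwq %p: nr_active=%d\n", pwq, pwq->nr_active);
	rcu_read_unlock_sched();
}

/* Writer: modifications of wq->pwqs must be done under wq->mutex,
 * mirroring what init_and_link_pwq() already does. */
static void link_pwq_example(struct workqueue_struct *wq,
			     struct pool_workqueue *pwq)
{
	mutex_lock(&wq->mutex);
	list_add_tail_rcu(&pwq->pwqs_node, &wq->pwqs);
	mutex_unlock(&wq->mutex);
}

This is also why the explicit local_irq_disable()/local_irq_enable() pairs can go away in the hunks below: flush_workqueue_prep_pwqs() is called with wq->mutex held and drain_workqueue() now takes it directly, so no separate sched-RCU read section needs to be manufactured around for_each_pwq().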
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c  |  42
1 file changed, 18 insertions(+), 24 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 3ac2c4d85607..9c32fd171d5c 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -204,7 +204,7 @@ struct pool_workqueue {
 	 * Release of unbound pwq is punted to system_wq.  See put_pwq()
 	 * and pwq_unbound_release_workfn() for details.  pool_workqueue
 	 * itself is also sched-RCU protected so that the first pwq can be
-	 * determined without grabbing pwq_lock.
+	 * determined without grabbing wq->mutex.
 	 */
 	struct work_struct	unbound_release_work;
 	struct rcu_head		rcu;
@@ -298,10 +298,11 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to,
 			   lockdep_is_held(&wq_pool_mutex),		\
 			   "sched RCU or wq_pool_mutex should be held")
 
-#define assert_rcu_or_pwq_lock()					\
+#define assert_rcu_or_wq_mutex(wq)					\
 	rcu_lockdep_assert(rcu_read_lock_sched_held() ||		\
+			   lockdep_is_held(&wq->mutex) ||		\
 			   lockdep_is_held(&pwq_lock),			\
-			   "sched RCU or pwq_lock should be held")
+			   "sched RCU or wq->mutex should be held")
 
 #ifdef CONFIG_LOCKDEP
 #define assert_manager_or_pool_lock(pool)				\
@@ -356,7 +357,7 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to,
  * @pwq: iteration cursor
  * @wq: the target workqueue
  *
- * This must be called either with pwq_lock held or sched RCU read locked.
+ * This must be called either with wq->mutex held or sched RCU read locked.
  * If the pwq needs to be used beyond the locking in effect, the caller is
  * responsible for guaranteeing that the pwq stays online.
  *
@@ -365,7 +366,7 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to,
  */
 #define for_each_pwq(pwq, wq)						\
 	list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node)		\
-		if (({ assert_rcu_or_pwq_lock(); false; })) { }		\
+		if (({ assert_rcu_or_wq_mutex(wq); false; })) { }	\
 		else
 
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
@@ -504,13 +505,13 @@ static int worker_pool_assign_id(struct worker_pool *pool)
  * first_pwq - return the first pool_workqueue of the specified workqueue
  * @wq: the target workqueue
  *
- * This must be called either with pwq_lock held or sched RCU read locked.
+ * This must be called either with wq->mutex held or sched RCU read locked.
  * If the pwq needs to be used beyond the locking in effect, the caller is
  * responsible for guaranteeing that the pwq stays online.
  */
 static struct pool_workqueue *first_pwq(struct workqueue_struct *wq)
 {
-	assert_rcu_or_pwq_lock();
+	assert_rcu_or_wq_mutex(wq);
 	return list_first_or_null_rcu(&wq->pwqs, struct pool_workqueue,
 				      pwqs_node);
 }
@@ -2477,12 +2478,10 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
 		atomic_set(&wq->nr_pwqs_to_flush, 1);
 	}
 
-	local_irq_disable();
-
 	for_each_pwq(pwq, wq) {
 		struct worker_pool *pool = pwq->pool;
 
-		spin_lock(&pool->lock);
+		spin_lock_irq(&pool->lock);
 
 		if (flush_color >= 0) {
 			WARN_ON_ONCE(pwq->flush_color != -1);
@@ -2499,11 +2498,9 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
 			pwq->work_color = work_color;
 		}
 
-		spin_unlock(&pool->lock);
+		spin_unlock_irq(&pool->lock);
 	}
 
-	local_irq_enable();
-
 	if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
 		complete(&wq->first_flusher->done);
 
@@ -2691,14 +2688,14 @@ void drain_workqueue(struct workqueue_struct *wq)
 reflush:
 	flush_workqueue(wq);
 
-	local_irq_disable();
+	mutex_lock(&wq->mutex);
 
 	for_each_pwq(pwq, wq) {
 		bool drained;
 
-		spin_lock(&pwq->pool->lock);
+		spin_lock_irq(&pwq->pool->lock);
 		drained = !pwq->nr_active && list_empty(&pwq->delayed_works);
-		spin_unlock(&pwq->pool->lock);
+		spin_unlock_irq(&pwq->pool->lock);
 
 		if (drained)
 			continue;
@@ -2708,13 +2705,10 @@ reflush:
 		pr_warn("workqueue %s: drain_workqueue() isn't complete after %u tries\n",
 			wq->name, flush_cnt);
 
-		local_irq_enable();
+		mutex_unlock(&wq->mutex);
 		goto reflush;
 	}
 
-	local_irq_enable();
-
-	mutex_lock(&wq->mutex);
 	if (!--wq->nr_drainers)
 		wq->flags &= ~__WQ_DRAINING;
 	mutex_unlock(&wq->mutex);
@@ -3843,13 +3837,13 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	drain_workqueue(wq);
 
 	/* sanity checks */
-	spin_lock_irq(&pwq_lock);
+	mutex_lock(&wq->mutex);
 	for_each_pwq(pwq, wq) {
 		int i;
 
 		for (i = 0; i < WORK_NR_COLORS; i++) {
 			if (WARN_ON(pwq->nr_in_flight[i])) {
-				spin_unlock_irq(&pwq_lock);
+				mutex_unlock(&wq->mutex);
 				return;
 			}
 		}
@@ -3857,11 +3851,11 @@ void destroy_workqueue(struct workqueue_struct *wq)
 		if (WARN_ON(pwq->refcnt > 1) ||
 		    WARN_ON(pwq->nr_active) ||
 		    WARN_ON(!list_empty(&pwq->delayed_works))) {
-			spin_unlock_irq(&pwq_lock);
+			mutex_unlock(&wq->mutex);
 			return;
 		}
 	}
-	spin_unlock_irq(&pwq_lock);
+	mutex_unlock(&wq->mutex);
 
 	/*
 	 * wq list is used to freeze wq, remove from list after