author    Tejun Heo <tj@kernel.org>    2013-03-12 14:30:00 -0400
committer Tejun Heo <tj@kernel.org>    2013-03-12 14:30:00 -0400
commit    76af4d936153afec176c53378e6ba8671e7e237d (patch)
tree      94db54e019cc5c66305381f532506cc767df1930 /kernel/workqueue.c
parent    7fb98ea79cecb14fc1735544146be06fdb1944c3 (diff)
workqueue: update synchronization rules on workqueue->pwqs
Make workqueue->pwqs protected by workqueue_lock for writes and sched-RCU protected for reads. Lockdep assertions are added to for_each_pwq() and first_pwq(), and all their users are converted to either hold workqueue_lock or disable preemption/irq.

alloc_and_link_pwqs() is updated to use list_add_tail_rcu() for consistency, which isn't strictly necessary as the workqueue isn't visible yet. destroy_workqueue() isn't updated to sched-RCU release pwqs. This is okay as the workqueue should have no users left by that point; the locking is superfluous at that point.

This is to help the implementation of unbound pools/pwqs with custom attributes. This patch doesn't introduce any behavior changes.

v2: Updated for_each_pwq() to use if/else for the hidden assertion statement instead of just if, as suggested by Lai. This avoids confusing a following else clause.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
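To illustrate the new rule in reader terms: a walk of wq->pwqs is legal only inside a sched-RCU read-side critical section or with workqueue_lock held, which is exactly what the new assert_rcu_or_wq_lock() checks. A minimal sketch of such a reader (the helper name is hypothetical, not part of this patch):

	/* Sketch only: a hypothetical reader following the new "R:" rule. */
	static bool wq_has_delayed_work(struct workqueue_struct *wq)
	{
		struct pool_workqueue *pwq;
		bool found = false;

		rcu_read_lock_sched();	/* satisfies assert_rcu_or_wq_lock() */
		for_each_pwq(pwq, wq) {
			if (!list_empty(&pwq->delayed_works)) {
				found = true;
				break;
			}
		}
		rcu_read_unlock_sched();

		return found;
	}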
Diffstat (limited to 'kernel/workqueue.c')
 kernel/workqueue.c | 87 ++++++++++++++++++++++++++++++++++-----------
 1 file changed, 70 insertions(+), 17 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 577ac719eaec..e060ff2bc20c 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -42,6 +42,7 @@
 #include <linux/lockdep.h>
 #include <linux/idr.h>
 #include <linux/hashtable.h>
+#include <linux/rculist.h>
 
 #include "workqueue_internal.h"
 
@@ -118,6 +119,8 @@ enum {
  * F: wq->flush_mutex protected.
  *
  * W: workqueue_lock protected.
+ *
+ * R: workqueue_lock protected for writes.  Sched-RCU protected for reads.
  */
 
 /* struct worker is defined in workqueue_internal.h */
@@ -169,7 +172,7 @@ struct pool_workqueue {
 	int			nr_active;	/* L: nr of active works */
 	int			max_active;	/* L: max active works */
 	struct list_head	delayed_works;	/* L: delayed works */
-	struct list_head	pwqs_node;	/* I: node on wq->pwqs */
+	struct list_head	pwqs_node;	/* R: node on wq->pwqs */
 	struct list_head	mayday_node;	/* W: node on wq->maydays */
 } __aligned(1 << WORK_STRUCT_FLAG_BITS);
 
@@ -189,7 +192,7 @@ struct wq_flusher {
 struct workqueue_struct {
 	unsigned int		flags;		/* W: WQ_* flags */
 	struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwq's */
-	struct list_head	pwqs;		/* I: all pwqs of this wq */
+	struct list_head	pwqs;		/* R: all pwqs of this wq */
 	struct list_head	list;		/* W: list of all workqueues */
 
 	struct mutex		flush_mutex;	/* protects wq flushing */
@@ -227,6 +230,11 @@ EXPORT_SYMBOL_GPL(system_freezable_wq);
 #define CREATE_TRACE_POINTS
 #include <trace/events/workqueue.h>
 
+#define assert_rcu_or_wq_lock()						\
+	rcu_lockdep_assert(rcu_read_lock_sched_held() ||		\
+			   lockdep_is_held(&workqueue_lock),		\
+			   "sched RCU or workqueue lock should be held")
+
 #define for_each_std_worker_pool(pool, cpu)				\
 	for ((pool) = &std_worker_pools(cpu)[0];			\
 	     (pool) < &std_worker_pools(cpu)[NR_STD_WORKER_POOLS]; (pool)++)
@@ -282,9 +290,18 @@ static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
  * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
  * @pwq: iteration cursor
  * @wq: the target workqueue
+ *
+ * This must be called either with workqueue_lock held or sched RCU read
+ * locked.  If the pwq needs to be used beyond the locking in effect, the
+ * caller is responsible for guaranteeing that the pwq stays online.
+ *
+ * The if/else clause exists only for the lockdep assertion and can be
+ * ignored.
  */
 #define for_each_pwq(pwq, wq)						\
-	list_for_each_entry((pwq), &(wq)->pwqs, pwqs_node)
+	list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node)		\
+		if (({ assert_rcu_or_wq_lock(); false; })) { }		\
+		else
 
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
 
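On the if/else trick mentioned above: with a bare if, an else written by the user after a for_each_pwq() statement could bind to the hidden if. Expanded by hand (an illustration of the macro, not code from the patch), for_each_pwq(pwq, wq) { body } becomes roughly:

	list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node)
		if (({ assert_rcu_or_wq_lock(); false; })) {
			/* never taken; exists only so the assertion runs */
		} else {
			/* body */
		}

so the assertion fires on each iteration while the user's body, and any else the user writes, still binds where one would expect.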
@@ -463,9 +480,19 @@ static struct worker_pool *get_std_worker_pool(int cpu, bool highpri)
 	return &pools[highpri];
 }
 
+/**
+ * first_pwq - return the first pool_workqueue of the specified workqueue
+ * @wq: the target workqueue
+ *
+ * This must be called either with workqueue_lock held or sched RCU read
+ * locked.  If the pwq needs to be used beyond the locking in effect, the
+ * caller is responsible for guaranteeing that the pwq stays online.
+ */
 static struct pool_workqueue *first_pwq(struct workqueue_struct *wq)
 {
-	return list_first_entry(&wq->pwqs, struct pool_workqueue, pwqs_node);
+	assert_rcu_or_wq_lock();
+	return list_first_or_null_rcu(&wq->pwqs, struct pool_workqueue,
+				      pwqs_node);
 }
 
 static unsigned int work_color_to_flags(int color)
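For the write side, holding workqueue_lock also satisfies the assertion; and since list_first_or_null_rcu() returns NULL on an empty list where list_first_entry() would have returned a bogus pointer, callers should be prepared for NULL. A hypothetical locked caller (a sketch, not from the patch):

	spin_lock_irq(&workqueue_lock);
	pwq = first_pwq(wq);		/* may be NULL if wq->pwqs is empty */
	if (pwq) {
		/* ... inspect or modify the pwq ... */
	}
	spin_unlock_irq(&workqueue_lock);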
@@ -2486,10 +2513,12 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
 		atomic_set(&wq->nr_pwqs_to_flush, 1);
 	}
 
+	local_irq_disable();
+
 	for_each_pwq(pwq, wq) {
 		struct worker_pool *pool = pwq->pool;
 
-		spin_lock_irq(&pool->lock);
+		spin_lock(&pool->lock);
 
 		if (flush_color >= 0) {
 			WARN_ON_ONCE(pwq->flush_color != -1);
@@ -2506,9 +2535,11 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
 			pwq->work_color = work_color;
 		}
 
-		spin_unlock_irq(&pool->lock);
+		spin_unlock(&pool->lock);
 	}
 
+	local_irq_enable();
+
 	if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
 		complete(&wq->first_flusher->done);
 
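The conversion pattern in the two hunks above (repeated in drain_workqueue() below) hoists the irq-disable out of the loop: disabling interrupts also disables preemption, so the whole for_each_pwq() walk becomes one sched-RCU read-side critical section, while the per-pool locks drop down to the plain spin_lock()/spin_unlock() variants. Schematically (a sketch of the shape, not literal code from the file):

	local_irq_disable();
	for_each_pwq(pwq, wq) {
		spin_lock(&pwq->pool->lock);	/* irqs are already off */
		/* ... per-pool work ... */
		spin_unlock(&pwq->pool->lock);
	}
	local_irq_enable();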
@@ -2699,12 +2730,14 @@ void drain_workqueue(struct workqueue_struct *wq)
 reflush:
 	flush_workqueue(wq);
 
+	local_irq_disable();
+
 	for_each_pwq(pwq, wq) {
 		bool drained;
 
-		spin_lock_irq(&pwq->pool->lock);
+		spin_lock(&pwq->pool->lock);
 		drained = !pwq->nr_active && list_empty(&pwq->delayed_works);
-		spin_unlock_irq(&pwq->pool->lock);
+		spin_unlock(&pwq->pool->lock);
 
 		if (drained)
 			continue;
@@ -2713,13 +2746,17 @@ reflush:
 		    (flush_cnt % 100 == 0 && flush_cnt <= 1000))
 			pr_warn("workqueue %s: flush on destruction isn't complete after %u tries\n",
 				wq->name, flush_cnt);
+
+		local_irq_enable();
 		goto reflush;
 	}
 
-	spin_lock_irq(&workqueue_lock);
+	spin_lock(&workqueue_lock);
 	if (!--wq->nr_drainers)
 		wq->flags &= ~WQ_DRAINING;
-	spin_unlock_irq(&workqueue_lock);
+	spin_unlock(&workqueue_lock);
+
+	local_irq_enable();
 }
 EXPORT_SYMBOL_GPL(drain_workqueue);
 
@@ -3085,7 +3122,7 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
 				per_cpu_ptr(wq->cpu_pwqs, cpu);
 
 			pwq->pool = get_std_worker_pool(cpu, highpri);
-			list_add_tail(&pwq->pwqs_node, &wq->pwqs);
+			list_add_tail_rcu(&pwq->pwqs_node, &wq->pwqs);
 		}
 	} else {
 		struct pool_workqueue *pwq;
@@ -3095,7 +3132,7 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
 			return -ENOMEM;
 
 		pwq->pool = get_std_worker_pool(WORK_CPU_UNBOUND, highpri);
-		list_add_tail(&pwq->pwqs_node, &wq->pwqs);
+		list_add_tail_rcu(&pwq->pwqs_node, &wq->pwqs);
 	}
 
 	return 0;
@@ -3172,6 +3209,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 	if (alloc_and_link_pwqs(wq) < 0)
 		goto err;
 
+	local_irq_disable();
 	for_each_pwq(pwq, wq) {
 		BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
 		pwq->wq = wq;
@@ -3180,6 +3218,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 		INIT_LIST_HEAD(&pwq->delayed_works);
 		INIT_LIST_HEAD(&pwq->mayday_node);
 	}
+	local_irq_enable();
 
 	if (flags & WQ_RESCUER) {
 		struct worker *rescuer;
@@ -3237,24 +3276,32 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	/* drain it before proceeding with destruction */
 	drain_workqueue(wq);
 
+	spin_lock_irq(&workqueue_lock);
+
 	/* sanity checks */
 	for_each_pwq(pwq, wq) {
 		int i;
 
-		for (i = 0; i < WORK_NR_COLORS; i++)
-			if (WARN_ON(pwq->nr_in_flight[i]))
+		for (i = 0; i < WORK_NR_COLORS; i++) {
+			if (WARN_ON(pwq->nr_in_flight[i])) {
+				spin_unlock_irq(&workqueue_lock);
 				return;
+			}
+		}
+
 		if (WARN_ON(pwq->nr_active) ||
-		    WARN_ON(!list_empty(&pwq->delayed_works)))
+		    WARN_ON(!list_empty(&pwq->delayed_works))) {
+			spin_unlock_irq(&workqueue_lock);
 			return;
+		}
 	}
 
 	/*
 	 * wq list is used to freeze wq, remove from list after
 	 * flushing is complete in case freeze races us.
 	 */
-	spin_lock_irq(&workqueue_lock);
 	list_del(&wq->list);
+
 	spin_unlock_irq(&workqueue_lock);
 
 	if (wq->flags & WQ_RESCUER) {
@@ -3338,13 +3385,19 @@ EXPORT_SYMBOL_GPL(workqueue_set_max_active);
 bool workqueue_congested(int cpu, struct workqueue_struct *wq)
 {
 	struct pool_workqueue *pwq;
+	bool ret;
+
+	preempt_disable();
 
 	if (!(wq->flags & WQ_UNBOUND))
 		pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
 	else
 		pwq = first_pwq(wq);
 
-	return !list_empty(&pwq->delayed_works);
+	ret = !list_empty(&pwq->delayed_works);
+	preempt_enable();
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(workqueue_congested);
 
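In the last hunk, preempt_disable() is enough because a sched-RCU read-side critical section is precisely a region with preemption disabled, so rcu_read_lock_sched_held() in first_pwq()'s assertion is satisfied; the local_irq_disable() sections above work for the same reason. A hypothetical equivalent of the workqueue_congested() body using the explicit sched-RCU API instead:

	rcu_read_lock_sched();
	if (!(wq->flags & WQ_UNBOUND))
		pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
	else
		pwq = first_pwq(wq);
	ret = !list_empty(&pwq->delayed_works);
	rcu_read_unlock_sched();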