| author | Tejun Heo <tj@kernel.org> | 2013-03-13 22:47:40 -0400 |
|---|---|---|
| committer | Tejun Heo <tj@kernel.org> | 2013-03-13 22:47:40 -0400 |
| commit | 794b18bc8a3f80445e1f85c9c87c74de9575c93a (patch) | |
| tree | 460e370381780ad497514abc1079f7cd20467ce0 /kernel/workqueue.c | |
| parent | 5bcab3355a555a9c1bd4becb136cbd3651c8eafa (diff) | |
workqueue: separate out pool_workqueue locking into pwq_lock
This patch continues the locking cleanup from the previous patch. It
breaks pool_workqueue synchronization out of workqueue_lock into a
new spinlock, pwq_lock. The following are protected by pwq_lock:
* workqueue->pwqs
* workqueue->saved_max_active
The conversion is straightforward: workqueue_lock usages which cover
the above two are converted to pwq_lock. A new locking label, PW, is
added for things protected by pwq_lock, and FR is updated to mean
flush_mutex + pwq_lock + sched-RCU.
This patch shouldn't introduce any visible behavior changes.
Signed-off-by: Tejun Heo <tj@kernel.org>
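To make the new rules concrete before the diff, here is a minimal sketch (not part of the patch; `example_writer` and `example_reader` are invented names, and the code assumes it lives inside kernel/workqueue.c where the file-local `pwq_lock` and `for_each_pwq` are visible) of the write-side and read-side access patterns this commit establishes:

```c
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

/* writers: wq->pwqs and wq->saved_max_active change only under pwq_lock */
static void example_writer(struct workqueue_struct *wq, int max_active)
{
	spin_lock_irq(&pwq_lock);
	wq->saved_max_active = max_active;	/* PW-protected field */
	spin_unlock_irq(&pwq_lock);
}

/* readers: sched-RCU alone is enough to walk wq->pwqs */
static void example_reader(struct workqueue_struct *wq)
{
	struct pool_workqueue *pwq;

	rcu_read_lock_sched();
	for_each_pwq(pwq, wq)		/* asserts sched-RCU or pwq_lock */
		;			/* inspect @pwq here */
	rcu_read_unlock_sched();
}
```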
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r-- | kernel/workqueue.c | 69 |
1 file changed, 36 insertions(+), 33 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index c3b59ff22007..63856dfbd082 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -123,9 +123,11 @@ enum {
  *
  * WR: wq_mutex protected for writes.  Sched-RCU protected for reads.
  *
+ * PW: pwq_lock protected.
+ *
  * W: workqueue_lock protected.
  *
- * FR: wq->flush_mutex and workqueue_lock protected for writes.  Sched-RCU
+ * FR: wq->flush_mutex and pwq_lock protected for writes.  Sched-RCU
  *     protected for reads.
  */
 
@@ -198,7 +200,7 @@ struct pool_workqueue {
 	 * Release of unbound pwq is punted to system_wq.  See put_pwq()
 	 * and pwq_unbound_release_workfn() for details.  pool_workqueue
 	 * itself is also sched-RCU protected so that the first pwq can be
-	 * determined without grabbing workqueue_lock.
+	 * determined without grabbing pwq_lock.
 	 */
 	struct work_struct	unbound_release_work;
 	struct rcu_head		rcu;
@@ -237,7 +239,7 @@ struct workqueue_struct {
 	struct worker		*rescuer;	/* I: rescue worker */
 
 	int			nr_drainers;	/* WQ: drain in progress */
-	int			saved_max_active; /* W: saved pwq max_active */
+	int			saved_max_active; /* PW: saved pwq max_active */
 
 #ifdef CONFIG_SYSFS
 	struct wq_device	*wq_dev;	/* I: for sysfs interface */
@@ -251,6 +253,7 @@ struct workqueue_struct {
 static struct kmem_cache *pwq_cache;
 
 static DEFINE_MUTEX(wq_mutex);		/* protects workqueues and pools */
+static DEFINE_SPINLOCK(pwq_lock);	/* protects pool_workqueues */
 static DEFINE_SPINLOCK(workqueue_lock);
 
 static LIST_HEAD(workqueues);		/* WQ: list of all workqueues */
@@ -291,10 +294,10 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to,
 			 lockdep_is_held(&wq_mutex),			\
 			 "sched RCU or wq_mutex should be held")
 
-#define assert_rcu_or_wq_lock()						\
+#define assert_rcu_or_pwq_lock()					\
 	rcu_lockdep_assert(rcu_read_lock_sched_held() ||		\
-			   lockdep_is_held(&workqueue_lock),		\
-			   "sched RCU or workqueue lock should be held")
+			   lockdep_is_held(&pwq_lock),			\
+			   "sched RCU or pwq_lock should be held")
 
 #define for_each_cpu_worker_pool(pool, cpu)				\
 	for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];		\
@@ -326,16 +329,16 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to,
  * @pwq: iteration cursor
  * @wq: the target workqueue
  *
- * This must be called either with workqueue_lock held or sched RCU read
- * locked.  If the pwq needs to be used beyond the locking in effect, the
- * caller is responsible for guaranteeing that the pwq stays online.
+ * This must be called either with pwq_lock held or sched RCU read locked.
+ * If the pwq needs to be used beyond the locking in effect, the caller is
+ * responsible for guaranteeing that the pwq stays online.
  *
  * The if/else clause exists only for the lockdep assertion and can be
  * ignored.
  */
 #define for_each_pwq(pwq, wq)						\
 	list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node)		\
-		if (({ assert_rcu_or_wq_lock(); false; })) { }		\
+		if (({ assert_rcu_or_pwq_lock(); false; })) { }		\
 		else
 
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
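The `if (({ ...; false; })) { } else` construct above is a kernel idiom worth unpacking: the GCC statement expression runs the lockdep assertion and evaluates to false, so the empty branch is never taken and the caller's loop body binds to the `else`, leaving iteration semantics unchanged. A self-contained userspace illustration of the same shape (all names here are invented for the example):

```c
#include <assert.h>
#include <stdio.h>

static int locking_ok = 1;		/* stand-in for lockdep state */

#define CHECK_LOCKING()	assert(locking_ok)

/* same shape as for_each_pwq: an assertion is injected into the
 * iterator; the caller's body attaches to the else branch */
#define for_each_item(it, arr, n)					\
	for ((it) = (arr); (it) < (arr) + (n); (it)++)			\
		if (({ CHECK_LOCKING(); 0; })) { }			\
		else

int main(void)
{
	int data[] = { 1, 2, 3 };
	const int *it;
	int sum = 0;

	for_each_item(it, data, 3)
		sum += *it;		/* runs once per element */
	printf("%d\n", sum);		/* prints 6 */
	return 0;
}
```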
@@ -474,13 +477,13 @@ static int worker_pool_assign_id(struct worker_pool *pool)
  * first_pwq - return the first pool_workqueue of the specified workqueue
  * @wq: the target workqueue
  *
- * This must be called either with workqueue_lock held or sched RCU read
- * locked.  If the pwq needs to be used beyond the locking in effect, the
- * caller is responsible for guaranteeing that the pwq stays online.
+ * This must be called either with pwq_lock held or sched RCU read locked.
+ * If the pwq needs to be used beyond the locking in effect, the caller is
+ * responsible for guaranteeing that the pwq stays online.
  */
 static struct pool_workqueue *first_pwq(struct workqueue_struct *wq)
 {
-	assert_rcu_or_wq_lock();
+	assert_rcu_or_pwq_lock();
 	return list_first_or_null_rcu(&wq->pwqs, struct pool_workqueue,
 				      pwqs_node);
 }
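A hedged sketch of the read-side pattern first_pwq() now expects (the helper name is invented, and the code assumes it sits inside kernel/workqueue.c where first_pwq() is visible):

```c
/* sched-RCU is sufficient; the pointer is only valid inside the
 * read-side critical section */
static bool wq_has_pwq(struct workqueue_struct *wq)
{
	struct pool_workqueue *pwq;

	rcu_read_lock_sched();		/* satisfies assert_rcu_or_pwq_lock() */
	pwq = first_pwq(wq);		/* NULL if wq->pwqs is empty */
	rcu_read_unlock_sched();

	/* @pwq must not be dereferenced past this point */
	return pwq != NULL;
}
```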
@@ -3639,9 +3642,9 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
 	 * and consistent with the linking path.
 	 */
 	mutex_lock(&wq->flush_mutex);
-	spin_lock_irq(&workqueue_lock);
+	spin_lock_irq(&pwq_lock);
 	list_del_rcu(&pwq->pwqs_node);
-	spin_unlock_irq(&workqueue_lock);
+	spin_unlock_irq(&pwq_lock);
 	mutex_unlock(&wq->flush_mutex);
 
 	put_unbound_pool(pool);
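Note the lock nesting this hunk preserves: flush_mutex is always taken outside pwq_lock, on both this unlink path and the link path in init_and_link_pwq() below. A condensed sketch of the ordering, using the same identifiers as the diff:

```c
mutex_lock(&wq->flush_mutex);	/* outer: sleeping mutex */
spin_lock_irq(&pwq_lock);	/* inner: irq-safe spinlock */
list_del_rcu(&pwq->pwqs_node);	/* or list_add_rcu() when linking */
spin_unlock_irq(&pwq_lock);
mutex_unlock(&wq->flush_mutex);	/* released in reverse order */
```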
@@ -3669,7 +3672,7 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
 	bool freezable = wq->flags & WQ_FREEZABLE;
 
 	/* for @wq->saved_max_active */
-	lockdep_assert_held(&workqueue_lock);
+	lockdep_assert_held(&pwq_lock);
 
 	/* fast exit for non-freezable wqs */
 	if (!freezable && pwq->max_active == wq->saved_max_active)
@@ -3706,7 +3709,7 @@ static void init_and_link_pwq(struct pool_workqueue *pwq,
 	INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn);
 
 	mutex_lock(&wq->flush_mutex);
-	spin_lock_irq(&workqueue_lock);
+	spin_lock_irq(&pwq_lock);
 
 	/*
 	 * Set the matching work_color.  This is synchronized with
@@ -3722,7 +3725,7 @@ static void init_and_link_pwq(struct pool_workqueue *pwq,
 	/* link in @pwq */
 	list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
 
-	spin_unlock_irq(&workqueue_lock);
+	spin_unlock_irq(&pwq_lock);
 	mutex_unlock(&wq->flush_mutex);
 }
 
@@ -3886,10 +3889,10 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 	 */
 	mutex_lock(&wq_mutex);
 
-	spin_lock_irq(&workqueue_lock);
+	spin_lock_irq(&pwq_lock);
 	for_each_pwq(pwq, wq)
 		pwq_adjust_max_active(pwq);
-	spin_unlock_irq(&workqueue_lock);
+	spin_unlock_irq(&pwq_lock);
 
 	list_add(&wq->list, &workqueues);
 
@@ -3920,13 +3923,13 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	drain_workqueue(wq);
 
 	/* sanity checks */
-	spin_lock_irq(&workqueue_lock);
+	spin_lock_irq(&pwq_lock);
 	for_each_pwq(pwq, wq) {
 		int i;
 
 		for (i = 0; i < WORK_NR_COLORS; i++) {
 			if (WARN_ON(pwq->nr_in_flight[i])) {
-				spin_unlock_irq(&workqueue_lock);
+				spin_unlock_irq(&pwq_lock);
 				return;
 			}
 		}
@@ -3934,11 +3937,11 @@ void destroy_workqueue(struct workqueue_struct *wq)
 		if (WARN_ON(pwq->refcnt > 1) ||
 		    WARN_ON(pwq->nr_active) ||
 		    WARN_ON(!list_empty(&pwq->delayed_works))) {
-			spin_unlock_irq(&workqueue_lock);
+			spin_unlock_irq(&pwq_lock);
 			return;
 		}
 	}
-	spin_unlock_irq(&workqueue_lock);
+	spin_unlock_irq(&pwq_lock);
 
 	/*
 	 * wq list is used to freeze wq, remove from list after
@@ -4000,14 +4003,14 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
 
 	max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
 
-	spin_lock_irq(&workqueue_lock);
+	spin_lock_irq(&pwq_lock);
 
 	wq->saved_max_active = max_active;
 
 	for_each_pwq(pwq, wq)
 		pwq_adjust_max_active(pwq);
 
-	spin_unlock_irq(&workqueue_lock);
+	spin_unlock_irq(&pwq_lock);
 }
 EXPORT_SYMBOL_GPL(workqueue_set_max_active);
 
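A hedged caller-side sketch (the module and workqueue names are invented; alloc_workqueue() and workqueue_set_max_active() are the public API this hunk touches):

```c
static struct workqueue_struct *my_wq;

static int __init my_init(void)
{
	my_wq = alloc_workqueue("my_wq", WQ_FREEZABLE, 0);
	if (!my_wq)
		return -ENOMEM;

	/* the value is saved under pwq_lock, then applied to every pwq;
	 * a frozen wq stays at max_active == 0 until thaw_workqueues() */
	workqueue_set_max_active(my_wq, 16);	/* clamped internally */
	return 0;
}
```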
@@ -4266,7 +4269,7 @@ EXPORT_SYMBOL_GPL(work_on_cpu);
  * pool->worklist.
  *
  * CONTEXT:
- * Grabs and releases wq_mutex, workqueue_lock and pool->lock's.
+ * Grabs and releases wq_mutex, pwq_lock and pool->lock's.
  */
 void freeze_workqueues_begin(void)
 {
@@ -4289,12 +4292,12 @@ void freeze_workqueues_begin(void)
 	}
 
 	/* suppress further executions by setting max_active to zero */
-	spin_lock_irq(&workqueue_lock);
+	spin_lock_irq(&pwq_lock);
 	list_for_each_entry(wq, &workqueues, list) {
 		for_each_pwq(pwq, wq)
 			pwq_adjust_max_active(pwq);
 	}
-	spin_unlock_irq(&workqueue_lock);
+	spin_unlock_irq(&pwq_lock);
 
 	mutex_unlock(&wq_mutex);
 }
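For context, a simplified sketch of how the freezer drives these entry points during suspend (an assumption drawn from the kernel/power/process.c flow, not from this patch; the wrapper name is invented):

```c
static int try_freeze_workqueues(void)
{
	freeze_workqueues_begin();	/* max_active -> 0 under pwq_lock */

	if (freeze_workqueues_busy()) {	/* work items still in flight? */
		thaw_workqueues();	/* restore max_active, kick workers */
		return -EBUSY;
	}
	return 0;
}
```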
@@ -4352,7 +4355,7 @@ out_unlock:
  * frozen works are transferred to their respective pool worklists.
  *
  * CONTEXT:
- * Grabs and releases wq_mutex, workqueue_lock and pool->lock's.
+ * Grabs and releases wq_mutex, pwq_lock and pool->lock's.
  */
 void thaw_workqueues(void)
 {
@@ -4375,12 +4378,12 @@ void thaw_workqueues(void)
 	}
 
 	/* restore max_active and repopulate worklist */
-	spin_lock_irq(&workqueue_lock);
+	spin_lock_irq(&pwq_lock);
 	list_for_each_entry(wq, &workqueues, list) {
 		for_each_pwq(pwq, wq)
 			pwq_adjust_max_active(pwq);
 	}
-	spin_unlock_irq(&workqueue_lock);
+	spin_unlock_irq(&pwq_lock);
 
 	/* kick workers */
 	for_each_pool(pool, pi) {