author	Tejun Heo <tj@kernel.org>	2013-03-13 22:47:40 -0400
committer	Tejun Heo <tj@kernel.org>	2013-03-13 22:47:40 -0400
commit	5bcab3355a555a9c1bd4becb136cbd3651c8eafa (patch)
tree	21b724b2fdaa0b78a4805cef9267499b61824963 /kernel/workqueue.c
parent	7d19c5ce6682fd0390049b5340d4b6bb6065d677 (diff)
workqueue: separate out pool and workqueue locking into wq_mutex
Currently, workqueue_lock protects most shared workqueue resources - the pools, workqueues, pool_workqueues, draining, ID assignments, mayday handling and so on. The coverage has grown organically and there is no identified bottleneck coming from workqueue_lock, but it has grown a bit too much and scheduled rebinding changes need the pools and workqueues to be protected by a mutex instead of a spinlock.

This patch breaks out pool and workqueue synchronization from workqueue_lock into a new mutex - wq_mutex. The following are now protected by wq_mutex.

* worker_pool_idr and unbound_pool_hash
* pool->refcnt
* workqueues list
* workqueue->flags, ->nr_drainers

Most changes are straightforward. workqueue_lock is replaced with wq_mutex where applicable, and workqueue_lock lock/unlocks are added where the conversion leaves data structures not protected by wq_mutex without locking. irq / preemption flips were added where the conversion affects them.

Things worth noting are

* New WQ and WR locking labels added along with assert_rcu_or_wq_mutex().

* worker_pool_assign_id() now expects to be called under wq_mutex.

* create_mutex is removed from get_unbound_pool(). It now just holds wq_mutex.

This patch shouldn't introduce any visible behavior changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
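The locking rule this patch introduces for the pool registry - writers hold the new wq_mutex, readers may instead be inside a sched-RCU read-side section, and assert_rcu_or_wq_mutex() documents that requirement to lockdep - can be illustrated with a small standalone sketch. Everything named my_* below is hypothetical and only mirrors the pattern; it is not code from this patch.

#include <linux/idr.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical example of the "WR" rule: my_mutex for writes,
 * sched-RCU for reads.  The my_* names do not exist in the kernel. */
struct my_obj {
	int id;
};

static DEFINE_MUTEX(my_mutex);	/* writers: protects my_idr modifications */
static DEFINE_IDR(my_idr);	/* WR: my_mutex for writes, sched-RCU for reads */

#define assert_rcu_or_my_mutex()					\
	rcu_lockdep_assert(rcu_read_lock_sched_held() ||		\
			   lockdep_is_held(&my_mutex),			\
			   "sched RCU or my_mutex should be held")

/* reader: valid under either my_mutex or a sched-RCU read section */
static struct my_obj *my_obj_lookup(int id)
{
	assert_rcu_or_my_mutex();
	return idr_find(&my_idr, id);
}

/* writer: unpublish under the mutex, then wait out sched-RCU readers */
static void my_obj_unregister(struct my_obj *obj)
{
	mutex_lock(&my_mutex);
	idr_remove(&my_idr, obj->id);
	mutex_unlock(&my_mutex);

	synchronize_sched();	/* no sched-RCU reader can still see obj */
	kfree(obj);
}

A reader would typically call my_obj_lookup() with preemption disabled (or under rcu_read_lock_sched()), which is why functions such as get_work_pool() in the diff below may be called either under wq_mutex or with preemption disabled.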
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	146
1 file changed, 77 insertions(+), 69 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 9a0cbb2fdd64..c3b59ff22007 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -119,9 +119,11 @@ enum {
  *
  * F: wq->flush_mutex protected.
  *
- * W: workqueue_lock protected.
+ * WQ: wq_mutex protected.
+ *
+ * WR: wq_mutex protected for writes. Sched-RCU protected for reads.
  *
- * R: workqueue_lock protected for writes. Sched-RCU protected for reads.
+ * W: workqueue_lock protected.
  *
  * FR: wq->flush_mutex and workqueue_lock protected for writes. Sched-RCU
  *     protected for reads.
@@ -155,8 +157,8 @@ struct worker_pool {
 	struct ida worker_ida;	/* L: for worker IDs */
 
 	struct workqueue_attrs *attrs;	/* I: worker attributes */
-	struct hlist_node hash_node;	/* W: unbound_pool_hash node */
-	int refcnt;			/* W: refcnt for unbound pools */
+	struct hlist_node hash_node;	/* WQ: unbound_pool_hash node */
+	int refcnt;			/* WQ: refcnt for unbound pools */
 
 	/*
 	 * The current concurrency level. As it's likely to be accessed
@@ -218,10 +220,10 @@ struct wq_device;
  * the appropriate worker_pool through its pool_workqueues.
  */
 struct workqueue_struct {
-	unsigned int flags;	/* W: WQ_* flags */
+	unsigned int flags;	/* WQ: WQ_* flags */
 	struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwq's */
 	struct list_head pwqs;	/* FR: all pwqs of this wq */
-	struct list_head list;	/* W: list of all workqueues */
+	struct list_head list;	/* WQ: list of all workqueues */
 
 	struct mutex flush_mutex;	/* protects wq flushing */
 	int work_color;		/* F: current work color */
@@ -234,7 +236,7 @@ struct workqueue_struct {
 	struct list_head maydays;	/* W: pwqs requesting rescue */
 	struct worker *rescuer;		/* I: rescue worker */
 
-	int nr_drainers;	/* W: drain in progress */
+	int nr_drainers;	/* WQ: drain in progress */
 	int saved_max_active;	/* W: saved pwq max_active */
 
 #ifdef CONFIG_SYSFS
@@ -248,22 +250,19 @@ struct workqueue_struct {
 
 static struct kmem_cache *pwq_cache;
 
-/* Serializes the accesses to the list of workqueues. */
+static DEFINE_MUTEX(wq_mutex);		/* protects workqueues and pools */
 static DEFINE_SPINLOCK(workqueue_lock);
-static LIST_HEAD(workqueues);
-static bool workqueue_freezing;		/* W: have wqs started freezing? */
+
+static LIST_HEAD(workqueues);		/* WQ: list of all workqueues */
+static bool workqueue_freezing;		/* WQ: have wqs started freezing? */
 
 /* the per-cpu worker pools */
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
 				     cpu_worker_pools);
 
-/*
- * R: idr of all pools. Modifications are protected by workqueue_lock.
- * Read accesses are protected by sched-RCU protected.
- */
-static DEFINE_IDR(worker_pool_idr);
+static DEFINE_IDR(worker_pool_idr);	/* WR: idr of all pools */
 
-/* W: hash of all unbound pools keyed by pool->attrs */
+/* WQ: hash of all unbound pools keyed by pool->attrs */
 static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
 
 /* I: attributes used when instantiating standard unbound pools on demand */
@@ -287,6 +286,11 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to,
 #define CREATE_TRACE_POINTS
 #include <trace/events/workqueue.h>
 
+#define assert_rcu_or_wq_mutex()					\
+	rcu_lockdep_assert(rcu_read_lock_sched_held() ||		\
+			   lockdep_is_held(&wq_mutex),			\
+			   "sched RCU or wq_mutex should be held")
+
 #define assert_rcu_or_wq_lock()						\
 	rcu_lockdep_assert(rcu_read_lock_sched_held() ||		\
 			   lockdep_is_held(&workqueue_lock),		\
@@ -305,16 +309,16 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to,
  * @pool: iteration cursor
  * @pi: integer used for iteration
  *
- * This must be called either with workqueue_lock held or sched RCU read
- * locked. If the pool needs to be used beyond the locking in effect, the
- * caller is responsible for guaranteeing that the pool stays online.
+ * This must be called either with wq_mutex held or sched RCU read locked.
+ * If the pool needs to be used beyond the locking in effect, the caller is
+ * responsible for guaranteeing that the pool stays online.
  *
  * The if/else clause exists only for the lockdep assertion and can be
  * ignored.
  */
 #define for_each_pool(pool, pi)						\
 	idr_for_each_entry(&worker_pool_idr, pool, pi)			\
-		if (({ assert_rcu_or_wq_lock(); false; })) { }		\
+		if (({ assert_rcu_or_wq_mutex(); false; })) { }		\
 		else
 
 /**
@@ -455,13 +459,12 @@ static int worker_pool_assign_id(struct worker_pool *pool)
 {
 	int ret;
 
+	lockdep_assert_held(&wq_mutex);
+
 	do {
 		if (!idr_pre_get(&worker_pool_idr, GFP_KERNEL))
 			return -ENOMEM;
-
-		spin_lock_irq(&workqueue_lock);
 		ret = idr_get_new(&worker_pool_idr, pool, &pool->id);
-		spin_unlock_irq(&workqueue_lock);
 	} while (ret == -EAGAIN);
 
 	return ret;
@@ -574,9 +577,9 @@ static struct pool_workqueue *get_work_pwq(struct work_struct *work)
  *
  * Return the worker_pool @work was last associated with. %NULL if none.
  *
- * Pools are created and destroyed under workqueue_lock, and allows read
- * access under sched-RCU read lock. As such, this function should be
- * called under workqueue_lock or with preemption disabled.
+ * Pools are created and destroyed under wq_mutex, and allows read access
+ * under sched-RCU read lock. As such, this function should be called
+ * under wq_mutex or with preemption disabled.
  *
  * All fields of the returned pool are accessible as long as the above
  * mentioned locking is in effect. If the returned pool needs to be used
@@ -588,7 +591,7 @@ static struct worker_pool *get_work_pool(struct work_struct *work)
 	unsigned long data = atomic_long_read(&work->data);
 	int pool_id;
 
-	assert_rcu_or_wq_lock();
+	assert_rcu_or_wq_mutex();
 
 	if (data & WORK_STRUCT_PWQ)
 		return ((struct pool_workqueue *)
@@ -2768,10 +2771,10 @@ void drain_workqueue(struct workqueue_struct *wq)
 	 * hotter than drain_workqueue() and already looks at @wq->flags.
 	 * Use __WQ_DRAINING so that queue doesn't have to check nr_drainers.
 	 */
-	spin_lock_irq(&workqueue_lock);
+	mutex_lock(&wq_mutex);
 	if (!wq->nr_drainers++)
 		wq->flags |= __WQ_DRAINING;
-	spin_unlock_irq(&workqueue_lock);
+	mutex_unlock(&wq_mutex);
 reflush:
 	flush_workqueue(wq);
 
@@ -2796,12 +2799,12 @@ reflush:
 		goto reflush;
 	}
 
-	spin_lock(&workqueue_lock);
+	local_irq_enable();
+
+	mutex_lock(&wq_mutex);
 	if (!--wq->nr_drainers)
 		wq->flags &= ~__WQ_DRAINING;
-	spin_unlock(&workqueue_lock);
-
-	local_irq_enable();
+	mutex_unlock(&wq_mutex);
 }
 EXPORT_SYMBOL_GPL(drain_workqueue);
 
@@ -3514,16 +3517,16 @@ static void put_unbound_pool(struct worker_pool *pool)
 {
 	struct worker *worker;
 
-	spin_lock_irq(&workqueue_lock);
+	mutex_lock(&wq_mutex);
 	if (--pool->refcnt) {
-		spin_unlock_irq(&workqueue_lock);
+		mutex_unlock(&wq_mutex);
 		return;
 	}
 
 	/* sanity checks */
 	if (WARN_ON(!(pool->flags & POOL_DISASSOCIATED)) ||
 	    WARN_ON(!list_empty(&pool->worklist))) {
-		spin_unlock_irq(&workqueue_lock);
+		mutex_unlock(&wq_mutex);
 		return;
 	}
 
@@ -3532,7 +3535,7 @@ static void put_unbound_pool(struct worker_pool *pool)
 	idr_remove(&worker_pool_idr, pool->id);
 	hash_del(&pool->hash_node);
 
-	spin_unlock_irq(&workqueue_lock);
+	mutex_unlock(&wq_mutex);
 
 	/*
 	 * Become the manager and destroy all workers. Grabbing
@@ -3570,21 +3573,18 @@ static void put_unbound_pool(struct worker_pool *pool)
  */
 static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
 {
-	static DEFINE_MUTEX(create_mutex);
 	u32 hash = wqattrs_hash(attrs);
 	struct worker_pool *pool;
 
-	mutex_lock(&create_mutex);
+	mutex_lock(&wq_mutex);
 
 	/* do we already have a matching pool? */
-	spin_lock_irq(&workqueue_lock);
 	hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
 		if (wqattrs_equal(pool->attrs, attrs)) {
 			pool->refcnt++;
 			goto out_unlock;
 		}
 	}
-	spin_unlock_irq(&workqueue_lock);
 
 	/* nope, create a new one */
 	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
@@ -3602,14 +3602,12 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
 		goto fail;
 
 	/* install */
-	spin_lock_irq(&workqueue_lock);
 	hash_add(unbound_pool_hash, &pool->hash_node, hash);
 out_unlock:
-	spin_unlock_irq(&workqueue_lock);
-	mutex_unlock(&create_mutex);
+	mutex_unlock(&wq_mutex);
 	return pool;
 fail:
-	mutex_unlock(&create_mutex);
+	mutex_unlock(&wq_mutex);
 	if (pool)
 		put_unbound_pool(pool);
 	return NULL;
@@ -3883,18 +3881,19 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 		goto err_destroy;
 
 	/*
-	 * workqueue_lock protects global freeze state and workqueues list.
-	 * Grab it, adjust max_active and add the new workqueue to
-	 * workqueues list.
+	 * wq_mutex protects global freeze state and workqueues list. Grab
+	 * it, adjust max_active and add the new @wq to workqueues list.
 	 */
-	spin_lock_irq(&workqueue_lock);
+	mutex_lock(&wq_mutex);
 
+	spin_lock_irq(&workqueue_lock);
 	for_each_pwq(pwq, wq)
 		pwq_adjust_max_active(pwq);
+	spin_unlock_irq(&workqueue_lock);
 
 	list_add(&wq->list, &workqueues);
 
-	spin_unlock_irq(&workqueue_lock);
+	mutex_unlock(&wq_mutex);
 
 	return wq;
 
@@ -3920,9 +3919,8 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	/* drain it before proceeding with destruction */
 	drain_workqueue(wq);
 
-	spin_lock_irq(&workqueue_lock);
-
 	/* sanity checks */
+	spin_lock_irq(&workqueue_lock);
 	for_each_pwq(pwq, wq) {
 		int i;
 
@@ -3940,14 +3938,15 @@ void destroy_workqueue(struct workqueue_struct *wq)
 			return;
 		}
 	}
+	spin_unlock_irq(&workqueue_lock);
 
 	/*
 	 * wq list is used to freeze wq, remove from list after
 	 * flushing is complete in case freeze races us.
 	 */
+	mutex_lock(&wq_mutex);
 	list_del_init(&wq->list);
-
-	spin_unlock_irq(&workqueue_lock);
+	mutex_unlock(&wq_mutex);
 
 	workqueue_sysfs_unregister(wq);
 
@@ -4267,7 +4266,7 @@ EXPORT_SYMBOL_GPL(work_on_cpu);
  * pool->worklist.
  *
  * CONTEXT:
- * Grabs and releases workqueue_lock and pool->lock's.
+ * Grabs and releases wq_mutex, workqueue_lock and pool->lock's.
  */
 void freeze_workqueues_begin(void)
 {
@@ -4276,26 +4275,28 @@ void freeze_workqueues_begin(void)
 	struct pool_workqueue *pwq;
 	int pi;
 
-	spin_lock_irq(&workqueue_lock);
+	mutex_lock(&wq_mutex);
 
 	WARN_ON_ONCE(workqueue_freezing);
 	workqueue_freezing = true;
 
 	/* set FREEZING */
 	for_each_pool(pool, pi) {
-		spin_lock(&pool->lock);
+		spin_lock_irq(&pool->lock);
 		WARN_ON_ONCE(pool->flags & POOL_FREEZING);
 		pool->flags |= POOL_FREEZING;
-		spin_unlock(&pool->lock);
+		spin_unlock_irq(&pool->lock);
 	}
 
 	/* suppress further executions by setting max_active to zero */
+	spin_lock_irq(&workqueue_lock);
 	list_for_each_entry(wq, &workqueues, list) {
 		for_each_pwq(pwq, wq)
 			pwq_adjust_max_active(pwq);
 	}
-
 	spin_unlock_irq(&workqueue_lock);
+
+	mutex_unlock(&wq_mutex);
 }
 
 /**
@@ -4305,7 +4306,7 @@ void freeze_workqueues_begin(void)
  * between freeze_workqueues_begin() and thaw_workqueues().
  *
  * CONTEXT:
- * Grabs and releases workqueue_lock.
+ * Grabs and releases wq_mutex.
  *
  * RETURNS:
  * %true if some freezable workqueues are still busy. %false if freezing
@@ -4317,7 +4318,7 @@ bool freeze_workqueues_busy(void)
 	struct workqueue_struct *wq;
 	struct pool_workqueue *pwq;
 
-	spin_lock_irq(&workqueue_lock);
+	mutex_lock(&wq_mutex);
 
 	WARN_ON_ONCE(!workqueue_freezing);
 
@@ -4328,16 +4329,19 @@ bool freeze_workqueues_busy(void)
 		 * nr_active is monotonically decreasing. It's safe
 		 * to peek without lock.
 		 */
+		preempt_disable();
 		for_each_pwq(pwq, wq) {
 			WARN_ON_ONCE(pwq->nr_active < 0);
 			if (pwq->nr_active) {
 				busy = true;
+				preempt_enable();
 				goto out_unlock;
 			}
 		}
+		preempt_enable();
 	}
 out_unlock:
-	spin_unlock_irq(&workqueue_lock);
+	mutex_unlock(&wq_mutex);
 	return busy;
 }
 
@@ -4348,7 +4352,7 @@ out_unlock:
  * frozen works are transferred to their respective pool worklists.
  *
  * CONTEXT:
- * Grabs and releases workqueue_lock and pool->lock's.
+ * Grabs and releases wq_mutex, workqueue_lock and pool->lock's.
  */
 void thaw_workqueues(void)
 {
@@ -4357,35 +4361,37 @@ void thaw_workqueues(void)
 	struct worker_pool *pool;
 	int pi;
 
-	spin_lock_irq(&workqueue_lock);
+	mutex_lock(&wq_mutex);
 
 	if (!workqueue_freezing)
 		goto out_unlock;
 
 	/* clear FREEZING */
 	for_each_pool(pool, pi) {
-		spin_lock(&pool->lock);
+		spin_lock_irq(&pool->lock);
 		WARN_ON_ONCE(!(pool->flags & POOL_FREEZING));
 		pool->flags &= ~POOL_FREEZING;
-		spin_unlock(&pool->lock);
+		spin_unlock_irq(&pool->lock);
 	}
 
 	/* restore max_active and repopulate worklist */
+	spin_lock_irq(&workqueue_lock);
 	list_for_each_entry(wq, &workqueues, list) {
 		for_each_pwq(pwq, wq)
 			pwq_adjust_max_active(pwq);
 	}
+	spin_unlock_irq(&workqueue_lock);
 
 	/* kick workers */
 	for_each_pool(pool, pi) {
-		spin_lock(&pool->lock);
+		spin_lock_irq(&pool->lock);
 		wake_up_worker(pool);
-		spin_unlock(&pool->lock);
+		spin_unlock_irq(&pool->lock);
 	}
 
 	workqueue_freezing = false;
 out_unlock:
-	spin_unlock_irq(&workqueue_lock);
+	mutex_unlock(&wq_mutex);
 }
 #endif /* CONFIG_FREEZER */
 
@@ -4417,7 +4423,9 @@ static int __init init_workqueues(void)
 			pool->attrs->nice = std_nice[i++];
 
 			/* alloc pool ID */
+			mutex_lock(&wq_mutex);
 			BUG_ON(worker_pool_assign_id(pool));
+			mutex_unlock(&wq_mutex);
 		}
 	}
 