 kernel/workqueue.c | 109 +++++++++++++++++++++++++-----------------------
 1 file changed, 55 insertions(+), 54 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 47f258799bf2..064157eac4c8 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -123,9 +123,9 @@ enum {
  * MG: pool->manager_mutex and pool->lock protected.  Writes require both
  *     locks.  Reads can happen under either lock.
  *
- * WQ: wq_mutex protected.
+ * PL: wq_pool_mutex protected.
  *
- * WR: wq_mutex protected for writes.  Sched-RCU protected for reads.
+ * PR: wq_pool_mutex protected for writes.  Sched-RCU protected for reads.
  *
  * PW: pwq_lock protected.
  *
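The PL/PR annotations follow the usual writer/reader split: PL data needs wq_pool_mutex for any access, while PR data takes the mutex only for writes and may also be read under sched-RCU. A minimal sketch of the two sides (the helper names here are hypothetical illustrations, not code from this patch):

	/* writer side: PL and PR fields change only under wq_pool_mutex */
	static void pl_write_example(struct worker_pool *pool)
	{
		mutex_lock(&wq_pool_mutex);
		pool->refcnt++;			/* PL field */
		mutex_unlock(&wq_pool_mutex);
	}

	/* reader side: PR data may be read in a sched-RCU read section */
	static bool pr_read_example(int pool_id)
	{
		bool exists;

		rcu_read_lock_sched();
		exists = idr_find(&worker_pool_idr, pool_id) != NULL;
		rcu_read_unlock_sched();
		return exists;
	}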
@@ -163,8 +163,8 @@ struct worker_pool {
 	struct idr		worker_idr;	/* MG: worker IDs and iteration */
 
 	struct workqueue_attrs	*attrs;		/* I: worker attributes */
-	struct hlist_node	hash_node;	/* WQ: unbound_pool_hash node */
-	int			refcnt;		/* WQ: refcnt for unbound pools */
+	struct hlist_node	hash_node;	/* PL: unbound_pool_hash node */
+	int			refcnt;		/* PL: refcnt for unbound pools */
 
 	/*
 	 * The current concurrency level.  As it's likely to be accessed
@@ -226,10 +226,10 @@ struct wq_device;
  * the appropriate worker_pool through its pool_workqueues.
  */
 struct workqueue_struct {
-	unsigned int		flags;		/* WQ: WQ_* flags */
+	unsigned int		flags;		/* PL: WQ_* flags */
 	struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwq's */
 	struct list_head	pwqs;		/* FR: all pwqs of this wq */
-	struct list_head	list;		/* WQ: list of all workqueues */
+	struct list_head	list;		/* PL: list of all workqueues */
 
 	struct mutex		flush_mutex;	/* protects wq flushing */
 	int			work_color;	/* F: current work color */
@@ -242,7 +242,7 @@ struct workqueue_struct {
 	struct list_head	maydays;	/* MD: pwqs requesting rescue */
 	struct worker		*rescuer;	/* I: rescue worker */
 
-	int			nr_drainers;	/* WQ: drain in progress */
+	int			nr_drainers;	/* PL: drain in progress */
 	int			saved_max_active; /* PW: saved pwq max_active */
 
 #ifdef CONFIG_SYSFS
@@ -256,20 +256,20 @@ struct workqueue_struct {
 
 static struct kmem_cache *pwq_cache;
 
-static DEFINE_MUTEX(wq_mutex);		/* protects workqueues and pools */
+static DEFINE_MUTEX(wq_pool_mutex);	/* protects pools and workqueues list */
 static DEFINE_SPINLOCK(pwq_lock);	/* protects pool_workqueues */
 static DEFINE_SPINLOCK(wq_mayday_lock);	/* protects wq->maydays list */
 
-static LIST_HEAD(workqueues);		/* WQ: list of all workqueues */
-static bool workqueue_freezing;		/* WQ: have wqs started freezing? */
+static LIST_HEAD(workqueues);		/* PL: list of all workqueues */
+static bool workqueue_freezing;		/* PL: have wqs started freezing? */
 
 /* the per-cpu worker pools */
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
 				     cpu_worker_pools);
 
-static DEFINE_IDR(worker_pool_idr);	/* WR: idr of all pools */
+static DEFINE_IDR(worker_pool_idr);	/* PR: idr of all pools */
 
-/* WQ: hash of all unbound pools keyed by pool->attrs */
+/* PL: hash of all unbound pools keyed by pool->attrs */
 static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
 
 /* I: attributes used when instantiating standard unbound pools on demand */
@@ -293,10 +293,10 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to,
 #define CREATE_TRACE_POINTS
 #include <trace/events/workqueue.h>
 
-#define assert_rcu_or_wq_mutex()					\
+#define assert_rcu_or_pool_mutex()					\
 	rcu_lockdep_assert(rcu_read_lock_sched_held() ||		\
-			   lockdep_is_held(&wq_mutex),			\
-			   "sched RCU or wq_mutex should be held")
+			   lockdep_is_held(&wq_pool_mutex),		\
+			   "sched RCU or wq_pool_mutex should be held")
 
 #define assert_rcu_or_pwq_lock()					\
 	rcu_lockdep_assert(rcu_read_lock_sched_held() ||		\
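Either context satisfies the renamed assertion. A sketch of the two legal ways to call a function that runs assert_rcu_or_pool_mutex(), such as get_work_pool() below:

	struct worker_pool *pool;

	/* option 1: hold wq_pool_mutex; pool stays valid until unlock */
	mutex_lock(&wq_pool_mutex);
	pool = get_work_pool(work);
	/* ... use pool ... */
	mutex_unlock(&wq_pool_mutex);

	/* option 2: sched-RCU read section (preemption disabled) */
	rcu_read_lock_sched();
	pool = get_work_pool(work);
	/* ... pool must not be dereferenced after the unlock ... */
	rcu_read_unlock_sched();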
@@ -323,16 +323,16 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to,
  * @pool: iteration cursor
  * @pi: integer used for iteration
  *
- * This must be called either with wq_mutex held or sched RCU read locked.
- * If the pool needs to be used beyond the locking in effect, the caller is
- * responsible for guaranteeing that the pool stays online.
+ * This must be called either with wq_pool_mutex held or sched RCU read
+ * locked.  If the pool needs to be used beyond the locking in effect, the
+ * caller is responsible for guaranteeing that the pool stays online.
  *
  * The if/else clause exists only for the lockdep assertion and can be
  * ignored.
  */
 #define for_each_pool(pool, pi)						\
 	idr_for_each_entry(&worker_pool_idr, pool, pi)			\
-		if (({ assert_rcu_or_wq_mutex(); false; })) { }		\
+		if (({ assert_rcu_or_pool_mutex(); false; })) { }	\
 		else
 
 /**
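Typical usage of the iterator, matching the hotplug and freezer paths later in this patch (minimal sketch):

	struct worker_pool *pool;
	int pi;

	mutex_lock(&wq_pool_mutex);		/* or rcu_read_lock_sched() */
	for_each_pool(pool, pi) {
		spin_lock_irq(&pool->lock);
		/* ... inspect or update the pool ... */
		spin_unlock_irq(&pool->lock);
	}
	mutex_unlock(&wq_pool_mutex);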
| @@ -489,7 +489,7 @@ static int worker_pool_assign_id(struct worker_pool *pool) | |||
| 489 | { | 489 | { |
| 490 | int ret; | 490 | int ret; |
| 491 | 491 | ||
| 492 | lockdep_assert_held(&wq_mutex); | 492 | lockdep_assert_held(&wq_pool_mutex); |
| 493 | 493 | ||
| 494 | do { | 494 | do { |
| 495 | if (!idr_pre_get(&worker_pool_idr, GFP_KERNEL)) | 495 | if (!idr_pre_get(&worker_pool_idr, GFP_KERNEL)) |
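The loop body is truncated by the hunk; the remainder follows the classic two-step IDR idiom of this era, roughly (reconstructed for context, not part of the hunk):

	do {
		if (!idr_pre_get(&worker_pool_idr, GFP_KERNEL))
			return -ENOMEM;		/* preload failed */
		ret = idr_get_new(&worker_pool_idr, pool, &pool->id);
	} while (ret == -EAGAIN);		/* lost a race, preload again */

	return ret;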
@@ -607,9 +607,9 @@ static struct pool_workqueue *get_work_pwq(struct work_struct *work)
  *
  * Return the worker_pool @work was last associated with.  %NULL if none.
  *
- * Pools are created and destroyed under wq_mutex, and allows read access
- * under sched-RCU read lock.  As such, this function should be called
- * under wq_mutex or with preemption disabled.
+ * Pools are created and destroyed under wq_pool_mutex, and allows read
+ * access under sched-RCU read lock.  As such, this function should be
+ * called under wq_pool_mutex or with preemption disabled.
  *
  * All fields of the returned pool are accessible as long as the above
  * mentioned locking is in effect.  If the returned pool needs to be used
@@ -621,7 +621,7 @@ static struct worker_pool *get_work_pool(struct work_struct *work)
 	unsigned long data = atomic_long_read(&work->data);
 	int pool_id;
 
-	assert_rcu_or_wq_mutex();
+	assert_rcu_or_pool_mutex();
 
 	if (data & WORK_STRUCT_PWQ)
 		return ((struct pool_workqueue *)
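The hunk is cut off mid-expression; for context, the rest of get_work_pool() resolves the two encodings of work->data roughly as follows (reconstructed from the same kernel):

	if (data & WORK_STRUCT_PWQ)
		return ((struct pool_workqueue *)
			(data & WORK_STRUCT_WQ_DATA_MASK))->pool;

	pool_id = data >> WORK_OFFQ_POOL_SHIFT;
	if (pool_id == WORK_OFFQ_POOL_NONE)
		return NULL;

	/* PR data: lookup is safe under wq_pool_mutex or sched-RCU */
	return idr_find(&worker_pool_idr, pool_id);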
@@ -2684,10 +2684,10 @@ void drain_workqueue(struct workqueue_struct *wq)
 	 * hotter than drain_workqueue() and already looks at @wq->flags.
 	 * Use __WQ_DRAINING so that queue doesn't have to check nr_drainers.
 	 */
-	mutex_lock(&wq_mutex);
+	mutex_lock(&wq_pool_mutex);
 	if (!wq->nr_drainers++)
 		wq->flags |= __WQ_DRAINING;
-	mutex_unlock(&wq_mutex);
+	mutex_unlock(&wq_pool_mutex);
 reflush:
 	flush_workqueue(wq);
 
@@ -2714,10 +2714,10 @@ reflush:
 
 	local_irq_enable();
 
-	mutex_lock(&wq_mutex);
+	mutex_lock(&wq_pool_mutex);
 	if (!--wq->nr_drainers)
 		wq->flags &= ~__WQ_DRAINING;
-	mutex_unlock(&wq_mutex);
+	mutex_unlock(&wq_pool_mutex);
 }
 EXPORT_SYMBOL_GPL(drain_workqueue);
 
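The comment above about the queueing path being hotter is why draining is signalled via a flag: __queue_work() only has to test wq->flags instead of taking the mutex to read nr_drainers. The check looks roughly like this (sketch from the same kernel):

	/* if draining, only works from the same workqueue are allowed */
	if (unlikely(wq->flags & __WQ_DRAINING) &&
	    WARN_ON_ONCE(!is_chained_work(wq)))
		return;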
@@ -3430,16 +3430,16 @@ static void put_unbound_pool(struct worker_pool *pool)
 {
 	struct worker *worker;
 
-	mutex_lock(&wq_mutex);
+	mutex_lock(&wq_pool_mutex);
 	if (--pool->refcnt) {
-		mutex_unlock(&wq_mutex);
+		mutex_unlock(&wq_pool_mutex);
 		return;
 	}
 
 	/* sanity checks */
 	if (WARN_ON(!(pool->flags & POOL_DISASSOCIATED)) ||
 	    WARN_ON(!list_empty(&pool->worklist))) {
-		mutex_unlock(&wq_mutex);
+		mutex_unlock(&wq_pool_mutex);
 		return;
 	}
 
@@ -3448,7 +3448,7 @@ static void put_unbound_pool(struct worker_pool *pool)
 	idr_remove(&worker_pool_idr, pool->id);
 	hash_del(&pool->hash_node);
 
-	mutex_unlock(&wq_mutex);
+	mutex_unlock(&wq_pool_mutex);
 
 	/*
 	 * Become the manager and destroy all workers.  Grabbing
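Condensed, the teardown ordering these two hunks implement: drop the last reference, unhash the pool so no new user can find it, release wq_pool_mutex, and only then destroy the workers, so the global mutex is never held across teardown:

	mutex_lock(&wq_pool_mutex);
	if (--pool->refcnt) {			/* PL: still referenced */
		mutex_unlock(&wq_pool_mutex);
		return;
	}
	idr_remove(&worker_pool_idr, pool->id);	/* PR: stop new lookups */
	hash_del(&pool->hash_node);		/* PL: stop new sharers */
	mutex_unlock(&wq_pool_mutex);
	/* worker destruction proceeds without the global mutex */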
@@ -3489,7 +3489,7 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
 	u32 hash = wqattrs_hash(attrs);
 	struct worker_pool *pool;
 
-	mutex_lock(&wq_mutex);
+	mutex_lock(&wq_pool_mutex);
 
 	/* do we already have a matching pool? */
 	hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
@@ -3520,10 +3520,10 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
 	/* install */
 	hash_add(unbound_pool_hash, &pool->hash_node, hash);
 out_unlock:
-	mutex_unlock(&wq_mutex);
+	mutex_unlock(&wq_pool_mutex);
 	return pool;
 fail:
-	mutex_unlock(&wq_mutex);
+	mutex_unlock(&wq_pool_mutex);
 	if (pool)
 		put_unbound_pool(pool);
 	return NULL;
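The lookup elided between these two hunks matches on attrs under wq_pool_mutex and takes a reference before reusing an existing pool, roughly (reconstructed):

	/* do we already have a matching pool? */
	hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
		if (wqattrs_equal(pool->attrs, attrs)) {
			pool->refcnt++;		/* PL: under wq_pool_mutex */
			goto out_unlock;
		}
	}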
@@ -3803,10 +3803,11 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 		goto err_destroy;
 
 	/*
-	 * wq_mutex protects global freeze state and workqueues list.  Grab
-	 * it, adjust max_active and add the new @wq to workqueues list.
+	 * wq_pool_mutex protects global freeze state and workqueues list.
+	 * Grab it, adjust max_active and add the new @wq to workqueues
+	 * list.
 	 */
-	mutex_lock(&wq_mutex);
+	mutex_lock(&wq_pool_mutex);
 
 	spin_lock_irq(&pwq_lock);
 	for_each_pwq(pwq, wq)
@@ -3815,7 +3816,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 
 	list_add(&wq->list, &workqueues);
 
-	mutex_unlock(&wq_mutex);
+	mutex_unlock(&wq_pool_mutex);
 
 	return wq;
 
@@ -3866,9 +3867,9 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	 * wq list is used to freeze wq, remove from list after
 	 * flushing is complete in case freeze races us.
 	 */
-	mutex_lock(&wq_mutex);
+	mutex_lock(&wq_pool_mutex);
 	list_del_init(&wq->list);
-	mutex_unlock(&wq_mutex);
+	mutex_unlock(&wq_pool_mutex);
 
 	workqueue_sysfs_unregister(wq);
 
@@ -4198,7 +4199,7 @@ static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,
 
 	case CPU_DOWN_FAILED:
 	case CPU_ONLINE:
-		mutex_lock(&wq_mutex);
+		mutex_lock(&wq_pool_mutex);
 
 		for_each_pool(pool, pi) {
 			mutex_lock(&pool->manager_mutex);
@@ -4216,7 +4217,7 @@ static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,
 			mutex_unlock(&pool->manager_mutex);
 		}
 
-		mutex_unlock(&wq_mutex);
+		mutex_unlock(&wq_pool_mutex);
 		break;
 	}
 	return NOTIFY_OK;
@@ -4292,7 +4293,7 @@ EXPORT_SYMBOL_GPL(work_on_cpu);
  * pool->worklist.
  *
  * CONTEXT:
- * Grabs and releases wq_mutex, pwq_lock and pool->lock's.
+ * Grabs and releases wq_pool_mutex, pwq_lock and pool->lock's.
  */
 void freeze_workqueues_begin(void)
 {
@@ -4301,7 +4302,7 @@ void freeze_workqueues_begin(void)
 	struct pool_workqueue *pwq;
 	int pi;
 
-	mutex_lock(&wq_mutex);
+	mutex_lock(&wq_pool_mutex);
 
 	WARN_ON_ONCE(workqueue_freezing);
 	workqueue_freezing = true;
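The body elided between this hunk and the next walks every pool and pwq under the locks named in the CONTEXT block, marking pools frozen and clamping max_active; roughly (reconstructed, with the max_active helper name taken from the same patch series):

	/* set FREEZING on all pools */
	for_each_pool(pool, pi) {
		spin_lock_irq(&pool->lock);
		WARN_ON_ONCE(pool->flags & POOL_FREEZING);
		pool->flags |= POOL_FREEZING;
		spin_unlock_irq(&pool->lock);
	}

	/* suppress further executions by forcing max_active to zero */
	spin_lock_irq(&pwq_lock);
	list_for_each_entry(wq, &workqueues, list) {
		for_each_pwq(pwq, wq)
			pwq_adjust_max_active(pwq);
	}
	spin_unlock_irq(&pwq_lock);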
@@ -4322,7 +4323,7 @@ void freeze_workqueues_begin(void)
 	}
 	spin_unlock_irq(&pwq_lock);
 
-	mutex_unlock(&wq_mutex);
+	mutex_unlock(&wq_pool_mutex);
 }
 
 /**
@@ -4332,7 +4333,7 @@ void freeze_workqueues_begin(void)
  * between freeze_workqueues_begin() and thaw_workqueues().
  *
  * CONTEXT:
- * Grabs and releases wq_mutex.
+ * Grabs and releases wq_pool_mutex.
 *
 * RETURNS:
 * %true if some freezable workqueues are still busy.  %false if freezing
@@ -4344,7 +4345,7 @@ bool freeze_workqueues_busy(void)
 	struct workqueue_struct *wq;
 	struct pool_workqueue *pwq;
 
-	mutex_lock(&wq_mutex);
+	mutex_lock(&wq_pool_mutex);
 
 	WARN_ON_ONCE(!workqueue_freezing);
 
@@ -4367,7 +4368,7 @@ bool freeze_workqueues_busy(void)
 		rcu_read_unlock_sched();
 	}
 out_unlock:
-	mutex_unlock(&wq_mutex);
+	mutex_unlock(&wq_pool_mutex);
 	return busy;
 }
 
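The busy check elided above peeks at pwq->nr_active without pool->lock, which is safe because nr_active only decreases while frozen; a sched-RCU read section keeps the pwqs alive during the walk, matching the rcu_read_unlock_sched() visible in the hunk (reconstructed sketch):

	rcu_read_lock_sched();
	for_each_pwq(pwq, wq) {
		WARN_ON_ONCE(pwq->nr_active < 0);
		if (pwq->nr_active) {
			busy = true;
			rcu_read_unlock_sched();
			goto out_unlock;
		}
	}
	rcu_read_unlock_sched();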
@@ -4378,7 +4379,7 @@ out_unlock:
 * frozen works are transferred to their respective pool worklists.
 *
 * CONTEXT:
- * Grabs and releases wq_mutex, pwq_lock and pool->lock's.
+ * Grabs and releases wq_pool_mutex, pwq_lock and pool->lock's.
 */
 void thaw_workqueues(void)
 {
@@ -4387,7 +4388,7 @@ void thaw_workqueues(void)
 	struct worker_pool *pool;
 	int pi;
 
-	mutex_lock(&wq_mutex);
+	mutex_lock(&wq_pool_mutex);
 
 	if (!workqueue_freezing)
 		goto out_unlock;
@@ -4410,7 +4411,7 @@ void thaw_workqueues(void)
 
 	workqueue_freezing = false;
 out_unlock:
-	mutex_unlock(&wq_mutex);
+	mutex_unlock(&wq_pool_mutex);
 }
 #endif /* CONFIG_FREEZER */
 
@@ -4442,9 +4443,9 @@ static int __init init_workqueues(void)
 			pool->attrs->nice = std_nice[i++];
 
 			/* alloc pool ID */
-			mutex_lock(&wq_mutex);
+			mutex_lock(&wq_pool_mutex);
 			BUG_ON(worker_pool_assign_id(pool));
-			mutex_unlock(&wq_mutex);
+			mutex_unlock(&wq_pool_mutex);
 		}
 	}
 
