commit     a892cacc7f4960a39c0fad7bbdf04c5cbf7c229e
tree       bbc2973a8d16bd643c4033cce6229049ff780e30  /kernel/workqueue.c
parent     4862125b0256a946d2749a1d5003b0604bc3cb4d
author     Tejun Heo <tj@kernel.org>  2013-04-01 14:23:32 -0400
committer  Tejun Heo <tj@kernel.org>  2013-04-01 14:23:32 -0400
workqueue: move wq_pool_mutex locking outside of get/put_unbound_pool()
The scheduled NUMA affinity support for unbound workqueues will need to
walk the workqueues list and perform pool-related operations on each
workqueue.  Move the wq_pool_mutex locking out of get/put_unbound_pool()
and into their callers so that pool operations can be performed while
walking the workqueues list, which is also protected by wq_pool_mutex.
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
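
[Editorial note] In short, get/put_unbound_pool() change from self-locking
functions into functions that require the caller to hold wq_pool_mutex.
A minimal before/after sketch, condensed from the patch below (the elided
teardown steps are unchanged):

	/* before: the function takes and drops the mutex itself */
	static void put_unbound_pool(struct worker_pool *pool)
	{
		mutex_lock(&wq_pool_mutex);
		if (--pool->refcnt) {
			mutex_unlock(&wq_pool_mutex);
			return;
		}
		/* ... release id and unhash ... */
		mutex_unlock(&wq_pool_mutex);
		/* ... destroy workers ... */
	}

	/* after: the caller must already hold wq_pool_mutex */
	static void put_unbound_pool(struct worker_pool *pool)
	{
		lockdep_assert_held(&wq_pool_mutex);
		if (--pool->refcnt)
			return;
		/* ... release id and unhash ... */
		/* ... destroy workers (caller still holds the mutex) ... */
	}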
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c | 36 ++++++++++++++++++++++--------------
1 file changed, 22 insertions(+), 14 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 89480fc8eaa3..2bf3d8c6e128 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3395,31 +3395,28 @@ static void rcu_free_pool(struct rcu_head *rcu)
  * safe manner.  get_unbound_pool() calls this function on its failure path
  * and this function should be able to release pools which went through,
  * successfully or not, init_worker_pool().
+ *
+ * Should be called with wq_pool_mutex held.
  */
 static void put_unbound_pool(struct worker_pool *pool)
 {
 	struct worker *worker;
 
-	mutex_lock(&wq_pool_mutex);
-	if (--pool->refcnt) {
-		mutex_unlock(&wq_pool_mutex);
+	lockdep_assert_held(&wq_pool_mutex);
+
+	if (--pool->refcnt)
 		return;
-	}
 
 	/* sanity checks */
 	if (WARN_ON(!(pool->flags & POOL_DISASSOCIATED)) ||
-	    WARN_ON(!list_empty(&pool->worklist))) {
-		mutex_unlock(&wq_pool_mutex);
+	    WARN_ON(!list_empty(&pool->worklist)))
 		return;
-	}
 
 	/* release id and unhash */
 	if (pool->id >= 0)
 		idr_remove(&worker_pool_idr, pool->id);
 	hash_del(&pool->hash_node);
 
-	mutex_unlock(&wq_pool_mutex);
-
 	/*
 	 * Become the manager and destroy all workers.  Grabbing
 	 * manager_arb prevents @pool's workers from blocking on
@@ -3453,13 +3450,15 @@ static void put_unbound_pool(struct worker_pool *pool)
  * reference count and return it.  If there already is a matching
  * worker_pool, it will be used; otherwise, this function attempts to
  * create a new one.  On failure, returns NULL.
+ *
+ * Should be called with wq_pool_mutex held.
  */
 static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
 {
 	u32 hash = wqattrs_hash(attrs);
 	struct worker_pool *pool;
 
-	mutex_lock(&wq_pool_mutex);
+	lockdep_assert_held(&wq_pool_mutex);
 
 	/* do we already have a matching pool? */
 	hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
@@ -3490,10 +3489,8 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
 	/* install */
 	hash_add(unbound_pool_hash, &pool->hash_node, hash);
 out_unlock:
-	mutex_unlock(&wq_pool_mutex);
 	return pool;
 fail:
-	mutex_unlock(&wq_pool_mutex);
 	if (pool)
 		put_unbound_pool(pool);
 	return NULL;
@@ -3530,7 +3527,10 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
 	is_last = list_empty(&wq->pwqs);
 	mutex_unlock(&wq->mutex);
 
+	mutex_lock(&wq_pool_mutex);
 	put_unbound_pool(pool);
+	mutex_unlock(&wq_pool_mutex);
+
 	call_rcu_sched(&pwq->rcu, rcu_free_pwq);
 
 	/*
@@ -3654,13 +3654,21 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
 	copy_workqueue_attrs(new_attrs, attrs);
 	cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask);
 
+	mutex_lock(&wq_pool_mutex);
+
 	pwq = kmem_cache_zalloc(pwq_cache, GFP_KERNEL);
-	if (!pwq)
+	if (!pwq) {
+		mutex_unlock(&wq_pool_mutex);
 		goto enomem;
+	}
 
 	pool = get_unbound_pool(new_attrs);
-	if (!pool)
+	if (!pool) {
+		mutex_unlock(&wq_pool_mutex);
 		goto enomem;
+	}
+
+	mutex_unlock(&wq_pool_mutex);
 
 	init_and_link_pwq(pwq, wq, pool, &last_pwq);
 	if (last_pwq) {
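
[Editorial note] With the locking hoisted into the callers, a later change
can hold wq_pool_mutex across a walk of the workqueues list and call
get/put_unbound_pool() on each iteration.  A hypothetical sketch of that
shape (illustrative only, not part of this patch):

	mutex_lock(&wq_pool_mutex);
	list_for_each_entry(wq, &workqueues, list) {
		if (!(wq->flags & WQ_UNBOUND))
			continue;
		/*
		 * get/put_unbound_pool() may now be called here, under
		 * the same wq_pool_mutex that protects the list walk.
		 */
	}
	mutex_unlock(&wq_pool_mutex);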