author    Lai Jiangshan <laijs@cn.fujitsu.com>    2012-09-18 12:59:23 -0400
committer Tejun Heo <tj@kernel.org>               2012-09-18 12:59:23 -0400
commit    b2eb83d123c1cc9f96a8e452b26a6ebe631b3ad7
tree      0be062bc42bc16e4de48fe1238e61eeb054bdef7 /kernel/workqueue.c
parent    5f7dabfd5cb115937afb4649e4c73b02f927f6ae
workqueue: rename manager_mutex to assoc_mutex
Now that manager_mutex's role has changed from synchronizing the manager
role to excluding hotplug against the manager, the name is misleading.
As it now protects the CPU association of the gcwq, rename it to
assoc_mutex.
This patch is a pure rename and doesn't introduce any functional change.
tj: Updated comments and description.
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
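
For orientation before the diff: the invariant the new name captures is that GCWQ_DISASSOCIATED may only change while every pool's assoc_mutex is held. Below is a minimal sketch of that discipline using simplified stand-in types, not the real kernel/workqueue.c definitions:

```c
#include <linux/mutex.h>
#include <linux/spinlock.h>

#define GCWQ_DISASSOCIATED	(1 << 0)	/* cpu can't serve workers */
#define NR_POOLS		2		/* normal and highpri */

struct gcwq;

/* Simplified stand-ins for struct worker_pool / struct global_cwq. */
struct pool {
	struct mutex	assoc_mutex;	/* protects CPU association */
	struct gcwq	*gcwq;		/* back-pointer to owning gcwq */
};

struct gcwq {
	spinlock_t	lock;
	unsigned int	flags;
	struct pool	pools[NR_POOLS];
};

/*
 * Hotplug-side sketch: flip DISASSOCIATED only with every pool's
 * assoc_mutex held.  A manager that owns its pool's mutex therefore
 * never sees the CPU association change underneath it, which is what
 * create_worker() and rebinding rely on.
 */
static void set_disassociated(struct gcwq *gcwq, bool on)
{
	int i;

	for (i = 0; i < NR_POOLS; i++)
		mutex_lock_nested(&gcwq->pools[i].assoc_mutex, i);
	spin_lock_irq(&gcwq->lock);

	if (on)
		gcwq->flags |= GCWQ_DISASSOCIATED;
	else
		gcwq->flags &= ~GCWQ_DISASSOCIATED;

	spin_unlock_irq(&gcwq->lock);
	for (i = NR_POOLS - 1; i >= 0; i--)
		mutex_unlock(&gcwq->pools[i].assoc_mutex);
}
```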
Diffstat (limited to 'kernel/workqueue.c')
 kernel/workqueue.c | 38 +++++++++++++++++++-------------------
 1 file changed, 19 insertions(+), 19 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index cdc6bfc84b78..e651239f1ece 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -58,7 +58,7 @@ enum {
 	 * be executing on any CPU.  The gcwq behaves as an unbound one.
 	 *
 	 * Note that DISASSOCIATED can be flipped only while holding
-	 * managership of all pools on the gcwq to avoid changing binding
+	 * assoc_mutex of all pools on the gcwq to avoid changing binding
 	 * state while create_worker() is in progress.
 	 */
 	GCWQ_DISASSOCIATED	= 1 << 0,	/* cpu can't serve workers */
@@ -165,7 +165,7 @@ struct worker_pool {
 	struct timer_list	idle_timer;	/* L: worker idle timeout */
 	struct timer_list	mayday_timer;	/* L: SOS timer for workers */
 
-	struct mutex		manager_mutex;	/* mutex manager should hold */
+	struct mutex		assoc_mutex;	/* protect GCWQ_DISASSOCIATED */
 	struct ida		worker_ida;	/* L: for worker IDs */
 };
 
@@ -1681,7 +1681,7 @@ static void rebind_workers(struct global_cwq *gcwq)
 	lockdep_assert_held(&gcwq->lock);
 
 	for_each_worker_pool(pool, gcwq)
-		lockdep_assert_held(&pool->manager_mutex);
+		lockdep_assert_held(&pool->assoc_mutex);
 
 	/* dequeue and kick idle ones */
 	for_each_worker_pool(pool, gcwq) {
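
rebind_workers() asserts its caller's locking contract rather than merely documenting it in a comment. As a generic illustration (a sketch reusing the stand-in struct pool from above, not the function itself), lockdep_assert_held() warns under lockdep when the named lock is not held and costs nothing otherwise:

```c
#include <linux/lockdep.h>
#include <linux/mutex.h>

/*
 * Sketch: enforce a "caller must hold assoc_mutex" contract.  With
 * lockdep enabled this splats at runtime if the mutex isn't held;
 * in non-debug builds the assertion compiles away.
 */
static void touch_association(struct pool *pool)	/* hypothetical */
{
	lockdep_assert_held(&pool->assoc_mutex);
	/* ... CPU-association state may be modified safely here ... */
}
```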
@@ -2081,22 +2081,22 @@ static bool manage_workers(struct worker *worker)
 	 * grab %POOL_MANAGING_WORKERS to achieve this because that can
 	 * lead to idle worker depletion (all become busy thinking someone
 	 * else is managing) which in turn can result in deadlock under
-	 * extreme circumstances.  Use @pool->manager_mutex to synchronize
+	 * extreme circumstances.  Use @pool->assoc_mutex to synchronize
 	 * manager against CPU hotplug.
 	 *
-	 * manager_mutex would always be free unless CPU hotplug is in
+	 * assoc_mutex would always be free unless CPU hotplug is in
 	 * progress.  trylock first without dropping @gcwq->lock.
 	 */
-	if (unlikely(!mutex_trylock(&pool->manager_mutex))) {
+	if (unlikely(!mutex_trylock(&pool->assoc_mutex))) {
 		spin_unlock_irq(&pool->gcwq->lock);
-		mutex_lock(&pool->manager_mutex);
+		mutex_lock(&pool->assoc_mutex);
 		/*
 		 * CPU hotplug could have happened while we were waiting
-		 * for manager_mutex.  Hotplug itself can't handle us
+		 * for assoc_mutex.  Hotplug itself can't handle us
 		 * because manager isn't either on idle or busy list, and
 		 * @gcwq's state and ours could have deviated.
 		 *
-		 * As hotplug is now excluded via manager_mutex, we can
+		 * As hotplug is now excluded via assoc_mutex, we can
 		 * simply try to bind.  It will succeed or fail depending
 		 * on @gcwq's current state.  Try it and adjust
 		 * %WORKER_UNBOUND accordingly.
@@ -2119,7 +2119,7 @@ static bool manage_workers(struct worker *worker)
 	ret |= maybe_create_worker(pool);
 
 	pool->flags &= ~POOL_MANAGING_WORKERS;
-	mutex_unlock(&pool->manager_mutex);
+	mutex_unlock(&pool->assoc_mutex);
 	return ret;
 }
 
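
The manage_workers() hunks are the heart of the change: assoc_mutex is contended only during hotplug, but a mutex may sleep and so can't be acquired while holding the gcwq spinlock. Hence the trylock-first pattern, condensed below with a hypothetical helper following the same logic (again using the stand-in types from the first sketch):

```c
/*
 * Sketch of the trylock-then-relock dance.  Fast path: assoc_mutex is
 * almost always free, so grab it without releasing the spinlock.
 * Slow path: drop the spinlock, sleep on the mutex, retake the
 * spinlock, and tell the caller that state may have deviated.
 */
static bool claim_assoc(struct pool *pool)
{
	if (likely(mutex_trylock(&pool->assoc_mutex)))
		return true;		/* state unchanged, both locks held */

	spin_unlock_irq(&pool->gcwq->lock);
	mutex_lock(&pool->assoc_mutex);		/* may sleep */
	spin_lock_irq(&pool->gcwq->lock);

	/* Hotplug may have run while we waited; caller must revalidate. */
	return false;
}
```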
@@ -3474,23 +3474,23 @@ EXPORT_SYMBOL_GPL(work_busy);
  */
 
 /* claim manager positions of all pools */
-static void gcwq_claim_management_and_lock(struct global_cwq *gcwq)
+static void gcwq_claim_assoc_and_lock(struct global_cwq *gcwq)
 {
 	struct worker_pool *pool;
 
 	for_each_worker_pool(pool, gcwq)
-		mutex_lock_nested(&pool->manager_mutex, pool - gcwq->pools);
+		mutex_lock_nested(&pool->assoc_mutex, pool - gcwq->pools);
 	spin_lock_irq(&gcwq->lock);
 }
 
 /* release manager positions */
-static void gcwq_release_management_and_unlock(struct global_cwq *gcwq)
+static void gcwq_release_assoc_and_unlock(struct global_cwq *gcwq)
 {
 	struct worker_pool *pool;
 
 	spin_unlock_irq(&gcwq->lock);
 	for_each_worker_pool(pool, gcwq)
-		mutex_unlock(&pool->manager_mutex);
+		mutex_unlock(&pool->assoc_mutex);
 }
 
 static void gcwq_unbind_fn(struct work_struct *work)
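
A note on the mutex_lock_nested() call above: every pool's assoc_mutex is initialized by the same mutex_init() call site (see the last hunk), so they all share one lockdep class, and a plain mutex_lock() on the second pool would look like recursive locking to lockdep. Passing the pool index (`pool - gcwq->pools`) as the subclass declares the fixed acquisition order. A self-contained sketch of the idiom:

```c
#include <linux/mutex.h>

#define NR_LOCKS 2
static struct mutex locks[NR_LOCKS];	/* hypothetical example array */

static void init_locks(void)
{
	int i;

	/* One mutex_init() call site => one lockdep class for all of
	 * them, exactly like the per-pool assoc_mutex init loop. */
	for (i = 0; i < NR_LOCKS; i++)
		mutex_init(&locks[i]);
}

static void take_all(void)
{
	int i;

	/*
	 * Same-class locks taken in a fixed index order: a distinct
	 * subclass per nesting level (0, 1, ...) tells lockdep the
	 * ordering is intentional, not a self-deadlock.
	 */
	for (i = 0; i < NR_LOCKS; i++)
		mutex_lock_nested(&locks[i], i);
}

static void drop_all(void)
{
	int i;

	for (i = 0; i < NR_LOCKS; i++)
		mutex_unlock(&locks[i]);
}
```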
@@ -3503,7 +3503,7 @@ static void gcwq_unbind_fn(struct work_struct *work)
 
 	BUG_ON(gcwq->cpu != smp_processor_id());
 
-	gcwq_claim_management_and_lock(gcwq);
+	gcwq_claim_assoc_and_lock(gcwq);
 
 	/*
 	 * We've claimed all manager positions.  Make all workers unbound
@@ -3520,7 +3520,7 @@ static void gcwq_unbind_fn(struct work_struct *work)
 
 	gcwq->flags |= GCWQ_DISASSOCIATED;
 
-	gcwq_release_management_and_unlock(gcwq);
+	gcwq_release_assoc_and_unlock(gcwq);
 
 	/*
 	 * Call schedule() so that we cross rq->lock and thus can guarantee
@@ -3576,10 +3576,10 @@ static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb,
 
 	case CPU_DOWN_FAILED:
 	case CPU_ONLINE:
-		gcwq_claim_management_and_lock(gcwq);
+		gcwq_claim_assoc_and_lock(gcwq);
 		gcwq->flags &= ~GCWQ_DISASSOCIATED;
 		rebind_workers(gcwq);
-		gcwq_release_management_and_unlock(gcwq);
+		gcwq_release_assoc_and_unlock(gcwq);
 		break;
 	}
 	return NOTIFY_OK;
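
The unbind path (gcwq_unbind_fn above) and this online path are symmetric: both bracket the flag flip and the (un)binding with gcwq_claim_assoc_and_lock()/gcwq_release_assoc_and_unlock(). A schematic of the online side as a hotplug notifier of that kernel era (a sketch of the hunk above; get_gcwq() is shown as an assumed per-CPU lookup helper):

```c
#include <linux/cpu.h>
#include <linux/notifier.h>

/*
 * Schematic CPU-up callback (sketch).  Rebinding runs with every
 * pool's assoc_mutex claimed, so it cannot race a manager that has
 * entered its assoc_mutex-protected section.
 */
static int cpu_up_callback(struct notifier_block *nfb,
			   unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct gcwq *gcwq = get_gcwq(cpu);	/* assumed lookup helper */

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_FAILED:	/* a failed down is an implicit up */
	case CPU_ONLINE:
		gcwq_claim_assoc_and_lock(gcwq);
		gcwq->flags &= ~GCWQ_DISASSOCIATED;
		rebind_workers(gcwq);
		gcwq_release_assoc_and_unlock(gcwq);
		break;
	}
	return NOTIFY_OK;
}
```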
@@ -3833,7 +3833,7 @@ static int __init init_workqueues(void)
 		setup_timer(&pool->mayday_timer, gcwq_mayday_timeout,
 			    (unsigned long)pool);
 
-		mutex_init(&pool->manager_mutex);
+		mutex_init(&pool->assoc_mutex);
 		ida_init(&pool->worker_ida);
 	}
 }