author     Lai Jiangshan <laijs@cn.fujitsu.com>    2014-05-20 05:46:34 -0400
committer  Tejun Heo <tj@kernel.org>               2014-05-20 10:59:32 -0400
commit     92f9c5c40cc67ffcc5ac7f55fdbd6ae8afc7e0b4
tree       6d1291b7b87ef94cb92a7a2d42aafe3d49cadb0b  /kernel/workqueue.c
parent     4d757c5c81edba2052aae10d5b36dfcb9902b141
workqueue: rename manager_mutex to attach_mutex
manager_mutex is now only used to protect attaching workers to the pool
and the pool->workers list. It protects pool->workers and the operations
based on that list, such as:

	cpu-binding for the workers on pool->workers
	setting/clearing WORKER_UNBOUND

So let's rename manager_mutex to attach_mutex to better reflect its
role. This patch is a pure rename.

tj: Minor comment and description updates.
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
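
For context, the protocol that attach_mutex serializes is small: a worker
is added to pool->workers when it is created, removed when it exits, and
the last worker to detach wakes any teardown path waiting on a completion.
Below is a minimal userspace sketch of that pattern, using pthreads in
place of kernel mutexes and a condition variable in place of the kernel's
completion; the names (pool, worker, attach_worker, detach_worker,
wait_for_all_detached) are illustrative stand-ins, not the kernel API.

/*
 * Minimal sketch of the attach/detach protocol, assuming pthreads.
 * Models mutex_lock(&pool->attach_mutex) around every list update and
 * the detach_completion handshake; not the kernel implementation.
 */
#include <pthread.h>
#include <stddef.h>

struct worker {
	struct worker *next, *prev;	/* node on pool->workers */
};

struct pool {
	pthread_mutex_t attach_mutex;	/* protects the workers list */
	struct worker *workers;		/* attached workers */
	pthread_cond_t detach_done;	/* stands in for detach_completion */
	int teardown_waiting;		/* a destroyer wants the wakeup */
};

/* Attach under attach_mutex, as create_worker() does with list_add_tail(). */
static void attach_worker(struct pool *pool, struct worker *w)
{
	pthread_mutex_lock(&pool->attach_mutex);
	w->prev = NULL;
	w->next = pool->workers;
	if (pool->workers)
		pool->workers->prev = w;
	pool->workers = w;
	pthread_mutex_unlock(&pool->attach_mutex);
}

/*
 * Detach under attach_mutex; the last worker out signals teardown,
 * mirroring worker_detach_from_pool().
 */
static void detach_worker(struct pool *pool, struct worker *w)
{
	pthread_mutex_lock(&pool->attach_mutex);
	if (w->prev)
		w->prev->next = w->next;
	else
		pool->workers = w->next;
	if (w->next)
		w->next->prev = w->prev;
	if (!pool->workers && pool->teardown_waiting)
		pthread_cond_signal(&pool->detach_done);
	pthread_mutex_unlock(&pool->attach_mutex);
}

/*
 * Wait for the list to empty, mirroring put_unbound_pool()'s
 * wait_for_completion() on detach_completion.
 */
static void wait_for_all_detached(struct pool *pool)
{
	pthread_mutex_lock(&pool->attach_mutex);
	pool->teardown_waiting = 1;
	while (pool->workers)
		pthread_cond_wait(&pool->detach_done, &pool->attach_mutex);
	pthread_mutex_unlock(&pool->attach_mutex);
}

A pool here would be initialized with PTHREAD_MUTEX_INITIALIZER and
PTHREAD_COND_INITIALIZER. The point of the sketch is only that every
reader and writer of pool->workers takes the same mutex, which is the
invariant the rename makes explicit.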
Diffstat (limited to 'kernel/workqueue.c')
 kernel/workqueue.c | 44 ++++++++++++++++++++++----------------------
 1 file changed, 22 insertions(+), 22 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index d6b31ff60c52..38b9ea7c204c 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -65,7 +65,7 @@ enum {
 	 * be executing on any CPU.  The pool behaves as an unbound one.
 	 *
 	 * Note that DISASSOCIATED should be flipped only while holding
-	 * manager_mutex to avoid changing binding state while
+	 * attach_mutex to avoid changing binding state while
 	 * create_worker() is in progress.
 	 */
 	POOL_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */
@@ -122,7 +122,7 @@ enum {
  *    cpu or grabbing pool->lock is enough for read access.  If
  *    POOL_DISASSOCIATED is set, it's identical to L.
  *
- * M: pool->manager_mutex protected.
+ * A: pool->attach_mutex protected.
  *
  * PL: wq_pool_mutex protected.
  *
@@ -160,8 +160,8 @@ struct worker_pool {
 
 	/* see manage_workers() for details on the two manager mutexes */
 	struct mutex		manager_arb;	/* manager arbitration */
-	struct mutex		manager_mutex;	/* manager exclusion */
-	struct list_head	workers;	/* M: attached workers */
+	struct mutex		attach_mutex;	/* attach/detach exclusion */
+	struct list_head	workers;	/* A: attached workers */
 	struct completion	*detach_completion; /* all workers detached */
 
 	struct ida		worker_ida;	/* worker IDs for task name */
@@ -367,14 +367,14 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to,
  * @worker: iteration cursor
  * @pool: worker_pool to iterate workers of
  *
- * This must be called with @pool->manager_mutex.
+ * This must be called with @pool->attach_mutex.
  *
  * The if/else clause exists only for the lockdep assertion and can be
  * ignored.
  */
 #define for_each_pool_worker(worker, pool)				\
 	list_for_each_entry((worker), &(pool)->workers, node)		\
-		if (({ lockdep_assert_held(&pool->manager_mutex); false; })) { } \
+		if (({ lockdep_assert_held(&pool->attach_mutex); false; })) { } \
 		else
 
 /**
@@ -1696,11 +1696,11 @@ static void worker_detach_from_pool(struct worker *worker,
 {
 	struct completion *detach_completion = NULL;
 
-	mutex_lock(&pool->manager_mutex);
+	mutex_lock(&pool->attach_mutex);
 	list_del(&worker->node);
 	if (list_empty(&pool->workers))
 		detach_completion = pool->detach_completion;
-	mutex_unlock(&pool->manager_mutex);
+	mutex_unlock(&pool->attach_mutex);
 
 	if (detach_completion)
 		complete(detach_completion);
@@ -1753,7 +1753,7 @@ static struct worker *create_worker(struct worker_pool *pool)
 	/* prevent userland from meddling with cpumask of workqueue workers */
 	worker->task->flags |= PF_NO_SETAFFINITY;
 
-	mutex_lock(&pool->manager_mutex);
+	mutex_lock(&pool->attach_mutex);
 
 	/*
 	 * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
@@ -1762,7 +1762,7 @@ static struct worker *create_worker(struct worker_pool *pool)
 	set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
 
 	/*
-	 * The pool->manager_mutex ensures %POOL_DISASSOCIATED
+	 * The pool->attach_mutex ensures %POOL_DISASSOCIATED
 	 * remains stable across this function.  See the comments above the
 	 * flag definition for details.
 	 */
@@ -1772,7 +1772,7 @@ static struct worker *create_worker(struct worker_pool *pool)
 	/* successful, attach the worker to the pool */
 	list_add_tail(&worker->node, &pool->workers);
 
-	mutex_unlock(&pool->manager_mutex);
+	mutex_unlock(&pool->attach_mutex);
 
 	return worker;
 
@@ -3456,7 +3456,7 @@ static int init_worker_pool(struct worker_pool *pool)
 		    (unsigned long)pool);
 
 	mutex_init(&pool->manager_arb);
-	mutex_init(&pool->manager_mutex);
+	mutex_init(&pool->attach_mutex);
 	INIT_LIST_HEAD(&pool->workers);
 
 	ida_init(&pool->worker_ida);
@@ -3513,7 +3513,7 @@ static void put_unbound_pool(struct worker_pool *pool)
 	/*
 	 * Become the manager and destroy all workers.  Grabbing
 	 * manager_arb prevents @pool's workers from blocking on
-	 * manager_mutex.
+	 * attach_mutex.
 	 */
 	mutex_lock(&pool->manager_arb);
 
@@ -3523,10 +3523,10 @@ static void put_unbound_pool(struct worker_pool *pool)
 	WARN_ON(pool->nr_workers || pool->nr_idle);
 	spin_unlock_irq(&pool->lock);
 
-	mutex_lock(&pool->manager_mutex);
+	mutex_lock(&pool->attach_mutex);
 	if (!list_empty(&pool->workers))
 		pool->detach_completion = &detach_completion;
-	mutex_unlock(&pool->manager_mutex);
+	mutex_unlock(&pool->attach_mutex);
 
 	if (pool->detach_completion)
 		wait_for_completion(pool->detach_completion);
@@ -4513,11 +4513,11 @@ static void wq_unbind_fn(struct work_struct *work)
 	for_each_cpu_worker_pool(pool, cpu) {
 		WARN_ON_ONCE(cpu != smp_processor_id());
 
-		mutex_lock(&pool->manager_mutex);
+		mutex_lock(&pool->attach_mutex);
 		spin_lock_irq(&pool->lock);
 
 		/*
-		 * We've blocked all manager operations.  Make all workers
+		 * We've blocked all attach/detach operations.  Make all workers
 		 * unbound and set DISASSOCIATED.  Before this, all workers
 		 * except for the ones which are still executing works from
 		 * before the last CPU down must be on the cpu.  After
@@ -4529,7 +4529,7 @@ static void wq_unbind_fn(struct work_struct *work)
 		pool->flags |= POOL_DISASSOCIATED;
 
 		spin_unlock_irq(&pool->lock);
-		mutex_unlock(&pool->manager_mutex);
+		mutex_unlock(&pool->attach_mutex);
 
 		/*
 		 * Call schedule() so that we cross rq->lock and thus can
@@ -4570,7 +4570,7 @@ static void rebind_workers(struct worker_pool *pool)
 {
 	struct worker *worker;
 
-	lockdep_assert_held(&pool->manager_mutex);
+	lockdep_assert_held(&pool->attach_mutex);
 
 	/*
 	 * Restore CPU affinity of all workers.  As all idle workers should
@@ -4638,7 +4638,7 @@ static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
 	static cpumask_t cpumask;
 	struct worker *worker;
 
-	lockdep_assert_held(&pool->manager_mutex);
+	lockdep_assert_held(&pool->attach_mutex);
 
 	/* is @cpu allowed for @pool? */
 	if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
@@ -4683,7 +4683,7 @@ static int workqueue_cpu_up_callback(struct notifier_block *nfb,
 	mutex_lock(&wq_pool_mutex);
 
 	for_each_pool(pool, pi) {
-		mutex_lock(&pool->manager_mutex);
+		mutex_lock(&pool->attach_mutex);
 
 		if (pool->cpu == cpu) {
 			spin_lock_irq(&pool->lock);
@@ -4695,7 +4695,7 @@ static int workqueue_cpu_up_callback(struct notifier_block *nfb,
 			restore_unbound_workers_cpumask(pool, cpu);
 		}
 
-		mutex_unlock(&pool->manager_mutex);
+		mutex_unlock(&pool->attach_mutex);
 	}
 
 	/* update NUMA affinity of unbound workqueues */