author    Lai Jiangshan <laijs@cn.fujitsu.com>    2013-02-19 15:17:02 -0500
committer Tejun Heo <tj@kernel.org>               2013-03-04 12:44:58 -0500
commit    f36dc67b27a689eeb3631b11ebef17bbff257fbb (patch)
tree      d93c4310742fae4633af6ebddf53fe649ca4965d /kernel/workqueue.c
parent    f5faa0774e07eada85b0c55ec789b3f337d01412 (diff)
workqueue: change argument of worker_maybe_bind_and_lock() to @pool
worker_maybe_bind_and_lock() currently takes @worker but only cares about
@worker->pool.  This patch updates worker_maybe_bind_and_lock() to take
@pool instead of @worker.  This will be used to better define
synchronization rules regarding rescuer->pool updates.

This doesn't introduce any functional change.

tj: Updated the comments and description.

Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
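For orientation, the gist of the interface change is sketched below in plain C. This is a simplified, self-contained illustration, not the kernel code: the structures are stubbed out, the actual CPU binding and locking are elided, and the *_sketch helper names are hypothetical; the real change is in the diff further down.

#include <stdbool.h>
#include <stddef.h>

/* Stub stand-ins for the kernel structures (illustration only). */
struct worker_pool { int cpu; };
struct worker { struct worker_pool *pool; };

/* New calling convention: the function takes the pool directly. */
static bool worker_maybe_bind_and_lock(struct worker_pool *pool)
{
	/* bind %current to pool->cpu and take pool->lock (elided here) */
	return pool != NULL;
}

/* Existing worker paths simply pass worker->pool ... */
static bool rebind_caller_sketch(struct worker *worker)
{
	return worker_maybe_bind_and_lock(worker->pool);
}

/*
 * ... while the rescuer path can pass the pool it just picked, which is
 * what the later rescuer->pool synchronization rules build on.
 */
static void rescuer_caller_sketch(struct worker *rescuer,
				  struct worker_pool *pool)
{
	rescuer->pool = pool;
	worker_maybe_bind_and_lock(pool);
}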
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--   kernel/workqueue.c   18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index f456433cf535..09545d445a55 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1504,8 +1504,10 @@ static void worker_leave_idle(struct worker *worker)
 }
 
 /**
- * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock pool
- * @worker: self
+ * worker_maybe_bind_and_lock - try to bind %current to worker_pool and lock it
+ * @pool: target worker_pool
+ *
+ * Bind %current to the cpu of @pool if it is associated and lock @pool.
  *
  * Works which are scheduled while the cpu is online must at least be
  * scheduled to a worker which is bound to the cpu so that if they are
@@ -1533,11 +1535,9 @@ static void worker_leave_idle(struct worker *worker)
  * %true if the associated pool is online (@worker is successfully
  * bound), %false if offline.
  */
-static bool worker_maybe_bind_and_lock(struct worker *worker)
+static bool worker_maybe_bind_and_lock(struct worker_pool *pool)
 __acquires(&pool->lock)
 {
-	struct worker_pool *pool = worker->pool;
-
 	while (true) {
 		/*
 		 * The following call may fail, succeed or succeed
@@ -1575,7 +1575,7 @@ __acquires(&pool->lock)
 static void idle_worker_rebind(struct worker *worker)
 {
 	/* CPU may go down again inbetween, clear UNBOUND only on success */
-	if (worker_maybe_bind_and_lock(worker))
+	if (worker_maybe_bind_and_lock(worker->pool))
 		worker_clr_flags(worker, WORKER_UNBOUND);
 
 	/* rebind complete, become available again */
@@ -1593,7 +1593,7 @@ static void busy_worker_rebind_fn(struct work_struct *work)
 {
 	struct worker *worker = container_of(work, struct worker, rebind_work);
 
-	if (worker_maybe_bind_and_lock(worker))
+	if (worker_maybe_bind_and_lock(worker->pool))
 		worker_clr_flags(worker, WORKER_UNBOUND);
 
 	spin_unlock_irq(&worker->pool->lock);
@@ -2038,7 +2038,7 @@ static bool manage_workers(struct worker *worker)
 	 * on @pool's current state.  Try it and adjust
 	 * %WORKER_UNBOUND accordingly.
 	 */
-	if (worker_maybe_bind_and_lock(worker))
+	if (worker_maybe_bind_and_lock(pool))
 		worker->flags &= ~WORKER_UNBOUND;
 	else
 		worker->flags |= WORKER_UNBOUND;
@@ -2358,7 +2358,7 @@ repeat:
 
 	/* migrate to the target cpu if possible */
 	rescuer->pool = pool;
-	worker_maybe_bind_and_lock(rescuer);
+	worker_maybe_bind_and_lock(pool);
 
 	/*
 	 * Slurp in all works issued via this workqueue and