author     Lai Jiangshan <laijs@cn.fujitsu.com>    2013-02-19 15:17:02 -0500
committer  Tejun Heo <tj@kernel.org>               2013-03-04 12:44:58 -0500
commit     f5faa0774e07eada85b0c55ec789b3f337d01412 (patch)
tree       439566bd5b8d0187d380b950ef386c349448aca9 /kernel/workqueue.c
parent     45d9550a0e7e9230606ca3c4c6f4dc6297848b2f (diff)
workqueue: use %current instead of worker->task in worker_maybe_bind_and_lock()
worker_maybe_bind_and_lock() uses both @worker->task and @current at
the same time. As worker_maybe_bind_and_lock() can only be called by
the current worker task, they are always the same.
Update worker_maybe_bind_and_lock() to use %current consistently.
This doesn't introduce any functional change.
tj: Massaged the description.
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
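
[Illustration only, not part of the commit: a minimal user-space C sketch of the reasoning above. A helper that is only ever invoked from the thread it describes can drop its stored thread handle and ask the threading library for the calling thread directly, which is what the patch does with %current in the kernel. The struct worker, stored_handle_is_self() and worker_fn() names are made up for this example; build with "cc -pthread".]

/* worker_analogy.c - user-space sketch, build with: cc -pthread worker_analogy.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct worker {
	pthread_t task;			/* analogue of worker->task */
	pthread_barrier_t started;	/* ->task is published before this */
};

/*
 * Only ever called from the worker's own thread, just as
 * worker_maybe_bind_and_lock() is only called by the worker task it
 * belongs to, so the stored handle and the calling thread always match.
 */
static bool stored_handle_is_self(struct worker *worker)
{
	return pthread_equal(worker->task, pthread_self()) != 0;
}

static void *worker_fn(void *arg)
{
	struct worker *worker = arg;

	/* wait until the creator has stored worker->task */
	pthread_barrier_wait(&worker->started);
	printf("worker->task == current thread: %s\n",
	       stored_handle_is_self(worker) ? "yes" : "no");
	return NULL;
}

int main(void)
{
	struct worker w;

	pthread_barrier_init(&w.started, NULL, 2);
	pthread_create(&w.task, NULL, worker_fn, &w);
	pthread_barrier_wait(&w.started);	/* w.task is now valid */
	pthread_join(w.task, NULL);
	pthread_barrier_destroy(&w.started);
	return 0;
}

[In the kernel case the same invariant holds by construction: the function is only called by the worker's own task, so current == worker->task and the local task variable carries no extra information.]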
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--   kernel/workqueue.c   7
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 81f2457811eb..f456433cf535 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1512,7 +1512,7 @@ static void worker_leave_idle(struct worker *worker)
  * flushed from cpu callbacks while cpu is going down, they are
  * guaranteed to execute on the cpu.
  *
- * This function is to be used by rogue workers and rescuers to bind
+ * This function is to be used by unbound workers and rescuers to bind
  * themselves to the target cpu and may race with cpu going down or
  * coming online.  kthread_bind() can't be used because it may put the
  * worker to already dead cpu and set_cpus_allowed_ptr() can't be used
@@ -1537,7 +1537,6 @@ static bool worker_maybe_bind_and_lock(struct worker *worker)
 __acquires(&pool->lock)
 {
 	struct worker_pool *pool = worker->pool;
-	struct task_struct *task = worker->task;
 
 	while (true) {
 		/*
@@ -1547,12 +1546,12 @@ __acquires(&pool->lock)
 		 * against POOL_DISASSOCIATED.
 		 */
 		if (!(pool->flags & POOL_DISASSOCIATED))
-			set_cpus_allowed_ptr(task, get_cpu_mask(pool->cpu));
+			set_cpus_allowed_ptr(current, get_cpu_mask(pool->cpu));
 
 		spin_lock_irq(&pool->lock);
 		if (pool->flags & POOL_DISASSOCIATED)
 			return false;
-		if (task_cpu(task) == pool->cpu &&
+		if (task_cpu(current) == pool->cpu &&
 		    cpumask_equal(&current->cpus_allowed,
 				  get_cpu_mask(pool->cpu)))
 			return true;