author     Lai Jiangshan <laijs@cn.fujitsu.com>   2012-09-18 12:59:22 -0400
committer  Tejun Heo <tj@kernel.org>              2012-09-18 12:59:22 -0400
commit     eab6d82843ee1df244f8847d1bf8bb89160ec4aa
tree       86a43e6ae1734779fe54ea5e62408395e6d0b36a
parent     ea1abd6197d5805655da1bb589929762f4b4aa08
workqueue: WORKER_REBIND is no longer necessary for busy rebinding
Because the old unbind/rebinding implementation wasn't atomic w.r.t.
GCWQ_DISASSOCIATED manipulation, which is protected by
global_cwq->lock, we had to use two flags, WORKER_UNBOUND and
WORKER_REBIND, to avoid incorrectly losing all NOT_RUNNING bits across
back-to-back CPU hotplug operations; otherwise, completion of
rebinding while another unbinding is in progress could clear UNBOUND
prematurely.

Now that both unbinding and rebinding are atomic w.r.t.
GCWQ_DISASSOCIATED, there's no need for two flags; one is enough.
Don't use WORKER_REBIND for busy rebinding.
tj: Updated description.
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
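
[Editorial note] To make the race concrete, here is a minimal standalone
sketch of the two schemes. This is an illustration only: the flag values
and helper names are invented, and the concurrent hotplug/completion
steps are serialized into one thread for clarity.

/*
 * Standalone model of the race described above; NOT kernel code.
 * In the real code these steps run from CPU hotplug callbacks and a
 * work item, serialized by gcwq->lock.
 */
#include <stdio.h>

#define WORKER_UNBOUND	(1 << 0)
#define WORKER_REBIND	(1 << 1)

static unsigned int flags;

/* CPU_DOWN unbinds the worker */
static void unbind(void)		{ flags |= WORKER_UNBOUND; }

/* single-flag scheme: rebind completion clears UNBOUND directly */
static void rebind_done_one_flag(void)	{ flags &= ~WORKER_UNBOUND; }

/*
 * two-flag scheme: CPU_UP first morphs UNBOUND into REBIND, so a
 * completion that runs late can only ever clear REBIND
 */
static void morph(void)
{
	flags = (flags & ~WORKER_UNBOUND) | WORKER_REBIND;
}
static void rebind_done_two_flag(void)	{ flags &= ~WORKER_REBIND; }

int main(void)
{
	/*
	 * back-to-back hotplug, single flag: the late completion clears
	 * the UNBOUND that the second CPU_DOWN just set
	 */
	flags = 0;
	unbind();			/* CPU_DOWN		*/
	/* CPU_UP queues the rebind work; it does not run yet	*/
	unbind();			/* CPU_DOWN again	*/
	rebind_done_one_flag();		/* late completion	*/
	printf("one flag:  UNBOUND=%u (want 1)\n", flags & WORKER_UNBOUND);

	/* same sequence, two flags: UNBOUND survives */
	flags = 0;
	unbind();			/* CPU_DOWN		*/
	morph();			/* CPU_UP		*/
	unbind();			/* CPU_DOWN again	*/
	rebind_done_two_flag();		/* late completion	*/
	printf("two flags: UNBOUND=%u (want 1)\n", flags & WORKER_UNBOUND);
	return 0;
}

Under back-to-back hotplug, the single-flag completion wipes out the
UNBOUND state a newer CPU_DOWN just set; the morph step gives the late
completion its own flag to clear. The patch below removes that morph
because gcwq->lock now provides the same guarantee.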
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/workqueue.c  18
1 file changed, 2 insertions(+), 16 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 770c1a8128b..794724efb73 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1649,16 +1649,8 @@ static void busy_worker_rebind_fn(struct work_struct *work)
 	struct worker *worker = container_of(work, struct worker, rebind_work);
 	struct global_cwq *gcwq = worker->pool->gcwq;
 
-	worker_maybe_bind_and_lock(worker);
-
-	/*
-	 * %WORKER_REBIND must be cleared even if the above binding failed;
-	 * otherwise, we may confuse the next CPU_UP cycle or oops / get
-	 * stuck by calling idle_worker_rebind() prematurely.  If CPU went
-	 * down again inbetween, %WORKER_UNBOUND would be set, so clearing
-	 * %WORKER_REBIND is always safe.
-	 */
-	worker_clr_flags(worker, WORKER_REBIND);
+	if (worker_maybe_bind_and_lock(worker))
+		worker_clr_flags(worker, WORKER_UNBOUND);
 
 	spin_unlock_irq(&gcwq->lock);
 }
@@ -1721,15 +1713,9 @@ static void rebind_workers(struct global_cwq *gcwq)
 
 	/* rebind busy workers */
 	for_each_busy_worker(worker, i, pos, gcwq) {
-		unsigned long worker_flags = worker->flags;
 		struct work_struct *rebind_work = &worker->rebind_work;
 		struct workqueue_struct *wq;
 
-		/* morph UNBOUND to REBIND atomically */
-		worker_flags &= ~WORKER_UNBOUND;
-		worker_flags |= WORKER_REBIND;
-		ACCESS_ONCE(worker->flags) = worker_flags;
-
 		if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
 				     work_data_bits(rebind_work)))
 			continue;
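
[Editorial note] Taken together, the two hunks reduce busy rebinding to
a single flag. For reference, a sketch of the patched
busy_worker_rebind_fn() with the locking argument spelled out; the
explanatory comment is editorial, not from the source tree.

static void busy_worker_rebind_fn(struct work_struct *work)
{
	struct worker *worker = container_of(work, struct worker, rebind_work);
	struct global_cwq *gcwq = worker->pool->gcwq;

	/*
	 * worker_maybe_bind_and_lock() returns %true only if the gcwq is
	 * still associated, and it returns with gcwq->lock held.  Unbinding
	 * manipulates GCWQ_DISASSOCIATED under the same lock, so no CPU_DOWN
	 * can slip in between the check and the clear: clearing
	 * WORKER_UNBOUND here is race-free, and WORKER_REBIND is unneeded.
	 */
	if (worker_maybe_bind_and_lock(worker))
		worker_clr_flags(worker, WORKER_UNBOUND);

	spin_unlock_irq(&gcwq->lock);
}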