author		Lai Jiangshan <laijs@cn.fujitsu.com>	2012-09-18 12:59:23 -0400
committer	Tejun Heo <tj@kernel.org>	2012-09-18 12:59:23 -0400
commit		5f7dabfd5cb115937afb4649e4c73b02f927f6ae (patch)
tree		9b47bba67879363d70d68e3c9209debaf80aca0a /kernel/workqueue.c
parent		eab6d82843ee1df244f8847d1bf8bb89160ec4aa (diff)
workqueue: WORKER_REBIND is no longer necessary for idle rebinding
Now both worker destruction and idle rebinding remove the worker from
the idle list while it's still idle, so list_empty(&worker->entry) can
be used to test whether either is pending, with WORKER_DIE
distinguishing between the two. This makes WORKER_REBIND unnecessary.
Use list_empty(&worker->entry) to determine whether destruction or
rebinding is pending. This simplifies worker state transitions.
WORKER_REBIND is not needed anymore. Remove it.
tj: Updated comments and description.
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	41
1 file changed, 15 insertions(+), 26 deletions(-)
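
The idiom this commit relies on is that an empty embedded list node doubles as a "request pending" signal: both the destruction path and rebind_workers() dequeue an idle worker before waking it, so the woken worker only has to check list_empty(&worker->entry) and then consult WORKER_DIE to tell the two requests apart. The toy userspace sketch below condenses that decision logic; it is not kernel code, and all names and list helpers in it are simplified stand-ins for the <linux/list.h> idiom, invented for illustration.

/*
 * Toy userspace illustration (not kernel code): an empty embedded
 * list node means "someone dequeued us and wants something", and a
 * single flag distinguishes what. All names here are invented.
 */
#include <stdbool.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static bool list_empty(const struct list_head *h) { return h->next == h; }
static void list_del_init(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
	INIT_LIST_HEAD(e);		/* node is empty again: this is the signal */
}
static void list_add(struct list_head *e, struct list_head *h)
{
	e->next = h->next; e->prev = h;
	h->next->prev = e; h->next = e;
}

#define WORKER_DIE (1 << 1)

struct worker {
	struct list_head entry;		/* idle-list linkage */
	unsigned int flags;
};

/* wakeup path: mirrors the worker_thread() check after this commit */
static void handle_wakeup(struct worker *w)
{
	if (list_empty(&w->entry)) {	/* off idle list => request pending */
		if (w->flags & WORKER_DIE)
			printf("destruction requested\n");
		else
			printf("rebind requested\n");
		return;
	}
	printf("still idle, nothing pending\n");
}

int main(void)
{
	struct list_head idle_list;
	struct worker w = { .flags = 0 };

	INIT_LIST_HEAD(&idle_list);
	INIT_LIST_HEAD(&w.entry);
	list_add(&w.entry, &idle_list);	/* worker goes idle */

	handle_wakeup(&w);		/* spurious wakeup: still queued */

	list_del_init(&w.entry);	/* manager dequeues before kicking */
	handle_wakeup(&w);		/* rebind requested */

	w.flags |= WORKER_DIE;
	handle_wakeup(&w);		/* destruction requested */
	return 0;
}

The design payoff is visible in the diff below: instead of a dedicated WORKER_REBIND flag that had to be set, tested, and cleared in lockstep with the wakeup, the idle-list membership the workers already maintain carries the same information.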
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 794724efb733..cdc6bfc84b78 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -73,11 +73,10 @@ enum {
 	WORKER_DIE		= 1 << 1,	/* die die die */
 	WORKER_IDLE		= 1 << 2,	/* is idle */
 	WORKER_PREP		= 1 << 3,	/* preparing to run works */
-	WORKER_REBIND		= 1 << 5,	/* mom is home, come back */
 	WORKER_CPU_INTENSIVE	= 1 << 6,	/* cpu intensive */
 	WORKER_UNBOUND		= 1 << 7,	/* worker is unbound */
 
-	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_REBIND | WORKER_UNBOUND |
+	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_UNBOUND |
 				  WORKER_CPU_INTENSIVE,
 
 	NR_WORKER_POOLS		= 2,		/* # worker pools per gcwq */
@@ -1618,20 +1617,15 @@ __acquires(&gcwq->lock)
 
 /*
  * Rebind an idle @worker to its CPU.  worker_thread() will test
- * %WORKER_REBIND before leaving idle and call this function.
+ * list_empty(@worker->entry) before leaving idle and call this function.
  */
 static void idle_worker_rebind(struct worker *worker)
 {
 	struct global_cwq *gcwq = worker->pool->gcwq;
 
-	/*
-	 * CPU may go down again inbetween.  If rebinding fails, reinstate
-	 * UNBOUND.  We're off idle_list and nobody else can do it for us.
-	 */
-	if (!worker_maybe_bind_and_lock(worker))
-		worker->flags |= WORKER_UNBOUND;
-
-	worker_clr_flags(worker, WORKER_REBIND);
+	/* CPU may go down again inbetween, clear UNBOUND only on success */
+	if (worker_maybe_bind_and_lock(worker))
+		worker_clr_flags(worker, WORKER_UNBOUND);
 
 	/* rebind complete, become available again */
 	list_add(&worker->entry, &worker->pool->idle_list);
@@ -1689,16 +1683,9 @@ static void rebind_workers(struct global_cwq *gcwq)
 	for_each_worker_pool(pool, gcwq)
 		lockdep_assert_held(&pool->manager_mutex);
 
-	/* set REBIND and kick idle ones */
+	/* dequeue and kick idle ones */
 	for_each_worker_pool(pool, gcwq) {
 		list_for_each_entry_safe(worker, n, &pool->idle_list, entry) {
-			unsigned long worker_flags = worker->flags;
-
-			/* morph UNBOUND to REBIND atomically */
-			worker_flags &= ~WORKER_UNBOUND;
-			worker_flags |= WORKER_REBIND;
-			ACCESS_ONCE(worker->flags) = worker_flags;
-
 			/*
 			 * idle workers should be off @pool->idle_list
 			 * until rebind is complete to avoid receiving
@@ -1706,7 +1693,10 @@ static void rebind_workers(struct global_cwq *gcwq)
 			 */
 			list_del_init(&worker->entry);
 
-			/* worker_thread() will call idle_worker_rebind() */
+			/*
+			 * worker_thread() will see the above dequeuing
+			 * and call idle_worker_rebind().
+			 */
 			wake_up_process(worker->task);
 		}
 	}
@@ -2176,7 +2166,7 @@ __acquires(&gcwq->lock)
 	 * necessary to avoid spurious warnings from rescuers servicing the
 	 * unbound or a disassociated gcwq.
 	 */
-	WARN_ON_ONCE(!(worker->flags & (WORKER_UNBOUND | WORKER_REBIND)) &&
+	WARN_ON_ONCE(!(worker->flags & WORKER_UNBOUND) &&
 		     !(gcwq->flags & GCWQ_DISASSOCIATED) &&
 		     raw_smp_processor_id() != gcwq->cpu);
 
@@ -2300,18 +2290,17 @@ static int worker_thread(void *__worker)
 woke_up:
 	spin_lock_irq(&gcwq->lock);
 
-	/*
-	 * DIE can be set only while idle and REBIND set while busy has
-	 * @worker->rebind_work scheduled.  Checking here is enough.
-	 */
-	if (unlikely(worker->flags & (WORKER_REBIND | WORKER_DIE))) {
+	/* we are off idle list if destruction or rebind is requested */
+	if (unlikely(list_empty(&worker->entry))) {
 		spin_unlock_irq(&gcwq->lock);
 
+		/* if DIE is set, destruction is requested */
 		if (worker->flags & WORKER_DIE) {
 			worker->task->flags &= ~PF_WQ_WORKER;
 			return 0;
 		}
 
+		/* otherwise, rebind */
 		idle_worker_rebind(worker);
 		goto woke_up;
 	}