author     Tejun Heo <tj@kernel.org>    2013-03-19 16:45:21 -0400
committer  Tejun Heo <tj@kernel.org>    2013-03-19 16:45:21 -0400
commit     bd7c089eb25b26d2e03fd34f97e5517a4463f871 (patch)
tree       17b8a08678910689293e1cf06942f20bea42929c /kernel/workqueue.c
parent     822d8405d13931062d653e0c2cc0199ed801b072 (diff)
workqueue: relocate rebind_workers()
rebind_workers() will be reimplemented in a way which makes it mostly
decoupled from the rest of worker management. Move rebind_workers()
so that it's located with other CPU hotplug related functions.

This patch is pure function relocation.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c  142
1 file changed, 71 insertions(+), 71 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 384ff34c9aff..3e297c574be8 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1643,77 +1643,6 @@ static void busy_worker_rebind_fn(struct work_struct *work)
         spin_unlock_irq(&worker->pool->lock);
 }
 
-/**
- * rebind_workers - rebind all workers of a pool to the associated CPU
- * @pool: pool of interest
- *
- * @pool->cpu is coming online. Rebind all workers to the CPU. Rebinding
- * is different for idle and busy ones.
- *
- * Idle ones will be removed from the idle_list and woken up. They will
- * add themselves back after completing rebind. This ensures that the
- * idle_list doesn't contain any unbound workers when re-bound busy workers
- * try to perform local wake-ups for concurrency management.
- *
- * Busy workers can rebind after they finish their current work items.
- * Queueing the rebind work item at the head of the scheduled list is
- * enough. Note that nr_running will be properly bumped as busy workers
- * rebind.
- *
- * On return, all non-manager workers are scheduled for rebind - see
- * manage_workers() for the manager special case. Any idle worker
- * including the manager will not appear on @idle_list until rebind is
- * complete, making local wake-ups safe.
- */
-static void rebind_workers(struct worker_pool *pool)
-{
-        struct worker *worker, *n;
-        int i;
-
-        lockdep_assert_held(&pool->manager_mutex);
-        lockdep_assert_held(&pool->lock);
-
-        /* dequeue and kick idle ones */
-        list_for_each_entry_safe(worker, n, &pool->idle_list, entry) {
-                /*
-                 * idle workers should be off @pool->idle_list until rebind
-                 * is complete to avoid receiving premature local wake-ups.
-                 */
-                list_del_init(&worker->entry);
-
-                /*
-                 * worker_thread() will see the above dequeuing and call
-                 * idle_worker_rebind().
-                 */
-                wake_up_process(worker->task);
-        }
-
-        /* rebind busy workers */
-        for_each_busy_worker(worker, i, pool) {
-                struct work_struct *rebind_work = &worker->rebind_work;
-                struct workqueue_struct *wq;
-
-                if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
-                                     work_data_bits(rebind_work)))
-                        continue;
-
-                debug_work_activate(rebind_work);
-
-                /*
-                 * wq doesn't really matter but let's keep @worker->pool
-                 * and @pwq->pool consistent for sanity.
-                 */
-                if (worker->pool->attrs->nice < 0)
-                        wq = system_highpri_wq;
-                else
-                        wq = system_wq;
-
-                insert_work(per_cpu_ptr(wq->cpu_pwqs, pool->cpu), rebind_work,
-                            worker->scheduled.next,
-                            work_color_to_flags(WORK_NO_COLOR));
-        }
-}
-
 static struct worker *alloc_worker(void)
 {
         struct worker *worker;
@@ -4196,6 +4125,77 @@ static void wq_unbind_fn(struct work_struct *work)
                 atomic_set(&pool->nr_running, 0);
 }
 
+/**
+ * rebind_workers - rebind all workers of a pool to the associated CPU
+ * @pool: pool of interest
+ *
+ * @pool->cpu is coming online. Rebind all workers to the CPU. Rebinding
+ * is different for idle and busy ones.
+ *
+ * Idle ones will be removed from the idle_list and woken up. They will
+ * add themselves back after completing rebind. This ensures that the
+ * idle_list doesn't contain any unbound workers when re-bound busy workers
+ * try to perform local wake-ups for concurrency management.
+ *
+ * Busy workers can rebind after they finish their current work items.
+ * Queueing the rebind work item at the head of the scheduled list is
+ * enough. Note that nr_running will be properly bumped as busy workers
+ * rebind.
+ *
+ * On return, all non-manager workers are scheduled for rebind - see
+ * manage_workers() for the manager special case. Any idle worker
+ * including the manager will not appear on @idle_list until rebind is
+ * complete, making local wake-ups safe.
+ */
+static void rebind_workers(struct worker_pool *pool)
+{
+        struct worker *worker, *n;
+        int i;
+
+        lockdep_assert_held(&pool->manager_mutex);
+        lockdep_assert_held(&pool->lock);
+
+        /* dequeue and kick idle ones */
+        list_for_each_entry_safe(worker, n, &pool->idle_list, entry) {
+                /*
+                 * idle workers should be off @pool->idle_list until rebind
+                 * is complete to avoid receiving premature local wake-ups.
+                 */
+                list_del_init(&worker->entry);
+
+                /*
+                 * worker_thread() will see the above dequeuing and call
+                 * idle_worker_rebind().
+                 */
+                wake_up_process(worker->task);
+        }
+
+        /* rebind busy workers */
+        for_each_busy_worker(worker, i, pool) {
+                struct work_struct *rebind_work = &worker->rebind_work;
+                struct workqueue_struct *wq;
+
+                if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
+                                     work_data_bits(rebind_work)))
+                        continue;
+
+                debug_work_activate(rebind_work);
+
+                /*
+                 * wq doesn't really matter but let's keep @worker->pool
+                 * and @pwq->pool consistent for sanity.
+                 */
+                if (worker->pool->attrs->nice < 0)
+                        wq = system_highpri_wq;
+                else
+                        wq = system_wq;
+
+                insert_work(per_cpu_ptr(wq->cpu_pwqs, pool->cpu), rebind_work,
+                            worker->scheduled.next,
+                            work_color_to_flags(WORK_NO_COLOR));
+        }
+}
+
 /*
  * Workqueues should be brought up before normal priority CPU notifiers.
  * This will be registered high priority CPU notifier.