author	Lai Jiangshan <jiangshanlai@gmail.com>	2017-12-01 09:20:36 -0500
committer	Tejun Heo <tj@kernel.org>	2017-12-04 17:44:11 -0500
commit	e8b3f8db7aad99fcc5234fc5b89984ff6620de3d
tree	a7b870b5b2899297c9a797b467919ae31bd581f4
parent	c98a9805096460567404799a7bd3149826affde7
workqueue/hotplug: simplify workqueue_offline_cpu()
Since the recent cpu/hotplug refactoring, workqueue_offline_cpu() is
guaranteed to run on the local cpu which is going offline.

This also fixes the following deadlock by removing work item scheduling
and flushing from the CPU hotplug path:

  http://lkml.kernel.org/r/1504764252-29091-1-git-send-email-prsood@codeaurora.org

tj: Description update.

Signed-off-by: Lai Jiangshan <jiangshanlai@gmail.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
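In code terms, the simplification condenses to the following. This is a
sketch distilled from the diff below, not a standalone build; it assumes
the kernel-internal workqueue APIs visible in the hunks:

	/* Before: workqueue_offline_cpu() could run on any CPU, so it
	 * bounced the unbinding to the outgoing CPU via an on-stack
	 * highpri work item and waited for it. */
	struct work_struct unbind_work;

	INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
	queue_work_on(cpu, system_highpri_wq, &unbind_work);
	/* ... NUMA affinity update elided ... */
	flush_work(&unbind_work);	/* flushing from the hotplug path
					 * is the deadlock-prone step */
	destroy_work_on_stack(&unbind_work);

	/* After: the hotplug core already invokes this callback on the
	 * CPU going down, so the unbinding is a direct call. */
	if (WARN_ON(cpu != smp_processor_id()))
		return -1;
	unbind_workers(cpu);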
 kernel/workqueue.c | 15 ++++++---------
 1 file changed, 6 insertions(+), 9 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 6a5658cb46da..48a4d00f55dc 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1635,7 +1635,7 @@ static void worker_enter_idle(struct worker *worker)
 	mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
 
 	/*
-	 * Sanity check nr_running. Because wq_unbind_fn() releases
+	 * Sanity check nr_running. Because unbind_workers() releases
 	 * pool->lock between setting %WORKER_UNBOUND and zapping
 	 * nr_running, the warning may trigger spuriously. Check iff
 	 * unbind is not in progress.
@@ -4511,9 +4511,8 @@ void show_workqueue_state(void)
  * cpu comes back online.
  */
 
-static void wq_unbind_fn(struct work_struct *work)
+static void unbind_workers(int cpu)
 {
-	int cpu = smp_processor_id();
 	struct worker_pool *pool;
 	struct worker *worker;
 
@@ -4710,12 +4709,13 @@ int workqueue_online_cpu(unsigned int cpu)
 
 int workqueue_offline_cpu(unsigned int cpu)
 {
-	struct work_struct unbind_work;
 	struct workqueue_struct *wq;
 
 	/* unbinding per-cpu workers should happen on the local CPU */
-	INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
-	queue_work_on(cpu, system_highpri_wq, &unbind_work);
+	if (WARN_ON(cpu != smp_processor_id()))
+		return -1;
+
+	unbind_workers(cpu);
 
 	/* update NUMA affinity of unbound workqueues */
 	mutex_lock(&wq_pool_mutex);
@@ -4723,9 +4723,6 @@ int workqueue_offline_cpu(unsigned int cpu)
 	wq_update_unbound_numa(wq, cpu, false);
 	mutex_unlock(&wq_pool_mutex);
 
-	/* wait for per-cpu unbinding to finish */
-	flush_work(&unbind_work);
-	destroy_work_on_stack(&unbind_work);
 	return 0;
 }
 
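Why the new WARN_ON() is safe to rely on: workqueue_offline_cpu() is
registered as the teardown half of an AP ("application processor")
hotplug state, and the hotplug core runs AP teardown callbacks on the
CPU that is going down. A sketch of the registration, paraphrased from
kernel/cpu.c of the same era (the exact table name and layout vary
between kernel versions):

	/* kernel/cpu.c, AP hotplug state table (paraphrased) */
	[CPUHP_AP_WORKQUEUE_ONLINE] = {
		.name			= "workqueue:online",
		.startup.single		= workqueue_online_cpu,
		.teardown.single	= workqueue_offline_cpu,
	},

Because this state sits in the AP range of the hotplug state machine,
the callback executes on the outgoing CPU itself, which is exactly the
guarantee the commit message cites and the smp_processor_id() check
asserts.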