Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c  108
1 file changed, 43 insertions(+), 65 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index e1c0e996b5ae..c9dd5fbdbf33 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -4611,84 +4611,65 @@ static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
 						  pool->attrs->cpumask) < 0);
 }
 
-/*
- * Workqueues should be brought up before normal priority CPU notifiers.
- * This will be registered high priority CPU notifier.
- */
-static int workqueue_cpu_up_callback(struct notifier_block *nfb,
-					       unsigned long action,
-					       void *hcpu)
+int workqueue_prepare_cpu(unsigned int cpu)
+{
+	struct worker_pool *pool;
+
+	for_each_cpu_worker_pool(pool, cpu) {
+		if (pool->nr_workers)
+			continue;
+		if (!create_worker(pool))
+			return -ENOMEM;
+	}
+	return 0;
+}
+
+int workqueue_online_cpu(unsigned int cpu)
 {
-	int cpu = (unsigned long)hcpu;
 	struct worker_pool *pool;
 	struct workqueue_struct *wq;
 	int pi;
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
-		for_each_cpu_worker_pool(pool, cpu) {
-			if (pool->nr_workers)
-				continue;
-			if (!create_worker(pool))
-				return NOTIFY_BAD;
-		}
-		break;
-
-	case CPU_DOWN_FAILED:
-	case CPU_ONLINE:
-		mutex_lock(&wq_pool_mutex);
+	mutex_lock(&wq_pool_mutex);
 
-		for_each_pool(pool, pi) {
-			mutex_lock(&pool->attach_mutex);
+	for_each_pool(pool, pi) {
+		mutex_lock(&pool->attach_mutex);
 
-			if (pool->cpu == cpu)
-				rebind_workers(pool);
-			else if (pool->cpu < 0)
-				restore_unbound_workers_cpumask(pool, cpu);
+		if (pool->cpu == cpu)
+			rebind_workers(pool);
+		else if (pool->cpu < 0)
+			restore_unbound_workers_cpumask(pool, cpu);
 
-			mutex_unlock(&pool->attach_mutex);
-		}
+		mutex_unlock(&pool->attach_mutex);
+	}
 
-		/* update NUMA affinity of unbound workqueues */
-		list_for_each_entry(wq, &workqueues, list)
-			wq_update_unbound_numa(wq, cpu, true);
+	/* update NUMA affinity of unbound workqueues */
+	list_for_each_entry(wq, &workqueues, list)
+		wq_update_unbound_numa(wq, cpu, true);
 
-		mutex_unlock(&wq_pool_mutex);
-		break;
-	}
-	return NOTIFY_OK;
+	mutex_unlock(&wq_pool_mutex);
+	return 0;
 }
 
-/*
- * Workqueues should be brought down after normal priority CPU notifiers.
- * This will be registered as low priority CPU notifier.
- */
-static int workqueue_cpu_down_callback(struct notifier_block *nfb,
-					       unsigned long action,
-					       void *hcpu)
+int workqueue_offline_cpu(unsigned int cpu)
 {
-	int cpu = (unsigned long)hcpu;
 	struct work_struct unbind_work;
 	struct workqueue_struct *wq;
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_DOWN_PREPARE:
-		/* unbinding per-cpu workers should happen on the local CPU */
-		INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
-		queue_work_on(cpu, system_highpri_wq, &unbind_work);
-
-		/* update NUMA affinity of unbound workqueues */
-		mutex_lock(&wq_pool_mutex);
-		list_for_each_entry(wq, &workqueues, list)
-			wq_update_unbound_numa(wq, cpu, false);
-		mutex_unlock(&wq_pool_mutex);
-
-		/* wait for per-cpu unbinding to finish */
-		flush_work(&unbind_work);
-		destroy_work_on_stack(&unbind_work);
-		break;
-	}
-	return NOTIFY_OK;
+	/* unbinding per-cpu workers should happen on the local CPU */
+	INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
+	queue_work_on(cpu, system_highpri_wq, &unbind_work);
+
+	/* update NUMA affinity of unbound workqueues */
+	mutex_lock(&wq_pool_mutex);
+	list_for_each_entry(wq, &workqueues, list)
+		wq_update_unbound_numa(wq, cpu, false);
+	mutex_unlock(&wq_pool_mutex);
+
+	/* wait for per-cpu unbinding to finish */
+	flush_work(&unbind_work);
+	destroy_work_on_stack(&unbind_work);
+	return 0;
 }
 
 #ifdef CONFIG_SMP
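The three new callbacks are non-static and have no callers left in this file, so they must be declared in a header and invoked by the CPU hotplug core. A minimal sketch of what those prototypes could look like; the exact header, guards, and placement are assumptions, since the header side of the change is not part of this diff:

/* sketch only: prototypes matching the new definitions above, e.g. in
 * include/linux/workqueue.h; the real guards/placement are not shown here */
int workqueue_prepare_cpu(unsigned int cpu);
int workqueue_online_cpu(unsigned int cpu);
int workqueue_offline_cpu(unsigned int cpu);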
@@ -5490,9 +5471,6 @@ static int __init init_workqueues(void)
 
 	pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
 
-	cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
-	hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
-
 	wq_numa_init();
 
 	/* initialize CPU pools */
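With the cpu_notifier()/hotcpu_notifier() registrations removed from init_workqueues(), the prepare/online/offline callbacks have to be driven by the CPU hotplug state machine instead; the workqueue-specific wiring happens outside this file and is not shown by this diff. As a rough illustration of the state-machine model only, a driver-style user would register an online/offline pair against a dynamic hotplug state roughly like this; my_online, my_offline, and my_example_init are hypothetical names for the sketch:

#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/printk.h>

/* hypothetical callbacks: return 0 on success, a negative errno to
 * veto the hotplug transition */
static int my_online(unsigned int cpu)
{
	pr_info("example: cpu %u is coming online\n", cpu);
	return 0;
}

static int my_offline(unsigned int cpu)
{
	pr_info("example: cpu %u is going offline\n", cpu);
	return 0;
}

static int __init my_example_init(void)
{
	int ret;

	/*
	 * Allocate a dynamic state in the online section. The startup
	 * callback is also invoked for every CPU that is already online
	 * at registration time.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
				my_online, my_offline);
	return ret < 0 ? ret : 0;
}

Compared with the old notifier chains, the switch on CPU_* action codes disappears: each callback is tied to one well-defined point in the bring-up/tear-down sequence, and returning an error cleanly rolls the transition back.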