path: root/kernel/workqueue.c
author    Linus Torvalds <torvalds@linux-foundation.org>    2016-07-29 16:55:30 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2016-07-29 16:55:30 -0400
commit    a6408f6cb63ac0958fee7dbce7861ffb540d8a49 (patch)
tree      c94a835d343974171951e3b805e6bbbb02852ebc /kernel/workqueue.c
parent    1a81a8f2a5918956e214bb718099a89e500e7ec5 (diff)
parent    4fae16dffb812f0e0d98a0b2b0856ca48ca63e6c (diff)
Merge branch 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull smp hotplug updates from Thomas Gleixner:
 "This is the next part of the hotplug rework.

   - Convert all notifiers with a priority assigned

   - Convert all CPU_STARTING/DYING notifiers

     The final removal of the STARTING/DYING infrastructure will happen
     when the merge window closes.

  Another 700 lines of impenetrable maze gone :)"

* 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (70 commits)
  timers/core: Correct callback order during CPU hot plug
  leds/trigger/cpu: Move from CPU_STARTING to ONLINE level
  powerpc/numa: Convert to hotplug state machine
  arm/perf: Fix hotplug state machine conversion
  irqchip/armada: Avoid unused function warnings
  ARC/time: Convert to hotplug state machine
  clocksource/atlas7: Convert to hotplug state machine
  clocksource/armada-370-xp: Convert to hotplug state machine
  clocksource/exynos_mct: Convert to hotplug state machine
  clocksource/arm_global_timer: Convert to hotplug state machine
  rcu: Convert rcutree to hotplug state machine
  KVM/arm/arm64/vgic-new: Convert to hotplug state machine
  smp/cfd: Convert core to hotplug state machine
  x86/x2apic: Convert to CPU hotplug state machine
  profile: Convert to hotplug state machine
  timers/core: Convert to hotplug state machine
  hrtimer: Convert to hotplug state machine
  x86/tboot: Convert to hotplug state machine
  arm64/armv8 deprecated: Convert to hotplug state machine
  hwtracing/coresight-etm4x: Convert to hotplug state machine
  ...
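For orientation, the conversions in this branch replace notifier-chain registration with startup/teardown callbacks registered on the CPU hotplug state machine. A minimal sketch of that pattern, assuming the cpuhp_setup_state() API the rework is built on; the my_* names are hypothetical placeholders, not code from this merge:

#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

/* Hypothetical example callbacks -- not part of this commit. */
static int my_online_cpu(unsigned int cpu)
{
        /* bring up per-cpu state; a negative errno aborts the hotplug step */
        return 0;
}

static int my_offline_cpu(unsigned int cpu)
{
        /* tear down per-cpu state */
        return 0;
}

static int __init my_subsys_init(void)
{
        int ret;

        /*
         * Register an online/offline pair in the dynamic ONLINE section of
         * the state machine.  The startup callback also runs for CPUs that
         * are already online, so no separate bring-up loop is needed.
         */
        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "subsys:online",
                                my_online_cpu, my_offline_cpu);
        return ret < 0 ? ret : 0;
}

Unlike the old cpu_notifier() scheme, cpuhp_setup_state() invokes the startup callback for every CPU already online at registration time, which removes the racy "register notifier, then loop over online CPUs" pattern.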
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--    kernel/workqueue.c    108
1 file changed, 43 insertions(+), 65 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index d12bd958077e..ef071ca73fc3 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -4607,84 +4607,65 @@ static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
 	WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0);
 }
 
-/*
- * Workqueues should be brought up before normal priority CPU notifiers.
- * This will be registered high priority CPU notifier.
- */
-static int workqueue_cpu_up_callback(struct notifier_block *nfb,
-				     unsigned long action,
-				     void *hcpu)
+int workqueue_prepare_cpu(unsigned int cpu)
+{
+	struct worker_pool *pool;
+
+	for_each_cpu_worker_pool(pool, cpu) {
+		if (pool->nr_workers)
+			continue;
+		if (!create_worker(pool))
+			return -ENOMEM;
+	}
+	return 0;
+}
+
+int workqueue_online_cpu(unsigned int cpu)
 {
-	int cpu = (unsigned long)hcpu;
 	struct worker_pool *pool;
 	struct workqueue_struct *wq;
 	int pi;
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
-		for_each_cpu_worker_pool(pool, cpu) {
-			if (pool->nr_workers)
-				continue;
-			if (!create_worker(pool))
-				return NOTIFY_BAD;
-		}
-		break;
-
-	case CPU_DOWN_FAILED:
-	case CPU_ONLINE:
-		mutex_lock(&wq_pool_mutex);
+	mutex_lock(&wq_pool_mutex);
 
-		for_each_pool(pool, pi) {
-			mutex_lock(&pool->attach_mutex);
+	for_each_pool(pool, pi) {
+		mutex_lock(&pool->attach_mutex);
 
-			if (pool->cpu == cpu)
-				rebind_workers(pool);
-			else if (pool->cpu < 0)
-				restore_unbound_workers_cpumask(pool, cpu);
+		if (pool->cpu == cpu)
+			rebind_workers(pool);
+		else if (pool->cpu < 0)
+			restore_unbound_workers_cpumask(pool, cpu);
 
-			mutex_unlock(&pool->attach_mutex);
-		}
+		mutex_unlock(&pool->attach_mutex);
+	}
 
-		/* update NUMA affinity of unbound workqueues */
-		list_for_each_entry(wq, &workqueues, list)
-			wq_update_unbound_numa(wq, cpu, true);
+	/* update NUMA affinity of unbound workqueues */
+	list_for_each_entry(wq, &workqueues, list)
+		wq_update_unbound_numa(wq, cpu, true);
 
-		mutex_unlock(&wq_pool_mutex);
-		break;
-	}
-	return NOTIFY_OK;
+	mutex_unlock(&wq_pool_mutex);
+	return 0;
 }
 
-/*
- * Workqueues should be brought down after normal priority CPU notifiers.
- * This will be registered as low priority CPU notifier.
- */
-static int workqueue_cpu_down_callback(struct notifier_block *nfb,
-				       unsigned long action,
-				       void *hcpu)
+int workqueue_offline_cpu(unsigned int cpu)
 {
-	int cpu = (unsigned long)hcpu;
 	struct work_struct unbind_work;
 	struct workqueue_struct *wq;
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_DOWN_PREPARE:
-		/* unbinding per-cpu workers should happen on the local CPU */
-		INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
-		queue_work_on(cpu, system_highpri_wq, &unbind_work);
-
-		/* update NUMA affinity of unbound workqueues */
-		mutex_lock(&wq_pool_mutex);
-		list_for_each_entry(wq, &workqueues, list)
-			wq_update_unbound_numa(wq, cpu, false);
-		mutex_unlock(&wq_pool_mutex);
-
-		/* wait for per-cpu unbinding to finish */
-		flush_work(&unbind_work);
-		destroy_work_on_stack(&unbind_work);
-		break;
-	}
-	return NOTIFY_OK;
+	/* unbinding per-cpu workers should happen on the local CPU */
+	INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
+	queue_work_on(cpu, system_highpri_wq, &unbind_work);
+
+	/* update NUMA affinity of unbound workqueues */
+	mutex_lock(&wq_pool_mutex);
+	list_for_each_entry(wq, &workqueues, list)
+		wq_update_unbound_numa(wq, cpu, false);
+	mutex_unlock(&wq_pool_mutex);
+
+	/* wait for per-cpu unbinding to finish */
+	flush_work(&unbind_work);
+	destroy_work_on_stack(&unbind_work);
+	return 0;
 }
 
 #ifdef CONFIG_SMP
@@ -5486,9 +5467,6 @@ static int __init init_workqueues(void)
 
 	pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
 
-	cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
-	hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
-
 	wq_numa_init();
 
 	/* initialize CPU pools */
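Nothing replaces the removed cpu_notifier()/hotcpu_notifier() calls in this file: after the conversion the workqueue callbacks are wired up statically in the hotplug core rather than registered at init time. A sketch of what that wiring plausibly looks like, assuming the cpuhp state tables in kernel/cpu.c of this series; the struct layout and table entries below are inferred from the workqueue_*_cpu() functions above, not quoted from the commit:

#include <linux/cpuhotplug.h>
#include <linux/workqueue.h>

/*
 * Illustrative stand-in for the hotplug core's step descriptor;
 * "example_step" and its fields mirror the cpuhp_step layout of this
 * era and are assumptions, not kernel source.
 */
struct example_step {
        const char *name;
        int (*startup)(unsigned int cpu);
        int (*teardown)(unsigned int cpu);
};

static struct example_step example_states[] = {
        [CPUHP_WORKQUEUE_PREP] = {
                .name     = "workqueue prepare",
                .startup  = workqueue_prepare_cpu,   /* PREPARE stage, runs on the control CPU */
                .teardown = NULL,
        },
        [CPUHP_AP_WORKQUEUE_ONLINE] = {
                .name     = "workqueue online",
                .startup  = workqueue_online_cpu,    /* ONLINE stage, after the CPU is up */
                .teardown = workqueue_offline_cpu,   /* invoked on the way down */
        },
};

The ordering that the old scheme expressed through notifier priorities (CPU_PRI_WORKQUEUE_UP/DOWN) is now expressed by the position of these entries in the state enumeration, which is the point of the conversion.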