-rw-r--r--  include/linux/cpu.h        |   9 ---------
-rw-r--r--  include/linux/cpuhotplug.h |   2 ++
-rw-r--r--  include/linux/workqueue.h  |   6 ++++++
-rw-r--r--  kernel/cpu.c               |  10 ++++++++++
-rw-r--r--  kernel/workqueue.c         | 108 ++++++++++++++-----------------
5 files changed, 61 insertions(+), 74 deletions(-)
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index ca2dd865a34e..797d9c8e9a1b 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -55,15 +55,6 @@ extern ssize_t arch_cpu_release(const char *, size_t);
 #endif
 struct notifier_block;
 
-/*
- * CPU notifier priorities.
- */
-enum {
-	/* bring up workqueues before normal notifiers and down after */
-	CPU_PRI_WORKQUEUE_UP	= 5,
-	CPU_PRI_WORKQUEUE_DOWN	= -5,
-};
-
 #define CPU_ONLINE		0x0002 /* CPU (unsigned)v is up */
 #define CPU_UP_PREPARE		0x0003 /* CPU (unsigned)v coming up */
 #define CPU_UP_CANCELED		0x0004 /* CPU (unsigned)v NOT coming up */
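The two priorities removed above existed only to order the workqueue callbacks relative to ordinary notifiers: above-normal priority on the way up, below-normal on the way down. In the hotplug state machine that ordering falls out of enum position instead, because the core walks the state table upward on bring-up and downward on teardown. A simplified sketch of that contract (assumed behaviour for illustration; the real walker in kernel/cpu.c also tracks per-CPU progress and rolls back on failure):

	static void sketch_bringup(struct cpuhp_step *steps, int nr_steps,
				   unsigned int cpu)
	{
		int st;

		for (st = 0; st < nr_steps; st++)	/* ascending on CPU up */
			if (steps[st].startup)
				steps[st].startup(cpu);
	}

	static void sketch_teardown(struct cpuhp_step *steps, int nr_steps,
				    unsigned int cpu)
	{
		int st;

		for (st = nr_steps - 1; st >= 0; st--)	/* descending on CPU down */
			if (steps[st].teardown)
				steps[st].teardown(cpu);
	}

A single state slot therefore gives the "up early, down late" symmetry that CPU_PRI_WORKQUEUE_UP/CPU_PRI_WORKQUEUE_DOWN used to encode with two registrations.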
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index acfeda137df8..60557a9e783d 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -12,6 +12,7 @@ enum cpuhp_state {
 	CPUHP_PERF_BFIN,
 	CPUHP_PERF_POWER,
 	CPUHP_PERF_SUPERH,
+	CPUHP_WORKQUEUE_PREP,
 	CPUHP_NOTIFY_PREPARE,
 	CPUHP_BRINGUP_CPU,
 	CPUHP_AP_IDLE_DEAD,
@@ -49,6 +50,7 @@ enum cpuhp_state {
 	CPUHP_AP_PERF_S390_SF_ONLINE,
 	CPUHP_AP_PERF_ARM_CCI_ONLINE,
 	CPUHP_AP_PERF_ARM_CCN_ONLINE,
+	CPUHP_AP_WORKQUEUE_ONLINE,
 	CPUHP_AP_NOTIFY_ONLINE,
 	CPUHP_AP_ONLINE_DYN,
 	CPUHP_AP_ONLINE_DYN_END		= CPUHP_AP_ONLINE_DYN + 30,
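Placement in this enum is the whole ordering story: CPUHP_WORKQUEUE_PREP sits before CPUHP_NOTIFY_PREPARE, so per-cpu worker pools are populated before the legacy prepare notifiers run, and CPUHP_AP_WORKQUEUE_ONLINE sits before CPUHP_AP_NOTIFY_ONLINE, so workqueues come online before the legacy online notifiers and are torn down after the legacy down notifiers. If one wanted to pin that invariant down, a hypothetical compile-time check (not part of this patch) could read:

	#include <linux/bug.h>
	#include <linux/cpuhotplug.h>

	/* Hypothetical: the ordering guarantee is purely positional. */
	static inline void check_workqueue_state_order(void)
	{
		BUILD_BUG_ON(CPUHP_WORKQUEUE_PREP >= CPUHP_NOTIFY_PREPARE);
		BUILD_BUG_ON(CPUHP_AP_WORKQUEUE_ONLINE >= CPUHP_AP_NOTIFY_ONLINE);
	}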
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index ca73c503b92a..26cc1df280d6 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -625,4 +625,10 @@ void wq_watchdog_touch(int cpu);
 static inline void wq_watchdog_touch(int cpu) { }
 #endif	/* CONFIG_WQ_WATCHDOG */
 
+#ifdef CONFIG_SMP
+int workqueue_prepare_cpu(unsigned int cpu);
+int workqueue_online_cpu(unsigned int cpu);
+int workqueue_offline_cpu(unsigned int cpu);
+#endif
+
 #endif
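These prototypes are needed because kernel/cpu.c now references the callbacks directly from its state tables; the old notifier versions were static to kernel/workqueue.c and self-registering. The patch provides no !CONFIG_SMP variants, since nothing outside the SMP hotplug path calls them. Were UP callers ever needed, the usual header idiom would be no-op stubs (hypothetical, not part of this patch):

	#ifdef CONFIG_SMP
	int workqueue_prepare_cpu(unsigned int cpu);
	int workqueue_online_cpu(unsigned int cpu);
	int workqueue_offline_cpu(unsigned int cpu);
	#else	/* hypothetical stubs, not in this patch */
	static inline int workqueue_prepare_cpu(unsigned int cpu) { return 0; }
	static inline int workqueue_online_cpu(unsigned int cpu)  { return 0; }
	static inline int workqueue_offline_cpu(unsigned int cpu) { return 0; }
	#endif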
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 3705d9043c08..af53f820fec9 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -1185,6 +1185,11 @@ static struct cpuhp_step cpuhp_bp_states[] = {
 		.startup		= perf_event_init_cpu,
 		.teardown		= perf_event_exit_cpu,
 	},
+	[CPUHP_WORKQUEUE_PREP] = {
+		.name			= "workqueue prepare",
+		.startup		= workqueue_prepare_cpu,
+		.teardown		= NULL,
+	},
 	/*
 	 * Preparatory and dead notifiers. Will be replaced once the notifiers
 	 * are converted to states.
@@ -1267,6 +1272,11 @@ static struct cpuhp_step cpuhp_ap_states[] = {
 		.startup		= perf_event_init_cpu,
 		.teardown		= perf_event_exit_cpu,
 	},
+	[CPUHP_AP_WORKQUEUE_ONLINE] = {
+		.name			= "workqueue online",
+		.startup		= workqueue_online_cpu,
+		.teardown		= workqueue_offline_cpu,
+	},
 
 	/*
 	 * Online/down_prepare notifiers. Will be removed once the notifiers
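The prepare entry has .teardown = NULL: a failed bring-up simply leaves any created workers in place, which matches the old notifier, whose switch handled no CPU_UP_CANCELED case. Subsystems that cannot claim fixed slots in these static tables would use the dynamic registration API instead; a hypothetical equivalent of the two entries above (this patch deliberately uses static slots so the ordering is fixed at compile time):

	/* Hypothetical dynamic registration achieving the same wiring. */
	static int __init example_register_wq_states(void)
	{
		int ret;

		ret = cpuhp_setup_state(CPUHP_WORKQUEUE_PREP, "workqueue prepare",
					workqueue_prepare_cpu, NULL);
		if (ret)
			return ret;

		return cpuhp_setup_state(CPUHP_AP_WORKQUEUE_ONLINE,
					 "workqueue online",
					 workqueue_online_cpu,
					 workqueue_offline_cpu);
	}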
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index e1c0e996b5ae..c9dd5fbdbf33 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -4611,84 +4611,65 @@ static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
 					  pool->attrs->cpumask) < 0);
 }
 
-/*
- * Workqueues should be brought up before normal priority CPU notifiers.
- * This will be registered high priority CPU notifier.
- */
-static int workqueue_cpu_up_callback(struct notifier_block *nfb,
-					       unsigned long action,
-					       void *hcpu)
+int workqueue_prepare_cpu(unsigned int cpu)
+{
+	struct worker_pool *pool;
+
+	for_each_cpu_worker_pool(pool, cpu) {
+		if (pool->nr_workers)
+			continue;
+		if (!create_worker(pool))
+			return -ENOMEM;
+	}
+	return 0;
+}
+
+int workqueue_online_cpu(unsigned int cpu)
 {
-	int cpu = (unsigned long)hcpu;
 	struct worker_pool *pool;
 	struct workqueue_struct *wq;
 	int pi;
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
-		for_each_cpu_worker_pool(pool, cpu) {
-			if (pool->nr_workers)
-				continue;
-			if (!create_worker(pool))
-				return NOTIFY_BAD;
-		}
-		break;
-
-	case CPU_DOWN_FAILED:
-	case CPU_ONLINE:
-		mutex_lock(&wq_pool_mutex);
+	mutex_lock(&wq_pool_mutex);
 
-		for_each_pool(pool, pi) {
-			mutex_lock(&pool->attach_mutex);
+	for_each_pool(pool, pi) {
+		mutex_lock(&pool->attach_mutex);
 
-			if (pool->cpu == cpu)
-				rebind_workers(pool);
-			else if (pool->cpu < 0)
-				restore_unbound_workers_cpumask(pool, cpu);
+		if (pool->cpu == cpu)
+			rebind_workers(pool);
+		else if (pool->cpu < 0)
+			restore_unbound_workers_cpumask(pool, cpu);
 
-			mutex_unlock(&pool->attach_mutex);
-		}
+		mutex_unlock(&pool->attach_mutex);
+	}
 
-		/* update NUMA affinity of unbound workqueues */
-		list_for_each_entry(wq, &workqueues, list)
-			wq_update_unbound_numa(wq, cpu, true);
+	/* update NUMA affinity of unbound workqueues */
+	list_for_each_entry(wq, &workqueues, list)
+		wq_update_unbound_numa(wq, cpu, true);
 
-		mutex_unlock(&wq_pool_mutex);
-		break;
-	}
-	return NOTIFY_OK;
+	mutex_unlock(&wq_pool_mutex);
+	return 0;
 }
 
-/*
- * Workqueues should be brought down after normal priority CPU notifiers.
- * This will be registered as low priority CPU notifier.
- */
-static int workqueue_cpu_down_callback(struct notifier_block *nfb,
-						 unsigned long action,
-						 void *hcpu)
+int workqueue_offline_cpu(unsigned int cpu)
 {
-	int cpu = (unsigned long)hcpu;
 	struct work_struct unbind_work;
 	struct workqueue_struct *wq;
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_DOWN_PREPARE:
-		/* unbinding per-cpu workers should happen on the local CPU */
-		INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
-		queue_work_on(cpu, system_highpri_wq, &unbind_work);
-
-		/* update NUMA affinity of unbound workqueues */
-		mutex_lock(&wq_pool_mutex);
-		list_for_each_entry(wq, &workqueues, list)
-			wq_update_unbound_numa(wq, cpu, false);
-		mutex_unlock(&wq_pool_mutex);
-
-		/* wait for per-cpu unbinding to finish */
-		flush_work(&unbind_work);
-		destroy_work_on_stack(&unbind_work);
-		break;
-	}
-	return NOTIFY_OK;
+	/* unbinding per-cpu workers should happen on the local CPU */
+	INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
+	queue_work_on(cpu, system_highpri_wq, &unbind_work);
+
+	/* update NUMA affinity of unbound workqueues */
+	mutex_lock(&wq_pool_mutex);
+	list_for_each_entry(wq, &workqueues, list)
+		wq_update_unbound_numa(wq, cpu, false);
+	mutex_unlock(&wq_pool_mutex);
+
+	/* wait for per-cpu unbinding to finish */
+	flush_work(&unbind_work);
+	destroy_work_on_stack(&unbind_work);
+	return 0;
 }
 
 #ifdef CONFIG_SMP
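workqueue_offline_cpu() keeps the trick the old down callback used: unbinding must run on the outgoing CPU itself, so it is packaged as an on-stack work item queued to that CPU and flushed before the function returns. The same pattern works for any "run this function on CPU X and wait" need; a self-contained sketch with hypothetical names:

	#include <linux/workqueue.h>

	static void my_percpu_fn(struct work_struct *work)
	{
		/* executes on the CPU the work item was queued to */
	}

	static void run_on_cpu_and_wait(int cpu)
	{
		struct work_struct w;

		INIT_WORK_ONSTACK(&w, my_percpu_fn);
		queue_work_on(cpu, system_highpri_wq, &w);
		flush_work(&w);			/* wait for it to finish */
		destroy_work_on_stack(&w);	/* required for on-stack items */
	}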
@@ -5490,9 +5471,6 @@
 
 	pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
 
-	cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
-	hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
-
 	wq_numa_init();
 
 	/* initialize CPU pools */
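With the explicit notifier registration gone, the error contract also changes shape: the old callback could only return NOTIFY_BAD, while workqueue_prepare_cpu() now returns -ENOMEM, a real errno the hotplug core can act on by unwinding the states it has already brought up. A hedged sketch of a prepare/dead pair for a hypothetical driver following the same contract (all names here are illustrative, not from this patch):

	#include <linux/percpu.h>
	#include <linux/slab.h>

	static DEFINE_PER_CPU(void *, example_buf);	/* hypothetical state */

	static int example_prepare_cpu(unsigned int cpu)
	{
		void *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);

		if (!buf)
			return -ENOMEM;	/* core aborts bring-up and rolls back */
		per_cpu(example_buf, cpu) = buf;
		return 0;
	}

	static int example_dead_cpu(unsigned int cpu)
	{
		kfree(per_cpu(example_buf, cpu));
		per_cpu(example_buf, cpu) = NULL;
		return 0;
	}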