author    | Linus Torvalds <torvalds@linux-foundation.org> | 2016-07-29 16:55:30 -0400
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-07-29 16:55:30 -0400
commit    | a6408f6cb63ac0958fee7dbce7861ffb540d8a49 (patch)
tree      | c94a835d343974171951e3b805e6bbbb02852ebc
parent    | 1a81a8f2a5918956e214bb718099a89e500e7ec5 (diff)
parent    | 4fae16dffb812f0e0d98a0b2b0856ca48ca63e6c (diff)
Merge branch 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull smp hotplug updates from Thomas Gleixner:
"This is the next part of the hotplug rework.
- Convert all notifiers with a priority assigned
- Convert all CPU_STARTING/DYING notifiers
The final removal of the STARTING/DYING infrastructure will happen
when the merge window closes.
Another 700 lines of impenetrable maze gone :)"
* 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (70 commits)
timers/core: Correct callback order during CPU hot plug
leds/trigger/cpu: Move from CPU_STARTING to ONLINE level
powerpc/numa: Convert to hotplug state machine
arm/perf: Fix hotplug state machine conversion
irqchip/armada: Avoid unused function warnings
ARC/time: Convert to hotplug state machine
clocksource/atlas7: Convert to hotplug state machine
clocksource/armada-370-xp: Convert to hotplug state machine
clocksource/exynos_mct: Convert to hotplug state machine
clocksource/arm_global_timer: Convert to hotplug state machine
rcu: Convert rcutree to hotplug state machine
KVM/arm/arm64/vgic-new: Convert to hotplug state machine
smp/cfd: Convert core to hotplug state machine
x86/x2apic: Convert to CPU hotplug state machine
profile: Convert to hotplug state machine
timers/core: Convert to hotplug state machine
hrtimer: Convert to hotplug state machine
x86/tboot: Convert to hotplug state machine
arm64/armv8 deprecated: Convert to hotplug state machine
hwtracing/coresight-etm4x: Convert to hotplug state machine
...
75 files changed, 1316 insertions, 2013 deletions
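The conversions below all follow one pattern: an open-coded notifier_block that switched on CPU_STARTING/CPU_DYING (or the PREPARE/ONLINE/DEAD actions) is replaced by a pair of plain per-CPU callbacks registered through cpuhp_setup_state(). For orientation, here is a minimal sketch of that target shape; the foo_* names and CPUHP_AP_FOO_STARTING are placeholders for illustration, not symbols from these patches.

```c
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

/* Bring-up side: runs on the incoming CPU, replaces the CPU_STARTING case. */
static int foo_starting_cpu(unsigned int cpu)
{
	/* enable per-CPU resources here, e.g. a per-cpu IRQ or counter */
	return 0;		/* 0 on success, negative errno on failure */
}

/* Teardown side: runs on the outgoing CPU, replaces the CPU_DYING case. */
static int foo_dying_cpu(unsigned int cpu)
{
	/* disable the per-CPU resources again */
	return 0;
}

static int __init foo_init(void)
{
	/*
	 * CPUHP_AP_FOO_STARTING is a placeholder; real conversions add an
	 * entry to enum cpuhp_state in include/linux/cpuhotplug.h.
	 * cpuhp_setup_state() also invokes the startup callback on all
	 * CPUs that are already online; the _nocalls() variant used by
	 * some patches below skips those invocations.
	 */
	return cpuhp_setup_state(CPUHP_AP_FOO_STARTING, "AP_FOO_STARTING",
				 foo_starting_cpu, foo_dying_cpu);
}
```

Teardown is symmetric: a driver that can be unloaded removes the callbacks again with cpuhp_remove_state() or its _nocalls() variant using the same state constant, as the loongson3 and AMD power patches below do.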
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
index 98f22d2eb563..f927b8dc6edd 100644
--- a/arch/arc/kernel/time.c
+++ b/arch/arc/kernel/time.c
@@ -296,30 +296,23 @@ static irqreturn_t timer_irq_handler(int irq, void *dev_id)
296 | return IRQ_HANDLED; | 296 | return IRQ_HANDLED; |
297 | } | 297 | } |
298 | 298 | ||
299 | static int arc_timer_cpu_notify(struct notifier_block *self, | 299 | |
300 | unsigned long action, void *hcpu) | 300 | static int arc_timer_starting_cpu(unsigned int cpu) |
301 | { | 301 | { |
302 | struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device); | 302 | struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device); |
303 | 303 | ||
304 | evt->cpumask = cpumask_of(smp_processor_id()); | 304 | evt->cpumask = cpumask_of(smp_processor_id()); |
305 | 305 | ||
306 | switch (action & ~CPU_TASKS_FROZEN) { | 306 | clockevents_config_and_register(evt, arc_timer_freq, 0, ARC_TIMER_MAX); |
307 | case CPU_STARTING: | 307 | enable_percpu_irq(arc_timer_irq, 0); |
308 | clockevents_config_and_register(evt, arc_timer_freq, | 308 | return 0; |
309 | 0, ULONG_MAX); | ||
310 | enable_percpu_irq(arc_timer_irq, 0); | ||
311 | break; | ||
312 | case CPU_DYING: | ||
313 | disable_percpu_irq(arc_timer_irq); | ||
314 | break; | ||
315 | } | ||
316 | |||
317 | return NOTIFY_OK; | ||
318 | } | 309 | } |
319 | 310 | ||
320 | static struct notifier_block arc_timer_cpu_nb = { | 311 | static int arc_timer_dying_cpu(unsigned int cpu) |
321 | .notifier_call = arc_timer_cpu_notify, | 312 | { |
322 | }; | 313 | disable_percpu_irq(arc_timer_irq); |
314 | return 0; | ||
315 | } | ||
323 | 316 | ||
324 | /* | 317 | /* |
325 | * clockevent setup for boot CPU | 318 | * clockevent setup for boot CPU |
@@ -329,12 +322,6 @@ static int __init arc_clockevent_setup(struct device_node *node)
329 | struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device); | 322 | struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device); |
330 | int ret; | 323 | int ret; |
331 | 324 | ||
332 | ret = register_cpu_notifier(&arc_timer_cpu_nb); | ||
333 | if (ret) { | ||
334 | pr_err("Failed to register cpu notifier"); | ||
335 | return ret; | ||
336 | } | ||
337 | |||
338 | arc_timer_irq = irq_of_parse_and_map(node, 0); | 325 | arc_timer_irq = irq_of_parse_and_map(node, 0); |
339 | if (arc_timer_irq <= 0) { | 326 | if (arc_timer_irq <= 0) { |
340 | pr_err("clockevent: missing irq"); | 327 | pr_err("clockevent: missing irq"); |
@@ -347,11 +334,6 @@ static int __init arc_clockevent_setup(struct device_node *node)
347 | return ret; | 334 | return ret; |
348 | } | 335 | } |
349 | 336 | ||
350 | evt->irq = arc_timer_irq; | ||
351 | evt->cpumask = cpumask_of(smp_processor_id()); | ||
352 | clockevents_config_and_register(evt, arc_timer_freq, | ||
353 | 0, ARC_TIMER_MAX); | ||
354 | |||
355 | /* Needs apriori irq_set_percpu_devid() done in intc map function */ | 337 | /* Needs apriori irq_set_percpu_devid() done in intc map function */ |
356 | ret = request_percpu_irq(arc_timer_irq, timer_irq_handler, | 338 | ret = request_percpu_irq(arc_timer_irq, timer_irq_handler, |
357 | "Timer0 (per-cpu-tick)", evt); | 339 | "Timer0 (per-cpu-tick)", evt); |
@@ -360,8 +342,14 @@ static int __init arc_clockevent_setup(struct device_node *node)
360 | return ret; | 342 | return ret; |
361 | } | 343 | } |
362 | 344 | ||
363 | enable_percpu_irq(arc_timer_irq, 0); | 345 | ret = cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING, |
364 | 346 | "AP_ARC_TIMER_STARTING", | |
347 | arc_timer_starting_cpu, | ||
348 | arc_timer_dying_cpu); | ||
349 | if (ret) { | ||
350 | pr_err("Failed to setup hotplug state"); | ||
351 | return ret; | ||
352 | } | ||
365 | return 0; | 353 | return 0; |
366 | } | 354 | } |
367 | 355 | ||
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index b6ec65e68009..02d5e5e8d44c 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -310,24 +310,17 @@ static void twd_timer_setup(void)
310 | enable_percpu_irq(clk->irq, 0); | 310 | enable_percpu_irq(clk->irq, 0); |
311 | } | 311 | } |
312 | 312 | ||
313 | static int twd_timer_cpu_notify(struct notifier_block *self, | 313 | static int twd_timer_starting_cpu(unsigned int cpu) |
314 | unsigned long action, void *hcpu) | ||
315 | { | 314 | { |
316 | switch (action & ~CPU_TASKS_FROZEN) { | 315 | twd_timer_setup(); |
317 | case CPU_STARTING: | 316 | return 0; |
318 | twd_timer_setup(); | ||
319 | break; | ||
320 | case CPU_DYING: | ||
321 | twd_timer_stop(); | ||
322 | break; | ||
323 | } | ||
324 | |||
325 | return NOTIFY_OK; | ||
326 | } | 317 | } |
327 | 318 | ||
328 | static struct notifier_block twd_timer_cpu_nb = { | 319 | static int twd_timer_dying_cpu(unsigned int cpu) |
329 | .notifier_call = twd_timer_cpu_notify, | 320 | { |
330 | }; | 321 | twd_timer_stop(); |
322 | return 0; | ||
323 | } | ||
331 | 324 | ||
332 | static int __init twd_local_timer_common_register(struct device_node *np) | 325 | static int __init twd_local_timer_common_register(struct device_node *np) |
333 | { | 326 | { |
@@ -345,9 +338,9 @@ static int __init twd_local_timer_common_register(struct device_node *np)
345 | goto out_free; | 338 | goto out_free; |
346 | } | 339 | } |
347 | 340 | ||
348 | err = register_cpu_notifier(&twd_timer_cpu_nb); | 341 | cpuhp_setup_state_nocalls(CPUHP_AP_ARM_TWD_STARTING, |
349 | if (err) | 342 | "AP_ARM_TWD_STARTING", |
350 | goto out_irq; | 343 | twd_timer_starting_cpu, twd_timer_dying_cpu); |
351 | 344 | ||
352 | twd_get_clock(np); | 345 | twd_get_clock(np); |
353 | if (!of_property_read_bool(np, "always-on")) | 346 | if (!of_property_read_bool(np, "always-on")) |
@@ -365,8 +358,6 @@ static int __init twd_local_timer_common_register(struct device_node *np)
365 | 358 | ||
366 | return 0; | 359 | return 0; |
367 | 360 | ||
368 | out_irq: | ||
369 | free_percpu_irq(twd_ppi, twd_evt); | ||
370 | out_free: | 361 | out_free: |
371 | iounmap(twd_base); | 362 | iounmap(twd_base); |
372 | twd_base = NULL; | 363 | twd_base = NULL; |
diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
index e80f0dde2189..ae2a018b9305 100644
--- a/arch/arm/mach-mvebu/coherency.c
+++ b/arch/arm/mach-mvebu/coherency.c
@@ -111,20 +111,12 @@ static struct notifier_block mvebu_hwcc_pci_nb __maybe_unused = {
111 | .notifier_call = mvebu_hwcc_notifier, | 111 | .notifier_call = mvebu_hwcc_notifier, |
112 | }; | 112 | }; |
113 | 113 | ||
114 | static int armada_xp_clear_shared_l2_notifier_func(struct notifier_block *nfb, | 114 | static int armada_xp_clear_l2_starting(unsigned int cpu) |
115 | unsigned long action, void *hcpu) | ||
116 | { | 115 | { |
117 | if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) | 116 | armada_xp_clear_shared_l2(); |
118 | armada_xp_clear_shared_l2(); | 117 | return 0; |
119 | |||
120 | return NOTIFY_OK; | ||
121 | } | 118 | } |
122 | 119 | ||
123 | static struct notifier_block armada_xp_clear_shared_l2_notifier = { | ||
124 | .notifier_call = armada_xp_clear_shared_l2_notifier_func, | ||
125 | .priority = 100, | ||
126 | }; | ||
127 | |||
128 | static void __init armada_370_coherency_init(struct device_node *np) | 120 | static void __init armada_370_coherency_init(struct device_node *np) |
129 | { | 121 | { |
130 | struct resource res; | 122 | struct resource res; |
@@ -155,8 +147,9 @@ static void __init armada_370_coherency_init(struct device_node *np)
155 | 147 | ||
156 | of_node_put(cpu_config_np); | 148 | of_node_put(cpu_config_np); |
157 | 149 | ||
158 | register_cpu_notifier(&armada_xp_clear_shared_l2_notifier); | 150 | cpuhp_setup_state_nocalls(CPUHP_AP_ARM_MVEBU_COHERENCY, |
159 | 151 | "AP_ARM_MVEBU_COHERENCY", | |
152 | armada_xp_clear_l2_starting, NULL); | ||
160 | exit: | 153 | exit: |
161 | set_cpu_coherent(); | 154 | set_cpu_coherent(); |
162 | } | 155 | } |
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index c61996c256cc..cc12905ae6f8 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -597,17 +597,16 @@ static void l2c310_configure(void __iomem *base)
597 | L310_POWER_CTRL); | 597 | L310_POWER_CTRL); |
598 | } | 598 | } |
599 | 599 | ||
600 | static int l2c310_cpu_enable_flz(struct notifier_block *nb, unsigned long act, void *data) | 600 | static int l2c310_starting_cpu(unsigned int cpu) |
601 | { | 601 | { |
602 | switch (act & ~CPU_TASKS_FROZEN) { | 602 | set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1)); |
603 | case CPU_STARTING: | 603 | return 0; |
604 | set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1)); | 604 | } |
605 | break; | 605 | |
606 | case CPU_DYING: | 606 | static int l2c310_dying_cpu(unsigned int cpu) |
607 | set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1))); | 607 | { |
608 | break; | 608 | set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1))); |
609 | } | 609 | return 0; |
610 | return NOTIFY_OK; | ||
611 | } | 610 | } |
612 | 611 | ||
613 | static void __init l2c310_enable(void __iomem *base, unsigned num_lock) | 612 | static void __init l2c310_enable(void __iomem *base, unsigned num_lock) |
@@ -678,10 +677,10 @@ static void __init l2c310_enable(void __iomem *base, unsigned num_lock)
678 | power_ctrl & L310_STNDBY_MODE_EN ? "en" : "dis"); | 677 | power_ctrl & L310_STNDBY_MODE_EN ? "en" : "dis"); |
679 | } | 678 | } |
680 | 679 | ||
681 | if (aux & L310_AUX_CTRL_FULL_LINE_ZERO) { | 680 | if (aux & L310_AUX_CTRL_FULL_LINE_ZERO) |
682 | set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1)); | 681 | cpuhp_setup_state(CPUHP_AP_ARM_L2X0_STARTING, |
683 | cpu_notifier(l2c310_cpu_enable_flz, 0); | 682 | "AP_ARM_L2X0_STARTING", l2c310_starting_cpu, |
684 | } | 683 | l2c310_dying_cpu); |
685 | } | 684 | } |
686 | 685 | ||
687 | static void __init l2c310_fixup(void __iomem *base, u32 cache_id, | 686 | static void __init l2c310_fixup(void __iomem *base, u32 cache_id, |
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 73085d3482ed..da0b33deba6d 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -643,19 +643,19 @@ int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
643 | * hardware state at every thread switch. We clear our held state when | 643 | * hardware state at every thread switch. We clear our held state when |
644 | * a CPU has been killed, indicating that the VFP hardware doesn't contain | 644 | * a CPU has been killed, indicating that the VFP hardware doesn't contain |
645 | * a threads VFP state. When a CPU starts up, we re-enable access to the | 645 | * a threads VFP state. When a CPU starts up, we re-enable access to the |
646 | * VFP hardware. | 646 | * VFP hardware. The callbacks below are called on the CPU which |
647 | * | ||
648 | * Both CPU_DYING and CPU_STARTING are called on the CPU which | ||
649 | * is being offlined/onlined. | 647 | * is being offlined/onlined. |
650 | */ | 648 | */ |
651 | static int vfp_hotplug(struct notifier_block *b, unsigned long action, | 649 | static int vfp_dying_cpu(unsigned int cpu) |
652 | void *hcpu) | ||
653 | { | 650 | { |
654 | if (action == CPU_DYING || action == CPU_DYING_FROZEN) | 651 | vfp_force_reload(cpu, current_thread_info()); |
655 | vfp_current_hw_state[(long)hcpu] = NULL; | 652 | return 0; |
656 | else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) | 653 | } |
657 | vfp_enable(NULL); | 654 | |
658 | return NOTIFY_OK; | 655 | static int vfp_starting_cpu(unsigned int unused) |
656 | { | ||
657 | vfp_enable(NULL); | ||
658 | return 0; | ||
659 | } | 659 | } |
660 | 660 | ||
661 | void vfp_kmode_exception(void) | 661 | void vfp_kmode_exception(void) |
@@ -732,6 +732,10 @@ static int __init vfp_init(void)
732 | unsigned int vfpsid; | 732 | unsigned int vfpsid; |
733 | unsigned int cpu_arch = cpu_architecture(); | 733 | unsigned int cpu_arch = cpu_architecture(); |
734 | 734 | ||
735 | /* | ||
736 | * Enable the access to the VFP on all online CPUs so the | ||
737 | * following test on FPSID will succeed. | ||
738 | */ | ||
735 | if (cpu_arch >= CPU_ARCH_ARMv6) | 739 | if (cpu_arch >= CPU_ARCH_ARMv6) |
736 | on_each_cpu(vfp_enable, NULL, 1); | 740 | on_each_cpu(vfp_enable, NULL, 1); |
737 | 741 | ||
@@ -794,7 +798,9 @@ static int __init vfp_init(void)
794 | VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT; | 798 | VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT; |
795 | } | 799 | } |
796 | 800 | ||
797 | hotcpu_notifier(vfp_hotplug, 0); | 801 | cpuhp_setup_state_nocalls(CPUHP_AP_ARM_VFP_STARTING, |
802 | "AP_ARM_VFP_STARTING", vfp_starting_cpu, | ||
803 | vfp_dying_cpu); | ||
798 | 804 | ||
799 | vfp_vector = vfp_support_entry; | 805 | vfp_vector = vfp_support_entry; |
800 | 806 | ||
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 0bea3d271f6e..b0b82f5ea338 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -153,12 +153,11 @@ static struct notifier_block xen_pvclock_gtod_notifier = {
153 | .notifier_call = xen_pvclock_gtod_notify, | 153 | .notifier_call = xen_pvclock_gtod_notify, |
154 | }; | 154 | }; |
155 | 155 | ||
156 | static void xen_percpu_init(void) | 156 | static int xen_starting_cpu(unsigned int cpu) |
157 | { | 157 | { |
158 | struct vcpu_register_vcpu_info info; | 158 | struct vcpu_register_vcpu_info info; |
159 | struct vcpu_info *vcpup; | 159 | struct vcpu_info *vcpup; |
160 | int err; | 160 | int err; |
161 | int cpu = get_cpu(); | ||
162 | 161 | ||
163 | /* | 162 | /* |
164 | * VCPUOP_register_vcpu_info cannot be called twice for the same | 163 | * VCPUOP_register_vcpu_info cannot be called twice for the same |
@@ -186,7 +185,13 @@ static void xen_percpu_init(void)
186 | 185 | ||
187 | after_register_vcpu_info: | 186 | after_register_vcpu_info: |
188 | enable_percpu_irq(xen_events_irq, 0); | 187 | enable_percpu_irq(xen_events_irq, 0); |
189 | put_cpu(); | 188 | return 0; |
189 | } | ||
190 | |||
191 | static int xen_dying_cpu(unsigned int cpu) | ||
192 | { | ||
193 | disable_percpu_irq(xen_events_irq); | ||
194 | return 0; | ||
190 | } | 195 | } |
191 | 196 | ||
192 | static void xen_restart(enum reboot_mode reboot_mode, const char *cmd) | 197 | static void xen_restart(enum reboot_mode reboot_mode, const char *cmd) |
@@ -205,28 +210,6 @@ static void xen_power_off(void)
205 | BUG_ON(rc); | 210 | BUG_ON(rc); |
206 | } | 211 | } |
207 | 212 | ||
208 | static int xen_cpu_notification(struct notifier_block *self, | ||
209 | unsigned long action, | ||
210 | void *hcpu) | ||
211 | { | ||
212 | switch (action) { | ||
213 | case CPU_STARTING: | ||
214 | xen_percpu_init(); | ||
215 | break; | ||
216 | case CPU_DYING: | ||
217 | disable_percpu_irq(xen_events_irq); | ||
218 | break; | ||
219 | default: | ||
220 | break; | ||
221 | } | ||
222 | |||
223 | return NOTIFY_OK; | ||
224 | } | ||
225 | |||
226 | static struct notifier_block xen_cpu_notifier = { | ||
227 | .notifier_call = xen_cpu_notification, | ||
228 | }; | ||
229 | |||
230 | static irqreturn_t xen_arm_callback(int irq, void *arg) | 213 | static irqreturn_t xen_arm_callback(int irq, void *arg) |
231 | { | 214 | { |
232 | xen_hvm_evtchn_do_upcall(); | 215 | xen_hvm_evtchn_do_upcall(); |
@@ -425,16 +408,14 @@ static int __init xen_guest_init(void)
425 | return -EINVAL; | 408 | return -EINVAL; |
426 | } | 409 | } |
427 | 410 | ||
428 | xen_percpu_init(); | ||
429 | |||
430 | register_cpu_notifier(&xen_cpu_notifier); | ||
431 | |||
432 | xen_time_setup_guest(); | 411 | xen_time_setup_guest(); |
433 | 412 | ||
434 | if (xen_initial_domain()) | 413 | if (xen_initial_domain()) |
435 | pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier); | 414 | pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier); |
436 | 415 | ||
437 | return 0; | 416 | return cpuhp_setup_state(CPUHP_AP_ARM_XEN_STARTING, |
417 | "AP_ARM_XEN_STARTING", xen_starting_cpu, | ||
418 | xen_dying_cpu); | ||
438 | } | 419 | } |
439 | early_initcall(xen_guest_init); | 420 | early_initcall(xen_guest_init); |
440 | 421 | ||
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index 5f72475e2e3b..42ffdb54e162 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -121,7 +121,7 @@ static int run_all_cpu_set_hw_mode(struct insn_emulation *insn, bool enable)
121 | * 0 - If all the hooks ran successfully. | 121 | * 0 - If all the hooks ran successfully. |
122 | * -EINVAL - At least one hook is not supported by the CPU. | 122 | * -EINVAL - At least one hook is not supported by the CPU. |
123 | */ | 123 | */ |
124 | static int run_all_insn_set_hw_mode(unsigned long cpu) | 124 | static int run_all_insn_set_hw_mode(unsigned int cpu) |
125 | { | 125 | { |
126 | int rc = 0; | 126 | int rc = 0; |
127 | unsigned long flags; | 127 | unsigned long flags; |
@@ -131,7 +131,7 @@ static int run_all_insn_set_hw_mode(unsigned long cpu)
131 | list_for_each_entry(insn, &insn_emulation, node) { | 131 | list_for_each_entry(insn, &insn_emulation, node) { |
132 | bool enable = (insn->current_mode == INSN_HW); | 132 | bool enable = (insn->current_mode == INSN_HW); |
133 | if (insn->ops->set_hw_mode && insn->ops->set_hw_mode(enable)) { | 133 | if (insn->ops->set_hw_mode && insn->ops->set_hw_mode(enable)) { |
134 | pr_warn("CPU[%ld] cannot support the emulation of %s", | 134 | pr_warn("CPU[%u] cannot support the emulation of %s", |
135 | cpu, insn->ops->name); | 135 | cpu, insn->ops->name); |
136 | rc = -EINVAL; | 136 | rc = -EINVAL; |
137 | } | 137 | } |
@@ -611,20 +611,6 @@ static struct insn_emulation_ops setend_ops = {
611 | .set_hw_mode = setend_set_hw_mode, | 611 | .set_hw_mode = setend_set_hw_mode, |
612 | }; | 612 | }; |
613 | 613 | ||
614 | static int insn_cpu_hotplug_notify(struct notifier_block *b, | ||
615 | unsigned long action, void *hcpu) | ||
616 | { | ||
617 | int rc = 0; | ||
618 | if ((action & ~CPU_TASKS_FROZEN) == CPU_STARTING) | ||
619 | rc = run_all_insn_set_hw_mode((unsigned long)hcpu); | ||
620 | |||
621 | return notifier_from_errno(rc); | ||
622 | } | ||
623 | |||
624 | static struct notifier_block insn_cpu_hotplug_notifier = { | ||
625 | .notifier_call = insn_cpu_hotplug_notify, | ||
626 | }; | ||
627 | |||
628 | /* | 614 | /* |
629 | * Invoked as late_initcall, since not needed before init spawned. | 615 | * Invoked as late_initcall, since not needed before init spawned. |
630 | */ | 616 | */ |
@@ -643,7 +629,9 @@ static int __init armv8_deprecated_init(void)
643 | pr_info("setend instruction emulation is not supported on the system"); | 629 | pr_info("setend instruction emulation is not supported on the system"); |
644 | } | 630 | } |
645 | 631 | ||
646 | register_cpu_notifier(&insn_cpu_hotplug_notifier); | 632 | cpuhp_setup_state_nocalls(CPUHP_AP_ARM64_ISNDEP_STARTING, |
633 | "AP_ARM64_ISNDEP_STARTING", | ||
634 | run_all_insn_set_hw_mode, NULL); | ||
647 | register_insn_emulation_sysctl(ctl_abi); | 635 | register_insn_emulation_sysctl(ctl_abi); |
648 | 636 | ||
649 | return 0; | 637 | return 0; |
diff --git a/arch/blackfin/kernel/perf_event.c b/arch/blackfin/kernel/perf_event.c
index 170d786807c4..6355e97d22b9 100644
--- a/arch/blackfin/kernel/perf_event.c
+++ b/arch/blackfin/kernel/perf_event.c
@@ -453,29 +453,13 @@ static struct pmu pmu = {
453 | .read = bfin_pmu_read, | 453 | .read = bfin_pmu_read, |
454 | }; | 454 | }; |
455 | 455 | ||
456 | static void bfin_pmu_setup(int cpu) | 456 | static int bfin_pmu_prepare_cpu(unsigned int cpu) |
457 | { | 457 | { |
458 | struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu); | 458 | struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu); |
459 | 459 | ||
460 | bfin_write_PFCTL(0); | ||
460 | memset(cpuhw, 0, sizeof(struct cpu_hw_events)); | 461 | memset(cpuhw, 0, sizeof(struct cpu_hw_events)); |
461 | } | 462 | return 0; |
462 | |||
463 | static int | ||
464 | bfin_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) | ||
465 | { | ||
466 | unsigned int cpu = (long)hcpu; | ||
467 | |||
468 | switch (action & ~CPU_TASKS_FROZEN) { | ||
469 | case CPU_UP_PREPARE: | ||
470 | bfin_write_PFCTL(0); | ||
471 | bfin_pmu_setup(cpu); | ||
472 | break; | ||
473 | |||
474 | default: | ||
475 | break; | ||
476 | } | ||
477 | |||
478 | return NOTIFY_OK; | ||
479 | } | 463 | } |
480 | 464 | ||
481 | static int __init bfin_pmu_init(void) | 465 | static int __init bfin_pmu_init(void) |
@@ -491,8 +475,8 @@ static int __init bfin_pmu_init(void)
491 | 475 | ||
492 | ret = perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); | 476 | ret = perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); |
493 | if (!ret) | 477 | if (!ret) |
494 | perf_cpu_notifier(bfin_pmu_notifier); | 478 | cpuhp_setup_state(CPUHP_PERF_BFIN, "PERF_BFIN", |
495 | 479 | bfin_pmu_prepare_cpu, NULL); | |
496 | return ret; | 480 | return ret; |
497 | } | 481 | } |
498 | early_initcall(bfin_pmu_init); | 482 | early_initcall(bfin_pmu_init); |
diff --git a/arch/metag/kernel/perf/perf_event.c b/arch/metag/kernel/perf/perf_event.c
index 33a365f924be..052cba23708c 100644
--- a/arch/metag/kernel/perf/perf_event.c
+++ b/arch/metag/kernel/perf/perf_event.c
@@ -806,25 +806,16 @@ static struct metag_pmu _metag_pmu = {
806 | }; | 806 | }; |
807 | 807 | ||
808 | /* PMU CPU hotplug notifier */ | 808 | /* PMU CPU hotplug notifier */ |
809 | static int metag_pmu_cpu_notify(struct notifier_block *b, unsigned long action, | 809 | static int metag_pmu_starting_cpu(unsigned int cpu) |
810 | void *hcpu) | ||
811 | { | 810 | { |
812 | unsigned int cpu = (unsigned int)hcpu; | ||
813 | struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); | 811 | struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); |
814 | 812 | ||
815 | if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING) | ||
816 | return NOTIFY_DONE; | ||
817 | |||
818 | memset(cpuc, 0, sizeof(struct cpu_hw_events)); | 813 | memset(cpuc, 0, sizeof(struct cpu_hw_events)); |
819 | raw_spin_lock_init(&cpuc->pmu_lock); | 814 | raw_spin_lock_init(&cpuc->pmu_lock); |
820 | 815 | ||
821 | return NOTIFY_OK; | 816 | return 0; |
822 | } | 817 | } |
823 | 818 | ||
824 | static struct notifier_block metag_pmu_notifier = { | ||
825 | .notifier_call = metag_pmu_cpu_notify, | ||
826 | }; | ||
827 | |||
828 | /* PMU Initialisation */ | 819 | /* PMU Initialisation */ |
829 | static int __init init_hw_perf_events(void) | 820 | static int __init init_hw_perf_events(void) |
830 | { | 821 | { |
@@ -876,16 +867,13 @@ static int __init init_hw_perf_events(void)
876 | metag_out32(0, PERF_COUNT(0)); | 867 | metag_out32(0, PERF_COUNT(0)); |
877 | metag_out32(0, PERF_COUNT(1)); | 868 | metag_out32(0, PERF_COUNT(1)); |
878 | 869 | ||
879 | for_each_possible_cpu(cpu) { | 870 | cpuhp_setup_state(CPUHP_AP_PERF_METAG_STARTING, |
880 | struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); | 871 | "AP_PERF_METAG_STARTING", metag_pmu_starting_cpu, |
872 | NULL); | ||
881 | 873 | ||
882 | memset(cpuc, 0, sizeof(struct cpu_hw_events)); | ||
883 | raw_spin_lock_init(&cpuc->pmu_lock); | ||
884 | } | ||
885 | |||
886 | register_cpu_notifier(&metag_pmu_notifier); | ||
887 | ret = perf_pmu_register(&pmu, metag_pmu->name, PERF_TYPE_RAW); | 874 | ret = perf_pmu_register(&pmu, metag_pmu->name, PERF_TYPE_RAW); |
888 | out: | 875 | if (ret) |
876 | cpuhp_remove_state_nocalls(CPUHP_AP_PERF_METAG_STARTING); | ||
889 | return ret; | 877 | return ret; |
890 | } | 878 | } |
891 | early_initcall(init_hw_perf_events); | 879 | early_initcall(init_hw_perf_events); |
diff --git a/arch/mips/oprofile/op_model_loongson3.c b/arch/mips/oprofile/op_model_loongson3.c
index 8bcf7fc40f0d..85f3ee4ab456 100644
--- a/arch/mips/oprofile/op_model_loongson3.c
+++ b/arch/mips/oprofile/op_model_loongson3.c
@@ -168,33 +168,26 @@ static int loongson3_perfcount_handler(void)
168 | return handled; | 168 | return handled; |
169 | } | 169 | } |
170 | 170 | ||
171 | static int loongson3_cpu_callback(struct notifier_block *nfb, | 171 | static int loongson3_starting_cpu(unsigned int cpu) |
172 | unsigned long action, void *hcpu) | ||
173 | { | 172 | { |
174 | switch (action) { | 173 | write_c0_perflo1(reg.control1); |
175 | case CPU_STARTING: | 174 | write_c0_perflo2(reg.control2); |
176 | case CPU_STARTING_FROZEN: | 175 | return 0; |
177 | write_c0_perflo1(reg.control1); | ||
178 | write_c0_perflo2(reg.control2); | ||
179 | break; | ||
180 | case CPU_DYING: | ||
181 | case CPU_DYING_FROZEN: | ||
182 | write_c0_perflo1(0xc0000000); | ||
183 | write_c0_perflo2(0x40000000); | ||
184 | break; | ||
185 | } | ||
186 | |||
187 | return NOTIFY_OK; | ||
188 | } | 176 | } |
189 | 177 | ||
190 | static struct notifier_block loongson3_notifier_block = { | 178 | static int loongson3_dying_cpu(unsigned int cpu) |
191 | .notifier_call = loongson3_cpu_callback | 179 | { |
192 | }; | 180 | write_c0_perflo1(0xc0000000); |
181 | write_c0_perflo2(0x40000000); | ||
182 | return 0; | ||
183 | } | ||
193 | 184 | ||
194 | static int __init loongson3_init(void) | 185 | static int __init loongson3_init(void) |
195 | { | 186 | { |
196 | on_each_cpu(reset_counters, NULL, 1); | 187 | on_each_cpu(reset_counters, NULL, 1); |
197 | register_hotcpu_notifier(&loongson3_notifier_block); | 188 | cpuhp_setup_state_nocalls(CPUHP_AP_MIPS_OP_LOONGSON3_STARTING, |
189 | "AP_MIPS_OP_LOONGSON3_STARTING", | ||
190 | loongson3_starting_cpu, loongson3_dying_cpu); | ||
198 | save_perf_irq = perf_irq; | 191 | save_perf_irq = perf_irq; |
199 | perf_irq = loongson3_perfcount_handler; | 192 | perf_irq = loongson3_perfcount_handler; |
200 | 193 | ||
@@ -204,7 +197,7 @@ static int __init loongson3_init(void)
204 | static void loongson3_exit(void) | 197 | static void loongson3_exit(void) |
205 | { | 198 | { |
206 | on_each_cpu(reset_counters, NULL, 1); | 199 | on_each_cpu(reset_counters, NULL, 1); |
207 | unregister_hotcpu_notifier(&loongson3_notifier_block); | 200 | cpuhp_remove_state_nocalls(CPUHP_AP_MIPS_OP_LOONGSON3_STARTING); |
208 | perf_irq = save_perf_irq; | 201 | perf_irq = save_perf_irq; |
209 | } | 202 | } |
210 | 203 | ||
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 669a15e7fa76..6dc07ddbfd04 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -581,30 +581,22 @@ static void verify_cpu_node_mapping(int cpu, int node)
581 | } | 581 | } |
582 | } | 582 | } |
583 | 583 | ||
584 | static int cpu_numa_callback(struct notifier_block *nfb, unsigned long action, | 584 | /* Must run before sched domains notifier. */ |
585 | void *hcpu) | 585 | static int ppc_numa_cpu_prepare(unsigned int cpu) |
586 | { | 586 | { |
587 | unsigned long lcpu = (unsigned long)hcpu; | 587 | int nid; |
588 | int ret = NOTIFY_DONE, nid; | ||
589 | 588 | ||
590 | switch (action) { | 589 | nid = numa_setup_cpu(cpu); |
591 | case CPU_UP_PREPARE: | 590 | verify_cpu_node_mapping(cpu, nid); |
592 | case CPU_UP_PREPARE_FROZEN: | 591 | return 0; |
593 | nid = numa_setup_cpu(lcpu); | 592 | } |
594 | verify_cpu_node_mapping((int)lcpu, nid); | 593 | |
595 | ret = NOTIFY_OK; | 594 | static int ppc_numa_cpu_dead(unsigned int cpu) |
596 | break; | 595 | { |
597 | #ifdef CONFIG_HOTPLUG_CPU | 596 | #ifdef CONFIG_HOTPLUG_CPU |
598 | case CPU_DEAD: | 597 | unmap_cpu_from_node(cpu); |
599 | case CPU_DEAD_FROZEN: | ||
600 | case CPU_UP_CANCELED: | ||
601 | case CPU_UP_CANCELED_FROZEN: | ||
602 | unmap_cpu_from_node(lcpu); | ||
603 | ret = NOTIFY_OK; | ||
604 | break; | ||
605 | #endif | 598 | #endif |
606 | } | 599 | return 0; |
607 | return ret; | ||
608 | } | 600 | } |
609 | 601 | ||
610 | /* | 602 | /* |
@@ -913,11 +905,6 @@ static void __init dump_numa_memory_topology(void)
913 | } | 905 | } |
914 | } | 906 | } |
915 | 907 | ||
916 | static struct notifier_block ppc64_numa_nb = { | ||
917 | .notifier_call = cpu_numa_callback, | ||
918 | .priority = 1 /* Must run before sched domains notifier. */ | ||
919 | }; | ||
920 | |||
921 | /* Initialize NODE_DATA for a node on the local memory */ | 908 | /* Initialize NODE_DATA for a node on the local memory */ |
922 | static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn) | 909 | static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn) |
923 | { | 910 | { |
@@ -985,15 +972,18 @@ void __init initmem_init(void)
985 | setup_node_to_cpumask_map(); | 972 | setup_node_to_cpumask_map(); |
986 | 973 | ||
987 | reset_numa_cpu_lookup_table(); | 974 | reset_numa_cpu_lookup_table(); |
988 | register_cpu_notifier(&ppc64_numa_nb); | 975 | |
989 | /* | 976 | /* |
990 | * We need the numa_cpu_lookup_table to be accurate for all CPUs, | 977 | * We need the numa_cpu_lookup_table to be accurate for all CPUs, |
991 | * even before we online them, so that we can use cpu_to_{node,mem} | 978 | * even before we online them, so that we can use cpu_to_{node,mem} |
992 | * early in boot, cf. smp_prepare_cpus(). | 979 | * early in boot, cf. smp_prepare_cpus(). |
980 | * _nocalls() + manual invocation is used because cpuhp is not yet | ||
981 | * initialized for the boot CPU. | ||
993 | */ | 982 | */ |
994 | for_each_present_cpu(cpu) { | 983 | cpuhp_setup_state_nocalls(CPUHP_POWER_NUMA_PREPARE, "POWER_NUMA_PREPARE", |
995 | numa_setup_cpu((unsigned long)cpu); | 984 | ppc_numa_cpu_prepare, ppc_numa_cpu_dead); |
996 | } | 985 | for_each_present_cpu(cpu) |
986 | numa_setup_cpu(cpu); | ||
997 | } | 987 | } |
998 | 988 | ||
999 | static int __init early_numa(char *p) | 989 | static int __init early_numa(char *p) |
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 97a1d40d8696..ffd61d55fb25 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -2158,31 +2158,15 @@ static void perf_event_interrupt(struct pt_regs *regs)
2158 | irq_exit(); | 2158 | irq_exit(); |
2159 | } | 2159 | } |
2160 | 2160 | ||
2161 | static void power_pmu_setup(int cpu) | 2161 | int power_pmu_prepare_cpu(unsigned int cpu) |
2162 | { | 2162 | { |
2163 | struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu); | 2163 | struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu); |
2164 | 2164 | ||
2165 | if (!ppmu) | 2165 | if (ppmu) { |
2166 | return; | 2166 | memset(cpuhw, 0, sizeof(*cpuhw)); |
2167 | memset(cpuhw, 0, sizeof(*cpuhw)); | 2167 | cpuhw->mmcr[0] = MMCR0_FC; |
2168 | cpuhw->mmcr[0] = MMCR0_FC; | ||
2169 | } | ||
2170 | |||
2171 | static int | ||
2172 | power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) | ||
2173 | { | ||
2174 | unsigned int cpu = (long)hcpu; | ||
2175 | |||
2176 | switch (action & ~CPU_TASKS_FROZEN) { | ||
2177 | case CPU_UP_PREPARE: | ||
2178 | power_pmu_setup(cpu); | ||
2179 | break; | ||
2180 | |||
2181 | default: | ||
2182 | break; | ||
2183 | } | 2168 | } |
2184 | 2169 | return 0; | |
2185 | return NOTIFY_OK; | ||
2186 | } | 2170 | } |
2187 | 2171 | ||
2188 | int register_power_pmu(struct power_pmu *pmu) | 2172 | int register_power_pmu(struct power_pmu *pmu) |
@@ -2205,7 +2189,7 @@ int register_power_pmu(struct power_pmu *pmu)
2205 | #endif /* CONFIG_PPC64 */ | 2189 | #endif /* CONFIG_PPC64 */ |
2206 | 2190 | ||
2207 | perf_pmu_register(&power_pmu, "cpu", PERF_TYPE_RAW); | 2191 | perf_pmu_register(&power_pmu, "cpu", PERF_TYPE_RAW); |
2208 | perf_cpu_notifier(power_pmu_notifier); | 2192 | cpuhp_setup_state(CPUHP_PERF_POWER, "PERF_POWER", |
2209 | 2193 | power_pmu_prepare_cpu, NULL); | |
2210 | return 0; | 2194 | return 0; |
2211 | } | 2195 | } |
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 7ec63b1d920d..037c2a253ae4 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -664,30 +664,22 @@ static struct pmu cpumf_pmu = {
664 | .cancel_txn = cpumf_pmu_cancel_txn, | 664 | .cancel_txn = cpumf_pmu_cancel_txn, |
665 | }; | 665 | }; |
666 | 666 | ||
667 | static int cpumf_pmu_notifier(struct notifier_block *self, unsigned long action, | 667 | static int cpumf_pmf_setup(unsigned int cpu, int flags) |
668 | void *hcpu) | ||
669 | { | 668 | { |
670 | int flags; | 669 | local_irq_disable(); |
671 | 670 | setup_pmc_cpu(&flags); | |
672 | switch (action & ~CPU_TASKS_FROZEN) { | 671 | local_irq_enable(); |
673 | case CPU_ONLINE: | 672 | return 0; |
674 | case CPU_DOWN_FAILED: | 673 | } |
675 | flags = PMC_INIT; | 674 | |
676 | local_irq_disable(); | 675 | static int s390_pmu_online_cpu(unsigned int cpu) |
677 | setup_pmc_cpu(&flags); | 676 | { |
678 | local_irq_enable(); | 677 | return cpumf_pmf_setup(cpu, PMC_INIT); |
679 | break; | 678 | } |
680 | case CPU_DOWN_PREPARE: | ||
681 | flags = PMC_RELEASE; | ||
682 | local_irq_disable(); | ||
683 | setup_pmc_cpu(&flags); | ||
684 | local_irq_enable(); | ||
685 | break; | ||
686 | default: | ||
687 | break; | ||
688 | } | ||
689 | 679 | ||
690 | return NOTIFY_OK; | 680 | static int s390_pmu_offline_cpu(unsigned int cpu) |
681 | { | ||
682 | return cpumf_pmf_setup(cpu, PMC_RELEASE); | ||
691 | } | 683 | } |
692 | 684 | ||
693 | static int __init cpumf_pmu_init(void) | 685 | static int __init cpumf_pmu_init(void) |
@@ -707,7 +699,7 @@ static int __init cpumf_pmu_init(void)
707 | if (rc) { | 699 | if (rc) { |
708 | pr_err("Registering for CPU-measurement alerts " | 700 | pr_err("Registering for CPU-measurement alerts " |
709 | "failed with rc=%i\n", rc); | 701 | "failed with rc=%i\n", rc); |
710 | goto out; | 702 | return rc; |
711 | } | 703 | } |
712 | 704 | ||
713 | cpumf_pmu.attr_groups = cpumf_cf_event_group(); | 705 | cpumf_pmu.attr_groups = cpumf_cf_event_group(); |
@@ -716,10 +708,10 @@ static int __init cpumf_pmu_init(void)
716 | pr_err("Registering the cpum_cf PMU failed with rc=%i\n", rc); | 708 | pr_err("Registering the cpum_cf PMU failed with rc=%i\n", rc); |
717 | unregister_external_irq(EXT_IRQ_MEASURE_ALERT, | 709 | unregister_external_irq(EXT_IRQ_MEASURE_ALERT, |
718 | cpumf_measurement_alert); | 710 | cpumf_measurement_alert); |
719 | goto out; | 711 | return rc; |
720 | } | 712 | } |
721 | perf_cpu_notifier(cpumf_pmu_notifier); | 713 | return cpuhp_setup_state(CPUHP_AP_PERF_S390_CF_ONLINE, |
722 | out: | 714 | "AP_PERF_S390_CF_ONLINE", |
723 | return rc; | 715 | s390_pmu_online_cpu, s390_pmu_offline_cpu); |
724 | } | 716 | } |
725 | early_initcall(cpumf_pmu_init); | 717 | early_initcall(cpumf_pmu_init); |
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 53acf2d76fa9..fcc634c1479a 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -1504,37 +1504,28 @@ static void cpumf_measurement_alert(struct ext_code ext_code,
1504 | sf_disable(); | 1504 | sf_disable(); |
1505 | } | 1505 | } |
1506 | } | 1506 | } |
1507 | 1507 | static int cpusf_pmu_setup(unsigned int cpu, int flags) | |
1508 | static int cpumf_pmu_notifier(struct notifier_block *self, | ||
1509 | unsigned long action, void *hcpu) | ||
1510 | { | 1508 | { |
1511 | int flags; | ||
1512 | |||
1513 | /* Ignore the notification if no events are scheduled on the PMU. | 1509 | /* Ignore the notification if no events are scheduled on the PMU. |
1514 | * This might be racy... | 1510 | * This might be racy... |
1515 | */ | 1511 | */ |
1516 | if (!atomic_read(&num_events)) | 1512 | if (!atomic_read(&num_events)) |
1517 | return NOTIFY_OK; | 1513 | return 0; |
1518 | |||
1519 | switch (action & ~CPU_TASKS_FROZEN) { | ||
1520 | case CPU_ONLINE: | ||
1521 | case CPU_DOWN_FAILED: | ||
1522 | flags = PMC_INIT; | ||
1523 | local_irq_disable(); | ||
1524 | setup_pmc_cpu(&flags); | ||
1525 | local_irq_enable(); | ||
1526 | break; | ||
1527 | case CPU_DOWN_PREPARE: | ||
1528 | flags = PMC_RELEASE; | ||
1529 | local_irq_disable(); | ||
1530 | setup_pmc_cpu(&flags); | ||
1531 | local_irq_enable(); | ||
1532 | break; | ||
1533 | default: | ||
1534 | break; | ||
1535 | } | ||
1536 | 1514 | ||
1537 | return NOTIFY_OK; | 1515 | local_irq_disable(); |
1516 | setup_pmc_cpu(&flags); | ||
1517 | local_irq_enable(); | ||
1518 | return 0; | ||
1519 | } | ||
1520 | |||
1521 | static int s390_pmu_sf_online_cpu(unsigned int cpu) | ||
1522 | { | ||
1523 | return cpusf_pmu_setup(cpu, PMC_INIT); | ||
1524 | } | ||
1525 | |||
1526 | static int s390_pmu_sf_offline_cpu(unsigned int cpu) | ||
1527 | { | ||
1528 | return cpusf_pmu_setup(cpu, PMC_RELEASE); | ||
1538 | } | 1529 | } |
1539 | 1530 | ||
1540 | static int param_get_sfb_size(char *buffer, const struct kernel_param *kp) | 1531 | static int param_get_sfb_size(char *buffer, const struct kernel_param *kp) |
@@ -1634,7 +1625,9 @@ static int __init init_cpum_sampling_pmu(void)
1634 | cpumf_measurement_alert); | 1625 | cpumf_measurement_alert); |
1635 | goto out; | 1626 | goto out; |
1636 | } | 1627 | } |
1637 | perf_cpu_notifier(cpumf_pmu_notifier); | 1628 | |
1629 | cpuhp_setup_state(CPUHP_AP_PERF_S390_SF_ONLINE, "AP_PERF_S390_SF_ONLINE", | ||
1630 | s390_pmu_sf_online_cpu, s390_pmu_sf_offline_cpu); | ||
1638 | out: | 1631 | out: |
1639 | return err; | 1632 | return err; |
1640 | } | 1633 | } |
diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c
index 4dca18347ee9..ba3269a8304b 100644
--- a/arch/sh/kernel/perf_event.c
+++ b/arch/sh/kernel/perf_event.c
@@ -352,28 +352,12 @@ static struct pmu pmu = {
352 | .read = sh_pmu_read, | 352 | .read = sh_pmu_read, |
353 | }; | 353 | }; |
354 | 354 | ||
355 | static void sh_pmu_setup(int cpu) | 355 | static int sh_pmu_prepare_cpu(unsigned int cpu) |
356 | { | 356 | { |
357 | struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu); | 357 | struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu); |
358 | 358 | ||
359 | memset(cpuhw, 0, sizeof(struct cpu_hw_events)); | 359 | memset(cpuhw, 0, sizeof(struct cpu_hw_events)); |
360 | } | 360 | return 0; |
361 | |||
362 | static int | ||
363 | sh_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) | ||
364 | { | ||
365 | unsigned int cpu = (long)hcpu; | ||
366 | |||
367 | switch (action & ~CPU_TASKS_FROZEN) { | ||
368 | case CPU_UP_PREPARE: | ||
369 | sh_pmu_setup(cpu); | ||
370 | break; | ||
371 | |||
372 | default: | ||
373 | break; | ||
374 | } | ||
375 | |||
376 | return NOTIFY_OK; | ||
377 | } | 361 | } |
378 | 362 | ||
379 | int register_sh_pmu(struct sh_pmu *_pmu) | 363 | int register_sh_pmu(struct sh_pmu *_pmu) |
@@ -394,6 +378,7 @@ int register_sh_pmu(struct sh_pmu *_pmu)
394 | WARN_ON(_pmu->num_events > MAX_HWEVENTS); | 378 | WARN_ON(_pmu->num_events > MAX_HWEVENTS); |
395 | 379 | ||
396 | perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); | 380 | perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); |
397 | perf_cpu_notifier(sh_pmu_notifier); | 381 | cpuhp_setup_state(CPUHP_PERF_SUPERH, "PERF_SUPERH", sh_pmu_prepare_cpu, |
382 | NULL); | ||
398 | return 0; | 383 | return 0; |
399 | } | 384 | } |
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index 3329844e3c43..f840766659a8 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -331,15 +331,9 @@ static void vgetcpu_cpu_init(void *arg)
331 | write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S); | 331 | write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S); |
332 | } | 332 | } |
333 | 333 | ||
334 | static int | 334 | static int vgetcpu_online(unsigned int cpu) |
335 | vgetcpu_cpu_notifier(struct notifier_block *n, unsigned long action, void *arg) | ||
336 | { | 335 | { |
337 | long cpu = (long)arg; | 336 | return smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1); |
338 | |||
339 | if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) | ||
340 | smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1); | ||
341 | |||
342 | return NOTIFY_DONE; | ||
343 | } | 337 | } |
344 | 338 | ||
345 | static int __init init_vdso(void) | 339 | static int __init init_vdso(void) |
@@ -350,15 +344,9 @@ static int __init init_vdso(void)
350 | init_vdso_image(&vdso_image_x32); | 344 | init_vdso_image(&vdso_image_x32); |
351 | #endif | 345 | #endif |
352 | 346 | ||
353 | cpu_notifier_register_begin(); | ||
354 | |||
355 | on_each_cpu(vgetcpu_cpu_init, NULL, 1); | ||
356 | /* notifier priority > KVM */ | 347 | /* notifier priority > KVM */ |
357 | __hotcpu_notifier(vgetcpu_cpu_notifier, 30); | 348 | return cpuhp_setup_state(CPUHP_AP_X86_VDSO_VMA_ONLINE, |
358 | 349 | "AP_X86_VDSO_VMA_ONLINE", vgetcpu_online, NULL); | |
359 | cpu_notifier_register_done(); | ||
360 | |||
361 | return 0; | ||
362 | } | 350 | } |
363 | subsys_initcall(init_vdso); | 351 | subsys_initcall(init_vdso); |
364 | #endif /* CONFIG_X86_64 */ | 352 | #endif /* CONFIG_X86_64 */ |
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index bd3e8421b57c..e07a22bb9308 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -370,13 +370,13 @@ static int amd_pmu_cpu_prepare(int cpu)
370 | WARN_ON_ONCE(cpuc->amd_nb); | 370 | WARN_ON_ONCE(cpuc->amd_nb); |
371 | 371 | ||
372 | if (!x86_pmu.amd_nb_constraints) | 372 | if (!x86_pmu.amd_nb_constraints) |
373 | return NOTIFY_OK; | 373 | return 0; |
374 | 374 | ||
375 | cpuc->amd_nb = amd_alloc_nb(cpu); | 375 | cpuc->amd_nb = amd_alloc_nb(cpu); |
376 | if (!cpuc->amd_nb) | 376 | if (!cpuc->amd_nb) |
377 | return NOTIFY_BAD; | 377 | return -ENOMEM; |
378 | 378 | ||
379 | return NOTIFY_OK; | 379 | return 0; |
380 | } | 380 | } |
381 | 381 | ||
382 | static void amd_pmu_cpu_starting(int cpu) | 382 | static void amd_pmu_cpu_starting(int cpu) |
diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index 72dea2f40fc4..155ea5324ae0 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -725,13 +725,10 @@ static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name)
725 | return ret; | 725 | return ret; |
726 | } | 726 | } |
727 | 727 | ||
728 | static __init int perf_event_ibs_init(void) | 728 | static __init void perf_event_ibs_init(void) |
729 | { | 729 | { |
730 | struct attribute **attr = ibs_op_format_attrs; | 730 | struct attribute **attr = ibs_op_format_attrs; |
731 | 731 | ||
732 | if (!ibs_caps) | ||
733 | return -ENODEV; /* ibs not supported by the cpu */ | ||
734 | |||
735 | perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch"); | 732 | perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch"); |
736 | 733 | ||
737 | if (ibs_caps & IBS_CAPS_OPCNT) { | 734 | if (ibs_caps & IBS_CAPS_OPCNT) { |
@@ -742,13 +739,11 @@ static __init int perf_event_ibs_init(void)
742 | 739 | ||
743 | register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs"); | 740 | register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs"); |
744 | pr_info("perf: AMD IBS detected (0x%08x)\n", ibs_caps); | 741 | pr_info("perf: AMD IBS detected (0x%08x)\n", ibs_caps); |
745 | |||
746 | return 0; | ||
747 | } | 742 | } |
748 | 743 | ||
749 | #else /* defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) */ | 744 | #else /* defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) */ |
750 | 745 | ||
751 | static __init int perf_event_ibs_init(void) { return 0; } | 746 | static __init void perf_event_ibs_init(void) { } |
752 | 747 | ||
753 | #endif | 748 | #endif |
754 | 749 | ||
@@ -925,7 +920,7 @@ static inline int get_ibs_lvt_offset(void)
925 | return val & IBSCTL_LVT_OFFSET_MASK; | 920 | return val & IBSCTL_LVT_OFFSET_MASK; |
926 | } | 921 | } |
927 | 922 | ||
928 | static void setup_APIC_ibs(void *dummy) | 923 | static void setup_APIC_ibs(void) |
929 | { | 924 | { |
930 | int offset; | 925 | int offset; |
931 | 926 | ||
@@ -940,7 +935,7 @@ failed:
940 | smp_processor_id()); | 935 | smp_processor_id()); |
941 | } | 936 | } |
942 | 937 | ||
943 | static void clear_APIC_ibs(void *dummy) | 938 | static void clear_APIC_ibs(void) |
944 | { | 939 | { |
945 | int offset; | 940 | int offset; |
946 | 941 | ||
@@ -949,18 +944,24 @@ static void clear_APIC_ibs(void *dummy)
949 | setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1); | 944 | setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1); |
950 | } | 945 | } |
951 | 946 | ||
947 | static int x86_pmu_amd_ibs_starting_cpu(unsigned int cpu) | ||
948 | { | ||
949 | setup_APIC_ibs(); | ||
950 | return 0; | ||
951 | } | ||
952 | |||
952 | #ifdef CONFIG_PM | 953 | #ifdef CONFIG_PM |
953 | 954 | ||
954 | static int perf_ibs_suspend(void) | 955 | static int perf_ibs_suspend(void) |
955 | { | 956 | { |
956 | clear_APIC_ibs(NULL); | 957 | clear_APIC_ibs(); |
957 | return 0; | 958 | return 0; |
958 | } | 959 | } |
959 | 960 | ||
960 | static void perf_ibs_resume(void) | 961 | static void perf_ibs_resume(void) |
961 | { | 962 | { |
962 | ibs_eilvt_setup(); | 963 | ibs_eilvt_setup(); |
963 | setup_APIC_ibs(NULL); | 964 | setup_APIC_ibs(); |
964 | } | 965 | } |
965 | 966 | ||
966 | static struct syscore_ops perf_ibs_syscore_ops = { | 967 | static struct syscore_ops perf_ibs_syscore_ops = { |
@@ -979,27 +980,15 @@ static inline void perf_ibs_pm_init(void) { }
979 | 980 | ||
980 | #endif | 981 | #endif |
981 | 982 | ||
982 | static int | 983 | static int x86_pmu_amd_ibs_dying_cpu(unsigned int cpu) |
983 | perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) | ||
984 | { | 984 | { |
985 | switch (action & ~CPU_TASKS_FROZEN) { | 985 | clear_APIC_ibs(); |
986 | case CPU_STARTING: | 986 | return 0; |
987 | setup_APIC_ibs(NULL); | ||
988 | break; | ||
989 | case CPU_DYING: | ||
990 | clear_APIC_ibs(NULL); | ||
991 | break; | ||
992 | default: | ||
993 | break; | ||
994 | } | ||
995 | |||
996 | return NOTIFY_OK; | ||
997 | } | 987 | } |
998 | 988 | ||
999 | static __init int amd_ibs_init(void) | 989 | static __init int amd_ibs_init(void) |
1000 | { | 990 | { |
1001 | u32 caps; | 991 | u32 caps; |
1002 | int ret = -EINVAL; | ||
1003 | 992 | ||
1004 | caps = __get_ibs_caps(); | 993 | caps = __get_ibs_caps(); |
1005 | if (!caps) | 994 | if (!caps) |
@@ -1008,22 +997,25 @@ static __init int amd_ibs_init(void)
1008 | ibs_eilvt_setup(); | 997 | ibs_eilvt_setup(); |
1009 | 998 | ||
1010 | if (!ibs_eilvt_valid()) | 999 | if (!ibs_eilvt_valid()) |
1011 | goto out; | 1000 | return -EINVAL; |
1012 | 1001 | ||
1013 | perf_ibs_pm_init(); | 1002 | perf_ibs_pm_init(); |
1014 | cpu_notifier_register_begin(); | 1003 | |
1015 | ibs_caps = caps; | 1004 | ibs_caps = caps; |
1016 | /* make ibs_caps visible to other cpus: */ | 1005 | /* make ibs_caps visible to other cpus: */ |
1017 | smp_mb(); | 1006 | smp_mb(); |
1018 | smp_call_function(setup_APIC_ibs, NULL, 1); | 1007 | /* |
1019 | __perf_cpu_notifier(perf_ibs_cpu_notifier); | 1008 | * x86_pmu_amd_ibs_starting_cpu will be called from core on |
1020 | cpu_notifier_register_done(); | 1009 | * all online cpus. |
1010 | */ | ||
1011 | cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_IBS_STARTING, | ||
1012 | "AP_PERF_X86_AMD_IBS_STARTING", | ||
1013 | x86_pmu_amd_ibs_starting_cpu, | ||
1014 | x86_pmu_amd_ibs_dying_cpu); | ||
1021 | 1015 | ||
1022 | ret = perf_event_ibs_init(); | 1016 | perf_event_ibs_init(); |
1023 | out: | 1017 | |
1024 | if (ret) | 1018 | return 0; |
1025 | pr_err("Failed to setup IBS, %d\n", ret); | ||
1026 | return ret; | ||
1027 | } | 1019 | } |
1028 | 1020 | ||
1029 | /* Since we need the pci subsystem to init ibs we can't do this earlier: */ | 1021 | /* Since we need the pci subsystem to init ibs we can't do this earlier: */ |
diff --git a/arch/x86/events/amd/power.c b/arch/x86/events/amd/power.c
index 55a3529dbf12..9842270ed2f2 100644
--- a/arch/x86/events/amd/power.c
+++ b/arch/x86/events/amd/power.c
@@ -228,12 +228,12 @@ static struct pmu pmu_class = {
228 | .read = pmu_event_read, | 228 | .read = pmu_event_read, |
229 | }; | 229 | }; |
230 | 230 | ||
231 | static void power_cpu_exit(int cpu) | 231 | static int power_cpu_exit(unsigned int cpu) |
232 | { | 232 | { |
233 | int target; | 233 | int target; |
234 | 234 | ||
235 | if (!cpumask_test_and_clear_cpu(cpu, &cpu_mask)) | 235 | if (!cpumask_test_and_clear_cpu(cpu, &cpu_mask)) |
236 | return; | 236 | return 0; |
237 | 237 | ||
238 | /* | 238 | /* |
239 | * Find a new CPU on the same compute unit, if was set in cpumask | 239 | * Find a new CPU on the same compute unit, if was set in cpumask |
@@ -245,9 +245,10 @@ static void power_cpu_exit(int cpu)
245 | cpumask_set_cpu(target, &cpu_mask); | 245 | cpumask_set_cpu(target, &cpu_mask); |
246 | perf_pmu_migrate_context(&pmu_class, cpu, target); | 246 | perf_pmu_migrate_context(&pmu_class, cpu, target); |
247 | } | 247 | } |
248 | return 0; | ||
248 | } | 249 | } |
249 | 250 | ||
250 | static void power_cpu_init(int cpu) | 251 | static int power_cpu_init(unsigned int cpu) |
251 | { | 252 | { |
252 | int target; | 253 | int target; |
253 | 254 | ||
@@ -255,7 +256,7 @@ static void power_cpu_init(int cpu)
255 | * 1) If any CPU is set at cpu_mask in the same compute unit, do | 256 | * 1) If any CPU is set at cpu_mask in the same compute unit, do |
256 | * nothing. | 257 | * nothing. |
257 | * 2) If no CPU is set at cpu_mask in the same compute unit, | 258 | * 2) If no CPU is set at cpu_mask in the same compute unit, |
258 | * set current STARTING CPU. | 259 | * set current ONLINE CPU. |
259 | * | 260 | * |
260 | * Note: if there is a CPU aside of the new one already in the | 261 | * Note: if there is a CPU aside of the new one already in the |
261 | * sibling mask, then it is also in cpu_mask. | 262 | * sibling mask, then it is also in cpu_mask. |
@@ -263,33 +264,9 @@ static void power_cpu_init(int cpu)
263 | target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu); | 264 | target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu); |
264 | if (target >= nr_cpumask_bits) | 265 | if (target >= nr_cpumask_bits) |
265 | cpumask_set_cpu(cpu, &cpu_mask); | 266 | cpumask_set_cpu(cpu, &cpu_mask); |
267 | return 0; | ||
266 | } | 268 | } |
267 | 269 | ||
268 | static int | ||
269 | power_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) | ||
270 | { | ||
271 | unsigned int cpu = (long)hcpu; | ||
272 | |||
273 | switch (action & ~CPU_TASKS_FROZEN) { | ||
274 | case CPU_DOWN_FAILED: | ||
275 | case CPU_STARTING: | ||
276 | power_cpu_init(cpu); | ||
277 | break; | ||
278 | case CPU_DOWN_PREPARE: | ||
279 | power_cpu_exit(cpu); | ||
280 | break; | ||
281 | default: | ||
282 | break; | ||
283 | } | ||
284 | |||
285 | return NOTIFY_OK; | ||
286 | } | ||
287 | |||
288 | static struct notifier_block power_cpu_notifier_nb = { | ||
289 | .notifier_call = power_cpu_notifier, | ||
290 | .priority = CPU_PRI_PERF, | ||
291 | }; | ||
292 | |||
293 | static const struct x86_cpu_id cpu_match[] = { | 270 | static const struct x86_cpu_id cpu_match[] = { |
294 | { .vendor = X86_VENDOR_AMD, .family = 0x15 }, | 271 | { .vendor = X86_VENDOR_AMD, .family = 0x15 }, |
295 | {}, | 272 | {}, |
@@ -297,7 +274,7 @@ static const struct x86_cpu_id cpu_match[] = {
297 | 274 | ||
298 | static int __init amd_power_pmu_init(void) | 275 | static int __init amd_power_pmu_init(void) |
299 | { | 276 | { |
300 | int cpu, target, ret; | 277 | int ret; |
301 | 278 | ||
302 | if (!x86_match_cpu(cpu_match)) | 279 | if (!x86_match_cpu(cpu_match)) |
303 | return 0; | 280 | return 0; |
@@ -312,38 +289,25 @@ static int __init amd_power_pmu_init(void)
312 | return -ENODEV; | 289 | return -ENODEV; |
313 | } | 290 | } |
314 | 291 | ||
315 | cpu_notifier_register_begin(); | ||
316 | 292 | ||
317 | /* Choose one online core of each compute unit. */ | 293 | cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_POWER_ONLINE, |
318 | for_each_online_cpu(cpu) { | 294 | "AP_PERF_X86_AMD_POWER_ONLINE", |
319 | target = cpumask_first(topology_sibling_cpumask(cpu)); | 295 | power_cpu_init, power_cpu_exit); |
320 | if (!cpumask_test_cpu(target, &cpu_mask)) | ||
321 | cpumask_set_cpu(target, &cpu_mask); | ||
322 | } | ||
323 | 296 | ||
324 | ret = perf_pmu_register(&pmu_class, "power", -1); | 297 | ret = perf_pmu_register(&pmu_class, "power", -1); |
325 | if (WARN_ON(ret)) { | 298 | if (WARN_ON(ret)) { |
326 | pr_warn("AMD Power PMU registration failed\n"); | 299 | pr_warn("AMD Power PMU registration failed\n"); |
327 | goto out; | 300 | return ret; |
328 | } | 301 | } |
329 | 302 | ||
330 | __register_cpu_notifier(&power_cpu_notifier_nb); | ||
331 | |||
332 | pr_info("AMD Power PMU detected\n"); | 303 | pr_info("AMD Power PMU detected\n"); |
333 | |||
334 | out: | ||
335 | cpu_notifier_register_done(); | ||
336 | |||
337 | return ret; | 304 | return ret; |
338 | } | 305 | } |
339 | module_init(amd_power_pmu_init); | 306 | module_init(amd_power_pmu_init); |
340 | 307 | ||
341 | static void __exit amd_power_pmu_exit(void) | 308 | static void __exit amd_power_pmu_exit(void) |
342 | { | 309 | { |
343 | cpu_notifier_register_begin(); | 310 | cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_AMD_POWER_ONLINE); |
344 | __unregister_cpu_notifier(&power_cpu_notifier_nb); | ||
345 | cpu_notifier_register_done(); | ||
346 | |||
347 | perf_pmu_unregister(&pmu_class); | 311 | perf_pmu_unregister(&pmu_class); |
348 | } | 312 | } |
349 | module_exit(amd_power_pmu_exit); | 313 | module_exit(amd_power_pmu_exit); |
diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
index 98ac57381bf9..e6131d4454e6 100644
--- a/arch/x86/events/amd/uncore.c
+++ b/arch/x86/events/amd/uncore.c
@@ -358,7 +358,7 @@ amd_uncore_find_online_sibling(struct amd_uncore *this,
358 | return this; | 358 | return this; |
359 | } | 359 | } |
360 | 360 | ||
361 | static void amd_uncore_cpu_starting(unsigned int cpu) | 361 | static int amd_uncore_cpu_starting(unsigned int cpu) |
362 | { | 362 | { |
363 | unsigned int eax, ebx, ecx, edx; | 363 | unsigned int eax, ebx, ecx, edx; |
364 | struct amd_uncore *uncore; | 364 | struct amd_uncore *uncore; |
@@ -384,6 +384,8 @@ static void amd_uncore_cpu_starting(unsigned int cpu)
384 | uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_l2); | 384 | uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_l2); |
385 | *per_cpu_ptr(amd_uncore_l2, cpu) = uncore; | 385 | *per_cpu_ptr(amd_uncore_l2, cpu) = uncore; |
386 | } | 386 | } |
387 | |||
388 | return 0; | ||
387 | } | 389 | } |
388 | 390 | ||
389 | static void uncore_online(unsigned int cpu, | 391 | static void uncore_online(unsigned int cpu, |
@@ -398,13 +400,15 @@ static void uncore_online(unsigned int cpu,
398 | cpumask_set_cpu(cpu, uncore->active_mask); | 400 | cpumask_set_cpu(cpu, uncore->active_mask); |
399 | } | 401 | } |
400 | 402 | ||
401 | static void amd_uncore_cpu_online(unsigned int cpu) | 403 | static int amd_uncore_cpu_online(unsigned int cpu) |
402 | { | 404 | { |
403 | if (amd_uncore_nb) | 405 | if (amd_uncore_nb) |
404 | uncore_online(cpu, amd_uncore_nb); | 406 | uncore_online(cpu, amd_uncore_nb); |
405 | 407 | ||
406 | if (amd_uncore_l2) | 408 | if (amd_uncore_l2) |
407 | uncore_online(cpu, amd_uncore_l2); | 409 | uncore_online(cpu, amd_uncore_l2); |
410 | |||
411 | return 0; | ||
408 | } | 412 | } |
409 | 413 | ||
410 | static void uncore_down_prepare(unsigned int cpu, | 414 | static void uncore_down_prepare(unsigned int cpu, |
@@ -433,13 +437,15 @@ static void uncore_down_prepare(unsigned int cpu, | |||
433 | } | 437 | } |
434 | } | 438 | } |
435 | 439 | ||
436 | static void amd_uncore_cpu_down_prepare(unsigned int cpu) | 440 | static int amd_uncore_cpu_down_prepare(unsigned int cpu) |
437 | { | 441 | { |
438 | if (amd_uncore_nb) | 442 | if (amd_uncore_nb) |
439 | uncore_down_prepare(cpu, amd_uncore_nb); | 443 | uncore_down_prepare(cpu, amd_uncore_nb); |
440 | 444 | ||
441 | if (amd_uncore_l2) | 445 | if (amd_uncore_l2) |
442 | uncore_down_prepare(cpu, amd_uncore_l2); | 446 | uncore_down_prepare(cpu, amd_uncore_l2); |
447 | |||
448 | return 0; | ||
443 | } | 449 | } |
444 | 450 | ||
445 | static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores) | 451 | static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores) |
@@ -454,74 +460,19 @@ static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores) | |||
454 | *per_cpu_ptr(uncores, cpu) = NULL; | 460 | *per_cpu_ptr(uncores, cpu) = NULL; |
455 | } | 461 | } |
456 | 462 | ||
457 | static void amd_uncore_cpu_dead(unsigned int cpu) | 463 | static int amd_uncore_cpu_dead(unsigned int cpu) |
458 | { | 464 | { |
459 | if (amd_uncore_nb) | 465 | if (amd_uncore_nb) |
460 | uncore_dead(cpu, amd_uncore_nb); | 466 | uncore_dead(cpu, amd_uncore_nb); |
461 | 467 | ||
462 | if (amd_uncore_l2) | 468 | if (amd_uncore_l2) |
463 | uncore_dead(cpu, amd_uncore_l2); | 469 | uncore_dead(cpu, amd_uncore_l2); |
464 | } | ||
465 | |||
466 | static int | ||
467 | amd_uncore_cpu_notifier(struct notifier_block *self, unsigned long action, | ||
468 | void *hcpu) | ||
469 | { | ||
470 | unsigned int cpu = (long)hcpu; | ||
471 | |||
472 | switch (action & ~CPU_TASKS_FROZEN) { | ||
473 | case CPU_UP_PREPARE: | ||
474 | if (amd_uncore_cpu_up_prepare(cpu)) | ||
475 | return notifier_from_errno(-ENOMEM); | ||
476 | break; | ||
477 | |||
478 | case CPU_STARTING: | ||
479 | amd_uncore_cpu_starting(cpu); | ||
480 | break; | ||
481 | |||
482 | case CPU_ONLINE: | ||
483 | amd_uncore_cpu_online(cpu); | ||
484 | break; | ||
485 | |||
486 | case CPU_DOWN_PREPARE: | ||
487 | amd_uncore_cpu_down_prepare(cpu); | ||
488 | break; | ||
489 | |||
490 | case CPU_UP_CANCELED: | ||
491 | case CPU_DEAD: | ||
492 | amd_uncore_cpu_dead(cpu); | ||
493 | break; | ||
494 | |||
495 | default: | ||
496 | break; | ||
497 | } | ||
498 | |||
499 | return NOTIFY_OK; | ||
500 | } | ||
501 | |||
502 | static struct notifier_block amd_uncore_cpu_notifier_block = { | ||
503 | .notifier_call = amd_uncore_cpu_notifier, | ||
504 | .priority = CPU_PRI_PERF + 1, | ||
505 | }; | ||
506 | |||
507 | static void __init init_cpu_already_online(void *dummy) | ||
508 | { | ||
509 | unsigned int cpu = smp_processor_id(); | ||
510 | |||
511 | amd_uncore_cpu_starting(cpu); | ||
512 | amd_uncore_cpu_online(cpu); | ||
513 | } | ||
514 | 470 | ||
515 | static void cleanup_cpu_online(void *dummy) | 471 | return 0; |
516 | { | ||
517 | unsigned int cpu = smp_processor_id(); | ||
518 | |||
519 | amd_uncore_cpu_dead(cpu); | ||
520 | } | 472 | } |
521 | 473 | ||
522 | static int __init amd_uncore_init(void) | 474 | static int __init amd_uncore_init(void) |
523 | { | 475 | { |
524 | unsigned int cpu, cpu2; | ||
525 | int ret = -ENODEV; | 476 | int ret = -ENODEV; |
526 | 477 | ||
527 | if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) | 478 | if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) |
@@ -558,38 +509,29 @@ static int __init amd_uncore_init(void) | |||
558 | ret = 0; | 509 | ret = 0; |
559 | } | 510 | } |
560 | 511 | ||
561 | if (ret) | 512 | /* |
562 | goto fail_nodev; | 513 | * Install callbacks. Core will call them for each online cpu. |
563 | 514 | */ | |
564 | cpu_notifier_register_begin(); | 515 | if (cpuhp_setup_state(CPUHP_PERF_X86_AMD_UNCORE_PREP, |
565 | 516 | "PERF_X86_AMD_UNCORE_PREP", | |
566 | /* init cpus already online before registering for hotplug notifier */ | 517 | amd_uncore_cpu_up_prepare, amd_uncore_cpu_dead)) |
567 | for_each_online_cpu(cpu) { | 518 | goto fail_l2; |
568 | ret = amd_uncore_cpu_up_prepare(cpu); | 519 | |
569 | if (ret) | 520 | if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING, |
570 | goto fail_online; | 521 | "AP_PERF_X86_AMD_UNCORE_STARTING", |
571 | smp_call_function_single(cpu, init_cpu_already_online, NULL, 1); | 522 | amd_uncore_cpu_starting, NULL)) |
572 | } | 523 | goto fail_prep; |
573 | 524 | if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE, | |
574 | __register_cpu_notifier(&amd_uncore_cpu_notifier_block); | 525 | "AP_PERF_X86_AMD_UNCORE_ONLINE", |
575 | cpu_notifier_register_done(); | 526 | amd_uncore_cpu_online, |
576 | 527 | amd_uncore_cpu_down_prepare)) | |
528 | goto fail_start; | ||
577 | return 0; | 529 | return 0; |
578 | 530 | ||
579 | 531 | fail_start: | |
580 | fail_online: | 532 | cpuhp_remove_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING); |
581 | for_each_online_cpu(cpu2) { | 533 | fail_prep: |
582 | if (cpu2 == cpu) | 534 | cpuhp_remove_state(CPUHP_PERF_X86_AMD_UNCORE_PREP); |
583 | break; | ||
584 | smp_call_function_single(cpu, cleanup_cpu_online, NULL, 1); | ||
585 | } | ||
586 | cpu_notifier_register_done(); | ||
587 | |||
588 | /* amd_uncore_nb/l2 should have been freed by cleanup_cpu_online */ | ||
589 | amd_uncore_nb = amd_uncore_l2 = NULL; | ||
590 | |||
591 | if (boot_cpu_has(X86_FEATURE_PERFCTR_L2)) | ||
592 | perf_pmu_unregister(&amd_l2_pmu); | ||
593 | fail_l2: | 535 | fail_l2: |
594 | if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) | 536 | if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) |
595 | perf_pmu_unregister(&amd_nb_pmu); | 537 | perf_pmu_unregister(&amd_nb_pmu); |
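amd_uncore_init() above now installs three cooperating states, a prepare step on the control CPU, a starting step on the incoming CPU and an online step once it is up, and unwinds the ones already installed, in reverse order, when a later cpuhp_setup_state() call fails. A condensed sketch of that unwinding shape; the state constants and callbacks are placeholders, not names from this patch:

/*
 * Sketch only: assumes MYDRV_PREP, MYDRV_AP_STARTING and MYDRV_AP_ONLINE
 * were added to enum cpuhp_state; the callbacks are empty placeholders.
 */
static int __init mydrv_init(void)
{
	int ret;

	ret = cpuhp_setup_state(MYDRV_PREP, "MYDRV_PREP",
				mydrv_cpu_prepare, mydrv_cpu_dead);
	if (ret)
		return ret;

	ret = cpuhp_setup_state(MYDRV_AP_STARTING, "MYDRV_AP_STARTING",
				mydrv_cpu_starting, mydrv_cpu_dying);
	if (ret)
		goto fail_prep;

	ret = cpuhp_setup_state(MYDRV_AP_ONLINE, "MYDRV_AP_ONLINE",
				mydrv_cpu_online, mydrv_cpu_down_prepare);
	if (ret)
		goto fail_starting;

	return 0;

fail_starting:
	cpuhp_remove_state(MYDRV_AP_STARTING);
fail_prep:
	cpuhp_remove_state(MYDRV_PREP);
	return ret;
}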
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index dfebbde2a4cc..c17f0de5fd39 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c | |||
@@ -1477,49 +1477,49 @@ NOKPROBE_SYMBOL(perf_event_nmi_handler); | |||
1477 | struct event_constraint emptyconstraint; | 1477 | struct event_constraint emptyconstraint; |
1478 | struct event_constraint unconstrained; | 1478 | struct event_constraint unconstrained; |
1479 | 1479 | ||
1480 | static int | 1480 | static int x86_pmu_prepare_cpu(unsigned int cpu) |
1481 | x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) | ||
1482 | { | 1481 | { |
1483 | unsigned int cpu = (long)hcpu; | ||
1484 | struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); | 1482 | struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); |
1485 | int i, ret = NOTIFY_OK; | 1483 | int i; |
1486 | |||
1487 | switch (action & ~CPU_TASKS_FROZEN) { | ||
1488 | case CPU_UP_PREPARE: | ||
1489 | for (i = 0 ; i < X86_PERF_KFREE_MAX; i++) | ||
1490 | cpuc->kfree_on_online[i] = NULL; | ||
1491 | if (x86_pmu.cpu_prepare) | ||
1492 | ret = x86_pmu.cpu_prepare(cpu); | ||
1493 | break; | ||
1494 | |||
1495 | case CPU_STARTING: | ||
1496 | if (x86_pmu.cpu_starting) | ||
1497 | x86_pmu.cpu_starting(cpu); | ||
1498 | break; | ||
1499 | 1484 | ||
1500 | case CPU_ONLINE: | 1485 | for (i = 0 ; i < X86_PERF_KFREE_MAX; i++) |
1501 | for (i = 0 ; i < X86_PERF_KFREE_MAX; i++) { | 1486 | cpuc->kfree_on_online[i] = NULL; |
1502 | kfree(cpuc->kfree_on_online[i]); | 1487 | if (x86_pmu.cpu_prepare) |
1503 | cpuc->kfree_on_online[i] = NULL; | 1488 | return x86_pmu.cpu_prepare(cpu); |
1504 | } | 1489 | return 0; |
1505 | break; | 1490 | } |
1506 | 1491 | ||
1507 | case CPU_DYING: | 1492 | static int x86_pmu_dead_cpu(unsigned int cpu) |
1508 | if (x86_pmu.cpu_dying) | 1493 | { |
1509 | x86_pmu.cpu_dying(cpu); | 1494 | if (x86_pmu.cpu_dead) |
1510 | break; | 1495 | x86_pmu.cpu_dead(cpu); |
1496 | return 0; | ||
1497 | } | ||
1511 | 1498 | ||
1512 | case CPU_UP_CANCELED: | 1499 | static int x86_pmu_online_cpu(unsigned int cpu) |
1513 | case CPU_DEAD: | 1500 | { |
1514 | if (x86_pmu.cpu_dead) | 1501 | struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); |
1515 | x86_pmu.cpu_dead(cpu); | 1502 | int i; |
1516 | break; | ||
1517 | 1503 | ||
1518 | default: | 1504 | for (i = 0 ; i < X86_PERF_KFREE_MAX; i++) { |
1519 | break; | 1505 | kfree(cpuc->kfree_on_online[i]); |
1506 | cpuc->kfree_on_online[i] = NULL; | ||
1520 | } | 1507 | } |
1508 | return 0; | ||
1509 | } | ||
1521 | 1510 | ||
1522 | return ret; | 1511 | static int x86_pmu_starting_cpu(unsigned int cpu) |
1512 | { | ||
1513 | if (x86_pmu.cpu_starting) | ||
1514 | x86_pmu.cpu_starting(cpu); | ||
1515 | return 0; | ||
1516 | } | ||
1517 | |||
1518 | static int x86_pmu_dying_cpu(unsigned int cpu) | ||
1519 | { | ||
1520 | if (x86_pmu.cpu_dying) | ||
1521 | x86_pmu.cpu_dying(cpu); | ||
1522 | return 0; | ||
1523 | } | 1523 | } |
1524 | 1524 | ||
1525 | static void __init pmu_check_apic(void) | 1525 | static void __init pmu_check_apic(void) |
@@ -1787,10 +1787,39 @@ static int __init init_hw_perf_events(void) | |||
1787 | pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed); | 1787 | pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed); |
1788 | pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl); | 1788 | pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl); |
1789 | 1789 | ||
1790 | perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); | 1790 | /* |
1791 | perf_cpu_notifier(x86_pmu_notifier); | 1791 | * Install callbacks. Core will call them for each online |
1792 | * cpu. | ||
1793 | */ | ||
1794 | err = cpuhp_setup_state(CPUHP_PERF_X86_PREPARE, "PERF_X86_PREPARE", | ||
1795 | x86_pmu_prepare_cpu, x86_pmu_dead_cpu); | ||
1796 | if (err) | ||
1797 | return err; | ||
1798 | |||
1799 | err = cpuhp_setup_state(CPUHP_AP_PERF_X86_STARTING, | ||
1800 | "AP_PERF_X86_STARTING", x86_pmu_starting_cpu, | ||
1801 | x86_pmu_dying_cpu); | ||
1802 | if (err) | ||
1803 | goto out; | ||
1804 | |||
1805 | err = cpuhp_setup_state(CPUHP_AP_PERF_X86_ONLINE, "AP_PERF_X86_ONLINE", | ||
1806 | x86_pmu_online_cpu, NULL); | ||
1807 | if (err) | ||
1808 | goto out1; | ||
1809 | |||
1810 | err = perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); | ||
1811 | if (err) | ||
1812 | goto out2; | ||
1792 | 1813 | ||
1793 | return 0; | 1814 | return 0; |
1815 | |||
1816 | out2: | ||
1817 | cpuhp_remove_state(CPUHP_AP_PERF_X86_ONLINE); | ||
1818 | out1: | ||
1819 | cpuhp_remove_state(CPUHP_AP_PERF_X86_STARTING); | ||
1820 | out: | ||
1821 | cpuhp_remove_state(CPUHP_PERF_X86_PREPARE); | ||
1822 | return err; | ||
1794 | } | 1823 | } |
1795 | early_initcall(init_hw_perf_events); | 1824 | early_initcall(init_hw_perf_events); |
1796 | 1825 | ||
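The core perf conversion above also shows the new return convention: every hotplug callback returns 0 or a negative errno, so the old NOTIFY_OK/notifier_from_errno() translation disappears, and an error from a prepare-section callback simply aborts the bring-up of that CPU, after which the teardown callbacks of the states already passed are run. A small sketch of a prepare/dead pair built around a per-CPU allocation (hypothetical driver, not from this patch):

#include <linux/cpuhotplug.h>
#include <linux/percpu.h>
#include <linux/slab.h>

static DEFINE_PER_CPU(void *, mydrv_buf);

/* PREPARE section: runs on a control CPU before the new CPU starts. */
static int mydrv_cpu_prepare(unsigned int cpu)
{
	void *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;		/* aborts the bring-up of @cpu */
	per_cpu(mydrv_buf, cpu) = buf;
	return 0;
}

/*
 * Teardown of the prepare state: covers both a real offline and a
 * bring-up that was aborted by a later state (the old CPU_UP_CANCELED
 * plus CPU_DEAD cases).
 */
static int mydrv_cpu_dead(unsigned int cpu)
{
	kfree(per_cpu(mydrv_buf, cpu));
	per_cpu(mydrv_buf, cpu) = NULL;
	return 0;
}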
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 0974ba11e954..2cbde2f449aa 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c | |||
@@ -3109,7 +3109,7 @@ static int intel_pmu_cpu_prepare(int cpu) | |||
3109 | cpuc->excl_thread_id = 0; | 3109 | cpuc->excl_thread_id = 0; |
3110 | } | 3110 | } |
3111 | 3111 | ||
3112 | return NOTIFY_OK; | 3112 | return 0; |
3113 | 3113 | ||
3114 | err_constraint_list: | 3114 | err_constraint_list: |
3115 | kfree(cpuc->constraint_list); | 3115 | kfree(cpuc->constraint_list); |
@@ -3120,7 +3120,7 @@ err_shared_regs: | |||
3120 | cpuc->shared_regs = NULL; | 3120 | cpuc->shared_regs = NULL; |
3121 | 3121 | ||
3122 | err: | 3122 | err: |
3123 | return NOTIFY_BAD; | 3123 | return -ENOMEM; |
3124 | } | 3124 | } |
3125 | 3125 | ||
3126 | static void intel_pmu_cpu_starting(int cpu) | 3126 | static void intel_pmu_cpu_starting(int cpu) |
diff --git a/arch/x86/events/intel/cqm.c b/arch/x86/events/intel/cqm.c index 7b5fd811ef45..783c49ddef29 100644 --- a/arch/x86/events/intel/cqm.c +++ b/arch/x86/events/intel/cqm.c | |||
@@ -1577,7 +1577,7 @@ static inline void cqm_pick_event_reader(int cpu) | |||
1577 | cpumask_set_cpu(cpu, &cqm_cpumask); | 1577 | cpumask_set_cpu(cpu, &cqm_cpumask); |
1578 | } | 1578 | } |
1579 | 1579 | ||
1580 | static void intel_cqm_cpu_starting(unsigned int cpu) | 1580 | static int intel_cqm_cpu_starting(unsigned int cpu) |
1581 | { | 1581 | { |
1582 | struct intel_pqr_state *state = &per_cpu(pqr_state, cpu); | 1582 | struct intel_pqr_state *state = &per_cpu(pqr_state, cpu); |
1583 | struct cpuinfo_x86 *c = &cpu_data(cpu); | 1583 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
@@ -1588,39 +1588,26 @@ static void intel_cqm_cpu_starting(unsigned int cpu) | |||
1588 | 1588 | ||
1589 | WARN_ON(c->x86_cache_max_rmid != cqm_max_rmid); | 1589 | WARN_ON(c->x86_cache_max_rmid != cqm_max_rmid); |
1590 | WARN_ON(c->x86_cache_occ_scale != cqm_l3_scale); | 1590 | WARN_ON(c->x86_cache_occ_scale != cqm_l3_scale); |
1591 | |||
1592 | cqm_pick_event_reader(cpu); | ||
1593 | return 0; | ||
1591 | } | 1594 | } |
1592 | 1595 | ||
1593 | static void intel_cqm_cpu_exit(unsigned int cpu) | 1596 | static int intel_cqm_cpu_exit(unsigned int cpu) |
1594 | { | 1597 | { |
1595 | int target; | 1598 | int target; |
1596 | 1599 | ||
1597 | /* Is @cpu the current cqm reader for this package ? */ | 1600 | /* Is @cpu the current cqm reader for this package ? */ |
1598 | if (!cpumask_test_and_clear_cpu(cpu, &cqm_cpumask)) | 1601 | if (!cpumask_test_and_clear_cpu(cpu, &cqm_cpumask)) |
1599 | return; | 1602 | return 0; |
1600 | 1603 | ||
1601 | /* Find another online reader in this package */ | 1604 | /* Find another online reader in this package */ |
1602 | target = cpumask_any_but(topology_core_cpumask(cpu), cpu); | 1605 | target = cpumask_any_but(topology_core_cpumask(cpu), cpu); |
1603 | 1606 | ||
1604 | if (target < nr_cpu_ids) | 1607 | if (target < nr_cpu_ids) |
1605 | cpumask_set_cpu(target, &cqm_cpumask); | 1608 | cpumask_set_cpu(target, &cqm_cpumask); |
1606 | } | ||
1607 | |||
1608 | static int intel_cqm_cpu_notifier(struct notifier_block *nb, | ||
1609 | unsigned long action, void *hcpu) | ||
1610 | { | ||
1611 | unsigned int cpu = (unsigned long)hcpu; | ||
1612 | |||
1613 | switch (action & ~CPU_TASKS_FROZEN) { | ||
1614 | case CPU_DOWN_PREPARE: | ||
1615 | intel_cqm_cpu_exit(cpu); | ||
1616 | break; | ||
1617 | case CPU_STARTING: | ||
1618 | intel_cqm_cpu_starting(cpu); | ||
1619 | cqm_pick_event_reader(cpu); | ||
1620 | break; | ||
1621 | } | ||
1622 | 1609 | ||
1623 | return NOTIFY_OK; | 1610 | return 0; |
1624 | } | 1611 | } |
1625 | 1612 | ||
1626 | static const struct x86_cpu_id intel_cqm_match[] = { | 1613 | static const struct x86_cpu_id intel_cqm_match[] = { |
@@ -1682,7 +1669,7 @@ out: | |||
1682 | static int __init intel_cqm_init(void) | 1669 | static int __init intel_cqm_init(void) |
1683 | { | 1670 | { |
1684 | char *str = NULL, scale[20]; | 1671 | char *str = NULL, scale[20]; |
1685 | int i, cpu, ret; | 1672 | int cpu, ret; |
1686 | 1673 | ||
1687 | if (x86_match_cpu(intel_cqm_match)) | 1674 | if (x86_match_cpu(intel_cqm_match)) |
1688 | cqm_enabled = true; | 1675 | cqm_enabled = true; |
@@ -1705,8 +1692,7 @@ static int __init intel_cqm_init(void) | |||
1705 | * | 1692 | * |
1706 | * Also, check that the scales match on all cpus. | 1693 | * Also, check that the scales match on all cpus. |
1707 | */ | 1694 | */ |
1708 | cpu_notifier_register_begin(); | 1695 | get_online_cpus(); |
1709 | |||
1710 | for_each_online_cpu(cpu) { | 1696 | for_each_online_cpu(cpu) { |
1711 | struct cpuinfo_x86 *c = &cpu_data(cpu); | 1697 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
1712 | 1698 | ||
@@ -1743,11 +1729,6 @@ static int __init intel_cqm_init(void) | |||
1743 | if (ret) | 1729 | if (ret) |
1744 | goto out; | 1730 | goto out; |
1745 | 1731 | ||
1746 | for_each_online_cpu(i) { | ||
1747 | intel_cqm_cpu_starting(i); | ||
1748 | cqm_pick_event_reader(i); | ||
1749 | } | ||
1750 | |||
1751 | if (mbm_enabled) | 1732 | if (mbm_enabled) |
1752 | ret = intel_mbm_init(); | 1733 | ret = intel_mbm_init(); |
1753 | if (ret && !cqm_enabled) | 1734 | if (ret && !cqm_enabled) |
@@ -1772,12 +1753,18 @@ static int __init intel_cqm_init(void) | |||
1772 | pr_info("Intel MBM enabled\n"); | 1753 | pr_info("Intel MBM enabled\n"); |
1773 | 1754 | ||
1774 | /* | 1755 | /* |
1775 | * Register the hot cpu notifier once we are sure cqm | 1756 | * Setup the hot cpu notifier once we are sure cqm |
1776 | * is enabled to avoid notifier leak. | 1757 | * is enabled to avoid notifier leak. |
1777 | */ | 1758 | */ |
1778 | __perf_cpu_notifier(intel_cqm_cpu_notifier); | 1759 | cpuhp_setup_state(CPUHP_AP_PERF_X86_CQM_STARTING, |
1760 | "AP_PERF_X86_CQM_STARTING", | ||
1761 | intel_cqm_cpu_starting, NULL); | ||
1762 | cpuhp_setup_state(CPUHP_AP_PERF_X86_CQM_ONLINE, "AP_PERF_X86_CQM_ONLINE", | ||
1763 | NULL, intel_cqm_cpu_exit); | ||
1764 | |||
1779 | out: | 1765 | out: |
1780 | cpu_notifier_register_done(); | 1766 | put_online_cpus(); |
1767 | |||
1781 | if (ret) { | 1768 | if (ret) { |
1782 | kfree(str); | 1769 | kfree(str); |
1783 | cqm_cleanup(); | 1770 | cqm_cleanup(); |
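In the cqm hunk the cpu_notifier_register_begin()/done() bracket becomes a plain get_online_cpus()/put_online_cpus() pair: the init code only needs the set of online CPUs to stay stable while it validates per-CPU properties, and the manual "initialize CPUs that are already online, then register the notifier" loop goes away because cpuhp_setup_state() invokes the startup callback for each online CPU itself. Roughly, with hypothetical helpers and the hotplug lock API of this kernel assumed:

static int __init mydrv_init(void)
{
	int cpu, ret = 0;

	get_online_cpus();			/* keep the online mask stable */

	for_each_online_cpu(cpu) {
		ret = mydrv_check_cpu(cpu);	/* hypothetical validation */
		if (ret)
			goto out;
	}

	/* The online callback runs here for every CPU that is already up. */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydrv:online",
				mydrv_cpu_online, mydrv_cpu_offline);
	if (ret > 0)		/* dynamic states return the state number */
		ret = 0;
out:
	put_online_cpus();
	return ret;
}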
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c index 4c7638b91fa5..3ca87b5a8677 100644 --- a/arch/x86/events/intel/cstate.c +++ b/arch/x86/events/intel/cstate.c | |||
@@ -366,7 +366,7 @@ static int cstate_pmu_event_add(struct perf_event *event, int mode) | |||
366 | * Check if exiting cpu is the designated reader. If so migrate the | 366 | * Check if exiting cpu is the designated reader. If so migrate the |
367 | * events when there is a valid target available | 367 | * events when there is a valid target available |
368 | */ | 368 | */ |
369 | static void cstate_cpu_exit(int cpu) | 369 | static int cstate_cpu_exit(unsigned int cpu) |
370 | { | 370 | { |
371 | unsigned int target; | 371 | unsigned int target; |
372 | 372 | ||
@@ -391,9 +391,10 @@ static void cstate_cpu_exit(int cpu) | |||
391 | perf_pmu_migrate_context(&cstate_pkg_pmu, cpu, target); | 391 | perf_pmu_migrate_context(&cstate_pkg_pmu, cpu, target); |
392 | } | 392 | } |
393 | } | 393 | } |
394 | return 0; | ||
394 | } | 395 | } |
395 | 396 | ||
396 | static void cstate_cpu_init(int cpu) | 397 | static int cstate_cpu_init(unsigned int cpu) |
397 | { | 398 | { |
398 | unsigned int target; | 399 | unsigned int target; |
399 | 400 | ||
@@ -415,31 +416,10 @@ static void cstate_cpu_init(int cpu) | |||
415 | topology_core_cpumask(cpu)); | 416 | topology_core_cpumask(cpu)); |
416 | if (has_cstate_pkg && target >= nr_cpu_ids) | 417 | if (has_cstate_pkg && target >= nr_cpu_ids) |
417 | cpumask_set_cpu(cpu, &cstate_pkg_cpu_mask); | 418 | cpumask_set_cpu(cpu, &cstate_pkg_cpu_mask); |
418 | } | ||
419 | 419 | ||
420 | static int cstate_cpu_notifier(struct notifier_block *self, | 420 | return 0; |
421 | unsigned long action, void *hcpu) | ||
422 | { | ||
423 | unsigned int cpu = (long)hcpu; | ||
424 | |||
425 | switch (action & ~CPU_TASKS_FROZEN) { | ||
426 | case CPU_STARTING: | ||
427 | cstate_cpu_init(cpu); | ||
428 | break; | ||
429 | case CPU_DOWN_PREPARE: | ||
430 | cstate_cpu_exit(cpu); | ||
431 | break; | ||
432 | default: | ||
433 | break; | ||
434 | } | ||
435 | return NOTIFY_OK; | ||
436 | } | 421 | } |
437 | 422 | ||
438 | static struct notifier_block cstate_cpu_nb = { | ||
439 | .notifier_call = cstate_cpu_notifier, | ||
440 | .priority = CPU_PRI_PERF + 1, | ||
441 | }; | ||
442 | |||
443 | static struct pmu cstate_core_pmu = { | 423 | static struct pmu cstate_core_pmu = { |
444 | .attr_groups = core_attr_groups, | 424 | .attr_groups = core_attr_groups, |
445 | .name = "cstate_core", | 425 | .name = "cstate_core", |
@@ -600,18 +580,20 @@ static inline void cstate_cleanup(void) | |||
600 | 580 | ||
601 | static int __init cstate_init(void) | 581 | static int __init cstate_init(void) |
602 | { | 582 | { |
603 | int cpu, err; | 583 | int err; |
604 | 584 | ||
605 | cpu_notifier_register_begin(); | 585 | cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_STARTING, |
606 | for_each_online_cpu(cpu) | 586 | "AP_PERF_X86_CSTATE_STARTING", cstate_cpu_init, |
607 | cstate_cpu_init(cpu); | 587 | NULL); |
588 | cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_ONLINE, | ||
589 | "AP_PERF_X86_CSTATE_ONLINE", NULL, cstate_cpu_exit); | ||
608 | 590 | ||
609 | if (has_cstate_core) { | 591 | if (has_cstate_core) { |
610 | err = perf_pmu_register(&cstate_core_pmu, cstate_core_pmu.name, -1); | 592 | err = perf_pmu_register(&cstate_core_pmu, cstate_core_pmu.name, -1); |
611 | if (err) { | 593 | if (err) { |
612 | has_cstate_core = false; | 594 | has_cstate_core = false; |
613 | pr_info("Failed to register cstate core pmu\n"); | 595 | pr_info("Failed to register cstate core pmu\n"); |
614 | goto out; | 596 | return err; |
615 | } | 597 | } |
616 | } | 598 | } |
617 | 599 | ||
@@ -621,12 +603,10 @@ static int __init cstate_init(void) | |||
621 | has_cstate_pkg = false; | 603 | has_cstate_pkg = false; |
622 | pr_info("Failed to register cstate pkg pmu\n"); | 604 | pr_info("Failed to register cstate pkg pmu\n"); |
623 | cstate_cleanup(); | 605 | cstate_cleanup(); |
624 | goto out; | 606 | return err; |
625 | } | 607 | } |
626 | } | 608 | } |
627 | __register_cpu_notifier(&cstate_cpu_nb); | 609 | |
628 | out: | ||
629 | cpu_notifier_register_done(); | ||
630 | return err; | 610 | return err; |
631 | } | 611 | } |
632 | 612 | ||
@@ -652,9 +632,8 @@ module_init(cstate_pmu_init); | |||
652 | 632 | ||
653 | static void __exit cstate_pmu_exit(void) | 633 | static void __exit cstate_pmu_exit(void) |
654 | { | 634 | { |
655 | cpu_notifier_register_begin(); | 635 | cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_ONLINE); |
656 | __unregister_cpu_notifier(&cstate_cpu_nb); | 636 | cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_STARTING); |
657 | cstate_cleanup(); | 637 | cstate_cleanup(); |
658 | cpu_notifier_register_done(); | ||
659 | } | 638 | } |
660 | module_exit(cstate_pmu_exit); | 639 | module_exit(cstate_pmu_exit); |
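cstate above, and rapl and the Intel uncore driver below, all keep one designated reader CPU per package in a cpumask; their offline callback hands the mask bit and the perf context over to another CPU in the same package before the reader disappears. The recurring shape, sketched with a hypothetical PMU and mask:

#include <linux/cpumask.h>
#include <linux/perf_event.h>
#include <linux/topology.h>

static struct pmu mydrv_pmu;
static cpumask_t mydrv_reader_mask;

static int mydrv_cpu_offline(unsigned int cpu)
{
	unsigned int target;

	/* Nothing to do unless @cpu was the designated reader. */
	if (!cpumask_test_and_clear_cpu(cpu, &mydrv_reader_mask))
		return 0;

	/* Hand over to any other online CPU in the same package. */
	target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
	if (target < nr_cpu_ids) {
		cpumask_set_cpu(target, &mydrv_reader_mask);
		perf_pmu_migrate_context(&mydrv_pmu, cpu, target);
	}
	return 0;
}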
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c index d0c58b35155f..28865938aadf 100644 --- a/arch/x86/events/intel/rapl.c +++ b/arch/x86/events/intel/rapl.c | |||
@@ -556,14 +556,14 @@ const struct attribute_group *rapl_attr_groups[] = { | |||
556 | NULL, | 556 | NULL, |
557 | }; | 557 | }; |
558 | 558 | ||
559 | static void rapl_cpu_exit(int cpu) | 559 | static int rapl_cpu_offline(unsigned int cpu) |
560 | { | 560 | { |
561 | struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu); | 561 | struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu); |
562 | int target; | 562 | int target; |
563 | 563 | ||
564 | /* Check if exiting cpu is used for collecting rapl events */ | 564 | /* Check if exiting cpu is used for collecting rapl events */ |
565 | if (!cpumask_test_and_clear_cpu(cpu, &rapl_cpu_mask)) | 565 | if (!cpumask_test_and_clear_cpu(cpu, &rapl_cpu_mask)) |
566 | return; | 566 | return 0; |
567 | 567 | ||
568 | pmu->cpu = -1; | 568 | pmu->cpu = -1; |
569 | /* Find a new cpu to collect rapl events */ | 569 | /* Find a new cpu to collect rapl events */ |
@@ -575,9 +575,10 @@ static void rapl_cpu_exit(int cpu) | |||
575 | pmu->cpu = target; | 575 | pmu->cpu = target; |
576 | perf_pmu_migrate_context(pmu->pmu, cpu, target); | 576 | perf_pmu_migrate_context(pmu->pmu, cpu, target); |
577 | } | 577 | } |
578 | return 0; | ||
578 | } | 579 | } |
579 | 580 | ||
580 | static void rapl_cpu_init(int cpu) | 581 | static int rapl_cpu_online(unsigned int cpu) |
581 | { | 582 | { |
582 | struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu); | 583 | struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu); |
583 | int target; | 584 | int target; |
@@ -588,13 +589,14 @@ static void rapl_cpu_init(int cpu) | |||
588 | */ | 589 | */ |
589 | target = cpumask_any_and(&rapl_cpu_mask, topology_core_cpumask(cpu)); | 590 | target = cpumask_any_and(&rapl_cpu_mask, topology_core_cpumask(cpu)); |
590 | if (target < nr_cpu_ids) | 591 | if (target < nr_cpu_ids) |
591 | return; | 592 | return 0; |
592 | 593 | ||
593 | cpumask_set_cpu(cpu, &rapl_cpu_mask); | 594 | cpumask_set_cpu(cpu, &rapl_cpu_mask); |
594 | pmu->cpu = cpu; | 595 | pmu->cpu = cpu; |
596 | return 0; | ||
595 | } | 597 | } |
596 | 598 | ||
597 | static int rapl_cpu_prepare(int cpu) | 599 | static int rapl_cpu_prepare(unsigned int cpu) |
598 | { | 600 | { |
599 | struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu); | 601 | struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu); |
600 | 602 | ||
@@ -615,33 +617,6 @@ static int rapl_cpu_prepare(int cpu) | |||
615 | return 0; | 617 | return 0; |
616 | } | 618 | } |
617 | 619 | ||
618 | static int rapl_cpu_notifier(struct notifier_block *self, | ||
619 | unsigned long action, void *hcpu) | ||
620 | { | ||
621 | unsigned int cpu = (long)hcpu; | ||
622 | |||
623 | switch (action & ~CPU_TASKS_FROZEN) { | ||
624 | case CPU_UP_PREPARE: | ||
625 | rapl_cpu_prepare(cpu); | ||
626 | break; | ||
627 | |||
628 | case CPU_DOWN_FAILED: | ||
629 | case CPU_ONLINE: | ||
630 | rapl_cpu_init(cpu); | ||
631 | break; | ||
632 | |||
633 | case CPU_DOWN_PREPARE: | ||
634 | rapl_cpu_exit(cpu); | ||
635 | break; | ||
636 | } | ||
637 | return NOTIFY_OK; | ||
638 | } | ||
639 | |||
640 | static struct notifier_block rapl_cpu_nb = { | ||
641 | .notifier_call = rapl_cpu_notifier, | ||
642 | .priority = CPU_PRI_PERF + 1, | ||
643 | }; | ||
644 | |||
645 | static int rapl_check_hw_unit(bool apply_quirk) | 620 | static int rapl_check_hw_unit(bool apply_quirk) |
646 | { | 621 | { |
647 | u64 msr_rapl_power_unit_bits; | 622 | u64 msr_rapl_power_unit_bits; |
@@ -692,24 +667,6 @@ static void __init rapl_advertise(void) | |||
692 | } | 667 | } |
693 | } | 668 | } |
694 | 669 | ||
695 | static int __init rapl_prepare_cpus(void) | ||
696 | { | ||
697 | unsigned int cpu, pkg; | ||
698 | int ret; | ||
699 | |||
700 | for_each_online_cpu(cpu) { | ||
701 | pkg = topology_logical_package_id(cpu); | ||
702 | if (rapl_pmus->pmus[pkg]) | ||
703 | continue; | ||
704 | |||
705 | ret = rapl_cpu_prepare(cpu); | ||
706 | if (ret) | ||
707 | return ret; | ||
708 | rapl_cpu_init(cpu); | ||
709 | } | ||
710 | return 0; | ||
711 | } | ||
712 | |||
713 | static void cleanup_rapl_pmus(void) | 670 | static void cleanup_rapl_pmus(void) |
714 | { | 671 | { |
715 | int i; | 672 | int i; |
@@ -837,35 +794,44 @@ static int __init rapl_pmu_init(void) | |||
837 | if (ret) | 794 | if (ret) |
838 | return ret; | 795 | return ret; |
839 | 796 | ||
840 | cpu_notifier_register_begin(); | 797 | /* |
798 | * Install callbacks. Core will call them for each online cpu. | ||
799 | */ | ||
841 | 800 | ||
842 | ret = rapl_prepare_cpus(); | 801 | ret = cpuhp_setup_state(CPUHP_PERF_X86_RAPL_PREP, "PERF_X86_RAPL_PREP", |
802 | rapl_cpu_prepare, NULL); | ||
843 | if (ret) | 803 | if (ret) |
844 | goto out; | 804 | goto out; |
845 | 805 | ||
806 | ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE, | ||
807 | "AP_PERF_X86_RAPL_ONLINE", | ||
808 | rapl_cpu_online, rapl_cpu_offline); | ||
809 | if (ret) | ||
810 | goto out1; | ||
811 | |||
846 | ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1); | 812 | ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1); |
847 | if (ret) | 813 | if (ret) |
848 | goto out; | 814 | goto out2; |
849 | 815 | ||
850 | __register_cpu_notifier(&rapl_cpu_nb); | ||
851 | cpu_notifier_register_done(); | ||
852 | rapl_advertise(); | 816 | rapl_advertise(); |
853 | return 0; | 817 | return 0; |
854 | 818 | ||
819 | out2: | ||
820 | cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE); | ||
821 | out1: | ||
822 | cpuhp_remove_state(CPUHP_PERF_X86_RAPL_PREP); | ||
855 | out: | 823 | out: |
856 | pr_warn("Initialization failed (%d), disabled\n", ret); | 824 | pr_warn("Initialization failed (%d), disabled\n", ret); |
857 | cleanup_rapl_pmus(); | 825 | cleanup_rapl_pmus(); |
858 | cpu_notifier_register_done(); | ||
859 | return ret; | 826 | return ret; |
860 | } | 827 | } |
861 | module_init(rapl_pmu_init); | 828 | module_init(rapl_pmu_init); |
862 | 829 | ||
863 | static void __exit intel_rapl_exit(void) | 830 | static void __exit intel_rapl_exit(void) |
864 | { | 831 | { |
865 | cpu_notifier_register_begin(); | 832 | cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE); |
866 | __unregister_cpu_notifier(&rapl_cpu_nb); | 833 | cpuhp_remove_state_nocalls(CPUHP_PERF_X86_RAPL_PREP); |
867 | perf_pmu_unregister(&rapl_pmus->pmu); | 834 | perf_pmu_unregister(&rapl_pmus->pmu); |
868 | cleanup_rapl_pmus(); | 835 | cleanup_rapl_pmus(); |
869 | cpu_notifier_register_done(); | ||
870 | } | 836 | } |
871 | module_exit(intel_rapl_exit); | 837 | module_exit(intel_rapl_exit); |
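intel_rapl_exit() above tears things down in the reverse order of setup, AP online state first, then the prepare state, then the PMU, and uses the _nocalls variants because cleanup_rapl_pmus() frees the per-package data itself rather than relying on the teardown callbacks. The same ordering for a hypothetical two-state driver (names are placeholders):

static void __exit mydrv_exit(void)
{
	/*
	 * Reverse order of installation; _nocalls only uninstalls the
	 * callbacks, a plain cpuhp_remove_state() would also run the
	 * teardown on every online CPU.
	 */
	cpuhp_remove_state_nocalls(MYDRV_AP_ONLINE);
	cpuhp_remove_state_nocalls(MYDRV_PREP);
	perf_pmu_unregister(&mydrv_pmu);
	mydrv_cleanup();	/* hypothetical: frees what prepare allocated */
}
module_exit(mydrv_exit);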
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c index 59b4974c697f..3f3d0d67749b 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c | |||
@@ -1052,7 +1052,7 @@ static void uncore_pci_exit(void) | |||
1052 | } | 1052 | } |
1053 | } | 1053 | } |
1054 | 1054 | ||
1055 | static void uncore_cpu_dying(int cpu) | 1055 | static int uncore_cpu_dying(unsigned int cpu) |
1056 | { | 1056 | { |
1057 | struct intel_uncore_type *type, **types = uncore_msr_uncores; | 1057 | struct intel_uncore_type *type, **types = uncore_msr_uncores; |
1058 | struct intel_uncore_pmu *pmu; | 1058 | struct intel_uncore_pmu *pmu; |
@@ -1069,16 +1069,19 @@ static void uncore_cpu_dying(int cpu) | |||
1069 | uncore_box_exit(box); | 1069 | uncore_box_exit(box); |
1070 | } | 1070 | } |
1071 | } | 1071 | } |
1072 | return 0; | ||
1072 | } | 1073 | } |
1073 | 1074 | ||
1074 | static void uncore_cpu_starting(int cpu, bool init) | 1075 | static int first_init; |
1076 | |||
1077 | static int uncore_cpu_starting(unsigned int cpu) | ||
1075 | { | 1078 | { |
1076 | struct intel_uncore_type *type, **types = uncore_msr_uncores; | 1079 | struct intel_uncore_type *type, **types = uncore_msr_uncores; |
1077 | struct intel_uncore_pmu *pmu; | 1080 | struct intel_uncore_pmu *pmu; |
1078 | struct intel_uncore_box *box; | 1081 | struct intel_uncore_box *box; |
1079 | int i, pkg, ncpus = 1; | 1082 | int i, pkg, ncpus = 1; |
1080 | 1083 | ||
1081 | if (init) { | 1084 | if (first_init) { |
1082 | /* | 1085 | /* |
1083 | * On init we get the number of online cpus in the package | 1086 | * On init we get the number of online cpus in the package |
1084 | * and set refcount for all of them. | 1087 | * and set refcount for all of them. |
@@ -1099,9 +1102,11 @@ static void uncore_cpu_starting(int cpu, bool init) | |||
1099 | uncore_box_init(box); | 1102 | uncore_box_init(box); |
1100 | } | 1103 | } |
1101 | } | 1104 | } |
1105 | |||
1106 | return 0; | ||
1102 | } | 1107 | } |
1103 | 1108 | ||
1104 | static int uncore_cpu_prepare(int cpu) | 1109 | static int uncore_cpu_prepare(unsigned int cpu) |
1105 | { | 1110 | { |
1106 | struct intel_uncore_type *type, **types = uncore_msr_uncores; | 1111 | struct intel_uncore_type *type, **types = uncore_msr_uncores; |
1107 | struct intel_uncore_pmu *pmu; | 1112 | struct intel_uncore_pmu *pmu; |
@@ -1164,13 +1169,13 @@ static void uncore_change_context(struct intel_uncore_type **uncores, | |||
1164 | uncore_change_type_ctx(*uncores, old_cpu, new_cpu); | 1169 | uncore_change_type_ctx(*uncores, old_cpu, new_cpu); |
1165 | } | 1170 | } |
1166 | 1171 | ||
1167 | static void uncore_event_exit_cpu(int cpu) | 1172 | static int uncore_event_cpu_offline(unsigned int cpu) |
1168 | { | 1173 | { |
1169 | int target; | 1174 | int target; |
1170 | 1175 | ||
1171 | /* Check if exiting cpu is used for collecting uncore events */ | 1176 | /* Check if exiting cpu is used for collecting uncore events */ |
1172 | if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask)) | 1177 | if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask)) |
1173 | return; | 1178 | return 0; |
1174 | 1179 | ||
1175 | /* Find a new cpu to collect uncore events */ | 1180 | /* Find a new cpu to collect uncore events */ |
1176 | target = cpumask_any_but(topology_core_cpumask(cpu), cpu); | 1181 | target = cpumask_any_but(topology_core_cpumask(cpu), cpu); |
@@ -1183,9 +1188,10 @@ static void uncore_event_exit_cpu(int cpu) | |||
1183 | 1188 | ||
1184 | uncore_change_context(uncore_msr_uncores, cpu, target); | 1189 | uncore_change_context(uncore_msr_uncores, cpu, target); |
1185 | uncore_change_context(uncore_pci_uncores, cpu, target); | 1190 | uncore_change_context(uncore_pci_uncores, cpu, target); |
1191 | return 0; | ||
1186 | } | 1192 | } |
1187 | 1193 | ||
1188 | static void uncore_event_init_cpu(int cpu) | 1194 | static int uncore_event_cpu_online(unsigned int cpu) |
1189 | { | 1195 | { |
1190 | int target; | 1196 | int target; |
1191 | 1197 | ||
@@ -1195,50 +1201,15 @@ static void uncore_event_init_cpu(int cpu) | |||
1195 | */ | 1201 | */ |
1196 | target = cpumask_any_and(&uncore_cpu_mask, topology_core_cpumask(cpu)); | 1202 | target = cpumask_any_and(&uncore_cpu_mask, topology_core_cpumask(cpu)); |
1197 | if (target < nr_cpu_ids) | 1203 | if (target < nr_cpu_ids) |
1198 | return; | 1204 | return 0; |
1199 | 1205 | ||
1200 | cpumask_set_cpu(cpu, &uncore_cpu_mask); | 1206 | cpumask_set_cpu(cpu, &uncore_cpu_mask); |
1201 | 1207 | ||
1202 | uncore_change_context(uncore_msr_uncores, -1, cpu); | 1208 | uncore_change_context(uncore_msr_uncores, -1, cpu); |
1203 | uncore_change_context(uncore_pci_uncores, -1, cpu); | 1209 | uncore_change_context(uncore_pci_uncores, -1, cpu); |
1210 | return 0; | ||
1204 | } | 1211 | } |
1205 | 1212 | ||
1206 | static int uncore_cpu_notifier(struct notifier_block *self, | ||
1207 | unsigned long action, void *hcpu) | ||
1208 | { | ||
1209 | unsigned int cpu = (long)hcpu; | ||
1210 | |||
1211 | switch (action & ~CPU_TASKS_FROZEN) { | ||
1212 | case CPU_UP_PREPARE: | ||
1213 | return notifier_from_errno(uncore_cpu_prepare(cpu)); | ||
1214 | |||
1215 | case CPU_STARTING: | ||
1216 | uncore_cpu_starting(cpu, false); | ||
1217 | case CPU_DOWN_FAILED: | ||
1218 | uncore_event_init_cpu(cpu); | ||
1219 | break; | ||
1220 | |||
1221 | case CPU_UP_CANCELED: | ||
1222 | case CPU_DYING: | ||
1223 | uncore_cpu_dying(cpu); | ||
1224 | break; | ||
1225 | |||
1226 | case CPU_DOWN_PREPARE: | ||
1227 | uncore_event_exit_cpu(cpu); | ||
1228 | break; | ||
1229 | } | ||
1230 | return NOTIFY_OK; | ||
1231 | } | ||
1232 | |||
1233 | static struct notifier_block uncore_cpu_nb = { | ||
1234 | .notifier_call = uncore_cpu_notifier, | ||
1235 | /* | ||
1236 | * to migrate uncore events, our notifier should be executed | ||
1237 | * before perf core's notifier. | ||
1238 | */ | ||
1239 | .priority = CPU_PRI_PERF + 1, | ||
1240 | }; | ||
1241 | |||
1242 | static int __init type_pmu_register(struct intel_uncore_type *type) | 1213 | static int __init type_pmu_register(struct intel_uncore_type *type) |
1243 | { | 1214 | { |
1244 | int i, ret; | 1215 | int i, ret; |
@@ -1282,41 +1253,6 @@ err: | |||
1282 | return ret; | 1253 | return ret; |
1283 | } | 1254 | } |
1284 | 1255 | ||
1285 | static void __init uncore_cpu_setup(void *dummy) | ||
1286 | { | ||
1287 | uncore_cpu_starting(smp_processor_id(), true); | ||
1288 | } | ||
1289 | |||
1290 | /* Lazy to avoid allocation of a few bytes for the normal case */ | ||
1291 | static __initdata DECLARE_BITMAP(packages, MAX_LOCAL_APIC); | ||
1292 | |||
1293 | static int __init uncore_cpumask_init(bool msr) | ||
1294 | { | ||
1295 | unsigned int cpu; | ||
1296 | |||
1297 | for_each_online_cpu(cpu) { | ||
1298 | unsigned int pkg = topology_logical_package_id(cpu); | ||
1299 | int ret; | ||
1300 | |||
1301 | if (test_and_set_bit(pkg, packages)) | ||
1302 | continue; | ||
1303 | /* | ||
1304 | * The first online cpu of each package allocates and takes | ||
1305 | * the refcounts for all other online cpus in that package. | ||
1306 | * If msrs are not enabled no allocation is required. | ||
1307 | */ | ||
1308 | if (msr) { | ||
1309 | ret = uncore_cpu_prepare(cpu); | ||
1310 | if (ret) | ||
1311 | return ret; | ||
1312 | } | ||
1313 | uncore_event_init_cpu(cpu); | ||
1314 | smp_call_function_single(cpu, uncore_cpu_setup, NULL, 1); | ||
1315 | } | ||
1316 | __register_cpu_notifier(&uncore_cpu_nb); | ||
1317 | return 0; | ||
1318 | } | ||
1319 | |||
1320 | #define X86_UNCORE_MODEL_MATCH(model, init) \ | 1256 | #define X86_UNCORE_MODEL_MATCH(model, init) \ |
1321 | { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&init } | 1257 | { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&init } |
1322 | 1258 | ||
@@ -1440,11 +1376,33 @@ static int __init intel_uncore_init(void) | |||
1440 | if (cret && pret) | 1376 | if (cret && pret) |
1441 | return -ENODEV; | 1377 | return -ENODEV; |
1442 | 1378 | ||
1443 | cpu_notifier_register_begin(); | 1379 | /* |
1444 | ret = uncore_cpumask_init(!cret); | 1380 | * Install callbacks. Core will call them for each online cpu. |
1445 | if (ret) | 1381 | * |
1446 | goto err; | 1382 | * The first online cpu of each package allocates and takes |
1447 | cpu_notifier_register_done(); | 1383 | * the refcounts for all other online cpus in that package. |
1384 | * If msrs are not enabled no allocation is required and | ||
1385 | * uncore_cpu_prepare() is not called for each online cpu. | ||
1386 | */ | ||
1387 | if (!cret) { | ||
1388 | ret = cpuhp_setup_state(CPUHP_PERF_X86_UNCORE_PREP, | ||
1389 | "PERF_X86_UNCORE_PREP", | ||
1390 | uncore_cpu_prepare, NULL); | ||
1391 | if (ret) | ||
1392 | goto err; | ||
1393 | } else { | ||
1394 | cpuhp_setup_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP, | ||
1395 | "PERF_X86_UNCORE_PREP", | ||
1396 | uncore_cpu_prepare, NULL); | ||
1397 | } | ||
1398 | first_init = 1; | ||
1399 | cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_STARTING, | ||
1400 | "AP_PERF_X86_UNCORE_STARTING", | ||
1401 | uncore_cpu_starting, uncore_cpu_dying); | ||
1402 | first_init = 0; | ||
1403 | cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE, | ||
1404 | "AP_PERF_X86_UNCORE_ONLINE", | ||
1405 | uncore_event_cpu_online, uncore_event_cpu_offline); | ||
1448 | return 0; | 1406 | return 0; |
1449 | 1407 | ||
1450 | err: | 1408 | err: |
@@ -1452,17 +1410,16 @@ err: | |||
1452 | on_each_cpu_mask(&uncore_cpu_mask, uncore_exit_boxes, NULL, 1); | 1410 | on_each_cpu_mask(&uncore_cpu_mask, uncore_exit_boxes, NULL, 1); |
1453 | uncore_types_exit(uncore_msr_uncores); | 1411 | uncore_types_exit(uncore_msr_uncores); |
1454 | uncore_pci_exit(); | 1412 | uncore_pci_exit(); |
1455 | cpu_notifier_register_done(); | ||
1456 | return ret; | 1413 | return ret; |
1457 | } | 1414 | } |
1458 | module_init(intel_uncore_init); | 1415 | module_init(intel_uncore_init); |
1459 | 1416 | ||
1460 | static void __exit intel_uncore_exit(void) | 1417 | static void __exit intel_uncore_exit(void) |
1461 | { | 1418 | { |
1462 | cpu_notifier_register_begin(); | 1419 | cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_ONLINE); |
1463 | __unregister_cpu_notifier(&uncore_cpu_nb); | 1420 | cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_STARTING); |
1421 | cpuhp_remove_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP); | ||
1464 | uncore_types_exit(uncore_msr_uncores); | 1422 | uncore_types_exit(uncore_msr_uncores); |
1465 | uncore_pci_exit(); | 1423 | uncore_pci_exit(); |
1466 | cpu_notifier_register_done(); | ||
1467 | } | 1424 | } |
1468 | module_exit(intel_uncore_exit); | 1425 | module_exit(intel_uncore_exit); |
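The uncore init hunk also shows the _nocalls setup variant: when the MSR uncores are not in use, the prepare callback still has to be installed so that CPUs hotplugged later go through it, but it must not run for the CPUs that are already online. In sketch form, around a hypothetical condition and state constant:

static int __init mydrv_init(void)
{
	int ret = 0;

	if (need_prepare_now) {
		/* Runs mydrv_cpu_prepare() for every CPU already online. */
		ret = cpuhp_setup_state(MYDRV_PREP, "MYDRV_PREP",
					mydrv_cpu_prepare, NULL);
	} else {
		/* Installed for future hotplug only; nothing runs now. */
		cpuhp_setup_state_nocalls(MYDRV_PREP, "MYDRV_PREP",
					  mydrv_cpu_prepare, NULL);
	}
	return ret;
}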
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c index cefacbad1531..456316f6c868 100644 --- a/arch/x86/kernel/apb_timer.c +++ b/arch/x86/kernel/apb_timer.c | |||
@@ -215,26 +215,18 @@ void apbt_setup_secondary_clock(void) | |||
215 | * cpu timers during the offline process due to the ordering of notification. | 215 | * cpu timers during the offline process due to the ordering of notification. |
216 | * the extra interrupt is harmless. | 216 | * the extra interrupt is harmless. |
217 | */ | 217 | */ |
218 | static int apbt_cpuhp_notify(struct notifier_block *n, | 218 | static int apbt_cpu_dead(unsigned int cpu) |
219 | unsigned long action, void *hcpu) | ||
220 | { | 219 | { |
221 | unsigned long cpu = (unsigned long)hcpu; | ||
222 | struct apbt_dev *adev = &per_cpu(cpu_apbt_dev, cpu); | 220 | struct apbt_dev *adev = &per_cpu(cpu_apbt_dev, cpu); |
223 | 221 | ||
224 | switch (action & ~CPU_TASKS_FROZEN) { | 222 | dw_apb_clockevent_pause(adev->timer); |
225 | case CPU_DEAD: | 223 | if (system_state == SYSTEM_RUNNING) { |
226 | dw_apb_clockevent_pause(adev->timer); | 224 | pr_debug("skipping APBT CPU %u offline\n", cpu); |
227 | if (system_state == SYSTEM_RUNNING) { | 225 | } else { |
228 | pr_debug("skipping APBT CPU %lu offline\n", cpu); | 226 | pr_debug("APBT clockevent for cpu %u offline\n", cpu); |
229 | } else { | 227 | dw_apb_clockevent_stop(adev->timer); |
230 | pr_debug("APBT clockevent for cpu %lu offline\n", cpu); | ||
231 | dw_apb_clockevent_stop(adev->timer); | ||
232 | } | ||
233 | break; | ||
234 | default: | ||
235 | pr_debug("APBT notified %lu, no action\n", action); | ||
236 | } | 228 | } |
237 | return NOTIFY_OK; | 229 | return 0; |
238 | } | 230 | } |
239 | 231 | ||
240 | static __init int apbt_late_init(void) | 232 | static __init int apbt_late_init(void) |
@@ -242,9 +234,8 @@ static __init int apbt_late_init(void) | |||
242 | if (intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT || | 234 | if (intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT || |
243 | !apb_timer_block_enabled) | 235 | !apb_timer_block_enabled) |
244 | return 0; | 236 | return 0; |
245 | /* This notifier should be called after workqueue is ready */ | 237 | return cpuhp_setup_state(CPUHP_X86_APB_DEAD, "X86_APB_DEAD", NULL, |
246 | hotcpu_notifier(apbt_cpuhp_notify, -20); | 238 | apbt_cpu_dead); |
247 | return 0; | ||
248 | } | 239 | } |
249 | fs_initcall(apbt_late_init); | 240 | fs_initcall(apbt_late_init); |
250 | #else | 241 | #else |
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c index 24170d0809ba..6368fa69d2af 100644 --- a/arch/x86/kernel/apic/x2apic_cluster.c +++ b/arch/x86/kernel/apic/x2apic_cluster.c | |||
@@ -152,68 +152,48 @@ static void init_x2apic_ldr(void) | |||
152 | } | 152 | } |
153 | } | 153 | } |
154 | 154 | ||
155 | /* | 155 | /* |
156 | * At CPU state changes, update the x2apic cluster sibling info. | 156 | * At CPU state changes, update the x2apic cluster sibling info. |
157 | */ | 157 | */ |
158 | static int | 158 | int x2apic_prepare_cpu(unsigned int cpu) |
159 | update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu) | ||
160 | { | 159 | { |
161 | unsigned int this_cpu = (unsigned long)hcpu; | 160 | if (!zalloc_cpumask_var(&per_cpu(cpus_in_cluster, cpu), GFP_KERNEL)) |
162 | unsigned int cpu; | 161 | return -ENOMEM; |
163 | int err = 0; | 162 | |
164 | 163 | if (!zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL)) { | |
165 | switch (action) { | 164 | free_cpumask_var(per_cpu(cpus_in_cluster, cpu)); |
166 | case CPU_UP_PREPARE: | 165 | return -ENOMEM; |
167 | if (!zalloc_cpumask_var(&per_cpu(cpus_in_cluster, this_cpu), | ||
168 | GFP_KERNEL)) { | ||
169 | err = -ENOMEM; | ||
170 | } else if (!zalloc_cpumask_var(&per_cpu(ipi_mask, this_cpu), | ||
171 | GFP_KERNEL)) { | ||
172 | free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu)); | ||
173 | err = -ENOMEM; | ||
174 | } | ||
175 | break; | ||
176 | case CPU_UP_CANCELED: | ||
177 | case CPU_UP_CANCELED_FROZEN: | ||
178 | case CPU_DEAD: | ||
179 | for_each_online_cpu(cpu) { | ||
180 | if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu)) | ||
181 | continue; | ||
182 | cpumask_clear_cpu(this_cpu, per_cpu(cpus_in_cluster, cpu)); | ||
183 | cpumask_clear_cpu(cpu, per_cpu(cpus_in_cluster, this_cpu)); | ||
184 | } | ||
185 | free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu)); | ||
186 | free_cpumask_var(per_cpu(ipi_mask, this_cpu)); | ||
187 | break; | ||
188 | } | 166 | } |
189 | 167 | ||
190 | return notifier_from_errno(err); | 168 | return 0; |
191 | } | 169 | } |
192 | 170 | ||
193 | static struct notifier_block x2apic_cpu_notifier = { | 171 | int x2apic_dead_cpu(unsigned int this_cpu) |
194 | .notifier_call = update_clusterinfo, | ||
195 | }; | ||
196 | |||
197 | static int x2apic_init_cpu_notifier(void) | ||
198 | { | 172 | { |
199 | int cpu = smp_processor_id(); | 173 | int cpu; |
200 | |||
201 | zalloc_cpumask_var(&per_cpu(cpus_in_cluster, cpu), GFP_KERNEL); | ||
202 | zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL); | ||
203 | 174 | ||
204 | BUG_ON(!per_cpu(cpus_in_cluster, cpu) || !per_cpu(ipi_mask, cpu)); | 175 | for_each_online_cpu(cpu) { |
205 | 176 | if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu)) | |
206 | cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, cpu)); | 177 | continue; |
207 | register_hotcpu_notifier(&x2apic_cpu_notifier); | 178 | cpumask_clear_cpu(this_cpu, per_cpu(cpus_in_cluster, cpu)); |
208 | return 1; | 179 | cpumask_clear_cpu(cpu, per_cpu(cpus_in_cluster, this_cpu)); |
180 | } | ||
181 | free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu)); | ||
182 | free_cpumask_var(per_cpu(ipi_mask, this_cpu)); | ||
183 | return 0; | ||
209 | } | 184 | } |
210 | 185 | ||
211 | static int x2apic_cluster_probe(void) | 186 | static int x2apic_cluster_probe(void) |
212 | { | 187 | { |
213 | if (x2apic_mode) | 188 | int cpu = smp_processor_id(); |
214 | return x2apic_init_cpu_notifier(); | 189 | |
215 | else | 190 | if (!x2apic_mode) |
216 | return 0; | 191 | return 0; |
192 | |||
193 | cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, cpu)); | ||
194 | cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "X2APIC_PREPARE", | ||
195 | x2apic_prepare_cpu, x2apic_dead_cpu); | ||
196 | return 1; | ||
217 | } | 197 | } |
218 | 198 | ||
219 | static const struct cpumask *x2apic_cluster_target_cpus(void) | 199 | static const struct cpumask *x2apic_cluster_target_cpus(void) |
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index f112af7aa62e..3d747070fe67 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c | |||
@@ -710,31 +710,29 @@ static void hpet_work(struct work_struct *w) | |||
710 | complete(&hpet_work->complete); | 710 | complete(&hpet_work->complete); |
711 | } | 711 | } |
712 | 712 | ||
713 | static int hpet_cpuhp_notify(struct notifier_block *n, | 713 | static int hpet_cpuhp_online(unsigned int cpu) |
714 | unsigned long action, void *hcpu) | ||
715 | { | 714 | { |
716 | unsigned long cpu = (unsigned long)hcpu; | ||
717 | struct hpet_work_struct work; | 715 | struct hpet_work_struct work; |
716 | |||
717 | INIT_DELAYED_WORK_ONSTACK(&work.work, hpet_work); | ||
718 | init_completion(&work.complete); | ||
719 | /* FIXME: add schedule_work_on() */ | ||
720 | schedule_delayed_work_on(cpu, &work.work, 0); | ||
721 | wait_for_completion(&work.complete); | ||
722 | destroy_delayed_work_on_stack(&work.work); | ||
723 | return 0; | ||
724 | } | ||
725 | |||
726 | static int hpet_cpuhp_dead(unsigned int cpu) | ||
727 | { | ||
718 | struct hpet_dev *hdev = per_cpu(cpu_hpet_dev, cpu); | 728 | struct hpet_dev *hdev = per_cpu(cpu_hpet_dev, cpu); |
719 | 729 | ||
720 | switch (action & ~CPU_TASKS_FROZEN) { | 730 | if (!hdev) |
721 | case CPU_ONLINE: | 731 | return 0; |
722 | INIT_DELAYED_WORK_ONSTACK(&work.work, hpet_work); | 732 | free_irq(hdev->irq, hdev); |
723 | init_completion(&work.complete); | 733 | hdev->flags &= ~HPET_DEV_USED; |
724 | /* FIXME: add schedule_work_on() */ | 734 | per_cpu(cpu_hpet_dev, cpu) = NULL; |
725 | schedule_delayed_work_on(cpu, &work.work, 0); | 735 | return 0; |
726 | wait_for_completion(&work.complete); | ||
727 | destroy_delayed_work_on_stack(&work.work); | ||
728 | break; | ||
729 | case CPU_DEAD: | ||
730 | if (hdev) { | ||
731 | free_irq(hdev->irq, hdev); | ||
732 | hdev->flags &= ~HPET_DEV_USED; | ||
733 | per_cpu(cpu_hpet_dev, cpu) = NULL; | ||
734 | } | ||
735 | break; | ||
736 | } | ||
737 | return NOTIFY_OK; | ||
738 | } | 736 | } |
739 | #else | 737 | #else |
740 | 738 | ||
@@ -750,11 +748,8 @@ static void hpet_reserve_msi_timers(struct hpet_data *hd) | |||
750 | } | 748 | } |
751 | #endif | 749 | #endif |
752 | 750 | ||
753 | static int hpet_cpuhp_notify(struct notifier_block *n, | 751 | #define hpet_cpuhp_online NULL |
754 | unsigned long action, void *hcpu) | 752 | #define hpet_cpuhp_dead NULL |
755 | { | ||
756 | return NOTIFY_OK; | ||
757 | } | ||
758 | 753 | ||
759 | #endif | 754 | #endif |
760 | 755 | ||
@@ -931,7 +926,7 @@ out_nohpet: | |||
931 | */ | 926 | */ |
932 | static __init int hpet_late_init(void) | 927 | static __init int hpet_late_init(void) |
933 | { | 928 | { |
934 | int cpu; | 929 | int ret; |
935 | 930 | ||
936 | if (boot_hpet_disable) | 931 | if (boot_hpet_disable) |
937 | return -ENODEV; | 932 | return -ENODEV; |
@@ -961,16 +956,20 @@ static __init int hpet_late_init(void) | |||
961 | if (boot_cpu_has(X86_FEATURE_ARAT)) | 956 | if (boot_cpu_has(X86_FEATURE_ARAT)) |
962 | return 0; | 957 | return 0; |
963 | 958 | ||
964 | cpu_notifier_register_begin(); | ||
965 | for_each_online_cpu(cpu) { | ||
966 | hpet_cpuhp_notify(NULL, CPU_ONLINE, (void *)(long)cpu); | ||
967 | } | ||
968 | |||
969 | /* This notifier should be called after workqueue is ready */ | 959 | /* This notifier should be called after workqueue is ready */ |
970 | __hotcpu_notifier(hpet_cpuhp_notify, -20); | 960 | ret = cpuhp_setup_state(CPUHP_AP_X86_HPET_ONLINE, "AP_X86_HPET_ONLINE", |
971 | cpu_notifier_register_done(); | 961 | hpet_cpuhp_online, NULL); |
972 | 962 | if (ret) | |
963 | return ret; | ||
964 | ret = cpuhp_setup_state(CPUHP_X86_HPET_DEAD, "X86_HPET_DEAD", NULL, | ||
965 | hpet_cpuhp_dead); | ||
966 | if (ret) | ||
967 | goto err_cpuhp; | ||
973 | return 0; | 968 | return 0; |
969 | |||
970 | err_cpuhp: | ||
971 | cpuhp_remove_state(CPUHP_AP_X86_HPET_ONLINE); | ||
972 | return ret; | ||
974 | } | 973 | } |
975 | fs_initcall(hpet_late_init); | 974 | fs_initcall(hpet_late_init); |
976 | 975 | ||
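hpet_late_init() above also illustrates one-sided states: the online state is registered with a NULL teardown and the dead state with a NULL startup, so each callback only covers the direction it cares about. A sketch with assumed state constants (MYDRV_AP_ONLINE and MYDRV_DEAD are illustrative, not real enum cpuhp_state entries):

static __init int mydrv_late_init(void)
{
	int ret;

	/* Online work only; nothing to undo when the CPU goes away. */
	ret = cpuhp_setup_state(MYDRV_AP_ONLINE, "MYDRV_AP_ONLINE",
				mydrv_cpu_online, NULL);
	if (ret)
		return ret;

	/* Cleanup only, run on a control CPU after the CPU is dead. */
	ret = cpuhp_setup_state(MYDRV_DEAD, "MYDRV_DEAD",
				NULL, mydrv_cpu_dead);
	if (ret)
		cpuhp_remove_state(MYDRV_AP_ONLINE);
	return ret;
}
fs_initcall(mydrv_late_init);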
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c index 9b0185fbe3eb..654f6c66fe45 100644 --- a/arch/x86/kernel/tboot.c +++ b/arch/x86/kernel/tboot.c | |||
@@ -323,25 +323,16 @@ static int tboot_wait_for_aps(int num_aps) | |||
323 | return !(atomic_read((atomic_t *)&tboot->num_in_wfs) == num_aps); | 323 | return !(atomic_read((atomic_t *)&tboot->num_in_wfs) == num_aps); |
324 | } | 324 | } |
325 | 325 | ||
326 | static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action, | 326 | static int tboot_dying_cpu(unsigned int cpu) |
327 | void *hcpu) | ||
328 | { | 327 | { |
329 | switch (action) { | 328 | atomic_inc(&ap_wfs_count); |
330 | case CPU_DYING: | 329 | if (num_online_cpus() == 1) { |
331 | atomic_inc(&ap_wfs_count); | 330 | if (tboot_wait_for_aps(atomic_read(&ap_wfs_count))) |
332 | if (num_online_cpus() == 1) | 331 | return -EBUSY; |
333 | if (tboot_wait_for_aps(atomic_read(&ap_wfs_count))) | ||
334 | return NOTIFY_BAD; | ||
335 | break; | ||
336 | } | 332 | } |
337 | return NOTIFY_OK; | 333 | return 0; |
338 | } | 334 | } |
339 | 335 | ||
340 | static struct notifier_block tboot_cpu_notifier = | ||
341 | { | ||
342 | .notifier_call = tboot_cpu_callback, | ||
343 | }; | ||
344 | |||
345 | #ifdef CONFIG_DEBUG_FS | 336 | #ifdef CONFIG_DEBUG_FS |
346 | 337 | ||
347 | #define TBOOT_LOG_UUID { 0x26, 0x25, 0x19, 0xc0, 0x30, 0x6b, 0xb4, 0x4d, \ | 338 | #define TBOOT_LOG_UUID { 0x26, 0x25, 0x19, 0xc0, 0x30, 0x6b, 0xb4, 0x4d, \ |
@@ -417,8 +408,8 @@ static __init int tboot_late_init(void) | |||
417 | tboot_create_trampoline(); | 408 | tboot_create_trampoline(); |
418 | 409 | ||
419 | atomic_set(&ap_wfs_count, 0); | 410 | atomic_set(&ap_wfs_count, 0); |
420 | register_hotcpu_notifier(&tboot_cpu_notifier); | 411 | cpuhp_setup_state(CPUHP_AP_X86_TBOOT_DYING, "AP_X86_TBOOT_DYING", NULL, |
421 | 412 | tboot_dying_cpu); | |
422 | #ifdef CONFIG_DEBUG_FS | 413 | #ifdef CONFIG_DEBUG_FS |
423 | debugfs_create_file("tboot_log", S_IRUSR, | 414 | debugfs_create_file("tboot_log", S_IRUSR, |
424 | arch_debugfs_dir, NULL, &tboot_log_fops); | 415 | arch_debugfs_dir, NULL, &tboot_log_fops); |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index b2766723c951..45608a7da9b3 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -5552,9 +5552,10 @@ int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port) | |||
5552 | } | 5552 | } |
5553 | EXPORT_SYMBOL_GPL(kvm_fast_pio_out); | 5553 | EXPORT_SYMBOL_GPL(kvm_fast_pio_out); |
5554 | 5554 | ||
5555 | static void tsc_bad(void *info) | 5555 | static int kvmclock_cpu_down_prep(unsigned int cpu) |
5556 | { | 5556 | { |
5557 | __this_cpu_write(cpu_tsc_khz, 0); | 5557 | __this_cpu_write(cpu_tsc_khz, 0); |
5558 | return 0; | ||
5558 | } | 5559 | } |
5559 | 5560 | ||
5560 | static void tsc_khz_changed(void *data) | 5561 | static void tsc_khz_changed(void *data) |
@@ -5659,35 +5660,18 @@ static struct notifier_block kvmclock_cpufreq_notifier_block = { | |||
5659 | .notifier_call = kvmclock_cpufreq_notifier | 5660 | .notifier_call = kvmclock_cpufreq_notifier |
5660 | }; | 5661 | }; |
5661 | 5662 | ||
5662 | static int kvmclock_cpu_notifier(struct notifier_block *nfb, | 5663 | static int kvmclock_cpu_online(unsigned int cpu) |
5663 | unsigned long action, void *hcpu) | ||
5664 | { | 5664 | { |
5665 | unsigned int cpu = (unsigned long)hcpu; | 5665 | tsc_khz_changed(NULL); |
5666 | 5666 | return 0; | |
5667 | switch (action) { | ||
5668 | case CPU_ONLINE: | ||
5669 | case CPU_DOWN_FAILED: | ||
5670 | smp_call_function_single(cpu, tsc_khz_changed, NULL, 1); | ||
5671 | break; | ||
5672 | case CPU_DOWN_PREPARE: | ||
5673 | smp_call_function_single(cpu, tsc_bad, NULL, 1); | ||
5674 | break; | ||
5675 | } | ||
5676 | return NOTIFY_OK; | ||
5677 | } | 5667 | } |
5678 | 5668 | ||
5679 | static struct notifier_block kvmclock_cpu_notifier_block = { | ||
5680 | .notifier_call = kvmclock_cpu_notifier, | ||
5681 | .priority = -INT_MAX | ||
5682 | }; | ||
5683 | |||
5684 | static void kvm_timer_init(void) | 5669 | static void kvm_timer_init(void) |
5685 | { | 5670 | { |
5686 | int cpu; | 5671 | int cpu; |
5687 | 5672 | ||
5688 | max_tsc_khz = tsc_khz; | 5673 | max_tsc_khz = tsc_khz; |
5689 | 5674 | ||
5690 | cpu_notifier_register_begin(); | ||
5691 | if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { | 5675 | if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { |
5692 | #ifdef CONFIG_CPU_FREQ | 5676 | #ifdef CONFIG_CPU_FREQ |
5693 | struct cpufreq_policy policy; | 5677 | struct cpufreq_policy policy; |
@@ -5702,12 +5686,9 @@ static void kvm_timer_init(void) | |||
5702 | CPUFREQ_TRANSITION_NOTIFIER); | 5686 | CPUFREQ_TRANSITION_NOTIFIER); |
5703 | } | 5687 | } |
5704 | pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz); | 5688 | pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz); |
5705 | for_each_online_cpu(cpu) | ||
5706 | smp_call_function_single(cpu, tsc_khz_changed, NULL, 1); | ||
5707 | |||
5708 | __register_hotcpu_notifier(&kvmclock_cpu_notifier_block); | ||
5709 | cpu_notifier_register_done(); | ||
5710 | 5689 | ||
5690 | cpuhp_setup_state(CPUHP_AP_X86_KVM_CLK_ONLINE, "AP_X86_KVM_CLK_ONLINE", | ||
5691 | kvmclock_cpu_online, kvmclock_cpu_down_prep); | ||
5711 | } | 5692 | } |
5712 | 5693 | ||
5713 | static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu); | 5694 | static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu); |
@@ -5896,7 +5877,7 @@ void kvm_arch_exit(void) | |||
5896 | if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) | 5877 | if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) |
5897 | cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block, | 5878 | cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block, |
5898 | CPUFREQ_TRANSITION_NOTIFIER); | 5879 | CPUFREQ_TRANSITION_NOTIFIER); |
5899 | unregister_hotcpu_notifier(&kvmclock_cpu_notifier_block); | 5880 | cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE); |
5900 | #ifdef CONFIG_X86_64 | 5881 | #ifdef CONFIG_X86_64 |
5901 | pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier); | 5882 | pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier); |
5902 | #endif | 5883 | #endif |
diff --git a/arch/xtensa/kernel/perf_event.c b/arch/xtensa/kernel/perf_event.c index ef90479e0397..0fecc8a2c0b5 100644 --- a/arch/xtensa/kernel/perf_event.c +++ b/arch/xtensa/kernel/perf_event.c | |||
@@ -404,7 +404,7 @@ static struct pmu xtensa_pmu = { | |||
404 | .read = xtensa_pmu_read, | 404 | .read = xtensa_pmu_read, |
405 | }; | 405 | }; |
406 | 406 | ||
407 | static void xtensa_pmu_setup(void) | 407 | static int xtensa_pmu_setup(int cpu) |
408 | { | 408 | { |
409 | unsigned i; | 409 | unsigned i; |
410 | 410 | ||
@@ -413,21 +413,7 @@ static void xtensa_pmu_setup(void) | |||
413 | set_er(0, XTENSA_PMU_PMCTRL(i)); | 413 | set_er(0, XTENSA_PMU_PMCTRL(i)); |
414 | set_er(get_er(XTENSA_PMU_PMSTAT(i)), XTENSA_PMU_PMSTAT(i)); | 414 | set_er(get_er(XTENSA_PMU_PMSTAT(i)), XTENSA_PMU_PMSTAT(i)); |
415 | } | 415 | } |
416 | } | 416 | return 0; |
417 | |||
418 | static int xtensa_pmu_notifier(struct notifier_block *self, | ||
419 | unsigned long action, void *data) | ||
420 | { | ||
421 | switch (action & ~CPU_TASKS_FROZEN) { | ||
422 | case CPU_STARTING: | ||
423 | xtensa_pmu_setup(); | ||
424 | break; | ||
425 | |||
426 | default: | ||
427 | break; | ||
428 | } | ||
429 | |||
430 | return NOTIFY_OK; | ||
431 | } | 417 | } |
432 | 418 | ||
433 | static int __init xtensa_pmu_init(void) | 419 | static int __init xtensa_pmu_init(void) |
@@ -435,7 +421,13 @@ static int __init xtensa_pmu_init(void) | |||
435 | int ret; | 421 | int ret; |
436 | int irq = irq_create_mapping(NULL, XCHAL_PROFILING_INTERRUPT); | 422 | int irq = irq_create_mapping(NULL, XCHAL_PROFILING_INTERRUPT); |
437 | 423 | ||
438 | perf_cpu_notifier(xtensa_pmu_notifier); | 424 | ret = cpuhp_setup_state(CPUHP_AP_PERF_XTENSA_STARTING, |
425 | "AP_PERF_XTENSA_STARTING", xtensa_pmu_setup, | ||
426 | NULL); | ||
427 | if (ret) { | ||
428 | pr_err("xtensa_pmu: failed to register CPU-hotplug.\n"); | ||
429 | return ret; | ||
430 | } | ||
439 | #if XTENSA_FAKE_NMI | 431 | #if XTENSA_FAKE_NMI |
440 | enable_irq(irq); | 432 | enable_irq(irq); |
441 | #else | 433 | #else |
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c index 0ca14ac7bb28..0553aeebb228 100644 --- a/drivers/acpi/processor_driver.c +++ b/drivers/acpi/processor_driver.c | |||
@@ -118,12 +118,13 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb, | |||
118 | struct acpi_device *device; | 118 | struct acpi_device *device; |
119 | action &= ~CPU_TASKS_FROZEN; | 119 | action &= ~CPU_TASKS_FROZEN; |
120 | 120 | ||
121 | /* | 121 | switch (action) { |
122 | * CPU_STARTING and CPU_DYING must not sleep. Return here since | 122 | case CPU_ONLINE: |
123 | * acpi_bus_get_device() may sleep. | 123 | case CPU_DEAD: |
124 | */ | 124 | break; |
125 | if (action == CPU_STARTING || action == CPU_DYING) | 125 | default: |
126 | return NOTIFY_DONE; | 126 | return NOTIFY_DONE; |
127 | } | ||
127 | 128 | ||
128 | if (!pr || acpi_bus_get_device(pr->handle, &device)) | 129 | if (!pr || acpi_bus_get_device(pr->handle, &device)) |
129 | return NOTIFY_DONE; | 130 | return NOTIFY_DONE; |
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c index a49b28378d59..5755907f836f 100644 --- a/drivers/bus/arm-cci.c +++ b/drivers/bus/arm-cci.c | |||
@@ -144,12 +144,15 @@ struct cci_pmu { | |||
144 | int num_cntrs; | 144 | int num_cntrs; |
145 | atomic_t active_events; | 145 | atomic_t active_events; |
146 | struct mutex reserve_mutex; | 146 | struct mutex reserve_mutex; |
147 | struct notifier_block cpu_nb; | 147 | struct list_head entry; |
148 | cpumask_t cpus; | 148 | cpumask_t cpus; |
149 | }; | 149 | }; |
150 | 150 | ||
151 | #define to_cci_pmu(c) (container_of(c, struct cci_pmu, pmu)) | 151 | #define to_cci_pmu(c) (container_of(c, struct cci_pmu, pmu)) |
152 | 152 | ||
153 | static DEFINE_MUTEX(cci_pmu_mutex); | ||
154 | static LIST_HEAD(cci_pmu_list); | ||
155 | |||
153 | enum cci_models { | 156 | enum cci_models { |
154 | #ifdef CONFIG_ARM_CCI400_PMU | 157 | #ifdef CONFIG_ARM_CCI400_PMU |
155 | CCI400_R0, | 158 | CCI400_R0, |
@@ -1503,31 +1506,26 @@ static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev) | |||
1503 | return perf_pmu_register(&cci_pmu->pmu, name, -1); | 1506 | return perf_pmu_register(&cci_pmu->pmu, name, -1); |
1504 | } | 1507 | } |
1505 | 1508 | ||
1506 | static int cci_pmu_cpu_notifier(struct notifier_block *self, | 1509 | static int cci_pmu_offline_cpu(unsigned int cpu) |
1507 | unsigned long action, void *hcpu) | ||
1508 | { | 1510 | { |
1509 | struct cci_pmu *cci_pmu = container_of(self, | 1511 | struct cci_pmu *cci_pmu; |
1510 | struct cci_pmu, cpu_nb); | ||
1511 | unsigned int cpu = (long)hcpu; | ||
1512 | unsigned int target; | 1512 | unsigned int target; |
1513 | 1513 | ||
1514 | switch (action & ~CPU_TASKS_FROZEN) { | 1514 | mutex_lock(&cci_pmu_mutex); |
1515 | case CPU_DOWN_PREPARE: | 1515 | list_for_each_entry(cci_pmu, &cci_pmu_list, entry) { |
1516 | if (!cpumask_test_and_clear_cpu(cpu, &cci_pmu->cpus)) | 1516 | if (!cpumask_test_and_clear_cpu(cpu, &cci_pmu->cpus)) |
1517 | break; | 1517 | continue; |
1518 | target = cpumask_any_but(cpu_online_mask, cpu); | 1518 | target = cpumask_any_but(cpu_online_mask, cpu); |
1519 | if (target >= nr_cpu_ids) // UP, last CPU | 1519 | if (target >= nr_cpu_ids) |
1520 | break; | 1520 | continue; |
1521 | /* | 1521 | /* |
1522 | * TODO: migrate context once core races on event->ctx have | 1522 | * TODO: migrate context once core races on event->ctx have |
1523 | * been fixed. | 1523 | * been fixed. |
1524 | */ | 1524 | */ |
1525 | cpumask_set_cpu(target, &cci_pmu->cpus); | 1525 | cpumask_set_cpu(target, &cci_pmu->cpus); |
1526 | default: | ||
1527 | break; | ||
1528 | } | 1526 | } |
1529 | 1527 | mutex_unlock(&cci_pmu_mutex); | |
1530 | return NOTIFY_OK; | 1528 | return 0; |
1531 | } | 1529 | } |
1532 | 1530 | ||
1533 | static struct cci_pmu_model cci_pmu_models[] = { | 1531 | static struct cci_pmu_model cci_pmu_models[] = { |
@@ -1766,24 +1764,13 @@ static int cci_pmu_probe(struct platform_device *pdev) | |||
1766 | atomic_set(&cci_pmu->active_events, 0); | 1764 | atomic_set(&cci_pmu->active_events, 0); |
1767 | cpumask_set_cpu(smp_processor_id(), &cci_pmu->cpus); | 1765 | cpumask_set_cpu(smp_processor_id(), &cci_pmu->cpus); |
1768 | 1766 | ||
1769 | cci_pmu->cpu_nb = (struct notifier_block) { | 1767 | ret = cci_pmu_init(cci_pmu, pdev); |
1770 | .notifier_call = cci_pmu_cpu_notifier, | ||
1771 | /* | ||
1772 | * to migrate uncore events, our notifier should be executed | ||
1773 | * before perf core's notifier. | ||
1774 | */ | ||
1775 | .priority = CPU_PRI_PERF + 1, | ||
1776 | }; | ||
1777 | |||
1778 | ret = register_cpu_notifier(&cci_pmu->cpu_nb); | ||
1779 | if (ret) | 1768 | if (ret) |
1780 | return ret; | 1769 | return ret; |
1781 | 1770 | ||
1782 | ret = cci_pmu_init(cci_pmu, pdev); | 1771 | mutex_lock(&cci_pmu_mutex); |
1783 | if (ret) { | 1772 | list_add(&cci_pmu->entry, &cci_pmu_list); |
1784 | unregister_cpu_notifier(&cci_pmu->cpu_nb); | 1773 | mutex_unlock(&cci_pmu_mutex); |
1785 | return ret; | ||
1786 | } | ||
1787 | 1774 | ||
1788 | pr_info("ARM %s PMU driver probed", cci_pmu->model->name); | 1775 | pr_info("ARM %s PMU driver probed", cci_pmu->model->name); |
1789 | return 0; | 1776 | return 0; |
@@ -1817,6 +1804,12 @@ static int __init cci_platform_init(void) | |||
1817 | { | 1804 | { |
1818 | int ret; | 1805 | int ret; |
1819 | 1806 | ||
1807 | ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE, | ||
1808 | "AP_PERF_ARM_CCI_ONLINE", NULL, | ||
1809 | cci_pmu_offline_cpu); | ||
1810 | if (ret) | ||
1811 | return ret; | ||
1812 | |||
1820 | ret = platform_driver_register(&cci_pmu_driver); | 1813 | ret = platform_driver_register(&cci_pmu_driver); |
1821 | if (ret) | 1814 | if (ret) |
1822 | return ret; | 1815 | return ret; |
diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c index acc3eb542c74..97a9185af433 100644 --- a/drivers/bus/arm-ccn.c +++ b/drivers/bus/arm-ccn.c | |||
@@ -167,7 +167,7 @@ struct arm_ccn_dt { | |||
167 | struct hrtimer hrtimer; | 167 | struct hrtimer hrtimer; |
168 | 168 | ||
169 | cpumask_t cpu; | 169 | cpumask_t cpu; |
170 | struct notifier_block cpu_nb; | 170 | struct list_head entry; |
171 | 171 | ||
172 | struct pmu pmu; | 172 | struct pmu pmu; |
173 | }; | 173 | }; |
@@ -189,6 +189,8 @@ struct arm_ccn { | |||
189 | struct arm_ccn_dt dt; | 189 | struct arm_ccn_dt dt; |
190 | }; | 190 | }; |
191 | 191 | ||
192 | static DEFINE_MUTEX(arm_ccn_mutex); | ||
193 | static LIST_HEAD(arm_ccn_list); | ||
192 | 194 | ||
193 | static int arm_ccn_node_to_xp(int node) | 195 | static int arm_ccn_node_to_xp(int node) |
194 | { | 196 | { |
@@ -1171,30 +1173,27 @@ static enum hrtimer_restart arm_ccn_pmu_timer_handler(struct hrtimer *hrtimer) | |||
1171 | } | 1173 | } |
1172 | 1174 | ||
1173 | 1175 | ||
1174 | static int arm_ccn_pmu_cpu_notifier(struct notifier_block *nb, | 1176 | static int arm_ccn_pmu_offline_cpu(unsigned int cpu) |
1175 | unsigned long action, void *hcpu) | ||
1176 | { | 1177 | { |
1177 | struct arm_ccn_dt *dt = container_of(nb, struct arm_ccn_dt, cpu_nb); | 1178 | struct arm_ccn_dt *dt; |
1178 | struct arm_ccn *ccn = container_of(dt, struct arm_ccn, dt); | ||
1179 | unsigned int cpu = (long)hcpu; /* for (long) see kernel/cpu.c */ | ||
1180 | unsigned int target; | 1179 | unsigned int target; |
1181 | 1180 | ||
1182 | switch (action & ~CPU_TASKS_FROZEN) { | 1181 | mutex_lock(&arm_ccn_mutex); |
1183 | case CPU_DOWN_PREPARE: | 1182 | list_for_each_entry(dt, &arm_ccn_list, entry) { |
1183 | struct arm_ccn *ccn = container_of(dt, struct arm_ccn, dt); | ||
1184 | |||
1184 | if (!cpumask_test_and_clear_cpu(cpu, &dt->cpu)) | 1185 | if (!cpumask_test_and_clear_cpu(cpu, &dt->cpu)) |
1185 | break; | 1186 | continue; |
1186 | target = cpumask_any_but(cpu_online_mask, cpu); | 1187 | target = cpumask_any_but(cpu_online_mask, cpu); |
1187 | if (target >= nr_cpu_ids) | 1188 | if (target >= nr_cpu_ids) |
1188 | break; | 1189 | continue; |
1189 | perf_pmu_migrate_context(&dt->pmu, cpu, target); | 1190 | perf_pmu_migrate_context(&dt->pmu, cpu, target); |
1190 | cpumask_set_cpu(target, &dt->cpu); | 1191 | cpumask_set_cpu(target, &dt->cpu); |
1191 | if (ccn->irq) | 1192 | if (ccn->irq) |
1192 | WARN_ON(irq_set_affinity_hint(ccn->irq, &dt->cpu) != 0); | 1193 | WARN_ON(irq_set_affinity_hint(ccn->irq, &dt->cpu) != 0); |
1193 | default: | ||
1194 | break; | ||
1195 | } | 1194 | } |
1196 | 1195 | mutex_unlock(&arm_ccn_mutex); | |
1197 | return NOTIFY_OK; | 1196 | return 0; |
1198 | } | 1197 | } |
1199 | 1198 | ||
1200 | 1199 | ||
@@ -1266,16 +1265,6 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn) | |||
1266 | /* Pick one CPU which we will use to collect data from CCN... */ | 1265 | /* Pick one CPU which we will use to collect data from CCN... */ |
1267 | cpumask_set_cpu(smp_processor_id(), &ccn->dt.cpu); | 1266 | cpumask_set_cpu(smp_processor_id(), &ccn->dt.cpu); |
1268 | 1267 | ||
1269 | /* | ||
1270 | * ... and change the selection when it goes offline. Priority is | ||
1271 | * picked to have a chance to migrate events before perf is notified. | ||
1272 | */ | ||
1273 | ccn->dt.cpu_nb.notifier_call = arm_ccn_pmu_cpu_notifier; | ||
1274 | ccn->dt.cpu_nb.priority = CPU_PRI_PERF + 1, | ||
1275 | err = register_cpu_notifier(&ccn->dt.cpu_nb); | ||
1276 | if (err) | ||
1277 | goto error_cpu_notifier; | ||
1278 | |||
1279 | /* Also make sure that the overflow interrupt is handled by this CPU */ | 1268 | /* Also make sure that the overflow interrupt is handled by this CPU */ |
1280 | if (ccn->irq) { | 1269 | if (ccn->irq) { |
1281 | err = irq_set_affinity_hint(ccn->irq, &ccn->dt.cpu); | 1270 | err = irq_set_affinity_hint(ccn->irq, &ccn->dt.cpu); |
@@ -1289,12 +1278,13 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn) | |||
1289 | if (err) | 1278 | if (err) |
1290 | goto error_pmu_register; | 1279 | goto error_pmu_register; |
1291 | 1280 | ||
1281 | mutex_lock(&arm_ccn_mutex); | ||
1282 | list_add(&ccn->dt.entry, &arm_ccn_list); | ||
1283 | mutex_unlock(&arm_ccn_mutex); | ||
1292 | return 0; | 1284 | return 0; |
1293 | 1285 | ||
1294 | error_pmu_register: | 1286 | error_pmu_register: |
1295 | error_set_affinity: | 1287 | error_set_affinity: |
1296 | unregister_cpu_notifier(&ccn->dt.cpu_nb); | ||
1297 | error_cpu_notifier: | ||
1298 | ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id); | 1288 | ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id); |
1299 | for (i = 0; i < ccn->num_xps; i++) | 1289 | for (i = 0; i < ccn->num_xps; i++) |
1300 | writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL); | 1290 | writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL); |
@@ -1306,9 +1296,12 @@ static void arm_ccn_pmu_cleanup(struct arm_ccn *ccn) | |||
1306 | { | 1296 | { |
1307 | int i; | 1297 | int i; |
1308 | 1298 | ||
1299 | mutex_lock(&arm_ccn_mutex); | ||
1300 | list_del(&ccn->dt.entry); | ||
1301 | mutex_unlock(&arm_ccn_mutex); | ||
1302 | |||
1309 | if (ccn->irq) | 1303 | if (ccn->irq) |
1310 | irq_set_affinity_hint(ccn->irq, NULL); | 1304 | irq_set_affinity_hint(ccn->irq, NULL); |
1311 | unregister_cpu_notifier(&ccn->dt.cpu_nb); | ||
1312 | for (i = 0; i < ccn->num_xps; i++) | 1305 | for (i = 0; i < ccn->num_xps; i++) |
1313 | writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL); | 1306 | writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL); |
1314 | writel(0, ccn->dt.base + CCN_DT_PMCR); | 1307 | writel(0, ccn->dt.base + CCN_DT_PMCR); |
@@ -1316,7 +1309,6 @@ static void arm_ccn_pmu_cleanup(struct arm_ccn *ccn) | |||
1316 | ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id); | 1309 | ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id); |
1317 | } | 1310 | } |
1318 | 1311 | ||
1319 | |||
1320 | static int arm_ccn_for_each_valid_region(struct arm_ccn *ccn, | 1312 | static int arm_ccn_for_each_valid_region(struct arm_ccn *ccn, |
1321 | int (*callback)(struct arm_ccn *ccn, int region, | 1313 | int (*callback)(struct arm_ccn *ccn, int region, |
1322 | void __iomem *base, u32 type, u32 id)) | 1314 | void __iomem *base, u32 type, u32 id)) |
@@ -1533,7 +1525,13 @@ static struct platform_driver arm_ccn_driver = { | |||
1533 | 1525 | ||
1534 | static int __init arm_ccn_init(void) | 1526 | static int __init arm_ccn_init(void) |
1535 | { | 1527 | { |
1536 | int i; | 1528 | int i, ret; |
1529 | |||
1530 | ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE, | ||
1531 | "AP_PERF_ARM_CCN_ONLINE", NULL, | ||
1532 | arm_ccn_pmu_offline_cpu); | ||
1533 | if (ret) | ||
1534 | return ret; | ||
1537 | 1535 | ||
1538 | for (i = 0; i < ARRAY_SIZE(arm_ccn_pmu_events); i++) | 1536 | for (i = 0; i < ARRAY_SIZE(arm_ccn_pmu_events); i++) |
1539 | arm_ccn_pmu_events_attrs[i] = &arm_ccn_pmu_events[i].attr.attr; | 1537 | arm_ccn_pmu_events_attrs[i] = &arm_ccn_pmu_events[i].attr.attr; |
@@ -1543,6 +1541,7 @@ static int __init arm_ccn_init(void) | |||
1543 | 1541 | ||
1544 | static void __exit arm_ccn_exit(void) | 1542 | static void __exit arm_ccn_exit(void) |
1545 | { | 1543 | { |
1544 | cpuhp_remove_state_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE); | ||
1546 | platform_driver_unregister(&arm_ccn_driver); | 1545 | platform_driver_unregister(&arm_ccn_driver); |
1547 | } | 1546 | } |
1548 | 1547 | ||
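
The arm-cci and arm-ccn hunks above share one pattern: the notifier_block embedded in each device instance gives way to a driver-global list protected by a mutex, and a single teardown-only hotplug state walks that list when a CPU goes down, moving event ownership (and, in the CCN case, the perf context) to another online CPU. Registration uses the _nocalls variant, so CPUs that are already online are not visited at setup time. A rough sketch of that pattern, with a hypothetical struct my_pmu standing in for cci_pmu / arm_ccn_dt and the CCN state slot reused purely for illustration:

struct my_pmu {
        struct pmu              pmu;
        cpumask_t               cpus;   /* CPU currently owning the events */
        struct list_head        entry;  /* linkage on the driver-global list */
};

static DEFINE_MUTEX(my_pmu_mutex);
static LIST_HEAD(my_pmu_list);

static int my_pmu_offline_cpu(unsigned int cpu)
{
        struct my_pmu *pmu;
        unsigned int target;

        mutex_lock(&my_pmu_mutex);
        list_for_each_entry(pmu, &my_pmu_list, entry) {
                if (!cpumask_test_and_clear_cpu(cpu, &pmu->cpus))
                        continue;
                target = cpumask_any_but(cpu_online_mask, cpu);
                if (target >= nr_cpu_ids)       /* last CPU going down */
                        continue;
                perf_pmu_migrate_context(&pmu->pmu, cpu, target);
                cpumask_set_cpu(target, &pmu->cpus);
        }
        mutex_unlock(&my_pmu_mutex);
        return 0;
}

static int __init my_pmu_driver_init(void)
{
        /* teardown-only state; nothing to do when a CPU comes online */
        return cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
                                         "AP_PERF_ARM_CCN_ONLINE",
                                         NULL, my_pmu_offline_cpu);
}

Each probed instance would then list_add() itself under my_pmu_mutex and list_del() on removal, exactly as both drivers above do.
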
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index 5effd3027319..28bce3f4f81d 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c | |||
@@ -370,8 +370,10 @@ static bool arch_timer_has_nonsecure_ppi(void) | |||
370 | arch_timer_ppi[PHYS_NONSECURE_PPI]); | 370 | arch_timer_ppi[PHYS_NONSECURE_PPI]); |
371 | } | 371 | } |
372 | 372 | ||
373 | static int arch_timer_setup(struct clock_event_device *clk) | 373 | static int arch_timer_starting_cpu(unsigned int cpu) |
374 | { | 374 | { |
375 | struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt); | ||
376 | |||
375 | __arch_timer_setup(ARCH_CP15_TIMER, clk); | 377 | __arch_timer_setup(ARCH_CP15_TIMER, clk); |
376 | 378 | ||
377 | enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], 0); | 379 | enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], 0); |
@@ -527,29 +529,14 @@ static void arch_timer_stop(struct clock_event_device *clk) | |||
527 | clk->set_state_shutdown(clk); | 529 | clk->set_state_shutdown(clk); |
528 | } | 530 | } |
529 | 531 | ||
530 | static int arch_timer_cpu_notify(struct notifier_block *self, | 532 | static int arch_timer_dying_cpu(unsigned int cpu) |
531 | unsigned long action, void *hcpu) | ||
532 | { | 533 | { |
533 | /* | 534 | struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt); |
534 | * Grab cpu pointer in each case to avoid spurious | ||
535 | * preemptible warnings | ||
536 | */ | ||
537 | switch (action & ~CPU_TASKS_FROZEN) { | ||
538 | case CPU_STARTING: | ||
539 | arch_timer_setup(this_cpu_ptr(arch_timer_evt)); | ||
540 | break; | ||
541 | case CPU_DYING: | ||
542 | arch_timer_stop(this_cpu_ptr(arch_timer_evt)); | ||
543 | break; | ||
544 | } | ||
545 | 535 | ||
546 | return NOTIFY_OK; | 536 | arch_timer_stop(clk); |
537 | return 0; | ||
547 | } | 538 | } |
548 | 539 | ||
549 | static struct notifier_block arch_timer_cpu_nb = { | ||
550 | .notifier_call = arch_timer_cpu_notify, | ||
551 | }; | ||
552 | |||
553 | #ifdef CONFIG_CPU_PM | 540 | #ifdef CONFIG_CPU_PM |
554 | static unsigned int saved_cntkctl; | 541 | static unsigned int saved_cntkctl; |
555 | static int arch_timer_cpu_pm_notify(struct notifier_block *self, | 542 | static int arch_timer_cpu_pm_notify(struct notifier_block *self, |
@@ -570,11 +557,21 @@ static int __init arch_timer_cpu_pm_init(void) | |||
570 | { | 557 | { |
571 | return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier); | 558 | return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier); |
572 | } | 559 | } |
560 | |||
561 | static void __init arch_timer_cpu_pm_deinit(void) | ||
562 | { | ||
563 | WARN_ON(cpu_pm_unregister_notifier(&arch_timer_cpu_pm_notifier)); | ||
564 | } | ||
565 | |||
573 | #else | 566 | #else |
574 | static int __init arch_timer_cpu_pm_init(void) | 567 | static int __init arch_timer_cpu_pm_init(void) |
575 | { | 568 | { |
576 | return 0; | 569 | return 0; |
577 | } | 570 | } |
571 | |||
572 | static void __init arch_timer_cpu_pm_deinit(void) | ||
573 | { | ||
574 | } | ||
578 | #endif | 575 | #endif |
579 | 576 | ||
580 | static int __init arch_timer_register(void) | 577 | static int __init arch_timer_register(void) |
@@ -621,22 +618,23 @@ static int __init arch_timer_register(void) | |||
621 | goto out_free; | 618 | goto out_free; |
622 | } | 619 | } |
623 | 620 | ||
624 | err = register_cpu_notifier(&arch_timer_cpu_nb); | ||
625 | if (err) | ||
626 | goto out_free_irq; | ||
627 | |||
628 | err = arch_timer_cpu_pm_init(); | 621 | err = arch_timer_cpu_pm_init(); |
629 | if (err) | 622 | if (err) |
630 | goto out_unreg_notify; | 623 | goto out_unreg_notify; |
631 | 624 | ||
632 | /* Immediately configure the timer on the boot CPU */ | ||
633 | arch_timer_setup(this_cpu_ptr(arch_timer_evt)); | ||
634 | 625 | ||
626 | /* Register and immediately configure the timer on the boot CPU */ | ||
627 | err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING, | ||
628 | "AP_ARM_ARCH_TIMER_STARTING", | ||
629 | arch_timer_starting_cpu, arch_timer_dying_cpu); | ||
630 | if (err) | ||
631 | goto out_unreg_cpupm; | ||
635 | return 0; | 632 | return 0; |
636 | 633 | ||
634 | out_unreg_cpupm: | ||
635 | arch_timer_cpu_pm_deinit(); | ||
636 | |||
637 | out_unreg_notify: | 637 | out_unreg_notify: |
638 | unregister_cpu_notifier(&arch_timer_cpu_nb); | ||
639 | out_free_irq: | ||
640 | free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt); | 638 | free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt); |
641 | if (arch_timer_has_nonsecure_ppi()) | 639 | if (arch_timer_has_nonsecure_ppi()) |
642 | free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], | 640 | free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], |
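
The clocksource conversions that follow all take the same STARTING-level shape: the old setup/stop helpers become a starting/dying pair that derives its per-CPU clock_event_device from the cpu argument, and one cpuhp_setup_state() call both installs the pair and immediately runs the starting callback on every online CPU, which is why the explicit "configure the timer on the boot CPU" calls disappear. A rough sketch of that shape, assuming a hypothetical per-CPU my_evt device plus my_timer_irq / my_timer_rate placeholders (the remaining clockevent fields are elided) and reusing the arch timer state slot for illustration:

static DEFINE_PER_CPU(struct clock_event_device, my_evt);
static int my_timer_irq;
static u32 my_timer_rate;

static int my_timer_starting_cpu(unsigned int cpu)
{
        struct clock_event_device *evt = per_cpu_ptr(&my_evt, cpu);

        /* STARTING callbacks run on the incoming CPU with interrupts off */
        evt->cpumask = cpumask_of(cpu);
        clockevents_config_and_register(evt, my_timer_rate, 1, ULONG_MAX);
        enable_percpu_irq(my_timer_irq, 0);
        return 0;
}

static int my_timer_dying_cpu(unsigned int cpu)
{
        struct clock_event_device *evt = per_cpu_ptr(&my_evt, cpu);

        evt->set_state_shutdown(evt);
        disable_percpu_irq(my_timer_irq);
        return 0;
}

static int __init my_timer_register(void)
{
        /* installs the pair and runs the starting callback on online CPUs */
        return cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
                                 "AP_ARM_ARCH_TIMER_STARTING",
                                 my_timer_starting_cpu, my_timer_dying_cpu);
}
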
diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c index 2a9ceb6e93f9..8da03298f844 100644 --- a/drivers/clocksource/arm_global_timer.c +++ b/drivers/clocksource/arm_global_timer.c | |||
@@ -165,9 +165,9 @@ static irqreturn_t gt_clockevent_interrupt(int irq, void *dev_id) | |||
165 | return IRQ_HANDLED; | 165 | return IRQ_HANDLED; |
166 | } | 166 | } |
167 | 167 | ||
168 | static int gt_clockevents_init(struct clock_event_device *clk) | 168 | static int gt_starting_cpu(unsigned int cpu) |
169 | { | 169 | { |
170 | int cpu = smp_processor_id(); | 170 | struct clock_event_device *clk = this_cpu_ptr(gt_evt); |
171 | 171 | ||
172 | clk->name = "arm_global_timer"; | 172 | clk->name = "arm_global_timer"; |
173 | clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | | 173 | clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | |
@@ -186,10 +186,13 @@ static int gt_clockevents_init(struct clock_event_device *clk) | |||
186 | return 0; | 186 | return 0; |
187 | } | 187 | } |
188 | 188 | ||
189 | static void gt_clockevents_stop(struct clock_event_device *clk) | 189 | static int gt_dying_cpu(unsigned int cpu) |
190 | { | 190 | { |
191 | struct clock_event_device *clk = this_cpu_ptr(gt_evt); | ||
192 | |||
191 | gt_clockevent_shutdown(clk); | 193 | gt_clockevent_shutdown(clk); |
192 | disable_percpu_irq(clk->irq); | 194 | disable_percpu_irq(clk->irq); |
195 | return 0; | ||
193 | } | 196 | } |
194 | 197 | ||
195 | static cycle_t gt_clocksource_read(struct clocksource *cs) | 198 | static cycle_t gt_clocksource_read(struct clocksource *cs) |
@@ -252,24 +255,6 @@ static int __init gt_clocksource_init(void) | |||
252 | return clocksource_register_hz(&gt_clocksource, gt_clk_rate); | 255 | return clocksource_register_hz(&gt_clocksource, gt_clk_rate); |
253 | } | 256 | } |
254 | 257 | ||
255 | static int gt_cpu_notify(struct notifier_block *self, unsigned long action, | ||
256 | void *hcpu) | ||
257 | { | ||
258 | switch (action & ~CPU_TASKS_FROZEN) { | ||
259 | case CPU_STARTING: | ||
260 | gt_clockevents_init(this_cpu_ptr(gt_evt)); | ||
261 | break; | ||
262 | case CPU_DYING: | ||
263 | gt_clockevents_stop(this_cpu_ptr(gt_evt)); | ||
264 | break; | ||
265 | } | ||
266 | |||
267 | return NOTIFY_OK; | ||
268 | } | ||
269 | static struct notifier_block gt_cpu_nb = { | ||
270 | .notifier_call = gt_cpu_notify, | ||
271 | }; | ||
272 | |||
273 | static int __init global_timer_of_register(struct device_node *np) | 258 | static int __init global_timer_of_register(struct device_node *np) |
274 | { | 259 | { |
275 | struct clk *gt_clk; | 260 | struct clk *gt_clk; |
@@ -325,18 +310,14 @@ static int __init global_timer_of_register(struct device_node *np) | |||
325 | goto out_free; | 310 | goto out_free; |
326 | } | 311 | } |
327 | 312 | ||
328 | err = register_cpu_notifier(&gt_cpu_nb); | 313 | /* Register and immediately configure the timer on the boot CPU */ |
329 | if (err) { | ||
330 | pr_warn("global-timer: unable to register cpu notifier.\n"); | ||
331 | goto out_irq; | ||
332 | } | ||
333 | |||
334 | /* Immediately configure the timer on the boot CPU */ | ||
335 | err = gt_clocksource_init(); | 314 | err = gt_clocksource_init(); |
336 | if (err) | 315 | if (err) |
337 | goto out_irq; | 316 | goto out_irq; |
338 | 317 | ||
339 | err = gt_clockevents_init(this_cpu_ptr(gt_evt)); | 318 | err = cpuhp_setup_state(CPUHP_AP_ARM_GLOBAL_TIMER_STARTING, |
319 | "AP_ARM_GLOBAL_TIMER_STARTING", | ||
320 | gt_starting_cpu, gt_dying_cpu); | ||
340 | if (err) | 321 | if (err) |
341 | goto out_irq; | 322 | goto out_irq; |
342 | 323 | ||
diff --git a/drivers/clocksource/dummy_timer.c b/drivers/clocksource/dummy_timer.c index 776b6c86dcd5..89f1c2edbe02 100644 --- a/drivers/clocksource/dummy_timer.c +++ b/drivers/clocksource/dummy_timer.c | |||
@@ -16,10 +16,9 @@ | |||
16 | 16 | ||
17 | static DEFINE_PER_CPU(struct clock_event_device, dummy_timer_evt); | 17 | static DEFINE_PER_CPU(struct clock_event_device, dummy_timer_evt); |
18 | 18 | ||
19 | static void dummy_timer_setup(void) | 19 | static int dummy_timer_starting_cpu(unsigned int cpu) |
20 | { | 20 | { |
21 | int cpu = smp_processor_id(); | 21 | struct clock_event_device *evt = per_cpu_ptr(&dummy_timer_evt, cpu); |
22 | struct clock_event_device *evt = raw_cpu_ptr(&dummy_timer_evt); | ||
23 | 22 | ||
24 | evt->name = "dummy_timer"; | 23 | evt->name = "dummy_timer"; |
25 | evt->features = CLOCK_EVT_FEAT_PERIODIC | | 24 | evt->features = CLOCK_EVT_FEAT_PERIODIC | |
@@ -29,36 +28,13 @@ static void dummy_timer_setup(void) | |||
29 | evt->cpumask = cpumask_of(cpu); | 28 | evt->cpumask = cpumask_of(cpu); |
30 | 29 | ||
31 | clockevents_register_device(evt); | 30 | clockevents_register_device(evt); |
31 | return 0; | ||
32 | } | 32 | } |
33 | 33 | ||
34 | static int dummy_timer_cpu_notify(struct notifier_block *self, | ||
35 | unsigned long action, void *hcpu) | ||
36 | { | ||
37 | if ((action & ~CPU_TASKS_FROZEN) == CPU_STARTING) | ||
38 | dummy_timer_setup(); | ||
39 | |||
40 | return NOTIFY_OK; | ||
41 | } | ||
42 | |||
43 | static struct notifier_block dummy_timer_cpu_nb = { | ||
44 | .notifier_call = dummy_timer_cpu_notify, | ||
45 | }; | ||
46 | |||
47 | static int __init dummy_timer_register(void) | 34 | static int __init dummy_timer_register(void) |
48 | { | 35 | { |
49 | int err = 0; | 36 | return cpuhp_setup_state(CPUHP_AP_DUMMY_TIMER_STARTING, |
50 | 37 | "AP_DUMMY_TIMER_STARTING", | |
51 | cpu_notifier_register_begin(); | 38 | dummy_timer_starting_cpu, NULL); |
52 | err = __register_cpu_notifier(&dummy_timer_cpu_nb); | ||
53 | if (err) | ||
54 | goto out; | ||
55 | |||
56 | /* We won't get a call on the boot CPU, so register immediately */ | ||
57 | if (num_possible_cpus() > 1) | ||
58 | dummy_timer_setup(); | ||
59 | |||
60 | out: | ||
61 | cpu_notifier_register_done(); | ||
62 | return err; | ||
63 | } | 39 | } |
64 | early_initcall(dummy_timer_register); | 40 | early_initcall(dummy_timer_register); |
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c index 0d18dd4b3bd2..41840d02c331 100644 --- a/drivers/clocksource/exynos_mct.c +++ b/drivers/clocksource/exynos_mct.c | |||
@@ -443,10 +443,11 @@ static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id) | |||
443 | return IRQ_HANDLED; | 443 | return IRQ_HANDLED; |
444 | } | 444 | } |
445 | 445 | ||
446 | static int exynos4_local_timer_setup(struct mct_clock_event_device *mevt) | 446 | static int exynos4_mct_starting_cpu(unsigned int cpu) |
447 | { | 447 | { |
448 | struct mct_clock_event_device *mevt = | ||
449 | per_cpu_ptr(&percpu_mct_tick, cpu); | ||
448 | struct clock_event_device *evt = &mevt->evt; | 450 | struct clock_event_device *evt = &mevt->evt; |
449 | unsigned int cpu = smp_processor_id(); | ||
450 | 451 | ||
451 | mevt->base = EXYNOS4_MCT_L_BASE(cpu); | 452 | mevt->base = EXYNOS4_MCT_L_BASE(cpu); |
452 | snprintf(mevt->name, sizeof(mevt->name), "mct_tick%d", cpu); | 453 | snprintf(mevt->name, sizeof(mevt->name), "mct_tick%d", cpu); |
@@ -480,8 +481,10 @@ static int exynos4_local_timer_setup(struct mct_clock_event_device *mevt) | |||
480 | return 0; | 481 | return 0; |
481 | } | 482 | } |
482 | 483 | ||
483 | static void exynos4_local_timer_stop(struct mct_clock_event_device *mevt) | 484 | static int exynos4_mct_dying_cpu(unsigned int cpu) |
484 | { | 485 | { |
486 | struct mct_clock_event_device *mevt = | ||
487 | per_cpu_ptr(&percpu_mct_tick, cpu); | ||
485 | struct clock_event_device *evt = &mevt->evt; | 488 | struct clock_event_device *evt = &mevt->evt; |
486 | 489 | ||
487 | evt->set_state_shutdown(evt); | 490 | evt->set_state_shutdown(evt); |
@@ -491,39 +494,12 @@ static void exynos4_local_timer_stop(struct mct_clock_event_device *mevt) | |||
491 | } else { | 494 | } else { |
492 | disable_percpu_irq(mct_irqs[MCT_L0_IRQ]); | 495 | disable_percpu_irq(mct_irqs[MCT_L0_IRQ]); |
493 | } | 496 | } |
497 | return 0; | ||
494 | } | 498 | } |
495 | 499 | ||
496 | static int exynos4_mct_cpu_notify(struct notifier_block *self, | ||
497 | unsigned long action, void *hcpu) | ||
498 | { | ||
499 | struct mct_clock_event_device *mevt; | ||
500 | |||
501 | /* | ||
502 | * Grab cpu pointer in each case to avoid spurious | ||
503 | * preemptible warnings | ||
504 | */ | ||
505 | switch (action & ~CPU_TASKS_FROZEN) { | ||
506 | case CPU_STARTING: | ||
507 | mevt = this_cpu_ptr(&percpu_mct_tick); | ||
508 | exynos4_local_timer_setup(mevt); | ||
509 | break; | ||
510 | case CPU_DYING: | ||
511 | mevt = this_cpu_ptr(&percpu_mct_tick); | ||
512 | exynos4_local_timer_stop(mevt); | ||
513 | break; | ||
514 | } | ||
515 | |||
516 | return NOTIFY_OK; | ||
517 | } | ||
518 | |||
519 | static struct notifier_block exynos4_mct_cpu_nb = { | ||
520 | .notifier_call = exynos4_mct_cpu_notify, | ||
521 | }; | ||
522 | |||
523 | static int __init exynos4_timer_resources(struct device_node *np, void __iomem *base) | 500 | static int __init exynos4_timer_resources(struct device_node *np, void __iomem *base) |
524 | { | 501 | { |
525 | int err, cpu; | 502 | int err, cpu; |
526 | struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick); | ||
527 | struct clk *mct_clk, *tick_clk; | 503 | struct clk *mct_clk, *tick_clk; |
528 | 504 | ||
529 | tick_clk = np ? of_clk_get_by_name(np, "fin_pll") : | 505 | tick_clk = np ? of_clk_get_by_name(np, "fin_pll") : |
@@ -570,12 +546,14 @@ static int __init exynos4_timer_resources(struct device_node *np, void __iomem *base) | |||
570 | } | 546 | } |
571 | } | 547 | } |
572 | 548 | ||
573 | err = register_cpu_notifier(&exynos4_mct_cpu_nb); | 549 | /* Install hotplug callbacks which configure the timer on this CPU */ |
550 | err = cpuhp_setup_state(CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING, | ||
551 | "AP_EXYNOS4_MCT_TIMER_STARTING", | ||
552 | exynos4_mct_starting_cpu, | ||
553 | exynos4_mct_dying_cpu); | ||
574 | if (err) | 554 | if (err) |
575 | goto out_irq; | 555 | goto out_irq; |
576 | 556 | ||
577 | /* Immediately configure the timer on the boot CPU */ | ||
578 | exynos4_local_timer_setup(mevt); | ||
579 | return 0; | 557 | return 0; |
580 | 558 | ||
581 | out_irq: | 559 | out_irq: |
diff --git a/drivers/clocksource/metag_generic.c b/drivers/clocksource/metag_generic.c index bcd5c0d602a0..a80ab3e446b7 100644 --- a/drivers/clocksource/metag_generic.c +++ b/drivers/clocksource/metag_generic.c | |||
@@ -90,7 +90,7 @@ unsigned long long sched_clock(void) | |||
90 | return ticks << HARDWARE_TO_NS_SHIFT; | 90 | return ticks << HARDWARE_TO_NS_SHIFT; |
91 | } | 91 | } |
92 | 92 | ||
93 | static void arch_timer_setup(unsigned int cpu) | 93 | static int arch_timer_starting_cpu(unsigned int cpu) |
94 | { | 94 | { |
95 | unsigned int txdivtime; | 95 | unsigned int txdivtime; |
96 | struct clock_event_device *clk = &per_cpu(local_clockevent, cpu); | 96 | struct clock_event_device *clk = &per_cpu(local_clockevent, cpu); |
@@ -132,27 +132,9 @@ static void arch_timer_setup(unsigned int cpu) | |||
132 | val = core_reg_read(TXUCT_ID, TXTIMER_REGNUM, thread0); | 132 | val = core_reg_read(TXUCT_ID, TXTIMER_REGNUM, thread0); |
133 | __core_reg_set(TXTIMER, val); | 133 | __core_reg_set(TXTIMER, val); |
134 | } | 134 | } |
135 | return 0; | ||
135 | } | 136 | } |
136 | 137 | ||
137 | static int arch_timer_cpu_notify(struct notifier_block *self, | ||
138 | unsigned long action, void *hcpu) | ||
139 | { | ||
140 | int cpu = (long)hcpu; | ||
141 | |||
142 | switch (action) { | ||
143 | case CPU_STARTING: | ||
144 | case CPU_STARTING_FROZEN: | ||
145 | arch_timer_setup(cpu); | ||
146 | break; | ||
147 | } | ||
148 | |||
149 | return NOTIFY_OK; | ||
150 | } | ||
151 | |||
152 | static struct notifier_block arch_timer_cpu_nb = { | ||
153 | .notifier_call = arch_timer_cpu_notify, | ||
154 | }; | ||
155 | |||
156 | int __init metag_generic_timer_init(void) | 138 | int __init metag_generic_timer_init(void) |
157 | { | 139 | { |
158 | /* | 140 | /* |
@@ -170,11 +152,8 @@ int __init metag_generic_timer_init(void) | |||
170 | 152 | ||
171 | setup_irq(tbisig_map(TBID_SIGNUM_TRT), &metag_timer_irq); | 153 | setup_irq(tbisig_map(TBID_SIGNUM_TRT), &metag_timer_irq); |
172 | 154 | ||
173 | /* Configure timer on boot CPU */ | 155 | /* Hook cpu boot to configure the CPU's timers */ |
174 | arch_timer_setup(smp_processor_id()); | 156 | return cpuhp_setup_state(CPUHP_AP_METAG_TIMER_STARTING, |
175 | 157 | "AP_METAG_TIMER_STARTING", | |
176 | /* Hook cpu boot to configure other CPU's timers */ | 158 | arch_timer_starting_cpu, NULL); |
177 | register_cpu_notifier(&arch_timer_cpu_nb); | ||
178 | |||
179 | return 0; | ||
180 | } | 159 | } |
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c index 1572c7a778ab..d91e8725917c 100644 --- a/drivers/clocksource/mips-gic-timer.c +++ b/drivers/clocksource/mips-gic-timer.c | |||
@@ -49,10 +49,9 @@ struct irqaction gic_compare_irqaction = { | |||
49 | .name = "timer", | 49 | .name = "timer", |
50 | }; | 50 | }; |
51 | 51 | ||
52 | static void gic_clockevent_cpu_init(struct clock_event_device *cd) | 52 | static void gic_clockevent_cpu_init(unsigned int cpu, |
53 | struct clock_event_device *cd) | ||
53 | { | 54 | { |
54 | unsigned int cpu = smp_processor_id(); | ||
55 | |||
56 | cd->name = "MIPS GIC"; | 55 | cd->name = "MIPS GIC"; |
57 | cd->features = CLOCK_EVT_FEAT_ONESHOT | | 56 | cd->features = CLOCK_EVT_FEAT_ONESHOT | |
58 | CLOCK_EVT_FEAT_C3STOP; | 57 | CLOCK_EVT_FEAT_C3STOP; |
@@ -79,19 +78,10 @@ static void gic_update_frequency(void *data) | |||
79 | clockevents_update_freq(this_cpu_ptr(&gic_clockevent_device), rate); | 78 | clockevents_update_freq(this_cpu_ptr(&gic_clockevent_device), rate); |
80 | } | 79 | } |
81 | 80 | ||
82 | static int gic_cpu_notifier(struct notifier_block *nb, unsigned long action, | 81 | static int gic_starting_cpu(unsigned int cpu) |
83 | void *data) | ||
84 | { | 82 | { |
85 | switch (action & ~CPU_TASKS_FROZEN) { | 83 | gic_clockevent_cpu_init(cpu, this_cpu_ptr(&gic_clockevent_device)); |
86 | case CPU_STARTING: | 84 | return 0; |
87 | gic_clockevent_cpu_init(this_cpu_ptr(&gic_clockevent_device)); | ||
88 | break; | ||
89 | case CPU_DYING: | ||
90 | gic_clockevent_cpu_exit(this_cpu_ptr(&gic_clockevent_device)); | ||
91 | break; | ||
92 | } | ||
93 | |||
94 | return NOTIFY_OK; | ||
95 | } | 85 | } |
96 | 86 | ||
97 | static int gic_clk_notifier(struct notifier_block *nb, unsigned long action, | 87 | static int gic_clk_notifier(struct notifier_block *nb, unsigned long action, |
@@ -105,10 +95,11 @@ static int gic_clk_notifier(struct notifier_block *nb, unsigned long action, | |||
105 | return NOTIFY_OK; | 95 | return NOTIFY_OK; |
106 | } | 96 | } |
107 | 97 | ||
108 | 98 | static int gic_dying_cpu(unsigned int cpu) | |
109 | static struct notifier_block gic_cpu_nb = { | 99 | { |
110 | .notifier_call = gic_cpu_notifier, | 100 | gic_clockevent_cpu_exit(this_cpu_ptr(&gic_clockevent_device)); |
111 | }; | 101 | return 0; |
102 | } | ||
112 | 103 | ||
113 | static struct notifier_block gic_clk_nb = { | 104 | static struct notifier_block gic_clk_nb = { |
114 | .notifier_call = gic_clk_notifier, | 105 | .notifier_call = gic_clk_notifier, |
@@ -125,12 +116,9 @@ static int gic_clockevent_init(void) | |||
125 | if (ret < 0) | 116 | if (ret < 0) |
126 | return ret; | 117 | return ret; |
127 | 118 | ||
128 | ret = register_cpu_notifier(&gic_cpu_nb); | 119 | cpuhp_setup_state(CPUHP_AP_MIPS_GIC_TIMER_STARTING, |
129 | if (ret < 0) | 120 | "AP_MIPS_GIC_TIMER_STARTING", gic_starting_cpu, |
130 | pr_warn("GIC: Unable to register CPU notifier\n"); | 121 | gic_dying_cpu); |
131 | |||
132 | gic_clockevent_cpu_init(this_cpu_ptr(&gic_clockevent_device)); | ||
133 | |||
134 | return 0; | 122 | return 0; |
135 | } | 123 | } |
136 | 124 | ||
diff --git a/drivers/clocksource/qcom-timer.c b/drivers/clocksource/qcom-timer.c index 662576339049..3283cfa2aa52 100644 --- a/drivers/clocksource/qcom-timer.c +++ b/drivers/clocksource/qcom-timer.c | |||
@@ -105,9 +105,9 @@ static struct clocksource msm_clocksource = { | |||
105 | static int msm_timer_irq; | 105 | static int msm_timer_irq; |
106 | static int msm_timer_has_ppi; | 106 | static int msm_timer_has_ppi; |
107 | 107 | ||
108 | static int msm_local_timer_setup(struct clock_event_device *evt) | 108 | static int msm_local_timer_starting_cpu(unsigned int cpu) |
109 | { | 109 | { |
110 | int cpu = smp_processor_id(); | 110 | struct clock_event_device *evt = per_cpu_ptr(msm_evt, cpu); |
111 | int err; | 111 | int err; |
112 | 112 | ||
113 | evt->irq = msm_timer_irq; | 113 | evt->irq = msm_timer_irq; |
@@ -135,35 +135,15 @@ static int msm_local_timer_setup(struct clock_event_device *evt) | |||
135 | return 0; | 135 | return 0; |
136 | } | 136 | } |
137 | 137 | ||
138 | static void msm_local_timer_stop(struct clock_event_device *evt) | 138 | static int msm_local_timer_dying_cpu(unsigned int cpu) |
139 | { | 139 | { |
140 | struct clock_event_device *evt = per_cpu_ptr(msm_evt, cpu); | ||
141 | |||
140 | evt->set_state_shutdown(evt); | 142 | evt->set_state_shutdown(evt); |
141 | disable_percpu_irq(evt->irq); | 143 | disable_percpu_irq(evt->irq); |
144 | return 0; | ||
142 | } | 145 | } |
143 | 146 | ||
144 | static int msm_timer_cpu_notify(struct notifier_block *self, | ||
145 | unsigned long action, void *hcpu) | ||
146 | { | ||
147 | /* | ||
148 | * Grab cpu pointer in each case to avoid spurious | ||
149 | * preemptible warnings | ||
150 | */ | ||
151 | switch (action & ~CPU_TASKS_FROZEN) { | ||
152 | case CPU_STARTING: | ||
153 | msm_local_timer_setup(this_cpu_ptr(msm_evt)); | ||
154 | break; | ||
155 | case CPU_DYING: | ||
156 | msm_local_timer_stop(this_cpu_ptr(msm_evt)); | ||
157 | break; | ||
158 | } | ||
159 | |||
160 | return NOTIFY_OK; | ||
161 | } | ||
162 | |||
163 | static struct notifier_block msm_timer_cpu_nb = { | ||
164 | .notifier_call = msm_timer_cpu_notify, | ||
165 | }; | ||
166 | |||
167 | static u64 notrace msm_sched_clock_read(void) | 147 | static u64 notrace msm_sched_clock_read(void) |
168 | { | 148 | { |
169 | return msm_clocksource.read(&msm_clocksource); | 149 | return msm_clocksource.read(&msm_clocksource); |
@@ -200,14 +180,15 @@ static int __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq, | |||
200 | if (res) { | 180 | if (res) { |
201 | pr_err("request_percpu_irq failed\n"); | 181 | pr_err("request_percpu_irq failed\n"); |
202 | } else { | 182 | } else { |
203 | res = register_cpu_notifier(&msm_timer_cpu_nb); | 183 | /* Install and invoke hotplug callbacks */ |
184 | res = cpuhp_setup_state(CPUHP_AP_QCOM_TIMER_STARTING, | ||
185 | "AP_QCOM_TIMER_STARTING", | ||
186 | msm_local_timer_starting_cpu, | ||
187 | msm_local_timer_dying_cpu); | ||
204 | if (res) { | 188 | if (res) { |
205 | free_percpu_irq(irq, msm_evt); | 189 | free_percpu_irq(irq, msm_evt); |
206 | goto err; | 190 | goto err; |
207 | } | 191 | } |
208 | |||
209 | /* Immediately configure the timer on the boot CPU */ | ||
210 | msm_local_timer_setup(raw_cpu_ptr(msm_evt)); | ||
211 | } | 192 | } |
212 | 193 | ||
213 | err: | 194 | err: |
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c index 20ec066481fe..719b478d136e 100644 --- a/drivers/clocksource/time-armada-370-xp.c +++ b/drivers/clocksource/time-armada-370-xp.c | |||
@@ -170,10 +170,10 @@ static irqreturn_t armada_370_xp_timer_interrupt(int irq, void *dev_id) | |||
170 | /* | 170 | /* |
171 | * Setup the local clock events for a CPU. | 171 | * Setup the local clock events for a CPU. |
172 | */ | 172 | */ |
173 | static int armada_370_xp_timer_setup(struct clock_event_device *evt) | 173 | static int armada_370_xp_timer_starting_cpu(unsigned int cpu) |
174 | { | 174 | { |
175 | struct clock_event_device *evt = per_cpu_ptr(armada_370_xp_evt, cpu); | ||
175 | u32 clr = 0, set = 0; | 176 | u32 clr = 0, set = 0; |
176 | int cpu = smp_processor_id(); | ||
177 | 177 | ||
178 | if (timer25Mhz) | 178 | if (timer25Mhz) |
179 | set = TIMER0_25MHZ; | 179 | set = TIMER0_25MHZ; |
@@ -200,35 +200,15 @@ static int armada_370_xp_timer_setup(struct clock_event_device *evt) | |||
200 | return 0; | 200 | return 0; |
201 | } | 201 | } |
202 | 202 | ||
203 | static void armada_370_xp_timer_stop(struct clock_event_device *evt) | 203 | static int armada_370_xp_timer_dying_cpu(unsigned int cpu) |
204 | { | 204 | { |
205 | struct clock_event_device *evt = per_cpu_ptr(armada_370_xp_evt, cpu); | ||
206 | |||
205 | evt->set_state_shutdown(evt); | 207 | evt->set_state_shutdown(evt); |
206 | disable_percpu_irq(evt->irq); | 208 | disable_percpu_irq(evt->irq); |
209 | return 0; | ||
207 | } | 210 | } |
208 | 211 | ||
209 | static int armada_370_xp_timer_cpu_notify(struct notifier_block *self, | ||
210 | unsigned long action, void *hcpu) | ||
211 | { | ||
212 | /* | ||
213 | * Grab cpu pointer in each case to avoid spurious | ||
214 | * preemptible warnings | ||
215 | */ | ||
216 | switch (action & ~CPU_TASKS_FROZEN) { | ||
217 | case CPU_STARTING: | ||
218 | armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt)); | ||
219 | break; | ||
220 | case CPU_DYING: | ||
221 | armada_370_xp_timer_stop(this_cpu_ptr(armada_370_xp_evt)); | ||
222 | break; | ||
223 | } | ||
224 | |||
225 | return NOTIFY_OK; | ||
226 | } | ||
227 | |||
228 | static struct notifier_block armada_370_xp_timer_cpu_nb = { | ||
229 | .notifier_call = armada_370_xp_timer_cpu_notify, | ||
230 | }; | ||
231 | |||
232 | static u32 timer0_ctrl_reg, timer0_local_ctrl_reg; | 212 | static u32 timer0_ctrl_reg, timer0_local_ctrl_reg; |
233 | 213 | ||
234 | static int armada_370_xp_timer_suspend(void) | 214 | static int armada_370_xp_timer_suspend(void) |
@@ -322,8 +302,6 @@ static int __init armada_370_xp_timer_common_init(struct device_node *np) | |||
322 | return res; | 302 | return res; |
323 | } | 303 | } |
324 | 304 | ||
325 | register_cpu_notifier(&armada_370_xp_timer_cpu_nb); | ||
326 | |||
327 | armada_370_xp_evt = alloc_percpu(struct clock_event_device); | 305 | armada_370_xp_evt = alloc_percpu(struct clock_event_device); |
328 | if (!armada_370_xp_evt) | 306 | if (!armada_370_xp_evt) |
329 | return -ENOMEM; | 307 | return -ENOMEM; |
@@ -341,9 +319,12 @@ static int __init armada_370_xp_timer_common_init(struct device_node *np) | |||
341 | return res; | 319 | return res; |
342 | } | 320 | } |
343 | 321 | ||
344 | res = armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt)); | 322 | res = cpuhp_setup_state(CPUHP_AP_ARMADA_TIMER_STARTING, |
323 | "AP_ARMADA_TIMER_STARTING", | ||
324 | armada_370_xp_timer_starting_cpu, | ||
325 | armada_370_xp_timer_dying_cpu); | ||
345 | if (res) { | 326 | if (res) { |
346 | pr_err("Failed to setup timer"); | 327 | pr_err("Failed to setup hotplug state and timer"); |
347 | return res; | 328 | return res; |
348 | } | 329 | } |
349 | 330 | ||
diff --git a/drivers/clocksource/timer-atlas7.c b/drivers/clocksource/timer-atlas7.c index 90f8fbc154a4..4334e0330ada 100644 --- a/drivers/clocksource/timer-atlas7.c +++ b/drivers/clocksource/timer-atlas7.c | |||
@@ -172,9 +172,9 @@ static struct irqaction sirfsoc_timer1_irq = { | |||
172 | .handler = sirfsoc_timer_interrupt, | 172 | .handler = sirfsoc_timer_interrupt, |
173 | }; | 173 | }; |
174 | 174 | ||
175 | static int sirfsoc_local_timer_setup(struct clock_event_device *ce) | 175 | static int sirfsoc_local_timer_starting_cpu(unsigned int cpu) |
176 | { | 176 | { |
177 | int cpu = smp_processor_id(); | 177 | struct clock_event_device *ce = per_cpu_ptr(sirfsoc_clockevent, cpu); |
178 | struct irqaction *action; | 178 | struct irqaction *action; |
179 | 179 | ||
180 | if (cpu == 0) | 180 | if (cpu == 0) |
@@ -203,50 +203,27 @@ static int sirfsoc_local_timer_setup(struct clock_event_device *ce) | |||
203 | return 0; | 203 | return 0; |
204 | } | 204 | } |
205 | 205 | ||
206 | static void sirfsoc_local_timer_stop(struct clock_event_device *ce) | 206 | static int sirfsoc_local_timer_dying_cpu(unsigned int cpu) |
207 | { | 207 | { |
208 | int cpu = smp_processor_id(); | ||
209 | |||
210 | sirfsoc_timer_count_disable(1); | 208 | sirfsoc_timer_count_disable(1); |
211 | 209 | ||
212 | if (cpu == 0) | 210 | if (cpu == 0) |
213 | remove_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq); | 211 | remove_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq); |
214 | else | 212 | else |
215 | remove_irq(sirfsoc_timer1_irq.irq, &sirfsoc_timer1_irq); | 213 | remove_irq(sirfsoc_timer1_irq.irq, &sirfsoc_timer1_irq); |
214 | return 0; | ||
216 | } | 215 | } |
217 | 216 | ||
218 | static int sirfsoc_cpu_notify(struct notifier_block *self, | ||
219 | unsigned long action, void *hcpu) | ||
220 | { | ||
221 | /* | ||
222 | * Grab cpu pointer in each case to avoid spurious | ||
223 | * preemptible warnings | ||
224 | */ | ||
225 | switch (action & ~CPU_TASKS_FROZEN) { | ||
226 | case CPU_STARTING: | ||
227 | sirfsoc_local_timer_setup(this_cpu_ptr(sirfsoc_clockevent)); | ||
228 | break; | ||
229 | case CPU_DYING: | ||
230 | sirfsoc_local_timer_stop(this_cpu_ptr(sirfsoc_clockevent)); | ||
231 | break; | ||
232 | } | ||
233 | |||
234 | return NOTIFY_OK; | ||
235 | } | ||
236 | |||
237 | static struct notifier_block sirfsoc_cpu_nb = { | ||
238 | .notifier_call = sirfsoc_cpu_notify, | ||
239 | }; | ||
240 | |||
241 | static int __init sirfsoc_clockevent_init(void) | 217 | static int __init sirfsoc_clockevent_init(void) |
242 | { | 218 | { |
243 | sirfsoc_clockevent = alloc_percpu(struct clock_event_device); | 219 | sirfsoc_clockevent = alloc_percpu(struct clock_event_device); |
244 | BUG_ON(!sirfsoc_clockevent); | 220 | BUG_ON(!sirfsoc_clockevent); |
245 | 221 | ||
246 | BUG_ON(register_cpu_notifier(&sirfsoc_cpu_nb)); | 222 | /* Install and invoke hotplug callbacks */ |
247 | 223 | return cpuhp_setup_state(CPUHP_AP_MARCO_TIMER_STARTING, | |
248 | /* Immediately configure the timer on the boot CPU */ | 224 | "AP_MARCO_TIMER_STARTING", |
249 | return sirfsoc_local_timer_setup(this_cpu_ptr(sirfsoc_clockevent)); | 225 | sirfsoc_local_timer_starting_cpu, |
226 | sirfsoc_local_timer_dying_cpu); | ||
250 | } | 227 | } |
251 | 228 | ||
252 | /* initialize the kernel jiffy timer source */ | 229 | /* initialize the kernel jiffy timer source */ |
diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c index d83ab82672e4..2de4cad9c5ed 100644 --- a/drivers/hwtracing/coresight/coresight-etm3x.c +++ b/drivers/hwtracing/coresight/coresight-etm3x.c | |||
@@ -51,6 +51,8 @@ module_param_named(boot_enable, boot_enable, int, S_IRUGO); | |||
51 | static int etm_count; | 51 | static int etm_count; |
52 | static struct etm_drvdata *etmdrvdata[NR_CPUS]; | 52 | static struct etm_drvdata *etmdrvdata[NR_CPUS]; |
53 | 53 | ||
54 | static enum cpuhp_state hp_online; | ||
55 | |||
54 | /* | 56 | /* |
55 | * Memory mapped writes to clear os lock are not supported on some processors | 57 | * Memory mapped writes to clear os lock are not supported on some processors |
56 | * and OS lock must be unlocked before any memory mapped access on such | 58 | * and OS lock must be unlocked before any memory mapped access on such |
@@ -481,8 +483,7 @@ static int etm_enable_sysfs(struct coresight_device *csdev) | |||
481 | 483 | ||
482 | /* | 484 | /* |
483 | * Configure the ETM only if the CPU is online. If it isn't online | 485 | * Configure the ETM only if the CPU is online. If it isn't online |
484 | * hw configuration will take place when 'CPU_STARTING' is received | 486 | * hw configuration will take place on the local CPU during bring up. |
485 | * in @etm_cpu_callback. | ||
486 | */ | 487 | */ |
487 | if (cpu_online(drvdata->cpu)) { | 488 | if (cpu_online(drvdata->cpu)) { |
488 | ret = smp_call_function_single(drvdata->cpu, | 489 | ret = smp_call_function_single(drvdata->cpu, |
@@ -641,47 +642,44 @@ static const struct coresight_ops etm_cs_ops = { | |||
641 | .source_ops = &etm_source_ops, | 642 | .source_ops = &etm_source_ops, |
642 | }; | 643 | }; |
643 | 644 | ||
644 | static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action, | 645 | static int etm_online_cpu(unsigned int cpu) |
645 | void *hcpu) | ||
646 | { | 646 | { |
647 | unsigned int cpu = (unsigned long)hcpu; | ||
648 | |||
649 | if (!etmdrvdata[cpu]) | 647 | if (!etmdrvdata[cpu]) |
650 | goto out; | 648 | return 0; |
651 | 649 | ||
652 | switch (action & (~CPU_TASKS_FROZEN)) { | 650 | if (etmdrvdata[cpu]->boot_enable && !etmdrvdata[cpu]->sticky_enable) |
653 | case CPU_STARTING: | 651 | coresight_enable(etmdrvdata[cpu]->csdev); |
654 | spin_lock(&etmdrvdata[cpu]->spinlock); | 652 | return 0; |
655 | if (!etmdrvdata[cpu]->os_unlock) { | 653 | } |
656 | etm_os_unlock(etmdrvdata[cpu]); | ||
657 | etmdrvdata[cpu]->os_unlock = true; | ||
658 | } | ||
659 | |||
660 | if (local_read(&etmdrvdata[cpu]->mode)) | ||
661 | etm_enable_hw(etmdrvdata[cpu]); | ||
662 | spin_unlock(&etmdrvdata[cpu]->spinlock); | ||
663 | break; | ||
664 | 654 | ||
665 | case CPU_ONLINE: | 655 | static int etm_starting_cpu(unsigned int cpu) |
666 | if (etmdrvdata[cpu]->boot_enable && | 656 | { |
667 | !etmdrvdata[cpu]->sticky_enable) | 657 | if (!etmdrvdata[cpu]) |
668 | coresight_enable(etmdrvdata[cpu]->csdev); | 658 | return 0; |
669 | break; | ||
670 | 659 | ||
671 | case CPU_DYING: | 660 | spin_lock(&etmdrvdata[cpu]->spinlock); |
672 | spin_lock(&etmdrvdata[cpu]->spinlock); | 661 | if (!etmdrvdata[cpu]->os_unlock) { |
673 | if (local_read(&etmdrvdata[cpu]->mode)) | 662 | etm_os_unlock(etmdrvdata[cpu]); |
674 | etm_disable_hw(etmdrvdata[cpu]); | 663 | etmdrvdata[cpu]->os_unlock = true; |
675 | spin_unlock(&etmdrvdata[cpu]->spinlock); | ||
676 | break; | ||
677 | } | 664 | } |
678 | out: | 665 | |
679 | return NOTIFY_OK; | 666 | if (local_read(&etmdrvdata[cpu]->mode)) |
667 | etm_enable_hw(etmdrvdata[cpu]); | ||
668 | spin_unlock(&etmdrvdata[cpu]->spinlock); | ||
669 | return 0; | ||
680 | } | 670 | } |
681 | 671 | ||
682 | static struct notifier_block etm_cpu_notifier = { | 672 | static int etm_dying_cpu(unsigned int cpu) |
683 | .notifier_call = etm_cpu_callback, | 673 | { |
684 | }; | 674 | if (!etmdrvdata[cpu]) |
675 | return 0; | ||
676 | |||
677 | spin_lock(&etmdrvdata[cpu]->spinlock); | ||
678 | if (local_read(&etmdrvdata[cpu]->mode)) | ||
679 | etm_disable_hw(etmdrvdata[cpu]); | ||
680 | spin_unlock(&etmdrvdata[cpu]->spinlock); | ||
681 | return 0; | ||
682 | } | ||
685 | 683 | ||
686 | static bool etm_arch_supported(u8 arch) | 684 | static bool etm_arch_supported(u8 arch) |
687 | { | 685 | { |
@@ -806,9 +804,17 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id) | |||
806 | etm_init_arch_data, drvdata, 1)) | 804 | etm_init_arch_data, drvdata, 1)) |
807 | dev_err(dev, "ETM arch init failed\n"); | 805 | dev_err(dev, "ETM arch init failed\n"); |
808 | 806 | ||
809 | if (!etm_count++) | 807 | if (!etm_count++) { |
810 | register_hotcpu_notifier(&etm_cpu_notifier); | 808 | cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING, |
811 | 809 | "AP_ARM_CORESIGHT_STARTING", | |
810 | etm_starting_cpu, etm_dying_cpu); | ||
811 | ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, | ||
812 | "AP_ARM_CORESIGHT_ONLINE", | ||
813 | etm_online_cpu, NULL); | ||
814 | if (ret < 0) | ||
815 | goto err_arch_supported; | ||
816 | hp_online = ret; | ||
817 | } | ||
812 | put_online_cpus(); | 818 | put_online_cpus(); |
813 | 819 | ||
814 | if (etm_arch_supported(drvdata->arch) == false) { | 820 | if (etm_arch_supported(drvdata->arch) == false) { |
@@ -839,7 +845,6 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id) | |||
839 | 845 | ||
840 | pm_runtime_put(&adev->dev); | 846 | pm_runtime_put(&adev->dev); |
841 | dev_info(dev, "%s initialized\n", (char *)id->data); | 847 | dev_info(dev, "%s initialized\n", (char *)id->data); |
842 | |||
843 | if (boot_enable) { | 848 | if (boot_enable) { |
844 | coresight_enable(drvdata->csdev); | 849 | coresight_enable(drvdata->csdev); |
845 | drvdata->boot_enable = true; | 850 | drvdata->boot_enable = true; |
@@ -848,8 +853,11 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id) | |||
848 | return 0; | 853 | return 0; |
849 | 854 | ||
850 | err_arch_supported: | 855 | err_arch_supported: |
851 | if (--etm_count == 0) | 856 | if (--etm_count == 0) { |
852 | unregister_hotcpu_notifier(&etm_cpu_notifier); | 857 | cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING); |
858 | if (hp_online) | ||
859 | cpuhp_remove_state_nocalls(hp_online); | ||
860 | } | ||
853 | return ret; | 861 | return ret; |
854 | } | 862 | } |
855 | 863 | ||
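
The coresight drivers need two levels: a reserved STARTING slot for the low-level enable/disable work, plus an ONLINE-level callback for which no fixed slot exists, so a dynamic one is requested. With CPUHP_AP_ONLINE_DYN the positive return value of cpuhp_setup_state_nocalls() is the slot that was actually allocated, and it has to be remembered (hp_online above) so it can be removed again. A sketch of the dynamic half, with hypothetical names:

static enum cpuhp_state my_hp_online;

static int my_online_cpu(unsigned int cpu)
{
        /* runs in process context once the CPU is fully online */
        return 0;
}

static int my_register_hotplug(void)
{
        int ret;

        ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "AP_MY_ONLINE",
                                        my_online_cpu, NULL);
        if (ret < 0)
                return ret;
        my_hp_online = ret;     /* the dynamically allocated state number */
        return 0;
}

static void my_unregister_hotplug(void)
{
        if (my_hp_online)
                cpuhp_remove_state_nocalls(my_hp_online);
}
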
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c index 462f0dc15757..1a5e0d14c1dd 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x.c +++ b/drivers/hwtracing/coresight/coresight-etm4x.c | |||
@@ -48,6 +48,8 @@ static int etm4_count; | |||
48 | static struct etmv4_drvdata *etmdrvdata[NR_CPUS]; | 48 | static struct etmv4_drvdata *etmdrvdata[NR_CPUS]; |
49 | static void etm4_set_default(struct etmv4_config *config); | 49 | static void etm4_set_default(struct etmv4_config *config); |
50 | 50 | ||
51 | static enum cpuhp_state hp_online; | ||
52 | |||
51 | static void etm4_os_unlock(struct etmv4_drvdata *drvdata) | 53 | static void etm4_os_unlock(struct etmv4_drvdata *drvdata) |
52 | { | 54 | { |
53 | /* Writing any value to ETMOSLAR unlocks the trace registers */ | 55 | /* Writing any value to ETMOSLAR unlocks the trace registers */ |
@@ -673,47 +675,44 @@ void etm4_config_trace_mode(struct etmv4_config *config) | |||
673 | config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] = addr_acc; | 675 | config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] = addr_acc; |
674 | } | 676 | } |
675 | 677 | ||
676 | static int etm4_cpu_callback(struct notifier_block *nfb, unsigned long action, | 678 | static int etm4_online_cpu(unsigned int cpu) |
677 | void *hcpu) | ||
678 | { | 679 | { |
679 | unsigned int cpu = (unsigned long)hcpu; | ||
680 | |||
681 | if (!etmdrvdata[cpu]) | 680 | if (!etmdrvdata[cpu]) |
682 | goto out; | 681 | return 0; |
683 | |||
684 | switch (action & (~CPU_TASKS_FROZEN)) { | ||
685 | case CPU_STARTING: | ||
686 | spin_lock(&etmdrvdata[cpu]->spinlock); | ||
687 | if (!etmdrvdata[cpu]->os_unlock) { | ||
688 | etm4_os_unlock(etmdrvdata[cpu]); | ||
689 | etmdrvdata[cpu]->os_unlock = true; | ||
690 | } | ||
691 | |||
692 | if (local_read(&etmdrvdata[cpu]->mode)) | ||
693 | etm4_enable_hw(etmdrvdata[cpu]); | ||
694 | spin_unlock(&etmdrvdata[cpu]->spinlock); | ||
695 | break; | ||
696 | 682 | ||
697 | case CPU_ONLINE: | 683 | if (etmdrvdata[cpu]->boot_enable && !etmdrvdata[cpu]->sticky_enable) |
698 | if (etmdrvdata[cpu]->boot_enable && | 684 | coresight_enable(etmdrvdata[cpu]->csdev); |
699 | !etmdrvdata[cpu]->sticky_enable) | 685 | return 0; |
700 | coresight_enable(etmdrvdata[cpu]->csdev); | 686 | } |
701 | break; | ||
702 | 687 | ||
703 | case CPU_DYING: | 688 | static int etm4_starting_cpu(unsigned int cpu) |
704 | spin_lock(&etmdrvdata[cpu]->spinlock); | 689 | { |
705 | if (local_read(&etmdrvdata[cpu]->mode)) | 690 | if (!etmdrvdata[cpu]) |
706 | etm4_disable_hw(etmdrvdata[cpu]); | 691 | return 0; |
707 | spin_unlock(&etmdrvdata[cpu]->spinlock); | 692 | |
708 | break; | 693 | spin_lock(&etmdrvdata[cpu]->spinlock); |
694 | if (!etmdrvdata[cpu]->os_unlock) { | ||
695 | etm4_os_unlock(etmdrvdata[cpu]); | ||
696 | etmdrvdata[cpu]->os_unlock = true; | ||
709 | } | 697 | } |
710 | out: | 698 | |
711 | return NOTIFY_OK; | 699 | if (local_read(&etmdrvdata[cpu]->mode)) |
700 | etm4_enable_hw(etmdrvdata[cpu]); | ||
701 | spin_unlock(&etmdrvdata[cpu]->spinlock); | ||
702 | return 0; | ||
712 | } | 703 | } |
713 | 704 | ||
714 | static struct notifier_block etm4_cpu_notifier = { | 705 | static int etm4_dying_cpu(unsigned int cpu) |
715 | .notifier_call = etm4_cpu_callback, | 706 | { |
716 | }; | 707 | if (!etmdrvdata[cpu]) |
708 | return 0; | ||
709 | |||
710 | spin_lock(&etmdrvdata[cpu]->spinlock); | ||
711 | if (local_read(&etmdrvdata[cpu]->mode)) | ||
712 | etm4_disable_hw(etmdrvdata[cpu]); | ||
713 | spin_unlock(&etmdrvdata[cpu]->spinlock); | ||
714 | return 0; | ||
715 | } | ||
717 | 716 | ||
718 | static void etm4_init_trace_id(struct etmv4_drvdata *drvdata) | 717 | static void etm4_init_trace_id(struct etmv4_drvdata *drvdata) |
719 | { | 718 | { |
@@ -767,8 +766,17 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id) | |||
767 | etm4_init_arch_data, drvdata, 1)) | 766 | etm4_init_arch_data, drvdata, 1)) |
768 | dev_err(dev, "ETM arch init failed\n"); | 767 | dev_err(dev, "ETM arch init failed\n"); |
769 | 768 | ||
770 | if (!etm4_count++) | 769 | if (!etm4_count++) { |
771 | register_hotcpu_notifier(&etm4_cpu_notifier); | 770 | cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT4_STARTING, |
771 | "AP_ARM_CORESIGHT4_STARTING", | ||
772 | etm4_starting_cpu, etm4_dying_cpu); | ||
773 | ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, | ||
774 | "AP_ARM_CORESIGHT4_ONLINE", | ||
775 | etm4_online_cpu, NULL); | ||
776 | if (ret < 0) | ||
777 | goto err_arch_supported; | ||
778 | hp_online = ret; | ||
779 | } | ||
772 | 780 | ||
773 | put_online_cpus(); | 781 | put_online_cpus(); |
774 | 782 | ||
@@ -809,8 +817,11 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id) | |||
809 | return 0; | 817 | return 0; |
810 | 818 | ||
811 | err_arch_supported: | 819 | err_arch_supported: |
812 | if (--etm4_count == 0) | 820 | if (--etm4_count == 0) { |
813 | unregister_hotcpu_notifier(&etm4_cpu_notifier); | 821 | cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT4_STARTING); |
822 | if (hp_online) | ||
823 | cpuhp_remove_state_nocalls(hp_online); | ||
824 | } | ||
814 | return ret; | 825 | return ret; |
815 | } | 826 | } |
816 | 827 | ||
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c index 7c42b1d13faf..8bcee65a0b8c 100644 --- a/drivers/irqchip/irq-armada-370-xp.c +++ b/drivers/irqchip/irq-armada-370-xp.c | |||
@@ -345,38 +345,20 @@ static void armada_mpic_send_doorbell(const struct cpumask *mask, | |||
345 | ARMADA_370_XP_SW_TRIG_INT_OFFS); | 345 | ARMADA_370_XP_SW_TRIG_INT_OFFS); |
346 | } | 346 | } |
347 | 347 | ||
348 | static int armada_xp_mpic_secondary_init(struct notifier_block *nfb, | 348 | static int armada_xp_mpic_starting_cpu(unsigned int cpu) |
349 | unsigned long action, void *hcpu) | ||
350 | { | 349 | { |
351 | if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) { | 350 | armada_xp_mpic_perf_init(); |
352 | armada_xp_mpic_perf_init(); | 351 | armada_xp_mpic_smp_cpu_init(); |
353 | armada_xp_mpic_smp_cpu_init(); | 352 | return 0; |
354 | } | ||
355 | |||
356 | return NOTIFY_OK; | ||
357 | } | 353 | } |
358 | 354 | ||
359 | static struct notifier_block armada_370_xp_mpic_cpu_notifier = { | 355 | static int mpic_cascaded_starting_cpu(unsigned int cpu) |
360 | .notifier_call = armada_xp_mpic_secondary_init, | ||
361 | .priority = 100, | ||
362 | }; | ||
363 | |||
364 | static int mpic_cascaded_secondary_init(struct notifier_block *nfb, | ||
365 | unsigned long action, void *hcpu) | ||
366 | { | 356 | { |
367 | if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) { | 357 | armada_xp_mpic_perf_init(); |
368 | armada_xp_mpic_perf_init(); | 358 | enable_percpu_irq(parent_irq, IRQ_TYPE_NONE); |
369 | enable_percpu_irq(parent_irq, IRQ_TYPE_NONE); | 359 | return 0; |
370 | } | ||
371 | |||
372 | return NOTIFY_OK; | ||
373 | } | 360 | } |
374 | 361 | #endif | |
375 | static struct notifier_block mpic_cascaded_cpu_notifier = { | ||
376 | .notifier_call = mpic_cascaded_secondary_init, | ||
377 | .priority = 100, | ||
378 | }; | ||
379 | #endif /* CONFIG_SMP */ | ||
380 | 362 | ||
381 | static const struct irq_domain_ops armada_370_xp_mpic_irq_ops = { | 363 | static const struct irq_domain_ops armada_370_xp_mpic_irq_ops = { |
382 | .map = armada_370_xp_mpic_irq_map, | 364 | .map = armada_370_xp_mpic_irq_map, |
@@ -595,11 +577,15 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node, | |||
595 | set_handle_irq(armada_370_xp_handle_irq); | 577 | set_handle_irq(armada_370_xp_handle_irq); |
596 | #ifdef CONFIG_SMP | 578 | #ifdef CONFIG_SMP |
597 | set_smp_cross_call(armada_mpic_send_doorbell); | 579 | set_smp_cross_call(armada_mpic_send_doorbell); |
598 | register_cpu_notifier(&armada_370_xp_mpic_cpu_notifier); | 580 | cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_ARMADA_XP_STARTING, |
581 | "AP_IRQ_ARMADA_XP_STARTING", | ||
582 | armada_xp_mpic_starting_cpu, NULL); | ||
599 | #endif | 583 | #endif |
600 | } else { | 584 | } else { |
601 | #ifdef CONFIG_SMP | 585 | #ifdef CONFIG_SMP |
602 | register_cpu_notifier(&mpic_cascaded_cpu_notifier); | 586 | cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_ARMADA_CASC_STARTING, |
587 | "AP_IRQ_ARMADA_CASC_STARTING", | ||
588 | mpic_cascaded_starting_cpu, NULL); | ||
603 | #endif | 589 | #endif |
604 | irq_set_chained_handler(parent_irq, | 590 | irq_set_chained_handler(parent_irq, |
605 | armada_370_xp_mpic_handle_cascade_irq); | 591 | armada_370_xp_mpic_handle_cascade_irq); |
diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c index df1949c0aa23..d96b2c947e74 100644 --- a/drivers/irqchip/irq-bcm2836.c +++ b/drivers/irqchip/irq-bcm2836.c | |||
@@ -202,26 +202,19 @@ static void bcm2836_arm_irqchip_send_ipi(const struct cpumask *mask, | |||
202 | } | 202 | } |
203 | } | 203 | } |
204 | 204 | ||
205 | /* Unmasks the IPI on the CPU when it's online. */ | 205 | static int bcm2836_cpu_starting(unsigned int cpu) |
206 | static int bcm2836_arm_irqchip_cpu_notify(struct notifier_block *nfb, | ||
207 | unsigned long action, void *hcpu) | ||
208 | { | 206 | { |
209 | unsigned int cpu = (unsigned long)hcpu; | 207 | bcm2836_arm_irqchip_unmask_per_cpu_irq(LOCAL_MAILBOX_INT_CONTROL0, 0, |
210 | unsigned int int_reg = LOCAL_MAILBOX_INT_CONTROL0; | 208 | cpu); |
211 | unsigned int mailbox = 0; | 209 | return 0; |
212 | |||
213 | if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) | ||
214 | bcm2836_arm_irqchip_unmask_per_cpu_irq(int_reg, mailbox, cpu); | ||
215 | else if (action == CPU_DYING) | ||
216 | bcm2836_arm_irqchip_mask_per_cpu_irq(int_reg, mailbox, cpu); | ||
217 | |||
218 | return NOTIFY_OK; | ||
219 | } | 210 | } |
220 | 211 | ||
221 | static struct notifier_block bcm2836_arm_irqchip_cpu_notifier = { | 212 | static int bcm2836_cpu_dying(unsigned int cpu) |
222 | .notifier_call = bcm2836_arm_irqchip_cpu_notify, | 213 | { |
223 | .priority = 100, | 214 | bcm2836_arm_irqchip_mask_per_cpu_irq(LOCAL_MAILBOX_INT_CONTROL0, 0, |
224 | }; | 215 | cpu); |
216 | return 0; | ||
217 | } | ||
225 | 218 | ||
226 | #ifdef CONFIG_ARM | 219 | #ifdef CONFIG_ARM |
227 | static int __init bcm2836_smp_boot_secondary(unsigned int cpu, | 220 | static int __init bcm2836_smp_boot_secondary(unsigned int cpu, |
@@ -251,10 +244,9 @@ bcm2836_arm_irqchip_smp_init(void) | |||
251 | { | 244 | { |
252 | #ifdef CONFIG_SMP | 245 | #ifdef CONFIG_SMP |
253 | /* Unmask IPIs to the boot CPU. */ | 246 | /* Unmask IPIs to the boot CPU. */ |
254 | bcm2836_arm_irqchip_cpu_notify(&bcm2836_arm_irqchip_cpu_notifier, | 247 | cpuhp_setup_state(CPUHP_AP_IRQ_BCM2836_STARTING, |
255 | CPU_STARTING, | 248 | "AP_IRQ_BCM2836_STARTING", bcm2836_cpu_starting, |
256 | (void *)(uintptr_t)smp_processor_id()); | 249 | bcm2836_cpu_dying); |
257 | register_cpu_notifier(&bcm2836_arm_irqchip_cpu_notifier); | ||
258 | 250 | ||
259 | set_smp_cross_call(bcm2836_arm_irqchip_send_ipi); | 251 | set_smp_cross_call(bcm2836_arm_irqchip_send_ipi); |
260 | 252 | ||
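The bcm2836 hunk shows the basic callback conversion every driver in this series goes through: the single notifier that decoded action codes becomes one small callback per transition, taking the CPU number and returning 0 or a negative errno rather than NOTIFY_* values. A hedged sketch with hypothetical mydev_* helpers (not from this patch):

static void mydev_unmask_ipi(unsigned int cpu) { /* hardware poke elided */ }
static void mydev_mask_ipi(unsigned int cpu)   { /* hardware poke elided */ }

/* Replaces the CPU_STARTING leg of the old notifier. */
static int mydev_starting_cpu(unsigned int cpu)
{
        mydev_unmask_ipi(cpu);
        return 0;
}

/* Replaces the CPU_DYING leg; passed as the teardown callback. */
static int mydev_dying_cpu(unsigned int cpu)
{
        mydev_mask_ipi(cpu);
        return 0;
}

The CPU_*_FROZEN variants need no handling: at this level the state machine makes no distinction between hotplug and suspend/resume transitions.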
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 2c5ba0e704bf..6fc56c3466b0 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c | |||
@@ -538,23 +538,13 @@ static void gic_cpu_init(void) | |||
538 | } | 538 | } |
539 | 539 | ||
540 | #ifdef CONFIG_SMP | 540 | #ifdef CONFIG_SMP |
541 | static int gic_secondary_init(struct notifier_block *nfb, | 541 | |
542 | unsigned long action, void *hcpu) | 542 | static int gic_starting_cpu(unsigned int cpu) |
543 | { | 543 | { |
544 | if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) | 544 | gic_cpu_init(); |
545 | gic_cpu_init(); | 545 | return 0; |
546 | return NOTIFY_OK; | ||
547 | } | 546 | } |
548 | 547 | ||
549 | /* | ||
550 | * Notifier for enabling the GIC CPU interface. Set an arbitrarily high | ||
551 | * priority because the GIC needs to be up before the ARM generic timers. | ||
552 | */ | ||
553 | static struct notifier_block gic_cpu_notifier = { | ||
554 | .notifier_call = gic_secondary_init, | ||
555 | .priority = 100, | ||
556 | }; | ||
557 | |||
558 | static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, | 548 | static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, |
559 | unsigned long cluster_id) | 549 | unsigned long cluster_id) |
560 | { | 550 | { |
@@ -634,7 +624,9 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) | |||
634 | static void gic_smp_init(void) | 624 | static void gic_smp_init(void) |
635 | { | 625 | { |
636 | set_smp_cross_call(gic_raise_softirq); | 626 | set_smp_cross_call(gic_raise_softirq); |
637 | register_cpu_notifier(&gic_cpu_notifier); | 627 | cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GICV3_STARTING, |
628 | "AP_IRQ_GICV3_STARTING", gic_starting_cpu, | ||
629 | NULL); | ||
638 | } | 630 | } |
639 | 631 | ||
640 | static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, | 632 | static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, |
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 1de07eb5839c..c2cab572c511 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c | |||
@@ -984,25 +984,12 @@ static int gic_irq_domain_translate(struct irq_domain *d, | |||
984 | return -EINVAL; | 984 | return -EINVAL; |
985 | } | 985 | } |
986 | 986 | ||
987 | #ifdef CONFIG_SMP | 987 | static int gic_starting_cpu(unsigned int cpu) |
988 | static int gic_secondary_init(struct notifier_block *nfb, unsigned long action, | ||
989 | void *hcpu) | ||
990 | { | 988 | { |
991 | if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) | 989 | gic_cpu_init(&gic_data[0]); |
992 | gic_cpu_init(&gic_data[0]); | 990 | return 0; |
993 | return NOTIFY_OK; | ||
994 | } | 991 | } |
995 | 992 | ||
996 | /* | ||
997 | * Notifier for enabling the GIC CPU interface. Set an arbitrarily high | ||
998 | * priority because the GIC needs to be up before the ARM generic timers. | ||
999 | */ | ||
1000 | static struct notifier_block gic_cpu_notifier = { | ||
1001 | .notifier_call = gic_secondary_init, | ||
1002 | .priority = 100, | ||
1003 | }; | ||
1004 | #endif | ||
1005 | |||
1006 | static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, | 993 | static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, |
1007 | unsigned int nr_irqs, void *arg) | 994 | unsigned int nr_irqs, void *arg) |
1008 | { | 995 | { |
@@ -1177,8 +1164,10 @@ static int __init __gic_init_bases(struct gic_chip_data *gic, | |||
1177 | gic_cpu_map[i] = 0xff; | 1164 | gic_cpu_map[i] = 0xff; |
1178 | #ifdef CONFIG_SMP | 1165 | #ifdef CONFIG_SMP |
1179 | set_smp_cross_call(gic_raise_softirq); | 1166 | set_smp_cross_call(gic_raise_softirq); |
1180 | register_cpu_notifier(&gic_cpu_notifier); | ||
1181 | #endif | 1167 | #endif |
1168 | cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING, | ||
1169 | "AP_IRQ_GIC_STARTING", | ||
1170 | gic_starting_cpu, NULL); | ||
1182 | set_handle_irq(gic_handle_irq); | 1171 | set_handle_irq(gic_handle_irq); |
1183 | if (static_key_true(&supports_deactivate)) | 1172 | if (static_key_true(&supports_deactivate)) |
1184 | pr_info("GIC: Using split EOI/Deactivate mode\n"); | 1173 | pr_info("GIC: Using split EOI/Deactivate mode\n"); |
diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c index 9e25d8ce08e5..021b0e0833c1 100644 --- a/drivers/irqchip/irq-hip04.c +++ b/drivers/irqchip/irq-hip04.c | |||
@@ -342,26 +342,12 @@ static int hip04_irq_domain_xlate(struct irq_domain *d, | |||
342 | return ret; | 342 | return ret; |
343 | } | 343 | } |
344 | 344 | ||
345 | #ifdef CONFIG_SMP | 345 | static int hip04_irq_starting_cpu(unsigned int cpu) |
346 | static int hip04_irq_secondary_init(struct notifier_block *nfb, | ||
347 | unsigned long action, | ||
348 | void *hcpu) | ||
349 | { | 346 | { |
350 | if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) | 347 | hip04_irq_cpu_init(&hip04_data); |
351 | hip04_irq_cpu_init(&hip04_data); | 348 | return 0; |
352 | return NOTIFY_OK; | ||
353 | } | 349 | } |
354 | 350 | ||
355 | /* | ||
356 | * Notifier for enabling the INTC CPU interface. Set an arbitrarily high | ||
357 | * priority because the GIC needs to be up before the ARM generic timers. | ||
358 | */ | ||
359 | static struct notifier_block hip04_irq_cpu_notifier = { | ||
360 | .notifier_call = hip04_irq_secondary_init, | ||
361 | .priority = 100, | ||
362 | }; | ||
363 | #endif | ||
364 | |||
365 | static const struct irq_domain_ops hip04_irq_domain_ops = { | 351 | static const struct irq_domain_ops hip04_irq_domain_ops = { |
366 | .map = hip04_irq_domain_map, | 352 | .map = hip04_irq_domain_map, |
367 | .xlate = hip04_irq_domain_xlate, | 353 | .xlate = hip04_irq_domain_xlate, |
@@ -417,13 +403,12 @@ hip04_of_init(struct device_node *node, struct device_node *parent) | |||
417 | 403 | ||
418 | #ifdef CONFIG_SMP | 404 | #ifdef CONFIG_SMP |
419 | set_smp_cross_call(hip04_raise_softirq); | 405 | set_smp_cross_call(hip04_raise_softirq); |
420 | register_cpu_notifier(&hip04_irq_cpu_notifier); | ||
421 | #endif | 406 | #endif |
422 | set_handle_irq(hip04_handle_irq); | 407 | set_handle_irq(hip04_handle_irq); |
423 | 408 | ||
424 | hip04_irq_dist_init(&hip04_data); | 409 | hip04_irq_dist_init(&hip04_data); |
425 | hip04_irq_cpu_init(&hip04_data); | 410 | cpuhp_setup_state(CPUHP_AP_IRQ_HIP04_STARTING, "AP_IRQ_HIP04_STARTING", |
426 | 411 | hip04_irq_starting_cpu, NULL); | |
427 | return 0; | 412 | return 0; |
428 | } | 413 | } |
429 | IRQCHIP_DECLARE(hip04_intc, "hisilicon,hip04-intc", hip04_of_init); | 414 | IRQCHIP_DECLARE(hip04_intc, "hisilicon,hip04-intc", hip04_of_init); |
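A detail worth noting in the hip04 (and bcm2836) hunks: the explicit boot-CPU init call disappears because cpuhp_setup_state(), unlike cpuhp_setup_state_nocalls(), also invokes the startup callback for every CPU that is already online at registration time. Roughly, as an illustration rather than a literal extract:

        /* Installs the callbacks and runs hip04_irq_starting_cpu() on each
         * CPU that is already up, including the boot CPU. */
        ret = cpuhp_setup_state(CPUHP_AP_IRQ_HIP04_STARTING,
                                "AP_IRQ_HIP04_STARTING",
                                hip04_irq_starting_cpu, NULL);

        /* Only installs the callbacks; CPUs that are already online are left
         * to the caller (the GIC hunks above take this route because
         * gic_cpu_init() is invoked directly during the driver's own init). */
        ret = cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_HIP04_STARTING,
                                        "AP_IRQ_HIP04_STARTING",
                                        hip04_irq_starting_cpu, NULL);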
diff --git a/drivers/leds/trigger/ledtrig-cpu.c b/drivers/leds/trigger/ledtrig-cpu.c index 938467fb82be..22f0634dd3fa 100644 --- a/drivers/leds/trigger/ledtrig-cpu.c +++ b/drivers/leds/trigger/ledtrig-cpu.c | |||
@@ -92,29 +92,22 @@ static struct syscore_ops ledtrig_cpu_syscore_ops = { | |||
92 | .resume = ledtrig_cpu_syscore_resume, | 92 | .resume = ledtrig_cpu_syscore_resume, |
93 | }; | 93 | }; |
94 | 94 | ||
95 | static int ledtrig_cpu_notify(struct notifier_block *self, | 95 | static int ledtrig_online_cpu(unsigned int cpu) |
96 | unsigned long action, void *hcpu) | ||
97 | { | 96 | { |
98 | switch (action & ~CPU_TASKS_FROZEN) { | 97 | ledtrig_cpu(CPU_LED_START); |
99 | case CPU_STARTING: | 98 | return 0; |
100 | ledtrig_cpu(CPU_LED_START); | ||
101 | break; | ||
102 | case CPU_DYING: | ||
103 | ledtrig_cpu(CPU_LED_STOP); | ||
104 | break; | ||
105 | } | ||
106 | |||
107 | return NOTIFY_OK; | ||
108 | } | 99 | } |
109 | 100 | ||
110 | 101 | static int ledtrig_prepare_down_cpu(unsigned int cpu) | |
111 | static struct notifier_block ledtrig_cpu_nb = { | 102 | { |
112 | .notifier_call = ledtrig_cpu_notify, | 103 | ledtrig_cpu(CPU_LED_STOP); |
113 | }; | 104 | return 0; |
105 | } | ||
114 | 106 | ||
115 | static int __init ledtrig_cpu_init(void) | 107 | static int __init ledtrig_cpu_init(void) |
116 | { | 108 | { |
117 | int cpu; | 109 | int cpu; |
110 | int ret; | ||
118 | 111 | ||
119 | /* Supports up to 9999 cpu cores */ | 112 | /* Supports up to 9999 cpu cores */ |
120 | BUILD_BUG_ON(CONFIG_NR_CPUS > 9999); | 113 | BUILD_BUG_ON(CONFIG_NR_CPUS > 9999); |
@@ -133,7 +126,12 @@ static int __init ledtrig_cpu_init(void) | |||
133 | } | 126 | } |
134 | 127 | ||
135 | register_syscore_ops(&ledtrig_cpu_syscore_ops); | 128 | register_syscore_ops(&ledtrig_cpu_syscore_ops); |
136 | register_cpu_notifier(&ledtrig_cpu_nb); | 129 | |
130 | ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "AP_LEDTRIG_STARTING", | ||
131 | ledtrig_online_cpu, ledtrig_prepare_down_cpu); | ||
132 | if (ret < 0) | ||
133 | pr_err("CPU hotplug notifier for ledtrig-cpu could not be registered: %d\n", | ||
134 | ret); | ||
137 | 135 | ||
138 | pr_info("ledtrig-cpu: registered to indicate activity on CPUs\n"); | 136 | pr_info("ledtrig-cpu: registered to indicate activity on CPUs\n"); |
139 | 137 | ||
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index 8e4d7f590b06..6ccb994bdfcb 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c | |||
@@ -688,30 +688,29 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler) | |||
688 | return 0; | 688 | return 0; |
689 | } | 689 | } |
690 | 690 | ||
691 | static DEFINE_MUTEX(arm_pmu_mutex); | ||
692 | static LIST_HEAD(arm_pmu_list); | ||
693 | |||
691 | /* | 694 | /* |
692 | * PMU hardware loses all context when a CPU goes offline. | 695 | * PMU hardware loses all context when a CPU goes offline. |
693 | * When a CPU is hotplugged back in, since some hardware registers are | 696 | * When a CPU is hotplugged back in, since some hardware registers are |
694 | * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading | 697 | * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading |
695 | * junk values out of them. | 698 | * junk values out of them. |
696 | */ | 699 | */ |
697 | static int cpu_pmu_notify(struct notifier_block *b, unsigned long action, | 700 | static int arm_perf_starting_cpu(unsigned int cpu) |
698 | void *hcpu) | ||
699 | { | 701 | { |
700 | int cpu = (unsigned long)hcpu; | 702 | struct arm_pmu *pmu; |
701 | struct arm_pmu *pmu = container_of(b, struct arm_pmu, hotplug_nb); | ||
702 | |||
703 | if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING) | ||
704 | return NOTIFY_DONE; | ||
705 | |||
706 | if (!cpumask_test_cpu(cpu, &pmu->supported_cpus)) | ||
707 | return NOTIFY_DONE; | ||
708 | 703 | ||
709 | if (pmu->reset) | 704 | mutex_lock(&arm_pmu_mutex); |
710 | pmu->reset(pmu); | 705 | list_for_each_entry(pmu, &arm_pmu_list, entry) { |
711 | else | ||
712 | return NOTIFY_DONE; | ||
713 | 706 | ||
714 | return NOTIFY_OK; | 707 | if (!cpumask_test_cpu(cpu, &pmu->supported_cpus)) |
708 | continue; | ||
709 | if (pmu->reset) | ||
710 | pmu->reset(pmu); | ||
711 | } | ||
712 | mutex_unlock(&arm_pmu_mutex); | ||
713 | return 0; | ||
715 | } | 714 | } |
716 | 715 | ||
717 | #ifdef CONFIG_CPU_PM | 716 | #ifdef CONFIG_CPU_PM |
@@ -822,10 +821,9 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu) | |||
822 | if (!cpu_hw_events) | 821 | if (!cpu_hw_events) |
823 | return -ENOMEM; | 822 | return -ENOMEM; |
824 | 823 | ||
825 | cpu_pmu->hotplug_nb.notifier_call = cpu_pmu_notify; | 824 | mutex_lock(&arm_pmu_mutex); |
826 | err = register_cpu_notifier(&cpu_pmu->hotplug_nb); | 825 | list_add_tail(&cpu_pmu->entry, &arm_pmu_list); |
827 | if (err) | 826 | mutex_unlock(&arm_pmu_mutex); |
828 | goto out_hw_events; | ||
829 | 827 | ||
830 | err = cpu_pm_pmu_register(cpu_pmu); | 828 | err = cpu_pm_pmu_register(cpu_pmu); |
831 | if (err) | 829 | if (err) |
@@ -861,8 +859,9 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu) | |||
861 | return 0; | 859 | return 0; |
862 | 860 | ||
863 | out_unregister: | 861 | out_unregister: |
864 | unregister_cpu_notifier(&cpu_pmu->hotplug_nb); | 862 | mutex_lock(&arm_pmu_mutex); |
865 | out_hw_events: | 863 | list_del(&cpu_pmu->entry); |
864 | mutex_unlock(&arm_pmu_mutex); | ||
866 | free_percpu(cpu_hw_events); | 865 | free_percpu(cpu_hw_events); |
867 | return err; | 866 | return err; |
868 | } | 867 | } |
@@ -870,7 +869,9 @@ out_hw_events: | |||
870 | static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu) | 869 | static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu) |
871 | { | 870 | { |
872 | cpu_pm_pmu_unregister(cpu_pmu); | 871 | cpu_pm_pmu_unregister(cpu_pmu); |
873 | unregister_cpu_notifier(&cpu_pmu->hotplug_nb); | 872 | mutex_lock(&arm_pmu_mutex); |
873 | list_del(&cpu_pmu->entry); | ||
874 | mutex_unlock(&arm_pmu_mutex); | ||
874 | free_percpu(cpu_pmu->hw_events); | 875 | free_percpu(cpu_pmu->hw_events); |
875 | } | 876 | } |
876 | 877 | ||
@@ -1061,3 +1062,17 @@ out_free: | |||
1061 | kfree(pmu); | 1062 | kfree(pmu); |
1062 | return ret; | 1063 | return ret; |
1063 | } | 1064 | } |
1065 | |||
1066 | static int arm_pmu_hp_init(void) | ||
1067 | { | ||
1068 | int ret; | ||
1069 | |||
1070 | ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_STARTING, | ||
1071 | "AP_PERF_ARM_STARTING", | ||
1072 | arm_perf_starting_cpu, NULL); | ||
1073 | if (ret) | ||
1074 | pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n", | ||
1075 | ret); | ||
1076 | return ret; | ||
1077 | } | ||
1078 | subsys_initcall(arm_pmu_hp_init); | ||
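The arm_pmu hunk shows the pattern for multi-instance drivers: a hotplug state can be set up only once per subsystem, so the per-PMU notifier becomes a global list that a single callback walks under arm_pmu_mutex. The same shape for a hypothetical driver (foo_* names are illustrative):

#include <linux/list.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(foo_lock);
static LIST_HEAD(foo_instances);        /* every probed instance is linked here */

struct foo_pmu {
        struct list_head entry;         /* linkage into foo_instances */
        void (*reset)(struct foo_pmu *pmu);
};

/* One callback serves all instances; probe/remove only add to or delete from the list. */
static int foo_starting_cpu(unsigned int cpu)
{
        struct foo_pmu *pmu;

        mutex_lock(&foo_lock);
        list_for_each_entry(pmu, &foo_instances, entry) {
                if (pmu->reset)
                        pmu->reset(pmu);
        }
        mutex_unlock(&foo_lock);
        return 0;
}

The locking mirrors the hunk above; since STARTING callbacks run on the incoming CPU with interrupts disabled, the choice of a sleeping lock in this path is worth scrutinising.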
diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 21597dcac0e2..797d9c8e9a1b 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h | |||
@@ -55,17 +55,6 @@ extern ssize_t arch_cpu_release(const char *, size_t); | |||
55 | #endif | 55 | #endif |
56 | struct notifier_block; | 56 | struct notifier_block; |
57 | 57 | ||
58 | /* | ||
59 | * CPU notifier priorities. | ||
60 | */ | ||
61 | enum { | ||
62 | CPU_PRI_PERF = 20, | ||
63 | |||
64 | /* bring up workqueues before normal notifiers and down after */ | ||
65 | CPU_PRI_WORKQUEUE_UP = 5, | ||
66 | CPU_PRI_WORKQUEUE_DOWN = -5, | ||
67 | }; | ||
68 | |||
69 | #define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */ | 58 | #define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */ |
70 | #define CPU_UP_PREPARE 0x0003 /* CPU (unsigned)v coming up */ | 59 | #define CPU_UP_PREPARE 0x0003 /* CPU (unsigned)v coming up */ |
71 | #define CPU_UP_CANCELED 0x0004 /* CPU (unsigned)v NOT coming up */ | 60 | #define CPU_UP_CANCELED 0x0004 /* CPU (unsigned)v NOT coming up */ |
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 386374d19987..242bf530edfc 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h | |||
@@ -4,19 +4,95 @@ | |||
4 | enum cpuhp_state { | 4 | enum cpuhp_state { |
5 | CPUHP_OFFLINE, | 5 | CPUHP_OFFLINE, |
6 | CPUHP_CREATE_THREADS, | 6 | CPUHP_CREATE_THREADS, |
7 | CPUHP_PERF_PREPARE, | ||
8 | CPUHP_PERF_X86_PREPARE, | ||
9 | CPUHP_PERF_X86_UNCORE_PREP, | ||
10 | CPUHP_PERF_X86_AMD_UNCORE_PREP, | ||
11 | CPUHP_PERF_X86_RAPL_PREP, | ||
12 | CPUHP_PERF_BFIN, | ||
13 | CPUHP_PERF_POWER, | ||
14 | CPUHP_PERF_SUPERH, | ||
15 | CPUHP_X86_HPET_DEAD, | ||
16 | CPUHP_X86_APB_DEAD, | ||
17 | CPUHP_WORKQUEUE_PREP, | ||
18 | CPUHP_POWER_NUMA_PREPARE, | ||
19 | CPUHP_HRTIMERS_PREPARE, | ||
20 | CPUHP_PROFILE_PREPARE, | ||
21 | CPUHP_X2APIC_PREPARE, | ||
22 | CPUHP_SMPCFD_PREPARE, | ||
23 | CPUHP_RCUTREE_PREP, | ||
7 | CPUHP_NOTIFY_PREPARE, | 24 | CPUHP_NOTIFY_PREPARE, |
25 | CPUHP_TIMERS_DEAD, | ||
8 | CPUHP_BRINGUP_CPU, | 26 | CPUHP_BRINGUP_CPU, |
9 | CPUHP_AP_IDLE_DEAD, | 27 | CPUHP_AP_IDLE_DEAD, |
10 | CPUHP_AP_OFFLINE, | 28 | CPUHP_AP_OFFLINE, |
11 | CPUHP_AP_SCHED_STARTING, | 29 | CPUHP_AP_SCHED_STARTING, |
30 | CPUHP_AP_RCUTREE_DYING, | ||
31 | CPUHP_AP_IRQ_GIC_STARTING, | ||
32 | CPUHP_AP_IRQ_GICV3_STARTING, | ||
33 | CPUHP_AP_IRQ_HIP04_STARTING, | ||
34 | CPUHP_AP_IRQ_ARMADA_XP_STARTING, | ||
35 | CPUHP_AP_IRQ_ARMADA_CASC_STARTING, | ||
36 | CPUHP_AP_IRQ_BCM2836_STARTING, | ||
37 | CPUHP_AP_ARM_MVEBU_COHERENCY, | ||
38 | CPUHP_AP_PERF_X86_UNCORE_STARTING, | ||
39 | CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING, | ||
40 | CPUHP_AP_PERF_X86_STARTING, | ||
41 | CPUHP_AP_PERF_X86_AMD_IBS_STARTING, | ||
42 | CPUHP_AP_PERF_X86_CQM_STARTING, | ||
43 | CPUHP_AP_PERF_X86_CSTATE_STARTING, | ||
44 | CPUHP_AP_PERF_XTENSA_STARTING, | ||
45 | CPUHP_AP_PERF_METAG_STARTING, | ||
46 | CPUHP_AP_MIPS_OP_LOONGSON3_STARTING, | ||
47 | CPUHP_AP_ARM_VFP_STARTING, | ||
48 | CPUHP_AP_PERF_ARM_STARTING, | ||
49 | CPUHP_AP_ARM_L2X0_STARTING, | ||
50 | CPUHP_AP_ARM_ARCH_TIMER_STARTING, | ||
51 | CPUHP_AP_ARM_GLOBAL_TIMER_STARTING, | ||
52 | CPUHP_AP_DUMMY_TIMER_STARTING, | ||
53 | CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING, | ||
54 | CPUHP_AP_ARM_TWD_STARTING, | ||
55 | CPUHP_AP_METAG_TIMER_STARTING, | ||
56 | CPUHP_AP_QCOM_TIMER_STARTING, | ||
57 | CPUHP_AP_ARMADA_TIMER_STARTING, | ||
58 | CPUHP_AP_MARCO_TIMER_STARTING, | ||
59 | CPUHP_AP_MIPS_GIC_TIMER_STARTING, | ||
60 | CPUHP_AP_ARC_TIMER_STARTING, | ||
61 | CPUHP_AP_KVM_STARTING, | ||
62 | CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING, | ||
63 | CPUHP_AP_KVM_ARM_VGIC_STARTING, | ||
64 | CPUHP_AP_KVM_ARM_TIMER_STARTING, | ||
65 | CPUHP_AP_ARM_XEN_STARTING, | ||
66 | CPUHP_AP_ARM_CORESIGHT_STARTING, | ||
67 | CPUHP_AP_ARM_CORESIGHT4_STARTING, | ||
68 | CPUHP_AP_ARM64_ISNDEP_STARTING, | ||
69 | CPUHP_AP_SMPCFD_DYING, | ||
70 | CPUHP_AP_X86_TBOOT_DYING, | ||
12 | CPUHP_AP_NOTIFY_STARTING, | 71 | CPUHP_AP_NOTIFY_STARTING, |
13 | CPUHP_AP_ONLINE, | 72 | CPUHP_AP_ONLINE, |
14 | CPUHP_TEARDOWN_CPU, | 73 | CPUHP_TEARDOWN_CPU, |
15 | CPUHP_AP_ONLINE_IDLE, | 74 | CPUHP_AP_ONLINE_IDLE, |
16 | CPUHP_AP_SMPBOOT_THREADS, | 75 | CPUHP_AP_SMPBOOT_THREADS, |
76 | CPUHP_AP_X86_VDSO_VMA_ONLINE, | ||
77 | CPUHP_AP_PERF_ONLINE, | ||
78 | CPUHP_AP_PERF_X86_ONLINE, | ||
79 | CPUHP_AP_PERF_X86_UNCORE_ONLINE, | ||
80 | CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE, | ||
81 | CPUHP_AP_PERF_X86_AMD_POWER_ONLINE, | ||
82 | CPUHP_AP_PERF_X86_RAPL_ONLINE, | ||
83 | CPUHP_AP_PERF_X86_CQM_ONLINE, | ||
84 | CPUHP_AP_PERF_X86_CSTATE_ONLINE, | ||
85 | CPUHP_AP_PERF_S390_CF_ONLINE, | ||
86 | CPUHP_AP_PERF_S390_SF_ONLINE, | ||
87 | CPUHP_AP_PERF_ARM_CCI_ONLINE, | ||
88 | CPUHP_AP_PERF_ARM_CCN_ONLINE, | ||
89 | CPUHP_AP_WORKQUEUE_ONLINE, | ||
90 | CPUHP_AP_RCUTREE_ONLINE, | ||
17 | CPUHP_AP_NOTIFY_ONLINE, | 91 | CPUHP_AP_NOTIFY_ONLINE, |
18 | CPUHP_AP_ONLINE_DYN, | 92 | CPUHP_AP_ONLINE_DYN, |
19 | CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30, | 93 | CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30, |
94 | CPUHP_AP_X86_HPET_ONLINE, | ||
95 | CPUHP_AP_X86_KVM_CLK_ONLINE, | ||
20 | CPUHP_AP_ACTIVE, | 96 | CPUHP_AP_ACTIVE, |
21 | CPUHP_ONLINE, | 97 | CPUHP_ONLINE, |
22 | }; | 98 | }; |
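The ordering that the deleted .priority fields used to express now lives in this enum: states are brought up in ascending order and torn down in reverse, so the irqchip STARTING entries sitting ahead of the timer entries encode what the removed notifier comments said about the GIC having to be up before the ARM generic timers. Adding a fixed state is therefore a matter of placing the entry; a hypothetical example:

enum cpuhp_state {
        /* ... */
        CPUHP_AP_IRQ_GIC_STARTING,
        CPUHP_AP_IRQ_GICV3_STARTING,
        /* ... */
        CPUHP_AP_FOO_STARTING,                  /* hypothetical: after the irqchips ... */
        CPUHP_AP_ARM_ARCH_TIMER_STARTING,       /* ... and before the arch timer */
        /* ... */
        CPUHP_AP_ONLINE_DYN,                    /* dynamic states are handed out from here */
        CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30,
        /* ... */
};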
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index c98c6539e2c2..5e00f80b1535 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h | |||
@@ -494,4 +494,11 @@ extern void __init hrtimers_init(void); | |||
494 | /* Show pending timers: */ | 494 | /* Show pending timers: */ |
495 | extern void sysrq_timer_list_show(void); | 495 | extern void sysrq_timer_list_show(void); |
496 | 496 | ||
497 | int hrtimers_prepare_cpu(unsigned int cpu); | ||
498 | #ifdef CONFIG_HOTPLUG_CPU | ||
499 | int hrtimers_dead_cpu(unsigned int cpu); | ||
500 | #else | ||
501 | #define hrtimers_dead_cpu NULL | ||
502 | #endif | ||
503 | |||
497 | #endif | 504 | #endif |
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h index d28ac05c7f92..e18843809eec 100644 --- a/include/linux/perf/arm_pmu.h +++ b/include/linux/perf/arm_pmu.h | |||
@@ -109,7 +109,7 @@ struct arm_pmu { | |||
109 | DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS); | 109 | DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS); |
110 | struct platform_device *plat_device; | 110 | struct platform_device *plat_device; |
111 | struct pmu_hw_events __percpu *hw_events; | 111 | struct pmu_hw_events __percpu *hw_events; |
112 | struct notifier_block hotplug_nb; | 112 | struct list_head entry; |
113 | struct notifier_block cpu_pm_nb; | 113 | struct notifier_block cpu_pm_nb; |
114 | }; | 114 | }; |
115 | 115 | ||
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index e1f921c2e4e0..8ed4326164cc 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h | |||
@@ -1309,41 +1309,6 @@ static __always_inline bool perf_raw_frag_last(const struct perf_raw_frag *frag) | |||
1309 | 1309 | ||
1310 | #define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x)) | 1310 | #define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x)) |
1311 | 1311 | ||
1312 | /* | ||
1313 | * This has to have a higher priority than migration_notifier in sched/core.c. | ||
1314 | */ | ||
1315 | #define perf_cpu_notifier(fn) \ | ||
1316 | do { \ | ||
1317 | static struct notifier_block fn##_nb = \ | ||
1318 | { .notifier_call = fn, .priority = CPU_PRI_PERF }; \ | ||
1319 | unsigned long cpu = smp_processor_id(); \ | ||
1320 | unsigned long flags; \ | ||
1321 | \ | ||
1322 | cpu_notifier_register_begin(); \ | ||
1323 | fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \ | ||
1324 | (void *)(unsigned long)cpu); \ | ||
1325 | local_irq_save(flags); \ | ||
1326 | fn(&fn##_nb, (unsigned long)CPU_STARTING, \ | ||
1327 | (void *)(unsigned long)cpu); \ | ||
1328 | local_irq_restore(flags); \ | ||
1329 | fn(&fn##_nb, (unsigned long)CPU_ONLINE, \ | ||
1330 | (void *)(unsigned long)cpu); \ | ||
1331 | __register_cpu_notifier(&fn##_nb); \ | ||
1332 | cpu_notifier_register_done(); \ | ||
1333 | } while (0) | ||
1334 | |||
1335 | /* | ||
1336 | * Bare-bones version of perf_cpu_notifier(), which doesn't invoke the | ||
1337 | * callback for already online CPUs. | ||
1338 | */ | ||
1339 | #define __perf_cpu_notifier(fn) \ | ||
1340 | do { \ | ||
1341 | static struct notifier_block fn##_nb = \ | ||
1342 | { .notifier_call = fn, .priority = CPU_PRI_PERF }; \ | ||
1343 | \ | ||
1344 | __register_cpu_notifier(&fn##_nb); \ | ||
1345 | } while (0) | ||
1346 | |||
1347 | struct perf_pmu_events_attr { | 1312 | struct perf_pmu_events_attr { |
1348 | struct device_attribute attr; | 1313 | struct device_attribute attr; |
1349 | u64 id; | 1314 | u64 id; |
@@ -1385,4 +1350,13 @@ _name##_show(struct device *dev, \ | |||
1385 | \ | 1350 | \ |
1386 | static struct device_attribute format_attr_##_name = __ATTR_RO(_name) | 1351 | static struct device_attribute format_attr_##_name = __ATTR_RO(_name) |
1387 | 1352 | ||
1353 | /* Performance counter hotplug functions */ | ||
1354 | #ifdef CONFIG_PERF_EVENTS | ||
1355 | int perf_event_init_cpu(unsigned int cpu); | ||
1356 | int perf_event_exit_cpu(unsigned int cpu); | ||
1357 | #else | ||
1358 | #define perf_event_init_cpu NULL | ||
1359 | #define perf_event_exit_cpu NULL | ||
1360 | #endif | ||
1361 | |||
1388 | #endif /* _LINUX_PERF_EVENT_H */ | 1362 | #endif /* _LINUX_PERF_EVENT_H */ |
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 93aea75029fb..ac81e4063b40 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h | |||
@@ -243,4 +243,11 @@ static inline void rcu_all_qs(void) | |||
243 | barrier(); /* Avoid RCU read-side critical sections leaking across. */ | 243 | barrier(); /* Avoid RCU read-side critical sections leaking across. */ |
244 | } | 244 | } |
245 | 245 | ||
246 | /* RCUtree hotplug events */ | ||
247 | #define rcutree_prepare_cpu NULL | ||
248 | #define rcutree_online_cpu NULL | ||
249 | #define rcutree_offline_cpu NULL | ||
250 | #define rcutree_dead_cpu NULL | ||
251 | #define rcutree_dying_cpu NULL | ||
252 | |||
246 | #endif /* __LINUX_RCUTINY_H */ | 253 | #endif /* __LINUX_RCUTINY_H */ |
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 5043cb823fb2..63a4e4cf40a5 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h | |||
@@ -111,4 +111,11 @@ bool rcu_is_watching(void); | |||
111 | 111 | ||
112 | void rcu_all_qs(void); | 112 | void rcu_all_qs(void); |
113 | 113 | ||
114 | /* RCUtree hotplug events */ | ||
115 | int rcutree_prepare_cpu(unsigned int cpu); | ||
116 | int rcutree_online_cpu(unsigned int cpu); | ||
117 | int rcutree_offline_cpu(unsigned int cpu); | ||
118 | int rcutree_dead_cpu(unsigned int cpu); | ||
119 | int rcutree_dying_cpu(unsigned int cpu); | ||
120 | |||
114 | #endif /* __LINUX_RCUTREE_H */ | 121 | #endif /* __LINUX_RCUTREE_H */ |
diff --git a/include/linux/smp.h b/include/linux/smp.h index c4414074bd88..eccae4690f41 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h | |||
@@ -196,4 +196,9 @@ extern void arch_enable_nonboot_cpus_end(void); | |||
196 | 196 | ||
197 | void smp_setup_processor_id(void); | 197 | void smp_setup_processor_id(void); |
198 | 198 | ||
199 | /* SMP core functions */ | ||
200 | int smpcfd_prepare_cpu(unsigned int cpu); | ||
201 | int smpcfd_dead_cpu(unsigned int cpu); | ||
202 | int smpcfd_dying_cpu(unsigned int cpu); | ||
203 | |||
199 | #endif /* __LINUX_SMP_H */ | 204 | #endif /* __LINUX_SMP_H */ |
diff --git a/include/linux/timer.h b/include/linux/timer.h index 4419506b564e..51d601f192d4 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h | |||
@@ -273,4 +273,10 @@ unsigned long __round_jiffies_up_relative(unsigned long j, int cpu); | |||
273 | unsigned long round_jiffies_up(unsigned long j); | 273 | unsigned long round_jiffies_up(unsigned long j); |
274 | unsigned long round_jiffies_up_relative(unsigned long j); | 274 | unsigned long round_jiffies_up_relative(unsigned long j); |
275 | 275 | ||
276 | #ifdef CONFIG_HOTPLUG_CPU | ||
277 | int timers_dead_cpu(unsigned int cpu); | ||
278 | #else | ||
279 | #define timers_dead_cpu NULL | ||
280 | #endif | ||
281 | |||
276 | #endif | 282 | #endif |
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index ca73c503b92a..26cc1df280d6 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h | |||
@@ -625,4 +625,10 @@ void wq_watchdog_touch(int cpu); | |||
625 | static inline void wq_watchdog_touch(int cpu) { } | 625 | static inline void wq_watchdog_touch(int cpu) { } |
626 | #endif /* CONFIG_WQ_WATCHDOG */ | 626 | #endif /* CONFIG_WQ_WATCHDOG */ |
627 | 627 | ||
628 | #ifdef CONFIG_SMP | ||
629 | int workqueue_prepare_cpu(unsigned int cpu); | ||
630 | int workqueue_online_cpu(unsigned int cpu); | ||
631 | int workqueue_offline_cpu(unsigned int cpu); | ||
632 | #endif | ||
633 | |||
628 | #endif | 634 | #endif |
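The header hunks above (hrtimer.h, timer.h, smp.h, workqueue.h, rcutiny.h/rcutree.h, perf_event.h) share one convention: declare the hotplug callback when the subsystem is built in, and #define it to NULL otherwise, so the static state tables in kernel/cpu.c below can name it unconditionally. For a hypothetical subsystem the stub looks like:

/* include/linux/foo.h -- hypothetical */
#ifdef CONFIG_FOO
int foo_prepare_cpu(unsigned int cpu);
int foo_dead_cpu(unsigned int cpu);
#else
#define foo_prepare_cpu NULL
#define foo_dead_cpu    NULL
#endif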
diff --git a/kernel/cpu.c b/kernel/cpu.c index 7b61887f7ccd..341bf80f80bd 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c | |||
@@ -517,6 +517,13 @@ static int cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, | |||
517 | if (!cpu_online(cpu)) | 517 | if (!cpu_online(cpu)) |
518 | return 0; | 518 | return 0; |
519 | 519 | ||
520 | /* | ||
521 | * If we are up and running, use the hotplug thread. For early calls | ||
522 | * we invoke the thread function directly. | ||
523 | */ | ||
524 | if (!st->thread) | ||
525 | return cpuhp_invoke_callback(cpu, state, cb); | ||
526 | |||
520 | st->cb_state = state; | 527 | st->cb_state = state; |
521 | st->cb = cb; | 528 | st->cb = cb; |
522 | /* | 529 | /* |
@@ -1173,6 +1180,31 @@ static struct cpuhp_step cpuhp_bp_states[] = { | |||
1173 | .teardown = NULL, | 1180 | .teardown = NULL, |
1174 | .cant_stop = true, | 1181 | .cant_stop = true, |
1175 | }, | 1182 | }, |
1183 | [CPUHP_PERF_PREPARE] = { | ||
1184 | .name = "perf prepare", | ||
1185 | .startup = perf_event_init_cpu, | ||
1186 | .teardown = perf_event_exit_cpu, | ||
1187 | }, | ||
1188 | [CPUHP_WORKQUEUE_PREP] = { | ||
1189 | .name = "workqueue prepare", | ||
1190 | .startup = workqueue_prepare_cpu, | ||
1191 | .teardown = NULL, | ||
1192 | }, | ||
1193 | [CPUHP_HRTIMERS_PREPARE] = { | ||
1194 | .name = "hrtimers prepare", | ||
1195 | .startup = hrtimers_prepare_cpu, | ||
1196 | .teardown = hrtimers_dead_cpu, | ||
1197 | }, | ||
1198 | [CPUHP_SMPCFD_PREPARE] = { | ||
1199 | .name = "SMPCFD prepare", | ||
1200 | .startup = smpcfd_prepare_cpu, | ||
1201 | .teardown = smpcfd_dead_cpu, | ||
1202 | }, | ||
1203 | [CPUHP_RCUTREE_PREP] = { | ||
1204 | .name = "RCU-tree prepare", | ||
1205 | .startup = rcutree_prepare_cpu, | ||
1206 | .teardown = rcutree_dead_cpu, | ||
1207 | }, | ||
1176 | /* | 1208 | /* |
1177 | * Preparatory and dead notifiers. Will be replaced once the notifiers | 1209 | * Preparatory and dead notifiers. Will be replaced once the notifiers |
1178 | * are converted to states. | 1210 | * are converted to states. |
@@ -1184,6 +1216,16 @@ static struct cpuhp_step cpuhp_bp_states[] = { | |||
1184 | .skip_onerr = true, | 1216 | .skip_onerr = true, |
1185 | .cant_stop = true, | 1217 | .cant_stop = true, |
1186 | }, | 1218 | }, |
1219 | /* | ||
1220 | * On the tear-down path, timers_dead_cpu() must be invoked | ||
1221 | * before blk_mq_queue_reinit_notify() from notify_dead(), | ||
1222 | * otherwise an RCU stall occurs. | ||
1223 | */ | ||
1224 | [CPUHP_TIMERS_DEAD] = { | ||
1225 | .name = "timers dead", | ||
1226 | .startup = NULL, | ||
1227 | .teardown = timers_dead_cpu, | ||
1228 | }, | ||
1187 | /* Kicks the plugged cpu into life */ | 1229 | /* Kicks the plugged cpu into life */ |
1188 | [CPUHP_BRINGUP_CPU] = { | 1230 | [CPUHP_BRINGUP_CPU] = { |
1189 | .name = "cpu:bringup", | 1231 | .name = "cpu:bringup", |
@@ -1191,6 +1233,10 @@ static struct cpuhp_step cpuhp_bp_states[] = { | |||
1191 | .teardown = NULL, | 1233 | .teardown = NULL, |
1192 | .cant_stop = true, | 1234 | .cant_stop = true, |
1193 | }, | 1235 | }, |
1236 | [CPUHP_AP_SMPCFD_DYING] = { | ||
1237 | .startup = NULL, | ||
1238 | .teardown = smpcfd_dying_cpu, | ||
1239 | }, | ||
1194 | /* | 1240 | /* |
1195 | * Handled on control processor until the plugged processor manages | 1241 | * Handled on control processor until the plugged processor manages |
1196 | * this itself. | 1242 | * this itself. |
@@ -1227,6 +1273,10 @@ static struct cpuhp_step cpuhp_ap_states[] = { | |||
1227 | .startup = sched_cpu_starting, | 1273 | .startup = sched_cpu_starting, |
1228 | .teardown = sched_cpu_dying, | 1274 | .teardown = sched_cpu_dying, |
1229 | }, | 1275 | }, |
1276 | [CPUHP_AP_RCUTREE_DYING] = { | ||
1277 | .startup = NULL, | ||
1278 | .teardown = rcutree_dying_cpu, | ||
1279 | }, | ||
1230 | /* | 1280 | /* |
1231 | * Low level startup/teardown notifiers. Run with interrupts | 1281 | * Low level startup/teardown notifiers. Run with interrupts |
1232 | * disabled. Will be removed once the notifiers are converted to | 1282 | * disabled. Will be removed once the notifiers are converted to |
@@ -1250,6 +1300,22 @@ static struct cpuhp_step cpuhp_ap_states[] = { | |||
1250 | .startup = smpboot_unpark_threads, | 1300 | .startup = smpboot_unpark_threads, |
1251 | .teardown = NULL, | 1301 | .teardown = NULL, |
1252 | }, | 1302 | }, |
1303 | [CPUHP_AP_PERF_ONLINE] = { | ||
1304 | .name = "perf online", | ||
1305 | .startup = perf_event_init_cpu, | ||
1306 | .teardown = perf_event_exit_cpu, | ||
1307 | }, | ||
1308 | [CPUHP_AP_WORKQUEUE_ONLINE] = { | ||
1309 | .name = "workqueue online", | ||
1310 | .startup = workqueue_online_cpu, | ||
1311 | .teardown = workqueue_offline_cpu, | ||
1312 | }, | ||
1313 | [CPUHP_AP_RCUTREE_ONLINE] = { | ||
1314 | .name = "RCU-tree online", | ||
1315 | .startup = rcutree_online_cpu, | ||
1316 | .teardown = rcutree_offline_cpu, | ||
1317 | }, | ||
1318 | |||
1253 | /* | 1319 | /* |
1254 | * Online/down_prepare notifiers. Will be removed once the notifiers | 1320 | * Online/down_prepare notifiers. Will be removed once the notifiers |
1255 | * are converted to states. | 1321 | * are converted to states. |
diff --git a/kernel/events/core.c b/kernel/events/core.c index 09ae27b353c1..356a6c7cb52a 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
@@ -10357,7 +10357,7 @@ static void __init perf_event_init_all_cpus(void) | |||
10357 | } | 10357 | } |
10358 | } | 10358 | } |
10359 | 10359 | ||
10360 | static void perf_event_init_cpu(int cpu) | 10360 | int perf_event_init_cpu(unsigned int cpu) |
10361 | { | 10361 | { |
10362 | struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); | 10362 | struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); |
10363 | 10363 | ||
@@ -10370,6 +10370,7 @@ static void perf_event_init_cpu(int cpu) | |||
10370 | rcu_assign_pointer(swhash->swevent_hlist, hlist); | 10370 | rcu_assign_pointer(swhash->swevent_hlist, hlist); |
10371 | } | 10371 | } |
10372 | mutex_unlock(&swhash->hlist_mutex); | 10372 | mutex_unlock(&swhash->hlist_mutex); |
10373 | return 0; | ||
10373 | } | 10374 | } |
10374 | 10375 | ||
10375 | #if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE | 10376 | #if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE |
@@ -10401,14 +10402,17 @@ static void perf_event_exit_cpu_context(int cpu) | |||
10401 | } | 10402 | } |
10402 | srcu_read_unlock(&pmus_srcu, idx); | 10403 | srcu_read_unlock(&pmus_srcu, idx); |
10403 | } | 10404 | } |
10405 | #else | ||
10406 | |||
10407 | static void perf_event_exit_cpu_context(int cpu) { } | ||
10408 | |||
10409 | #endif | ||
10404 | 10410 | ||
10405 | static void perf_event_exit_cpu(int cpu) | 10411 | int perf_event_exit_cpu(unsigned int cpu) |
10406 | { | 10412 | { |
10407 | perf_event_exit_cpu_context(cpu); | 10413 | perf_event_exit_cpu_context(cpu); |
10414 | return 0; | ||
10408 | } | 10415 | } |
10409 | #else | ||
10410 | static inline void perf_event_exit_cpu(int cpu) { } | ||
10411 | #endif | ||
10412 | 10416 | ||
10413 | static int | 10417 | static int |
10414 | perf_reboot(struct notifier_block *notifier, unsigned long val, void *v) | 10418 | perf_reboot(struct notifier_block *notifier, unsigned long val, void *v) |
@@ -10430,46 +10434,6 @@ static struct notifier_block perf_reboot_notifier = { | |||
10430 | .priority = INT_MIN, | 10434 | .priority = INT_MIN, |
10431 | }; | 10435 | }; |
10432 | 10436 | ||
10433 | static int | ||
10434 | perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) | ||
10435 | { | ||
10436 | unsigned int cpu = (long)hcpu; | ||
10437 | |||
10438 | switch (action & ~CPU_TASKS_FROZEN) { | ||
10439 | |||
10440 | case CPU_UP_PREPARE: | ||
10441 | /* | ||
10442 | * This must be done before the CPU comes alive, because the | ||
10443 | * moment we can run tasks we can encounter (software) events. | ||
10444 | * | ||
10445 | * Specifically, someone can have inherited events on kthreadd | ||
10446 | * or a pre-existing worker thread that gets re-bound. | ||
10447 | */ | ||
10448 | perf_event_init_cpu(cpu); | ||
10449 | break; | ||
10450 | |||
10451 | case CPU_DOWN_PREPARE: | ||
10452 | /* | ||
10453 | * This must be done before the CPU dies because after that an | ||
10454 | * active event might want to IPI the CPU and that'll not work | ||
10455 | * so great for dead CPUs. | ||
10456 | * | ||
10457 | * XXX smp_call_function_single() return -ENXIO without a warn | ||
10458 | * so we could possibly deal with this. | ||
10459 | * | ||
10460 | * This is safe against new events arriving because | ||
10461 | * sys_perf_event_open() serializes against hotplug using | ||
10462 | * get_online_cpus(). | ||
10463 | */ | ||
10464 | perf_event_exit_cpu(cpu); | ||
10465 | break; | ||
10466 | default: | ||
10467 | break; | ||
10468 | } | ||
10469 | |||
10470 | return NOTIFY_OK; | ||
10471 | } | ||
10472 | |||
10473 | void __init perf_event_init(void) | 10437 | void __init perf_event_init(void) |
10474 | { | 10438 | { |
10475 | int ret; | 10439 | int ret; |
@@ -10482,7 +10446,7 @@ void __init perf_event_init(void) | |||
10482 | perf_pmu_register(&perf_cpu_clock, NULL, -1); | 10446 | perf_pmu_register(&perf_cpu_clock, NULL, -1); |
10483 | perf_pmu_register(&perf_task_clock, NULL, -1); | 10447 | perf_pmu_register(&perf_task_clock, NULL, -1); |
10484 | perf_tp_register(); | 10448 | perf_tp_register(); |
10485 | perf_cpu_notifier(perf_cpu_notify); | 10449 | perf_event_init_cpu(smp_processor_id()); |
10486 | register_reboot_notifier(&perf_reboot_notifier); | 10450 | register_reboot_notifier(&perf_reboot_notifier); |
10487 | 10451 | ||
10488 | ret = init_hw_breakpoint(); | 10452 | ret = init_hw_breakpoint(); |
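With perf_cpu_notifier() gone, the hunk above also shows how the boot CPU is covered: perf_event_init() calls perf_event_init_cpu() directly for the current CPU, while later CPUs go through the CPUHP_PERF_PREPARE and CPUHP_AP_PERF_ONLINE entries added to the tables in kernel/cpu.c above (call_function_init() and rcu_init() further down do the same for their prepare steps). The idiom in miniature, for a hypothetical early-boot subsystem:

#include <linux/init.h>
#include <linux/smp.h>

int foo_prepare_cpu(unsigned int cpu); /* also wired into a hypothetical CPUHP_FOO_PREPARE entry */

void __init foo_init(void)
{
        /*
         * The state table is not applied retroactively to CPUs that are
         * already up, so run the prepare step for the boot CPU by hand;
         * CPUs plugged in later go through the table entry.
         */
        foo_prepare_cpu(smp_processor_id());
}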
diff --git a/kernel/profile.c b/kernel/profile.c index c2199e9901c9..2dbccf2d806c 100644 --- a/kernel/profile.c +++ b/kernel/profile.c | |||
@@ -328,68 +328,57 @@ out: | |||
328 | put_cpu(); | 328 | put_cpu(); |
329 | } | 329 | } |
330 | 330 | ||
331 | static int profile_cpu_callback(struct notifier_block *info, | 331 | static int profile_dead_cpu(unsigned int cpu) |
332 | unsigned long action, void *__cpu) | ||
333 | { | 332 | { |
334 | int node, cpu = (unsigned long)__cpu; | ||
335 | struct page *page; | 333 | struct page *page; |
334 | int i; | ||
336 | 335 | ||
337 | switch (action) { | 336 | if (prof_cpu_mask != NULL) |
338 | case CPU_UP_PREPARE: | 337 | cpumask_clear_cpu(cpu, prof_cpu_mask); |
339 | case CPU_UP_PREPARE_FROZEN: | 338 | |
340 | node = cpu_to_mem(cpu); | 339 | for (i = 0; i < 2; i++) { |
341 | per_cpu(cpu_profile_flip, cpu) = 0; | 340 | if (per_cpu(cpu_profile_hits, cpu)[i]) { |
342 | if (!per_cpu(cpu_profile_hits, cpu)[1]) { | 341 | page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[i]); |
343 | page = __alloc_pages_node(node, | 342 | per_cpu(cpu_profile_hits, cpu)[i] = NULL; |
344 | GFP_KERNEL | __GFP_ZERO, | ||
345 | 0); | ||
346 | if (!page) | ||
347 | return notifier_from_errno(-ENOMEM); | ||
348 | per_cpu(cpu_profile_hits, cpu)[1] = page_address(page); | ||
349 | } | ||
350 | if (!per_cpu(cpu_profile_hits, cpu)[0]) { | ||
351 | page = __alloc_pages_node(node, | ||
352 | GFP_KERNEL | __GFP_ZERO, | ||
353 | 0); | ||
354 | if (!page) | ||
355 | goto out_free; | ||
356 | per_cpu(cpu_profile_hits, cpu)[0] = page_address(page); | ||
357 | } | ||
358 | break; | ||
359 | out_free: | ||
360 | page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]); | ||
361 | per_cpu(cpu_profile_hits, cpu)[1] = NULL; | ||
362 | __free_page(page); | ||
363 | return notifier_from_errno(-ENOMEM); | ||
364 | case CPU_ONLINE: | ||
365 | case CPU_ONLINE_FROZEN: | ||
366 | if (prof_cpu_mask != NULL) | ||
367 | cpumask_set_cpu(cpu, prof_cpu_mask); | ||
368 | break; | ||
369 | case CPU_UP_CANCELED: | ||
370 | case CPU_UP_CANCELED_FROZEN: | ||
371 | case CPU_DEAD: | ||
372 | case CPU_DEAD_FROZEN: | ||
373 | if (prof_cpu_mask != NULL) | ||
374 | cpumask_clear_cpu(cpu, prof_cpu_mask); | ||
375 | if (per_cpu(cpu_profile_hits, cpu)[0]) { | ||
376 | page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]); | ||
377 | per_cpu(cpu_profile_hits, cpu)[0] = NULL; | ||
378 | __free_page(page); | 343 | __free_page(page); |
379 | } | 344 | } |
380 | if (per_cpu(cpu_profile_hits, cpu)[1]) { | 345 | } |
381 | page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]); | 346 | return 0; |
382 | per_cpu(cpu_profile_hits, cpu)[1] = NULL; | 347 | } |
383 | __free_page(page); | 348 | |
349 | static int profile_prepare_cpu(unsigned int cpu) | ||
350 | { | ||
351 | int i, node = cpu_to_mem(cpu); | ||
352 | struct page *page; | ||
353 | |||
354 | per_cpu(cpu_profile_flip, cpu) = 0; | ||
355 | |||
356 | for (i = 0; i < 2; i++) { | ||
357 | if (per_cpu(cpu_profile_hits, cpu)[i]) | ||
358 | continue; | ||
359 | |||
360 | page = __alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0); | ||
361 | if (!page) { | ||
362 | profile_dead_cpu(cpu); | ||
363 | return -ENOMEM; | ||
384 | } | 364 | } |
385 | break; | 365 | per_cpu(cpu_profile_hits, cpu)[i] = page_address(page); |
366 | |||
386 | } | 367 | } |
387 | return NOTIFY_OK; | 368 | return 0; |
369 | } | ||
370 | |||
371 | static int profile_online_cpu(unsigned int cpu) | ||
372 | { | ||
373 | if (prof_cpu_mask != NULL) | ||
374 | cpumask_set_cpu(cpu, prof_cpu_mask); | ||
375 | |||
376 | return 0; | ||
388 | } | 377 | } |
378 | |||
389 | #else /* !CONFIG_SMP */ | 379 | #else /* !CONFIG_SMP */ |
390 | #define profile_flip_buffers() do { } while (0) | 380 | #define profile_flip_buffers() do { } while (0) |
391 | #define profile_discard_flip_buffers() do { } while (0) | 381 | #define profile_discard_flip_buffers() do { } while (0) |
392 | #define profile_cpu_callback NULL | ||
393 | 382 | ||
394 | static void do_profile_hits(int type, void *__pc, unsigned int nr_hits) | 383 | static void do_profile_hits(int type, void *__pc, unsigned int nr_hits) |
395 | { | 384 | { |
@@ -531,83 +520,43 @@ static const struct file_operations proc_profile_operations = { | |||
531 | .llseek = default_llseek, | 520 | .llseek = default_llseek, |
532 | }; | 521 | }; |
533 | 522 | ||
534 | #ifdef CONFIG_SMP | 523 | int __ref create_proc_profile(void) |
535 | static void profile_nop(void *unused) | ||
536 | { | ||
537 | } | ||
538 | |||
539 | static int create_hash_tables(void) | ||
540 | { | 524 | { |
541 | int cpu; | 525 | struct proc_dir_entry *entry; |
542 | 526 | #ifdef CONFIG_SMP | |
543 | for_each_online_cpu(cpu) { | 527 | enum cpuhp_state online_state; |
544 | int node = cpu_to_mem(cpu); | ||
545 | struct page *page; | ||
546 | |||
547 | page = __alloc_pages_node(node, | ||
548 | GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE, | ||
549 | 0); | ||
550 | if (!page) | ||
551 | goto out_cleanup; | ||
552 | per_cpu(cpu_profile_hits, cpu)[1] | ||
553 | = (struct profile_hit *)page_address(page); | ||
554 | page = __alloc_pages_node(node, | ||
555 | GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE, | ||
556 | 0); | ||
557 | if (!page) | ||
558 | goto out_cleanup; | ||
559 | per_cpu(cpu_profile_hits, cpu)[0] | ||
560 | = (struct profile_hit *)page_address(page); | ||
561 | } | ||
562 | return 0; | ||
563 | out_cleanup: | ||
564 | prof_on = 0; | ||
565 | smp_mb(); | ||
566 | on_each_cpu(profile_nop, NULL, 1); | ||
567 | for_each_online_cpu(cpu) { | ||
568 | struct page *page; | ||
569 | |||
570 | if (per_cpu(cpu_profile_hits, cpu)[0]) { | ||
571 | page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]); | ||
572 | per_cpu(cpu_profile_hits, cpu)[0] = NULL; | ||
573 | __free_page(page); | ||
574 | } | ||
575 | if (per_cpu(cpu_profile_hits, cpu)[1]) { | ||
576 | page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]); | ||
577 | per_cpu(cpu_profile_hits, cpu)[1] = NULL; | ||
578 | __free_page(page); | ||
579 | } | ||
580 | } | ||
581 | return -1; | ||
582 | } | ||
583 | #else | ||
584 | #define create_hash_tables() ({ 0; }) | ||
585 | #endif | 528 | #endif |
586 | 529 | ||
587 | int __ref create_proc_profile(void) /* false positive from hotcpu_notifier */ | ||
588 | { | ||
589 | struct proc_dir_entry *entry; | ||
590 | int err = 0; | 530 | int err = 0; |
591 | 531 | ||
592 | if (!prof_on) | 532 | if (!prof_on) |
593 | return 0; | 533 | return 0; |
594 | 534 | #ifdef CONFIG_SMP | |
595 | cpu_notifier_register_begin(); | 535 | err = cpuhp_setup_state(CPUHP_PROFILE_PREPARE, "PROFILE_PREPARE", |
596 | 536 | profile_prepare_cpu, profile_dead_cpu); | |
597 | if (create_hash_tables()) { | 537 | if (err) |
598 | err = -ENOMEM; | 538 | return err; |
599 | goto out; | 539 | |
600 | } | 540 | err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "AP_PROFILE_ONLINE", |
601 | 541 | profile_online_cpu, NULL); | |
542 | if (err < 0) | ||
543 | goto err_state_prep; | ||
544 | online_state = err; | ||
545 | err = 0; | ||
546 | #endif | ||
602 | entry = proc_create("profile", S_IWUSR | S_IRUGO, | 547 | entry = proc_create("profile", S_IWUSR | S_IRUGO, |
603 | NULL, &proc_profile_operations); | 548 | NULL, &proc_profile_operations); |
604 | if (!entry) | 549 | if (!entry) |
605 | goto out; | 550 | goto err_state_onl; |
606 | proc_set_size(entry, (1 + prof_len) * sizeof(atomic_t)); | 551 | proc_set_size(entry, (1 + prof_len) * sizeof(atomic_t)); |
607 | __hotcpu_notifier(profile_cpu_callback, 0); | ||
608 | 552 | ||
609 | out: | 553 | return err; |
610 | cpu_notifier_register_done(); | 554 | err_state_onl: |
555 | #ifdef CONFIG_SMP | ||
556 | cpuhp_remove_state(online_state); | ||
557 | err_state_prep: | ||
558 | cpuhp_remove_state(CPUHP_PROFILE_PREPARE); | ||
559 | #endif | ||
611 | return err; | 560 | return err; |
612 | } | 561 | } |
613 | subsys_initcall(create_proc_profile); | 562 | subsys_initcall(create_proc_profile); |
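profile_prepare_cpu() above also demonstrates the expected error behaviour for a prepare-level callback: free whatever this invocation managed to allocate, return -ENOMEM, and let the cpuhp core unwind the states already completed for that CPU. A compact sketch of the same idea with a hypothetical pair of per-CPU buffers:

#include <linux/gfp.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(void *, foo_buf[2]);      /* hypothetical per-CPU buffers */

static int foo_dead_cpu(unsigned int cpu)
{
        int i;

        /* free_page() tolerates a NULL address, so this also cleans up partial setups. */
        for (i = 0; i < 2; i++) {
                free_page((unsigned long)per_cpu(foo_buf, cpu)[i]);
                per_cpu(foo_buf, cpu)[i] = NULL;
        }
        return 0;
}

static int foo_prepare_cpu(unsigned int cpu)
{
        int i;

        for (i = 0; i < 2; i++) {
                per_cpu(foo_buf, cpu)[i] =
                        (void *)get_zeroed_page(GFP_KERNEL);
                if (!per_cpu(foo_buf, cpu)[i]) {
                        foo_dead_cpu(cpu);      /* drop what was allocated so far */
                        return -ENOMEM;
                }
        }
        return 0;
}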
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index f433959e9322..5d80925e7fc8 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c | |||
@@ -1073,11 +1073,11 @@ EXPORT_SYMBOL_GPL(rcu_is_watching); | |||
1073 | * offline to continue to use RCU for one jiffy after marking itself | 1073 | * offline to continue to use RCU for one jiffy after marking itself |
1074 | * offline in the cpu_online_mask. This leniency is necessary given the | 1074 | * offline in the cpu_online_mask. This leniency is necessary given the |
1075 | * non-atomic nature of the online and offline processing, for example, | 1075 | * non-atomic nature of the online and offline processing, for example, |
1076 | * the fact that a CPU enters the scheduler after completing the CPU_DYING | 1076 | * the fact that a CPU enters the scheduler after completing the teardown |
1077 | * notifiers. | 1077 | * of the CPU. |
1078 | * | 1078 | * |
1079 | * This is also why RCU internally marks CPUs online during the | 1079 | * This is also why RCU internally marks CPUs online during the |
1080 | * CPU_UP_PREPARE phase and offline during the CPU_DEAD phase. | 1080 | * preparation phase and offline after the CPU has been taken down. |
1081 | * | 1081 | * |
1082 | * Disable checking if in an NMI handler because we cannot safely report | 1082 | * Disable checking if in an NMI handler because we cannot safely report |
1083 | * errors from NMI handlers anyway. | 1083 | * errors from NMI handlers anyway. |
@@ -3806,12 +3806,58 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp) | |||
3806 | raw_spin_unlock_irqrestore_rcu_node(rnp, flags); | 3806 | raw_spin_unlock_irqrestore_rcu_node(rnp, flags); |
3807 | } | 3807 | } |
3808 | 3808 | ||
3809 | static void rcu_prepare_cpu(int cpu) | 3809 | int rcutree_prepare_cpu(unsigned int cpu) |
3810 | { | 3810 | { |
3811 | struct rcu_state *rsp; | 3811 | struct rcu_state *rsp; |
3812 | 3812 | ||
3813 | for_each_rcu_flavor(rsp) | 3813 | for_each_rcu_flavor(rsp) |
3814 | rcu_init_percpu_data(cpu, rsp); | 3814 | rcu_init_percpu_data(cpu, rsp); |
3815 | |||
3816 | rcu_prepare_kthreads(cpu); | ||
3817 | rcu_spawn_all_nocb_kthreads(cpu); | ||
3818 | |||
3819 | return 0; | ||
3820 | } | ||
3821 | |||
3822 | static void rcutree_affinity_setting(unsigned int cpu, int outgoing) | ||
3823 | { | ||
3824 | struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu); | ||
3825 | |||
3826 | rcu_boost_kthread_setaffinity(rdp->mynode, outgoing); | ||
3827 | } | ||
3828 | |||
3829 | int rcutree_online_cpu(unsigned int cpu) | ||
3830 | { | ||
3831 | sync_sched_exp_online_cleanup(cpu); | ||
3832 | rcutree_affinity_setting(cpu, -1); | ||
3833 | return 0; | ||
3834 | } | ||
3835 | |||
3836 | int rcutree_offline_cpu(unsigned int cpu) | ||
3837 | { | ||
3838 | rcutree_affinity_setting(cpu, cpu); | ||
3839 | return 0; | ||
3840 | } | ||
3841 | |||
3842 | |||
3843 | int rcutree_dying_cpu(unsigned int cpu) | ||
3844 | { | ||
3845 | struct rcu_state *rsp; | ||
3846 | |||
3847 | for_each_rcu_flavor(rsp) | ||
3848 | rcu_cleanup_dying_cpu(rsp); | ||
3849 | return 0; | ||
3850 | } | ||
3851 | |||
3852 | int rcutree_dead_cpu(unsigned int cpu) | ||
3853 | { | ||
3854 | struct rcu_state *rsp; | ||
3855 | |||
3856 | for_each_rcu_flavor(rsp) { | ||
3857 | rcu_cleanup_dead_cpu(cpu, rsp); | ||
3858 | do_nocb_deferred_wakeup(per_cpu_ptr(rsp->rda, cpu)); | ||
3859 | } | ||
3860 | return 0; | ||
3815 | } | 3861 | } |
3816 | 3862 | ||
3817 | #ifdef CONFIG_HOTPLUG_CPU | 3863 | #ifdef CONFIG_HOTPLUG_CPU |
@@ -3851,52 +3897,6 @@ void rcu_report_dead(unsigned int cpu) | |||
3851 | } | 3897 | } |
3852 | #endif | 3898 | #endif |
3853 | 3899 | ||
3854 | /* | ||
3855 | * Handle CPU online/offline notification events. | ||
3856 | */ | ||
3857 | int rcu_cpu_notify(struct notifier_block *self, | ||
3858 | unsigned long action, void *hcpu) | ||
3859 | { | ||
3860 | long cpu = (long)hcpu; | ||
3861 | struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu); | ||
3862 | struct rcu_node *rnp = rdp->mynode; | ||
3863 | struct rcu_state *rsp; | ||
3864 | |||
3865 | switch (action) { | ||
3866 | case CPU_UP_PREPARE: | ||
3867 | case CPU_UP_PREPARE_FROZEN: | ||
3868 | rcu_prepare_cpu(cpu); | ||
3869 | rcu_prepare_kthreads(cpu); | ||
3870 | rcu_spawn_all_nocb_kthreads(cpu); | ||
3871 | break; | ||
3872 | case CPU_ONLINE: | ||
3873 | case CPU_DOWN_FAILED: | ||
3874 | sync_sched_exp_online_cleanup(cpu); | ||
3875 | rcu_boost_kthread_setaffinity(rnp, -1); | ||
3876 | break; | ||
3877 | case CPU_DOWN_PREPARE: | ||
3878 | rcu_boost_kthread_setaffinity(rnp, cpu); | ||
3879 | break; | ||
3880 | case CPU_DYING: | ||
3881 | case CPU_DYING_FROZEN: | ||
3882 | for_each_rcu_flavor(rsp) | ||
3883 | rcu_cleanup_dying_cpu(rsp); | ||
3884 | break; | ||
3885 | case CPU_DEAD: | ||
3886 | case CPU_DEAD_FROZEN: | ||
3887 | case CPU_UP_CANCELED: | ||
3888 | case CPU_UP_CANCELED_FROZEN: | ||
3889 | for_each_rcu_flavor(rsp) { | ||
3890 | rcu_cleanup_dead_cpu(cpu, rsp); | ||
3891 | do_nocb_deferred_wakeup(per_cpu_ptr(rsp->rda, cpu)); | ||
3892 | } | ||
3893 | break; | ||
3894 | default: | ||
3895 | break; | ||
3896 | } | ||
3897 | return NOTIFY_OK; | ||
3898 | } | ||
3899 | |||
3900 | static int rcu_pm_notify(struct notifier_block *self, | 3900 | static int rcu_pm_notify(struct notifier_block *self, |
3901 | unsigned long action, void *hcpu) | 3901 | unsigned long action, void *hcpu) |
3902 | { | 3902 | { |
@@ -4208,10 +4208,9 @@ void __init rcu_init(void) | |||
4208 | * this is called early in boot, before either interrupts | 4208 | * this is called early in boot, before either interrupts |
4209 | * or the scheduler are operational. | 4209 | * or the scheduler are operational. |
4210 | */ | 4210 | */ |
4211 | cpu_notifier(rcu_cpu_notify, 0); | ||
4212 | pm_notifier(rcu_pm_notify, 0); | 4211 | pm_notifier(rcu_pm_notify, 0); |
4213 | for_each_online_cpu(cpu) | 4212 | for_each_online_cpu(cpu) |
4214 | rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu); | 4213 | rcutree_prepare_cpu(cpu); |
4215 | } | 4214 | } |
4216 | 4215 | ||
4217 | #include "tree_exp.h" | 4216 | #include "tree_exp.h" |
diff --git a/kernel/smp.c b/kernel/smp.c index 36552beed397..3aa642d39c03 100644 --- a/kernel/smp.c +++ b/kernel/smp.c | |||
@@ -33,69 +33,54 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue); | |||
33 | 33 | ||
34 | static void flush_smp_call_function_queue(bool warn_cpu_offline); | 34 | static void flush_smp_call_function_queue(bool warn_cpu_offline); |
35 | 35 | ||
36 | static int | 36 | int smpcfd_prepare_cpu(unsigned int cpu) |
37 | hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu) | ||
38 | { | 37 | { |
39 | long cpu = (long)hcpu; | ||
40 | struct call_function_data *cfd = &per_cpu(cfd_data, cpu); | 38 | struct call_function_data *cfd = &per_cpu(cfd_data, cpu); |
41 | 39 | ||
42 | switch (action) { | 40 | if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL, |
43 | case CPU_UP_PREPARE: | 41 | cpu_to_node(cpu))) |
44 | case CPU_UP_PREPARE_FROZEN: | 42 | return -ENOMEM; |
45 | if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL, | 43 | cfd->csd = alloc_percpu(struct call_single_data); |
46 | cpu_to_node(cpu))) | 44 | if (!cfd->csd) { |
47 | return notifier_from_errno(-ENOMEM); | ||
48 | cfd->csd = alloc_percpu(struct call_single_data); | ||
49 | if (!cfd->csd) { | ||
50 | free_cpumask_var(cfd->cpumask); | ||
51 | return notifier_from_errno(-ENOMEM); | ||
52 | } | ||
53 | break; | ||
54 | |||
55 | #ifdef CONFIG_HOTPLUG_CPU | ||
56 | case CPU_UP_CANCELED: | ||
57 | case CPU_UP_CANCELED_FROZEN: | ||
58 | /* Fall-through to the CPU_DEAD[_FROZEN] case. */ | ||
59 | |||
60 | case CPU_DEAD: | ||
61 | case CPU_DEAD_FROZEN: | ||
62 | free_cpumask_var(cfd->cpumask); | 45 | free_cpumask_var(cfd->cpumask); |
63 | free_percpu(cfd->csd); | 46 | return -ENOMEM; |
64 | break; | 47 | } |
65 | 48 | ||
66 | case CPU_DYING: | 49 | return 0; |
67 | case CPU_DYING_FROZEN: | 50 | } |
68 | /* | 51 | |
69 | * The IPIs for the smp-call-function callbacks queued by other | 52 | int smpcfd_dead_cpu(unsigned int cpu) |
70 | * CPUs might arrive late, either due to hardware latencies or | 53 | { |
71 | * because this CPU disabled interrupts (inside stop-machine) | 54 | struct call_function_data *cfd = &per_cpu(cfd_data, cpu); |
72 | * before the IPIs were sent. So flush out any pending callbacks | ||
73 | * explicitly (without waiting for the IPIs to arrive), to | ||
74 | * ensure that the outgoing CPU doesn't go offline with work | ||
75 | * still pending. | ||
76 | */ | ||
77 | flush_smp_call_function_queue(false); | ||
78 | break; | ||
79 | #endif | ||
80 | }; | ||
81 | 55 | ||
82 | return NOTIFY_OK; | 56 | free_cpumask_var(cfd->cpumask); |
57 | free_percpu(cfd->csd); | ||
58 | return 0; | ||
83 | } | 59 | } |
84 | 60 | ||
85 | static struct notifier_block hotplug_cfd_notifier = { | 61 | int smpcfd_dying_cpu(unsigned int cpu) |
86 | .notifier_call = hotplug_cfd, | 62 | { |
87 | }; | 63 | /* |
64 | * The IPIs for the smp-call-function callbacks queued by other | ||
65 | * CPUs might arrive late, either due to hardware latencies or | ||
66 | * because this CPU disabled interrupts (inside stop-machine) | ||
67 | * before the IPIs were sent. So flush out any pending callbacks | ||
68 | * explicitly (without waiting for the IPIs to arrive), to | ||
69 | * ensure that the outgoing CPU doesn't go offline with work | ||
70 | * still pending. | ||
71 | */ | ||
72 | flush_smp_call_function_queue(false); | ||
73 | return 0; | ||
74 | } | ||
88 | 75 | ||
89 | void __init call_function_init(void) | 76 | void __init call_function_init(void) |
90 | { | 77 | { |
91 | void *cpu = (void *)(long)smp_processor_id(); | ||
92 | int i; | 78 | int i; |
93 | 79 | ||
94 | for_each_possible_cpu(i) | 80 | for_each_possible_cpu(i) |
95 | init_llist_head(&per_cpu(call_single_queue, i)); | 81 | init_llist_head(&per_cpu(call_single_queue, i)); |
96 | 82 | ||
97 | hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu); | 83 | smpcfd_prepare_cpu(smp_processor_id()); |
98 | register_cpu_notifier(&hotplug_cfd_notifier); | ||
99 | } | 84 | } |
100 | 85 | ||
101 | /* | 86 | /* |
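The single hotplug_cfd() switch is split by hotplug phase: smpcfd_prepare_cpu() runs on a control CPU before the incoming CPU starts (and may allocate with GFP_KERNEL), smpcfd_dying_cpu() runs on the outgoing CPU with interrupts disabled, and smpcfd_dead_cpu() runs once the CPU is gone. Registration moves out of this file into the hotplug core's static state table; a sketch of what such entries look like (state identifiers, name strings and field layout are illustrative of the scheme used by this series, not part of this patch):

/* Sketch: static state-table entries in kernel/cpu.c (illustrative). */
[CPUHP_SMPCFD_PREP] = {
	.name		= "smpcfd:prepare",
	.startup	= smpcfd_prepare_cpu,	/* control CPU, may sleep */
	.teardown	= smpcfd_dead_cpu,	/* after the CPU is dead */
},
[CPUHP_AP_SMPCFD_DYING] = {
	.name		= "smpcfd:dying",
	.startup	= NULL,
	.teardown	= smpcfd_dying_cpu,	/* on the outgoing CPU, irqs off */
},
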
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index d13c9aebf7a3..9ba7c820fc23 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c | |||
@@ -1590,7 +1590,7 @@ SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp, | |||
1590 | /* | 1590 | /* |
1591 | * Functions related to boot-time initialization: | 1591 | * Functions related to boot-time initialization: |
1592 | */ | 1592 | */ |
1593 | static void init_hrtimers_cpu(int cpu) | 1593 | int hrtimers_prepare_cpu(unsigned int cpu) |
1594 | { | 1594 | { |
1595 | struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu); | 1595 | struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu); |
1596 | int i; | 1596 | int i; |
@@ -1602,6 +1602,7 @@ static void init_hrtimers_cpu(int cpu) | |||
1602 | 1602 | ||
1603 | cpu_base->cpu = cpu; | 1603 | cpu_base->cpu = cpu; |
1604 | hrtimer_init_hres(cpu_base); | 1604 | hrtimer_init_hres(cpu_base); |
1605 | return 0; | ||
1605 | } | 1606 | } |
1606 | 1607 | ||
1607 | #ifdef CONFIG_HOTPLUG_CPU | 1608 | #ifdef CONFIG_HOTPLUG_CPU |
@@ -1636,7 +1637,7 @@ static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base, | |||
1636 | } | 1637 | } |
1637 | } | 1638 | } |
1638 | 1639 | ||
1639 | static void migrate_hrtimers(int scpu) | 1640 | int hrtimers_dead_cpu(unsigned int scpu) |
1640 | { | 1641 | { |
1641 | struct hrtimer_cpu_base *old_base, *new_base; | 1642 | struct hrtimer_cpu_base *old_base, *new_base; |
1642 | int i; | 1643 | int i; |
@@ -1665,45 +1666,14 @@ static void migrate_hrtimers(int scpu) | |||
1665 | /* Check, if we got expired work to do */ | 1666 | /* Check, if we got expired work to do */ |
1666 | __hrtimer_peek_ahead_timers(); | 1667 | __hrtimer_peek_ahead_timers(); |
1667 | local_irq_enable(); | 1668 | local_irq_enable(); |
1669 | return 0; | ||
1668 | } | 1670 | } |
1669 | 1671 | ||
1670 | #endif /* CONFIG_HOTPLUG_CPU */ | 1672 | #endif /* CONFIG_HOTPLUG_CPU */ |
1671 | 1673 | ||
1672 | static int hrtimer_cpu_notify(struct notifier_block *self, | ||
1673 | unsigned long action, void *hcpu) | ||
1674 | { | ||
1675 | int scpu = (long)hcpu; | ||
1676 | |||
1677 | switch (action) { | ||
1678 | |||
1679 | case CPU_UP_PREPARE: | ||
1680 | case CPU_UP_PREPARE_FROZEN: | ||
1681 | init_hrtimers_cpu(scpu); | ||
1682 | break; | ||
1683 | |||
1684 | #ifdef CONFIG_HOTPLUG_CPU | ||
1685 | case CPU_DEAD: | ||
1686 | case CPU_DEAD_FROZEN: | ||
1687 | migrate_hrtimers(scpu); | ||
1688 | break; | ||
1689 | #endif | ||
1690 | |||
1691 | default: | ||
1692 | break; | ||
1693 | } | ||
1694 | |||
1695 | return NOTIFY_OK; | ||
1696 | } | ||
1697 | |||
1698 | static struct notifier_block hrtimers_nb = { | ||
1699 | .notifier_call = hrtimer_cpu_notify, | ||
1700 | }; | ||
1701 | |||
1702 | void __init hrtimers_init(void) | 1674 | void __init hrtimers_init(void) |
1703 | { | 1675 | { |
1704 | hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE, | 1676 | hrtimers_prepare_cpu(smp_processor_id()); |
1705 | (void *)(long)smp_processor_id()); | ||
1706 | register_cpu_notifier(&hrtimers_nb); | ||
1707 | } | 1677 | } |
1708 | 1678 | ||
1709 | /** | 1679 | /** |
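Note the changed error convention: a hotplug callback returns 0 or a negative errno directly and the hotplug core rolls back the states that already completed, so the notifier_from_errno()/NOTIFY_OK wrapping disappears. A self-contained illustration of that convention for a hypothetical per-cpu allocation (not part of this patch):

/* Illustrative prepare/dead pair following the new convention.
 * Needs <linux/slab.h> and <linux/percpu.h>; all names are hypothetical. */
static DEFINE_PER_CPU(void *, example_buf);

static int example_prepare_cpu(unsigned int cpu)
{
	void *p = kzalloc(PAGE_SIZE, GFP_KERNEL);

	if (!p)
		return -ENOMEM;	/* plain errno, no notifier_from_errno() */
	per_cpu(example_buf, cpu) = p;
	return 0;
}

static int example_dead_cpu(unsigned int cpu)
{
	kfree(per_cpu(example_buf, cpu));
	per_cpu(example_buf, cpu) = NULL;
	return 0;
}
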
diff --git a/kernel/time/timer.c b/kernel/time/timer.c index cb9ab401e2d9..555670a5143c 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c | |||
@@ -1804,7 +1804,7 @@ static void migrate_timer_list(struct timer_base *new_base, struct hlist_head *h | |||
1804 | } | 1804 | } |
1805 | } | 1805 | } |
1806 | 1806 | ||
1807 | static void migrate_timers(int cpu) | 1807 | int timers_dead_cpu(unsigned int cpu) |
1808 | { | 1808 | { |
1809 | struct timer_base *old_base; | 1809 | struct timer_base *old_base; |
1810 | struct timer_base *new_base; | 1810 | struct timer_base *new_base; |
@@ -1831,29 +1831,9 @@ static void migrate_timers(int cpu) | |||
1831 | spin_unlock_irq(&new_base->lock); | 1831 | spin_unlock_irq(&new_base->lock); |
1832 | put_cpu_ptr(&timer_bases); | 1832 | put_cpu_ptr(&timer_bases); |
1833 | } | 1833 | } |
1834 | return 0; | ||
1834 | } | 1835 | } |
1835 | 1836 | ||
1836 | static int timer_cpu_notify(struct notifier_block *self, | ||
1837 | unsigned long action, void *hcpu) | ||
1838 | { | ||
1839 | switch (action) { | ||
1840 | case CPU_DEAD: | ||
1841 | case CPU_DEAD_FROZEN: | ||
1842 | migrate_timers((long)hcpu); | ||
1843 | break; | ||
1844 | default: | ||
1845 | break; | ||
1846 | } | ||
1847 | |||
1848 | return NOTIFY_OK; | ||
1849 | } | ||
1850 | |||
1851 | static inline void timer_register_cpu_notifier(void) | ||
1852 | { | ||
1853 | cpu_notifier(timer_cpu_notify, 0); | ||
1854 | } | ||
1855 | #else | ||
1856 | static inline void timer_register_cpu_notifier(void) { } | ||
1857 | #endif /* CONFIG_HOTPLUG_CPU */ | 1837 | #endif /* CONFIG_HOTPLUG_CPU */ |
1858 | 1838 | ||
1859 | static void __init init_timer_cpu(int cpu) | 1839 | static void __init init_timer_cpu(int cpu) |
@@ -1881,7 +1861,6 @@ void __init init_timers(void) | |||
1881 | { | 1861 | { |
1882 | init_timer_cpus(); | 1862 | init_timer_cpus(); |
1883 | init_timer_stats(); | 1863 | init_timer_stats(); |
1884 | timer_register_cpu_notifier(); | ||
1885 | open_softirq(TIMER_SOFTIRQ, run_timer_softirq); | 1864 | open_softirq(TIMER_SOFTIRQ, run_timer_softirq); |
1886 | } | 1865 | } |
1887 | 1866 | ||
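The empty timer_register_cpu_notifier() stub for !CONFIG_HOTPLUG_CPU also goes away; one plausible way the declaration site handles that configuration split is to let the callback collapse to NULL when CPU hotplug is disabled (a sketch of that common pattern, the exact header and spelling are assumptions, not shown in this hunk):

#ifdef CONFIG_HOTPLUG_CPU
int timers_dead_cpu(unsigned int cpu);
#else
#define timers_dead_cpu	NULL
#endif
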
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index d12bd958077e..ef071ca73fc3 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -4607,84 +4607,65 @@ static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu) | |||
4607 | WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0); | 4607 | WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0); |
4608 | } | 4608 | } |
4609 | 4609 | ||
4610 | /* | 4610 | int workqueue_prepare_cpu(unsigned int cpu) |
4611 | * Workqueues should be brought up before normal priority CPU notifiers. | 4611 | { |
4612 | * This will be registered high priority CPU notifier. | 4612 | struct worker_pool *pool; |
4613 | */ | 4613 | |
4614 | static int workqueue_cpu_up_callback(struct notifier_block *nfb, | 4614 | for_each_cpu_worker_pool(pool, cpu) { |
4615 | unsigned long action, | 4615 | if (pool->nr_workers) |
4616 | void *hcpu) | 4616 | continue; |
4617 | if (!create_worker(pool)) | ||
4618 | return -ENOMEM; | ||
4619 | } | ||
4620 | return 0; | ||
4621 | } | ||
4622 | |||
4623 | int workqueue_online_cpu(unsigned int cpu) | ||
4617 | { | 4624 | { |
4618 | int cpu = (unsigned long)hcpu; | ||
4619 | struct worker_pool *pool; | 4625 | struct worker_pool *pool; |
4620 | struct workqueue_struct *wq; | 4626 | struct workqueue_struct *wq; |
4621 | int pi; | 4627 | int pi; |
4622 | 4628 | ||
4623 | switch (action & ~CPU_TASKS_FROZEN) { | 4629 | mutex_lock(&wq_pool_mutex); |
4624 | case CPU_UP_PREPARE: | ||
4625 | for_each_cpu_worker_pool(pool, cpu) { | ||
4626 | if (pool->nr_workers) | ||
4627 | continue; | ||
4628 | if (!create_worker(pool)) | ||
4629 | return NOTIFY_BAD; | ||
4630 | } | ||
4631 | break; | ||
4632 | |||
4633 | case CPU_DOWN_FAILED: | ||
4634 | case CPU_ONLINE: | ||
4635 | mutex_lock(&wq_pool_mutex); | ||
4636 | 4630 | ||
4637 | for_each_pool(pool, pi) { | 4631 | for_each_pool(pool, pi) { |
4638 | mutex_lock(&pool->attach_mutex); | 4632 | mutex_lock(&pool->attach_mutex); |
4639 | 4633 | ||
4640 | if (pool->cpu == cpu) | 4634 | if (pool->cpu == cpu) |
4641 | rebind_workers(pool); | 4635 | rebind_workers(pool); |
4642 | else if (pool->cpu < 0) | 4636 | else if (pool->cpu < 0) |
4643 | restore_unbound_workers_cpumask(pool, cpu); | 4637 | restore_unbound_workers_cpumask(pool, cpu); |
4644 | 4638 | ||
4645 | mutex_unlock(&pool->attach_mutex); | 4639 | mutex_unlock(&pool->attach_mutex); |
4646 | } | 4640 | } |
4647 | 4641 | ||
4648 | /* update NUMA affinity of unbound workqueues */ | 4642 | /* update NUMA affinity of unbound workqueues */ |
4649 | list_for_each_entry(wq, &workqueues, list) | 4643 | list_for_each_entry(wq, &workqueues, list) |
4650 | wq_update_unbound_numa(wq, cpu, true); | 4644 | wq_update_unbound_numa(wq, cpu, true); |
4651 | 4645 | ||
4652 | mutex_unlock(&wq_pool_mutex); | 4646 | mutex_unlock(&wq_pool_mutex); |
4653 | break; | 4647 | return 0; |
4654 | } | ||
4655 | return NOTIFY_OK; | ||
4656 | } | 4648 | } |
4657 | 4649 | ||
4658 | /* | 4650 | int workqueue_offline_cpu(unsigned int cpu) |
4659 | * Workqueues should be brought down after normal priority CPU notifiers. | ||
4660 | * This will be registered as low priority CPU notifier. | ||
4661 | */ | ||
4662 | static int workqueue_cpu_down_callback(struct notifier_block *nfb, | ||
4663 | unsigned long action, | ||
4664 | void *hcpu) | ||
4665 | { | 4651 | { |
4666 | int cpu = (unsigned long)hcpu; | ||
4667 | struct work_struct unbind_work; | 4652 | struct work_struct unbind_work; |
4668 | struct workqueue_struct *wq; | 4653 | struct workqueue_struct *wq; |
4669 | 4654 | ||
4670 | switch (action & ~CPU_TASKS_FROZEN) { | 4655 | /* unbinding per-cpu workers should happen on the local CPU */ |
4671 | case CPU_DOWN_PREPARE: | 4656 | INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn); |
4672 | /* unbinding per-cpu workers should happen on the local CPU */ | 4657 | queue_work_on(cpu, system_highpri_wq, &unbind_work); |
4673 | INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn); | 4658 | |
4674 | queue_work_on(cpu, system_highpri_wq, &unbind_work); | 4659 | /* update NUMA affinity of unbound workqueues */ |
4675 | 4660 | mutex_lock(&wq_pool_mutex); | |
4676 | /* update NUMA affinity of unbound workqueues */ | 4661 | list_for_each_entry(wq, &workqueues, list) |
4677 | mutex_lock(&wq_pool_mutex); | 4662 | wq_update_unbound_numa(wq, cpu, false); |
4678 | list_for_each_entry(wq, &workqueues, list) | 4663 | mutex_unlock(&wq_pool_mutex); |
4679 | wq_update_unbound_numa(wq, cpu, false); | 4664 | |
4680 | mutex_unlock(&wq_pool_mutex); | 4665 | /* wait for per-cpu unbinding to finish */ |
4681 | 4666 | flush_work(&unbind_work); | |
4682 | /* wait for per-cpu unbinding to finish */ | 4667 | destroy_work_on_stack(&unbind_work); |
4683 | flush_work(&unbind_work); | 4668 | return 0; |
4684 | destroy_work_on_stack(&unbind_work); | ||
4685 | break; | ||
4686 | } | ||
4687 | return NOTIFY_OK; | ||
4688 | } | 4669 | } |
4689 | 4670 | ||
4690 | #ifdef CONFIG_SMP | 4671 | #ifdef CONFIG_SMP |
@@ -5486,9 +5467,6 @@ static int __init init_workqueues(void) | |||
5486 | 5467 | ||
5487 | pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC); | 5468 | pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC); |
5488 | 5469 | ||
5489 | cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP); | ||
5490 | hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN); | ||
5491 | |||
5492 | wq_numa_init(); | 5470 | wq_numa_init(); |
5493 | 5471 | ||
5494 | /* initialize CPU pools */ | 5472 | /* initialize CPU pools */ |
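The removed comments about registering the up callback at high priority and the down callback at low priority point at the other half of this conversion: ordering is no longer expressed through CPU_PRI_WORKQUEUE_UP/CPU_PRI_WORKQUEUE_DOWN priorities but through the position of the workqueue states in the cpuhp_state enum, which the core walks forward on bringup and backward on teardown. Roughly (enum member placement here is illustrative; the real ordering lives in include/linux/cpuhotplug.h):

/* Sketch: ordering by enum position instead of notifier priority. */
enum cpuhp_state {
	/* ... */
	CPUHP_WORKQUEUE_PREP,		/* workqueue_prepare_cpu() */
	/* ... remaining PREPARE-section states ... */
	CPUHP_AP_ONLINE_IDLE,
	/* ... */
	CPUHP_AP_WORKQUEUE_ONLINE,	/* workqueue_online_cpu(),
					 * workqueue_offline_cpu() */
	/* ... states that must come up after workqueues ... */
	CPUHP_ONLINE,
};
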
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c index e2d5b6f988fb..4fde8c7dfcfe 100644 --- a/virt/kvm/arm/arch_timer.c +++ b/virt/kvm/arm/arch_timer.c | |||
@@ -405,26 +405,17 @@ u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid) | |||
405 | return (u64)-1; | 405 | return (u64)-1; |
406 | } | 406 | } |
407 | 407 | ||
408 | static int kvm_timer_cpu_notify(struct notifier_block *self, | 408 | static int kvm_timer_starting_cpu(unsigned int cpu) |
409 | unsigned long action, void *cpu) | ||
410 | { | 409 | { |
411 | switch (action) { | 410 | kvm_timer_init_interrupt(NULL); |
412 | case CPU_STARTING: | 411 | return 0; |
413 | case CPU_STARTING_FROZEN: | ||
414 | kvm_timer_init_interrupt(NULL); | ||
415 | break; | ||
416 | case CPU_DYING: | ||
417 | case CPU_DYING_FROZEN: | ||
418 | disable_percpu_irq(host_vtimer_irq); | ||
419 | break; | ||
420 | } | ||
421 | |||
422 | return NOTIFY_OK; | ||
423 | } | 412 | } |
424 | 413 | ||
425 | static struct notifier_block kvm_timer_cpu_nb = { | 414 | static int kvm_timer_dying_cpu(unsigned int cpu) |
426 | .notifier_call = kvm_timer_cpu_notify, | 415 | { |
427 | }; | 416 | disable_percpu_irq(host_vtimer_irq); |
417 | return 0; | ||
418 | } | ||
428 | 419 | ||
429 | int kvm_timer_hyp_init(void) | 420 | int kvm_timer_hyp_init(void) |
430 | { | 421 | { |
@@ -449,12 +440,6 @@ int kvm_timer_hyp_init(void) | |||
449 | goto out; | 440 | goto out; |
450 | } | 441 | } |
451 | 442 | ||
452 | err = __register_cpu_notifier(&kvm_timer_cpu_nb); | ||
453 | if (err) { | ||
454 | kvm_err("Cannot register timer CPU notifier\n"); | ||
455 | goto out_free; | ||
456 | } | ||
457 | |||
458 | wqueue = create_singlethread_workqueue("kvm_arch_timer"); | 443 | wqueue = create_singlethread_workqueue("kvm_arch_timer"); |
459 | if (!wqueue) { | 444 | if (!wqueue) { |
460 | err = -ENOMEM; | 445 | err = -ENOMEM; |
@@ -462,8 +447,10 @@ int kvm_timer_hyp_init(void) | |||
462 | } | 447 | } |
463 | 448 | ||
464 | kvm_info("virtual timer IRQ%d\n", host_vtimer_irq); | 449 | kvm_info("virtual timer IRQ%d\n", host_vtimer_irq); |
465 | on_each_cpu(kvm_timer_init_interrupt, NULL, 1); | ||
466 | 450 | ||
451 | cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING, | ||
452 | "AP_KVM_ARM_TIMER_STARTING", kvm_timer_starting_cpu, | ||
453 | kvm_timer_dying_cpu); | ||
467 | goto out; | 454 | goto out; |
468 | out_free: | 455 | out_free: |
469 | free_percpu_irq(host_vtimer_irq, kvm_get_running_vcpus()); | 456 | free_percpu_irq(host_vtimer_irq, kvm_get_running_vcpus()); |
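cpuhp_setup_state() both registers the callbacks for future hotplug operations and invokes the startup callback on every CPU that is already online, which is why the explicit on_each_cpu(kvm_timer_init_interrupt, ...) call can be dropped. A generic registration sketch for a percpu-IRQ driver (all identifiers are hypothetical; a real CPUHP_AP_*_STARTING constant would have to be added to enum cpuhp_state):

/* Illustrative only; the mydrv_* names and the state constant are made up. */
static unsigned int mydrv_irq;	/* percpu IRQ, assumed requested elsewhere */

static int mydrv_starting_cpu(unsigned int cpu)
{
	enable_percpu_irq(mydrv_irq, 0);
	return 0;
}

static int mydrv_dying_cpu(unsigned int cpu)
{
	disable_percpu_irq(mydrv_irq);
	return 0;
}

static int __init mydrv_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_MYDRV_STARTING, "AP_MYDRV_STARTING",
				 mydrv_starting_cpu, mydrv_dying_cpu);
}
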
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c index c3bfbb981e73..67cb5e948be2 100644 --- a/virt/kvm/arm/vgic.c +++ b/virt/kvm/arm/vgic.c | |||
@@ -2326,32 +2326,18 @@ int vgic_has_attr_regs(const struct vgic_io_range *ranges, phys_addr_t offset) | |||
2326 | return -ENXIO; | 2326 | return -ENXIO; |
2327 | } | 2327 | } |
2328 | 2328 | ||
2329 | static void vgic_init_maintenance_interrupt(void *info) | 2329 | static int vgic_starting_cpu(unsigned int cpu) |
2330 | { | 2330 | { |
2331 | enable_percpu_irq(vgic->maint_irq, 0); | 2331 | enable_percpu_irq(vgic->maint_irq, 0); |
2332 | return 0; | ||
2332 | } | 2333 | } |
2333 | 2334 | ||
2334 | static int vgic_cpu_notify(struct notifier_block *self, | 2335 | static int vgic_dying_cpu(unsigned int cpu) |
2335 | unsigned long action, void *cpu) | ||
2336 | { | 2336 | { |
2337 | switch (action) { | 2337 | disable_percpu_irq(vgic->maint_irq); |
2338 | case CPU_STARTING: | 2338 | return 0; |
2339 | case CPU_STARTING_FROZEN: | ||
2340 | vgic_init_maintenance_interrupt(NULL); | ||
2341 | break; | ||
2342 | case CPU_DYING: | ||
2343 | case CPU_DYING_FROZEN: | ||
2344 | disable_percpu_irq(vgic->maint_irq); | ||
2345 | break; | ||
2346 | } | ||
2347 | |||
2348 | return NOTIFY_OK; | ||
2349 | } | 2339 | } |
2350 | 2340 | ||
2351 | static struct notifier_block vgic_cpu_nb = { | ||
2352 | .notifier_call = vgic_cpu_notify, | ||
2353 | }; | ||
2354 | |||
2355 | static int kvm_vgic_probe(void) | 2341 | static int kvm_vgic_probe(void) |
2356 | { | 2342 | { |
2357 | const struct gic_kvm_info *gic_kvm_info; | 2343 | const struct gic_kvm_info *gic_kvm_info; |
@@ -2392,19 +2378,10 @@ int kvm_vgic_hyp_init(void) | |||
2392 | return ret; | 2378 | return ret; |
2393 | } | 2379 | } |
2394 | 2380 | ||
2395 | ret = __register_cpu_notifier(&vgic_cpu_nb); | 2381 | cpuhp_setup_state(CPUHP_AP_KVM_ARM_VGIC_STARTING, |
2396 | if (ret) { | 2382 | "AP_KVM_ARM_VGIC_STARTING", vgic_starting_cpu, |
2397 | kvm_err("Cannot register vgic CPU notifier\n"); | 2383 | vgic_dying_cpu); |
2398 | goto out_free_irq; | ||
2399 | } | ||
2400 | |||
2401 | on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1); | ||
2402 | |||
2403 | return 0; | 2384 | return 0; |
2404 | |||
2405 | out_free_irq: | ||
2406 | free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus()); | ||
2407 | return ret; | ||
2408 | } | 2385 | } |
2409 | 2386 | ||
2410 | int kvm_irq_map_gsi(struct kvm *kvm, | 2387 | int kvm_irq_map_gsi(struct kvm *kvm, |
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c index a1442f7c9c4d..2c7f0d5a62ea 100644 --- a/virt/kvm/arm/vgic/vgic-init.c +++ b/virt/kvm/arm/vgic/vgic-init.c | |||
@@ -353,32 +353,19 @@ out: | |||
353 | 353 | ||
354 | /* GENERIC PROBE */ | 354 | /* GENERIC PROBE */ |
355 | 355 | ||
356 | static void vgic_init_maintenance_interrupt(void *info) | 356 | static int vgic_init_cpu_starting(unsigned int cpu) |
357 | { | 357 | { |
358 | enable_percpu_irq(kvm_vgic_global_state.maint_irq, 0); | 358 | enable_percpu_irq(kvm_vgic_global_state.maint_irq, 0); |
359 | return 0; | ||
359 | } | 360 | } |
360 | 361 | ||
361 | static int vgic_cpu_notify(struct notifier_block *self, | ||
362 | unsigned long action, void *cpu) | ||
363 | { | ||
364 | switch (action) { | ||
365 | case CPU_STARTING: | ||
366 | case CPU_STARTING_FROZEN: | ||
367 | vgic_init_maintenance_interrupt(NULL); | ||
368 | break; | ||
369 | case CPU_DYING: | ||
370 | case CPU_DYING_FROZEN: | ||
371 | disable_percpu_irq(kvm_vgic_global_state.maint_irq); | ||
372 | break; | ||
373 | } | ||
374 | 362 | ||
375 | return NOTIFY_OK; | 363 | static int vgic_init_cpu_dying(unsigned int cpu) |
364 | { | ||
365 | disable_percpu_irq(kvm_vgic_global_state.maint_irq); | ||
366 | return 0; | ||
376 | } | 367 | } |
377 | 368 | ||
378 | static struct notifier_block vgic_cpu_nb = { | ||
379 | .notifier_call = vgic_cpu_notify, | ||
380 | }; | ||
381 | |||
382 | static irqreturn_t vgic_maintenance_handler(int irq, void *data) | 369 | static irqreturn_t vgic_maintenance_handler(int irq, void *data) |
383 | { | 370 | { |
384 | /* | 371 | /* |
@@ -434,14 +421,14 @@ int kvm_vgic_hyp_init(void) | |||
434 | return ret; | 421 | return ret; |
435 | } | 422 | } |
436 | 423 | ||
437 | ret = __register_cpu_notifier(&vgic_cpu_nb); | 424 | ret = cpuhp_setup_state(CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING, |
425 | "AP_KVM_ARM_VGIC_INIT_STARTING", | ||
426 | vgic_init_cpu_starting, vgic_init_cpu_dying); | ||
438 | if (ret) { | 427 | if (ret) { |
439 | kvm_err("Cannot register vgic CPU notifier\n"); | 428 | kvm_err("Cannot register vgic CPU notifier\n"); |
440 | goto out_free_irq; | 429 | goto out_free_irq; |
441 | } | 430 | } |
442 | 431 | ||
443 | on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1); | ||
444 | |||
445 | kvm_info("vgic interrupt IRQ%d\n", kvm_vgic_global_state.maint_irq); | 432 | kvm_info("vgic interrupt IRQ%d\n", kvm_vgic_global_state.maint_irq); |
446 | return 0; | 433 | return 0; |
447 | 434 | ||
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index ce3d8e5be73e..2e791367c576 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
@@ -3155,12 +3155,13 @@ static void hardware_enable_nolock(void *junk) | |||
3155 | } | 3155 | } |
3156 | } | 3156 | } |
3157 | 3157 | ||
3158 | static void hardware_enable(void) | 3158 | static int kvm_starting_cpu(unsigned int cpu) |
3159 | { | 3159 | { |
3160 | raw_spin_lock(&kvm_count_lock); | 3160 | raw_spin_lock(&kvm_count_lock); |
3161 | if (kvm_usage_count) | 3161 | if (kvm_usage_count) |
3162 | hardware_enable_nolock(NULL); | 3162 | hardware_enable_nolock(NULL); |
3163 | raw_spin_unlock(&kvm_count_lock); | 3163 | raw_spin_unlock(&kvm_count_lock); |
3164 | return 0; | ||
3164 | } | 3165 | } |
3165 | 3166 | ||
3166 | static void hardware_disable_nolock(void *junk) | 3167 | static void hardware_disable_nolock(void *junk) |
@@ -3173,12 +3174,13 @@ static void hardware_disable_nolock(void *junk) | |||
3173 | kvm_arch_hardware_disable(); | 3174 | kvm_arch_hardware_disable(); |
3174 | } | 3175 | } |
3175 | 3176 | ||
3176 | static void hardware_disable(void) | 3177 | static int kvm_dying_cpu(unsigned int cpu) |
3177 | { | 3178 | { |
3178 | raw_spin_lock(&kvm_count_lock); | 3179 | raw_spin_lock(&kvm_count_lock); |
3179 | if (kvm_usage_count) | 3180 | if (kvm_usage_count) |
3180 | hardware_disable_nolock(NULL); | 3181 | hardware_disable_nolock(NULL); |
3181 | raw_spin_unlock(&kvm_count_lock); | 3182 | raw_spin_unlock(&kvm_count_lock); |
3183 | return 0; | ||
3182 | } | 3184 | } |
3183 | 3185 | ||
3184 | static void hardware_disable_all_nolock(void) | 3186 | static void hardware_disable_all_nolock(void) |
@@ -3219,21 +3221,6 @@ static int hardware_enable_all(void) | |||
3219 | return r; | 3221 | return r; |
3220 | } | 3222 | } |
3221 | 3223 | ||
3222 | static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val, | ||
3223 | void *v) | ||
3224 | { | ||
3225 | val &= ~CPU_TASKS_FROZEN; | ||
3226 | switch (val) { | ||
3227 | case CPU_DYING: | ||
3228 | hardware_disable(); | ||
3229 | break; | ||
3230 | case CPU_STARTING: | ||
3231 | hardware_enable(); | ||
3232 | break; | ||
3233 | } | ||
3234 | return NOTIFY_OK; | ||
3235 | } | ||
3236 | |||
3237 | static int kvm_reboot(struct notifier_block *notifier, unsigned long val, | 3224 | static int kvm_reboot(struct notifier_block *notifier, unsigned long val, |
3238 | void *v) | 3225 | void *v) |
3239 | { | 3226 | { |
@@ -3500,10 +3487,6 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, | |||
3500 | return r; | 3487 | return r; |
3501 | } | 3488 | } |
3502 | 3489 | ||
3503 | static struct notifier_block kvm_cpu_notifier = { | ||
3504 | .notifier_call = kvm_cpu_hotplug, | ||
3505 | }; | ||
3506 | |||
3507 | static int kvm_debugfs_open(struct inode *inode, struct file *file, | 3490 | static int kvm_debugfs_open(struct inode *inode, struct file *file, |
3508 | int (*get)(void *, u64 *), int (*set)(void *, u64), | 3491 | int (*get)(void *, u64 *), int (*set)(void *, u64), |
3509 | const char *fmt) | 3492 | const char *fmt) |
@@ -3754,7 +3737,8 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, | |||
3754 | goto out_free_1; | 3737 | goto out_free_1; |
3755 | } | 3738 | } |
3756 | 3739 | ||
3757 | r = register_cpu_notifier(&kvm_cpu_notifier); | 3740 | r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_STARTING, "AP_KVM_STARTING", |
3741 | kvm_starting_cpu, kvm_dying_cpu); | ||
3758 | if (r) | 3742 | if (r) |
3759 | goto out_free_2; | 3743 | goto out_free_2; |
3760 | register_reboot_notifier(&kvm_reboot_notifier); | 3744 | register_reboot_notifier(&kvm_reboot_notifier); |
@@ -3808,7 +3792,7 @@ out_free: | |||
3808 | kmem_cache_destroy(kvm_vcpu_cache); | 3792 | kmem_cache_destroy(kvm_vcpu_cache); |
3809 | out_free_3: | 3793 | out_free_3: |
3810 | unregister_reboot_notifier(&kvm_reboot_notifier); | 3794 | unregister_reboot_notifier(&kvm_reboot_notifier); |
3811 | unregister_cpu_notifier(&kvm_cpu_notifier); | 3795 | cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING); |
3812 | out_free_2: | 3796 | out_free_2: |
3813 | out_free_1: | 3797 | out_free_1: |
3814 | kvm_arch_hardware_unsetup(); | 3798 | kvm_arch_hardware_unsetup(); |
@@ -3831,7 +3815,7 @@ void kvm_exit(void) | |||
3831 | kvm_async_pf_deinit(); | 3815 | kvm_async_pf_deinit(); |
3832 | unregister_syscore_ops(&kvm_syscore_ops); | 3816 | unregister_syscore_ops(&kvm_syscore_ops); |
3833 | unregister_reboot_notifier(&kvm_reboot_notifier); | 3817 | unregister_reboot_notifier(&kvm_reboot_notifier); |
3834 | unregister_cpu_notifier(&kvm_cpu_notifier); | 3818 | cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING); |
3835 | on_each_cpu(hardware_disable_nolock, NULL, 1); | 3819 | on_each_cpu(hardware_disable_nolock, NULL, 1); |
3836 | kvm_arch_hardware_unsetup(); | 3820 | kvm_arch_hardware_unsetup(); |
3837 | kvm_arch_exit(); | 3821 | kvm_arch_exit(); |
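kvm uses the _nocalls variants: cpuhp_setup_state_nocalls() installs the state without invoking the startup callback on CPUs that are already online (hardware enabling remains driven by kvm_usage_count), and cpuhp_remove_state_nocalls() removes it again without invoking the teardown callback. A sketch of that init/exit pairing (identifiers are illustrative, not taken from this patch):

/* Illustrative wiring for a _nocalls state; all names are made up. */
static int example_starting_cpu(unsigned int cpu)
{
	/* enable per-CPU hardware here if a user exists */
	return 0;
}

static int example_dying_cpu(unsigned int cpu)
{
	/* disable per-CPU hardware here */
	return 0;
}

static int __init example_init(void)
{
	int ret;

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_EXAMPLE_STARTING,
					"AP_EXAMPLE_STARTING",
					example_starting_cpu,
					example_dying_cpu);
	if (ret)
		return ret;
	/* ... remaining setup ... */
	return 0;
}

static void __exit example_exit(void)
{
	cpuhp_remove_state_nocalls(CPUHP_AP_EXAMPLE_STARTING);
}
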