-rw-r--r--  arch/arm/kernel/smp_twd.c | 2
-rw-r--r--  arch/arm/mach-imx/mmdc.c | 34
-rw-r--r--  arch/arm/mach-mvebu/coherency.c | 2
-rw-r--r--  arch/arm/mm/cache-l2x0-pmu.c | 2
-rw-r--r--  arch/arm/mm/cache-l2x0.c | 2
-rw-r--r--  arch/arm/vfp/vfpmodule.c | 2
-rw-r--r--  arch/arm/xen/enlighten.c | 2
-rw-r--r--  arch/arm64/kernel/armv8_deprecated.c | 2
-rw-r--r--  arch/arm64/kernel/debug-monitors.c | 2
-rw-r--r--  arch/arm64/kernel/hw_breakpoint.c | 2
-rw-r--r--  arch/blackfin/kernel/perf_event.c | 2
-rw-r--r--  arch/metag/kernel/perf/perf_event.c | 2
-rw-r--r--  arch/mips/kernel/pm-cps.c | 2
-rw-r--r--  arch/mips/oprofile/op_model_loongson3.c | 2
-rw-r--r--  arch/powerpc/mm/numa.c | 2
-rw-r--r--  arch/powerpc/perf/core-book3s.c | 2
-rw-r--r--  arch/s390/kernel/perf_cpum_cf.c | 2
-rw-r--r--  arch/s390/kernel/perf_cpum_sf.c | 2
-rw-r--r--  arch/x86/entry/vdso/vma.c | 2
-rw-r--r--  arch/x86/events/amd/ibs.c | 2
-rw-r--r--  arch/x86/events/amd/power.c | 2
-rw-r--r--  arch/x86/events/amd/uncore.c | 6
-rw-r--r--  arch/x86/events/core.c | 6
-rw-r--r--  arch/x86/events/intel/cqm.c | 4
-rw-r--r--  arch/x86/events/intel/cstate.c | 14
-rw-r--r--  arch/x86/events/intel/rapl.c | 4
-rw-r--r--  arch/x86/events/intel/uncore.c | 10
-rw-r--r--  arch/x86/kernel/apb_timer.c | 2
-rw-r--r--  arch/x86/kernel/apic/x2apic_cluster.c | 2
-rw-r--r--  arch/x86/kernel/hpet.c | 4
-rw-r--r--  arch/x86/kernel/msr.c | 1
-rw-r--r--  arch/x86/kernel/tboot.c | 2
-rw-r--r--  arch/x86/kvm/x86.c | 2
-rw-r--r--  arch/x86/xen/enlighten.c | 4
-rw-r--r--  arch/xtensa/kernel/perf_event.c | 2
-rw-r--r--  drivers/bus/arm-cci.c | 2
-rw-r--r--  drivers/bus/arm-ccn.c | 7
-rw-r--r--  drivers/clocksource/arc_timer.c | 2
-rw-r--r--  drivers/clocksource/arm_arch_timer.c | 2
-rw-r--r--  drivers/clocksource/arm_global_timer.c | 2
-rw-r--r--  drivers/clocksource/dummy_timer.c | 2
-rw-r--r--  drivers/clocksource/exynos_mct.c | 2
-rw-r--r--  drivers/clocksource/jcore-pit.c | 2
-rw-r--r--  drivers/clocksource/metag_generic.c | 2
-rw-r--r--  drivers/clocksource/mips-gic-timer.c | 4
-rw-r--r--  drivers/clocksource/qcom-timer.c | 2
-rw-r--r--  drivers/clocksource/time-armada-370-xp.c | 2
-rw-r--r--  drivers/clocksource/timer-atlas7.c | 2
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm3x.c | 4
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm4x.c | 8
-rw-r--r--  drivers/irqchip/irq-armada-370-xp.c | 6
-rw-r--r--  drivers/irqchip/irq-bcm2836.c | 2
-rw-r--r--  drivers/irqchip/irq-gic-v3.c | 6
-rw-r--r--  drivers/irqchip/irq-gic.c | 2
-rw-r--r--  drivers/irqchip/irq-hip04.c | 2
-rw-r--r--  drivers/leds/trigger/ledtrig-cpu.c | 2
-rw-r--r--  drivers/net/virtio_net.c | 4
-rw-r--r--  drivers/perf/arm_pmu.c | 2
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 79
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_init.c | 78
-rw-r--r--  drivers/scsi/qedi/qedi_main.c | 96
-rw-r--r--  drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c | 85
-rw-r--r--  drivers/xen/events/events_fifo.c | 2
-rw-r--r--  include/linux/cpu.h | 90
-rw-r--r--  include/linux/cpuhotplug.h | 9
-rw-r--r--  kernel/cpu.c | 235
-rw-r--r--  lib/Kconfig.debug | 24
-rw-r--r--  lib/Makefile | 1
-rw-r--r--  lib/cpu-notifier-error-inject.c | 84
-rw-r--r--  virt/kvm/arm/arch_timer.c | 2
-rw-r--r--  virt/kvm/arm/vgic/vgic-init.c | 2
-rw-r--r--  virt/kvm/kvm_main.c | 2
72 files changed, 308 insertions, 689 deletions
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 02d5e5e8d44c..895ae5197159 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -339,7 +339,7 @@ static int __init twd_local_timer_common_register(struct device_node *np)
 	}
 
 	cpuhp_setup_state_nocalls(CPUHP_AP_ARM_TWD_STARTING,
-				  "AP_ARM_TWD_STARTING",
+				  "arm/timer/twd:starting",
 				  twd_timer_starting_cpu, twd_timer_dying_cpu);
 
 	twd_get_clock(np);
diff --git a/arch/arm/mach-imx/mmdc.c b/arch/arm/mach-imx/mmdc.c
index ba96bf979625..699157759120 100644
--- a/arch/arm/mach-imx/mmdc.c
+++ b/arch/arm/mach-imx/mmdc.c
@@ -60,6 +60,7 @@
 
 #define to_mmdc_pmu(p) container_of(p, struct mmdc_pmu, pmu)
 
+static enum cpuhp_state cpuhp_mmdc_state;
 static int ddr_type;
 
 struct fsl_mmdc_devtype_data {
@@ -451,8 +452,8 @@ static int imx_mmdc_remove(struct platform_device *pdev)
 {
 	struct mmdc_pmu *pmu_mmdc = platform_get_drvdata(pdev);
 
+	cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
 	perf_pmu_unregister(&pmu_mmdc->pmu);
-	cpuhp_remove_state_nocalls(CPUHP_ONLINE);
 	kfree(pmu_mmdc);
 	return 0;
 }
@@ -472,6 +473,18 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b
 		return -ENOMEM;
 	}
 
+	/* The first instance registers the hotplug state */
+	if (!cpuhp_mmdc_state) {
+		ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+					      "perf/arm/mmdc:online", NULL,
+					      mmdc_pmu_offline_cpu);
+		if (ret < 0) {
+			pr_err("cpuhp_setup_state_multi failed\n");
+			goto pmu_free;
+		}
+		cpuhp_mmdc_state = ret;
+	}
+
 	mmdc_num = mmdc_pmu_init(pmu_mmdc, mmdc_base, &pdev->dev);
 	if (mmdc_num == 0)
 		name = "mmdc";
@@ -485,26 +498,23 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b
 			HRTIMER_MODE_REL);
 	pmu_mmdc->hrtimer.function = mmdc_pmu_timer_handler;
 
-	cpuhp_state_add_instance_nocalls(CPUHP_ONLINE,
-					 &pmu_mmdc->node);
-	cpumask_set_cpu(smp_processor_id(), &pmu_mmdc->cpu);
-	ret = cpuhp_setup_state_multi(CPUHP_AP_NOTIFY_ONLINE,
-				      "MMDC_ONLINE", NULL,
-				      mmdc_pmu_offline_cpu);
-	if (ret) {
-		pr_err("cpuhp_setup_state_multi failure\n");
-		goto pmu_register_err;
-	}
+	cpumask_set_cpu(raw_smp_processor_id(), &pmu_mmdc->cpu);
+
+	/* Register the pmu instance for cpu hotplug */
+	cpuhp_state_add_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
 
 	ret = perf_pmu_register(&(pmu_mmdc->pmu), name, -1);
-	platform_set_drvdata(pdev, pmu_mmdc);
 	if (ret)
 		goto pmu_register_err;
+
+	platform_set_drvdata(pdev, pmu_mmdc);
 	return 0;
 
 pmu_register_err:
 	pr_warn("MMDC Perf PMU failed (%d), disabled\n", ret);
+	cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
 	hrtimer_cancel(&pmu_mmdc->hrtimer);
+pmu_free:
 	kfree(pmu_mmdc);
 	return ret;
 }
diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
index ae2a018b9305..8f8748a0c84f 100644
--- a/arch/arm/mach-mvebu/coherency.c
+++ b/arch/arm/mach-mvebu/coherency.c
@@ -148,7 +148,7 @@ static void __init armada_370_coherency_init(struct device_node *np)
 	of_node_put(cpu_config_np);
 
 	cpuhp_setup_state_nocalls(CPUHP_AP_ARM_MVEBU_COHERENCY,
-				  "AP_ARM_MVEBU_COHERENCY",
+				  "arm/mvebu/coherency:starting",
 				  armada_xp_clear_l2_starting, NULL);
 exit:
 	set_cpu_coherent();
diff --git a/arch/arm/mm/cache-l2x0-pmu.c b/arch/arm/mm/cache-l2x0-pmu.c
index 976d3057272e..0a1e2280141f 100644
--- a/arch/arm/mm/cache-l2x0-pmu.c
+++ b/arch/arm/mm/cache-l2x0-pmu.c
@@ -563,7 +563,7 @@ static __init int l2x0_pmu_init(void)
 
 	cpumask_set_cpu(0, &pmu_cpu);
 	ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_L2X0_ONLINE,
-					"AP_PERF_ARM_L2X0_ONLINE", NULL,
+					"perf/arm/l2x0:online", NULL,
 					l2x0_pmu_offline_cpu);
 	if (ret)
 		goto out_pmu;
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index d1870c777c6e..2290be390f87 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -683,7 +683,7 @@ static void __init l2c310_enable(void __iomem *base, unsigned num_lock)
 
 	if (aux & L310_AUX_CTRL_FULL_LINE_ZERO)
 		cpuhp_setup_state(CPUHP_AP_ARM_L2X0_STARTING,
-				  "AP_ARM_L2X0_STARTING", l2c310_starting_cpu,
+				  "arm/l2x0:starting", l2c310_starting_cpu,
 				  l2c310_dying_cpu);
 }
 
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 0351f5645fb1..569d5a650a4a 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -799,7 +799,7 @@ static int __init vfp_init(void)
 	}
 
 	cpuhp_setup_state_nocalls(CPUHP_AP_ARM_VFP_STARTING,
-				  "AP_ARM_VFP_STARTING", vfp_starting_cpu,
+				  "arm/vfp:starting", vfp_starting_cpu,
 				  vfp_dying_cpu);
 
 	vfp_vector = vfp_support_entry;
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 4986dc0c1dff..11d9f2898b16 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -412,7 +412,7 @@ static int __init xen_guest_init(void)
 	pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);
 
 	return cpuhp_setup_state(CPUHP_AP_ARM_XEN_STARTING,
-				 "AP_ARM_XEN_STARTING", xen_starting_cpu,
+				 "arm/xen:starting", xen_starting_cpu,
 				 xen_dying_cpu);
 }
 early_initcall(xen_guest_init);
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index fde04f029ec3..ecf9298a12d4 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -640,7 +640,7 @@ static int __init armv8_deprecated_init(void)
 	}
 
 	cpuhp_setup_state_nocalls(CPUHP_AP_ARM64_ISNDEP_STARTING,
-				  "AP_ARM64_ISNDEP_STARTING",
+				  "arm64/isndep:starting",
 				  run_all_insn_set_hw_mode, NULL);
 	register_insn_emulation_sysctl(ctl_abi);
 
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 605df76f0a06..2bd426448fc1 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -140,7 +140,7 @@ static int clear_os_lock(unsigned int cpu)
 static int debug_monitors_init(void)
 {
 	return cpuhp_setup_state(CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
-				 "CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING",
+				 "arm64/debug_monitors:starting",
 				 clear_os_lock, NULL);
 }
 postcore_initcall(debug_monitors_init);
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 1b3c747fedda..0296e7924240 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -1001,7 +1001,7 @@ static int __init arch_hw_breakpoint_init(void)
 	 * debugger will leave the world in a nice state for us.
 	 */
 	ret = cpuhp_setup_state(CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING,
-			  "CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING",
+			  "perf/arm64/hw_breakpoint:starting",
 			  hw_breakpoint_reset, NULL);
 	if (ret)
 		pr_err("failed to register CPU hotplug notifier: %d\n", ret);
diff --git a/arch/blackfin/kernel/perf_event.c b/arch/blackfin/kernel/perf_event.c
index 6355e97d22b9..6a9524ad04a5 100644
--- a/arch/blackfin/kernel/perf_event.c
+++ b/arch/blackfin/kernel/perf_event.c
@@ -475,7 +475,7 @@ static int __init bfin_pmu_init(void)
 
 	ret = perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
 	if (!ret)
-		cpuhp_setup_state(CPUHP_PERF_BFIN, "PERF_BFIN",
+		cpuhp_setup_state(CPUHP_PERF_BFIN,"perf/bfin:starting",
 				  bfin_pmu_prepare_cpu, NULL);
 	return ret;
 }
diff --git a/arch/metag/kernel/perf/perf_event.c b/arch/metag/kernel/perf/perf_event.c
index 052cba23708c..7e793eb0c1fe 100644
--- a/arch/metag/kernel/perf/perf_event.c
+++ b/arch/metag/kernel/perf/perf_event.c
@@ -868,7 +868,7 @@ static int __init init_hw_perf_events(void)
 	metag_out32(0, PERF_COUNT(1));
 
 	cpuhp_setup_state(CPUHP_AP_PERF_METAG_STARTING,
-			  "AP_PERF_METAG_STARTING", metag_pmu_starting_cpu,
+			  "perf/metag:starting", metag_pmu_starting_cpu,
 			  NULL);
 
 	ret = perf_pmu_register(&pmu, metag_pmu->name, PERF_TYPE_RAW);
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
index 7cf653e21423..5f928c34c148 100644
--- a/arch/mips/kernel/pm-cps.c
+++ b/arch/mips/kernel/pm-cps.c
@@ -713,7 +713,7 @@ static int __init cps_pm_init(void)
 		pr_warn("pm-cps: no CPC, clock & power gating unavailable\n");
 	}
 
-	return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "AP_PM_CPS_CPU_ONLINE",
+	return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mips/cps_pm:online",
 				 cps_pm_online_cpu, NULL);
 }
 arch_initcall(cps_pm_init);
diff --git a/arch/mips/oprofile/op_model_loongson3.c b/arch/mips/oprofile/op_model_loongson3.c
index 40660392006f..436b1fc99f2c 100644
--- a/arch/mips/oprofile/op_model_loongson3.c
+++ b/arch/mips/oprofile/op_model_loongson3.c
@@ -186,7 +186,7 @@ static int __init loongson3_init(void)
 {
 	on_each_cpu(reset_counters, NULL, 1);
 	cpuhp_setup_state_nocalls(CPUHP_AP_MIPS_OP_LOONGSON3_STARTING,
-				  "AP_MIPS_OP_LOONGSON3_STARTING",
+				  "mips/oprofile/loongson3:starting",
 				  loongson3_starting_cpu, loongson3_dying_cpu);
 	save_perf_irq = perf_irq;
 	perf_irq = loongson3_perfcount_handler;
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 0cb6bd8bfccf..b1099cb2f393 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -944,7 +944,7 @@ void __init initmem_init(void)
 	 * _nocalls() + manual invocation is used because cpuhp is not yet
 	 * initialized for the boot CPU.
 	 */
-	cpuhp_setup_state_nocalls(CPUHP_POWER_NUMA_PREPARE, "POWER_NUMA_PREPARE",
+	cpuhp_setup_state_nocalls(CPUHP_POWER_NUMA_PREPARE, "powerpc/numa:prepare",
 				  ppc_numa_cpu_prepare, ppc_numa_cpu_dead);
 	for_each_present_cpu(cpu)
 		numa_setup_cpu(cpu);
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 72c27b8d2cf3..fd3e4034c04d 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -2189,7 +2189,7 @@ int register_power_pmu(struct power_pmu *pmu)
 #endif /* CONFIG_PPC64 */
 
 	perf_pmu_register(&power_pmu, "cpu", PERF_TYPE_RAW);
-	cpuhp_setup_state(CPUHP_PERF_POWER, "PERF_POWER",
+	cpuhp_setup_state(CPUHP_PERF_POWER, "perf/powerpc:prepare",
 			  power_pmu_prepare_cpu, NULL);
 	return 0;
 }
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 037c2a253ae4..1aba10e90906 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -711,7 +711,7 @@ static int __init cpumf_pmu_init(void)
 		return rc;
 	}
 	return cpuhp_setup_state(CPUHP_AP_PERF_S390_CF_ONLINE,
-				 "AP_PERF_S390_CF_ONLINE",
+				 "perf/s390/cf:online",
 				 s390_pmu_online_cpu, s390_pmu_offline_cpu);
 }
 early_initcall(cpumf_pmu_init);
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 763dec18edcd..1c0b58545c04 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -1623,7 +1623,7 @@ static int __init init_cpum_sampling_pmu(void)
 		goto out;
 	}
 
-	cpuhp_setup_state(CPUHP_AP_PERF_S390_SF_ONLINE, "AP_PERF_S390_SF_ONLINE",
+	cpuhp_setup_state(CPUHP_AP_PERF_S390_SF_ONLINE, "perf/s390/sf:online",
 			  s390_pmu_sf_online_cpu, s390_pmu_sf_offline_cpu);
 out:
 	return err;
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index 40121d14d34d..10820f6cefbf 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -371,7 +371,7 @@ static int __init init_vdso(void)
 
 	/* notifier priority > KVM */
 	return cpuhp_setup_state(CPUHP_AP_X86_VDSO_VMA_ONLINE,
-				 "AP_X86_VDSO_VMA_ONLINE", vgetcpu_online, NULL);
+				 "x86/vdso/vma:online", vgetcpu_online, NULL);
 }
 subsys_initcall(init_vdso);
 #endif /* CONFIG_X86_64 */
diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index b26ee32f73e8..05612a2529c8 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -1010,7 +1010,7 @@ static __init int amd_ibs_init(void)
 	 * all online cpus.
 	 */
 	cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
-			  "AP_PERF_X86_AMD_IBS_STARTING",
+			  "perf/x86/amd/ibs:STARTING",
 			  x86_pmu_amd_ibs_starting_cpu,
 			  x86_pmu_amd_ibs_dying_cpu);
 
diff --git a/arch/x86/events/amd/power.c b/arch/x86/events/amd/power.c
index 9842270ed2f2..a6eee5ac4f58 100644
--- a/arch/x86/events/amd/power.c
+++ b/arch/x86/events/amd/power.c
@@ -291,7 +291,7 @@ static int __init amd_power_pmu_init(void)
 
 
 	cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_POWER_ONLINE,
-			  "AP_PERF_X86_AMD_POWER_ONLINE",
+			  "perf/x86/amd/power:online",
 			  power_cpu_init, power_cpu_exit);
 
 	ret = perf_pmu_register(&pmu_class, "power", -1);
diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
index 65577f081d07..a0b1bdb3ad42 100644
--- a/arch/x86/events/amd/uncore.c
+++ b/arch/x86/events/amd/uncore.c
@@ -527,16 +527,16 @@ static int __init amd_uncore_init(void)
 	 * Install callbacks. Core will call them for each online cpu.
 	 */
 	if (cpuhp_setup_state(CPUHP_PERF_X86_AMD_UNCORE_PREP,
-			      "PERF_X86_AMD_UNCORE_PREP",
+			      "perf/x86/amd/uncore:prepare",
 			      amd_uncore_cpu_up_prepare, amd_uncore_cpu_dead))
 		goto fail_l2;
 
 	if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
-			      "AP_PERF_X86_AMD_UNCORE_STARTING",
+			      "perf/x86/amd/uncore:starting",
 			      amd_uncore_cpu_starting, NULL))
 		goto fail_prep;
 	if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
-			      "AP_PERF_X86_AMD_UNCORE_ONLINE",
+			      "perf/x86/amd/uncore:online",
 			      amd_uncore_cpu_online,
 			      amd_uncore_cpu_down_prepare))
 		goto fail_start;
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index f1c22584a46f..019c5887b698 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1820,18 +1820,18 @@ static int __init init_hw_perf_events(void)
 	 * Install callbacks. Core will call them for each online
 	 * cpu.
 	 */
-	err = cpuhp_setup_state(CPUHP_PERF_X86_PREPARE, "PERF_X86_PREPARE",
+	err = cpuhp_setup_state(CPUHP_PERF_X86_PREPARE, "perf/x86:prepare",
 				x86_pmu_prepare_cpu, x86_pmu_dead_cpu);
 	if (err)
 		return err;
 
 	err = cpuhp_setup_state(CPUHP_AP_PERF_X86_STARTING,
-				"AP_PERF_X86_STARTING", x86_pmu_starting_cpu,
+				"perf/x86:starting", x86_pmu_starting_cpu,
 				x86_pmu_dying_cpu);
 	if (err)
 		goto out;
 
-	err = cpuhp_setup_state(CPUHP_AP_PERF_X86_ONLINE, "AP_PERF_X86_ONLINE",
+	err = cpuhp_setup_state(CPUHP_AP_PERF_X86_ONLINE, "perf/x86:online",
 				x86_pmu_online_cpu, NULL);
 	if (err)
 		goto out1;
diff --git a/arch/x86/events/intel/cqm.c b/arch/x86/events/intel/cqm.c
index 0c45cc8e64ba..8c00dc09a5d2 100644
--- a/arch/x86/events/intel/cqm.c
+++ b/arch/x86/events/intel/cqm.c
@@ -1747,9 +1747,9 @@ static int __init intel_cqm_init(void)
 	 * is enabled to avoid notifier leak.
 	 */
 	cpuhp_setup_state(CPUHP_AP_PERF_X86_CQM_STARTING,
-			  "AP_PERF_X86_CQM_STARTING",
+			  "perf/x86/cqm:starting",
 			  intel_cqm_cpu_starting, NULL);
-	cpuhp_setup_state(CPUHP_AP_PERF_X86_CQM_ONLINE, "AP_PERF_X86_CQM_ONLINE",
+	cpuhp_setup_state(CPUHP_AP_PERF_X86_CQM_ONLINE, "perf/x86/cqm:online",
 			  NULL, intel_cqm_cpu_exit);
 
 out:
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index da51e5a3e2ff..fec8a461bdef 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -594,6 +594,9 @@ static int __init cstate_probe(const struct cstate_model *cm)
 
 static inline void cstate_cleanup(void)
 {
+	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_ONLINE);
+	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_STARTING);
+
 	if (has_cstate_core)
 		perf_pmu_unregister(&cstate_core_pmu);
 
@@ -606,16 +609,16 @@ static int __init cstate_init(void)
 	int err;
 
 	cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_STARTING,
-			  "AP_PERF_X86_CSTATE_STARTING", cstate_cpu_init,
-			  NULL);
+			  "perf/x86/cstate:starting", cstate_cpu_init, NULL);
 	cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_ONLINE,
-			  "AP_PERF_X86_CSTATE_ONLINE", NULL, cstate_cpu_exit);
+			  "perf/x86/cstate:online", NULL, cstate_cpu_exit);
 
 	if (has_cstate_core) {
 		err = perf_pmu_register(&cstate_core_pmu, cstate_core_pmu.name, -1);
 		if (err) {
 			has_cstate_core = false;
 			pr_info("Failed to register cstate core pmu\n");
+			cstate_cleanup();
 			return err;
 		}
 	}
@@ -629,8 +632,7 @@ static int __init cstate_init(void)
 			return err;
 		}
 	}
-
-	return err;
+	return 0;
 }
 
 static int __init cstate_pmu_init(void)
@@ -655,8 +657,6 @@ module_init(cstate_pmu_init);
 
 static void __exit cstate_pmu_exit(void)
 {
-	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_ONLINE);
-	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_STARTING);
 	cstate_cleanup();
 }
 module_exit(cstate_pmu_exit);
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index 0a535cea8ff3..bd34124449b0 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -803,13 +803,13 @@ static int __init rapl_pmu_init(void)
 	 * Install callbacks. Core will call them for each online cpu.
 	 */
 
-	ret = cpuhp_setup_state(CPUHP_PERF_X86_RAPL_PREP, "PERF_X86_RAPL_PREP",
+	ret = cpuhp_setup_state(CPUHP_PERF_X86_RAPL_PREP, "perf/x86/rapl:prepare",
 				rapl_cpu_prepare, NULL);
 	if (ret)
 		goto out;
 
 	ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE,
-				"AP_PERF_X86_RAPL_ONLINE",
+				"perf/x86/rapl:online",
 				rapl_cpu_online, rapl_cpu_offline);
 	if (ret)
 		goto out1;
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index dbaaf7dc8373..97c246f84dea 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -1398,22 +1398,22 @@ static int __init intel_uncore_init(void)
 	 */
 	if (!cret) {
 		ret = cpuhp_setup_state(CPUHP_PERF_X86_UNCORE_PREP,
-					"PERF_X86_UNCORE_PREP",
+					"perf/x86/intel/uncore:prepare",
 					uncore_cpu_prepare, NULL);
 		if (ret)
 			goto err;
 	} else {
 		cpuhp_setup_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP,
-					  "PERF_X86_UNCORE_PREP",
+					  "perf/x86/intel/uncore:prepare",
 					  uncore_cpu_prepare, NULL);
 	}
 	first_init = 1;
 	cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_STARTING,
-			  "AP_PERF_X86_UNCORE_STARTING",
+			  "perf/x86/uncore:starting",
 			  uncore_cpu_starting, uncore_cpu_dying);
 	first_init = 0;
 	cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
-			  "AP_PERF_X86_UNCORE_ONLINE",
+			  "perf/x86/uncore:online",
 			  uncore_event_cpu_online, uncore_event_cpu_offline);
 	return 0;
 
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index 456316f6c868..202a7817beaf 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -234,7 +234,7 @@ static __init int apbt_late_init(void)
 	if (intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT ||
 	    !apb_timer_block_enabled)
 		return 0;
-	return cpuhp_setup_state(CPUHP_X86_APB_DEAD, "X86_APB_DEAD", NULL,
+	return cpuhp_setup_state(CPUHP_X86_APB_DEAD, "x86/apb:dead", NULL,
 				 apbt_cpu_dead);
 }
 fs_initcall(apbt_late_init);
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index 200af5ae9662..5a35f208ed95 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -191,7 +191,7 @@ static int x2apic_cluster_probe(void)
 	if (!x2apic_mode)
 		return 0;
 
-	ret = cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "X2APIC_PREPARE",
+	ret = cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "x86/x2apic:prepare",
 				x2apic_prepare_cpu, x2apic_dead_cpu);
 	if (ret < 0) {
 		pr_err("Failed to register X2APIC_PREPARE\n");
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 274fab99169d..38c8fd684d38 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -1051,11 +1051,11 @@ static __init int hpet_late_init(void)
 		return 0;
 
 	/* This notifier should be called after workqueue is ready */
-	ret = cpuhp_setup_state(CPUHP_AP_X86_HPET_ONLINE, "AP_X86_HPET_ONLINE",
+	ret = cpuhp_setup_state(CPUHP_AP_X86_HPET_ONLINE, "x86/hpet:online",
 				hpet_cpuhp_online, NULL);
 	if (ret)
 		return ret;
-	ret = cpuhp_setup_state(CPUHP_X86_HPET_DEAD, "X86_HPET_DEAD", NULL,
+	ret = cpuhp_setup_state(CPUHP_X86_HPET_DEAD, "x86/hpet:dead", NULL,
 				hpet_cpuhp_dead);
 	if (ret)
 		goto err_cpuhp;
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index f5e3ff835cc8..ef688804f80d 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -224,7 +224,6 @@ static int __init msr_init(void)
 	return 0;
 
 out_class:
-	cpuhp_remove_state(cpuhp_msr_state);
 	class_destroy(msr_class);
 out_chrdev:
 	__unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr");
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index 8402907825b0..b868fa1b812b 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -408,7 +408,7 @@ static __init int tboot_late_init(void)
 	tboot_create_trampoline();
 
 	atomic_set(&ap_wfs_count, 0);
-	cpuhp_setup_state(CPUHP_AP_X86_TBOOT_DYING, "AP_X86_TBOOT_DYING", NULL,
+	cpuhp_setup_state(CPUHP_AP_X86_TBOOT_DYING, "x86/tboot:dying", NULL,
 			  tboot_dying_cpu);
 #ifdef CONFIG_DEBUG_FS
 	debugfs_create_file("tboot_log", S_IRUSR,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 445c51b6cf6d..6414fa6cb9fd 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5855,7 +5855,7 @@ static void kvm_timer_init(void)
 	}
 	pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz);
 
-	cpuhp_setup_state(CPUHP_AP_X86_KVM_CLK_ONLINE, "AP_X86_KVM_CLK_ONLINE",
+	cpuhp_setup_state(CPUHP_AP_X86_KVM_CLK_ONLINE, "x86/kvm/clk:online",
 			  kvmclock_cpu_online, kvmclock_cpu_down_prep);
 }
 
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index ced7027b3fbc..51ef95232725 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1529,11 +1529,11 @@ static int xen_cpuhp_setup(void)
 	int rc;
 
 	rc = cpuhp_setup_state_nocalls(CPUHP_XEN_PREPARE,
-				       "XEN_HVM_GUEST_PREPARE",
+				       "x86/xen/hvm_guest:prepare",
 				       xen_cpu_up_prepare, xen_cpu_dead);
 	if (rc >= 0) {
 		rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
-					       "XEN_HVM_GUEST_ONLINE",
+					       "x86/xen/hvm_guest:online",
 					       xen_cpu_up_online, NULL);
 		if (rc < 0)
 			cpuhp_remove_state_nocalls(CPUHP_XEN_PREPARE);
diff --git a/arch/xtensa/kernel/perf_event.c b/arch/xtensa/kernel/perf_event.c
index 0fecc8a2c0b5..ff1d81385ed7 100644
--- a/arch/xtensa/kernel/perf_event.c
+++ b/arch/xtensa/kernel/perf_event.c
@@ -422,7 +422,7 @@ static int __init xtensa_pmu_init(void)
 	int irq = irq_create_mapping(NULL, XCHAL_PROFILING_INTERRUPT);
 
 	ret = cpuhp_setup_state(CPUHP_AP_PERF_XTENSA_STARTING,
-				"AP_PERF_XTENSA_STARTING", xtensa_pmu_setup,
+				"perf/xtensa:starting", xtensa_pmu_setup,
 				NULL);
 	if (ret) {
 		pr_err("xtensa_pmu: failed to register CPU-hotplug.\n");
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index 231633328dfa..c49da15d9790 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -1796,7 +1796,7 @@ static int __init cci_platform_init(void)
 	int ret;
 
 	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_CCI_ONLINE,
-				      "AP_PERF_ARM_CCI_ONLINE", NULL,
+				      "perf/arm/cci:online", NULL,
 				      cci_pmu_offline_cpu);
 	if (ret)
 		return ret;
diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c
index d1074d9b38ba..4d6a2b7e4d3f 100644
--- a/drivers/bus/arm-ccn.c
+++ b/drivers/bus/arm-ccn.c
@@ -1562,7 +1562,7 @@ static int __init arm_ccn_init(void)
 	int i, ret;
 
 	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_CCN_ONLINE,
-				      "AP_PERF_ARM_CCN_ONLINE", NULL,
+				      "perf/arm/ccn:online", NULL,
 				      arm_ccn_pmu_offline_cpu);
 	if (ret)
 		return ret;
@@ -1570,7 +1570,10 @@ static int __init arm_ccn_init(void)
 	for (i = 0; i < ARRAY_SIZE(arm_ccn_pmu_events); i++)
 		arm_ccn_pmu_events_attrs[i] = &arm_ccn_pmu_events[i].attr.attr;
 
-	return platform_driver_register(&arm_ccn_driver);
+	ret = platform_driver_register(&arm_ccn_driver);
+	if (ret)
+		cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE);
+	return ret;
 }
 
 static void __exit arm_ccn_exit(void)
diff --git a/drivers/clocksource/arc_timer.c b/drivers/clocksource/arc_timer.c
index a49748d826c0..2b7e87134d1a 100644
--- a/drivers/clocksource/arc_timer.c
+++ b/drivers/clocksource/arc_timer.c
@@ -309,7 +309,7 @@ static int __init arc_clockevent_setup(struct device_node *node)
 	}
 
 	ret = cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING,
-				"AP_ARC_TIMER_STARTING",
+				"clockevents/arc/timer:starting",
 				arc_timer_starting_cpu,
 				arc_timer_dying_cpu);
 	if (ret) {
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 02fef6830e72..cdeca850f29e 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -738,7 +738,7 @@ static int __init arch_timer_register(void)
 
 	/* Register and immediately configure the timer on the boot CPU */
 	err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
-				"AP_ARM_ARCH_TIMER_STARTING",
+				"clockevents/arm/arch_timer:starting",
 				arch_timer_starting_cpu, arch_timer_dying_cpu);
 	if (err)
 		goto out_unreg_cpupm;
diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
index 8da03298f844..bbfeb2800a94 100644
--- a/drivers/clocksource/arm_global_timer.c
+++ b/drivers/clocksource/arm_global_timer.c
@@ -316,7 +316,7 @@ static int __init global_timer_of_register(struct device_node *np)
 		goto out_irq;
 
 	err = cpuhp_setup_state(CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
-				"AP_ARM_GLOBAL_TIMER_STARTING",
+				"clockevents/arm/global_timer:starting",
 				gt_starting_cpu, gt_dying_cpu);
 	if (err)
 		goto out_irq;
diff --git a/drivers/clocksource/dummy_timer.c b/drivers/clocksource/dummy_timer.c
index 89f1c2edbe02..01f3f5a59bc6 100644
--- a/drivers/clocksource/dummy_timer.c
+++ b/drivers/clocksource/dummy_timer.c
@@ -34,7 +34,7 @@ static int dummy_timer_starting_cpu(unsigned int cpu)
 static int __init dummy_timer_register(void)
 {
 	return cpuhp_setup_state(CPUHP_AP_DUMMY_TIMER_STARTING,
-				 "AP_DUMMY_TIMER_STARTING",
+				 "clockevents/dummy_timer:starting",
 				 dummy_timer_starting_cpu, NULL);
 }
 early_initcall(dummy_timer_register);
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 8f3488b80896..b45b72b95861 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -552,7 +552,7 @@ static int __init exynos4_timer_resources(struct device_node *np, void __iomem *
 
 	/* Install hotplug callbacks which configure the timer on this CPU */
 	err = cpuhp_setup_state(CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
-				"AP_EXYNOS4_MCT_TIMER_STARTING",
+				"clockevents/exynos4/mct_timer:starting",
 				exynos4_mct_starting_cpu,
 				exynos4_mct_dying_cpu);
 	if (err)
diff --git a/drivers/clocksource/jcore-pit.c b/drivers/clocksource/jcore-pit.c
index 54e1665aa03c..4e4146f69845 100644
--- a/drivers/clocksource/jcore-pit.c
+++ b/drivers/clocksource/jcore-pit.c
@@ -240,7 +240,7 @@ static int __init jcore_pit_init(struct device_node *node)
 	}
 
 	cpuhp_setup_state(CPUHP_AP_JCORE_TIMER_STARTING,
-			  "AP_JCORE_TIMER_STARTING",
+			  "clockevents/jcore:starting",
 			  jcore_pit_local_init, NULL);
 
 	return 0;
diff --git a/drivers/clocksource/metag_generic.c b/drivers/clocksource/metag_generic.c
index a80ab3e446b7..172f43d4bc1a 100644
--- a/drivers/clocksource/metag_generic.c
+++ b/drivers/clocksource/metag_generic.c
@@ -154,6 +154,6 @@ int __init metag_generic_timer_init(void)
 
 	/* Hook cpu boot to configure the CPU's timers */
 	return cpuhp_setup_state(CPUHP_AP_METAG_TIMER_STARTING,
-				 "AP_METAG_TIMER_STARTING",
+				 "clockevents/metag:starting",
 				 arch_timer_starting_cpu, NULL);
 }
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
index 7a960cd01104..d9278847ffb2 100644
--- a/drivers/clocksource/mips-gic-timer.c
+++ b/drivers/clocksource/mips-gic-timer.c
@@ -120,8 +120,8 @@ static int gic_clockevent_init(void)
 	}
 
 	cpuhp_setup_state(CPUHP_AP_MIPS_GIC_TIMER_STARTING,
-			  "AP_MIPS_GIC_TIMER_STARTING", gic_starting_cpu,
-			  gic_dying_cpu);
+			  "clockevents/mips/gic/timer:starting",
+			  gic_starting_cpu, gic_dying_cpu);
 	return 0;
 }
 
diff --git a/drivers/clocksource/qcom-timer.c b/drivers/clocksource/qcom-timer.c
index 3283cfa2aa52..3bf65fff5c08 100644
--- a/drivers/clocksource/qcom-timer.c
+++ b/drivers/clocksource/qcom-timer.c
@@ -182,7 +182,7 @@ static int __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq,
 	} else {
 		/* Install and invoke hotplug callbacks */
 		res = cpuhp_setup_state(CPUHP_AP_QCOM_TIMER_STARTING,
-					"AP_QCOM_TIMER_STARTING",
+					"clockevents/qcom/timer:starting",
 					msm_local_timer_starting_cpu,
 					msm_local_timer_dying_cpu);
 		if (res) {
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index 3c39e6f45971..4440aefc59cd 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -320,7 +320,7 @@ static int __init armada_370_xp_timer_common_init(struct device_node *np)
 	}
 
 	res = cpuhp_setup_state(CPUHP_AP_ARMADA_TIMER_STARTING,
-				"AP_ARMADA_TIMER_STARTING",
+				"clockevents/armada:starting",
 				armada_370_xp_timer_starting_cpu,
 				armada_370_xp_timer_dying_cpu);
 	if (res) {
diff --git a/drivers/clocksource/timer-atlas7.c b/drivers/clocksource/timer-atlas7.c
index 4334e0330ada..3c23e1744f4a 100644
--- a/drivers/clocksource/timer-atlas7.c
+++ b/drivers/clocksource/timer-atlas7.c
@@ -221,7 +221,7 @@ static int __init sirfsoc_clockevent_init(void)
 
 	/* Install and invoke hotplug callbacks */
 	return cpuhp_setup_state(CPUHP_AP_MARCO_TIMER_STARTING,
-				 "AP_MARCO_TIMER_STARTING",
+				 "clockevents/marco:starting",
 				 sirfsoc_local_timer_starting_cpu,
 				 sirfsoc_local_timer_dying_cpu);
 }
diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c
index 3fe368b23d15..a51b6b64ecdf 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x.c
@@ -804,10 +804,10 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
 
 	if (!etm_count++) {
 		cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING,
-					  "AP_ARM_CORESIGHT_STARTING",
+					  "arm/coresight:starting",
 					  etm_starting_cpu, etm_dying_cpu);
 		ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
-						"AP_ARM_CORESIGHT_ONLINE",
+						"arm/coresight:online",
 						etm_online_cpu, NULL);
 		if (ret < 0)
 			goto err_arch_supported;
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 4db8d6a4d0cb..031480f2c34d 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -986,11 +986,11 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
 		dev_err(dev, "ETM arch init failed\n");
 
 	if (!etm4_count++) {
-		cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT4_STARTING,
-					  "AP_ARM_CORESIGHT4_STARTING",
+		cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING,
+					  "arm/coresight4:starting",
 					  etm4_starting_cpu, etm4_dying_cpu);
 		ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
-						"AP_ARM_CORESIGHT4_ONLINE",
+						"arm/coresight4:online",
 						etm4_online_cpu, NULL);
 		if (ret < 0)
 			goto err_arch_supported;
@@ -1037,7 +1037,7 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
 
 err_arch_supported:
 	if (--etm4_count == 0) {
-		cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT4_STARTING);
+		cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);
 		if (hp_online)
 			cpuhp_remove_state_nocalls(hp_online);
 	}
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 8bcee65a0b8c..eb0d4d41b156 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -578,13 +578,13 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
 #ifdef CONFIG_SMP
 		set_smp_cross_call(armada_mpic_send_doorbell);
 		cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_ARMADA_XP_STARTING,
-					  "AP_IRQ_ARMADA_XP_STARTING",
+					  "irqchip/armada/ipi:starting",
 					  armada_xp_mpic_starting_cpu, NULL);
 #endif
 	} else {
 #ifdef CONFIG_SMP
-		cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_ARMADA_CASC_STARTING,
-					  "AP_IRQ_ARMADA_CASC_STARTING",
+		cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_ARMADA_XP_STARTING,
+					  "irqchip/armada/cascade:starting",
 					  mpic_cascaded_starting_cpu, NULL);
 #endif
 		irq_set_chained_handler(parent_irq,
diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c
index d96b2c947e74..e7463e3c0814 100644
--- a/drivers/irqchip/irq-bcm2836.c
+++ b/drivers/irqchip/irq-bcm2836.c
@@ -245,7 +245,7 @@ bcm2836_arm_irqchip_smp_init(void)
 #ifdef CONFIG_SMP
 	/* Unmask IPIs to the boot CPU. */
 	cpuhp_setup_state(CPUHP_AP_IRQ_BCM2836_STARTING,
-			  "AP_IRQ_BCM2836_STARTING", bcm2836_cpu_starting,
+			  "irqchip/bcm2836:starting", bcm2836_cpu_starting,
 			  bcm2836_cpu_dying);
 
 	set_smp_cross_call(bcm2836_arm_irqchip_send_ipi);
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 26e1d7fafb1e..c132f29322cc 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -632,9 +632,9 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 static void gic_smp_init(void)
 {
 	set_smp_cross_call(gic_raise_softirq);
-	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GICV3_STARTING,
-				  "AP_IRQ_GICV3_STARTING", gic_starting_cpu,
-				  NULL);
+	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
+				  "irqchip/arm/gicv3:starting",
+				  gic_starting_cpu, NULL);
 }
 
 static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index d6c404b3584d..1b1df4f770bd 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -1191,7 +1191,7 @@ static int __init __gic_init_bases(struct gic_chip_data *gic,
 	set_smp_cross_call(gic_raise_softirq);
 #endif
 	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
-				  "AP_IRQ_GIC_STARTING",
+				  "irqchip/arm/gic:starting",
 				  gic_starting_cpu, NULL);
 	set_handle_irq(gic_handle_irq);
 	if (static_key_true(&supports_deactivate))
diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c
index 021b0e0833c1..c1b4ee955dbe 100644
--- a/drivers/irqchip/irq-hip04.c
+++ b/drivers/irqchip/irq-hip04.c
@@ -407,7 +407,7 @@ hip04_of_init(struct device_node *node, struct device_node *parent)
 	set_handle_irq(hip04_handle_irq);
 
 	hip04_irq_dist_init(&hip04_data);
-	cpuhp_setup_state(CPUHP_AP_IRQ_HIP04_STARTING, "AP_IRQ_HIP04_STARTING",
+	cpuhp_setup_state(CPUHP_AP_IRQ_HIP04_STARTING, "irqchip/hip04:starting",
 			  hip04_irq_starting_cpu, NULL);
 	return 0;
 }
diff --git a/drivers/leds/trigger/ledtrig-cpu.c b/drivers/leds/trigger/ledtrig-cpu.c
index 9719caf7437c..a41896468cb3 100644
--- a/drivers/leds/trigger/ledtrig-cpu.c
+++ b/drivers/leds/trigger/ledtrig-cpu.c
@@ -127,7 +127,7 @@ static int __init ledtrig_cpu_init(void)
 
 	register_syscore_ops(&ledtrig_cpu_syscore_ops);
 
-	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "AP_LEDTRIG_STARTING",
+	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "leds/trigger:starting",
 				ledtrig_online_cpu, ledtrig_prepare_down_cpu);
 	if (ret < 0)
 		pr_err("CPU hotplug notifier for ledtrig-cpu could not be registered: %d\n",
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 5deeda61d6d3..4a105006ca63 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2484,13 +2484,13 @@ static __init int virtio_net_driver_init(void)
 {
 	int ret;
 
-	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "AP_VIRT_NET_ONLINE",
+	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online",
 				      virtnet_cpu_online,
 				      virtnet_cpu_down_prep);
 	if (ret < 0)
 		goto out;
 	virtionet_online = ret;
-	ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "VIRT_NET_DEAD",
+	ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead",
 				      NULL, virtnet_cpu_dead);
 	if (ret)
 		goto err_dead;
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index b37b57294566..6d9335865880 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -1084,7 +1084,7 @@ static int arm_pmu_hp_init(void)
 	int ret;
 
 	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING,
-				      "AP_PERF_ARM_STARTING",
+				      "perf/arm/pmu:starting",
 				      arm_perf_starting_cpu, NULL);
 	if (ret)
 		pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 0990130821fa..c639d5a02656 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -127,13 +127,6 @@ module_param_named(log_fka, bnx2fc_log_fka, uint, S_IRUGO|S_IWUSR);
127MODULE_PARM_DESC(log_fka, " Print message to kernel log when fcoe is " 127MODULE_PARM_DESC(log_fka, " Print message to kernel log when fcoe is "
128 "initiating a FIP keep alive when debug logging is enabled."); 128 "initiating a FIP keep alive when debug logging is enabled.");
129 129
130static int bnx2fc_cpu_callback(struct notifier_block *nfb,
131 unsigned long action, void *hcpu);
132/* notification function for CPU hotplug events */
133static struct notifier_block bnx2fc_cpu_notifier = {
134 .notifier_call = bnx2fc_cpu_callback,
135};
136
137static inline struct net_device *bnx2fc_netdev(const struct fc_lport *lport) 130static inline struct net_device *bnx2fc_netdev(const struct fc_lport *lport)
138{ 131{
139 return ((struct bnx2fc_interface *) 132 return ((struct bnx2fc_interface *)
@@ -2622,37 +2615,19 @@ static void bnx2fc_percpu_thread_destroy(unsigned int cpu)
2622 kthread_stop(thread); 2615 kthread_stop(thread);
2623} 2616}
2624 2617
2625/** 2618
2626 * bnx2fc_cpu_callback - Handler for CPU hotplug events 2619static int bnx2fc_cpu_online(unsigned int cpu)
2627 *
2628 * @nfb: The callback data block
2629 * @action: The event triggering the callback
2630 * @hcpu: The index of the CPU that the event is for
2631 *
2632 * This creates or destroys per-CPU data for fcoe
2633 *
2634 * Returns NOTIFY_OK always.
2635 */
2636static int bnx2fc_cpu_callback(struct notifier_block *nfb,
2637 unsigned long action, void *hcpu)
2638{ 2620{
2639 unsigned cpu = (unsigned long)hcpu; 2621 printk(PFX "CPU %x online: Create Rx thread\n", cpu);
2622 bnx2fc_percpu_thread_create(cpu);
2623 return 0;
2624}
2640 2625
2641 switch (action) { 2626static int bnx2fc_cpu_dead(unsigned int cpu)
2642 case CPU_ONLINE: 2627{
2643 case CPU_ONLINE_FROZEN: 2628 printk(PFX "CPU %x offline: Remove Rx thread\n", cpu);
2644 printk(PFX "CPU %x online: Create Rx thread\n", cpu); 2629 bnx2fc_percpu_thread_destroy(cpu);
2645 bnx2fc_percpu_thread_create(cpu); 2630 return 0;
2646 break;
2647 case CPU_DEAD:
2648 case CPU_DEAD_FROZEN:
2649 printk(PFX "CPU %x offline: Remove Rx thread\n", cpu);
2650 bnx2fc_percpu_thread_destroy(cpu);
2651 break;
2652 default:
2653 break;
2654 }
2655 return NOTIFY_OK;
2656} 2631}
2657 2632
2658static int bnx2fc_slave_configure(struct scsi_device *sdev) 2633static int bnx2fc_slave_configure(struct scsi_device *sdev)
@@ -2664,6 +2639,8 @@ static int bnx2fc_slave_configure(struct scsi_device *sdev)
2664 return 0; 2639 return 0;
2665} 2640}
2666 2641
2642static enum cpuhp_state bnx2fc_online_state;
2643
2667/** 2644/**
2668 * bnx2fc_mod_init - module init entry point 2645 * bnx2fc_mod_init - module init entry point
2669 * 2646 *
@@ -2724,21 +2701,31 @@ static int __init bnx2fc_mod_init(void)
2724 spin_lock_init(&p->fp_work_lock); 2701 spin_lock_init(&p->fp_work_lock);
2725 } 2702 }
2726 2703
2727 cpu_notifier_register_begin(); 2704 get_online_cpus();
2728 2705
2729 for_each_online_cpu(cpu) { 2706 for_each_online_cpu(cpu)
2730 bnx2fc_percpu_thread_create(cpu); 2707 bnx2fc_percpu_thread_create(cpu);
2731 }
2732 2708
2733 /* Initialize per CPU interrupt thread */ 2709 rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
2734 __register_hotcpu_notifier(&bnx2fc_cpu_notifier); 2710 "scsi/bnx2fc:online",
2711 bnx2fc_cpu_online, NULL);
2712 if (rc < 0)
2713 goto stop_threads;
2714 bnx2fc_online_state = rc;
2735 2715
2736 cpu_notifier_register_done(); 2716 cpuhp_setup_state_nocalls(CPUHP_SCSI_BNX2FC_DEAD, "scsi/bnx2fc:dead",
2717 NULL, bnx2fc_cpu_dead);
2718 put_online_cpus();
2737 2719
2738 cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb); 2720 cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb);
2739 2721
2740 return 0; 2722 return 0;
2741 2723
2724stop_threads:
2725 for_each_online_cpu(cpu)
2726 bnx2fc_percpu_thread_destroy(cpu);
2727 put_online_cpus();
2728 kthread_stop(l2_thread);
2742free_wq: 2729free_wq:
2743 destroy_workqueue(bnx2fc_wq); 2730 destroy_workqueue(bnx2fc_wq);
2744release_bt: 2731release_bt:
@@ -2797,16 +2784,16 @@ static void __exit bnx2fc_mod_exit(void)
2797 if (l2_thread) 2784 if (l2_thread)
2798 kthread_stop(l2_thread); 2785 kthread_stop(l2_thread);
2799 2786
2800 cpu_notifier_register_begin(); 2787 get_online_cpus();
2801
2802 /* Destroy per cpu threads */ 2788 /* Destroy per cpu threads */
2803 for_each_online_cpu(cpu) { 2789 for_each_online_cpu(cpu) {
2804 bnx2fc_percpu_thread_destroy(cpu); 2790 bnx2fc_percpu_thread_destroy(cpu);
2805 } 2791 }
2806 2792
2807 __unregister_hotcpu_notifier(&bnx2fc_cpu_notifier); 2793 cpuhp_remove_state_nocalls(bnx2fc_online_state);
2794 cpuhp_remove_state_nocalls(CPUHP_SCSI_BNX2FC_DEAD);
2808 2795
2809 cpu_notifier_register_done(); 2796 put_online_cpus();
2810 2797
2811 destroy_workqueue(bnx2fc_wq); 2798 destroy_workqueue(bnx2fc_wq);
2812 /* 2799 /*
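
Reviewer note: the bnx2fc hunks above follow the common two-state pattern of this series: the per-CPU threads are still created by hand under get_online_cpus(), the online callback lands on a dynamically allocated CPUHP_AP_ONLINE_DYN slot (the returned id is kept in bnx2fc_online_state), and the dead callback gets the fixed CPUHP_SCSI_BNX2FC_DEAD slot. A condensed, hypothetical sketch of that pattern, with foo_* names and a made-up CPUHP_FOO_DEAD entry standing in for a slot a real driver would have to add to enum cpuhp_state:

#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

static enum cpuhp_state foo_online_state;	/* dynamic slot, kept for teardown */

static int foo_cpu_online(unsigned int cpu)
{
	/* bring up the per-CPU resources of @cpu */
	return 0;
}

static int foo_cpu_dead(unsigned int cpu)
{
	/* reap the per-CPU resources after @cpu has gone away */
	return 0;
}

static int __init foo_init(void)
{
	int ret;

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "scsi/foo:online",
					foo_cpu_online, NULL);
	if (ret < 0)
		return ret;
	foo_online_state = ret;		/* dynamic setup returns the slot id */

	/* dead work needs a fixed slot (hypothetical here), see cpuhotplug.h below */
	cpuhp_setup_state_nocalls(CPUHP_FOO_DEAD, "scsi/foo:dead",
				  NULL, foo_cpu_dead);
	return 0;
}

static void __exit foo_exit(void)
{
	cpuhp_remove_state_nocalls(foo_online_state);
	cpuhp_remove_state_nocalls(CPUHP_FOO_DEAD);
}
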
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index c8b410c24cf0..86afc002814c 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -70,14 +70,6 @@ u64 iscsi_error_mask = 0x00;
70 70
71DEFINE_PER_CPU(struct bnx2i_percpu_s, bnx2i_percpu); 71DEFINE_PER_CPU(struct bnx2i_percpu_s, bnx2i_percpu);
72 72
73static int bnx2i_cpu_callback(struct notifier_block *nfb,
74 unsigned long action, void *hcpu);
75/* notification function for CPU hotplug events */
76static struct notifier_block bnx2i_cpu_notifier = {
77 .notifier_call = bnx2i_cpu_callback,
78};
79
80
81/** 73/**
82 * bnx2i_identify_device - identifies NetXtreme II device type 74 * bnx2i_identify_device - identifies NetXtreme II device type
83 * @hba: Adapter structure pointer 75 * @hba: Adapter structure pointer
@@ -461,41 +453,21 @@ static void bnx2i_percpu_thread_destroy(unsigned int cpu)
461 kthread_stop(thread); 453 kthread_stop(thread);
462} 454}
463 455
464 456static int bnx2i_cpu_online(unsigned int cpu)
465/**
466 * bnx2i_cpu_callback - Handler for CPU hotplug events
467 *
468 * @nfb: The callback data block
469 * @action: The event triggering the callback
470 * @hcpu: The index of the CPU that the event is for
471 *
472 * This creates or destroys per-CPU data for iSCSI
473 *
474 * Returns NOTIFY_OK always.
475 */
476static int bnx2i_cpu_callback(struct notifier_block *nfb,
477 unsigned long action, void *hcpu)
478{ 457{
479 unsigned cpu = (unsigned long)hcpu; 458 pr_info("bnx2i: CPU %x online: Create Rx thread\n", cpu);
459 bnx2i_percpu_thread_create(cpu);
460 return 0;
461}
480 462
481 switch (action) { 463static int bnx2i_cpu_dead(unsigned int cpu)
482 case CPU_ONLINE: 464{
483 case CPU_ONLINE_FROZEN: 465 pr_info("CPU %x offline: Remove Rx thread\n", cpu);
484 printk(KERN_INFO "bnx2i: CPU %x online: Create Rx thread\n", 466 bnx2i_percpu_thread_destroy(cpu);
485 cpu); 467 return 0;
486 bnx2i_percpu_thread_create(cpu);
487 break;
488 case CPU_DEAD:
489 case CPU_DEAD_FROZEN:
490 printk(KERN_INFO "CPU %x offline: Remove Rx thread\n", cpu);
491 bnx2i_percpu_thread_destroy(cpu);
492 break;
493 default:
494 break;
495 }
496 return NOTIFY_OK;
497} 468}
498 469
470static enum cpuhp_state bnx2i_online_state;
499 471
500/** 472/**
501 * bnx2i_mod_init - module init entry point 473 * bnx2i_mod_init - module init entry point
@@ -539,18 +511,28 @@ static int __init bnx2i_mod_init(void)
539 p->iothread = NULL; 511 p->iothread = NULL;
540 } 512 }
541 513
542 cpu_notifier_register_begin(); 514 get_online_cpus();
543 515
544 for_each_online_cpu(cpu) 516 for_each_online_cpu(cpu)
545 bnx2i_percpu_thread_create(cpu); 517 bnx2i_percpu_thread_create(cpu);
546 518
547 /* Initialize per CPU interrupt thread */ 519 err = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
548 __register_hotcpu_notifier(&bnx2i_cpu_notifier); 520 "scsi/bnx2i:online",
549 521 bnx2i_cpu_online, NULL);
550 cpu_notifier_register_done(); 522 if (err < 0)
523 goto remove_threads;
524 bnx2i_online_state = err;
551 525
526 cpuhp_setup_state_nocalls(CPUHP_SCSI_BNX2I_DEAD, "scsi/bnx2i:dead",
527 NULL, bnx2i_cpu_dead);
528 put_online_cpus();
552 return 0; 529 return 0;
553 530
531remove_threads:
532 for_each_online_cpu(cpu)
533 bnx2i_percpu_thread_destroy(cpu);
534 put_online_cpus();
535 cnic_unregister_driver(CNIC_ULP_ISCSI);
554unreg_xport: 536unreg_xport:
555 iscsi_unregister_transport(&bnx2i_iscsi_transport); 537 iscsi_unregister_transport(&bnx2i_iscsi_transport);
556out: 538out:
@@ -587,14 +569,14 @@ static void __exit bnx2i_mod_exit(void)
587 } 569 }
588 mutex_unlock(&bnx2i_dev_lock); 570 mutex_unlock(&bnx2i_dev_lock);
589 571
590 cpu_notifier_register_begin(); 572 get_online_cpus();
591 573
592 for_each_online_cpu(cpu) 574 for_each_online_cpu(cpu)
593 bnx2i_percpu_thread_destroy(cpu); 575 bnx2i_percpu_thread_destroy(cpu);
594 576
595 __unregister_hotcpu_notifier(&bnx2i_cpu_notifier); 577 cpuhp_remove_state_nocalls(bnx2i_online_state);
596 578 cpuhp_remove_state_nocalls(CPUHP_SCSI_BNX2I_DEAD);
597 cpu_notifier_register_done(); 579 put_online_cpus();
598 580
599 iscsi_unregister_transport(&bnx2i_iscsi_transport); 581 iscsi_unregister_transport(&bnx2i_iscsi_transport);
600 cnic_unregister_driver(CNIC_ULP_ISCSI); 582 cnic_unregister_driver(CNIC_ULP_ISCSI);
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 19ead8d17e55..5eda21d903e9 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -1612,30 +1612,29 @@ static int qedi_percpu_io_thread(void *arg)
1612 return 0; 1612 return 0;
1613} 1613}
1614 1614
1615static void qedi_percpu_thread_create(unsigned int cpu) 1615static int qedi_cpu_online(unsigned int cpu)
1616{ 1616{
1617 struct qedi_percpu_s *p; 1617 struct qedi_percpu_s *p = this_cpu_ptr(&qedi_percpu);
1618 struct task_struct *thread; 1618 struct task_struct *thread;
1619 1619
1620 p = &per_cpu(qedi_percpu, cpu);
1621
1622 thread = kthread_create_on_node(qedi_percpu_io_thread, (void *)p, 1620 thread = kthread_create_on_node(qedi_percpu_io_thread, (void *)p,
1623 cpu_to_node(cpu), 1621 cpu_to_node(cpu),
1624 "qedi_thread/%d", cpu); 1622 "qedi_thread/%d", cpu);
1625 if (likely(!IS_ERR(thread))) { 1623 if (IS_ERR(thread))
1626 kthread_bind(thread, cpu); 1624 return PTR_ERR(thread);
1627 p->iothread = thread; 1625
1628 wake_up_process(thread); 1626 kthread_bind(thread, cpu);
1629 } 1627 p->iothread = thread;
1628 wake_up_process(thread);
1629 return 0;
1630} 1630}
1631 1631
1632static void qedi_percpu_thread_destroy(unsigned int cpu) 1632static int qedi_cpu_offline(unsigned int cpu)
1633{ 1633{
1634 struct qedi_percpu_s *p; 1634 struct qedi_percpu_s *p = this_cpu_ptr(&qedi_percpu);
1635 struct task_struct *thread;
1636 struct qedi_work *work, *tmp; 1635 struct qedi_work *work, *tmp;
1636 struct task_struct *thread;
1637 1637
1638 p = &per_cpu(qedi_percpu, cpu);
1639 spin_lock_bh(&p->p_work_lock); 1638 spin_lock_bh(&p->p_work_lock);
1640 thread = p->iothread; 1639 thread = p->iothread;
1641 p->iothread = NULL; 1640 p->iothread = NULL;
@@ -1650,35 +1649,9 @@ static void qedi_percpu_thread_destroy(unsigned int cpu)
1650 spin_unlock_bh(&p->p_work_lock); 1649 spin_unlock_bh(&p->p_work_lock);
1651 if (thread) 1650 if (thread)
1652 kthread_stop(thread); 1651 kthread_stop(thread);
1652 return 0;
1653} 1653}
1654 1654
1655static int qedi_cpu_callback(struct notifier_block *nfb,
1656 unsigned long action, void *hcpu)
1657{
1658 unsigned int cpu = (unsigned long)hcpu;
1659
1660 switch (action) {
1661 case CPU_ONLINE:
1662 case CPU_ONLINE_FROZEN:
1663 QEDI_ERR(NULL, "CPU %d online.\n", cpu);
1664 qedi_percpu_thread_create(cpu);
1665 break;
1666 case CPU_DEAD:
1667 case CPU_DEAD_FROZEN:
1668 QEDI_ERR(NULL, "CPU %d offline.\n", cpu);
1669 qedi_percpu_thread_destroy(cpu);
1670 break;
1671 default:
1672 break;
1673 }
1674
1675 return NOTIFY_OK;
1676}
1677
1678static struct notifier_block qedi_cpu_notifier = {
1679 .notifier_call = qedi_cpu_callback,
1680};
1681
1682void qedi_reset_host_mtu(struct qedi_ctx *qedi, u16 mtu) 1655void qedi_reset_host_mtu(struct qedi_ctx *qedi, u16 mtu)
1683{ 1656{
1684 struct qed_ll2_params params; 1657 struct qed_ll2_params params;
@@ -2038,6 +2011,8 @@ static struct pci_device_id qedi_pci_tbl[] = {
2038}; 2011};
2039MODULE_DEVICE_TABLE(pci, qedi_pci_tbl); 2012MODULE_DEVICE_TABLE(pci, qedi_pci_tbl);
2040 2013
2014static enum cpuhp_state qedi_cpuhp_state;
2015
2041static struct pci_driver qedi_pci_driver = { 2016static struct pci_driver qedi_pci_driver = {
2042 .name = QEDI_MODULE_NAME, 2017 .name = QEDI_MODULE_NAME,
2043 .id_table = qedi_pci_tbl, 2018 .id_table = qedi_pci_tbl,
@@ -2047,16 +2022,13 @@ static struct pci_driver qedi_pci_driver = {
2047 2022
2048static int __init qedi_init(void) 2023static int __init qedi_init(void)
2049{ 2024{
2050 int rc = 0;
2051 int ret;
2052 struct qedi_percpu_s *p; 2025 struct qedi_percpu_s *p;
2053 unsigned int cpu = 0; 2026 int cpu, rc = 0;
2054 2027
2055 qedi_ops = qed_get_iscsi_ops(); 2028 qedi_ops = qed_get_iscsi_ops();
2056 if (!qedi_ops) { 2029 if (!qedi_ops) {
2057 QEDI_ERR(NULL, "Failed to get qed iSCSI operations\n"); 2030 QEDI_ERR(NULL, "Failed to get qed iSCSI operations\n");
2058 rc = -EINVAL; 2031 return -EINVAL;
2059 goto exit_qedi_init_0;
2060 } 2032 }
2061 2033
2062#ifdef CONFIG_DEBUG_FS 2034#ifdef CONFIG_DEBUG_FS
@@ -2070,15 +2042,6 @@ static int __init qedi_init(void)
2070 goto exit_qedi_init_1; 2042 goto exit_qedi_init_1;
2071 } 2043 }
2072 2044
2073 register_hotcpu_notifier(&qedi_cpu_notifier);
2074
2075 ret = pci_register_driver(&qedi_pci_driver);
2076 if (ret) {
2077 QEDI_ERR(NULL, "Failed to register driver\n");
2078 rc = -EINVAL;
2079 goto exit_qedi_init_2;
2080 }
2081
2082 for_each_possible_cpu(cpu) { 2045 for_each_possible_cpu(cpu) {
2083 p = &per_cpu(qedi_percpu, cpu); 2046 p = &per_cpu(qedi_percpu, cpu);
2084 INIT_LIST_HEAD(&p->work_list); 2047 INIT_LIST_HEAD(&p->work_list);
@@ -2086,11 +2049,22 @@ static int __init qedi_init(void)
2086 p->iothread = NULL; 2049 p->iothread = NULL;
2087 } 2050 }
2088 2051
2089 for_each_online_cpu(cpu) 2052 rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/qedi:online",
2090 qedi_percpu_thread_create(cpu); 2053 qedi_cpu_online, qedi_cpu_offline);
2054 if (rc < 0)
2055 goto exit_qedi_init_2;
2056 qedi_cpuhp_state = rc;
2091 2057
2092 return rc; 2058 rc = pci_register_driver(&qedi_pci_driver);
2059 if (rc) {
2060 QEDI_ERR(NULL, "Failed to register driver\n");
2061 goto exit_qedi_hp;
2062 }
2063
2064 return 0;
2093 2065
2066exit_qedi_hp:
2067 cpuhp_remove_state(qedi_cpuhp_state);
2094exit_qedi_init_2: 2068exit_qedi_init_2:
2095 iscsi_unregister_transport(&qedi_iscsi_transport); 2069 iscsi_unregister_transport(&qedi_iscsi_transport);
2096exit_qedi_init_1: 2070exit_qedi_init_1:
@@ -2098,19 +2072,13 @@ exit_qedi_init_1:
2098 qedi_dbg_exit(); 2072 qedi_dbg_exit();
2099#endif 2073#endif
2100 qed_put_iscsi_ops(); 2074 qed_put_iscsi_ops();
2101exit_qedi_init_0:
2102 return rc; 2075 return rc;
2103} 2076}
2104 2077
2105static void __exit qedi_cleanup(void) 2078static void __exit qedi_cleanup(void)
2106{ 2079{
2107 unsigned int cpu = 0;
2108
2109 for_each_online_cpu(cpu)
2110 qedi_percpu_thread_destroy(cpu);
2111
2112 pci_unregister_driver(&qedi_pci_driver); 2080 pci_unregister_driver(&qedi_pci_driver);
2113 unregister_hotcpu_notifier(&qedi_cpu_notifier); 2081 cpuhp_remove_state(qedi_cpuhp_state);
2114 iscsi_unregister_transport(&qedi_iscsi_transport); 2082 iscsi_unregister_transport(&qedi_iscsi_transport);
2115 2083
2116#ifdef CONFIG_DEBUG_FS 2084#ifdef CONFIG_DEBUG_FS
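
Reviewer note: qedi only needs a matched online/offline pair, so the conversion above uses a single dynamic state with both callbacks and lets cpuhp_setup_state() invoke qedi_cpu_online() on every CPU that is already up; no manual for_each_online_cpu() loop or get_online_cpus() section remains. A minimal sketch of that simpler variant, with hypothetical bar_* names:

#include <linux/cpuhotplug.h>

static enum cpuhp_state bar_hp_state;

static int bar_cpu_online(unsigned int cpu)
{
	/* start the per-CPU worker for @cpu */
	return 0;
}

static int bar_cpu_offline(unsigned int cpu)
{
	/* flush and stop the per-CPU worker before @cpu goes down */
	return 0;
}

static int __init bar_init(void)
{
	int ret;

	/* also invokes bar_cpu_online() on all currently online CPUs */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/bar:online",
				bar_cpu_online, bar_cpu_offline);
	if (ret < 0)
		return ret;
	bar_hp_state = ret;
	return 0;
}

static void __exit bar_exit(void)
{
	/* invokes bar_cpu_offline() on all online CPUs and frees the slot */
	cpuhp_remove_state(bar_hp_state);
}
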
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
index 6b9cf06e8df2..427e2198bb9e 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
@@ -967,48 +967,38 @@ cfs_cpt_table_create_pattern(char *pattern)
967} 967}
968 968
969#ifdef CONFIG_HOTPLUG_CPU 969#ifdef CONFIG_HOTPLUG_CPU
970static int 970static enum cpuhp_state lustre_cpu_online;
971cfs_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
972{
973 unsigned int cpu = (unsigned long)hcpu;
974 bool warn;
975
976 switch (action) {
977 case CPU_DEAD:
978 case CPU_DEAD_FROZEN:
979 case CPU_ONLINE:
980 case CPU_ONLINE_FROZEN:
981 spin_lock(&cpt_data.cpt_lock);
982 cpt_data.cpt_version++;
983 spin_unlock(&cpt_data.cpt_lock);
984 /* Fall through */
985 default:
986 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) {
987 CDEBUG(D_INFO, "CPU changed [cpu %u action %lx]\n",
988 cpu, action);
989 break;
990 }
991 971
992 mutex_lock(&cpt_data.cpt_mutex); 972static void cfs_cpu_incr_cpt_version(void)
993 /* if all HTs in a core are offline, it may break affinity */ 973{
994 cpumask_copy(cpt_data.cpt_cpumask, 974 spin_lock(&cpt_data.cpt_lock);
995 topology_sibling_cpumask(cpu)); 975 cpt_data.cpt_version++;
996 warn = cpumask_any_and(cpt_data.cpt_cpumask, 976 spin_unlock(&cpt_data.cpt_lock);
997 cpu_online_mask) >= nr_cpu_ids; 977}
998 mutex_unlock(&cpt_data.cpt_mutex);
999 CDEBUG(warn ? D_WARNING : D_INFO,
1000 "Lustre: can't support CPU plug-out well now, performance and stability could be impacted [CPU %u action: %lx]\n",
1001 cpu, action);
1002 }
1003 978
1004 return NOTIFY_OK; 979static int cfs_cpu_online(unsigned int cpu)
980{
981 cfs_cpu_incr_cpt_version();
982 return 0;
1005} 983}
1006 984
1007static struct notifier_block cfs_cpu_notifier = { 985static int cfs_cpu_dead(unsigned int cpu)
1008 .notifier_call = cfs_cpu_notify, 986{
1009 .priority = 0 987 bool warn;
1010}; 988
989 cfs_cpu_incr_cpt_version();
1011 990
991 mutex_lock(&cpt_data.cpt_mutex);
992 /* if all HTs in a core are offline, it may break affinity */
993 cpumask_copy(cpt_data.cpt_cpumask, topology_sibling_cpumask(cpu));
994 warn = cpumask_any_and(cpt_data.cpt_cpumask,
995 cpu_online_mask) >= nr_cpu_ids;
996 mutex_unlock(&cpt_data.cpt_mutex);
997 CDEBUG(warn ? D_WARNING : D_INFO,
998 "Lustre: can't support CPU plug-out well now, performance and stability could be impacted [CPU %u]\n",
999 cpu);
1000 return 0;
1001}
1012#endif 1002#endif
1013 1003
1014void 1004void
@@ -1018,7 +1008,9 @@ cfs_cpu_fini(void)
1018 cfs_cpt_table_free(cfs_cpt_table); 1008 cfs_cpt_table_free(cfs_cpt_table);
1019 1009
1020#ifdef CONFIG_HOTPLUG_CPU 1010#ifdef CONFIG_HOTPLUG_CPU
1021 unregister_hotcpu_notifier(&cfs_cpu_notifier); 1011 if (lustre_cpu_online > 0)
1012 cpuhp_remove_state_nocalls(lustre_cpu_online);
1013 cpuhp_remove_state_nocalls(CPUHP_LUSTRE_CFS_DEAD);
1022#endif 1014#endif
1023 if (cpt_data.cpt_cpumask) 1015 if (cpt_data.cpt_cpumask)
1024 LIBCFS_FREE(cpt_data.cpt_cpumask, cpumask_size()); 1016 LIBCFS_FREE(cpt_data.cpt_cpumask, cpumask_size());
@@ -1027,6 +1019,8 @@ cfs_cpu_fini(void)
1027int 1019int
1028cfs_cpu_init(void) 1020cfs_cpu_init(void)
1029{ 1021{
1022 int ret = 0;
1023
1030 LASSERT(!cfs_cpt_table); 1024 LASSERT(!cfs_cpt_table);
1031 1025
1032 memset(&cpt_data, 0, sizeof(cpt_data)); 1026 memset(&cpt_data, 0, sizeof(cpt_data));
@@ -1041,8 +1035,19 @@ cfs_cpu_init(void)
1041 mutex_init(&cpt_data.cpt_mutex); 1035 mutex_init(&cpt_data.cpt_mutex);
1042 1036
1043#ifdef CONFIG_HOTPLUG_CPU 1037#ifdef CONFIG_HOTPLUG_CPU
1044 register_hotcpu_notifier(&cfs_cpu_notifier); 1038 ret = cpuhp_setup_state_nocalls(CPUHP_LUSTRE_CFS_DEAD,
1039 "staging/lustre/cfe:dead", NULL,
1040 cfs_cpu_dead);
1041 if (ret < 0)
1042 goto failed;
1043 ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
1044 "staging/lustre/cfe:online",
1045 cfs_cpu_online, NULL);
1046 if (ret < 0)
1047 goto failed;
1048 lustre_cpu_online = ret;
1045#endif 1049#endif
1050 ret = -EINVAL;
1046 1051
1047 if (*cpu_pattern) { 1052 if (*cpu_pattern) {
1048 cfs_cpt_table = cfs_cpt_table_create_pattern(cpu_pattern); 1053 cfs_cpt_table = cfs_cpt_table_create_pattern(cpu_pattern);
@@ -1075,7 +1080,7 @@ cfs_cpu_init(void)
1075 1080
1076 failed: 1081 failed:
1077 cfs_cpu_fini(); 1082 cfs_cpu_fini();
1078 return -1; 1083 return ret;
1079} 1084}
1080 1085
1081#endif 1086#endif
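
Reviewer note: cfs_cpu_fini() doubles as the error path of cfs_cpu_init(), which is why the hunk guards the removal with "if (lustre_cpu_online > 0)": the dynamic slot id is only stored once cpuhp_setup_state_nocalls() has succeeded, and a zero-initialized enum cpuhp_state equals CPUHP_OFFLINE. A condensed sketch of that guard, with hypothetical qux_* names:

#include <linux/cpuhotplug.h>

static enum cpuhp_state qux_online;	/* 0 (CPUHP_OFFLINE) means "not registered" */

static int qux_cpu_online(unsigned int cpu)
{
	return 0;
}

static void qux_fini(void)
{
	if (qux_online > 0)
		cpuhp_remove_state_nocalls(qux_online);
	/* ...remaining cleanup, safe to run on a partially set up object... */
}

static int __init qux_init(void)
{
	int ret;

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "qux:online",
					qux_cpu_online, NULL);
	if (ret < 0)
		goto failed;
	qux_online = ret;
	return 0;

failed:
	qux_fini();
	return ret;
}
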
diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
index 7ef27c6ed72f..c03f9c86c7e3 100644
--- a/drivers/xen/events/events_fifo.c
+++ b/drivers/xen/events/events_fifo.c
@@ -445,7 +445,7 @@ int __init xen_evtchn_fifo_init(void)
445 evtchn_ops = &evtchn_ops_fifo; 445 evtchn_ops = &evtchn_ops_fifo;
446 446
447 cpuhp_setup_state_nocalls(CPUHP_XEN_EVTCHN_PREPARE, 447 cpuhp_setup_state_nocalls(CPUHP_XEN_EVTCHN_PREPARE,
448 "CPUHP_XEN_EVTCHN_PREPARE", 448 "xen/evtchn:prepare",
449 xen_evtchn_cpu_prepare, xen_evtchn_cpu_dead); 449 xen_evtchn_cpu_prepare, xen_evtchn_cpu_dead);
450out: 450out:
451 put_cpu(); 451 put_cpu();
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 09807c2ce328..21f9c74496e7 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -57,9 +57,6 @@ struct notifier_block;
57 57
58#define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */ 58#define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */
59#define CPU_UP_PREPARE 0x0003 /* CPU (unsigned)v coming up */ 59#define CPU_UP_PREPARE 0x0003 /* CPU (unsigned)v coming up */
60#define CPU_UP_CANCELED 0x0004 /* CPU (unsigned)v NOT coming up */
61#define CPU_DOWN_PREPARE 0x0005 /* CPU (unsigned)v going down */
62#define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */
63#define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */ 60#define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */
64#define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug 61#define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug
65 * lock is dropped */ 62 * lock is dropped */
@@ -80,80 +77,14 @@ struct notifier_block;
80 77
81#ifdef CONFIG_SMP 78#ifdef CONFIG_SMP
82extern bool cpuhp_tasks_frozen; 79extern bool cpuhp_tasks_frozen;
83/* Need to know about CPUs going up/down? */
84#if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
85#define cpu_notifier(fn, pri) { \
86 static struct notifier_block fn##_nb = \
87 { .notifier_call = fn, .priority = pri }; \
88 register_cpu_notifier(&fn##_nb); \
89}
90
91#define __cpu_notifier(fn, pri) { \
92 static struct notifier_block fn##_nb = \
93 { .notifier_call = fn, .priority = pri }; \
94 __register_cpu_notifier(&fn##_nb); \
95}
96
97extern int register_cpu_notifier(struct notifier_block *nb);
98extern int __register_cpu_notifier(struct notifier_block *nb);
99extern void unregister_cpu_notifier(struct notifier_block *nb);
100extern void __unregister_cpu_notifier(struct notifier_block *nb);
101
102#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
103#define cpu_notifier(fn, pri) do { (void)(fn); } while (0)
104#define __cpu_notifier(fn, pri) do { (void)(fn); } while (0)
105
106static inline int register_cpu_notifier(struct notifier_block *nb)
107{
108 return 0;
109}
110
111static inline int __register_cpu_notifier(struct notifier_block *nb)
112{
113 return 0;
114}
115
116static inline void unregister_cpu_notifier(struct notifier_block *nb)
117{
118}
119
120static inline void __unregister_cpu_notifier(struct notifier_block *nb)
121{
122}
123#endif
124
125int cpu_up(unsigned int cpu); 80int cpu_up(unsigned int cpu);
126void notify_cpu_starting(unsigned int cpu); 81void notify_cpu_starting(unsigned int cpu);
127extern void cpu_maps_update_begin(void); 82extern void cpu_maps_update_begin(void);
128extern void cpu_maps_update_done(void); 83extern void cpu_maps_update_done(void);
129 84
130#define cpu_notifier_register_begin cpu_maps_update_begin
131#define cpu_notifier_register_done cpu_maps_update_done
132
133#else /* CONFIG_SMP */ 85#else /* CONFIG_SMP */
134#define cpuhp_tasks_frozen 0 86#define cpuhp_tasks_frozen 0
135 87
136#define cpu_notifier(fn, pri) do { (void)(fn); } while (0)
137#define __cpu_notifier(fn, pri) do { (void)(fn); } while (0)
138
139static inline int register_cpu_notifier(struct notifier_block *nb)
140{
141 return 0;
142}
143
144static inline int __register_cpu_notifier(struct notifier_block *nb)
145{
146 return 0;
147}
148
149static inline void unregister_cpu_notifier(struct notifier_block *nb)
150{
151}
152
153static inline void __unregister_cpu_notifier(struct notifier_block *nb)
154{
155}
156
157static inline void cpu_maps_update_begin(void) 88static inline void cpu_maps_update_begin(void)
158{ 89{
159} 90}
@@ -162,14 +93,6 @@ static inline void cpu_maps_update_done(void)
162{ 93{
163} 94}
164 95
165static inline void cpu_notifier_register_begin(void)
166{
167}
168
169static inline void cpu_notifier_register_done(void)
170{
171}
172
173#endif /* CONFIG_SMP */ 96#endif /* CONFIG_SMP */
174extern struct bus_type cpu_subsys; 97extern struct bus_type cpu_subsys;
175 98
@@ -182,12 +105,6 @@ extern void get_online_cpus(void);
182extern void put_online_cpus(void); 105extern void put_online_cpus(void);
183extern void cpu_hotplug_disable(void); 106extern void cpu_hotplug_disable(void);
184extern void cpu_hotplug_enable(void); 107extern void cpu_hotplug_enable(void);
185#define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri)
186#define __hotcpu_notifier(fn, pri) __cpu_notifier(fn, pri)
187#define register_hotcpu_notifier(nb) register_cpu_notifier(nb)
188#define __register_hotcpu_notifier(nb) __register_cpu_notifier(nb)
189#define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb)
190#define __unregister_hotcpu_notifier(nb) __unregister_cpu_notifier(nb)
191void clear_tasks_mm_cpumask(int cpu); 108void clear_tasks_mm_cpumask(int cpu);
192int cpu_down(unsigned int cpu); 109int cpu_down(unsigned int cpu);
193 110
@@ -199,13 +116,6 @@ static inline void cpu_hotplug_done(void) {}
199#define put_online_cpus() do { } while (0) 116#define put_online_cpus() do { } while (0)
200#define cpu_hotplug_disable() do { } while (0) 117#define cpu_hotplug_disable() do { } while (0)
201#define cpu_hotplug_enable() do { } while (0) 118#define cpu_hotplug_enable() do { } while (0)
202#define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
203#define __hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
204/* These aren't inline functions due to a GCC bug. */
205#define register_hotcpu_notifier(nb) ({ (void)(nb); 0; })
206#define __register_hotcpu_notifier(nb) ({ (void)(nb); 0; })
207#define unregister_hotcpu_notifier(nb) ({ (void)(nb); })
208#define __unregister_hotcpu_notifier(nb) ({ (void)(nb); })
209#endif /* CONFIG_HOTPLUG_CPU */ 119#endif /* CONFIG_HOTPLUG_CPU */
210 120
211#ifdef CONFIG_PM_SLEEP_SMP 121#ifdef CONFIG_PM_SLEEP_SMP
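
Reviewer note: with the notifier macros gone from linux/cpu.h, former hotcpu_notifier() users express the same hooks as state callbacks. Roughly, CPU_UP_PREPARE/CPU_DEAD map to the startup/teardown of a state in the prepare section, CPU_ONLINE/CPU_DOWN_PREPARE to the startup/teardown of an AP online state, the *_FROZEN variants to checking the remaining cpuhp_tasks_frozen flag, and CPU_UP_CANCELED/CPU_DOWN_FAILED to the core's automatic rollback. A tiny hypothetical sketch (baz_* names are invented) of the online/down_prepare pair:

#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

static int baz_online(unsigned int cpu)		/* was: CPU_ONLINE / CPU_ONLINE_FROZEN */
{
	/* cpuhp_tasks_frozen distinguishes the old *_FROZEN case if needed */
	return 0;
}

static int baz_down_prepare(unsigned int cpu)	/* was: CPU_DOWN_PREPARE */
{
	return 0;
}

static int __init baz_init(void)
{
	int ret;

	/* replaces the notifier_block, register_cpu_notifier() and the switch() */
	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "subsys/baz:online",
					baz_online, baz_down_prepare);
	return ret < 0 ? ret : 0;
}
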
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 2ab7bf53d529..20bfefbe7594 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -41,6 +41,9 @@ enum cpuhp_state {
41 CPUHP_NET_DEV_DEAD, 41 CPUHP_NET_DEV_DEAD,
42 CPUHP_PCI_XGENE_DEAD, 42 CPUHP_PCI_XGENE_DEAD,
43 CPUHP_IOMMU_INTEL_DEAD, 43 CPUHP_IOMMU_INTEL_DEAD,
44 CPUHP_LUSTRE_CFS_DEAD,
45 CPUHP_SCSI_BNX2FC_DEAD,
46 CPUHP_SCSI_BNX2I_DEAD,
44 CPUHP_WORKQUEUE_PREP, 47 CPUHP_WORKQUEUE_PREP,
45 CPUHP_POWER_NUMA_PREPARE, 48 CPUHP_POWER_NUMA_PREPARE,
46 CPUHP_HRTIMERS_PREPARE, 49 CPUHP_HRTIMERS_PREPARE,
@@ -56,7 +59,6 @@ enum cpuhp_state {
56 CPUHP_POWERPC_MMU_CTX_PREPARE, 59 CPUHP_POWERPC_MMU_CTX_PREPARE,
57 CPUHP_XEN_PREPARE, 60 CPUHP_XEN_PREPARE,
58 CPUHP_XEN_EVTCHN_PREPARE, 61 CPUHP_XEN_EVTCHN_PREPARE,
59 CPUHP_NOTIFY_PREPARE,
60 CPUHP_ARM_SHMOBILE_SCU_PREPARE, 62 CPUHP_ARM_SHMOBILE_SCU_PREPARE,
61 CPUHP_SH_SH3X_PREPARE, 63 CPUHP_SH_SH3X_PREPARE,
62 CPUHP_BLK_MQ_PREPARE, 64 CPUHP_BLK_MQ_PREPARE,
@@ -71,7 +73,6 @@ enum cpuhp_state {
71 CPUHP_KVM_PPC_BOOK3S_PREPARE, 73 CPUHP_KVM_PPC_BOOK3S_PREPARE,
72 CPUHP_ZCOMP_PREPARE, 74 CPUHP_ZCOMP_PREPARE,
73 CPUHP_TIMERS_DEAD, 75 CPUHP_TIMERS_DEAD,
74 CPUHP_NOTF_ERR_INJ_PREPARE,
75 CPUHP_MIPS_SOC_PREPARE, 76 CPUHP_MIPS_SOC_PREPARE,
76 CPUHP_BRINGUP_CPU, 77 CPUHP_BRINGUP_CPU,
77 CPUHP_AP_IDLE_DEAD, 78 CPUHP_AP_IDLE_DEAD,
@@ -79,10 +80,8 @@ enum cpuhp_state {
79 CPUHP_AP_SCHED_STARTING, 80 CPUHP_AP_SCHED_STARTING,
80 CPUHP_AP_RCUTREE_DYING, 81 CPUHP_AP_RCUTREE_DYING,
81 CPUHP_AP_IRQ_GIC_STARTING, 82 CPUHP_AP_IRQ_GIC_STARTING,
82 CPUHP_AP_IRQ_GICV3_STARTING,
83 CPUHP_AP_IRQ_HIP04_STARTING, 83 CPUHP_AP_IRQ_HIP04_STARTING,
84 CPUHP_AP_IRQ_ARMADA_XP_STARTING, 84 CPUHP_AP_IRQ_ARMADA_XP_STARTING,
85 CPUHP_AP_IRQ_ARMADA_CASC_STARTING,
86 CPUHP_AP_IRQ_BCM2836_STARTING, 85 CPUHP_AP_IRQ_BCM2836_STARTING,
87 CPUHP_AP_ARM_MVEBU_COHERENCY, 86 CPUHP_AP_ARM_MVEBU_COHERENCY,
88 CPUHP_AP_PERF_X86_UNCORE_STARTING, 87 CPUHP_AP_PERF_X86_UNCORE_STARTING,
@@ -118,7 +117,6 @@ enum cpuhp_state {
118 CPUHP_AP_DUMMY_TIMER_STARTING, 117 CPUHP_AP_DUMMY_TIMER_STARTING,
119 CPUHP_AP_ARM_XEN_STARTING, 118 CPUHP_AP_ARM_XEN_STARTING,
120 CPUHP_AP_ARM_CORESIGHT_STARTING, 119 CPUHP_AP_ARM_CORESIGHT_STARTING,
121 CPUHP_AP_ARM_CORESIGHT4_STARTING,
122 CPUHP_AP_ARM64_ISNDEP_STARTING, 120 CPUHP_AP_ARM64_ISNDEP_STARTING,
123 CPUHP_AP_SMPCFD_DYING, 121 CPUHP_AP_SMPCFD_DYING,
124 CPUHP_AP_X86_TBOOT_DYING, 122 CPUHP_AP_X86_TBOOT_DYING,
@@ -142,7 +140,6 @@ enum cpuhp_state {
142 CPUHP_AP_PERF_ARM_L2X0_ONLINE, 140 CPUHP_AP_PERF_ARM_L2X0_ONLINE,
143 CPUHP_AP_WORKQUEUE_ONLINE, 141 CPUHP_AP_WORKQUEUE_ONLINE,
144 CPUHP_AP_RCUTREE_ONLINE, 142 CPUHP_AP_RCUTREE_ONLINE,
145 CPUHP_AP_NOTIFY_ONLINE,
146 CPUHP_AP_ONLINE_DYN, 143 CPUHP_AP_ONLINE_DYN,
147 CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30, 144 CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30,
148 CPUHP_AP_X86_HPET_ONLINE, 145 CPUHP_AP_X86_HPET_ONLINE,
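
Reviewer note: the three new CPUHP_SCSI_*/CPUHP_LUSTRE_* entries sit in the prepare/dead section of the enum, before CPUHP_BRINGUP_CPU, so their teardown callbacks run on a control CPU after the dying CPU is completely gone. That is what lets bnx2fc, bnx2i and lustre keep CPU_DEAD-style cleanup rather than a down_prepare hook, and why those slots cannot come out of the dynamic CPUHP_AP_ONLINE_DYN range. A hypothetical teardown-only registration against such a fixed slot (CPUHP_QUUX_DEAD is made up and would have to be added next to the entries above):

#include <linux/cpuhotplug.h>

static int quux_cpu_dead(unsigned int cpu)
{
	/* @cpu is already offline; migrate or free whatever it left behind */
	return 0;
}

static int __init quux_init(void)
{
	/* NULL startup: nothing to do while the CPU comes up */
	return cpuhp_setup_state_nocalls(CPUHP_QUUX_DEAD, "quux:dead",
					 NULL, quux_cpu_dead);
}

static void __exit quux_exit(void)
{
	cpuhp_remove_state_nocalls(CPUHP_QUUX_DEAD);
}
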
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 5339aca811d2..042fd7e8e030 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -183,23 +183,16 @@ EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
183/* 183/*
184 * The following two APIs (cpu_maps_update_begin/done) must be used when 184 * The following two APIs (cpu_maps_update_begin/done) must be used when
185 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask. 185 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
186 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
187 * hotplug callback (un)registration performed using __register_cpu_notifier()
188 * or __unregister_cpu_notifier().
189 */ 186 */
190void cpu_maps_update_begin(void) 187void cpu_maps_update_begin(void)
191{ 188{
192 mutex_lock(&cpu_add_remove_lock); 189 mutex_lock(&cpu_add_remove_lock);
193} 190}
194EXPORT_SYMBOL(cpu_notifier_register_begin);
195 191
196void cpu_maps_update_done(void) 192void cpu_maps_update_done(void)
197{ 193{
198 mutex_unlock(&cpu_add_remove_lock); 194 mutex_unlock(&cpu_add_remove_lock);
199} 195}
200EXPORT_SYMBOL(cpu_notifier_register_done);
201
202static RAW_NOTIFIER_HEAD(cpu_chain);
203 196
204/* If set, cpu_up and cpu_down will return -EBUSY and do nothing. 197/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
205 * Should always be manipulated under cpu_add_remove_lock 198 * Should always be manipulated under cpu_add_remove_lock
@@ -349,66 +342,7 @@ void cpu_hotplug_enable(void)
349EXPORT_SYMBOL_GPL(cpu_hotplug_enable); 342EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
350#endif /* CONFIG_HOTPLUG_CPU */ 343#endif /* CONFIG_HOTPLUG_CPU */
351 344
352/* Need to know about CPUs going up/down? */
353int register_cpu_notifier(struct notifier_block *nb)
354{
355 int ret;
356 cpu_maps_update_begin();
357 ret = raw_notifier_chain_register(&cpu_chain, nb);
358 cpu_maps_update_done();
359 return ret;
360}
361
362int __register_cpu_notifier(struct notifier_block *nb)
363{
364 return raw_notifier_chain_register(&cpu_chain, nb);
365}
366
367static int __cpu_notify(unsigned long val, unsigned int cpu, int nr_to_call,
368 int *nr_calls)
369{
370 unsigned long mod = cpuhp_tasks_frozen ? CPU_TASKS_FROZEN : 0;
371 void *hcpu = (void *)(long)cpu;
372
373 int ret;
374
375 ret = __raw_notifier_call_chain(&cpu_chain, val | mod, hcpu, nr_to_call,
376 nr_calls);
377
378 return notifier_to_errno(ret);
379}
380
381static int cpu_notify(unsigned long val, unsigned int cpu)
382{
383 return __cpu_notify(val, cpu, -1, NULL);
384}
385
386static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
387{
388 BUG_ON(cpu_notify(val, cpu));
389}
390
391/* Notifier wrappers for transitioning to state machine */ 345/* Notifier wrappers for transitioning to state machine */
392static int notify_prepare(unsigned int cpu)
393{
394 int nr_calls = 0;
395 int ret;
396
397 ret = __cpu_notify(CPU_UP_PREPARE, cpu, -1, &nr_calls);
398 if (ret) {
399 nr_calls--;
400 printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
401 __func__, cpu);
402 __cpu_notify(CPU_UP_CANCELED, cpu, nr_calls, NULL);
403 }
404 return ret;
405}
406
407static int notify_online(unsigned int cpu)
408{
409 cpu_notify(CPU_ONLINE, cpu);
410 return 0;
411}
412 346
413static int bringup_wait_for_ap(unsigned int cpu) 347static int bringup_wait_for_ap(unsigned int cpu)
414{ 348{
@@ -433,10 +367,8 @@ static int bringup_cpu(unsigned int cpu)
433 /* Arch-specific enabling code. */ 367 /* Arch-specific enabling code. */
434 ret = __cpu_up(cpu, idle); 368 ret = __cpu_up(cpu, idle);
435 irq_unlock_sparse(); 369 irq_unlock_sparse();
436 if (ret) { 370 if (ret)
437 cpu_notify(CPU_UP_CANCELED, cpu);
438 return ret; 371 return ret;
439 }
440 ret = bringup_wait_for_ap(cpu); 372 ret = bringup_wait_for_ap(cpu);
441 BUG_ON(!cpu_online(cpu)); 373 BUG_ON(!cpu_online(cpu));
442 return ret; 374 return ret;
@@ -565,11 +497,6 @@ static void cpuhp_thread_fun(unsigned int cpu)
565 BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE); 497 BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
566 498
567 undo_cpu_down(cpu, st); 499 undo_cpu_down(cpu, st);
568 /*
569 * This is a momentary workaround to keep the notifier users
570 * happy. Will go away once we got rid of the notifiers.
571 */
572 cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
573 st->rollback = false; 500 st->rollback = false;
574 } else { 501 } else {
575 /* Cannot happen .... */ 502 /* Cannot happen .... */
@@ -659,22 +586,6 @@ void __init cpuhp_threads_init(void)
659 kthread_unpark(this_cpu_read(cpuhp_state.thread)); 586 kthread_unpark(this_cpu_read(cpuhp_state.thread));
660} 587}
661 588
662EXPORT_SYMBOL(register_cpu_notifier);
663EXPORT_SYMBOL(__register_cpu_notifier);
664void unregister_cpu_notifier(struct notifier_block *nb)
665{
666 cpu_maps_update_begin();
667 raw_notifier_chain_unregister(&cpu_chain, nb);
668 cpu_maps_update_done();
669}
670EXPORT_SYMBOL(unregister_cpu_notifier);
671
672void __unregister_cpu_notifier(struct notifier_block *nb)
673{
674 raw_notifier_chain_unregister(&cpu_chain, nb);
675}
676EXPORT_SYMBOL(__unregister_cpu_notifier);
677
678#ifdef CONFIG_HOTPLUG_CPU 589#ifdef CONFIG_HOTPLUG_CPU
679/** 590/**
680 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU 591 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
@@ -741,20 +652,6 @@ static inline void check_for_tasks(int dead_cpu)
741 read_unlock(&tasklist_lock); 652 read_unlock(&tasklist_lock);
742} 653}
743 654
744static int notify_down_prepare(unsigned int cpu)
745{
746 int err, nr_calls = 0;
747
748 err = __cpu_notify(CPU_DOWN_PREPARE, cpu, -1, &nr_calls);
749 if (err) {
750 nr_calls--;
751 __cpu_notify(CPU_DOWN_FAILED, cpu, nr_calls, NULL);
752 pr_warn("%s: attempt to take down CPU %u failed\n",
753 __func__, cpu);
754 }
755 return err;
756}
757
758/* Take this CPU down. */ 655/* Take this CPU down. */
759static int take_cpu_down(void *_param) 656static int take_cpu_down(void *_param)
760{ 657{
@@ -833,13 +730,6 @@ static int takedown_cpu(unsigned int cpu)
833 return 0; 730 return 0;
834} 731}
835 732
836static int notify_dead(unsigned int cpu)
837{
838 cpu_notify_nofail(CPU_DEAD, cpu);
839 check_for_tasks(cpu);
840 return 0;
841}
842
843static void cpuhp_complete_idle_dead(void *arg) 733static void cpuhp_complete_idle_dead(void *arg)
844{ 734{
845 struct cpuhp_cpu_state *st = arg; 735 struct cpuhp_cpu_state *st = arg;
@@ -863,9 +753,7 @@ void cpuhp_report_idle_dead(void)
863} 753}
864 754
865#else 755#else
866#define notify_down_prepare NULL
867#define takedown_cpu NULL 756#define takedown_cpu NULL
868#define notify_dead NULL
869#endif 757#endif
870 758
871#ifdef CONFIG_HOTPLUG_CPU 759#ifdef CONFIG_HOTPLUG_CPU
@@ -924,9 +812,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
924 hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE; 812 hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
925out: 813out:
926 cpu_hotplug_done(); 814 cpu_hotplug_done();
927 /* This post dead nonsense must die */
928 if (!ret && hasdied)
929 cpu_notify_nofail(CPU_POST_DEAD, cpu);
930 return ret; 815 return ret;
931} 816}
932 817
@@ -1292,17 +1177,6 @@ static struct cpuhp_step cpuhp_bp_states[] = {
1292 .teardown.single = rcutree_dead_cpu, 1177 .teardown.single = rcutree_dead_cpu,
1293 }, 1178 },
1294 /* 1179 /*
1295 * Preparatory and dead notifiers. Will be replaced once the notifiers
1296 * are converted to states.
1297 */
1298 [CPUHP_NOTIFY_PREPARE] = {
1299 .name = "notify:prepare",
1300 .startup.single = notify_prepare,
1301 .teardown.single = notify_dead,
1302 .skip_onerr = true,
1303 .cant_stop = true,
1304 },
1305 /*
1306 * On the tear-down path, timers_dead_cpu() must be invoked 1180 * On the tear-down path, timers_dead_cpu() must be invoked
1307 * before blk_mq_queue_reinit_notify() from notify_dead(), 1181 * before blk_mq_queue_reinit_notify() from notify_dead(),
1308 * otherwise a RCU stall occurs. 1182 * otherwise a RCU stall occurs.
@@ -1391,17 +1265,6 @@ static struct cpuhp_step cpuhp_ap_states[] = {
1391 .startup.single = rcutree_online_cpu, 1265 .startup.single = rcutree_online_cpu,
1392 .teardown.single = rcutree_offline_cpu, 1266 .teardown.single = rcutree_offline_cpu,
1393 }, 1267 },
1394
1395 /*
1396 * Online/down_prepare notifiers. Will be removed once the notifiers
1397 * are converted to states.
1398 */
1399 [CPUHP_AP_NOTIFY_ONLINE] = {
1400 .name = "notify:online",
1401 .startup.single = notify_online,
1402 .teardown.single = notify_down_prepare,
1403 .skip_onerr = true,
1404 },
1405#endif 1268#endif
1406 /* 1269 /*
1407 * The dynamically registered state space is here 1270 * The dynamically registered state space is here
@@ -1432,23 +1295,53 @@ static int cpuhp_cb_check(enum cpuhp_state state)
1432 return 0; 1295 return 0;
1433} 1296}
1434 1297
1435static void cpuhp_store_callbacks(enum cpuhp_state state, 1298/*
1436 const char *name, 1299 * Returns a free for dynamic slot assignment of the Online state. The states
1437 int (*startup)(unsigned int cpu), 1300 * are protected by the cpuhp_slot_states mutex and an empty slot is identified
1438 int (*teardown)(unsigned int cpu), 1301 * by having no name assigned.
1439 bool multi_instance) 1302 */
1303static int cpuhp_reserve_state(enum cpuhp_state state)
1304{
1305 enum cpuhp_state i;
1306
1307 for (i = CPUHP_AP_ONLINE_DYN; i <= CPUHP_AP_ONLINE_DYN_END; i++) {
1308 if (!cpuhp_ap_states[i].name)
1309 return i;
1310 }
1311 WARN(1, "No more dynamic states available for CPU hotplug\n");
1312 return -ENOSPC;
1313}
1314
1315static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
1316 int (*startup)(unsigned int cpu),
1317 int (*teardown)(unsigned int cpu),
1318 bool multi_instance)
1440{ 1319{
1441 /* (Un)Install the callbacks for further cpu hotplug operations */ 1320 /* (Un)Install the callbacks for further cpu hotplug operations */
1442 struct cpuhp_step *sp; 1321 struct cpuhp_step *sp;
1322 int ret = 0;
1443 1323
1444 mutex_lock(&cpuhp_state_mutex); 1324 mutex_lock(&cpuhp_state_mutex);
1325
1326 if (state == CPUHP_AP_ONLINE_DYN) {
1327 ret = cpuhp_reserve_state(state);
1328 if (ret < 0)
1329 goto out;
1330 state = ret;
1331 }
1445 sp = cpuhp_get_step(state); 1332 sp = cpuhp_get_step(state);
1333 if (name && sp->name) {
1334 ret = -EBUSY;
1335 goto out;
1336 }
1446 sp->startup.single = startup; 1337 sp->startup.single = startup;
1447 sp->teardown.single = teardown; 1338 sp->teardown.single = teardown;
1448 sp->name = name; 1339 sp->name = name;
1449 sp->multi_instance = multi_instance; 1340 sp->multi_instance = multi_instance;
1450 INIT_HLIST_HEAD(&sp->list); 1341 INIT_HLIST_HEAD(&sp->list);
1342out:
1451 mutex_unlock(&cpuhp_state_mutex); 1343 mutex_unlock(&cpuhp_state_mutex);
1344 return ret;
1452} 1345}
1453 1346
1454static void *cpuhp_get_teardown_cb(enum cpuhp_state state) 1347static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
@@ -1509,29 +1402,6 @@ static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
1509 } 1402 }
1510} 1403}
1511 1404
1512/*
1513 * Returns a free for dynamic slot assignment of the Online state. The states
1514 * are protected by the cpuhp_slot_states mutex and an empty slot is identified
1515 * by having no name assigned.
1516 */
1517static int cpuhp_reserve_state(enum cpuhp_state state)
1518{
1519 enum cpuhp_state i;
1520
1521 mutex_lock(&cpuhp_state_mutex);
1522 for (i = CPUHP_AP_ONLINE_DYN; i <= CPUHP_AP_ONLINE_DYN_END; i++) {
1523 if (cpuhp_ap_states[i].name)
1524 continue;
1525
1526 cpuhp_ap_states[i].name = "Reserved";
1527 mutex_unlock(&cpuhp_state_mutex);
1528 return i;
1529 }
1530 mutex_unlock(&cpuhp_state_mutex);
1531 WARN(1, "No more dynamic states available for CPU hotplug\n");
1532 return -ENOSPC;
1533}
1534
1535int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node, 1405int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
1536 bool invoke) 1406 bool invoke)
1537{ 1407{
@@ -1580,11 +1450,13 @@ EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
1580 1450
1581/** 1451/**
1582 * __cpuhp_setup_state - Setup the callbacks for an hotplug machine state 1452 * __cpuhp_setup_state - Setup the callbacks for an hotplug machine state
1583 * @state: The state to setup 1453 * @state: The state to setup
1584 * @invoke: If true, the startup function is invoked for cpus where 1454 * @invoke: If true, the startup function is invoked for cpus where
1585 * cpu state >= @state 1455 * cpu state >= @state
1586 * @startup: startup callback function 1456 * @startup: startup callback function
1587 * @teardown: teardown callback function 1457 * @teardown: teardown callback function
1458 * @multi_instance: State is set up for multiple instances which get
1459 * added afterwards.
1588 * 1460 *
1589 * Returns: 1461 * Returns:
1590 * On success: 1462 * On success:
@@ -1599,25 +1471,16 @@ int __cpuhp_setup_state(enum cpuhp_state state,
1599 bool multi_instance) 1471 bool multi_instance)
1600{ 1472{
1601 int cpu, ret = 0; 1473 int cpu, ret = 0;
1602 int dyn_state = 0;
1603 1474
1604 if (cpuhp_cb_check(state) || !name) 1475 if (cpuhp_cb_check(state) || !name)
1605 return -EINVAL; 1476 return -EINVAL;
1606 1477
1607 get_online_cpus(); 1478 get_online_cpus();
1608 1479
1609 /* currently assignments for the ONLINE state are possible */ 1480 ret = cpuhp_store_callbacks(state, name, startup, teardown,
1610 if (state == CPUHP_AP_ONLINE_DYN) { 1481 multi_instance);
1611 dyn_state = 1;
1612 ret = cpuhp_reserve_state(state);
1613 if (ret < 0)
1614 goto out;
1615 state = ret;
1616 }
1617 1482
1618 cpuhp_store_callbacks(state, name, startup, teardown, multi_instance); 1483 if (ret || !invoke || !startup)
1619
1620 if (!invoke || !startup)
1621 goto out; 1484 goto out;
1622 1485
1623 /* 1486 /*
@@ -1641,7 +1504,11 @@ int __cpuhp_setup_state(enum cpuhp_state state,
1641 } 1504 }
1642out: 1505out:
1643 put_online_cpus(); 1506 put_online_cpus();
1644 if (!ret && dyn_state) 1507 /*
1508 * If the requested state is CPUHP_AP_ONLINE_DYN, return the
1509 * dynamically allocated state in case of success.
1510 */
1511 if (!ret && state == CPUHP_AP_ONLINE_DYN)
1645 return state; 1512 return state;
1646 return ret; 1513 return ret;
1647} 1514}
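
Reviewer note: the kerneldoc hunk above also documents @multi_instance. When a state is set up as multi-instance, every registered hlist_node gets the startup/teardown callbacks with a (cpu, node) pair, which is how several devices share one hotplug slot. A hypothetical sketch of that usage (widget_* names are invented) against the public wrappers cpuhp_setup_state_multi() and cpuhp_state_add_instance():

#include <linux/cpuhotplug.h>
#include <linux/list.h>
#include <linux/printk.h>

struct widget {
	struct hlist_node node;		/* links the device into the state's instance list */
	/* ...per-device data... */
};

static enum cpuhp_state widget_hp_online;

static int widget_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct widget *w = hlist_entry(node, struct widget, node);

	pr_debug("widget %p: CPU %u online\n", w, cpu);
	return 0;
}

static int widget_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct widget *w = hlist_entry(node, struct widget, node);

	pr_debug("widget %p: CPU %u going down\n", w, cpu);
	return 0;
}

static int __init widget_core_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "widget:online",
				      widget_cpu_online, widget_cpu_offline);
	if (ret < 0)
		return ret;
	widget_hp_online = ret;
	return 0;
}

static int widget_probe_one(struct widget *w)
{
	/* runs widget_cpu_online(cpu, &w->node) on every online CPU */
	return cpuhp_state_add_instance(widget_hp_online, &w->node);
}

static void widget_remove_one(struct widget *w)
{
	cpuhp_state_remove_instance(widget_hp_online, &w->node);
}
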
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index cb66a4648840..b06848a104e6 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1538,30 +1538,6 @@ config NOTIFIER_ERROR_INJECTION
1538 1538
1539 Say N if unsure. 1539 Say N if unsure.
1540 1540
1541config CPU_NOTIFIER_ERROR_INJECT
1542 tristate "CPU notifier error injection module"
1543 depends on HOTPLUG_CPU && NOTIFIER_ERROR_INJECTION
1544 help
1545 This option provides a kernel module that can be used to test
1546 the error handling of the cpu notifiers by injecting artificial
1547 errors to CPU notifier chain callbacks. It is controlled through
1548 debugfs interface under /sys/kernel/debug/notifier-error-inject/cpu
1549
1550 If the notifier call chain should be failed with some events
1551 notified, write the error code to "actions/<notifier event>/error".
1552
1553 Example: Inject CPU offline error (-1 == -EPERM)
1554
1555 # cd /sys/kernel/debug/notifier-error-inject/cpu
1556 # echo -1 > actions/CPU_DOWN_PREPARE/error
1557 # echo 0 > /sys/devices/system/cpu/cpu1/online
1558 bash: echo: write error: Operation not permitted
1559
1560 To compile this code as a module, choose M here: the module will
1561 be called cpu-notifier-error-inject.
1562
1563 If unsure, say N.
1564
1565config PM_NOTIFIER_ERROR_INJECT 1541config PM_NOTIFIER_ERROR_INJECT
1566 tristate "PM notifier error injection module" 1542 tristate "PM notifier error injection module"
1567 depends on PM && NOTIFIER_ERROR_INJECTION 1543 depends on PM && NOTIFIER_ERROR_INJECTION
diff --git a/lib/Makefile b/lib/Makefile
index 50144a3aeebd..bc4073a8cd08 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -128,7 +128,6 @@ obj-$(CONFIG_SWIOTLB) += swiotlb.o
128obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o iommu-common.o 128obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o iommu-common.o
129obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o 129obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
130obj-$(CONFIG_NOTIFIER_ERROR_INJECTION) += notifier-error-inject.o 130obj-$(CONFIG_NOTIFIER_ERROR_INJECTION) += notifier-error-inject.o
131obj-$(CONFIG_CPU_NOTIFIER_ERROR_INJECT) += cpu-notifier-error-inject.o
132obj-$(CONFIG_PM_NOTIFIER_ERROR_INJECT) += pm-notifier-error-inject.o 131obj-$(CONFIG_PM_NOTIFIER_ERROR_INJECT) += pm-notifier-error-inject.o
133obj-$(CONFIG_NETDEV_NOTIFIER_ERROR_INJECT) += netdev-notifier-error-inject.o 132obj-$(CONFIG_NETDEV_NOTIFIER_ERROR_INJECT) += netdev-notifier-error-inject.o
134obj-$(CONFIG_MEMORY_NOTIFIER_ERROR_INJECT) += memory-notifier-error-inject.o 133obj-$(CONFIG_MEMORY_NOTIFIER_ERROR_INJECT) += memory-notifier-error-inject.o
diff --git a/lib/cpu-notifier-error-inject.c b/lib/cpu-notifier-error-inject.c
deleted file mode 100644
index 0e2c9a1e958a..000000000000
--- a/lib/cpu-notifier-error-inject.c
+++ /dev/null
@@ -1,84 +0,0 @@
1#include <linux/kernel.h>
2#include <linux/module.h>
3#include <linux/cpu.h>
4
5#include "notifier-error-inject.h"
6
7static int priority;
8module_param(priority, int, 0);
9MODULE_PARM_DESC(priority, "specify cpu notifier priority");
10
11#define UP_PREPARE 0
12#define UP_PREPARE_FROZEN 0
13#define DOWN_PREPARE 0
14#define DOWN_PREPARE_FROZEN 0
15
16static struct notifier_err_inject cpu_notifier_err_inject = {
17 .actions = {
18 { NOTIFIER_ERR_INJECT_ACTION(UP_PREPARE) },
19 { NOTIFIER_ERR_INJECT_ACTION(UP_PREPARE_FROZEN) },
20 { NOTIFIER_ERR_INJECT_ACTION(DOWN_PREPARE) },
21 { NOTIFIER_ERR_INJECT_ACTION(DOWN_PREPARE_FROZEN) },
22 {}
23 }
24};
25
26static int notf_err_handle(struct notifier_err_inject_action *action)
27{
28 int ret;
29
30 ret = action->error;
31 if (ret)
32 pr_info("Injecting error (%d) to %s\n", ret, action->name);
33 return ret;
34}
35
36static int notf_err_inj_up_prepare(unsigned int cpu)
37{
38 if (!cpuhp_tasks_frozen)
39 return notf_err_handle(&cpu_notifier_err_inject.actions[0]);
40 else
41 return notf_err_handle(&cpu_notifier_err_inject.actions[1]);
42}
43
44static int notf_err_inj_dead(unsigned int cpu)
45{
46 if (!cpuhp_tasks_frozen)
47 return notf_err_handle(&cpu_notifier_err_inject.actions[2]);
48 else
49 return notf_err_handle(&cpu_notifier_err_inject.actions[3]);
50}
51
52static struct dentry *dir;
53
54static int err_inject_init(void)
55{
56 int err;
57
58 dir = notifier_err_inject_init("cpu", notifier_err_inject_dir,
59 &cpu_notifier_err_inject, priority);
60 if (IS_ERR(dir))
61 return PTR_ERR(dir);
62
63 err = cpuhp_setup_state_nocalls(CPUHP_NOTF_ERR_INJ_PREPARE,
64 "cpu-err-notif:prepare",
65 notf_err_inj_up_prepare,
66 notf_err_inj_dead);
67 if (err)
68 debugfs_remove_recursive(dir);
69
70 return err;
71}
72
73static void err_inject_exit(void)
74{
75 cpuhp_remove_state_nocalls(CPUHP_NOTF_ERR_INJ_PREPARE);
76 debugfs_remove_recursive(dir);
77}
78
79module_init(err_inject_init);
80module_exit(err_inject_exit);
81
82MODULE_DESCRIPTION("CPU notifier error injection module");
83MODULE_LICENSE("GPL");
84MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>");
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index ae95fc0e3214..5b4f60d43314 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -456,7 +456,7 @@ int kvm_timer_hyp_init(void)
456 kvm_info("virtual timer IRQ%d\n", host_vtimer_irq); 456 kvm_info("virtual timer IRQ%d\n", host_vtimer_irq);
457 457
458 cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING, 458 cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING,
459 "AP_KVM_ARM_TIMER_STARTING", kvm_timer_starting_cpu, 459 "kvm/arm/timer:starting", kvm_timer_starting_cpu,
460 kvm_timer_dying_cpu); 460 kvm_timer_dying_cpu);
461 return err; 461 return err;
462} 462}
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index 8cebfbc19e90..5114391b7e5a 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -428,7 +428,7 @@ int kvm_vgic_hyp_init(void)
428 } 428 }
429 429
430 ret = cpuhp_setup_state(CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING, 430 ret = cpuhp_setup_state(CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING,
431 "AP_KVM_ARM_VGIC_INIT_STARTING", 431 "kvm/arm/vgic:starting",
432 vgic_init_cpu_starting, vgic_init_cpu_dying); 432 vgic_init_cpu_starting, vgic_init_cpu_dying);
433 if (ret) { 433 if (ret) {
434 kvm_err("Cannot register vgic CPU notifier\n"); 434 kvm_err("Cannot register vgic CPU notifier\n");
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 994f81f8eecb..482612b4e496 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3944,7 +3944,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
3944 goto out_free_1; 3944 goto out_free_1;
3945 } 3945 }
3946 3946
3947 r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_STARTING, "AP_KVM_STARTING", 3947 r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_STARTING, "kvm/cpu:starting",
3948 kvm_starting_cpu, kvm_dying_cpu); 3948 kvm_starting_cpu, kvm_dying_cpu);
3949 if (r) 3949 if (r)
3950 goto out_free_2; 3950 goto out_free_2;