 arch/arm/mach-omap2/omap-wakeupgen.c   |  35
 arch/arm/mach-shmobile/platsmp-scu.c   |  26
 arch/arm64/kernel/fpsimd.c             |  22
 arch/ia64/kernel/mca.c                 |  26
 arch/mips/cavium-octeon/smp.c          |  24
 arch/mips/loongson64/loongson-3/smp.c  |  34
 arch/powerpc/mm/mmu_context_nohash.c   |  56
 arch/powerpc/platforms/powermac/smp.c  |  50
 arch/s390/mm/fault.c                   |  30
 arch/sh/kernel/cpu/sh4a/smp-shx3.c     |  26
 arch/sparc/kernel/smp_32.c             |   2
 arch/x86/kernel/apic/x2apic_uv_x.c     |  31
 arch/x86/kernel/cpu/microcode/core.c   |  52
 arch/x86/kernel/kvm.c                  |  43
 arch/x86/kernel/smpboot.c              |  11
 block/blk-softirq.c                    |  27
 drivers/acpi/processor_driver.c        |  91
 drivers/acpi/processor_throttling.c    |   4
 drivers/bus/arm-cci.c                  |  45
 drivers/bus/arm-ccn.c                  |  54
 drivers/bus/mips_cdmm.c                |  70
 drivers/cpufreq/cpufreq.c              |  41
 drivers/cpuidle/coupled.c              |  75
 drivers/cpuidle/cpuidle-powernv.c      |  51
 drivers/cpuidle/cpuidle-pseries.c      |  51
 drivers/md/raid5.c                     |  84
 drivers/md/raid5.h                     |   4
 drivers/net/ethernet/marvell/mvneta.c  | 232
 drivers/net/virtio_net.c               | 110
 drivers/oprofile/timer_int.c           |  44
 drivers/perf/arm_pmu.c                 |  44
 drivers/scsi/virtio_scsi.c             |  76
 include/acpi/processor.h               |   4
 include/linux/cpu.h                    |  12
 include/linux/cpuhotplug.h             | 136
 include/linux/padata.h                 |   2
 include/linux/perf/arm_pmu.h           |   2
 include/linux/relay.h                  |  23
 include/linux/slab.h                   |   8
 include/trace/events/cpuhp.h           |  28
 kernel/cpu.c                           | 512
 kernel/padata.c                        |  88
 kernel/relay.c                         | 124
 kernel/softirq.c                       |  27
 lib/cpu-notifier-error-inject.c        |  46
 lib/irq_poll.c                         |  26
 mm/page-writeback.c                    |  26
 mm/slab.c                              | 114
 mm/slub.c                              |  65
 tools/testing/radix-tree/linux/cpu.h   |  13
 50 files changed, 1452 insertions(+), 1375 deletions(-)
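
Every conversion in this series follows the same recipe: the multiplexed hotplug notifier, which switched on the CPU_* action code, is replaced by plain per-direction callbacks registered against a cpuhp state. A minimal sketch of the new style follows; the foo_* names are hypothetical and not taken from any file in this series:

    #include <linux/cpu.h>
    #include <linux/cpuhotplug.h>

    static int foo_cpu_online(unsigned int cpu)
    {
    	/* Called as @cpu reaches the state on the way up. */
    	return 0;
    }

    static int foo_cpu_dead(unsigned int cpu)
    {
    	/* Called as @cpu passes the state on the way down. */
    	return 0;
    }

    static int __init foo_init(void)
    {
    	/*
    	 * CPUHP_AP_ONLINE_DYN requests a dynamically allocated state;
    	 * on success the allocated state number is returned.
    	 */
    	int ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "foo:online",
    				    foo_cpu_online, foo_cpu_dead);
    	return ret < 0 ? ret : 0;
    }

Two differences from the notifier API recur throughout the diff: the FROZEN suspend/resume action variants disappear (the cpuhp core handles them), and callbacks return 0 or a negative errno instead of NOTIFY_* values.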
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
index 0c4754386532..369f95a703ac 100644
--- a/arch/arm/mach-omap2/omap-wakeupgen.c
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -322,34 +322,25 @@ static void irq_save_secure_context(void)
 #endif
 
 #ifdef CONFIG_HOTPLUG_CPU
-static int irq_cpu_hotplug_notify(struct notifier_block *self,
-				  unsigned long action, void *hcpu)
+static int omap_wakeupgen_cpu_online(unsigned int cpu)
 {
-	unsigned int cpu = (unsigned int)hcpu;
-
-	/*
-	 * Corresponding FROZEN transitions do not have to be handled,
-	 * they are handled by at a higher level
-	 * (drivers/cpuidle/coupled.c).
-	 */
-	switch (action) {
-	case CPU_ONLINE:
-		wakeupgen_irqmask_all(cpu, 0);
-		break;
-	case CPU_DEAD:
-		wakeupgen_irqmask_all(cpu, 1);
-		break;
-	}
-	return NOTIFY_OK;
+	wakeupgen_irqmask_all(cpu, 0);
+	return 0;
 }
 
-static struct notifier_block irq_hotplug_notifier = {
-	.notifier_call = irq_cpu_hotplug_notify,
-};
+static int omap_wakeupgen_cpu_dead(unsigned int cpu)
+{
+	wakeupgen_irqmask_all(cpu, 1);
+	return 0;
+}
 
 static void __init irq_hotplug_init(void)
 {
-	register_hotcpu_notifier(&irq_hotplug_notifier);
+	cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "arm/omap-wake:online",
+				  omap_wakeupgen_cpu_online, NULL);
+	cpuhp_setup_state_nocalls(CPUHP_ARM_OMAP_WAKE_DEAD,
+				  "arm/omap-wake:dead", NULL,
+				  omap_wakeupgen_cpu_dead);
 }
 #else
 static void __init irq_hotplug_init(void)
diff --git a/arch/arm/mach-shmobile/platsmp-scu.c b/arch/arm/mach-shmobile/platsmp-scu.c
index 8d478f1da265..d1ecaf37d142 100644
--- a/arch/arm/mach-shmobile/platsmp-scu.c
+++ b/arch/arm/mach-shmobile/platsmp-scu.c
@@ -21,26 +21,14 @@
 static phys_addr_t shmobile_scu_base_phys;
 static void __iomem *shmobile_scu_base;
 
-static int shmobile_smp_scu_notifier_call(struct notifier_block *nfb,
-					  unsigned long action, void *hcpu)
+static int shmobile_scu_cpu_prepare(unsigned int cpu)
 {
-	unsigned int cpu = (long)hcpu;
-
-	switch (action) {
-	case CPU_UP_PREPARE:
-		/* For this particular CPU register SCU SMP boot vector */
-		shmobile_smp_hook(cpu, virt_to_phys(shmobile_boot_scu),
-				  shmobile_scu_base_phys);
-		break;
-	};
-
-	return NOTIFY_OK;
+	/* For this particular CPU register SCU SMP boot vector */
+	shmobile_smp_hook(cpu, virt_to_phys(shmobile_boot_scu),
+			  shmobile_scu_base_phys);
+	return 0;
 }
 
-static struct notifier_block shmobile_smp_scu_notifier = {
-	.notifier_call = shmobile_smp_scu_notifier_call,
-};
-
 void __init shmobile_smp_scu_prepare_cpus(phys_addr_t scu_base_phys,
 					  unsigned int max_cpus)
 {
@@ -54,7 +42,9 @@ void __init shmobile_smp_scu_prepare_cpus(phys_addr_t scu_base_phys,
 	scu_power_mode(shmobile_scu_base, SCU_PM_NORMAL);
 
 	/* Use CPU notifier for reset vector control */
-	register_cpu_notifier(&shmobile_smp_scu_notifier);
+	cpuhp_setup_state_nocalls(CPUHP_ARM_SHMOBILE_SCU_PREPARE,
+				  "arm/shmobile-scu:prepare",
+				  shmobile_scu_cpu_prepare, NULL);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 975b274ee7b5..394c61db5566 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -299,28 +299,16 @@ static inline void fpsimd_pm_init(void) { }
 #endif /* CONFIG_CPU_PM */
 
 #ifdef CONFIG_HOTPLUG_CPU
-static int fpsimd_cpu_hotplug_notifier(struct notifier_block *nfb,
-				       unsigned long action,
-				       void *hcpu)
+static int fpsimd_cpu_dead(unsigned int cpu)
 {
-	unsigned int cpu = (long)hcpu;
-
-	switch (action) {
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
-		per_cpu(fpsimd_last_state, cpu) = NULL;
-		break;
-	}
-	return NOTIFY_OK;
+	per_cpu(fpsimd_last_state, cpu) = NULL;
+	return 0;
 }
 
-static struct notifier_block fpsimd_cpu_hotplug_notifier_block = {
-	.notifier_call = fpsimd_cpu_hotplug_notifier,
-};
-
 static inline void fpsimd_hotplug_init(void)
 {
-	register_cpu_notifier(&fpsimd_cpu_hotplug_notifier_block);
+	cpuhp_setup_state_nocalls(CPUHP_ARM64_FPSIMD_DEAD, "arm64/fpsimd:dead",
+				  NULL, fpsimd_cpu_dead);
 }
 
 #else
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index d47616c8b885..9509cc73b9c6 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1890,7 +1890,7 @@ ia64_mca_cpu_init(void *cpu_data)
 					      PAGE_KERNEL)));
 }
 
-static void ia64_mca_cmc_vector_adjust(void *dummy)
+static int ia64_mca_cpu_online(unsigned int cpu)
 {
 	unsigned long flags;
 
@@ -1898,25 +1898,9 @@ static void ia64_mca_cmc_vector_adjust(void *dummy)
 	if (!cmc_polling_enabled)
 		ia64_mca_cmc_vector_enable(NULL);
 	local_irq_restore(flags);
+	return 0;
 }
 
-static int mca_cpu_callback(struct notifier_block *nfb,
-			    unsigned long action,
-			    void *hcpu)
-{
-	switch (action) {
-	case CPU_ONLINE:
-	case CPU_ONLINE_FROZEN:
-		ia64_mca_cmc_vector_adjust(NULL);
-		break;
-	}
-	return NOTIFY_OK;
-}
-
-static struct notifier_block mca_cpu_notifier = {
-	.notifier_call = mca_cpu_callback
-};
-
 /*
  * ia64_mca_init
  *
@@ -2111,15 +2095,13 @@ ia64_mca_late_init(void)
 	if (!mca_init)
 		return 0;
 
-	register_hotcpu_notifier(&mca_cpu_notifier);
-
 	/* Setup the CMCI/P vector and handler */
 	setup_timer(&cmc_poll_timer, ia64_mca_cmc_poll, 0UL);
 
 	/* Unmask/enable the vector */
 	cmc_polling_enabled = 0;
-	schedule_work(&cmc_enable_work);
-
+	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "ia64/mca:online",
+			  ia64_mca_cpu_online, NULL);
 	IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __func__);
 
 #ifdef CONFIG_ACPI
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index 4d457d602d3b..256fe6f65cf2 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -380,29 +380,11 @@ static int octeon_update_boot_vector(unsigned int cpu)
 	return 0;
 }
 
-static int octeon_cpu_callback(struct notifier_block *nfb,
-	unsigned long action, void *hcpu)
-{
-	unsigned int cpu = (unsigned long)hcpu;
-
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
-		octeon_update_boot_vector(cpu);
-		break;
-	case CPU_ONLINE:
-		pr_info("Cpu %d online\n", cpu);
-		break;
-	case CPU_DEAD:
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
 static int register_cavium_notifier(void)
 {
-	hotcpu_notifier(octeon_cpu_callback, 0);
-	return 0;
+	return cpuhp_setup_state_nocalls(CPUHP_MIPS_SOC_PREPARE,
+					 "mips/cavium:prepare",
+					 octeon_update_boot_vector, NULL);
 }
 late_initcall(register_cavium_notifier);
 
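
Both MIPS conversions (cavium-octeon above, loongson64 below) register the shared CPUHP_MIPS_SOC_PREPARE state. PREPARE-stage callbacks run on a control CPU before the incoming CPU is started, so they may sleep and allocate. A hedged sketch of that shape, with hypothetical foo_* names:

    #include <linux/slab.h>

    static void *foo_buf[NR_CPUS];

    static int foo_cpu_prepare(unsigned int cpu)
    {
    	/* Control-CPU context: sleeping allocations are allowed here. */
    	foo_buf[cpu] = kzalloc(PAGE_SIZE, GFP_KERNEL);
    	return foo_buf[cpu] ? 0 : -ENOMEM;
    }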
diff --git a/arch/mips/loongson64/loongson-3/smp.c b/arch/mips/loongson64/loongson-3/smp.c
index 2fec6f753a35..99aab9f85904 100644
--- a/arch/mips/loongson64/loongson-3/smp.c
+++ b/arch/mips/loongson64/loongson-3/smp.c
@@ -677,7 +677,7 @@ void play_dead(void)
 	play_dead_at_ckseg1(state_addr);
 }
 
-void loongson3_disable_clock(int cpu)
+static int loongson3_disable_clock(unsigned int cpu)
 {
 	uint64_t core_id = cpu_data[cpu].core;
 	uint64_t package_id = cpu_data[cpu].package;
@@ -688,9 +688,10 @@ void loongson3_disable_clock(int cpu)
 	if (!(loongson_sysconf.workarounds & WORKAROUND_CPUHOTPLUG))
 		LOONGSON_FREQCTRL(package_id) &= ~(1 << (core_id * 4 + 3));
 	}
+	return 0;
 }
 
-void loongson3_enable_clock(int cpu)
+static int loongson3_enable_clock(unsigned int cpu)
 {
 	uint64_t core_id = cpu_data[cpu].core;
 	uint64_t package_id = cpu_data[cpu].package;
@@ -701,34 +702,15 @@ void loongson3_enable_clock(int cpu)
 	if (!(loongson_sysconf.workarounds & WORKAROUND_CPUHOTPLUG))
 		LOONGSON_FREQCTRL(package_id) |= 1 << (core_id * 4 + 3);
 	}
-}
-
-#define CPU_POST_DEAD_FROZEN	(CPU_POST_DEAD | CPU_TASKS_FROZEN)
-static int loongson3_cpu_callback(struct notifier_block *nfb,
-	unsigned long action, void *hcpu)
-{
-	unsigned int cpu = (unsigned long)hcpu;
-
-	switch (action) {
-	case CPU_POST_DEAD:
-	case CPU_POST_DEAD_FROZEN:
-		pr_info("Disable clock for CPU#%d\n", cpu);
-		loongson3_disable_clock(cpu);
-		break;
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
-		pr_info("Enable clock for CPU#%d\n", cpu);
-		loongson3_enable_clock(cpu);
-		break;
-	}
-
-	return NOTIFY_OK;
+	return 0;
 }
 
 static int register_loongson3_notifier(void)
 {
-	hotcpu_notifier(loongson3_cpu_callback, 0);
-	return 0;
+	return cpuhp_setup_state_nocalls(CPUHP_MIPS_SOC_PREPARE,
+					 "mips/loongson:prepare",
+					 loongson3_enable_clock,
+					 loongson3_disable_clock);
 }
 early_initcall(register_loongson3_notifier);
 
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 7d95bc402dba..c491f2c8f2b9 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -369,44 +369,34 @@ void destroy_context(struct mm_struct *mm)
 }
 
 #ifdef CONFIG_SMP
-
-static int mmu_context_cpu_notify(struct notifier_block *self,
-				  unsigned long action, void *hcpu)
+static int mmu_ctx_cpu_prepare(unsigned int cpu)
 {
-	unsigned int cpu = (unsigned int)(long)hcpu;
-
 	/* We don't touch CPU 0 map, it's allocated at aboot and kept
 	 * around forever
 	 */
 	if (cpu == boot_cpuid)
-		return NOTIFY_OK;
+		return 0;
 
-	switch (action) {
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
-		pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu);
-		stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
-		break;
-#ifdef CONFIG_HOTPLUG_CPU
-	case CPU_UP_CANCELED:
-	case CPU_UP_CANCELED_FROZEN:
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
-		pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu);
-		kfree(stale_map[cpu]);
-		stale_map[cpu] = NULL;
-
-		/* We also clear the cpu_vm_mask bits of CPUs going away */
-		clear_tasks_mm_cpumask(cpu);
-		break;
-#endif /* CONFIG_HOTPLUG_CPU */
-	}
-	return NOTIFY_OK;
+	pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu);
+	stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
+	return 0;
 }
 
-static struct notifier_block mmu_context_cpu_nb = {
-	.notifier_call = mmu_context_cpu_notify,
-};
+static int mmu_ctx_cpu_dead(unsigned int cpu)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+	if (cpu == boot_cpuid)
+		return 0;
+
+	pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu);
+	kfree(stale_map[cpu]);
+	stale_map[cpu] = NULL;
+
+	/* We also clear the cpu_vm_mask bits of CPUs going away */
+	clear_tasks_mm_cpumask(cpu);
+#endif
+	return 0;
+}
 
 #endif /* CONFIG_SMP */
 
@@ -469,7 +459,9 @@ void __init mmu_context_init(void)
 #else
 	stale_map[boot_cpuid] = memblock_virt_alloc(CTX_MAP_SIZE, 0);
 
-	register_cpu_notifier(&mmu_context_cpu_nb);
+	cpuhp_setup_state_nocalls(CPUHP_POWERPC_MMU_CTX_PREPARE,
+				  "powerpc/mmu/ctx:prepare",
+				  mmu_ctx_cpu_prepare, mmu_ctx_cpu_dead);
 #endif
 
 	printk(KERN_INFO
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 834868b9fdc9..366e4f510fcf 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -852,37 +852,33 @@ static void smp_core99_setup_cpu(int cpu_nr)
 
 #ifdef CONFIG_PPC64
 #ifdef CONFIG_HOTPLUG_CPU
-static int smp_core99_cpu_notify(struct notifier_block *self,
-				 unsigned long action, void *hcpu)
-{
-	int rc;
-
-	switch(action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
-		/* Open i2c bus if it was used for tb sync */
-		if (pmac_tb_clock_chip_host) {
-			rc = pmac_i2c_open(pmac_tb_clock_chip_host, 1);
-			if (rc) {
-				pr_err("Failed to open i2c bus for time sync\n");
-				return notifier_from_errno(rc);
-			}
-		}
-		break;
-	case CPU_ONLINE:
-	case CPU_UP_CANCELED:
-		/* Close i2c bus if it was used for tb sync */
-		if (pmac_tb_clock_chip_host)
-			pmac_i2c_close(pmac_tb_clock_chip_host);
-		break;
-	default:
-		break;
-	}
-	return NOTIFY_OK;
-}
-
-static struct notifier_block smp_core99_cpu_nb = {
-	.notifier_call = smp_core99_cpu_notify,
-};
+static unsigned int smp_core99_host_open;
+
+static int smp_core99_cpu_prepare(unsigned int cpu)
+{
+	int rc;
+
+	/* Open i2c bus if it was used for tb sync */
+	if (pmac_tb_clock_chip_host && !smp_core99_host_open) {
+		rc = pmac_i2c_open(pmac_tb_clock_chip_host, 1);
+		if (rc) {
+			pr_err("Failed to open i2c bus for time sync\n");
+			return notifier_from_errno(rc);
+		}
+		smp_core99_host_open = 1;
+	}
+	return 0;
+}
+
+static int smp_core99_cpu_online(unsigned int cpu)
+{
+	/* Close i2c bus if it was used for tb sync */
+	if (pmac_tb_clock_chip_host && smp_core99_host_open) {
+		pmac_i2c_close(pmac_tb_clock_chip_host);
+		smp_core99_host_open = 0;
+	}
+	return 0;
+}
 #endif /* CONFIG_HOTPLUG_CPU */
 
 static void __init smp_core99_bringup_done(void)
@@ -902,7 +898,11 @@ static void __init smp_core99_bringup_done(void)
 		g5_phy_disable_cpu1();
 	}
 #ifdef CONFIG_HOTPLUG_CPU
-	register_cpu_notifier(&smp_core99_cpu_nb);
+	cpuhp_setup_state_nocalls(CPUHP_POWERPC_PMAC_PREPARE,
+				  "powerpc/pmac:prepare", smp_core99_cpu_prepare,
+				  NULL);
+	cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "powerpc/pmac:online",
+				  smp_core99_cpu_online, NULL);
 #endif
 
 	if (ppc_md.progress)
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index a58bca62a93b..cbb73fabc91e 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -740,28 +740,21 @@ out:
 	put_task_struct(tsk);
 }
 
-static int pfault_cpu_notify(struct notifier_block *self, unsigned long action,
-			     void *hcpu)
+static int pfault_cpu_dead(unsigned int cpu)
 {
 	struct thread_struct *thread, *next;
 	struct task_struct *tsk;
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_DEAD:
-		spin_lock_irq(&pfault_lock);
-		list_for_each_entry_safe(thread, next, &pfault_list, list) {
-			thread->pfault_wait = 0;
-			list_del(&thread->list);
-			tsk = container_of(thread, struct task_struct, thread);
-			wake_up_process(tsk);
-			put_task_struct(tsk);
-		}
-		spin_unlock_irq(&pfault_lock);
-		break;
-	default:
-		break;
+	spin_lock_irq(&pfault_lock);
+	list_for_each_entry_safe(thread, next, &pfault_list, list) {
+		thread->pfault_wait = 0;
+		list_del(&thread->list);
+		tsk = container_of(thread, struct task_struct, thread);
+		wake_up_process(tsk);
+		put_task_struct(tsk);
 	}
-	return NOTIFY_OK;
+	spin_unlock_irq(&pfault_lock);
+	return 0;
 }
 
 static int __init pfault_irq_init(void)
@@ -775,7 +768,8 @@ static int __init pfault_irq_init(void)
 	if (rc)
 		goto out_pfault;
 	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
-	hotcpu_notifier(pfault_cpu_notify, 0);
+	cpuhp_setup_state_nocalls(CPUHP_S390_PFAULT_DEAD, "s390/pfault:dead",
+				  NULL, pfault_cpu_dead);
 	return 0;
 
 out_pfault:
diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
index 839612c8a0a0..0d3637c494bf 100644
--- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
@@ -122,32 +122,16 @@ static void shx3_update_boot_vector(unsigned int cpu)
 	__raw_writel(STBCR_RESET, STBCR_REG(cpu));
 }
 
-static int
-shx3_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+static int shx3_cpu_prepare(unsigned int cpu)
 {
-	unsigned int cpu = (unsigned int)hcpu;
-
-	switch (action) {
-	case CPU_UP_PREPARE:
-		shx3_update_boot_vector(cpu);
-		break;
-	case CPU_ONLINE:
-		pr_info("CPU %u is now online\n", cpu);
-		break;
-	case CPU_DEAD:
-		break;
-	}
-
-	return NOTIFY_OK;
+	shx3_update_boot_vector(cpu);
+	return 0;
 }
 
-static struct notifier_block shx3_cpu_notifier = {
-	.notifier_call = shx3_cpu_callback,
-};
-
 static int register_shx3_cpu_notifier(void)
 {
-	register_hotcpu_notifier(&shx3_cpu_notifier);
+	cpuhp_setup_state_nocalls(CPUHP_SH_SH3X_PREPARE, "sh/shx3:prepare",
+				  shx3_cpu_prepare, NULL);
 	return 0;
 }
 late_initcall(register_shx3_cpu_notifier);
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c
index fb30e7c6a5b1..e80e6ba3d500 100644
--- a/arch/sparc/kernel/smp_32.c
+++ b/arch/sparc/kernel/smp_32.c
@@ -352,9 +352,7 @@ static void sparc_start_secondary(void *arg)
 	preempt_disable();
 	cpu = smp_processor_id();
 
-	/* Invoke the CPU_STARTING notifier callbacks */
 	notify_cpu_starting(cpu);
-
 	arch_cpu_pre_online(arg);
 
 	/* Set the CPU in the cpu_online_mask */
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index f034c84f74c3..aeef53ce93e1 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -924,7 +924,7 @@ static void uv_heartbeat(unsigned long ignored)
 	mod_timer(timer, jiffies + SCIR_CPU_HB_INTERVAL);
 }
 
-static void uv_heartbeat_enable(int cpu)
+static int uv_heartbeat_enable(unsigned int cpu)
 {
 	while (!uv_cpu_scir_info(cpu)->enabled) {
 		struct timer_list *timer = &uv_cpu_scir_info(cpu)->timer;
@@ -938,43 +938,24 @@ static void uv_heartbeat_enable(int cpu)
 		/* also ensure that boot cpu is enabled */
 		cpu = 0;
 	}
+	return 0;
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static void uv_heartbeat_disable(int cpu)
+static int uv_heartbeat_disable(unsigned int cpu)
 {
 	if (uv_cpu_scir_info(cpu)->enabled) {
 		uv_cpu_scir_info(cpu)->enabled = 0;
 		del_timer(&uv_cpu_scir_info(cpu)->timer);
 	}
 	uv_set_cpu_scir_bits(cpu, 0xff);
-}
-
-/*
- * cpu hotplug notifier
- */
-static int uv_scir_cpu_notify(struct notifier_block *self, unsigned long action,
-			      void *hcpu)
-{
-	long cpu = (long)hcpu;
-
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_DOWN_FAILED:
-	case CPU_ONLINE:
-		uv_heartbeat_enable(cpu);
-		break;
-	case CPU_DOWN_PREPARE:
-		uv_heartbeat_disable(cpu);
-		break;
-	default:
-		break;
-	}
-	return NOTIFY_OK;
+	return 0;
 }
 
 static __init void uv_scir_register_cpu_notifier(void)
 {
-	hotcpu_notifier(uv_scir_cpu_notify, 0);
+	cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/x2apic-uvx:online",
+				  uv_heartbeat_enable, uv_heartbeat_disable);
 }
 
 #else /* !CONFIG_HOTPLUG_CPU */
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index df04b2d033f6..5ce5155f0695 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -558,55 +558,36 @@ static struct syscore_ops mc_syscore_ops = {
 	.resume			= mc_bp_resume,
 };
 
-static int
-mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
+static int mc_cpu_online(unsigned int cpu)
 {
-	unsigned int cpu = (unsigned long)hcpu;
 	struct device *dev;
 
 	dev = get_cpu_device(cpu);
+	microcode_update_cpu(cpu);
+	pr_debug("CPU%d added\n", cpu);
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_ONLINE:
-		microcode_update_cpu(cpu);
-		pr_debug("CPU%d added\n", cpu);
-		/*
-		 * "break" is missing on purpose here because we want to fall
-		 * through in order to create the sysfs group.
-		 */
-
-	case CPU_DOWN_FAILED:
-		if (sysfs_create_group(&dev->kobj, &mc_attr_group))
-			pr_err("Failed to create group for CPU%d\n", cpu);
-		break;
+	if (sysfs_create_group(&dev->kobj, &mc_attr_group))
+		pr_err("Failed to create group for CPU%d\n", cpu);
+	return 0;
+}
 
-	case CPU_DOWN_PREPARE:
-		/* Suspend is in progress, only remove the interface */
-		sysfs_remove_group(&dev->kobj, &mc_attr_group);
-		pr_debug("CPU%d removed\n", cpu);
-		break;
+static int mc_cpu_down_prep(unsigned int cpu)
+{
+	struct device *dev;
 
+	dev = get_cpu_device(cpu);
+	/* Suspend is in progress, only remove the interface */
+	sysfs_remove_group(&dev->kobj, &mc_attr_group);
+	pr_debug("CPU%d removed\n", cpu);
 	/*
-	 * case CPU_DEAD:
-	 *
 	 * When a CPU goes offline, don't free up or invalidate the copy of
 	 * the microcode in kernel memory, so that we can reuse it when the
 	 * CPU comes back online without unnecessarily requesting the userspace
 	 * for it again.
 	 */
-	}
-
-	/* The CPU refused to come up during a system resume */
-	if (action == CPU_UP_CANCELED_FROZEN)
-		microcode_fini_cpu(cpu);
-
-	return NOTIFY_OK;
+	return 0;
 }
 
-static struct notifier_block mc_cpu_notifier = {
-	.notifier_call = mc_cpu_callback,
-};
-
 static struct attribute *cpu_root_microcode_attrs[] = {
 	&dev_attr_reload.attr,
 	NULL
@@ -665,7 +646,8 @@ int __init microcode_init(void)
 		goto out_ucode_group;
 
 	register_syscore_ops(&mc_syscore_ops);
-	register_hotcpu_notifier(&mc_cpu_notifier);
+	cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
+				  mc_cpu_online, mc_cpu_down_prep);
 
 	pr_info("Microcode Update Driver: v" MICROCODE_VERSION
 		" <tigran@aivazian.fsnet.co.uk>, Peter Oruba\n");
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 865058d087ac..edbbfc854e39 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -423,12 +423,7 @@ static void __init kvm_smp_prepare_boot_cpu(void)
 	kvm_spinlock_init();
 }
 
-static void kvm_guest_cpu_online(void *dummy)
-{
-	kvm_guest_cpu_init();
-}
-
-static void kvm_guest_cpu_offline(void *dummy)
+static void kvm_guest_cpu_offline(void)
 {
 	kvm_disable_steal_time();
 	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
@@ -437,29 +432,21 @@ static void kvm_guest_cpu_offline(void *dummy)
 	apf_task_wake_all();
 }
 
-static int kvm_cpu_notify(struct notifier_block *self, unsigned long action,
-			  void *hcpu)
+static int kvm_cpu_online(unsigned int cpu)
 {
-	int cpu = (unsigned long)hcpu;
-	switch (action) {
-	case CPU_ONLINE:
-	case CPU_DOWN_FAILED:
-	case CPU_ONLINE_FROZEN:
-		smp_call_function_single(cpu, kvm_guest_cpu_online, NULL, 0);
-		break;
-	case CPU_DOWN_PREPARE:
-	case CPU_DOWN_PREPARE_FROZEN:
-		smp_call_function_single(cpu, kvm_guest_cpu_offline, NULL, 1);
-		break;
-	default:
-		break;
-	}
-	return NOTIFY_OK;
+	local_irq_disable();
+	kvm_guest_cpu_init();
+	local_irq_enable();
+	return 0;
 }
 
-static struct notifier_block kvm_cpu_notifier = {
-	.notifier_call = kvm_cpu_notify,
-};
+static int kvm_cpu_down_prepare(unsigned int cpu)
+{
+	local_irq_disable();
+	kvm_guest_cpu_offline();
+	local_irq_enable();
+	return 0;
+}
 #endif
 
 static void __init kvm_apf_trap_init(void)
@@ -494,7 +481,9 @@ void __init kvm_guest_init(void)
 
 #ifdef CONFIG_SMP
 	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
-	register_cpu_notifier(&kvm_cpu_notifier);
+	if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
+				      kvm_cpu_online, kvm_cpu_down_prepare) < 0)
+		pr_err("kvm_guest: Failed to install cpu hotplug callbacks\n");
 #else
 	kvm_guest_cpu_init();
 #endif
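
The deleted kvm notifier had to push kvm_guest_cpu_init()/kvm_guest_cpu_offline() onto the target CPU with smp_call_function_single(). AP-stage hotplug callbacks already execute on the hotplugged CPU itself, which is why the new callbacks only bracket the work with local_irq_disable()/local_irq_enable(). A sketch of that assumption, with hypothetical foo_* names:

    static int foo_cpu_online(unsigned int cpu)
    {
    	/* For AP states the callback runs on @cpu itself. */
    	WARN_ON(cpu != smp_processor_id());
    	local_irq_disable();
    	foo_init_this_cpu();	/* hypothetical per-cpu setup */
    	local_irq_enable();
    	return 0;
    }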
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 7249dcf2cbcb..42a93621f5b0 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1115,17 +1115,8 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
 
 	common_cpu_up(cpu, tidle);
 
-	/*
-	 * We have to walk the irq descriptors to setup the vector
-	 * space for the cpu which comes online. Prevent irq
-	 * alloc/free across the bringup.
-	 */
-	irq_lock_sparse();
-
 	err = do_boot_cpu(apicid, cpu, tidle);
-
 	if (err) {
-		irq_unlock_sparse();
 		pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
 		return -EIO;
 	}
@@ -1143,8 +1134,6 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
 		touch_nmi_watchdog();
 	}
 
-	irq_unlock_sparse();
-
 	return 0;
 }
 
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index 53b1737e978d..96631e6a22b9 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -78,30 +78,21 @@ static int raise_blk_irq(int cpu, struct request *rq)
 }
 #endif
 
-static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
-			  void *hcpu)
+static int blk_softirq_cpu_dead(unsigned int cpu)
 {
 	/*
 	 * If a CPU goes away, splice its entries to the current CPU
 	 * and trigger a run of the softirq
 	 */
-	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
-		int cpu = (unsigned long) hcpu;
-
-		local_irq_disable();
-		list_splice_init(&per_cpu(blk_cpu_done, cpu),
-				 this_cpu_ptr(&blk_cpu_done));
-		raise_softirq_irqoff(BLOCK_SOFTIRQ);
-		local_irq_enable();
-	}
+	local_irq_disable();
+	list_splice_init(&per_cpu(blk_cpu_done, cpu),
+			 this_cpu_ptr(&blk_cpu_done));
+	raise_softirq_irqoff(BLOCK_SOFTIRQ);
+	local_irq_enable();
 
-	return NOTIFY_OK;
+	return 0;
 }
 
-static struct notifier_block blk_cpu_notifier = {
-	.notifier_call = blk_cpu_notify,
-};
-
 void __blk_complete_request(struct request *req)
 {
 	int ccpu, cpu;
@@ -180,7 +171,9 @@ static __init int blk_softirq_init(void)
 		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
 
 	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
-	register_hotcpu_notifier(&blk_cpu_notifier);
+	cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
+				  "block/softirq:dead", NULL,
+				  blk_softirq_cpu_dead);
 	return 0;
 }
 subsys_initcall(blk_softirq_init);
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 8f8552a19e63..9d5f0c7ed3f7 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -110,55 +110,46 @@ static void acpi_processor_notify(acpi_handle handle, u32 event, void *data)
 
 static int __acpi_processor_start(struct acpi_device *device);
 
-static int acpi_cpu_soft_notify(struct notifier_block *nfb,
-				unsigned long action, void *hcpu)
+static int acpi_soft_cpu_online(unsigned int cpu)
 {
-	unsigned int cpu = (unsigned long)hcpu;
 	struct acpi_processor *pr = per_cpu(processors, cpu);
 	struct acpi_device *device;
-	action &= ~CPU_TASKS_FROZEN;
 
-	switch (action) {
-	case CPU_ONLINE:
-	case CPU_DEAD:
-		break;
-	default:
-		return NOTIFY_DONE;
-	}
-
 	if (!pr || acpi_bus_get_device(pr->handle, &device))
-		return NOTIFY_DONE;
-
-	if (action == CPU_ONLINE) {
-		/*
-		 * CPU got physically hotplugged and onlined for the first time:
-		 * Initialize missing things.
-		 */
-		if (pr->flags.need_hotplug_init) {
-			int ret;
-
-			pr_info("Will online and init hotplugged CPU: %d\n",
-				pr->id);
-			pr->flags.need_hotplug_init = 0;
-			ret = __acpi_processor_start(device);
-			WARN(ret, "Failed to start CPU: %d\n", pr->id);
-		} else {
-			/* Normal CPU soft online event. */
-			acpi_processor_ppc_has_changed(pr, 0);
-			acpi_processor_hotplug(pr);
-			acpi_processor_reevaluate_tstate(pr, action);
-			acpi_processor_tstate_has_changed(pr);
-		}
-	} else if (action == CPU_DEAD) {
-		/* Invalidate flag.throttling after the CPU is offline. */
-		acpi_processor_reevaluate_tstate(pr, action);
-	}
-	return NOTIFY_OK;
+		return 0;
+	/*
+	 * CPU got physically hotplugged and onlined for the first time:
+	 * Initialize missing things.
+	 */
+	if (pr->flags.need_hotplug_init) {
+		int ret;
+
+		pr_info("Will online and init hotplugged CPU: %d\n",
+			pr->id);
+		pr->flags.need_hotplug_init = 0;
+		ret = __acpi_processor_start(device);
+		WARN(ret, "Failed to start CPU: %d\n", pr->id);
+	} else {
+		/* Normal CPU soft online event. */
+		acpi_processor_ppc_has_changed(pr, 0);
+		acpi_processor_hotplug(pr);
+		acpi_processor_reevaluate_tstate(pr, false);
+		acpi_processor_tstate_has_changed(pr);
+	}
+	return 0;
 }
 
-static struct notifier_block acpi_cpu_notifier = {
-	.notifier_call = acpi_cpu_soft_notify,
-};
+static int acpi_soft_cpu_dead(unsigned int cpu)
+{
+	struct acpi_processor *pr = per_cpu(processors, cpu);
+	struct acpi_device *device;
+
+	if (!pr || acpi_bus_get_device(pr->handle, &device))
+		return 0;
+
+	acpi_processor_reevaluate_tstate(pr, true);
+	return 0;
+}
 
 #ifdef CONFIG_ACPI_CPU_FREQ_PSS
 static int acpi_pss_perf_init(struct acpi_processor *pr,
@@ -303,7 +294,7 @@ static int acpi_processor_stop(struct device *dev)
  * This is needed for the powernow-k8 driver, that works even without
  * ACPI, but needs symbols from this driver
  */
-
+static enum cpuhp_state hp_online;
 static int __init acpi_processor_driver_init(void)
 {
 	int result = 0;
@@ -315,11 +306,22 @@ static int __init acpi_processor_driver_init(void)
 	if (result < 0)
 		return result;
 
-	register_hotcpu_notifier(&acpi_cpu_notifier);
+	result = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+					   "acpi/cpu-drv:online",
+					   acpi_soft_cpu_online, NULL);
+	if (result < 0)
+		goto err;
+	hp_online = result;
+	cpuhp_setup_state_nocalls(CPUHP_ACPI_CPUDRV_DEAD, "acpi/cpu-drv:dead",
+				  NULL, acpi_soft_cpu_dead);
+
 	acpi_thermal_cpufreq_init();
 	acpi_processor_ppc_init();
 	acpi_processor_throttling_init();
 	return 0;
+err:
+	driver_unregister(&acpi_processor_driver);
+	return result;
 }
 
 static void __exit acpi_processor_driver_exit(void)
@@ -329,7 +331,8 @@ static void __exit acpi_processor_driver_exit(void)
 
 	acpi_processor_ppc_exit();
 	acpi_thermal_cpufreq_exit();
-	unregister_hotcpu_notifier(&acpi_cpu_notifier);
+	cpuhp_remove_state_nocalls(hp_online);
+	cpuhp_remove_state_nocalls(CPUHP_ACPI_CPUDRV_DEAD);
 	driver_unregister(&acpi_processor_driver);
 }
 
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index c72e64893d03..d51ca1c05619 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -375,11 +375,11 @@ int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
  *	3. TSD domain
  */
 void acpi_processor_reevaluate_tstate(struct acpi_processor *pr,
-					unsigned long action)
+					bool is_dead)
 {
 	int result = 0;
 
-	if (action == CPU_DEAD) {
+	if (is_dead) {
 		/* When one CPU is offline, the T-state throttling
 		 * will be invalidated.
 		 */
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index ffa7c9dcbd7a..890082315054 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -144,15 +144,12 @@ struct cci_pmu {
 	int num_cntrs;
 	atomic_t active_events;
 	struct mutex reserve_mutex;
-	struct list_head entry;
+	struct hlist_node node;
 	cpumask_t cpus;
 };
 
 #define to_cci_pmu(c)	(container_of(c, struct cci_pmu, pmu))
 
-static DEFINE_MUTEX(cci_pmu_mutex);
-static LIST_HEAD(cci_pmu_list);
-
 enum cci_models {
 #ifdef CONFIG_ARM_CCI400_PMU
 	CCI400_R0,
@@ -1506,25 +1503,21 @@ static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
 	return perf_pmu_register(&cci_pmu->pmu, name, -1);
 }
 
-static int cci_pmu_offline_cpu(unsigned int cpu)
+static int cci_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
 {
-	struct cci_pmu *cci_pmu;
+	struct cci_pmu *cci_pmu = hlist_entry_safe(node, struct cci_pmu, node);
 	unsigned int target;
 
-	mutex_lock(&cci_pmu_mutex);
-	list_for_each_entry(cci_pmu, &cci_pmu_list, entry) {
-		if (!cpumask_test_and_clear_cpu(cpu, &cci_pmu->cpus))
-			continue;
-		target = cpumask_any_but(cpu_online_mask, cpu);
-		if (target >= nr_cpu_ids)
-			continue;
-		/*
-		 * TODO: migrate context once core races on event->ctx have
-		 * been fixed.
-		 */
-		cpumask_set_cpu(target, &cci_pmu->cpus);
-	}
-	mutex_unlock(&cci_pmu_mutex);
+	if (!cpumask_test_and_clear_cpu(cpu, &cci_pmu->cpus))
+		return 0;
+	target = cpumask_any_but(cpu_online_mask, cpu);
+	if (target >= nr_cpu_ids)
+		return 0;
+	/*
+	 * TODO: migrate context once core races on event->ctx have
+	 * been fixed.
+	 */
+	cpumask_set_cpu(target, &cci_pmu->cpus);
 	return 0;
 }
 
@@ -1768,10 +1761,8 @@ static int cci_pmu_probe(struct platform_device *pdev)
 	if (ret)
 		return ret;
 
-	mutex_lock(&cci_pmu_mutex);
-	list_add(&cci_pmu->entry, &cci_pmu_list);
-	mutex_unlock(&cci_pmu_mutex);
-
+	cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
+					 &cci_pmu->node);
 	pr_info("ARM %s PMU driver probed", cci_pmu->model->name);
 	return 0;
 }
@@ -1804,9 +1795,9 @@ static int __init cci_platform_init(void)
 {
 	int ret;
 
-	ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
-					"AP_PERF_ARM_CCI_ONLINE", NULL,
-					cci_pmu_offline_cpu);
+	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_CCI_ONLINE,
+				      "AP_PERF_ARM_CCI_ONLINE", NULL,
+				      cci_pmu_offline_cpu);
 	if (ret)
 		return ret;
 
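
The arm-cci driver above (and arm-ccn below) previously kept a private global list and mutex so that one notifier could walk every device instance. cpuhp_setup_state_multi() moves that bookkeeping into the cpuhp core: the callback is registered once, receives a per-instance hlist_node, and each probed device adds itself as an instance. Rough shape, with hypothetical foo_* names:

    #include <linux/cpuhotplug.h>

    struct foo_pmu {
    	struct hlist_node node;	/* instance hook for the cpuhp core */
    	cpumask_t cpus;
    };

    static int foo_offline_cpu(unsigned int cpu, struct hlist_node *node)
    {
    	struct foo_pmu *pmu = hlist_entry_safe(node, struct foo_pmu, node);

    	/* per-instance migration work for @cpu goes here */
    	return 0;
    }

    static int __init foo_init(void)
    {
    	/* one registration for the callback... */
    	return cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_CCI_ONLINE,
    				       "AP_PERF_ARM_CCI_ONLINE", NULL,
    				       foo_offline_cpu);
    }

    static int foo_probe(struct foo_pmu *pmu)
    {
    	/* ...and one instance registration per probed device. */
    	return cpuhp_state_add_instance_nocalls(
    			CPUHP_AP_PERF_ARM_CCI_ONLINE, &pmu->node);
    }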
diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c
index 884c0305e290..d1074d9b38ba 100644
--- a/drivers/bus/arm-ccn.c
+++ b/drivers/bus/arm-ccn.c
@@ -167,7 +167,7 @@ struct arm_ccn_dt {
 	struct hrtimer hrtimer;
 
 	cpumask_t cpu;
-	struct list_head entry;
+	struct hlist_node node;
 
 	struct pmu pmu;
 };
@@ -190,9 +190,6 @@ struct arm_ccn {
 	int mn_id;
 };
 
-static DEFINE_MUTEX(arm_ccn_mutex);
-static LIST_HEAD(arm_ccn_list);
-
 static int arm_ccn_node_to_xp(int node)
 {
 	return node / CCN_NUM_XP_PORTS;
@@ -1214,30 +1211,24 @@ static enum hrtimer_restart arm_ccn_pmu_timer_handler(struct hrtimer *hrtimer)
 }
 
 
-static int arm_ccn_pmu_offline_cpu(unsigned int cpu)
+static int arm_ccn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
 {
-	struct arm_ccn_dt *dt;
+	struct arm_ccn_dt *dt = hlist_entry_safe(node, struct arm_ccn_dt, node);
+	struct arm_ccn *ccn = container_of(dt, struct arm_ccn, dt);
 	unsigned int target;
 
-	mutex_lock(&arm_ccn_mutex);
-	list_for_each_entry(dt, &arm_ccn_list, entry) {
-		struct arm_ccn *ccn = container_of(dt, struct arm_ccn, dt);
-
-		if (!cpumask_test_and_clear_cpu(cpu, &dt->cpu))
-			continue;
-		target = cpumask_any_but(cpu_online_mask, cpu);
-		if (target >= nr_cpu_ids)
-			continue;
-		perf_pmu_migrate_context(&dt->pmu, cpu, target);
-		cpumask_set_cpu(target, &dt->cpu);
-		if (ccn->irq)
-			WARN_ON(irq_set_affinity_hint(ccn->irq, &dt->cpu) != 0);
-	}
-	mutex_unlock(&arm_ccn_mutex);
+	if (!cpumask_test_and_clear_cpu(cpu, &dt->cpu))
+		return 0;
+	target = cpumask_any_but(cpu_online_mask, cpu);
+	if (target >= nr_cpu_ids)
+		return 0;
+	perf_pmu_migrate_context(&dt->pmu, cpu, target);
+	cpumask_set_cpu(target, &dt->cpu);
+	if (ccn->irq)
+		WARN_ON(irq_set_affinity_hint(ccn->irq, &dt->cpu) != 0);
 	return 0;
 }
 
-
 static DEFINE_IDA(arm_ccn_pmu_ida);
 
 static int arm_ccn_pmu_init(struct arm_ccn *ccn)
@@ -1321,9 +1312,8 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
 	if (err)
 		goto error_pmu_register;
 
-	mutex_lock(&arm_ccn_mutex);
-	list_add(&ccn->dt.entry, &arm_ccn_list);
-	mutex_unlock(&arm_ccn_mutex);
+	cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
+					 &ccn->dt.node);
 	return 0;
 
 error_pmu_register:
@@ -1339,10 +1329,8 @@ static void arm_ccn_pmu_cleanup(struct arm_ccn *ccn)
 {
 	int i;
 
-	mutex_lock(&arm_ccn_mutex);
-	list_del(&ccn->dt.entry);
-	mutex_unlock(&arm_ccn_mutex);
-
+	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
+					    &ccn->dt.node);
 	if (ccn->irq)
 		irq_set_affinity_hint(ccn->irq, NULL);
 	for (i = 0; i < ccn->num_xps; i++)
@@ -1573,9 +1561,9 @@ static int __init arm_ccn_init(void)
 {
 	int i, ret;
 
-	ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
-					"AP_PERF_ARM_CCN_ONLINE", NULL,
-					arm_ccn_pmu_offline_cpu);
+	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_CCN_ONLINE,
+				      "AP_PERF_ARM_CCN_ONLINE", NULL,
+				      arm_ccn_pmu_offline_cpu);
 	if (ret)
 		return ret;
 
@@ -1587,7 +1575,7 @@ static int __init arm_ccn_init(void)
 
 static void __exit arm_ccn_exit(void)
 {
-	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE);
+	cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE);
 	platform_driver_unregister(&arm_ccn_driver);
 }
 
diff --git a/drivers/bus/mips_cdmm.c b/drivers/bus/mips_cdmm.c
index cad49bc38b3e..1b14256376d2 100644
--- a/drivers/bus/mips_cdmm.c
+++ b/drivers/bus/mips_cdmm.c
@@ -596,19 +596,20 @@ BUILD_PERDEV_HELPER(cpu_down) /* int mips_cdmm_cpu_down_helper(...) */
 BUILD_PERDEV_HELPER(cpu_up)         /* int mips_cdmm_cpu_up_helper(...) */
 
 /**
- * mips_cdmm_bus_down() - Tear down the CDMM bus.
- * @data:	Pointer to unsigned int CPU number.
+ * mips_cdmm_cpu_down_prep() - Callback for CPUHP DOWN_PREP:
+ *				Tear down the CDMM bus.
+ * @cpu:	unsigned int CPU number.
  *
  * This function is executed on the hotplugged CPU and calls the CDMM
  * driver cpu_down callback for all devices on that CPU.
  */
-static long mips_cdmm_bus_down(void *data)
+static int mips_cdmm_cpu_down_prep(unsigned int cpu)
 {
 	struct mips_cdmm_bus *bus;
 	long ret;
 
 	/* Inform all the devices on the bus */
-	ret = bus_for_each_dev(&mips_cdmm_bustype, NULL, data,
+	ret = bus_for_each_dev(&mips_cdmm_bustype, NULL, &cpu,
 			       mips_cdmm_cpu_down_helper);
 
 	/*
623} 624}
624 625
625/** 626/**
626 * mips_cdmm_bus_up() - Bring up the CDMM bus. 627 * mips_cdmm_cpu_online() - Callback for CPUHP ONLINE: Bring up the CDMM bus.
627 * @data: Pointer to unsigned int CPU number. 628 * @cpu: unsigned int CPU number.
628 * 629 *
629 * This work_on_cpu callback function is executed on a given CPU to discover 630 * This work_on_cpu callback function is executed on a given CPU to discover
630 * CDMM devices on that CPU, or to call the CDMM driver cpu_up callback for all 631 * CDMM devices on that CPU, or to call the CDMM driver cpu_up callback for all
@@ -634,7 +635,7 @@ static long mips_cdmm_bus_down(void *data)
  * initialisation. When CPUs are brought online the function is
  * invoked directly on the hotplugged CPU.
  */
-static long mips_cdmm_bus_up(void *data)
+static int mips_cdmm_cpu_online(unsigned int cpu)
 {
 	struct mips_cdmm_bus *bus;
 	long ret;
@@ -651,51 +652,13 @@ static long mips_cdmm_bus_up(void *data)
 		mips_cdmm_bus_discover(bus);
 	else
 		/* Inform all the devices on the bus */
-		ret = bus_for_each_dev(&mips_cdmm_bustype, NULL, data,
+		ret = bus_for_each_dev(&mips_cdmm_bustype, NULL, &cpu,
 				       mips_cdmm_cpu_up_helper);
 
 	return ret;
 }
 
 /**
- * mips_cdmm_cpu_notify() - Take action when a CPU is going online or offline.
- * @nb:		CPU notifier block .
- * @action:	Event that has taken place (CPU_*).
- * @data:	CPU number.
- *
- * This notifier is used to keep the CDMM buses updated as CPUs are offlined and
- * onlined. When CPUs go offline or come back online, so does their CDMM bus, so
- * devices must be informed. Also when CPUs come online for the first time the
- * devices on the CDMM bus need discovering.
- *
- * Returns:	NOTIFY_OK if event was used.
- *		NOTIFY_DONE if we didn't care.
- */
-static int mips_cdmm_cpu_notify(struct notifier_block *nb,
-				unsigned long action, void *data)
-{
-	unsigned int cpu = (unsigned int)data;
-
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_ONLINE:
-	case CPU_DOWN_FAILED:
-		mips_cdmm_bus_up(&cpu);
-		break;
-	case CPU_DOWN_PREPARE:
-		mips_cdmm_bus_down(&cpu);
-		break;
-	default:
-		return NOTIFY_DONE;
-	}
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block mips_cdmm_cpu_nb = {
-	.notifier_call = mips_cdmm_cpu_notify,
-};
-
-/**
  * mips_cdmm_init() - Initialise CDMM bus.
  *
  * Initialise CDMM bus, discover CDMM devices for online CPUs, and arrange for
@@ -703,7 +666,6 @@ static struct notifier_block mips_cdmm_cpu_nb = {
  */
 static int __init mips_cdmm_init(void)
 {
-	unsigned int cpu;
 	int ret;
 
 	/* Register the bus */
@@ -712,19 +674,11 @@ static int __init mips_cdmm_init(void)
 		return ret;
 
 	/* We want to be notified about new CPUs */
-	ret = register_cpu_notifier(&mips_cdmm_cpu_nb);
-	if (ret) {
+	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "bus/cdmm:online",
+				mips_cdmm_cpu_online, mips_cdmm_cpu_down_prep);
+	if (ret < 0)
 		pr_warn("cdmm: Failed to register CPU notifier\n");
-		goto out;
-	}
-
-	/* Discover devices on CDMM of online CPUs */
-	for_each_online_cpu(cpu)
-		work_on_cpu(cpu, mips_cdmm_bus_up, &cpu);
 
-	return 0;
-out:
-	bus_unregister(&mips_cdmm_bustype);
 	return ret;
 }
 subsys_initcall(mips_cdmm_init);
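
Note that mips_cdmm uses cpuhp_setup_state() rather than the _nocalls variant: during registration the startup callback is also invoked for every CPU that is already online, which is what lets the conversion drop the manual for_each_online_cpu()/work_on_cpu() discovery loop. A sketch under that assumption, with hypothetical names:

    static int __init foo_init(void)
    {
    	/* startup runs here for each already-online CPU as well */
    	return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "foo:online",
    				 foo_cpu_online, foo_cpu_down_prep);
    }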
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 3a64136bf21b..6e6c1fb60fbc 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1286,7 +1286,7 @@ out_free_policy:
 	return ret;
 }
 
-static void cpufreq_offline(unsigned int cpu);
+static int cpufreq_offline(unsigned int cpu);
 
 /**
  * cpufreq_add_dev - the cpufreq interface for a CPU device.
@@ -1321,7 +1321,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 	return ret;
 }
 
-static void cpufreq_offline(unsigned int cpu)
+static int cpufreq_offline(unsigned int cpu)
 {
 	struct cpufreq_policy *policy;
 	int ret;
@@ -1331,7 +1331,7 @@ static void cpufreq_offline(unsigned int cpu)
 	policy = cpufreq_cpu_get_raw(cpu);
 	if (!policy) {
 		pr_debug("%s: No cpu_data found\n", __func__);
-		return;
+		return 0;
 	}
 
 	down_write(&policy->rwsem);
@@ -1380,6 +1380,7 @@ static void cpufreq_offline(unsigned int cpu)
 
 unlock:
 	up_write(&policy->rwsem);
+	return 0;
 }
 
 /**
@@ -2295,28 +2296,6 @@ unlock:
 }
 EXPORT_SYMBOL(cpufreq_update_policy);
 
-static int cpufreq_cpu_callback(struct notifier_block *nfb,
-					unsigned long action, void *hcpu)
-{
-	unsigned int cpu = (unsigned long)hcpu;
-
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_ONLINE:
-	case CPU_DOWN_FAILED:
-		cpufreq_online(cpu);
-		break;
-
-	case CPU_DOWN_PREPARE:
-		cpufreq_offline(cpu);
-		break;
-	}
-	return NOTIFY_OK;
-}
-
-static struct notifier_block __refdata cpufreq_cpu_notifier = {
-	.notifier_call = cpufreq_cpu_callback,
-};
-
 /*********************************************************************
  *               BOOST						     *
  *********************************************************************/
@@ -2418,6 +2397,7 @@ EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
 /*********************************************************************
  *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
  *********************************************************************/
+static enum cpuhp_state hp_online;
 
 /**
  * cpufreq_register_driver - register a CPU Frequency driver
@@ -2480,7 +2460,14 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 		goto err_if_unreg;
 	}
 
-	register_hotcpu_notifier(&cpufreq_cpu_notifier);
+	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "cpufreq:online",
+					cpufreq_online,
+					cpufreq_offline);
+	if (ret < 0)
+		goto err_if_unreg;
+	hp_online = ret;
+	ret = 0;
+
 	pr_debug("driver %s up and running\n", driver_data->name);
 	goto out;
 
@@ -2519,7 +2506,7 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
 	get_online_cpus();
 	subsys_interface_unregister(&cpufreq_interface);
 	remove_boost_sysfs_file();
-	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
+	cpuhp_remove_state_nocalls(hp_online);
 
 	write_lock_irqsave(&cpufreq_driver_lock, flags);
 
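
cpufreq above (and the ACPI processor driver earlier) stores the return value of a CPUHP_AP_ONLINE_DYN registration in hp_online. That is the contract for dynamic states: the positive return value is the allocated state number, and later removal must name that number, not CPUHP_AP_ONLINE_DYN. A sketch, with hypothetical foo_* names:

    static enum cpuhp_state foo_hp_online;

    static int foo_register(void)
    {
    	int ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
    					    "foo:online", foo_online,
    					    foo_offline);
    	if (ret < 0)
    		return ret;
    	foo_hp_online = ret;	/* remember the allocated slot */
    	return 0;
    }

    static void foo_unregister(void)
    {
    	/* remove using the remembered slot */
    	cpuhp_remove_state_nocalls(foo_hp_online);
    }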
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index d5657d50ac40..71e586d7df71 100644
--- a/drivers/cpuidle/coupled.c
+++ b/drivers/cpuidle/coupled.c
@@ -749,65 +749,52 @@ static void cpuidle_coupled_allow_idle(struct cpuidle_coupled *coupled)
749 put_cpu(); 749 put_cpu();
750} 750}
751 751
752/** 752static int coupled_cpu_online(unsigned int cpu)
753 * cpuidle_coupled_cpu_notify - notifier called during hotplug transitions
754 * @nb: notifier block
755 * @action: hotplug transition
756 * @hcpu: target cpu number
757 *
758 * Called when a cpu is brought on or offline using hotplug. Updates the
759 * coupled cpu set appropriately
760 */
761static int cpuidle_coupled_cpu_notify(struct notifier_block *nb,
762 unsigned long action, void *hcpu)
763{ 753{
764 int cpu = (unsigned long)hcpu;
765 struct cpuidle_device *dev; 754 struct cpuidle_device *dev;
766 755
767 switch (action & ~CPU_TASKS_FROZEN) {
768 case CPU_UP_PREPARE:
769 case CPU_DOWN_PREPARE:
770 case CPU_ONLINE:
771 case CPU_DEAD:
772 case CPU_UP_CANCELED:
773 case CPU_DOWN_FAILED:
774 break;
775 default:
776 return NOTIFY_OK;
777 }
778
779 mutex_lock(&cpuidle_lock); 756 mutex_lock(&cpuidle_lock);
780 757
781 dev = per_cpu(cpuidle_devices, cpu); 758 dev = per_cpu(cpuidle_devices, cpu);
782 if (!dev || !dev->coupled) 759 if (dev && dev->coupled) {
783 goto out;
784
785 switch (action & ~CPU_TASKS_FROZEN) {
786 case CPU_UP_PREPARE:
787 case CPU_DOWN_PREPARE:
788 cpuidle_coupled_prevent_idle(dev->coupled);
789 break;
790 case CPU_ONLINE:
791 case CPU_DEAD:
792 cpuidle_coupled_update_online_cpus(dev->coupled); 760 cpuidle_coupled_update_online_cpus(dev->coupled);
793 /* Fall through */
794 case CPU_UP_CANCELED:
795 case CPU_DOWN_FAILED:
796 cpuidle_coupled_allow_idle(dev->coupled); 761 cpuidle_coupled_allow_idle(dev->coupled);
797 break;
798 } 762 }
799 763
800out:
801 mutex_unlock(&cpuidle_lock); 764 mutex_unlock(&cpuidle_lock);
802 return NOTIFY_OK; 765 return 0;
803} 766}
804 767
805static struct notifier_block cpuidle_coupled_cpu_notifier = { 768static int coupled_cpu_up_prepare(unsigned int cpu)
806 .notifier_call = cpuidle_coupled_cpu_notify, 769{
807}; 770 struct cpuidle_device *dev;
771
772 mutex_lock(&cpuidle_lock);
773
774 dev = per_cpu(cpuidle_devices, cpu);
775 if (dev && dev->coupled)
776 cpuidle_coupled_prevent_idle(dev->coupled);
777
778 mutex_unlock(&cpuidle_lock);
779 return 0;
780}
808 781
809static int __init cpuidle_coupled_init(void) 782static int __init cpuidle_coupled_init(void)
810{ 783{
811 return register_cpu_notifier(&cpuidle_coupled_cpu_notifier); 784 int ret;
785
786 ret = cpuhp_setup_state_nocalls(CPUHP_CPUIDLE_COUPLED_PREPARE,
787 "cpuidle/coupled:prepare",
788 coupled_cpu_up_prepare,
789 coupled_cpu_online);
790 if (ret)
791 return ret;
792 ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
793 "cpuidle/coupled:online",
794 coupled_cpu_online,
795 coupled_cpu_up_prepare);
796 if (ret < 0)
797 cpuhp_remove_state_nocalls(CPUHP_CPUIDLE_COUPLED_PREPARE);
798 return ret;
812} 799}
813core_initcall(cpuidle_coupled_init); 800core_initcall(cpuidle_coupled_init);
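
coupled.c splits the old single notifier into two states: a fixed PREPARE slot for the prevent/allow-idle bracket and a dynamic online slot, unwinding the first if the second fails. The shape of that pairing, reduced to a sketch; CPUHP_EXAMPLE_PREPARE and the example_* callbacks are placeholders, and a real fixed slot has to be added to enum cpuhp_state, as this series does in cpuhotplug.h below:

#include <linux/cpuhotplug.h>

static int example_prepare(unsigned int cpu) { return 0; }
static int example_dead(unsigned int cpu)    { return 0; }
static int example_online(unsigned int cpu)  { return 0; }
static int example_offline(unsigned int cpu) { return 0; }

static enum cpuhp_state example_online_state;

static int __init example_pair_init(void)
{
	int ret;

	/* Fixed slot: runs in the PREPARE section around bringup/death. */
	ret = cpuhp_setup_state_nocalls(CPUHP_EXAMPLE_PREPARE,
					"example:prepare",
					example_prepare, example_dead);
	if (ret)
		return ret;

	/* Dynamic slot: success is reported as a positive state number. */
	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"example:online",
					example_online, example_offline);
	if (ret < 0) {
		cpuhp_remove_state_nocalls(CPUHP_EXAMPLE_PREPARE);
		return ret;
	}
	example_online_state = ret;
	return 0;
}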
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index f7ca891b5b59..7fe442ca38f4 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -119,40 +119,30 @@ static struct cpuidle_state powernv_states[CPUIDLE_STATE_MAX] = {
119 .enter = snooze_loop }, 119 .enter = snooze_loop },
120}; 120};
121 121
122static int powernv_cpuidle_add_cpu_notifier(struct notifier_block *n, 122static int powernv_cpuidle_cpu_online(unsigned int cpu)
123 unsigned long action, void *hcpu)
124{ 123{
125 int hotcpu = (unsigned long)hcpu; 124 struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
126 struct cpuidle_device *dev =
127 per_cpu(cpuidle_devices, hotcpu);
128 125
129 if (dev && cpuidle_get_driver()) { 126 if (dev && cpuidle_get_driver()) {
130 switch (action) { 127 cpuidle_pause_and_lock();
131 case CPU_ONLINE: 128 cpuidle_enable_device(dev);
132 case CPU_ONLINE_FROZEN: 129 cpuidle_resume_and_unlock();
133 cpuidle_pause_and_lock(); 130 }
134 cpuidle_enable_device(dev); 131 return 0;
135 cpuidle_resume_and_unlock(); 132}
136 break;
137 133
138 case CPU_DEAD: 134static int powernv_cpuidle_cpu_dead(unsigned int cpu)
139 case CPU_DEAD_FROZEN: 135{
140 cpuidle_pause_and_lock(); 136 struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
141 cpuidle_disable_device(dev);
142 cpuidle_resume_and_unlock();
143 break;
144 137
145 default: 138 if (dev && cpuidle_get_driver()) {
146 return NOTIFY_DONE; 139 cpuidle_pause_and_lock();
147 } 140 cpuidle_disable_device(dev);
141 cpuidle_resume_and_unlock();
148 } 142 }
149 return NOTIFY_OK; 143 return 0;
150} 144}
151 145
152static struct notifier_block setup_hotplug_notifier = {
153 .notifier_call = powernv_cpuidle_add_cpu_notifier,
154};
155
156/* 146/*
157 * powernv_cpuidle_driver_init() 147 * powernv_cpuidle_driver_init()
158 */ 148 */
@@ -355,7 +345,14 @@ static int __init powernv_processor_idle_init(void)
355 return retval; 345 return retval;
356 } 346 }
357 347
358 register_cpu_notifier(&setup_hotplug_notifier); 348 retval = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
349 "cpuidle/powernv:online",
350 powernv_cpuidle_cpu_online, NULL);
351 WARN_ON(retval < 0);
352 retval = cpuhp_setup_state_nocalls(CPUHP_CPUIDLE_DEAD,
353 "cpuidle/powernv:dead", NULL,
354 powernv_cpuidle_cpu_dead);
355 WARN_ON(retval < 0);
359 printk(KERN_DEBUG "powernv_idle_driver registered\n"); 356 printk(KERN_DEBUG "powernv_idle_driver registered\n");
360 return 0; 357 return 0;
361} 358}
diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c
index 07135e009d8b..166ccd711ec9 100644
--- a/drivers/cpuidle/cpuidle-pseries.c
+++ b/drivers/cpuidle/cpuidle-pseries.c
@@ -171,40 +171,30 @@ static struct cpuidle_state shared_states[] = {
171 .enter = &shared_cede_loop }, 171 .enter = &shared_cede_loop },
172}; 172};
173 173
174static int pseries_cpuidle_add_cpu_notifier(struct notifier_block *n, 174static int pseries_cpuidle_cpu_online(unsigned int cpu)
175 unsigned long action, void *hcpu)
176{ 175{
177 int hotcpu = (unsigned long)hcpu; 176 struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
178 struct cpuidle_device *dev =
179 per_cpu(cpuidle_devices, hotcpu);
180 177
181 if (dev && cpuidle_get_driver()) { 178 if (dev && cpuidle_get_driver()) {
182 switch (action) { 179 cpuidle_pause_and_lock();
183 case CPU_ONLINE: 180 cpuidle_enable_device(dev);
184 case CPU_ONLINE_FROZEN: 181 cpuidle_resume_and_unlock();
185 cpuidle_pause_and_lock(); 182 }
186 cpuidle_enable_device(dev); 183 return 0;
187 cpuidle_resume_and_unlock(); 184}
188 break;
189 185
190 case CPU_DEAD: 186static int pseries_cpuidle_cpu_dead(unsigned int cpu)
191 case CPU_DEAD_FROZEN: 187{
192 cpuidle_pause_and_lock(); 188 struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
193 cpuidle_disable_device(dev);
194 cpuidle_resume_and_unlock();
195 break;
196 189
197 default: 190 if (dev && cpuidle_get_driver()) {
198 return NOTIFY_DONE; 191 cpuidle_pause_and_lock();
199 } 192 cpuidle_disable_device(dev);
193 cpuidle_resume_and_unlock();
200 } 194 }
201 return NOTIFY_OK; 195 return 0;
202} 196}
203 197
204static struct notifier_block setup_hotplug_notifier = {
205 .notifier_call = pseries_cpuidle_add_cpu_notifier,
206};
207
208/* 198/*
209 * pseries_cpuidle_driver_init() 199 * pseries_cpuidle_driver_init()
210 */ 200 */
@@ -273,7 +263,14 @@ static int __init pseries_processor_idle_init(void)
273 return retval; 263 return retval;
274 } 264 }
275 265
276 register_cpu_notifier(&setup_hotplug_notifier); 266 retval = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
267 "cpuidle/pseries:online",
268 pseries_cpuidle_cpu_online, NULL);
269 WARN_ON(retval < 0);
270 retval = cpuhp_setup_state_nocalls(CPUHP_CPUIDLE_DEAD,
271 "cpuidle/pseries:DEAD", NULL,
272 pseries_cpuidle_cpu_dead);
273 WARN_ON(retval < 0);
277 printk(KERN_DEBUG "pseries_idle_driver registered\n"); 274 printk(KERN_DEBUG "pseries_idle_driver registered\n");
278 return 0; 275 return 0;
279} 276}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index ee7fc3701700..5287e79e0b78 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -6349,22 +6349,20 @@ static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu
6349 return 0; 6349 return 0;
6350} 6350}
6351 6351
6352static void raid5_free_percpu(struct r5conf *conf) 6352static int raid456_cpu_dead(unsigned int cpu, struct hlist_node *node)
6353{ 6353{
6354 unsigned long cpu; 6354 struct r5conf *conf = hlist_entry_safe(node, struct r5conf, node);
6355
6356 free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
6357 return 0;
6358}
6355 6359
6360static void raid5_free_percpu(struct r5conf *conf)
6361{
6356 if (!conf->percpu) 6362 if (!conf->percpu)
6357 return; 6363 return;
6358 6364
6359#ifdef CONFIG_HOTPLUG_CPU 6365 cpuhp_state_remove_instance(CPUHP_MD_RAID5_PREPARE, &conf->node);
6360 unregister_cpu_notifier(&conf->cpu_notify);
6361#endif
6362
6363 get_online_cpus();
6364 for_each_possible_cpu(cpu)
6365 free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
6366 put_online_cpus();
6367
6368 free_percpu(conf->percpu); 6366 free_percpu(conf->percpu);
6369} 6367}
6370 6368
@@ -6383,64 +6381,28 @@ static void free_conf(struct r5conf *conf)
6383 kfree(conf); 6381 kfree(conf);
6384} 6382}
6385 6383
6386#ifdef CONFIG_HOTPLUG_CPU 6384static int raid456_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
6387static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
6388 void *hcpu)
6389{ 6385{
6390 struct r5conf *conf = container_of(nfb, struct r5conf, cpu_notify); 6386 struct r5conf *conf = hlist_entry_safe(node, struct r5conf, node);
6391 long cpu = (long)hcpu;
6392 struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu); 6387 struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);
6393 6388
6394 switch (action) { 6389 if (alloc_scratch_buffer(conf, percpu)) {
6395 case CPU_UP_PREPARE: 6390 pr_err("%s: failed memory allocation for cpu%u\n",
6396 case CPU_UP_PREPARE_FROZEN: 6391 __func__, cpu);
6397 if (alloc_scratch_buffer(conf, percpu)) { 6392 return -ENOMEM;
6398 pr_err("%s: failed memory allocation for cpu%ld\n",
6399 __func__, cpu);
6400 return notifier_from_errno(-ENOMEM);
6401 }
6402 break;
6403 case CPU_DEAD:
6404 case CPU_DEAD_FROZEN:
6405 case CPU_UP_CANCELED:
6406 case CPU_UP_CANCELED_FROZEN:
6407 free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
6408 break;
6409 default:
6410 break;
6411 } 6393 }
6412 return NOTIFY_OK; 6394 return 0;
6413} 6395}
6414#endif
6415 6396
6416static int raid5_alloc_percpu(struct r5conf *conf) 6397static int raid5_alloc_percpu(struct r5conf *conf)
6417{ 6398{
6418 unsigned long cpu;
6419 int err = 0; 6399 int err = 0;
6420 6400
6421 conf->percpu = alloc_percpu(struct raid5_percpu); 6401 conf->percpu = alloc_percpu(struct raid5_percpu);
6422 if (!conf->percpu) 6402 if (!conf->percpu)
6423 return -ENOMEM; 6403 return -ENOMEM;
6424 6404
6425#ifdef CONFIG_HOTPLUG_CPU 6405 err = cpuhp_state_add_instance(CPUHP_MD_RAID5_PREPARE, &conf->node);
6426 conf->cpu_notify.notifier_call = raid456_cpu_notify;
6427 conf->cpu_notify.priority = 0;
6428 err = register_cpu_notifier(&conf->cpu_notify);
6429 if (err)
6430 return err;
6431#endif
6432
6433 get_online_cpus();
6434 for_each_present_cpu(cpu) {
6435 err = alloc_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
6436 if (err) {
6437 pr_err("%s: failed memory allocation for cpu%ld\n",
6438 __func__, cpu);
6439 break;
6440 }
6441 }
6442 put_online_cpus();
6443
6444 if (!err) { 6406 if (!err) {
6445 conf->scribble_disks = max(conf->raid_disks, 6407 conf->scribble_disks = max(conf->raid_disks,
6446 conf->previous_raid_disks); 6408 conf->previous_raid_disks);
@@ -7985,10 +7947,21 @@ static struct md_personality raid4_personality =
7985 7947
7986static int __init raid5_init(void) 7948static int __init raid5_init(void)
7987{ 7949{
7950 int ret;
7951
7988 raid5_wq = alloc_workqueue("raid5wq", 7952 raid5_wq = alloc_workqueue("raid5wq",
7989 WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE|WQ_SYSFS, 0); 7953 WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE|WQ_SYSFS, 0);
7990 if (!raid5_wq) 7954 if (!raid5_wq)
7991 return -ENOMEM; 7955 return -ENOMEM;
7956
7957 ret = cpuhp_setup_state_multi(CPUHP_MD_RAID5_PREPARE,
7958 "md/raid5:prepare",
7959 raid456_cpu_up_prepare,
7960 raid456_cpu_dead);
7961 if (ret) {
7962 destroy_workqueue(raid5_wq);
7963 return ret;
7964 }
7992 register_md_personality(&raid6_personality); 7965 register_md_personality(&raid6_personality);
7993 register_md_personality(&raid5_personality); 7966 register_md_personality(&raid5_personality);
7994 register_md_personality(&raid4_personality); 7967 register_md_personality(&raid4_personality);
@@ -8000,6 +7973,7 @@ static void raid5_exit(void)
8000 unregister_md_personality(&raid6_personality); 7973 unregister_md_personality(&raid6_personality);
8001 unregister_md_personality(&raid5_personality); 7974 unregister_md_personality(&raid5_personality);
8002 unregister_md_personality(&raid4_personality); 7975 unregister_md_personality(&raid4_personality);
7976 cpuhp_remove_multi_state(CPUHP_MD_RAID5_PREPARE);
8003 destroy_workqueue(raid5_wq); 7977 destroy_workqueue(raid5_wq);
8004} 7978}
8005 7979
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 517d4b68a1be..57ec49f0839e 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -512,9 +512,7 @@ struct r5conf {
512 } __percpu *percpu; 512 } __percpu *percpu;
513 int scribble_disks; 513 int scribble_disks;
514 int scribble_sectors; 514 int scribble_sectors;
515#ifdef CONFIG_HOTPLUG_CPU 515 struct hlist_node node;
516 struct notifier_block cpu_notify;
517#endif
518 516
519 /* 517 /*
520 * Free stripes pool 518 * Free stripes pool
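
The raid5 conversion is the first multi-instance user in this series: the per-array notifier_block is replaced by an hlist_node, and each callback gets that node back and recovers the owning structure with hlist_entry_safe(). A sketch of the callback shape, using placeholder example_* names:

#include <linux/cpuhotplug.h>
#include <linux/list.h>
#include <linux/printk.h>

struct example_dev {
	struct hlist_node node;		/* links into the hotplug state */
	/* ... per-device state ... */
};

static int example_cpu_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct example_dev *dev = hlist_entry_safe(node, struct example_dev,
						   node);

	pr_debug("allocating per-cpu scratch for %p on cpu%u\n", dev, cpu);
	return 0;	/* a negative return aborts the CPU bringup */
}

static int example_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct example_dev *dev = hlist_entry_safe(node, struct example_dev,
						   node);

	pr_debug("freeing per-cpu scratch for %p on cpu%u\n", dev, cpu);
	return 0;
}

After cpuhp_setup_state_multi() has installed the callbacks once at module load, each array registers itself with cpuhp_state_add_instance(CPUHP_MD_RAID5_PREPARE, &conf->node); the core then runs the prepare callback for every CPU that has already passed the state, which is what lets the patch delete the open-coded get_online_cpus()/for_each_present_cpu() loop and its manual error handling.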
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index d41c28d00b57..b74548728fb5 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -382,7 +382,8 @@ struct mvneta_port {
382 struct mvneta_rx_queue *rxqs; 382 struct mvneta_rx_queue *rxqs;
383 struct mvneta_tx_queue *txqs; 383 struct mvneta_tx_queue *txqs;
384 struct net_device *dev; 384 struct net_device *dev;
385 struct notifier_block cpu_notifier; 385 struct hlist_node node_online;
386 struct hlist_node node_dead;
386 int rxq_def; 387 int rxq_def;
387 /* Protect the access to the percpu interrupt registers, 388 /* Protect the access to the percpu interrupt registers,
388 * ensuring that the configuration remains coherent. 389 * ensuring that the configuration remains coherent.
@@ -574,6 +575,7 @@ struct mvneta_rx_queue {
574 int next_desc_to_proc; 575 int next_desc_to_proc;
575}; 576};
576 577
578static enum cpuhp_state online_hpstate;
577/* The hardware supports eight (8) rx queues, but we are only allowing 579/* The hardware supports eight (8) rx queues, but we are only allowing
578 * the first one to be used. Therefore, let's just allocate one queue. 580 * the first one to be used. Therefore, let's just allocate one queue.
579 */ 581 */
@@ -3311,101 +3313,104 @@ static void mvneta_percpu_elect(struct mvneta_port *pp)
3311 } 3313 }
3312}; 3314};
3313 3315
3314static int mvneta_percpu_notifier(struct notifier_block *nfb, 3316static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
3315 unsigned long action, void *hcpu)
3316{ 3317{
3317 struct mvneta_port *pp = container_of(nfb, struct mvneta_port, 3318 int other_cpu;
3318 cpu_notifier); 3319 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
3319 int cpu = (unsigned long)hcpu, other_cpu; 3320 node_online);
3320 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); 3321 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
3321 3322
3322 switch (action) {
3323 case CPU_ONLINE:
3324 case CPU_ONLINE_FROZEN:
3325 case CPU_DOWN_FAILED:
3326 case CPU_DOWN_FAILED_FROZEN:
3327 spin_lock(&pp->lock);
3328 /* Configuring the driver for a new CPU while the
3329 * driver is stopping is racy, so just avoid it.
3330 */
3331 if (pp->is_stopped) {
3332 spin_unlock(&pp->lock);
3333 break;
3334 }
3335 netif_tx_stop_all_queues(pp->dev);
3336 3323
3337 /* We have to synchronise on tha napi of each CPU 3324 spin_lock(&pp->lock);
3338 * except the one just being waked up 3325 /*
3339 */ 3326 * Configuring the driver for a new CPU while the driver is
3340 for_each_online_cpu(other_cpu) { 3327 * stopping is racy, so just avoid it.
3341 if (other_cpu != cpu) { 3328 */
3342 struct mvneta_pcpu_port *other_port = 3329 if (pp->is_stopped) {
3343 per_cpu_ptr(pp->ports, other_cpu); 3330 spin_unlock(&pp->lock);
3331 return 0;
3332 }
3333 netif_tx_stop_all_queues(pp->dev);
3344 3334
3345 napi_synchronize(&other_port->napi); 3335 /*
3346 } 3336 * We have to synchronise on tha napi of each CPU except the one
3337 * just being woken up
3338 */
3339 for_each_online_cpu(other_cpu) {
3340 if (other_cpu != cpu) {
3341 struct mvneta_pcpu_port *other_port =
3342 per_cpu_ptr(pp->ports, other_cpu);
3343
3344 napi_synchronize(&other_port->napi);
3347 } 3345 }
3346 }
3348 3347
3349 /* Mask all ethernet port interrupts */ 3348 /* Mask all ethernet port interrupts */
3350 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); 3349 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3351 napi_enable(&port->napi); 3350 napi_enable(&port->napi);
3352 3351
3352 /*
3353 * Enable per-CPU interrupts on the CPU that is
3354 * brought up.
3355 */
3356 mvneta_percpu_enable(pp);
3353 3357
3354 /* Enable per-CPU interrupts on the CPU that is 3358 /*
3355 * brought up. 3359 * Enable per-CPU interrupt on the one CPU we care
3356 */ 3360 * about.
3357 mvneta_percpu_enable(pp); 3361 */
3362 mvneta_percpu_elect(pp);
3358 3363
3359 /* Enable per-CPU interrupt on the one CPU we care 3364 /* Unmask all ethernet port interrupts */
3360 * about. 3365 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
3361 */ 3366 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
3362 mvneta_percpu_elect(pp); 3367 MVNETA_CAUSE_PHY_STATUS_CHANGE |
3363 3368 MVNETA_CAUSE_LINK_CHANGE |
3364 /* Unmask all ethernet port interrupts */ 3369 MVNETA_CAUSE_PSC_SYNC_CHANGE);
3365 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); 3370 netif_tx_start_all_queues(pp->dev);
3366 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 3371 spin_unlock(&pp->lock);
3367 MVNETA_CAUSE_PHY_STATUS_CHANGE | 3372 return 0;
3368 MVNETA_CAUSE_LINK_CHANGE | 3373}
3369 MVNETA_CAUSE_PSC_SYNC_CHANGE);
3370 netif_tx_start_all_queues(pp->dev);
3371 spin_unlock(&pp->lock);
3372 break;
3373 case CPU_DOWN_PREPARE:
3374 case CPU_DOWN_PREPARE_FROZEN:
3375 netif_tx_stop_all_queues(pp->dev);
3376 /* Thanks to this lock we are sure that any pending
3377 * cpu election is done
3378 */
3379 spin_lock(&pp->lock);
3380 /* Mask all ethernet port interrupts */
3381 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3382 spin_unlock(&pp->lock);
3383 3374
3384 napi_synchronize(&port->napi); 3375static int mvneta_cpu_down_prepare(unsigned int cpu, struct hlist_node *node)
3385 napi_disable(&port->napi); 3376{
3386 /* Disable per-CPU interrupts on the CPU that is 3377 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
3387 * brought down. 3378 node_online);
3388 */ 3379 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
3389 mvneta_percpu_disable(pp);
3390 3380
3391 break; 3381 /*
3392 case CPU_DEAD: 3382 * Thanks to this lock we are sure that any pending cpu election is
3393 case CPU_DEAD_FROZEN: 3383 * done.
3394 /* Check if a new CPU must be elected now this on is down */ 3384 */
3395 spin_lock(&pp->lock); 3385 spin_lock(&pp->lock);
3396 mvneta_percpu_elect(pp); 3386 /* Mask all ethernet port interrupts */
3397 spin_unlock(&pp->lock); 3387 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3398 /* Unmask all ethernet port interrupts */ 3388 spin_unlock(&pp->lock);
3399 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
3400 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
3401 MVNETA_CAUSE_PHY_STATUS_CHANGE |
3402 MVNETA_CAUSE_LINK_CHANGE |
3403 MVNETA_CAUSE_PSC_SYNC_CHANGE);
3404 netif_tx_start_all_queues(pp->dev);
3405 break;
3406 }
3407 3389
3408 return NOTIFY_OK; 3390 napi_synchronize(&port->napi);
3391 napi_disable(&port->napi);
3392 /* Disable per-CPU interrupts on the CPU that is brought down. */
3393 mvneta_percpu_disable(pp);
3394 return 0;
3395}
3396
3397static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node)
3398{
3399 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
3400 node_dead);
3401
 3402 /* Check if a new CPU must be elected now this one is down */
3403 spin_lock(&pp->lock);
3404 mvneta_percpu_elect(pp);
3405 spin_unlock(&pp->lock);
3406 /* Unmask all ethernet port interrupts */
3407 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
3408 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
3409 MVNETA_CAUSE_PHY_STATUS_CHANGE |
3410 MVNETA_CAUSE_LINK_CHANGE |
3411 MVNETA_CAUSE_PSC_SYNC_CHANGE);
3412 netif_tx_start_all_queues(pp->dev);
3413 return 0;
3409} 3414}
3410 3415
3411static int mvneta_open(struct net_device *dev) 3416static int mvneta_open(struct net_device *dev)
@@ -3442,7 +3447,15 @@ static int mvneta_open(struct net_device *dev)
3442 /* Register a CPU notifier to handle the case where our CPU 3447 /* Register a CPU notifier to handle the case where our CPU
3443 * might be taken offline. 3448 * might be taken offline.
3444 */ 3449 */
3445 register_cpu_notifier(&pp->cpu_notifier); 3450 ret = cpuhp_state_add_instance_nocalls(online_hpstate,
3451 &pp->node_online);
3452 if (ret)
3453 goto err_free_irq;
3454
3455 ret = cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
3456 &pp->node_dead);
3457 if (ret)
3458 goto err_free_online_hp;
3446 3459
3447 /* In default link is down */ 3460 /* In default link is down */
3448 netif_carrier_off(pp->dev); 3461 netif_carrier_off(pp->dev);
@@ -3450,15 +3463,19 @@ static int mvneta_open(struct net_device *dev)
3450 ret = mvneta_mdio_probe(pp); 3463 ret = mvneta_mdio_probe(pp);
3451 if (ret < 0) { 3464 if (ret < 0) {
3452 netdev_err(dev, "cannot probe MDIO bus\n"); 3465 netdev_err(dev, "cannot probe MDIO bus\n");
3453 goto err_free_irq; 3466 goto err_free_dead_hp;
3454 } 3467 }
3455 3468
3456 mvneta_start_dev(pp); 3469 mvneta_start_dev(pp);
3457 3470
3458 return 0; 3471 return 0;
3459 3472
3473err_free_dead_hp:
3474 cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
3475 &pp->node_dead);
3476err_free_online_hp:
3477 cpuhp_state_remove_instance_nocalls(online_hpstate, &pp->node_online);
3460err_free_irq: 3478err_free_irq:
3461 unregister_cpu_notifier(&pp->cpu_notifier);
3462 on_each_cpu(mvneta_percpu_disable, pp, true); 3479 on_each_cpu(mvneta_percpu_disable, pp, true);
3463 free_percpu_irq(pp->dev->irq, pp->ports); 3480 free_percpu_irq(pp->dev->irq, pp->ports);
3464err_cleanup_txqs: 3481err_cleanup_txqs:
@@ -3484,7 +3501,10 @@ static int mvneta_stop(struct net_device *dev)
3484 3501
3485 mvneta_stop_dev(pp); 3502 mvneta_stop_dev(pp);
3486 mvneta_mdio_remove(pp); 3503 mvneta_mdio_remove(pp);
3487 unregister_cpu_notifier(&pp->cpu_notifier); 3504
3505 cpuhp_state_remove_instance_nocalls(online_hpstate, &pp->node_online);
3506 cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
3507 &pp->node_dead);
3488 on_each_cpu(mvneta_percpu_disable, pp, true); 3508 on_each_cpu(mvneta_percpu_disable, pp, true);
3489 free_percpu_irq(dev->irq, pp->ports); 3509 free_percpu_irq(dev->irq, pp->ports);
3490 mvneta_cleanup_rxqs(pp); 3510 mvneta_cleanup_rxqs(pp);
@@ -4024,7 +4044,6 @@ static int mvneta_probe(struct platform_device *pdev)
4024 err = of_property_read_string(dn, "managed", &managed); 4044 err = of_property_read_string(dn, "managed", &managed);
4025 pp->use_inband_status = (err == 0 && 4045 pp->use_inband_status = (err == 0 &&
4026 strcmp(managed, "in-band-status") == 0); 4046 strcmp(managed, "in-band-status") == 0);
4027 pp->cpu_notifier.notifier_call = mvneta_percpu_notifier;
4028 4047
4029 pp->rxq_def = rxq_def; 4048 pp->rxq_def = rxq_def;
4030 4049
@@ -4227,7 +4246,42 @@ static struct platform_driver mvneta_driver = {
4227 }, 4246 },
4228}; 4247};
4229 4248
4230module_platform_driver(mvneta_driver); 4249static int __init mvneta_driver_init(void)
4250{
4251 int ret;
4252
 4253 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/mvneta:online",
4254 mvneta_cpu_online,
4255 mvneta_cpu_down_prepare);
4256 if (ret < 0)
4257 goto out;
4258 online_hpstate = ret;
4259 ret = cpuhp_setup_state_multi(CPUHP_NET_MVNETA_DEAD, "net/mvneta:dead",
4260 NULL, mvneta_cpu_dead);
4261 if (ret)
4262 goto err_dead;
4263
4264 ret = platform_driver_register(&mvneta_driver);
4265 if (ret)
4266 goto err;
4267 return 0;
4268
4269err:
4270 cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
4271err_dead:
4272 cpuhp_remove_multi_state(online_hpstate);
4273out:
4274 return ret;
4275}
4276module_init(mvneta_driver_init);
4277
4278static void __exit mvneta_driver_exit(void)
4279{
4280 platform_driver_unregister(&mvneta_driver);
4281 cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
4282 cpuhp_remove_multi_state(online_hpstate);
4283}
4284module_exit(mvneta_driver_exit);
4231 4285
4232MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com"); 4286MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
4233MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>"); 4287MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 1b5f531eeb25..fad84f3f4109 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -138,8 +138,9 @@ struct virtnet_info {
138 /* Does the affinity hint is set for virtqueues? */ 138 /* Does the affinity hint is set for virtqueues? */
139 bool affinity_hint_set; 139 bool affinity_hint_set;
140 140
141 /* CPU hot plug notifier */ 141 /* CPU hotplug instances for online & dead */
142 struct notifier_block nb; 142 struct hlist_node node;
143 struct hlist_node node_dead;
143 144
144 /* Control VQ buffers: protected by the rtnl lock */ 145 /* Control VQ buffers: protected by the rtnl lock */
145 struct virtio_net_ctrl_hdr ctrl_hdr; 146 struct virtio_net_ctrl_hdr ctrl_hdr;
@@ -1237,25 +1238,53 @@ static void virtnet_set_affinity(struct virtnet_info *vi)
1237 vi->affinity_hint_set = true; 1238 vi->affinity_hint_set = true;
1238} 1239}
1239 1240
1240static int virtnet_cpu_callback(struct notifier_block *nfb, 1241static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node)
1241 unsigned long action, void *hcpu)
1242{ 1242{
1243 struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb); 1243 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
1244 node);
1245 virtnet_set_affinity(vi);
1246 return 0;
1247}
1244 1248
1245 switch(action & ~CPU_TASKS_FROZEN) { 1249static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node)
1246 case CPU_ONLINE: 1250{
1247 case CPU_DOWN_FAILED: 1251 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
1248 case CPU_DEAD: 1252 node_dead);
1249 virtnet_set_affinity(vi); 1253 virtnet_set_affinity(vi);
1250 break; 1254 return 0;
1251 case CPU_DOWN_PREPARE: 1255}
1252 virtnet_clean_affinity(vi, (long)hcpu);
1253 break;
1254 default:
1255 break;
1256 }
1257 1256
1258 return NOTIFY_OK; 1257static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
1258{
1259 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
1260 node);
1261
1262 virtnet_clean_affinity(vi, cpu);
1263 return 0;
1264}
1265
1266static enum cpuhp_state virtionet_online;
1267
1268static int virtnet_cpu_notif_add(struct virtnet_info *vi)
1269{
1270 int ret;
1271
1272 ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node);
1273 if (ret)
1274 return ret;
1275 ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD,
1276 &vi->node_dead);
1277 if (!ret)
1278 return ret;
1279 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
1280 return ret;
1281}
1282
1283static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
1284{
1285 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
1286 cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD,
1287 &vi->node_dead);
1259} 1288}
1260 1289
1261static void virtnet_get_ringparam(struct net_device *dev, 1290static void virtnet_get_ringparam(struct net_device *dev,
@@ -1879,8 +1908,7 @@ static int virtnet_probe(struct virtio_device *vdev)
1879 1908
1880 virtio_device_ready(vdev); 1909 virtio_device_ready(vdev);
1881 1910
1882 vi->nb.notifier_call = &virtnet_cpu_callback; 1911 err = virtnet_cpu_notif_add(vi);
1883 err = register_hotcpu_notifier(&vi->nb);
1884 if (err) { 1912 if (err) {
1885 pr_debug("virtio_net: registering cpu notifier failed\n"); 1913 pr_debug("virtio_net: registering cpu notifier failed\n");
1886 goto free_unregister_netdev; 1914 goto free_unregister_netdev;
@@ -1934,7 +1962,7 @@ static void virtnet_remove(struct virtio_device *vdev)
1934{ 1962{
1935 struct virtnet_info *vi = vdev->priv; 1963 struct virtnet_info *vi = vdev->priv;
1936 1964
1937 unregister_hotcpu_notifier(&vi->nb); 1965 virtnet_cpu_notif_remove(vi);
1938 1966
1939 /* Make sure no work handler is accessing the device. */ 1967 /* Make sure no work handler is accessing the device. */
1940 flush_work(&vi->config_work); 1968 flush_work(&vi->config_work);
@@ -1953,7 +1981,7 @@ static int virtnet_freeze(struct virtio_device *vdev)
1953 struct virtnet_info *vi = vdev->priv; 1981 struct virtnet_info *vi = vdev->priv;
1954 int i; 1982 int i;
1955 1983
1956 unregister_hotcpu_notifier(&vi->nb); 1984 virtnet_cpu_notif_remove(vi);
1957 1985
1958 /* Make sure no work handler is accessing the device */ 1986 /* Make sure no work handler is accessing the device */
1959 flush_work(&vi->config_work); 1987 flush_work(&vi->config_work);
@@ -1997,7 +2025,7 @@ static int virtnet_restore(struct virtio_device *vdev)
1997 virtnet_set_queues(vi, vi->curr_queue_pairs); 2025 virtnet_set_queues(vi, vi->curr_queue_pairs);
1998 rtnl_unlock(); 2026 rtnl_unlock();
1999 2027
2000 err = register_hotcpu_notifier(&vi->nb); 2028 err = virtnet_cpu_notif_add(vi);
2001 if (err) 2029 if (err)
2002 return err; 2030 return err;
2003 2031
@@ -2039,7 +2067,41 @@ static struct virtio_driver virtio_net_driver = {
2039#endif 2067#endif
2040}; 2068};
2041 2069
2042module_virtio_driver(virtio_net_driver); 2070static __init int virtio_net_driver_init(void)
2071{
2072 int ret;
2073
2074 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "AP_VIRT_NET_ONLINE",
2075 virtnet_cpu_online,
2076 virtnet_cpu_down_prep);
2077 if (ret < 0)
2078 goto out;
2079 virtionet_online = ret;
2080 ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "VIRT_NET_DEAD",
2081 NULL, virtnet_cpu_dead);
2082 if (ret)
2083 goto err_dead;
2084
2085 ret = register_virtio_driver(&virtio_net_driver);
2086 if (ret)
2087 goto err_virtio;
2088 return 0;
2089err_virtio:
2090 cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
2091err_dead:
2092 cpuhp_remove_multi_state(virtionet_online);
2093out:
2094 return ret;
2095}
2096module_init(virtio_net_driver_init);
2097
2098static __exit void virtio_net_driver_exit(void)
2099{
2100 cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
2101 cpuhp_remove_multi_state(virtionet_online);
2102 unregister_virtio_driver(&virtio_net_driver);
2103}
2104module_exit(virtio_net_driver_exit);
2043 2105
2044MODULE_DEVICE_TABLE(virtio, id_table); 2106MODULE_DEVICE_TABLE(virtio, id_table);
2045MODULE_DESCRIPTION("Virtio network driver"); 2107MODULE_DESCRIPTION("Virtio network driver");
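
virtio_net needs two instances per device, one in the dynamic online state and one in the fixed CPUHP_VIRT_NET_DEAD slot, so virtnet_cpu_notif_add() has to unwind the first registration when the second fails. The same shape as a sketch; the example_* names and CPUHP_EXAMPLE_DEAD are placeholders:

#include <linux/cpuhotplug.h>

struct example_net {
	struct hlist_node node;		/* instance in the online state */
	struct hlist_node node_dead;	/* instance in the dead state */
};

/* example_online holds a dynamic state number saved from
 * cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, ...); CPUHP_EXAMPLE_DEAD
 * stands in for a fixed dead slot. Both are placeholders. */
static enum cpuhp_state example_online;

static int example_notif_add(struct example_net *n)
{
	int ret;

	ret = cpuhp_state_add_instance_nocalls(example_online, &n->node);
	if (ret)
		return ret;
	ret = cpuhp_state_add_instance_nocalls(CPUHP_EXAMPLE_DEAD,
					       &n->node_dead);
	if (ret)	/* unwind the first registration */
		cpuhp_state_remove_instance_nocalls(example_online,
						    &n->node);
	return ret;
}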
diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
index bdef916e5dda..2498a6cd7c24 100644
--- a/drivers/oprofile/timer_int.c
+++ b/drivers/oprofile/timer_int.c
@@ -74,37 +74,39 @@ static void oprofile_hrtimer_stop(void)
74 put_online_cpus(); 74 put_online_cpus();
75} 75}
76 76
77static int oprofile_cpu_notify(struct notifier_block *self, 77static int oprofile_timer_online(unsigned int cpu)
78 unsigned long action, void *hcpu)
79{ 78{
80 long cpu = (long) hcpu; 79 local_irq_disable();
81 80 __oprofile_hrtimer_start(NULL);
82 switch (action) { 81 local_irq_enable();
83 case CPU_ONLINE: 82 return 0;
84 case CPU_ONLINE_FROZEN:
85 smp_call_function_single(cpu, __oprofile_hrtimer_start,
86 NULL, 1);
87 break;
88 case CPU_DEAD:
89 case CPU_DEAD_FROZEN:
90 __oprofile_hrtimer_stop(cpu);
91 break;
92 }
93 return NOTIFY_OK;
94} 83}
95 84
96static struct notifier_block __refdata oprofile_cpu_notifier = { 85static int oprofile_timer_prep_down(unsigned int cpu)
97 .notifier_call = oprofile_cpu_notify, 86{
98}; 87 __oprofile_hrtimer_stop(cpu);
88 return 0;
89}
90
91static enum cpuhp_state hp_online;
99 92
100static int oprofile_hrtimer_setup(void) 93static int oprofile_hrtimer_setup(void)
101{ 94{
102 return register_hotcpu_notifier(&oprofile_cpu_notifier); 95 int ret;
96
97 ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
98 "oprofile/timer:online",
99 oprofile_timer_online,
100 oprofile_timer_prep_down);
101 if (ret < 0)
102 return ret;
103 hp_online = ret;
104 return 0;
103} 105}
104 106
105static void oprofile_hrtimer_shutdown(void) 107static void oprofile_hrtimer_shutdown(void)
106{ 108{
107 unregister_hotcpu_notifier(&oprofile_cpu_notifier); 109 cpuhp_remove_state_nocalls(hp_online);
108} 110}
109 111
110int oprofile_timer_init(struct oprofile_operations *ops) 112int oprofile_timer_init(struct oprofile_operations *ops)
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 30370817bf13..b37b57294566 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -709,28 +709,20 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
709 return 0; 709 return 0;
710} 710}
711 711
712static DEFINE_SPINLOCK(arm_pmu_lock);
713static LIST_HEAD(arm_pmu_list);
714
715/* 712/*
716 * PMU hardware loses all context when a CPU goes offline. 713 * PMU hardware loses all context when a CPU goes offline.
717 * When a CPU is hotplugged back in, since some hardware registers are 714 * When a CPU is hotplugged back in, since some hardware registers are
718 * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading 715 * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
719 * junk values out of them. 716 * junk values out of them.
720 */ 717 */
721static int arm_perf_starting_cpu(unsigned int cpu) 718static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
722{ 719{
723 struct arm_pmu *pmu; 720 struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
724
725 spin_lock(&arm_pmu_lock);
726 list_for_each_entry(pmu, &arm_pmu_list, entry) {
727 721
728 if (!cpumask_test_cpu(cpu, &pmu->supported_cpus)) 722 if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
729 continue; 723 return 0;
730 if (pmu->reset) 724 if (pmu->reset)
731 pmu->reset(pmu); 725 pmu->reset(pmu);
732 }
733 spin_unlock(&arm_pmu_lock);
734 return 0; 726 return 0;
735} 727}
736 728
@@ -842,9 +834,10 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
842 if (!cpu_hw_events) 834 if (!cpu_hw_events)
843 return -ENOMEM; 835 return -ENOMEM;
844 836
845 spin_lock(&arm_pmu_lock); 837 err = cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
846 list_add_tail(&cpu_pmu->entry, &arm_pmu_list); 838 &cpu_pmu->node);
847 spin_unlock(&arm_pmu_lock); 839 if (err)
840 goto out_free;
848 841
849 err = cpu_pm_pmu_register(cpu_pmu); 842 err = cpu_pm_pmu_register(cpu_pmu);
850 if (err) 843 if (err)
@@ -880,9 +873,9 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
880 return 0; 873 return 0;
881 874
882out_unregister: 875out_unregister:
883 spin_lock(&arm_pmu_lock); 876 cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
884 list_del(&cpu_pmu->entry); 877 &cpu_pmu->node);
885 spin_unlock(&arm_pmu_lock); 878out_free:
886 free_percpu(cpu_hw_events); 879 free_percpu(cpu_hw_events);
887 return err; 880 return err;
888} 881}
@@ -890,9 +883,8 @@ out_unregister:
890static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu) 883static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
891{ 884{
892 cpu_pm_pmu_unregister(cpu_pmu); 885 cpu_pm_pmu_unregister(cpu_pmu);
893 spin_lock(&arm_pmu_lock); 886 cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
894 list_del(&cpu_pmu->entry); 887 &cpu_pmu->node);
895 spin_unlock(&arm_pmu_lock);
896 free_percpu(cpu_pmu->hw_events); 888 free_percpu(cpu_pmu->hw_events);
897} 889}
898 890
@@ -1091,9 +1083,9 @@ static int arm_pmu_hp_init(void)
1091{ 1083{
1092 int ret; 1084 int ret;
1093 1085
1094 ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_STARTING, 1086 ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING,
1095 "AP_PERF_ARM_STARTING", 1087 "AP_PERF_ARM_STARTING",
1096 arm_perf_starting_cpu, NULL); 1088 arm_perf_starting_cpu, NULL);
1097 if (ret) 1089 if (ret)
1098 pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n", 1090 pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",
1099 ret); 1091 ret);
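
For arm_pmu the multi-instance conversion also removes arm_pmu_lock: the core hands the callback the node of one registered PMU, so there is no global list left to walk. That matters because CPUHP_AP_PERF_ARM_STARTING sits in the STARTING section, where the callback runs on the incoming CPU with interrupts disabled and must not sleep. A sketch of such a callback, with placeholder example_* names:

#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/list.h>

struct example_pmu {
	struct hlist_node node;
	cpumask_t supported_cpus;
};

static void example_pmu_reset(struct example_pmu *pmu)
{
	/* Reprogram this CPU's counter registers to a sane state. */
}

static int example_starting_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct example_pmu *pmu = hlist_entry_safe(node, struct example_pmu,
						   node);

	/*
	 * STARTING callbacks run on the hotplugged CPU itself with
	 * interrupts disabled: no sleeping, no allocations.
	 */
	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
		return 0;
	example_pmu_reset(pmu);
	return 0;
}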
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 7dbbb29d24c6..deefab3a94d0 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -107,8 +107,8 @@ struct virtio_scsi {
107 /* If the affinity hint is set for virtqueues */ 107 /* If the affinity hint is set for virtqueues */
108 bool affinity_hint_set; 108 bool affinity_hint_set;
109 109
110 /* CPU hotplug notifier */ 110 struct hlist_node node;
111 struct notifier_block nb; 111 struct hlist_node node_dead;
112 112
113 /* Protected by event_vq lock */ 113 /* Protected by event_vq lock */
114 bool stop_events; 114 bool stop_events;
@@ -118,6 +118,7 @@ struct virtio_scsi {
118 struct virtio_scsi_vq req_vqs[]; 118 struct virtio_scsi_vq req_vqs[];
119}; 119};
120 120
121static enum cpuhp_state virtioscsi_online;
121static struct kmem_cache *virtscsi_cmd_cache; 122static struct kmem_cache *virtscsi_cmd_cache;
122static mempool_t *virtscsi_cmd_pool; 123static mempool_t *virtscsi_cmd_pool;
123 124
@@ -852,21 +853,33 @@ static void virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
852 put_online_cpus(); 853 put_online_cpus();
853} 854}
854 855
855static int virtscsi_cpu_callback(struct notifier_block *nfb, 856static int virtscsi_cpu_online(unsigned int cpu, struct hlist_node *node)
856 unsigned long action, void *hcpu)
857{ 857{
858 struct virtio_scsi *vscsi = container_of(nfb, struct virtio_scsi, nb); 858 struct virtio_scsi *vscsi = hlist_entry_safe(node, struct virtio_scsi,
859 switch(action) { 859 node);
860 case CPU_ONLINE: 860 __virtscsi_set_affinity(vscsi, true);
861 case CPU_ONLINE_FROZEN: 861 return 0;
862 case CPU_DEAD: 862}
863 case CPU_DEAD_FROZEN: 863
864 __virtscsi_set_affinity(vscsi, true); 864static int virtscsi_cpu_notif_add(struct virtio_scsi *vi)
865 break; 865{
866 default: 866 int ret;
867 break; 867
868 } 868 ret = cpuhp_state_add_instance(virtioscsi_online, &vi->node);
869 return NOTIFY_OK; 869 if (ret)
870 return ret;
871
872 ret = cpuhp_state_add_instance(CPUHP_VIRT_SCSI_DEAD, &vi->node_dead);
873 if (ret)
874 cpuhp_state_remove_instance(virtioscsi_online, &vi->node);
875 return ret;
876}
877
878static void virtscsi_cpu_notif_remove(struct virtio_scsi *vi)
879{
880 cpuhp_state_remove_instance_nocalls(virtioscsi_online, &vi->node);
881 cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_SCSI_DEAD,
882 &vi->node_dead);
870} 883}
871 884
872static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq, 885static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
@@ -929,8 +942,6 @@ static int virtscsi_init(struct virtio_device *vdev,
929 virtscsi_init_vq(&vscsi->req_vqs[i - VIRTIO_SCSI_VQ_BASE], 942 virtscsi_init_vq(&vscsi->req_vqs[i - VIRTIO_SCSI_VQ_BASE],
930 vqs[i]); 943 vqs[i]);
931 944
932 virtscsi_set_affinity(vscsi, true);
933
934 virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE); 945 virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE);
935 virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE); 946 virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE);
936 947
@@ -987,12 +998,9 @@ static int virtscsi_probe(struct virtio_device *vdev)
987 if (err) 998 if (err)
988 goto virtscsi_init_failed; 999 goto virtscsi_init_failed;
989 1000
990 vscsi->nb.notifier_call = &virtscsi_cpu_callback; 1001 err = virtscsi_cpu_notif_add(vscsi);
991 err = register_hotcpu_notifier(&vscsi->nb); 1002 if (err)
992 if (err) {
993 pr_err("registering cpu notifier failed\n");
994 goto scsi_add_host_failed; 1003 goto scsi_add_host_failed;
995 }
996 1004
997 cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1; 1005 cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1;
998 shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue); 1006 shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
@@ -1049,7 +1057,7 @@ static void virtscsi_remove(struct virtio_device *vdev)
1049 1057
1050 scsi_remove_host(shost); 1058 scsi_remove_host(shost);
1051 1059
1052 unregister_hotcpu_notifier(&vscsi->nb); 1060 virtscsi_cpu_notif_remove(vscsi);
1053 1061
1054 virtscsi_remove_vqs(vdev); 1062 virtscsi_remove_vqs(vdev);
1055 scsi_host_put(shost); 1063 scsi_host_put(shost);
@@ -1061,7 +1069,7 @@ static int virtscsi_freeze(struct virtio_device *vdev)
1061 struct Scsi_Host *sh = virtio_scsi_host(vdev); 1069 struct Scsi_Host *sh = virtio_scsi_host(vdev);
1062 struct virtio_scsi *vscsi = shost_priv(sh); 1070 struct virtio_scsi *vscsi = shost_priv(sh);
1063 1071
1064 unregister_hotcpu_notifier(&vscsi->nb); 1072 virtscsi_cpu_notif_remove(vscsi);
1065 virtscsi_remove_vqs(vdev); 1073 virtscsi_remove_vqs(vdev);
1066 return 0; 1074 return 0;
1067} 1075}
@@ -1076,12 +1084,11 @@ static int virtscsi_restore(struct virtio_device *vdev)
1076 if (err) 1084 if (err)
1077 return err; 1085 return err;
1078 1086
1079 err = register_hotcpu_notifier(&vscsi->nb); 1087 err = virtscsi_cpu_notif_add(vscsi);
1080 if (err) { 1088 if (err) {
1081 vdev->config->del_vqs(vdev); 1089 vdev->config->del_vqs(vdev);
1082 return err; 1090 return err;
1083 } 1091 }
1084
1085 virtio_device_ready(vdev); 1092 virtio_device_ready(vdev);
1086 1093
1087 if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) 1094 if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
@@ -1136,6 +1143,16 @@ static int __init init(void)
1136 pr_err("mempool_create() for virtscsi_cmd_pool failed\n"); 1143 pr_err("mempool_create() for virtscsi_cmd_pool failed\n");
1137 goto error; 1144 goto error;
1138 } 1145 }
1146 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
1147 "scsi/virtio:online",
1148 virtscsi_cpu_online, NULL);
1149 if (ret < 0)
1150 goto error;
1151 virtioscsi_online = ret;
1152 ret = cpuhp_setup_state_multi(CPUHP_VIRT_SCSI_DEAD, "scsi/virtio:dead",
1153 NULL, virtscsi_cpu_online);
1154 if (ret)
1155 goto error;
1139 ret = register_virtio_driver(&virtio_scsi_driver); 1156 ret = register_virtio_driver(&virtio_scsi_driver);
1140 if (ret < 0) 1157 if (ret < 0)
1141 goto error; 1158 goto error;
@@ -1151,12 +1168,17 @@ error:
1151 kmem_cache_destroy(virtscsi_cmd_cache); 1168 kmem_cache_destroy(virtscsi_cmd_cache);
1152 virtscsi_cmd_cache = NULL; 1169 virtscsi_cmd_cache = NULL;
1153 } 1170 }
1171 if (virtioscsi_online)
1172 cpuhp_remove_multi_state(virtioscsi_online);
1173 cpuhp_remove_multi_state(CPUHP_VIRT_SCSI_DEAD);
1154 return ret; 1174 return ret;
1155} 1175}
1156 1176
1157static void __exit fini(void) 1177static void __exit fini(void)
1158{ 1178{
1159 unregister_virtio_driver(&virtio_scsi_driver); 1179 unregister_virtio_driver(&virtio_scsi_driver);
1180 cpuhp_remove_multi_state(virtioscsi_online);
1181 cpuhp_remove_multi_state(CPUHP_VIRT_SCSI_DEAD);
1160 mempool_destroy(virtscsi_cmd_pool); 1182 mempool_destroy(virtscsi_cmd_pool);
1161 kmem_cache_destroy(virtscsi_cmd_cache); 1183 kmem_cache_destroy(virtscsi_cmd_cache);
1162} 1184}
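
Two details of the virtio_scsi conversion are worth spelling out. First, the fixed dead slot reuses virtscsi_cpu_online() as its teardown callback because the old notifier handled CPU_ONLINE and CPU_DEAD identically: both re-ran __virtscsi_set_affinity(vscsi, true). Second, unlike mvneta and virtio_net above, the instances are added with the calling variants, which invoke the startup callback immediately for CPUs that are already up; that is what replaces the virtscsi_set_affinity() call deleted from virtscsi_init(). Roughly, as a sketch, with vscsi and virtioscsi_online as in the hunk above:

/* With calls: virtscsi_cpu_online() runs now, once per CPU that has
 * already reached the state, so affinity hints are set up front. */
ret = cpuhp_state_add_instance(virtioscsi_online, &vscsi->node);

/* Without calls: the instance is only linked in; callbacks fire on
 * future hotplug transitions, and current CPUs are the caller's job. */
ret = cpuhp_state_add_instance_nocalls(virtioscsi_online, &vscsi->node);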
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index bfe6b2e10f3a..f3db11c24654 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -359,7 +359,7 @@ extern int acpi_processor_set_throttling(struct acpi_processor *pr,
359 * onlined/offlined. In such case the flags.throttling will be updated. 359 * onlined/offlined. In such case the flags.throttling will be updated.
360 */ 360 */
361extern void acpi_processor_reevaluate_tstate(struct acpi_processor *pr, 361extern void acpi_processor_reevaluate_tstate(struct acpi_processor *pr,
362 unsigned long action); 362 bool is_dead);
363extern const struct file_operations acpi_processor_throttling_fops; 363extern const struct file_operations acpi_processor_throttling_fops;
364extern void acpi_processor_throttling_init(void); 364extern void acpi_processor_throttling_init(void);
365#else 365#else
@@ -380,7 +380,7 @@ static inline int acpi_processor_set_throttling(struct acpi_processor *pr,
380} 380}
381 381
382static inline void acpi_processor_reevaluate_tstate(struct acpi_processor *pr, 382static inline void acpi_processor_reevaluate_tstate(struct acpi_processor *pr,
383 unsigned long action) {} 383 bool is_dead) {}
384 384
385static inline void acpi_processor_throttling_init(void) {} 385static inline void acpi_processor_throttling_init(void) {}
386#endif /* CONFIG_ACPI_CPU_FREQ_PSS */ 386#endif /* CONFIG_ACPI_CPU_FREQ_PSS */
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index ad4f1f33a74e..7572d9e9dced 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -61,17 +61,8 @@ struct notifier_block;
61#define CPU_DOWN_PREPARE 0x0005 /* CPU (unsigned)v going down */ 61#define CPU_DOWN_PREPARE 0x0005 /* CPU (unsigned)v going down */
62#define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */ 62#define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */
63#define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */ 63#define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */
64#define CPU_DYING 0x0008 /* CPU (unsigned)v not running any task,
65 * not handling interrupts, soon dead.
66 * Called on the dying cpu, interrupts
67 * are already disabled. Must not
68 * sleep, must not fail */
69#define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug 64#define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug
70 * lock is dropped */ 65 * lock is dropped */
71#define CPU_STARTING 0x000A /* CPU (unsigned)v soon running.
72 * Called on the new cpu, just before
73 * enabling interrupts. Must not sleep,
74 * must not fail */
75#define CPU_BROKEN 0x000B /* CPU (unsigned)v did not die properly, 66#define CPU_BROKEN 0x000B /* CPU (unsigned)v did not die properly,
76 * perhaps due to preemption. */ 67 * perhaps due to preemption. */
77 68
@@ -86,9 +77,6 @@ struct notifier_block;
86#define CPU_DOWN_PREPARE_FROZEN (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN) 77#define CPU_DOWN_PREPARE_FROZEN (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
87#define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN) 78#define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
88#define CPU_DEAD_FROZEN (CPU_DEAD | CPU_TASKS_FROZEN) 79#define CPU_DEAD_FROZEN (CPU_DEAD | CPU_TASKS_FROZEN)
89#define CPU_DYING_FROZEN (CPU_DYING | CPU_TASKS_FROZEN)
90#define CPU_STARTING_FROZEN (CPU_STARTING | CPU_TASKS_FROZEN)
91
92 80
93#ifdef CONFIG_SMP 81#ifdef CONFIG_SMP
94extern bool cpuhp_tasks_frozen; 82extern bool cpuhp_tasks_frozen;
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index eb445a4e2a83..7b6c446ee17f 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -16,15 +16,40 @@ enum cpuhp_state {
16 CPUHP_PERF_SUPERH, 16 CPUHP_PERF_SUPERH,
17 CPUHP_X86_HPET_DEAD, 17 CPUHP_X86_HPET_DEAD,
18 CPUHP_X86_APB_DEAD, 18 CPUHP_X86_APB_DEAD,
19 CPUHP_VIRT_NET_DEAD,
20 CPUHP_SLUB_DEAD,
21 CPUHP_MM_WRITEBACK_DEAD,
22 CPUHP_SOFTIRQ_DEAD,
23 CPUHP_NET_MVNETA_DEAD,
24 CPUHP_CPUIDLE_DEAD,
25 CPUHP_ARM64_FPSIMD_DEAD,
26 CPUHP_ARM_OMAP_WAKE_DEAD,
27 CPUHP_IRQ_POLL_DEAD,
28 CPUHP_BLOCK_SOFTIRQ_DEAD,
29 CPUHP_VIRT_SCSI_DEAD,
30 CPUHP_ACPI_CPUDRV_DEAD,
31 CPUHP_S390_PFAULT_DEAD,
32 CPUHP_BLK_MQ_DEAD,
19 CPUHP_WORKQUEUE_PREP, 33 CPUHP_WORKQUEUE_PREP,
20 CPUHP_POWER_NUMA_PREPARE, 34 CPUHP_POWER_NUMA_PREPARE,
21 CPUHP_HRTIMERS_PREPARE, 35 CPUHP_HRTIMERS_PREPARE,
22 CPUHP_PROFILE_PREPARE, 36 CPUHP_PROFILE_PREPARE,
23 CPUHP_X2APIC_PREPARE, 37 CPUHP_X2APIC_PREPARE,
24 CPUHP_SMPCFD_PREPARE, 38 CPUHP_SMPCFD_PREPARE,
39 CPUHP_RELAY_PREPARE,
40 CPUHP_SLAB_PREPARE,
41 CPUHP_MD_RAID5_PREPARE,
25 CPUHP_RCUTREE_PREP, 42 CPUHP_RCUTREE_PREP,
43 CPUHP_CPUIDLE_COUPLED_PREPARE,
44 CPUHP_POWERPC_PMAC_PREPARE,
45 CPUHP_POWERPC_MMU_CTX_PREPARE,
26 CPUHP_NOTIFY_PREPARE, 46 CPUHP_NOTIFY_PREPARE,
47 CPUHP_ARM_SHMOBILE_SCU_PREPARE,
48 CPUHP_SH_SH3X_PREPARE,
49 CPUHP_BLK_MQ_PREPARE,
27 CPUHP_TIMERS_DEAD, 50 CPUHP_TIMERS_DEAD,
51 CPUHP_NOTF_ERR_INJ_PREPARE,
52 CPUHP_MIPS_SOC_PREPARE,
28 CPUHP_BRINGUP_CPU, 53 CPUHP_BRINGUP_CPU,
29 CPUHP_AP_IDLE_DEAD, 54 CPUHP_AP_IDLE_DEAD,
30 CPUHP_AP_OFFLINE, 55 CPUHP_AP_OFFLINE,
@@ -72,7 +97,6 @@ enum cpuhp_state {
72 CPUHP_AP_ARM64_ISNDEP_STARTING, 97 CPUHP_AP_ARM64_ISNDEP_STARTING,
73 CPUHP_AP_SMPCFD_DYING, 98 CPUHP_AP_SMPCFD_DYING,
74 CPUHP_AP_X86_TBOOT_DYING, 99 CPUHP_AP_X86_TBOOT_DYING,
75 CPUHP_AP_NOTIFY_STARTING,
76 CPUHP_AP_ONLINE, 100 CPUHP_AP_ONLINE,
77 CPUHP_TEARDOWN_CPU, 101 CPUHP_TEARDOWN_CPU,
78 CPUHP_AP_ONLINE_IDLE, 102 CPUHP_AP_ONLINE_IDLE,
@@ -103,7 +127,7 @@ enum cpuhp_state {
103 127
104int __cpuhp_setup_state(enum cpuhp_state state, const char *name, bool invoke, 128int __cpuhp_setup_state(enum cpuhp_state state, const char *name, bool invoke,
105 int (*startup)(unsigned int cpu), 129 int (*startup)(unsigned int cpu),
106 int (*teardown)(unsigned int cpu)); 130 int (*teardown)(unsigned int cpu), bool multi_instance);
107 131
108/** 132/**
109 * cpuhp_setup_state - Setup hotplug state callbacks with calling the callbacks 133 * cpuhp_setup_state - Setup hotplug state callbacks with calling the callbacks
@@ -120,7 +144,7 @@ static inline int cpuhp_setup_state(enum cpuhp_state state,
120 int (*startup)(unsigned int cpu), 144 int (*startup)(unsigned int cpu),
121 int (*teardown)(unsigned int cpu)) 145 int (*teardown)(unsigned int cpu))
122{ 146{
123 return __cpuhp_setup_state(state, name, true, startup, teardown); 147 return __cpuhp_setup_state(state, name, true, startup, teardown, false);
124} 148}
125 149
126/** 150/**
@@ -139,7 +163,66 @@ static inline int cpuhp_setup_state_nocalls(enum cpuhp_state state,
139 int (*startup)(unsigned int cpu), 163 int (*startup)(unsigned int cpu),
140 int (*teardown)(unsigned int cpu)) 164 int (*teardown)(unsigned int cpu))
141{ 165{
142 return __cpuhp_setup_state(state, name, false, startup, teardown); 166 return __cpuhp_setup_state(state, name, false, startup, teardown,
167 false);
168}
169
170/**
171 * cpuhp_setup_state_multi - Add callbacks for multi state
172 * @state: The state for which the calls are installed
173 * @name: Name of the callback.
174 * @startup: startup callback function
175 * @teardown: teardown callback function
176 *
 177 * Sets the internal multi_instance flag and prepares a state to work as a
 178 * multi-instance callback. No callbacks are invoked at this point. The
 179 * callbacks are invoked once an instance for this state is registered via
180 * @cpuhp_state_add_instance or @cpuhp_state_add_instance_nocalls.
181 */
182static inline int cpuhp_setup_state_multi(enum cpuhp_state state,
183 const char *name,
184 int (*startup)(unsigned int cpu,
185 struct hlist_node *node),
186 int (*teardown)(unsigned int cpu,
187 struct hlist_node *node))
188{
189 return __cpuhp_setup_state(state, name, false,
190 (void *) startup,
191 (void *) teardown, true);
192}
193
194int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
195 bool invoke);
196
197/**
198 * cpuhp_state_add_instance - Add an instance for a state and invoke startup
199 * callback.
200 * @state: The state for which the instance is installed
201 * @node: The node for this individual state.
202 *
203 * Installs the instance for the @state and invokes the startup callback on
204 * the present cpus which have already reached the @state. The @state must have
205 * been earlier marked as multi-instance by @cpuhp_setup_state_multi.
206 */
207static inline int cpuhp_state_add_instance(enum cpuhp_state state,
208 struct hlist_node *node)
209{
210 return __cpuhp_state_add_instance(state, node, true);
211}
212
213/**
214 * cpuhp_state_add_instance_nocalls - Add an instance for a state without
215 * invoking the startup callback.
216 * @state: The state for which the instance is installed
217 * @node: The node for this individual state.
218 *
 219 * Installs the instance for the @state. The @state must have been earlier
220 * marked as multi-instance by @cpuhp_setup_state_multi.
221 */
222static inline int cpuhp_state_add_instance_nocalls(enum cpuhp_state state,
223 struct hlist_node *node)
224{
225 return __cpuhp_state_add_instance(state, node, false);
143} 226}
144 227
145void __cpuhp_remove_state(enum cpuhp_state state, bool invoke); 228void __cpuhp_remove_state(enum cpuhp_state state, bool invoke);
@@ -166,6 +249,51 @@ static inline void cpuhp_remove_state_nocalls(enum cpuhp_state state)
166 __cpuhp_remove_state(state, false); 249 __cpuhp_remove_state(state, false);
167} 250}
168 251
252/**
253 * cpuhp_remove_multi_state - Remove hotplug multi state callback
254 * @state: The state for which the calls are removed
255 *
256 * Removes the callback functions from a multi state. This is the reverse of
257 * cpuhp_setup_state_multi(). All instances should have been removed before
258 * invoking this function.
259 */
260static inline void cpuhp_remove_multi_state(enum cpuhp_state state)
261{
262 __cpuhp_remove_state(state, false);
263}
264
265int __cpuhp_state_remove_instance(enum cpuhp_state state,
266 struct hlist_node *node, bool invoke);
267
268/**
269 * cpuhp_state_remove_instance - Remove hotplug instance from state and invoke
270 * the teardown callback
271 * @state: The state from which the instance is removed
272 * @node: The node for this individual state.
273 *
274 * Removes the instance and invokes the teardown callback on the present cpus
275 * which have already reached the @state.
276 */
277static inline int cpuhp_state_remove_instance(enum cpuhp_state state,
278 struct hlist_node *node)
279{
280 return __cpuhp_state_remove_instance(state, node, true);
281}
282
283/**
284 * cpuhp_state_remove_instance_nocalls - Remove hotplug instance from state
 285 * without invoking the teardown callback
286 * @state: The state from which the instance is removed
287 * @node: The node for this individual state.
288 *
289 * Removes the instance without invoking the teardown callback.
290 */
291static inline int cpuhp_state_remove_instance_nocalls(enum cpuhp_state state,
292 struct hlist_node *node)
293{
294 return __cpuhp_state_remove_instance(state, node, false);
295}
296
169#ifdef CONFIG_SMP 297#ifdef CONFIG_SMP
170void cpuhp_online_idle(enum cpuhp_state state); 298void cpuhp_online_idle(enum cpuhp_state state);
171#else 299#else
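
Taken together, the documentation added above describes a four-step lifecycle for multi-instance states. A compact sketch, assuming a hypothetical fixed slot CPUHP_EXAMPLE_PREPARE and ex_* placeholder names:

#include <linux/cpuhotplug.h>
#include <linux/init.h>

struct ex_dev {
	struct hlist_node node;
};

static int ex_prepare(unsigned int cpu, struct hlist_node *node) { return 0; }
static int ex_dead(unsigned int cpu, struct hlist_node *node)    { return 0; }

static int __init ex_driver_init(void)
{
	/* Marks the (hypothetical) fixed slot multi-instance; nothing
	 * is invoked yet. */
	return cpuhp_setup_state_multi(CPUHP_EXAMPLE_PREPARE,
				       "example:prepare",
				       ex_prepare, ex_dead);
}

static int ex_probe(struct ex_dev *dev)
{
	/* Runs ex_prepare() for @dev on all CPUs already past the state. */
	return cpuhp_state_add_instance(CPUHP_EXAMPLE_PREPARE, &dev->node);
}

static void ex_remove(struct ex_dev *dev)
{
	/* Runs ex_dead() for @dev on those same CPUs. */
	cpuhp_state_remove_instance(CPUHP_EXAMPLE_PREPARE, &dev->node);
}

static void __exit ex_driver_exit(void)
{
	/* Legal only once every instance has been removed. */
	cpuhp_remove_multi_state(CPUHP_EXAMPLE_PREPARE);
}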
diff --git a/include/linux/padata.h b/include/linux/padata.h
index 113ee626a4dc..0f9e567d5e15 100644
--- a/include/linux/padata.h
+++ b/include/linux/padata.h
@@ -151,7 +151,7 @@ struct parallel_data {
151 * @flags: padata flags. 151 * @flags: padata flags.
152 */ 152 */
153struct padata_instance { 153struct padata_instance {
154 struct notifier_block cpu_notifier; 154 struct hlist_node node;
155 struct workqueue_struct *wq; 155 struct workqueue_struct *wq;
156 struct parallel_data *pd; 156 struct parallel_data *pd;
157 struct padata_cpumask cpumask; 157 struct padata_cpumask cpumask;
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 9ff07d3fc8de..8462da266089 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -116,7 +116,7 @@ struct arm_pmu {
116 DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS); 116 DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
117 struct platform_device *plat_device; 117 struct platform_device *plat_device;
118 struct pmu_hw_events __percpu *hw_events; 118 struct pmu_hw_events __percpu *hw_events;
119 struct list_head entry; 119 struct hlist_node node;
120 struct notifier_block cpu_pm_nb; 120 struct notifier_block cpu_pm_nb;
121 /* the attr_groups array must be NULL-terminated */ 121 /* the attr_groups array must be NULL-terminated */
122 const struct attribute_group *attr_groups[ARMPMU_NR_ATTR_GROUPS + 1]; 122 const struct attribute_group *attr_groups[ARMPMU_NR_ATTR_GROUPS + 1];
diff --git a/include/linux/relay.h b/include/linux/relay.h
index d7c8359693c6..ecbb34a382b8 100644
--- a/include/linux/relay.h
+++ b/include/linux/relay.h
@@ -19,6 +19,7 @@
19#include <linux/fs.h> 19#include <linux/fs.h>
20#include <linux/poll.h> 20#include <linux/poll.h>
21#include <linux/kref.h> 21#include <linux/kref.h>
22#include <linux/percpu.h>
22 23
23/* 24/*
24 * Tracks changes to rchan/rchan_buf structs 25 * Tracks changes to rchan/rchan_buf structs
@@ -63,7 +64,7 @@ struct rchan
63 struct kref kref; /* channel refcount */ 64 struct kref kref; /* channel refcount */
64 void *private_data; /* for user-defined data */ 65 void *private_data; /* for user-defined data */
65 size_t last_toobig; /* tried to log event > subbuf size */ 66 size_t last_toobig; /* tried to log event > subbuf size */
66 struct rchan_buf *buf[NR_CPUS]; /* per-cpu channel buffers */ 67 struct rchan_buf ** __percpu buf; /* per-cpu channel buffers */
67 int is_global; /* One global buffer ? */ 68 int is_global; /* One global buffer ? */
68 struct list_head list; /* for channel list */ 69 struct list_head list; /* for channel list */
69 struct dentry *parent; /* parent dentry passed to open */ 70 struct dentry *parent; /* parent dentry passed to open */
@@ -204,7 +205,7 @@ static inline void relay_write(struct rchan *chan,
204 struct rchan_buf *buf; 205 struct rchan_buf *buf;
205 206
206 local_irq_save(flags); 207 local_irq_save(flags);
207 buf = chan->buf[smp_processor_id()]; 208 buf = *this_cpu_ptr(chan->buf);
208 if (unlikely(buf->offset + length > chan->subbuf_size)) 209 if (unlikely(buf->offset + length > chan->subbuf_size))
209 length = relay_switch_subbuf(buf, length); 210 length = relay_switch_subbuf(buf, length);
210 memcpy(buf->data + buf->offset, data, length); 211 memcpy(buf->data + buf->offset, data, length);
@@ -230,12 +231,12 @@ static inline void __relay_write(struct rchan *chan,
230{ 231{
231 struct rchan_buf *buf; 232 struct rchan_buf *buf;
232 233
233 buf = chan->buf[get_cpu()]; 234 buf = *get_cpu_ptr(chan->buf);
234 if (unlikely(buf->offset + length > buf->chan->subbuf_size)) 235 if (unlikely(buf->offset + length > buf->chan->subbuf_size))
235 length = relay_switch_subbuf(buf, length); 236 length = relay_switch_subbuf(buf, length);
236 memcpy(buf->data + buf->offset, data, length); 237 memcpy(buf->data + buf->offset, data, length);
237 buf->offset += length; 238 buf->offset += length;
238 put_cpu(); 239 put_cpu_ptr(chan->buf);
239} 240}
240 241
241/** 242/**
@@ -251,17 +252,19 @@ static inline void __relay_write(struct rchan *chan,
251 */ 252 */
252static inline void *relay_reserve(struct rchan *chan, size_t length) 253static inline void *relay_reserve(struct rchan *chan, size_t length)
253{ 254{
254 void *reserved; 255 void *reserved = NULL;
255 struct rchan_buf *buf = chan->buf[smp_processor_id()]; 256 struct rchan_buf *buf = *get_cpu_ptr(chan->buf);
256 257
257 if (unlikely(buf->offset + length > buf->chan->subbuf_size)) { 258 if (unlikely(buf->offset + length > buf->chan->subbuf_size)) {
258 length = relay_switch_subbuf(buf, length); 259 length = relay_switch_subbuf(buf, length);
259 if (!length) 260 if (!length)
260 return NULL; 261 goto end;
261 } 262 }
262 reserved = buf->data + buf->offset; 263 reserved = buf->data + buf->offset;
263 buf->offset += length; 264 buf->offset += length;
264 265
266end:
267 put_cpu_ptr(chan->buf);
265 return reserved; 268 return reserved;
266} 269}
267 270
@@ -285,5 +288,11 @@ static inline void subbuf_start_reserve(struct rchan_buf *buf,
285 */ 288 */
286extern const struct file_operations relay_file_operations; 289extern const struct file_operations relay_file_operations;
287 290
291#ifdef CONFIG_RELAY
292int relay_prepare_cpu(unsigned int cpu);
293#else
294#define relay_prepare_cpu NULL
295#endif
296
288#endif /* _LINUX_RELAY_H */ 297#endif /* _LINUX_RELAY_H */
289 298
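The per-cpu conversion also settles how writers pin their buffer: relay_write() already runs under local_irq_save() and can use this_cpu_ptr(), while __relay_write() and relay_reserve() only need migration blocked, hence the get_cpu_ptr()/put_cpu_ptr() bracket. A simplified sketch of the pattern (unlike the real writers it drops data instead of switching sub-buffers):

	static void foo_write_local(struct rchan *chan, const void *data,
				    size_t len)
	{
		struct rchan_buf *buf;

		buf = *get_cpu_ptr(chan->buf);	/* preemption off, no migration */
		if (buf->offset + len <= chan->subbuf_size) {
			memcpy(buf->data + buf->offset, data, len);
			buf->offset += len;
		}
		put_cpu_ptr(chan->buf);
	}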
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 4293808d8cfb..084b12bad198 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -650,4 +650,12 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
650unsigned int kmem_cache_size(struct kmem_cache *s); 650unsigned int kmem_cache_size(struct kmem_cache *s);
651void __init kmem_cache_init_late(void); 651void __init kmem_cache_init_late(void);
652 652
653#if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
654int slab_prepare_cpu(unsigned int cpu);
655int slab_dead_cpu(unsigned int cpu);
656#else
657#define slab_prepare_cpu NULL
658#define slab_dead_cpu NULL
659#endif
660
653#endif /* _LINUX_SLAB_H */ 661#endif /* _LINUX_SLAB_H */
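
The NULL fallbacks are load-bearing: they let the static state table in kernel/cpu.c reference slab_prepare_cpu()/slab_dead_cpu() unconditionally, since a table entry whose callback is NULL is simply skipped. The skeleton of the pattern, with foo standing in for any subsystem and CPUHP_FOO_PREPARE a made-up state:

	#ifdef CONFIG_FOO
	int foo_prepare_cpu(unsigned int cpu);
	#else
	#define foo_prepare_cpu		NULL	/* entry becomes a no-op */
	#endif

	/* compiles and runs either way: */
	[CPUHP_FOO_PREPARE] = {
		.name		 = "foo:prepare",
		.startup.single	 = foo_prepare_cpu,
		.teardown.single = NULL,
	},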
diff --git a/include/trace/events/cpuhp.h b/include/trace/events/cpuhp.h
index a72bd93ec7e5..996953db91d7 100644
--- a/include/trace/events/cpuhp.h
+++ b/include/trace/events/cpuhp.h
@@ -33,6 +33,34 @@ TRACE_EVENT(cpuhp_enter,
33 __entry->cpu, __entry->target, __entry->idx, __entry->fun) 33 __entry->cpu, __entry->target, __entry->idx, __entry->fun)
34); 34);
35 35
36TRACE_EVENT(cpuhp_multi_enter,
37
38 TP_PROTO(unsigned int cpu,
39 int target,
40 int idx,
41 int (*fun)(unsigned int, struct hlist_node *),
42 struct hlist_node *node),
43
44 TP_ARGS(cpu, target, idx, fun, node),
45
46 TP_STRUCT__entry(
47 __field( unsigned int, cpu )
48 __field( int, target )
49 __field( int, idx )
50 __field( void *, fun )
51 ),
52
53 TP_fast_assign(
54 __entry->cpu = cpu;
55 __entry->target = target;
56 __entry->idx = idx;
57 __entry->fun = fun;
58 ),
59
60 TP_printk("cpu: %04u target: %3d step: %3d (%pf)",
61 __entry->cpu, __entry->target, __entry->idx, __entry->fun)
62);
63
36TRACE_EVENT(cpuhp_exit, 64TRACE_EVENT(cpuhp_exit,
37 65
38 TP_PROTO(unsigned int cpu, 66 TP_PROTO(unsigned int cpu,
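
Given the TP_printk() template of cpuhp_multi_enter above, an event renders roughly as below; the numbers and callback are invented for illustration, only the layout follows from the format string:

	cpuhp_multi_enter: cpu: 0001 target: 140 step: 122 (padata_cpu_online)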
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 92c2451db415..5df20d6d1520 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -23,6 +23,8 @@
23#include <linux/tick.h> 23#include <linux/tick.h>
24#include <linux/irq.h> 24#include <linux/irq.h>
25#include <linux/smpboot.h> 25#include <linux/smpboot.h>
26#include <linux/relay.h>
27#include <linux/slab.h>
26 28
27#include <trace/events/power.h> 29#include <trace/events/power.h>
28#define CREATE_TRACE_POINTS 30#define CREATE_TRACE_POINTS
@@ -37,8 +39,9 @@
37 * @thread: Pointer to the hotplug thread 39 * @thread: Pointer to the hotplug thread
38 * @should_run: Thread should execute 40 * @should_run: Thread should execute
39 * @rollback: Perform a rollback 41 * @rollback: Perform a rollback
40 * @cb_stat: The state for a single callback (install/uninstall) 42 * @single: Single callback invocation
41 * @cb: Single callback function (install/uninstall) 43 * @bringup: Single callback bringup or teardown selector
44 * @cb_state: The state for a single callback (install/uninstall)
42 * @result: Result of the operation 45 * @result: Result of the operation
43 * @done: Signal completion to the issuer of the task 46 * @done: Signal completion to the issuer of the task
44 */ 47 */
@@ -49,8 +52,10 @@ struct cpuhp_cpu_state {
49 struct task_struct *thread; 52 struct task_struct *thread;
50 bool should_run; 53 bool should_run;
51 bool rollback; 54 bool rollback;
55 bool single;
56 bool bringup;
57 struct hlist_node *node;
52 enum cpuhp_state cb_state; 58 enum cpuhp_state cb_state;
53 int (*cb)(unsigned int cpu);
54 int result; 59 int result;
55 struct completion done; 60 struct completion done;
56#endif 61#endif
@@ -68,35 +73,103 @@ static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);
68 * @cant_stop: Bringup/teardown can't be stopped at this step 73 * @cant_stop: Bringup/teardown can't be stopped at this step
69 */ 74 */
70struct cpuhp_step { 75struct cpuhp_step {
71 const char *name; 76 const char *name;
72 int (*startup)(unsigned int cpu); 77 union {
73 int (*teardown)(unsigned int cpu); 78 int (*single)(unsigned int cpu);
74 bool skip_onerr; 79 int (*multi)(unsigned int cpu,
75 bool cant_stop; 80 struct hlist_node *node);
81 } startup;
82 union {
83 int (*single)(unsigned int cpu);
84 int (*multi)(unsigned int cpu,
85 struct hlist_node *node);
86 } teardown;
87 struct hlist_head list;
88 bool skip_onerr;
89 bool cant_stop;
90 bool multi_instance;
76}; 91};
77 92
78static DEFINE_MUTEX(cpuhp_state_mutex); 93static DEFINE_MUTEX(cpuhp_state_mutex);
79static struct cpuhp_step cpuhp_bp_states[]; 94static struct cpuhp_step cpuhp_bp_states[];
80static struct cpuhp_step cpuhp_ap_states[]; 95static struct cpuhp_step cpuhp_ap_states[];
81 96
97static bool cpuhp_is_ap_state(enum cpuhp_state state)
98{
99 /*
100 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
101 * purposes as that state is handled explicitly in cpu_down.
102 */
103 return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
104}
105
106static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
107{
108 struct cpuhp_step *sp;
109
110 sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
111 return sp + state;
112}
113
82/** 114/**
 83 * cpuhp_invoke_callback - Invoke the callbacks for a given state 115 * cpuhp_invoke_callback - Invoke the callbacks for a given state
84 * @cpu: The cpu for which the callback should be invoked 116 * @cpu: The cpu for which the callback should be invoked
85 * @step: The step in the state machine 117 * @step: The step in the state machine
86 * @cb: The callback function to invoke 118 * @bringup: True if the bringup callback should be invoked
87 * 119 *
88 * Called from cpu hotplug and from the state register machinery 120 * Called from cpu hotplug and from the state register machinery.
89 */ 121 */
90static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state step, 122static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
91 int (*cb)(unsigned int)) 123 bool bringup, struct hlist_node *node)
92{ 124{
93 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); 125 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
94 int ret = 0; 126 struct cpuhp_step *step = cpuhp_get_step(state);
95 127 int (*cbm)(unsigned int cpu, struct hlist_node *node);
96 if (cb) { 128 int (*cb)(unsigned int cpu);
97 trace_cpuhp_enter(cpu, st->target, step, cb); 129 int ret, cnt;
130
131 if (!step->multi_instance) {
132 cb = bringup ? step->startup.single : step->teardown.single;
133 if (!cb)
134 return 0;
135 trace_cpuhp_enter(cpu, st->target, state, cb);
98 ret = cb(cpu); 136 ret = cb(cpu);
99 trace_cpuhp_exit(cpu, st->state, step, ret); 137 trace_cpuhp_exit(cpu, st->state, state, ret);
138 return ret;
139 }
140 cbm = bringup ? step->startup.multi : step->teardown.multi;
141 if (!cbm)
142 return 0;
143
144 /* Single invocation for instance add/remove */
145 if (node) {
146 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
147 ret = cbm(cpu, node);
148 trace_cpuhp_exit(cpu, st->state, state, ret);
149 return ret;
150 }
151
152 /* State transition. Invoke on all instances */
153 cnt = 0;
154 hlist_for_each(node, &step->list) {
155 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
156 ret = cbm(cpu, node);
157 trace_cpuhp_exit(cpu, st->state, state, ret);
158 if (ret)
159 goto err;
160 cnt++;
161 }
162 return 0;
163err:
164 /* Rollback the instances if one failed */
165 cbm = !bringup ? step->startup.multi : step->teardown.multi;
166 if (!cbm)
167 return ret;
168
169 hlist_for_each(node, &step->list) {
170 if (!cnt--)
171 break;
172 cbm(cpu, node);
100 } 173 }
101 return ret; 174 return ret;
102} 175}
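
The cnt bookkeeping gives the multi-instance path partial-failure semantics: when the Nth instance fails during a full state transition, the opposite callback is replayed on exactly the N-1 instances that had already succeeded, in list order. A hedged sketch of a callback pair written against that contract (struct foo_inst is hypothetical):

	struct foo_inst {
		struct hlist_node node;
		void *ctx;
	};

	static int foo_inst_up(unsigned int cpu, struct hlist_node *node)
	{
		struct foo_inst *inst = hlist_entry(node, struct foo_inst, node);

		/* a non-zero return makes the loop above replay
		 * foo_inst_down() on the instances already brought up */
		return inst->ctx ? 0 : -ENOMEM;
	}

	static int foo_inst_down(unsigned int cpu, struct hlist_node *node)
	{
		/* doubles as the rollback path, so it must not fail */
		return 0;
	}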
@@ -260,10 +333,17 @@ void cpu_hotplug_disable(void)
260} 333}
261EXPORT_SYMBOL_GPL(cpu_hotplug_disable); 334EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
262 335
336static void __cpu_hotplug_enable(void)
337{
338 if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
339 return;
340 cpu_hotplug_disabled--;
341}
342
263void cpu_hotplug_enable(void) 343void cpu_hotplug_enable(void)
264{ 344{
265 cpu_maps_update_begin(); 345 cpu_maps_update_begin();
266 WARN_ON(--cpu_hotplug_disabled < 0); 346 __cpu_hotplug_enable();
267 cpu_maps_update_done(); 347 cpu_maps_update_done();
268} 348}
269EXPORT_SYMBOL_GPL(cpu_hotplug_enable); 349EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
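
Folding the check into __cpu_hotplug_enable() lets both this exported path and enable_nonboot_cpus() further down warn once on an imbalance instead of driving the counter negative. Callers keep the usual strict pairing:

	cpu_hotplug_disable();
	/* ... section that must not observe cpus coming or going ... */
	cpu_hotplug_enable();	/* a second enable would trip the WARN_ONCE */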
@@ -330,12 +410,6 @@ static int notify_online(unsigned int cpu)
330 return 0; 410 return 0;
331} 411}
332 412
333static int notify_starting(unsigned int cpu)
334{
335 cpu_notify(CPU_STARTING, cpu);
336 return 0;
337}
338
339static int bringup_wait_for_ap(unsigned int cpu) 413static int bringup_wait_for_ap(unsigned int cpu)
340{ 414{
341 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); 415 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
@@ -349,8 +423,16 @@ static int bringup_cpu(unsigned int cpu)
349 struct task_struct *idle = idle_thread_get(cpu); 423 struct task_struct *idle = idle_thread_get(cpu);
350 int ret; 424 int ret;
351 425
426 /*
427 * Some architectures have to walk the irq descriptors to
 428 * set up the vector space for the cpu which comes online.
429 * Prevent irq alloc/free across the bringup.
430 */
431 irq_lock_sparse();
432
352 /* Arch-specific enabling code. */ 433 /* Arch-specific enabling code. */
353 ret = __cpu_up(cpu, idle); 434 ret = __cpu_up(cpu, idle);
435 irq_unlock_sparse();
354 if (ret) { 436 if (ret) {
355 cpu_notify(CPU_UP_CANCELED, cpu); 437 cpu_notify(CPU_UP_CANCELED, cpu);
356 return ret; 438 return ret;
@@ -363,62 +445,55 @@ static int bringup_cpu(unsigned int cpu)
363/* 445/*
364 * Hotplug state machine related functions 446 * Hotplug state machine related functions
365 */ 447 */
366static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st, 448static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
367 struct cpuhp_step *steps)
368{ 449{
369 for (st->state++; st->state < st->target; st->state++) { 450 for (st->state++; st->state < st->target; st->state++) {
370 struct cpuhp_step *step = steps + st->state; 451 struct cpuhp_step *step = cpuhp_get_step(st->state);
371 452
372 if (!step->skip_onerr) 453 if (!step->skip_onerr)
373 cpuhp_invoke_callback(cpu, st->state, step->startup); 454 cpuhp_invoke_callback(cpu, st->state, true, NULL);
374 } 455 }
375} 456}
376 457
377static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, 458static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
378 struct cpuhp_step *steps, enum cpuhp_state target) 459 enum cpuhp_state target)
379{ 460{
380 enum cpuhp_state prev_state = st->state; 461 enum cpuhp_state prev_state = st->state;
381 int ret = 0; 462 int ret = 0;
382 463
383 for (; st->state > target; st->state--) { 464 for (; st->state > target; st->state--) {
384 struct cpuhp_step *step = steps + st->state; 465 ret = cpuhp_invoke_callback(cpu, st->state, false, NULL);
385
386 ret = cpuhp_invoke_callback(cpu, st->state, step->teardown);
387 if (ret) { 466 if (ret) {
388 st->target = prev_state; 467 st->target = prev_state;
389 undo_cpu_down(cpu, st, steps); 468 undo_cpu_down(cpu, st);
390 break; 469 break;
391 } 470 }
392 } 471 }
393 return ret; 472 return ret;
394} 473}
395 474
396static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st, 475static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
397 struct cpuhp_step *steps)
398{ 476{
399 for (st->state--; st->state > st->target; st->state--) { 477 for (st->state--; st->state > st->target; st->state--) {
400 struct cpuhp_step *step = steps + st->state; 478 struct cpuhp_step *step = cpuhp_get_step(st->state);
401 479
402 if (!step->skip_onerr) 480 if (!step->skip_onerr)
403 cpuhp_invoke_callback(cpu, st->state, step->teardown); 481 cpuhp_invoke_callback(cpu, st->state, false, NULL);
404 } 482 }
405} 483}
406 484
407static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, 485static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
408 struct cpuhp_step *steps, enum cpuhp_state target) 486 enum cpuhp_state target)
409{ 487{
410 enum cpuhp_state prev_state = st->state; 488 enum cpuhp_state prev_state = st->state;
411 int ret = 0; 489 int ret = 0;
412 490
413 while (st->state < target) { 491 while (st->state < target) {
414 struct cpuhp_step *step;
415
416 st->state++; 492 st->state++;
417 step = steps + st->state; 493 ret = cpuhp_invoke_callback(cpu, st->state, true, NULL);
418 ret = cpuhp_invoke_callback(cpu, st->state, step->startup);
419 if (ret) { 494 if (ret) {
420 st->target = prev_state; 495 st->target = prev_state;
421 undo_cpu_up(cpu, st, steps); 496 undo_cpu_up(cpu, st);
422 break; 497 break;
423 } 498 }
424 } 499 }
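
With the table lookup folded into cpuhp_invoke_callback(), the two loops are symmetric: on failure they reset st->target to prev_state and replay the opposite callbacks back to it. Traced by hand for a hypothetical bringup from state 10 towards 14 that fails at 13:

	/* cpuhp_up_callbacks():  startup 11 ok, 12 ok, 13 fails
	 * undo_cpu_up():         teardown 12, teardown 11, stop once
	 *                        st->state reaches st->target (10)
	 * the error from state 13 is handed back to the caller */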
@@ -447,13 +522,13 @@ static int cpuhp_ap_offline(unsigned int cpu, struct cpuhp_cpu_state *st)
447{ 522{
448 enum cpuhp_state target = max((int)st->target, CPUHP_TEARDOWN_CPU); 523 enum cpuhp_state target = max((int)st->target, CPUHP_TEARDOWN_CPU);
449 524
450 return cpuhp_down_callbacks(cpu, st, cpuhp_ap_states, target); 525 return cpuhp_down_callbacks(cpu, st, target);
451} 526}
452 527
453/* Execute the online startup callbacks. Used to be CPU_ONLINE */ 528/* Execute the online startup callbacks. Used to be CPU_ONLINE */
454static int cpuhp_ap_online(unsigned int cpu, struct cpuhp_cpu_state *st) 529static int cpuhp_ap_online(unsigned int cpu, struct cpuhp_cpu_state *st)
455{ 530{
456 return cpuhp_up_callbacks(cpu, st, cpuhp_ap_states, st->target); 531 return cpuhp_up_callbacks(cpu, st, st->target);
457} 532}
458 533
459/* 534/*
@@ -476,18 +551,20 @@ static void cpuhp_thread_fun(unsigned int cpu)
476 st->should_run = false; 551 st->should_run = false;
477 552
478 /* Single callback invocation for [un]install ? */ 553 /* Single callback invocation for [un]install ? */
479 if (st->cb) { 554 if (st->single) {
480 if (st->cb_state < CPUHP_AP_ONLINE) { 555 if (st->cb_state < CPUHP_AP_ONLINE) {
481 local_irq_disable(); 556 local_irq_disable();
482 ret = cpuhp_invoke_callback(cpu, st->cb_state, st->cb); 557 ret = cpuhp_invoke_callback(cpu, st->cb_state,
558 st->bringup, st->node);
483 local_irq_enable(); 559 local_irq_enable();
484 } else { 560 } else {
485 ret = cpuhp_invoke_callback(cpu, st->cb_state, st->cb); 561 ret = cpuhp_invoke_callback(cpu, st->cb_state,
562 st->bringup, st->node);
486 } 563 }
487 } else if (st->rollback) { 564 } else if (st->rollback) {
488 BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE); 565 BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
489 566
490 undo_cpu_down(cpu, st, cpuhp_ap_states); 567 undo_cpu_down(cpu, st);
491 /* 568 /*
492 * This is a momentary workaround to keep the notifier users 569 * This is a momentary workaround to keep the notifier users
493 * happy. Will go away once we got rid of the notifiers. 570 * happy. Will go away once we got rid of the notifiers.
@@ -509,8 +586,9 @@ static void cpuhp_thread_fun(unsigned int cpu)
509} 586}
510 587
511/* Invoke a single callback on a remote cpu */ 588/* Invoke a single callback on a remote cpu */
512static int cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, 589static int
513 int (*cb)(unsigned int)) 590cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
591 struct hlist_node *node)
514{ 592{
515 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); 593 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
516 594
@@ -522,10 +600,13 @@ static int cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state,
522 * we invoke the thread function directly. 600 * we invoke the thread function directly.
523 */ 601 */
524 if (!st->thread) 602 if (!st->thread)
525 return cpuhp_invoke_callback(cpu, state, cb); 603 return cpuhp_invoke_callback(cpu, state, bringup, node);
526 604
527 st->cb_state = state; 605 st->cb_state = state;
528 st->cb = cb; 606 st->single = true;
607 st->bringup = bringup;
608 st->node = node;
609
529 /* 610 /*
530 * Make sure the above stores are visible before should_run becomes 611 * Make sure the above stores are visible before should_run becomes
531 * true. Paired with the mb() above in cpuhp_thread_fun() 612 * true. Paired with the mb() above in cpuhp_thread_fun()
@@ -541,7 +622,7 @@ static int cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state,
541static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st) 622static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st)
542{ 623{
543 st->result = 0; 624 st->result = 0;
544 st->cb = NULL; 625 st->single = false;
545 /* 626 /*
546 * Make sure the above stores are visible before should_run becomes 627 * Make sure the above stores are visible before should_run becomes
547 * true. Paired with the mb() above in cpuhp_thread_fun() 628 * true. Paired with the mb() above in cpuhp_thread_fun()
@@ -674,12 +755,6 @@ static int notify_down_prepare(unsigned int cpu)
674 return err; 755 return err;
675} 756}
676 757
677static int notify_dying(unsigned int cpu)
678{
679 cpu_notify(CPU_DYING, cpu);
680 return 0;
681}
682
683/* Take this CPU down. */ 758/* Take this CPU down. */
684static int take_cpu_down(void *_param) 759static int take_cpu_down(void *_param)
685{ 760{
@@ -692,12 +767,16 @@ static int take_cpu_down(void *_param)
692 if (err < 0) 767 if (err < 0)
693 return err; 768 return err;
694 769
770 /*
771 * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
772 * do this step again.
773 */
774 WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
775 st->state--;
695 /* Invoke the former CPU_DYING callbacks */ 776 /* Invoke the former CPU_DYING callbacks */
696 for (; st->state > target; st->state--) { 777 for (; st->state > target; st->state--)
697 struct cpuhp_step *step = cpuhp_ap_states + st->state; 778 cpuhp_invoke_callback(cpu, st->state, false, NULL);
698 779
699 cpuhp_invoke_callback(cpu, st->state, step->teardown);
700 }
701 /* Give up timekeeping duties */ 780 /* Give up timekeeping duties */
702 tick_handover_do_timer(); 781 tick_handover_do_timer();
703 /* Park the stopper thread */ 782 /* Park the stopper thread */
@@ -734,7 +813,7 @@ static int takedown_cpu(unsigned int cpu)
734 BUG_ON(cpu_online(cpu)); 813 BUG_ON(cpu_online(cpu));
735 814
736 /* 815 /*
737 * The migration_call() CPU_DYING callback will have removed all 816 * The CPUHP_AP_SCHED_MIGRATE_DYING callback will have removed all
738 * runnable tasks from the cpu, there's only the idle task left now 817 * runnable tasks from the cpu, there's only the idle task left now
739 * that the migration thread is done doing the stop_machine thing. 818 * that the migration thread is done doing the stop_machine thing.
740 * 819 *
@@ -787,7 +866,6 @@ void cpuhp_report_idle_dead(void)
787#define notify_down_prepare NULL 866#define notify_down_prepare NULL
788#define takedown_cpu NULL 867#define takedown_cpu NULL
789#define notify_dead NULL 868#define notify_dead NULL
790#define notify_dying NULL
791#endif 869#endif
792 870
793#ifdef CONFIG_HOTPLUG_CPU 871#ifdef CONFIG_HOTPLUG_CPU
@@ -836,7 +914,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
836 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need 914 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
837 * to do the further cleanups. 915 * to do the further cleanups.
838 */ 916 */
839 ret = cpuhp_down_callbacks(cpu, st, cpuhp_bp_states, target); 917 ret = cpuhp_down_callbacks(cpu, st, target);
840 if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) { 918 if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
841 st->target = prev_state; 919 st->target = prev_state;
842 st->rollback = true; 920 st->rollback = true;
@@ -877,10 +955,9 @@ EXPORT_SYMBOL(cpu_down);
877#endif /*CONFIG_HOTPLUG_CPU*/ 955#endif /*CONFIG_HOTPLUG_CPU*/
878 956
879/** 957/**
880 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers 958 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
881 * @cpu: cpu that just started 959 * @cpu: cpu that just started
882 * 960 *
883 * This function calls the cpu_chain notifiers with CPU_STARTING.
884 * It must be called by the arch code on the new cpu, before the new cpu 961 * It must be called by the arch code on the new cpu, before the new cpu
885 * enables interrupts and before the "boot" cpu returns from __cpu_up(). 962 * enables interrupts and before the "boot" cpu returns from __cpu_up().
886 */ 963 */
@@ -891,11 +968,8 @@ void notify_cpu_starting(unsigned int cpu)
891 968
892 rcu_cpu_starting(cpu); /* Enables RCU usage on this CPU. */ 969 rcu_cpu_starting(cpu); /* Enables RCU usage on this CPU. */
893 while (st->state < target) { 970 while (st->state < target) {
894 struct cpuhp_step *step;
895
896 st->state++; 971 st->state++;
897 step = cpuhp_ap_states + st->state; 972 cpuhp_invoke_callback(cpu, st->state, true, NULL);
898 cpuhp_invoke_callback(cpu, st->state, step->startup);
899 } 973 }
900} 974}
901 975
@@ -980,7 +1054,7 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
980 * responsible for bringing it up to the target state. 1054 * responsible for bringing it up to the target state.
981 */ 1055 */
982 target = min((int)target, CPUHP_BRINGUP_CPU); 1056 target = min((int)target, CPUHP_BRINGUP_CPU);
983 ret = cpuhp_up_callbacks(cpu, st, cpuhp_bp_states, target); 1057 ret = cpuhp_up_callbacks(cpu, st, target);
984out: 1058out:
985 cpu_hotplug_done(); 1059 cpu_hotplug_done();
986 return ret; 1060 return ret;
@@ -1083,7 +1157,7 @@ void enable_nonboot_cpus(void)
1083 1157
1084 /* Allow everyone to use the CPU hotplug again */ 1158 /* Allow everyone to use the CPU hotplug again */
1085 cpu_maps_update_begin(); 1159 cpu_maps_update_begin();
1086 WARN_ON(--cpu_hotplug_disabled < 0); 1160 __cpu_hotplug_enable();
1087 if (cpumask_empty(frozen_cpus)) 1161 if (cpumask_empty(frozen_cpus))
1088 goto out; 1162 goto out;
1089 1163
@@ -1172,40 +1246,50 @@ core_initcall(cpu_hotplug_pm_sync_init);
1172static struct cpuhp_step cpuhp_bp_states[] = { 1246static struct cpuhp_step cpuhp_bp_states[] = {
1173 [CPUHP_OFFLINE] = { 1247 [CPUHP_OFFLINE] = {
1174 .name = "offline", 1248 .name = "offline",
1175 .startup = NULL, 1249 .startup.single = NULL,
1176 .teardown = NULL, 1250 .teardown.single = NULL,
1177 }, 1251 },
1178#ifdef CONFIG_SMP 1252#ifdef CONFIG_SMP
1179 [CPUHP_CREATE_THREADS]= { 1253 [CPUHP_CREATE_THREADS]= {
1180 .name = "threads:create", 1254 .name = "threads:prepare",
1181 .startup = smpboot_create_threads, 1255 .startup.single = smpboot_create_threads,
1182 .teardown = NULL, 1256 .teardown.single = NULL,
1183 .cant_stop = true, 1257 .cant_stop = true,
1184 }, 1258 },
1185 [CPUHP_PERF_PREPARE] = { 1259 [CPUHP_PERF_PREPARE] = {
1186 .name = "perf prepare", 1260 .name = "perf:prepare",
1187 .startup = perf_event_init_cpu, 1261 .startup.single = perf_event_init_cpu,
1188 .teardown = perf_event_exit_cpu, 1262 .teardown.single = perf_event_exit_cpu,
1189 }, 1263 },
1190 [CPUHP_WORKQUEUE_PREP] = { 1264 [CPUHP_WORKQUEUE_PREP] = {
1191 .name = "workqueue prepare", 1265 .name = "workqueue:prepare",
1192 .startup = workqueue_prepare_cpu, 1266 .startup.single = workqueue_prepare_cpu,
1193 .teardown = NULL, 1267 .teardown.single = NULL,
1194 }, 1268 },
1195 [CPUHP_HRTIMERS_PREPARE] = { 1269 [CPUHP_HRTIMERS_PREPARE] = {
1196 .name = "hrtimers prepare", 1270 .name = "hrtimers:prepare",
1197 .startup = hrtimers_prepare_cpu, 1271 .startup.single = hrtimers_prepare_cpu,
1198 .teardown = hrtimers_dead_cpu, 1272 .teardown.single = hrtimers_dead_cpu,
1199 }, 1273 },
1200 [CPUHP_SMPCFD_PREPARE] = { 1274 [CPUHP_SMPCFD_PREPARE] = {
1201 .name = "SMPCFD prepare", 1275 .name = "smpcfd:prepare",
1202 .startup = smpcfd_prepare_cpu, 1276 .startup.single = smpcfd_prepare_cpu,
1203 .teardown = smpcfd_dead_cpu, 1277 .teardown.single = smpcfd_dead_cpu,
1278 },
1279 [CPUHP_RELAY_PREPARE] = {
1280 .name = "relay:prepare",
1281 .startup.single = relay_prepare_cpu,
1282 .teardown.single = NULL,
1283 },
1284 [CPUHP_SLAB_PREPARE] = {
1285 .name = "slab:prepare",
1286 .startup.single = slab_prepare_cpu,
1287 .teardown.single = slab_dead_cpu,
1204 }, 1288 },
1205 [CPUHP_RCUTREE_PREP] = { 1289 [CPUHP_RCUTREE_PREP] = {
1206 .name = "RCU-tree prepare", 1290 .name = "RCU/tree:prepare",
1207 .startup = rcutree_prepare_cpu, 1291 .startup.single = rcutree_prepare_cpu,
1208 .teardown = rcutree_dead_cpu, 1292 .teardown.single = rcutree_dead_cpu,
1209 }, 1293 },
1210 /* 1294 /*
1211 * Preparatory and dead notifiers. Will be replaced once the notifiers 1295 * Preparatory and dead notifiers. Will be replaced once the notifiers
@@ -1213,8 +1297,8 @@ static struct cpuhp_step cpuhp_bp_states[] = {
1213 */ 1297 */
1214 [CPUHP_NOTIFY_PREPARE] = { 1298 [CPUHP_NOTIFY_PREPARE] = {
1215 .name = "notify:prepare", 1299 .name = "notify:prepare",
1216 .startup = notify_prepare, 1300 .startup.single = notify_prepare,
1217 .teardown = notify_dead, 1301 .teardown.single = notify_dead,
1218 .skip_onerr = true, 1302 .skip_onerr = true,
1219 .cant_stop = true, 1303 .cant_stop = true,
1220 }, 1304 },
@@ -1224,20 +1308,21 @@ static struct cpuhp_step cpuhp_bp_states[] = {
 1224 * otherwise an RCU stall occurs. 1308 * otherwise an RCU stall occurs.
1225 */ 1309 */
1226 [CPUHP_TIMERS_DEAD] = { 1310 [CPUHP_TIMERS_DEAD] = {
1227 .name = "timers dead", 1311 .name = "timers:dead",
1228 .startup = NULL, 1312 .startup.single = NULL,
1229 .teardown = timers_dead_cpu, 1313 .teardown.single = timers_dead_cpu,
1230 }, 1314 },
1231 /* Kicks the plugged cpu into life */ 1315 /* Kicks the plugged cpu into life */
1232 [CPUHP_BRINGUP_CPU] = { 1316 [CPUHP_BRINGUP_CPU] = {
1233 .name = "cpu:bringup", 1317 .name = "cpu:bringup",
1234 .startup = bringup_cpu, 1318 .startup.single = bringup_cpu,
1235 .teardown = NULL, 1319 .teardown.single = NULL,
1236 .cant_stop = true, 1320 .cant_stop = true,
1237 }, 1321 },
1238 [CPUHP_AP_SMPCFD_DYING] = { 1322 [CPUHP_AP_SMPCFD_DYING] = {
1239 .startup = NULL, 1323 .name = "smpcfd:dying",
1240 .teardown = smpcfd_dying_cpu, 1324 .startup.single = NULL,
1325 .teardown.single = smpcfd_dying_cpu,
1241 }, 1326 },
1242 /* 1327 /*
 1243 * Handled on control processor until the plugged processor manages 1328 * Handled on control processor until the plugged processor manages
@@ -1245,8 +1330,8 @@ static struct cpuhp_step cpuhp_bp_states[] = {
1245 */ 1330 */
1246 [CPUHP_TEARDOWN_CPU] = { 1331 [CPUHP_TEARDOWN_CPU] = {
1247 .name = "cpu:teardown", 1332 .name = "cpu:teardown",
1248 .startup = NULL, 1333 .startup.single = NULL,
1249 .teardown = takedown_cpu, 1334 .teardown.single = takedown_cpu,
1250 .cant_stop = true, 1335 .cant_stop = true,
1251 }, 1336 },
1252#else 1337#else
@@ -1272,24 +1357,13 @@ static struct cpuhp_step cpuhp_ap_states[] = {
1272 /* First state is scheduler control. Interrupts are disabled */ 1357 /* First state is scheduler control. Interrupts are disabled */
1273 [CPUHP_AP_SCHED_STARTING] = { 1358 [CPUHP_AP_SCHED_STARTING] = {
1274 .name = "sched:starting", 1359 .name = "sched:starting",
1275 .startup = sched_cpu_starting, 1360 .startup.single = sched_cpu_starting,
1276 .teardown = sched_cpu_dying, 1361 .teardown.single = sched_cpu_dying,
1277 }, 1362 },
1278 [CPUHP_AP_RCUTREE_DYING] = { 1363 [CPUHP_AP_RCUTREE_DYING] = {
1279 .startup = NULL, 1364 .name = "RCU/tree:dying",
1280 .teardown = rcutree_dying_cpu, 1365 .startup.single = NULL,
1281 }, 1366 .teardown.single = rcutree_dying_cpu,
1282 /*
1283 * Low level startup/teardown notifiers. Run with interrupts
1284 * disabled. Will be removed once the notifiers are converted to
1285 * states.
1286 */
1287 [CPUHP_AP_NOTIFY_STARTING] = {
1288 .name = "notify:starting",
1289 .startup = notify_starting,
1290 .teardown = notify_dying,
1291 .skip_onerr = true,
1292 .cant_stop = true,
1293 }, 1367 },
1294 /* Entry state on starting. Interrupts enabled from here on. Transient 1368 /* Entry state on starting. Interrupts enabled from here on. Transient
 1295 * state for synchronization */ 1369 * state for synchronization */
@@ -1298,24 +1372,24 @@ static struct cpuhp_step cpuhp_ap_states[] = {
1298 }, 1372 },
1299 /* Handle smpboot threads park/unpark */ 1373 /* Handle smpboot threads park/unpark */
1300 [CPUHP_AP_SMPBOOT_THREADS] = { 1374 [CPUHP_AP_SMPBOOT_THREADS] = {
1301 .name = "smpboot:threads", 1375 .name = "smpboot/threads:online",
1302 .startup = smpboot_unpark_threads, 1376 .startup.single = smpboot_unpark_threads,
1303 .teardown = NULL, 1377 .teardown.single = NULL,
1304 }, 1378 },
1305 [CPUHP_AP_PERF_ONLINE] = { 1379 [CPUHP_AP_PERF_ONLINE] = {
1306 .name = "perf online", 1380 .name = "perf:online",
1307 .startup = perf_event_init_cpu, 1381 .startup.single = perf_event_init_cpu,
1308 .teardown = perf_event_exit_cpu, 1382 .teardown.single = perf_event_exit_cpu,
1309 }, 1383 },
1310 [CPUHP_AP_WORKQUEUE_ONLINE] = { 1384 [CPUHP_AP_WORKQUEUE_ONLINE] = {
1311 .name = "workqueue online", 1385 .name = "workqueue:online",
1312 .startup = workqueue_online_cpu, 1386 .startup.single = workqueue_online_cpu,
1313 .teardown = workqueue_offline_cpu, 1387 .teardown.single = workqueue_offline_cpu,
1314 }, 1388 },
1315 [CPUHP_AP_RCUTREE_ONLINE] = { 1389 [CPUHP_AP_RCUTREE_ONLINE] = {
1316 .name = "RCU-tree online", 1390 .name = "RCU/tree:online",
1317 .startup = rcutree_online_cpu, 1391 .startup.single = rcutree_online_cpu,
1318 .teardown = rcutree_offline_cpu, 1392 .teardown.single = rcutree_offline_cpu,
1319 }, 1393 },
1320 1394
1321 /* 1395 /*
@@ -1324,8 +1398,8 @@ static struct cpuhp_step cpuhp_ap_states[] = {
1324 */ 1398 */
1325 [CPUHP_AP_NOTIFY_ONLINE] = { 1399 [CPUHP_AP_NOTIFY_ONLINE] = {
1326 .name = "notify:online", 1400 .name = "notify:online",
1327 .startup = notify_online, 1401 .startup.single = notify_online,
1328 .teardown = notify_down_prepare, 1402 .teardown.single = notify_down_prepare,
1329 .skip_onerr = true, 1403 .skip_onerr = true,
1330 }, 1404 },
1331#endif 1405#endif
@@ -1337,16 +1411,16 @@ static struct cpuhp_step cpuhp_ap_states[] = {
1337 /* Last state is scheduler control setting the cpu active */ 1411 /* Last state is scheduler control setting the cpu active */
1338 [CPUHP_AP_ACTIVE] = { 1412 [CPUHP_AP_ACTIVE] = {
1339 .name = "sched:active", 1413 .name = "sched:active",
1340 .startup = sched_cpu_activate, 1414 .startup.single = sched_cpu_activate,
1341 .teardown = sched_cpu_deactivate, 1415 .teardown.single = sched_cpu_deactivate,
1342 }, 1416 },
1343#endif 1417#endif
1344 1418
1345 /* CPU is fully up and running. */ 1419 /* CPU is fully up and running. */
1346 [CPUHP_ONLINE] = { 1420 [CPUHP_ONLINE] = {
1347 .name = "online", 1421 .name = "online",
1348 .startup = NULL, 1422 .startup.single = NULL,
1349 .teardown = NULL, 1423 .teardown.single = NULL,
1350 }, 1424 },
1351}; 1425};
1352 1426
@@ -1358,54 +1432,42 @@ static int cpuhp_cb_check(enum cpuhp_state state)
1358 return 0; 1432 return 0;
1359} 1433}
1360 1434
1361static bool cpuhp_is_ap_state(enum cpuhp_state state)
1362{
1363 /*
1364 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
1365 * purposes as that state is handled explicitely in cpu_down.
1366 */
1367 return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
1368}
1369
1370static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
1371{
1372 struct cpuhp_step *sp;
1373
1374 sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
1375 return sp + state;
1376}
1377
1378static void cpuhp_store_callbacks(enum cpuhp_state state, 1435static void cpuhp_store_callbacks(enum cpuhp_state state,
1379 const char *name, 1436 const char *name,
1380 int (*startup)(unsigned int cpu), 1437 int (*startup)(unsigned int cpu),
1381 int (*teardown)(unsigned int cpu)) 1438 int (*teardown)(unsigned int cpu),
1439 bool multi_instance)
1382{ 1440{
1383 /* (Un)Install the callbacks for further cpu hotplug operations */ 1441 /* (Un)Install the callbacks for further cpu hotplug operations */
1384 struct cpuhp_step *sp; 1442 struct cpuhp_step *sp;
1385 1443
1386 mutex_lock(&cpuhp_state_mutex); 1444 mutex_lock(&cpuhp_state_mutex);
1387 sp = cpuhp_get_step(state); 1445 sp = cpuhp_get_step(state);
1388 sp->startup = startup; 1446 sp->startup.single = startup;
1389 sp->teardown = teardown; 1447 sp->teardown.single = teardown;
1390 sp->name = name; 1448 sp->name = name;
1449 sp->multi_instance = multi_instance;
1450 INIT_HLIST_HEAD(&sp->list);
1391 mutex_unlock(&cpuhp_state_mutex); 1451 mutex_unlock(&cpuhp_state_mutex);
1392} 1452}
1393 1453
1394static void *cpuhp_get_teardown_cb(enum cpuhp_state state) 1454static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
1395{ 1455{
1396 return cpuhp_get_step(state)->teardown; 1456 return cpuhp_get_step(state)->teardown.single;
1397} 1457}
1398 1458
1399/* 1459/*
1400 * Call the startup/teardown function for a step either on the AP or 1460 * Call the startup/teardown function for a step either on the AP or
1401 * on the current CPU. 1461 * on the current CPU.
1402 */ 1462 */
1403static int cpuhp_issue_call(int cpu, enum cpuhp_state state, 1463static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
1404 int (*cb)(unsigned int), bool bringup) 1464 struct hlist_node *node)
1405{ 1465{
1466 struct cpuhp_step *sp = cpuhp_get_step(state);
1406 int ret; 1467 int ret;
1407 1468
1408 if (!cb) 1469 if ((bringup && !sp->startup.single) ||
1470 (!bringup && !sp->teardown.single))
1409 return 0; 1471 return 0;
1410 /* 1472 /*
1411 * The non AP bound callbacks can fail on bringup. On teardown 1473 * The non AP bound callbacks can fail on bringup. On teardown
@@ -1413,11 +1475,11 @@ static int cpuhp_issue_call(int cpu, enum cpuhp_state state,
1413 */ 1475 */
1414#ifdef CONFIG_SMP 1476#ifdef CONFIG_SMP
1415 if (cpuhp_is_ap_state(state)) 1477 if (cpuhp_is_ap_state(state))
1416 ret = cpuhp_invoke_ap_callback(cpu, state, cb); 1478 ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
1417 else 1479 else
1418 ret = cpuhp_invoke_callback(cpu, state, cb); 1480 ret = cpuhp_invoke_callback(cpu, state, bringup, node);
1419#else 1481#else
1420 ret = cpuhp_invoke_callback(cpu, state, cb); 1482 ret = cpuhp_invoke_callback(cpu, state, bringup, node);
1421#endif 1483#endif
1422 BUG_ON(ret && !bringup); 1484 BUG_ON(ret && !bringup);
1423 return ret; 1485 return ret;
@@ -1429,13 +1491,10 @@ static int cpuhp_issue_call(int cpu, enum cpuhp_state state,
1429 * Note: The teardown callbacks for rollback are not allowed to fail! 1491 * Note: The teardown callbacks for rollback are not allowed to fail!
1430 */ 1492 */
1431static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state, 1493static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
1432 int (*teardown)(unsigned int cpu)) 1494 struct hlist_node *node)
1433{ 1495{
1434 int cpu; 1496 int cpu;
1435 1497
1436 if (!teardown)
1437 return;
1438
1439 /* Roll back the already executed steps on the other cpus */ 1498 /* Roll back the already executed steps on the other cpus */
1440 for_each_present_cpu(cpu) { 1499 for_each_present_cpu(cpu) {
1441 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); 1500 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
@@ -1446,7 +1505,7 @@ static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
1446 1505
1447 /* Did we invoke the startup call on that cpu ? */ 1506 /* Did we invoke the startup call on that cpu ? */
1448 if (cpustate >= state) 1507 if (cpustate >= state)
1449 cpuhp_issue_call(cpu, state, teardown, false); 1508 cpuhp_issue_call(cpu, state, false, node);
1450 } 1509 }
1451} 1510}
1452 1511
@@ -1473,6 +1532,52 @@ static int cpuhp_reserve_state(enum cpuhp_state state)
1473 return -ENOSPC; 1532 return -ENOSPC;
1474} 1533}
1475 1534
1535int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
1536 bool invoke)
1537{
1538 struct cpuhp_step *sp;
1539 int cpu;
1540 int ret;
1541
1542 sp = cpuhp_get_step(state);
1543 if (sp->multi_instance == false)
1544 return -EINVAL;
1545
1546 get_online_cpus();
1547
1548 if (!invoke || !sp->startup.multi)
1549 goto add_node;
1550
1551 /*
1552 * Try to call the startup callback for each present cpu
1553 * depending on the hotplug state of the cpu.
1554 */
1555 for_each_present_cpu(cpu) {
1556 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1557 int cpustate = st->state;
1558
1559 if (cpustate < state)
1560 continue;
1561
1562 ret = cpuhp_issue_call(cpu, state, true, node);
1563 if (ret) {
1564 if (sp->teardown.multi)
1565 cpuhp_rollback_install(cpu, state, node);
1566 goto err;
1567 }
1568 }
1569add_node:
1570 ret = 0;
1571 mutex_lock(&cpuhp_state_mutex);
1572 hlist_add_head(node, &sp->list);
1573 mutex_unlock(&cpuhp_state_mutex);
1574
1575err:
1576 put_online_cpus();
1577 return ret;
1578}
1579EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
1580
1476/** 1581/**
 1477 * __cpuhp_setup_state - Setup the callbacks for a hotplug machine state 1582 * __cpuhp_setup_state - Setup the callbacks for a hotplug machine state
1478 * @state: The state to setup 1583 * @state: The state to setup
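
Putting the pieces together: a subsystem registers the multi state once, then hangs one node per object off it. A sketch assuming a dynamically allocated AP state; struct foo_dev and the foo_cpu_* callbacks are placeholders, the cpuhp_* calls are the API added here:

	static enum cpuhp_state foo_online;

	static int __init foo_init(void)
	{
		int ret;

		ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "foo:online",
					      foo_cpu_online, foo_cpu_offline);
		if (ret < 0)
			return ret;
		foo_online = ret;	/* remember the allocated state */
		return 0;
	}

	static int foo_probe(struct foo_dev *fd)
	{
		/* runs foo_cpu_online(cpu, &fd->node) on every present
		 * cpu that has already reached foo_online */
		return cpuhp_state_add_instance(foo_online, &fd->node);
	}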
@@ -1486,7 +1591,8 @@ static int cpuhp_reserve_state(enum cpuhp_state state)
1486int __cpuhp_setup_state(enum cpuhp_state state, 1591int __cpuhp_setup_state(enum cpuhp_state state,
1487 const char *name, bool invoke, 1592 const char *name, bool invoke,
1488 int (*startup)(unsigned int cpu), 1593 int (*startup)(unsigned int cpu),
1489 int (*teardown)(unsigned int cpu)) 1594 int (*teardown)(unsigned int cpu),
1595 bool multi_instance)
1490{ 1596{
1491 int cpu, ret = 0; 1597 int cpu, ret = 0;
1492 int dyn_state = 0; 1598 int dyn_state = 0;
@@ -1505,7 +1611,7 @@ int __cpuhp_setup_state(enum cpuhp_state state,
1505 state = ret; 1611 state = ret;
1506 } 1612 }
1507 1613
1508 cpuhp_store_callbacks(state, name, startup, teardown); 1614 cpuhp_store_callbacks(state, name, startup, teardown, multi_instance);
1509 1615
1510 if (!invoke || !startup) 1616 if (!invoke || !startup)
1511 goto out; 1617 goto out;
@@ -1521,10 +1627,11 @@ int __cpuhp_setup_state(enum cpuhp_state state,
1521 if (cpustate < state) 1627 if (cpustate < state)
1522 continue; 1628 continue;
1523 1629
1524 ret = cpuhp_issue_call(cpu, state, startup, true); 1630 ret = cpuhp_issue_call(cpu, state, true, NULL);
1525 if (ret) { 1631 if (ret) {
1526 cpuhp_rollback_install(cpu, state, teardown); 1632 if (teardown)
1527 cpuhp_store_callbacks(state, NULL, NULL, NULL); 1633 cpuhp_rollback_install(cpu, state, NULL);
1634 cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
1528 goto out; 1635 goto out;
1529 } 1636 }
1530 } 1637 }
@@ -1536,6 +1643,42 @@ out:
1536} 1643}
1537EXPORT_SYMBOL(__cpuhp_setup_state); 1644EXPORT_SYMBOL(__cpuhp_setup_state);
1538 1645
1646int __cpuhp_state_remove_instance(enum cpuhp_state state,
1647 struct hlist_node *node, bool invoke)
1648{
1649 struct cpuhp_step *sp = cpuhp_get_step(state);
1650 int cpu;
1651
1652 BUG_ON(cpuhp_cb_check(state));
1653
1654 if (!sp->multi_instance)
1655 return -EINVAL;
1656
1657 get_online_cpus();
1658 if (!invoke || !cpuhp_get_teardown_cb(state))
1659 goto remove;
1660 /*
1661 * Call the teardown callback for each present cpu depending
1662 * on the hotplug state of the cpu. This function is not
1663 * allowed to fail currently!
1664 */
1665 for_each_present_cpu(cpu) {
1666 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1667 int cpustate = st->state;
1668
1669 if (cpustate >= state)
1670 cpuhp_issue_call(cpu, state, false, node);
1671 }
1672
1673remove:
1674 mutex_lock(&cpuhp_state_mutex);
1675 hlist_del(node);
1676 mutex_unlock(&cpuhp_state_mutex);
1677 put_online_cpus();
1678
1679 return 0;
1680}
1681EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
1539/** 1682/**
 1540 * __cpuhp_remove_state - Remove the callbacks for a hotplug machine state 1683 * __cpuhp_remove_state - Remove the callbacks for a hotplug machine state
1541 * @state: The state to remove 1684 * @state: The state to remove
@@ -1547,14 +1690,21 @@ EXPORT_SYMBOL(__cpuhp_setup_state);
1547 */ 1690 */
1548void __cpuhp_remove_state(enum cpuhp_state state, bool invoke) 1691void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
1549{ 1692{
1550 int (*teardown)(unsigned int cpu) = cpuhp_get_teardown_cb(state); 1693 struct cpuhp_step *sp = cpuhp_get_step(state);
1551 int cpu; 1694 int cpu;
1552 1695
1553 BUG_ON(cpuhp_cb_check(state)); 1696 BUG_ON(cpuhp_cb_check(state));
1554 1697
1555 get_online_cpus(); 1698 get_online_cpus();
1556 1699
1557 if (!invoke || !teardown) 1700 if (sp->multi_instance) {
1701 WARN(!hlist_empty(&sp->list),
1702 "Error: Removing state %d which has instances left.\n",
1703 state);
1704 goto remove;
1705 }
1706
1707 if (!invoke || !cpuhp_get_teardown_cb(state))
1558 goto remove; 1708 goto remove;
1559 1709
1560 /* 1710 /*
@@ -1567,10 +1717,10 @@ void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
1567 int cpustate = st->state; 1717 int cpustate = st->state;
1568 1718
1569 if (cpustate >= state) 1719 if (cpustate >= state)
1570 cpuhp_issue_call(cpu, state, teardown, false); 1720 cpuhp_issue_call(cpu, state, false, NULL);
1571 } 1721 }
1572remove: 1722remove:
1573 cpuhp_store_callbacks(state, NULL, NULL, NULL); 1723 cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
1574 put_online_cpus(); 1724 put_online_cpus();
1575} 1725}
1576EXPORT_SYMBOL(__cpuhp_remove_state); 1726EXPORT_SYMBOL(__cpuhp_remove_state);
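
The WARN in __cpuhp_remove_state() pins down the teardown order for multi-instance users: all instances out first, the state itself last. In the foo terms sketched earlier:

	/* per instance, e.g. at device unbind */
	cpuhp_state_remove_instance(foo_online, &fd->node);

	/* only once the instance list is empty, e.g. at module exit */
	cpuhp_remove_multi_state(foo_online);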
diff --git a/kernel/padata.c b/kernel/padata.c
index 993278895ccc..7848f0566403 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -30,6 +30,7 @@
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/sysfs.h> 31#include <linux/sysfs.h>
32#include <linux/rcupdate.h> 32#include <linux/rcupdate.h>
33#include <linux/module.h>
33 34
34#define MAX_OBJ_NUM 1000 35#define MAX_OBJ_NUM 1000
35 36
@@ -769,52 +770,43 @@ static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
769 cpumask_test_cpu(cpu, pinst->cpumask.cbcpu); 770 cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
770} 771}
771 772
772 773static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
773static int padata_cpu_callback(struct notifier_block *nfb,
774 unsigned long action, void *hcpu)
775{ 774{
776 int err;
777 struct padata_instance *pinst; 775 struct padata_instance *pinst;
778 int cpu = (unsigned long)hcpu; 776 int ret;
779 777
780 pinst = container_of(nfb, struct padata_instance, cpu_notifier); 778 pinst = hlist_entry_safe(node, struct padata_instance, node);
779 if (!pinst_has_cpu(pinst, cpu))
780 return 0;
781 781
782 switch (action) { 782 mutex_lock(&pinst->lock);
783 case CPU_ONLINE: 783 ret = __padata_add_cpu(pinst, cpu);
784 case CPU_ONLINE_FROZEN: 784 mutex_unlock(&pinst->lock);
785 case CPU_DOWN_FAILED: 785 return ret;
786 case CPU_DOWN_FAILED_FROZEN: 786}
787 if (!pinst_has_cpu(pinst, cpu))
788 break;
789 mutex_lock(&pinst->lock);
790 err = __padata_add_cpu(pinst, cpu);
791 mutex_unlock(&pinst->lock);
792 if (err)
793 return notifier_from_errno(err);
794 break;
795 787
796 case CPU_DOWN_PREPARE: 788static int padata_cpu_prep_down(unsigned int cpu, struct hlist_node *node)
797 case CPU_DOWN_PREPARE_FROZEN: 789{
798 case CPU_UP_CANCELED: 790 struct padata_instance *pinst;
799 case CPU_UP_CANCELED_FROZEN: 791 int ret;
800 if (!pinst_has_cpu(pinst, cpu)) 792
801 break; 793 pinst = hlist_entry_safe(node, struct padata_instance, node);
802 mutex_lock(&pinst->lock); 794 if (!pinst_has_cpu(pinst, cpu))
803 err = __padata_remove_cpu(pinst, cpu); 795 return 0;
804 mutex_unlock(&pinst->lock);
805 if (err)
806 return notifier_from_errno(err);
807 break;
808 }
809 796
810 return NOTIFY_OK; 797 mutex_lock(&pinst->lock);
798 ret = __padata_remove_cpu(pinst, cpu);
799 mutex_unlock(&pinst->lock);
800 return ret;
811} 801}
802
803static enum cpuhp_state hp_online;
812#endif 804#endif
813 805
814static void __padata_free(struct padata_instance *pinst) 806static void __padata_free(struct padata_instance *pinst)
815{ 807{
816#ifdef CONFIG_HOTPLUG_CPU 808#ifdef CONFIG_HOTPLUG_CPU
817 unregister_hotcpu_notifier(&pinst->cpu_notifier); 809 cpuhp_state_remove_instance_nocalls(hp_online, &pinst->node);
818#endif 810#endif
819 811
820 padata_stop(pinst); 812 padata_stop(pinst);
@@ -1012,11 +1004,8 @@ struct padata_instance *padata_alloc(struct workqueue_struct *wq,
1012 mutex_init(&pinst->lock); 1004 mutex_init(&pinst->lock);
1013 1005
1014#ifdef CONFIG_HOTPLUG_CPU 1006#ifdef CONFIG_HOTPLUG_CPU
1015 pinst->cpu_notifier.notifier_call = padata_cpu_callback; 1007 cpuhp_state_add_instance_nocalls(hp_online, &pinst->node);
1016 pinst->cpu_notifier.priority = 0;
1017 register_hotcpu_notifier(&pinst->cpu_notifier);
1018#endif 1008#endif
1019
1020 return pinst; 1009 return pinst;
1021 1010
1022err_free_masks: 1011err_free_masks:
@@ -1039,3 +1028,26 @@ void padata_free(struct padata_instance *pinst)
1039 kobject_put(&pinst->kobj); 1028 kobject_put(&pinst->kobj);
1040} 1029}
1041EXPORT_SYMBOL(padata_free); 1030EXPORT_SYMBOL(padata_free);
1031
1032#ifdef CONFIG_HOTPLUG_CPU
1033
1034static __init int padata_driver_init(void)
1035{
1036 int ret;
1037
1038 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
1039 padata_cpu_online,
1040 padata_cpu_prep_down);
1041 if (ret < 0)
1042 return ret;
1043 hp_online = ret;
1044 return 0;
1045}
1046module_init(padata_driver_init);
1047
1048static __exit void padata_driver_exit(void)
1049{
1050 cpuhp_remove_multi_state(hp_online);
1051}
1052module_exit(padata_driver_exit);
1053#endif
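
One convention worth noting in this conversion: for CPUHP_AP_ONLINE_DYN, cpuhp_setup_state_multi() returns the state number it actually reserved, and that value, not the _DYN constant, is what the later instance add/remove calls must use; that is the whole job of hp_online. The check, isolated:

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
				      padata_cpu_online, padata_cpu_prep_down);
	if (ret < 0)
		return ret;	/* no free dynamic state slot */
	hp_online = ret;	/* the state actually reserved */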
diff --git a/kernel/relay.c b/kernel/relay.c
index d797502140b9..fc9b4a4af463 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -214,7 +214,7 @@ static void relay_destroy_buf(struct rchan_buf *buf)
214 __free_page(buf->page_array[i]); 214 __free_page(buf->page_array[i]);
215 relay_free_page_array(buf->page_array); 215 relay_free_page_array(buf->page_array);
216 } 216 }
217 chan->buf[buf->cpu] = NULL; 217 *per_cpu_ptr(chan->buf, buf->cpu) = NULL;
218 kfree(buf->padding); 218 kfree(buf->padding);
219 kfree(buf); 219 kfree(buf);
220 kref_put(&chan->kref, relay_destroy_channel); 220 kref_put(&chan->kref, relay_destroy_channel);
@@ -382,20 +382,21 @@ static void __relay_reset(struct rchan_buf *buf, unsigned int init)
382 */ 382 */
383void relay_reset(struct rchan *chan) 383void relay_reset(struct rchan *chan)
384{ 384{
385 struct rchan_buf *buf;
385 unsigned int i; 386 unsigned int i;
386 387
387 if (!chan) 388 if (!chan)
388 return; 389 return;
389 390
390 if (chan->is_global && chan->buf[0]) { 391 if (chan->is_global && (buf = *per_cpu_ptr(chan->buf, 0))) {
391 __relay_reset(chan->buf[0], 0); 392 __relay_reset(buf, 0);
392 return; 393 return;
393 } 394 }
394 395
395 mutex_lock(&relay_channels_mutex); 396 mutex_lock(&relay_channels_mutex);
396 for_each_possible_cpu(i) 397 for_each_possible_cpu(i)
397 if (chan->buf[i]) 398 if ((buf = *per_cpu_ptr(chan->buf, i)))
398 __relay_reset(chan->buf[i], 0); 399 __relay_reset(buf, 0);
399 mutex_unlock(&relay_channels_mutex); 400 mutex_unlock(&relay_channels_mutex);
400} 401}
401EXPORT_SYMBOL_GPL(relay_reset); 402EXPORT_SYMBOL_GPL(relay_reset);
@@ -440,7 +441,7 @@ static struct rchan_buf *relay_open_buf(struct rchan *chan, unsigned int cpu)
440 struct dentry *dentry; 441 struct dentry *dentry;
441 442
442 if (chan->is_global) 443 if (chan->is_global)
443 return chan->buf[0]; 444 return *per_cpu_ptr(chan->buf, 0);
444 445
445 buf = relay_create_buf(chan); 446 buf = relay_create_buf(chan);
446 if (!buf) 447 if (!buf)
@@ -464,7 +465,7 @@ static struct rchan_buf *relay_open_buf(struct rchan *chan, unsigned int cpu)
464 __relay_reset(buf, 1); 465 __relay_reset(buf, 1);
465 466
466 if(chan->is_global) { 467 if(chan->is_global) {
467 chan->buf[0] = buf; 468 *per_cpu_ptr(chan->buf, 0) = buf;
468 buf->cpu = 0; 469 buf->cpu = 0;
469 } 470 }
470 471
@@ -512,46 +513,25 @@ static void setup_callbacks(struct rchan *chan,
512 chan->cb = cb; 513 chan->cb = cb;
513} 514}
514 515
515/** 516int relay_prepare_cpu(unsigned int cpu)
516 * relay_hotcpu_callback - CPU hotplug callback
517 * @nb: notifier block
518 * @action: hotplug action to take
519 * @hcpu: CPU number
520 *
521 * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
522 */
523static int relay_hotcpu_callback(struct notifier_block *nb,
524 unsigned long action,
525 void *hcpu)
526{ 517{
527 unsigned int hotcpu = (unsigned long)hcpu;
528 struct rchan *chan; 518 struct rchan *chan;
519 struct rchan_buf *buf;
529 520
530 switch(action) { 521 mutex_lock(&relay_channels_mutex);
531 case CPU_UP_PREPARE: 522 list_for_each_entry(chan, &relay_channels, list) {
532 case CPU_UP_PREPARE_FROZEN: 523 if ((buf = *per_cpu_ptr(chan->buf, cpu)))
533 mutex_lock(&relay_channels_mutex); 524 continue;
534 list_for_each_entry(chan, &relay_channels, list) { 525 buf = relay_open_buf(chan, cpu);
535 if (chan->buf[hotcpu]) 526 if (!buf) {
536 continue; 527 pr_err("relay: cpu %d buffer creation failed\n", cpu);
537 chan->buf[hotcpu] = relay_open_buf(chan, hotcpu); 528 mutex_unlock(&relay_channels_mutex);
538 if(!chan->buf[hotcpu]) { 529 return -ENOMEM;
539 printk(KERN_ERR
540 "relay_hotcpu_callback: cpu %d buffer "
541 "creation failed\n", hotcpu);
542 mutex_unlock(&relay_channels_mutex);
543 return notifier_from_errno(-ENOMEM);
544 }
545 } 530 }
546 mutex_unlock(&relay_channels_mutex); 531 *per_cpu_ptr(chan->buf, cpu) = buf;
547 break;
548 case CPU_DEAD:
549 case CPU_DEAD_FROZEN:
550 /* No need to flush the cpu : will be flushed upon
551 * final relay_flush() call. */
552 break;
553 } 532 }
554 return NOTIFY_OK; 533 mutex_unlock(&relay_channels_mutex);
534 return 0;
555} 535}
556 536
557/** 537/**
@@ -583,6 +563,7 @@ struct rchan *relay_open(const char *base_filename,
583{ 563{
584 unsigned int i; 564 unsigned int i;
585 struct rchan *chan; 565 struct rchan *chan;
566 struct rchan_buf *buf;
586 567
587 if (!(subbuf_size && n_subbufs)) 568 if (!(subbuf_size && n_subbufs))
588 return NULL; 569 return NULL;
@@ -593,6 +574,7 @@ struct rchan *relay_open(const char *base_filename,
593 if (!chan) 574 if (!chan)
594 return NULL; 575 return NULL;
595 576
577 chan->buf = alloc_percpu(struct rchan_buf *);
596 chan->version = RELAYFS_CHANNEL_VERSION; 578 chan->version = RELAYFS_CHANNEL_VERSION;
597 chan->n_subbufs = n_subbufs; 579 chan->n_subbufs = n_subbufs;
598 chan->subbuf_size = subbuf_size; 580 chan->subbuf_size = subbuf_size;
@@ -608,9 +590,10 @@ struct rchan *relay_open(const char *base_filename,
608 590
609 mutex_lock(&relay_channels_mutex); 591 mutex_lock(&relay_channels_mutex);
610 for_each_online_cpu(i) { 592 for_each_online_cpu(i) {
611 chan->buf[i] = relay_open_buf(chan, i); 593 buf = relay_open_buf(chan, i);
612 if (!chan->buf[i]) 594 if (!buf)
613 goto free_bufs; 595 goto free_bufs;
596 *per_cpu_ptr(chan->buf, i) = buf;
614 } 597 }
615 list_add(&chan->list, &relay_channels); 598 list_add(&chan->list, &relay_channels);
616 mutex_unlock(&relay_channels_mutex); 599 mutex_unlock(&relay_channels_mutex);
@@ -619,8 +602,8 @@ struct rchan *relay_open(const char *base_filename,
619 602
620free_bufs: 603free_bufs:
621 for_each_possible_cpu(i) { 604 for_each_possible_cpu(i) {
622 if (chan->buf[i]) 605 if ((buf = *per_cpu_ptr(chan->buf, i)))
623 relay_close_buf(chan->buf[i]); 606 relay_close_buf(buf);
624 } 607 }
625 608
626 kref_put(&chan->kref, relay_destroy_channel); 609 kref_put(&chan->kref, relay_destroy_channel);
@@ -666,6 +649,7 @@ int relay_late_setup_files(struct rchan *chan,
666 unsigned int i, curr_cpu; 649 unsigned int i, curr_cpu;
667 unsigned long flags; 650 unsigned long flags;
668 struct dentry *dentry; 651 struct dentry *dentry;
652 struct rchan_buf *buf;
669 struct rchan_percpu_buf_dispatcher disp; 653 struct rchan_percpu_buf_dispatcher disp;
670 654
671 if (!chan || !base_filename) 655 if (!chan || !base_filename)
@@ -684,10 +668,11 @@ int relay_late_setup_files(struct rchan *chan,
684 668
685 if (chan->is_global) { 669 if (chan->is_global) {
686 err = -EINVAL; 670 err = -EINVAL;
687 if (!WARN_ON_ONCE(!chan->buf[0])) { 671 buf = *per_cpu_ptr(chan->buf, 0);
688 dentry = relay_create_buf_file(chan, chan->buf[0], 0); 672 if (!WARN_ON_ONCE(!buf)) {
673 dentry = relay_create_buf_file(chan, buf, 0);
689 if (dentry && !WARN_ON_ONCE(!chan->is_global)) { 674 if (dentry && !WARN_ON_ONCE(!chan->is_global)) {
690 relay_set_buf_dentry(chan->buf[0], dentry); 675 relay_set_buf_dentry(buf, dentry);
691 err = 0; 676 err = 0;
692 } 677 }
693 } 678 }
@@ -702,13 +687,14 @@ int relay_late_setup_files(struct rchan *chan,
702 * on all currently online CPUs. 687 * on all currently online CPUs.
703 */ 688 */
704 for_each_online_cpu(i) { 689 for_each_online_cpu(i) {
705 if (unlikely(!chan->buf[i])) { 690 buf = *per_cpu_ptr(chan->buf, i);
691 if (unlikely(!buf)) {
706 WARN_ONCE(1, KERN_ERR "CPU has no buffer!\n"); 692 WARN_ONCE(1, KERN_ERR "CPU has no buffer!\n");
707 err = -EINVAL; 693 err = -EINVAL;
708 break; 694 break;
709 } 695 }
710 696
711 dentry = relay_create_buf_file(chan, chan->buf[i], i); 697 dentry = relay_create_buf_file(chan, buf, i);
712 if (unlikely(!dentry)) { 698 if (unlikely(!dentry)) {
713 err = -EINVAL; 699 err = -EINVAL;
714 break; 700 break;
@@ -716,10 +702,10 @@ int relay_late_setup_files(struct rchan *chan,
716 702
717 if (curr_cpu == i) { 703 if (curr_cpu == i) {
718 local_irq_save(flags); 704 local_irq_save(flags);
719 relay_set_buf_dentry(chan->buf[i], dentry); 705 relay_set_buf_dentry(buf, dentry);
720 local_irq_restore(flags); 706 local_irq_restore(flags);
721 } else { 707 } else {
722 disp.buf = chan->buf[i]; 708 disp.buf = buf;
723 disp.dentry = dentry; 709 disp.dentry = dentry;
724 smp_mb(); 710 smp_mb();
725 /* relay_channels_mutex must be held, so wait. */ 711 /* relay_channels_mutex must be held, so wait. */
@@ -822,11 +808,10 @@ void relay_subbufs_consumed(struct rchan *chan,
822 if (!chan) 808 if (!chan)
823 return; 809 return;
824 810
825 if (cpu >= NR_CPUS || !chan->buf[cpu] || 811 buf = *per_cpu_ptr(chan->buf, cpu);
826 subbufs_consumed > chan->n_subbufs) 812 if (cpu >= NR_CPUS || !buf || subbufs_consumed > chan->n_subbufs)
827 return; 813 return;
828 814
829 buf = chan->buf[cpu];
830 if (subbufs_consumed > buf->subbufs_produced - buf->subbufs_consumed) 815 if (subbufs_consumed > buf->subbufs_produced - buf->subbufs_consumed)
831 buf->subbufs_consumed = buf->subbufs_produced; 816 buf->subbufs_consumed = buf->subbufs_produced;
832 else 817 else
@@ -842,18 +827,19 @@ EXPORT_SYMBOL_GPL(relay_subbufs_consumed);
842 */ 827 */
843void relay_close(struct rchan *chan) 828void relay_close(struct rchan *chan)
844{ 829{
830 struct rchan_buf *buf;
845 unsigned int i; 831 unsigned int i;
846 832
847 if (!chan) 833 if (!chan)
848 return; 834 return;
849 835
850 mutex_lock(&relay_channels_mutex); 836 mutex_lock(&relay_channels_mutex);
851 if (chan->is_global && chan->buf[0]) 837 if (chan->is_global && (buf = *per_cpu_ptr(chan->buf, 0)))
852 relay_close_buf(chan->buf[0]); 838 relay_close_buf(buf);
853 else 839 else
854 for_each_possible_cpu(i) 840 for_each_possible_cpu(i)
855 if (chan->buf[i]) 841 if ((buf = *per_cpu_ptr(chan->buf, i)))
856 relay_close_buf(chan->buf[i]); 842 relay_close_buf(buf);
857 843
858 if (chan->last_toobig) 844 if (chan->last_toobig)
859 printk(KERN_WARNING "relay: one or more items not logged " 845 printk(KERN_WARNING "relay: one or more items not logged "
@@ -874,20 +860,21 @@ EXPORT_SYMBOL_GPL(relay_close);
874 */ 860 */
875void relay_flush(struct rchan *chan) 861void relay_flush(struct rchan *chan)
876{ 862{
863 struct rchan_buf *buf;
877 unsigned int i; 864 unsigned int i;
878 865
879 if (!chan) 866 if (!chan)
880 return; 867 return;
881 868
882 if (chan->is_global && chan->buf[0]) { 869 if (chan->is_global && (buf = *per_cpu_ptr(chan->buf, 0))) {
883 relay_switch_subbuf(chan->buf[0], 0); 870 relay_switch_subbuf(buf, 0);
884 return; 871 return;
885 } 872 }
886 873
887 mutex_lock(&relay_channels_mutex); 874 mutex_lock(&relay_channels_mutex);
888 for_each_possible_cpu(i) 875 for_each_possible_cpu(i)
889 if (chan->buf[i]) 876 if ((buf = *per_cpu_ptr(chan->buf, i)))
890 relay_switch_subbuf(chan->buf[i], 0); 877 relay_switch_subbuf(buf, 0);
891 mutex_unlock(&relay_channels_mutex); 878 mutex_unlock(&relay_channels_mutex);
892} 879}
893EXPORT_SYMBOL_GPL(relay_flush); 880EXPORT_SYMBOL_GPL(relay_flush);
@@ -1377,12 +1364,3 @@ const struct file_operations relay_file_operations = {
1377 .splice_read = relay_file_splice_read, 1364 .splice_read = relay_file_splice_read,
1378}; 1365};
1379EXPORT_SYMBOL_GPL(relay_file_operations); 1366EXPORT_SYMBOL_GPL(relay_file_operations);
1380
1381static __init int relay_init(void)
1382{
1383
1384 hotcpu_notifier(relay_hotcpu_callback, 0);
1385 return 0;
1386}
1387
1388early_initcall(relay_init);
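
The relay.c hunks are almost entirely mechanical: chan->buf changes from a flat array indexed by CPU number to storage obtained with alloc_percpu(), so every chan->buf[i] access becomes *per_cpu_ptr(chan->buf, i), and the old relay_init()/hotcpu_notifier() registration disappears because buffer setup for incoming CPUs now hangs off a dedicated hotplug state registered elsewhere in this series. One thing to keep in mind: in relay_subbufs_consumed() the per-cpu fetch now happens before the cpu >= NR_CPUS bounds check, so the ordering of those two lines matters for an out-of-range cpu. A minimal sketch of the accessor pattern itself, with illustrative names rather than the relay ones:

#include <linux/errno.h>
#include <linux/percpu.h>

struct item;

/* One pointer slot per possible CPU, allocated dynamically. */
static struct item * __percpu *slots;

static int slots_init(void)
{
	slots = alloc_percpu(struct item *);
	return slots ? 0 : -ENOMEM;
}

static void slot_set(unsigned int cpu, struct item *it)
{
	*per_cpu_ptr(slots, cpu) = it;		/* was: slots[cpu] = it */
}

static struct item *slot_get(unsigned int cpu)
{
	return *per_cpu_ptr(slots, cpu);	/* was: slots[cpu] */
}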
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 8ed90e3a88d6..66762645f9e8 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -714,7 +714,7 @@ void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
714 BUG(); 714 BUG();
715} 715}
716 716
717static void takeover_tasklets(unsigned int cpu) 717static int takeover_tasklets(unsigned int cpu)
718{ 718{
719 /* CPU is dead, so no lock needed. */ 719 /* CPU is dead, so no lock needed. */
720 local_irq_disable(); 720 local_irq_disable();
@@ -737,27 +737,12 @@ static void takeover_tasklets(unsigned int cpu)
737 raise_softirq_irqoff(HI_SOFTIRQ); 737 raise_softirq_irqoff(HI_SOFTIRQ);
738 738
739 local_irq_enable(); 739 local_irq_enable();
740 return 0;
740} 741}
742#else
743#define takeover_tasklets NULL
741#endif /* CONFIG_HOTPLUG_CPU */ 744#endif /* CONFIG_HOTPLUG_CPU */
742 745
743static int cpu_callback(struct notifier_block *nfb, unsigned long action,
744 void *hcpu)
745{
746 switch (action) {
747#ifdef CONFIG_HOTPLUG_CPU
748 case CPU_DEAD:
749 case CPU_DEAD_FROZEN:
750 takeover_tasklets((unsigned long)hcpu);
751 break;
752#endif /* CONFIG_HOTPLUG_CPU */
753 }
754 return NOTIFY_OK;
755}
756
757static struct notifier_block cpu_nfb = {
758 .notifier_call = cpu_callback
759};
760
761static struct smp_hotplug_thread softirq_threads = { 746static struct smp_hotplug_thread softirq_threads = {
762 .store = &ksoftirqd, 747 .store = &ksoftirqd,
763 .thread_should_run = ksoftirqd_should_run, 748 .thread_should_run = ksoftirqd_should_run,
@@ -767,8 +752,8 @@ static struct smp_hotplug_thread softirq_threads = {
767 752
768static __init int spawn_ksoftirqd(void) 753static __init int spawn_ksoftirqd(void)
769{ 754{
770 register_cpu_notifier(&cpu_nfb); 755 cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
771 756 takeover_tasklets);
772 BUG_ON(smpboot_register_percpu_thread(&softirq_threads)); 757 BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
773 758
774 return 0; 759 return 0;
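
The softirq conversion illustrates the teardown-only idiom: the dead callback exists only under CONFIG_HOTPLUG_CPU, and defining its name to NULL in the #else branch lets the registration call stay unconditional. The same shape in isolation (CPUHP_FOO_DEAD stands in for a real slot in enum cpuhp_state; all names here are hypothetical):

#include <linux/cpu.h>
#include <linux/cpuhotplug.h>
#include <linux/init.h>

#ifdef CONFIG_HOTPLUG_CPU
static int foo_dead_cpu(unsigned int cpu)
{
	/* pull this CPU's leftover per-cpu work over to a live CPU */
	return 0;
}
#else
#define foo_dead_cpu	NULL	/* CPUs never go away, nothing to take over */
#endif

static __init int foo_init(void)
{
	/* startup == NULL: nothing to do when a CPU comes online */
	cpuhp_setup_state_nocalls(CPUHP_FOO_DEAD, "foo:dead",
				  NULL, foo_dead_cpu);
	return 0;
}
early_initcall(foo_init);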
diff --git a/lib/cpu-notifier-error-inject.c b/lib/cpu-notifier-error-inject.c
index 707ca24f7b18..0e2c9a1e958a 100644
--- a/lib/cpu-notifier-error-inject.c
+++ b/lib/cpu-notifier-error-inject.c
@@ -8,16 +8,47 @@ static int priority;
8module_param(priority, int, 0); 8module_param(priority, int, 0);
9MODULE_PARM_DESC(priority, "specify cpu notifier priority"); 9MODULE_PARM_DESC(priority, "specify cpu notifier priority");
10 10
11#define UP_PREPARE 0
12#define UP_PREPARE_FROZEN 0
13#define DOWN_PREPARE 0
14#define DOWN_PREPARE_FROZEN 0
15
11static struct notifier_err_inject cpu_notifier_err_inject = { 16static struct notifier_err_inject cpu_notifier_err_inject = {
12 .actions = { 17 .actions = {
13 { NOTIFIER_ERR_INJECT_ACTION(CPU_UP_PREPARE) }, 18 { NOTIFIER_ERR_INJECT_ACTION(UP_PREPARE) },
14 { NOTIFIER_ERR_INJECT_ACTION(CPU_UP_PREPARE_FROZEN) }, 19 { NOTIFIER_ERR_INJECT_ACTION(UP_PREPARE_FROZEN) },
15 { NOTIFIER_ERR_INJECT_ACTION(CPU_DOWN_PREPARE) }, 20 { NOTIFIER_ERR_INJECT_ACTION(DOWN_PREPARE) },
16 { NOTIFIER_ERR_INJECT_ACTION(CPU_DOWN_PREPARE_FROZEN) }, 21 { NOTIFIER_ERR_INJECT_ACTION(DOWN_PREPARE_FROZEN) },
17 {} 22 {}
18 } 23 }
19}; 24};
20 25
26static int notf_err_handle(struct notifier_err_inject_action *action)
27{
28 int ret;
29
30 ret = action->error;
31 if (ret)
32 pr_info("Injecting error (%d) to %s\n", ret, action->name);
33 return ret;
34}
35
36static int notf_err_inj_up_prepare(unsigned int cpu)
37{
38 if (!cpuhp_tasks_frozen)
39 return notf_err_handle(&cpu_notifier_err_inject.actions[0]);
40 else
41 return notf_err_handle(&cpu_notifier_err_inject.actions[1]);
42}
43
44static int notf_err_inj_dead(unsigned int cpu)
45{
46 if (!cpuhp_tasks_frozen)
47 return notf_err_handle(&cpu_notifier_err_inject.actions[2]);
48 else
49 return notf_err_handle(&cpu_notifier_err_inject.actions[3]);
50}
51
21static struct dentry *dir; 52static struct dentry *dir;
22 53
23static int err_inject_init(void) 54static int err_inject_init(void)
@@ -29,7 +60,10 @@ static int err_inject_init(void)
29 if (IS_ERR(dir)) 60 if (IS_ERR(dir))
30 return PTR_ERR(dir); 61 return PTR_ERR(dir);
31 62
32 err = register_hotcpu_notifier(&cpu_notifier_err_inject.nb); 63 err = cpuhp_setup_state_nocalls(CPUHP_NOTF_ERR_INJ_PREPARE,
64 "cpu-err-notif:prepare",
65 notf_err_inj_up_prepare,
66 notf_err_inj_dead);
33 if (err) 67 if (err)
34 debugfs_remove_recursive(dir); 68 debugfs_remove_recursive(dir);
35 69
@@ -38,7 +72,7 @@ static int err_inject_init(void)
38 72
39static void err_inject_exit(void) 73static void err_inject_exit(void)
40{ 74{
41 unregister_hotcpu_notifier(&cpu_notifier_err_inject.nb); 75 cpuhp_remove_state_nocalls(CPUHP_NOTF_ERR_INJ_PREPARE);
42 debugfs_remove_recursive(dir); 76 debugfs_remove_recursive(dir);
43} 77}
44 78
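
There are no *_FROZEN action codes in the state machine; a callback that must treat suspend/resume differently from ordinary hotplug checks the global cpuhp_tasks_frozen flag, exactly as the injection handlers above do. Roughly, with hypothetical names:

#include <linux/cpu.h>

static int foo_prepare_cpu(unsigned int cpu)
{
	if (cpuhp_tasks_frozen)
		return 0;	/* resume path: e.g. skip the check */

	/* ordinary hotplug path; a negative return aborts the bring-up */
	return 0;
}

Returning an error from a prepare-stage callback rolls the CPU back through the teardown callbacks of the states it already passed, which is precisely the behaviour this error-injection module exists to exercise.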
diff --git a/lib/irq_poll.c b/lib/irq_poll.c
index 836f7db4e548..2be55692aa43 100644
--- a/lib/irq_poll.c
+++ b/lib/irq_poll.c
@@ -184,30 +184,21 @@ void irq_poll_init(struct irq_poll *iop, int weight, irq_poll_fn *poll_fn)
184} 184}
185EXPORT_SYMBOL(irq_poll_init); 185EXPORT_SYMBOL(irq_poll_init);
186 186
187static int irq_poll_cpu_notify(struct notifier_block *self, 187static int irq_poll_cpu_dead(unsigned int cpu)
188 unsigned long action, void *hcpu)
189{ 188{
190 /* 189 /*
191 * If a CPU goes away, splice its entries to the current CPU 190 * If a CPU goes away, splice its entries to the current CPU
192 * and trigger a run of the softirq 191 * and trigger a run of the softirq
193 */ 192 */
194 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { 193 local_irq_disable();
195 int cpu = (unsigned long) hcpu; 194 list_splice_init(&per_cpu(blk_cpu_iopoll, cpu),
196 195 this_cpu_ptr(&blk_cpu_iopoll));
197 local_irq_disable(); 196 __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
198 list_splice_init(&per_cpu(blk_cpu_iopoll, cpu), 197 local_irq_enable();
199 this_cpu_ptr(&blk_cpu_iopoll));
200 __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
201 local_irq_enable();
202 }
203 198
204 return NOTIFY_OK; 199 return 0;
205} 200}
206 201
207static struct notifier_block irq_poll_cpu_notifier = {
208 .notifier_call = irq_poll_cpu_notify,
209};
210
211static __init int irq_poll_setup(void) 202static __init int irq_poll_setup(void)
212{ 203{
213 int i; 204 int i;
@@ -216,7 +207,8 @@ static __init int irq_poll_setup(void)
216 INIT_LIST_HEAD(&per_cpu(blk_cpu_iopoll, i)); 207 INIT_LIST_HEAD(&per_cpu(blk_cpu_iopoll, i));
217 208
218 open_softirq(IRQ_POLL_SOFTIRQ, irq_poll_softirq); 209 open_softirq(IRQ_POLL_SOFTIRQ, irq_poll_softirq);
219 register_hotcpu_notifier(&irq_poll_cpu_notifier); 210 cpuhp_setup_state_nocalls(CPUHP_IRQ_POLL_DEAD, "irq_poll:dead", NULL,
211 irq_poll_cpu_dead);
220 return 0; 212 return 0;
221} 213}
222subsys_initcall(irq_poll_setup); 214subsys_initcall(irq_poll_setup);
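
A *_DEAD state runs on a surviving CPU only after the outgoing CPU has stopped executing, which is why irq_poll_cpu_dead() may walk the dead CPU's per-cpu list without any lock before splicing it onto its own. The orphan-rescue pattern reduced to its bones (illustrative names; the raised softirq is a stand-in for whatever consumer drains the list, and each list is assumed to have been INIT_LIST_HEAD()'d at boot):

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(struct list_head, foo_backlog);

static int foo_cpu_dead(unsigned int cpu)
{
	/* 'cpu' is offline: nothing runs there, so no locking is needed */
	local_irq_disable();
	list_splice_init(&per_cpu(foo_backlog, cpu),
			 this_cpu_ptr(&foo_backlog));
	__raise_softirq_irqoff(TASKLET_SOFTIRQ);	/* stand-in softirq */
	local_irq_enable();
	return 0;
}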
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index f4cd7d8005c9..28d6f36a2d79 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2080,26 +2080,12 @@ void writeback_set_ratelimit(void)
2080 ratelimit_pages = 16; 2080 ratelimit_pages = 16;
2081} 2081}
2082 2082
2083static int 2083static int page_writeback_cpu_online(unsigned int cpu)
2084ratelimit_handler(struct notifier_block *self, unsigned long action,
2085 void *hcpu)
2086{ 2084{
2087 2085 writeback_set_ratelimit();
2088 switch (action & ~CPU_TASKS_FROZEN) { 2086 return 0;
2089 case CPU_ONLINE:
2090 case CPU_DEAD:
2091 writeback_set_ratelimit();
2092 return NOTIFY_OK;
2093 default:
2094 return NOTIFY_DONE;
2095 }
2096} 2087}
2097 2088
2098static struct notifier_block ratelimit_nb = {
2099 .notifier_call = ratelimit_handler,
2100 .next = NULL,
2101};
2102
2103/* 2089/*
2104 * Called early on to tune the page writeback dirty limits. 2090 * Called early on to tune the page writeback dirty limits.
2105 * 2091 *
@@ -2122,8 +2108,10 @@ void __init page_writeback_init(void)
2122{ 2108{
2123 BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL)); 2109 BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL));
2124 2110
2125 writeback_set_ratelimit(); 2111 cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mm/writeback:online",
2126 register_cpu_notifier(&ratelimit_nb); 2112 page_writeback_cpu_online, NULL);
2113 cpuhp_setup_state(CPUHP_MM_WRITEBACK_DEAD, "mm/writeback:dead", NULL,
2114 page_writeback_cpu_online);
2127} 2115}
2128 2116
2129/** 2117/**
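
writeback_set_ratelimit() depends only on how many CPUs are online, so one recalculation function does double duty: startup callback of a dynamic online state and teardown callback of a dead state. Because plain cpuhp_setup_state() is used rather than the _nocalls variant, the online callback also fires once for every CPU already up, which replaces the explicit writeback_set_ratelimit() call the old code made before register_cpu_notifier(). The symmetric registration, sketched with hypothetical names and a hypothetical CPUHP_FOO_DEAD slot:

#include <linux/cpu.h>
#include <linux/cpuhotplug.h>
#include <linux/init.h>

static void foo_retune(void)
{
	/* recompute whatever scales with num_online_cpus() */
}

static int foo_cpu_change(unsigned int cpu)
{
	foo_retune();
	return 0;
}

static void __init foo_init(void)
{
	/* also invokes foo_cpu_change() for the CPUs already online */
	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "foo:online",
			  foo_cpu_change, NULL);
	cpuhp_setup_state(CPUHP_FOO_DEAD, "foo:dead",
			  NULL, foo_cpu_change);
}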
diff --git a/mm/slab.c b/mm/slab.c
index b67271024135..090fb26b3a39 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -886,6 +886,7 @@ static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
886 return 0; 886 return 0;
887} 887}
888 888
889#if (defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)) || defined(CONFIG_SMP)
889/* 890/*
890 * Allocates and initializes node for a node on each slab cache, used for 891 * Allocates and initializes node for a node on each slab cache, used for
891 * either memory or cpu hotplug. If memory is being hot-added, the kmem_cache_node 892 * either memory or cpu hotplug. If memory is being hot-added, the kmem_cache_node
@@ -908,6 +909,7 @@ static int init_cache_node_node(int node)
908 909
909 return 0; 910 return 0;
910} 911}
912#endif
911 913
912static int setup_kmem_cache_node(struct kmem_cache *cachep, 914static int setup_kmem_cache_node(struct kmem_cache *cachep,
913 int node, gfp_t gfp, bool force_change) 915 int node, gfp_t gfp, bool force_change)
@@ -975,6 +977,8 @@ fail:
975 return ret; 977 return ret;
976} 978}
977 979
980#ifdef CONFIG_SMP
981
978static void cpuup_canceled(long cpu) 982static void cpuup_canceled(long cpu)
979{ 983{
980 struct kmem_cache *cachep; 984 struct kmem_cache *cachep;
@@ -1075,65 +1079,54 @@ bad:
1075 return -ENOMEM; 1079 return -ENOMEM;
1076} 1080}
1077 1081
1078static int cpuup_callback(struct notifier_block *nfb, 1082int slab_prepare_cpu(unsigned int cpu)
1079 unsigned long action, void *hcpu)
1080{ 1083{
1081 long cpu = (long)hcpu; 1084 int err;
1082 int err = 0;
1083 1085
1084 switch (action) { 1086 mutex_lock(&slab_mutex);
1085 case CPU_UP_PREPARE: 1087 err = cpuup_prepare(cpu);
1086 case CPU_UP_PREPARE_FROZEN: 1088 mutex_unlock(&slab_mutex);
1087 mutex_lock(&slab_mutex); 1089 return err;
1088 err = cpuup_prepare(cpu); 1090}
1089 mutex_unlock(&slab_mutex); 1091
1090 break; 1092/*
1091 case CPU_ONLINE: 1093 * This is called for a failed online attempt and for a successful
1092 case CPU_ONLINE_FROZEN: 1094 * offline.
1093 start_cpu_timer(cpu); 1095 *
1094 break; 1096 * Even if all the cpus of a node are down, we don't free the
 1095#ifdef CONFIG_HOTPLUG_CPU 1097 * kmem_cache_node of any cache. This is to avoid a race between cpu_down, and
 1096 case CPU_DOWN_PREPARE: 1098 * a kmalloc allocation from another cpu for memory from the node of
 1097 case CPU_DOWN_PREPARE_FROZEN: 1099 * the cpu going down. The kmem_cache_node structure is usually allocated from
1098 /* 1100 * kmem_cache_create() and gets destroyed at kmem_cache_destroy().
1099 * Shutdown cache reaper. Note that the slab_mutex is 1101 */
1100 * held so that if cache_reap() is invoked it cannot do 1102int slab_dead_cpu(unsigned int cpu)
1101 * anything expensive but will only modify reap_work 1103{
1102 * and reschedule the timer. 1104 mutex_lock(&slab_mutex);
1103 */ 1105 cpuup_canceled(cpu);
1104 cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu)); 1106 mutex_unlock(&slab_mutex);
1105 /* Now the cache_reaper is guaranteed to be not running. */ 1107 return 0;
1106 per_cpu(slab_reap_work, cpu).work.func = NULL; 1108}
1107 break;
1108 case CPU_DOWN_FAILED:
1109 case CPU_DOWN_FAILED_FROZEN:
1110 start_cpu_timer(cpu);
1111 break;
1112 case CPU_DEAD:
1113 case CPU_DEAD_FROZEN:
1114 /*
1115 * Even if all the cpus of a node are down, we don't free the
1116 * kmem_cache_node of any cache. This to avoid a race between
1117 * cpu_down, and a kmalloc allocation from another cpu for
1118 * memory from the node of the cpu going down. The node
1119 * structure is usually allocated from kmem_cache_create() and
1120 * gets destroyed at kmem_cache_destroy().
1121 */
1122 /* fall through */
1123#endif 1109#endif
1124 case CPU_UP_CANCELED: 1110
1125 case CPU_UP_CANCELED_FROZEN: 1111static int slab_online_cpu(unsigned int cpu)
1126 mutex_lock(&slab_mutex); 1112{
1127 cpuup_canceled(cpu); 1113 start_cpu_timer(cpu);
1128 mutex_unlock(&slab_mutex); 1114 return 0;
1129 break;
1130 }
1131 return notifier_from_errno(err);
1132} 1115}
1133 1116
1134static struct notifier_block cpucache_notifier = { 1117static int slab_offline_cpu(unsigned int cpu)
1135 &cpuup_callback, NULL, 0 1118{
1136}; 1119 /*
1120 * Shutdown cache reaper. Note that the slab_mutex is held so
1121 * that if cache_reap() is invoked it cannot do anything
1122 * expensive but will only modify reap_work and reschedule the
1123 * timer.
1124 */
1125 cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
1126 /* Now the cache_reaper is guaranteed to be not running. */
1127 per_cpu(slab_reap_work, cpu).work.func = NULL;
1128 return 0;
1129}
1137 1130
1138#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG) 1131#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
1139/* 1132/*
@@ -1336,12 +1329,6 @@ void __init kmem_cache_init_late(void)
1336 /* Done! */ 1329 /* Done! */
1337 slab_state = FULL; 1330 slab_state = FULL;
1338 1331
1339 /*
1340 * Register a cpu startup notifier callback that initializes
1341 * cpu_cache_get for all new cpus
1342 */
1343 register_cpu_notifier(&cpucache_notifier);
1344
1345#ifdef CONFIG_NUMA 1332#ifdef CONFIG_NUMA
1346 /* 1333 /*
1347 * Register a memory hotplug callback that initializes and frees 1334 * Register a memory hotplug callback that initializes and frees
@@ -1358,13 +1345,14 @@ void __init kmem_cache_init_late(void)
1358 1345
1359static int __init cpucache_init(void) 1346static int __init cpucache_init(void)
1360{ 1347{
1361 int cpu; 1348 int ret;
1362 1349
1363 /* 1350 /*
1364 * Register the timers that return unneeded pages to the page allocator 1351 * Register the timers that return unneeded pages to the page allocator
1365 */ 1352 */
1366 for_each_online_cpu(cpu) 1353 ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SLAB online",
1367 start_cpu_timer(cpu); 1354 slab_online_cpu, slab_offline_cpu);
1355 WARN_ON(ret < 0);
1368 1356
1369 /* Done! */ 1357 /* Done! */
1370 slab_state = FULL; 1358 slab_state = FULL;
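
Two registration styles meet in the slab conversion: slab_prepare_cpu() and slab_dead_cpu() carry no registration call here because this series hooks them into the core hotplug state table (kernel/cpu.c, via include/linux/cpu.h) directly, while the cache reaper timer takes a dynamically allocated CPUHP_AP_ONLINE_DYN slot. For dynamic slots, cpuhp_setup_state() returns the allocated state number on success and a negative errno on failure, so code that may need to unregister keeps the id around; a sketch under those assumptions, with hypothetical names:

#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/module.h>

static enum cpuhp_state foo_online_state;

static int foo_online_cpu(unsigned int cpu)  { return 0; }
static int foo_offline_cpu(unsigned int cpu) { return 0; }

static int __init foo_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "foo:online",
				foo_online_cpu, foo_offline_cpu);
	if (ret < 0)
		return ret;
	foo_online_state = ret;	/* dynamic id, needed for removal */
	return 0;
}
module_init(foo_init);

static void __exit foo_exit(void)
{
	cpuhp_remove_state(foo_online_state);
}
module_exit(foo_exit);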
diff --git a/mm/slub.c b/mm/slub.c
index 9adae58462f8..2b3e740609e9 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -194,10 +194,6 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
194#define __OBJECT_POISON 0x80000000UL /* Poison object */ 194#define __OBJECT_POISON 0x80000000UL /* Poison object */
195#define __CMPXCHG_DOUBLE 0x40000000UL /* Use cmpxchg_double */ 195#define __CMPXCHG_DOUBLE 0x40000000UL /* Use cmpxchg_double */
196 196
197#ifdef CONFIG_SMP
198static struct notifier_block slab_notifier;
199#endif
200
201/* 197/*
202 * Tracking user of a slab. 198 * Tracking user of a slab.
203 */ 199 */
@@ -2305,6 +2301,25 @@ static void flush_all(struct kmem_cache *s)
2305} 2301}
2306 2302
2307/* 2303/*
 2304 * Use the CPU hotplug state machine to ensure that the cpu slabs are
 2305 * flushed when necessary.
2306 */
2307static int slub_cpu_dead(unsigned int cpu)
2308{
2309 struct kmem_cache *s;
2310 unsigned long flags;
2311
2312 mutex_lock(&slab_mutex);
2313 list_for_each_entry(s, &slab_caches, list) {
2314 local_irq_save(flags);
2315 __flush_cpu_slab(s, cpu);
2316 local_irq_restore(flags);
2317 }
2318 mutex_unlock(&slab_mutex);
2319 return 0;
2320}
2321
2322/*
2308 * Check if the objects in a per cpu structure fit numa 2323 * Check if the objects in a per cpu structure fit numa
2309 * locality expectations. 2324 * locality expectations.
2310 */ 2325 */
@@ -4144,9 +4159,8 @@ void __init kmem_cache_init(void)
4144 /* Setup random freelists for each cache */ 4159 /* Setup random freelists for each cache */
4145 init_freelist_randomization(); 4160 init_freelist_randomization();
4146 4161
4147#ifdef CONFIG_SMP 4162 cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL,
4148 register_cpu_notifier(&slab_notifier); 4163 slub_cpu_dead);
4149#endif
4150 4164
4151 pr_info("SLUB: HWalign=%d, Order=%d-%d, MinObjects=%d, CPUs=%d, Nodes=%d\n", 4165 pr_info("SLUB: HWalign=%d, Order=%d-%d, MinObjects=%d, CPUs=%d, Nodes=%d\n",
4152 cache_line_size(), 4166 cache_line_size(),
@@ -4210,43 +4224,6 @@ int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
4210 return err; 4224 return err;
4211} 4225}
4212 4226
4213#ifdef CONFIG_SMP
4214/*
4215 * Use the cpu notifier to insure that the cpu slabs are flushed when
4216 * necessary.
4217 */
4218static int slab_cpuup_callback(struct notifier_block *nfb,
4219 unsigned long action, void *hcpu)
4220{
4221 long cpu = (long)hcpu;
4222 struct kmem_cache *s;
4223 unsigned long flags;
4224
4225 switch (action) {
4226 case CPU_UP_CANCELED:
4227 case CPU_UP_CANCELED_FROZEN:
4228 case CPU_DEAD:
4229 case CPU_DEAD_FROZEN:
4230 mutex_lock(&slab_mutex);
4231 list_for_each_entry(s, &slab_caches, list) {
4232 local_irq_save(flags);
4233 __flush_cpu_slab(s, cpu);
4234 local_irq_restore(flags);
4235 }
4236 mutex_unlock(&slab_mutex);
4237 break;
4238 default:
4239 break;
4240 }
4241 return NOTIFY_OK;
4242}
4243
4244static struct notifier_block slab_notifier = {
4245 .notifier_call = slab_cpuup_callback
4246};
4247
4248#endif
4249
4250void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller) 4227void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
4251{ 4228{
4252 struct kmem_cache *s; 4229 struct kmem_cache *s;
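
SLUB needs only the dead side, and uses the _nocalls variant: the callbacks are merely installed for future transitions, since there is nothing to flush for CPUs already online at registration time. The distinction in miniature (hypothetical state and names):

#include <linux/cpuhotplug.h>
#include <linux/init.h>

static int foo_dead(unsigned int cpu)
{
	/* flush this CPU's cached state */
	return 0;
}

static int __init foo_init(void)
{
	/*
	 * _nocalls: install only. Plain cpuhp_setup_state() would in
	 * addition run the startup callback on every online CPU now.
	 */
	return cpuhp_setup_state_nocalls(CPUHP_FOO_DEAD, "foo:dead",
					 NULL, foo_dead);
}
early_initcall(foo_init);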
diff --git a/tools/testing/radix-tree/linux/cpu.h b/tools/testing/radix-tree/linux/cpu.h
index 60a40459f269..7cf412103205 100644
--- a/tools/testing/radix-tree/linux/cpu.h
+++ b/tools/testing/radix-tree/linux/cpu.h
@@ -7,19 +7,8 @@
7#define CPU_DOWN_PREPARE 0x0005 /* CPU (unsigned)v going down */ 7#define CPU_DOWN_PREPARE 0x0005 /* CPU (unsigned)v going down */
8#define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */ 8#define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */
9#define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */ 9#define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */
10#define CPU_DYING 0x0008 /* CPU (unsigned)v not running any task,
11 * not handling interrupts, soon dead.
12 * Called on the dying cpu, interrupts
13 * are already disabled. Must not
14 * sleep, must not fail */
15#define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug 10#define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug
16 * lock is dropped */ 11 * lock is dropped */
17#define CPU_STARTING 0x000A /* CPU (unsigned)v soon running.
18 * Called on the new cpu, just before
19 * enabling interrupts. Must not sleep,
20 * must not fail */
21#define CPU_DYING_IDLE 0x000B /* CPU (unsigned)v dying, reached
22 * idle loop. */
23#define CPU_BROKEN 0x000C /* CPU (unsigned)v did not die properly, 12#define CPU_BROKEN 0x000C /* CPU (unsigned)v did not die properly,
24 * perhaps due to preemption. */ 13 * perhaps due to preemption. */
25#define CPU_TASKS_FROZEN 0x0010 14#define CPU_TASKS_FROZEN 0x0010
@@ -30,5 +19,3 @@
30#define CPU_DOWN_PREPARE_FROZEN (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN) 19#define CPU_DOWN_PREPARE_FROZEN (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
31#define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN) 20#define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
32#define CPU_DEAD_FROZEN (CPU_DEAD | CPU_TASKS_FROZEN) 21#define CPU_DEAD_FROZEN (CPU_DEAD | CPU_TASKS_FROZEN)
33#define CPU_DYING_FROZEN (CPU_DYING | CPU_TASKS_FROZEN)
34#define CPU_STARTING_FROZEN (CPU_STARTING | CPU_TASKS_FROZEN)