author    Linus Torvalds <torvalds@linux-foundation.org>  2014-04-07 17:55:46 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-04-07 17:55:46 -0400
commit    467a9e1633043810259a7f5368fbcc1e84746137 (patch)
tree      c8a5bfd2a65455d7f6a59b312e348e069375bd9b
parent    b8780c363d808a726a34793caa900923d32b6b80 (diff)
parent    a0e247a8059223593f9c5c3d5c1fd50eedf415c0 (diff)
Merge tag 'cpu-hotplug-3.15-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull CPU hotplug notifiers registration fixes from Rafael Wysocki:

 "The purpose of this single series of commits from Srivatsa S Bhat (with
  a small piece from Gautham R Shenoy) touching multiple subsystems that
  use CPU hotplug notifiers is to provide a way to register them that
  will not lead to deadlocks with CPU online/offline operations as
  described in the changelog of commit 93ae4f978ca7f ("CPU hotplug:
  Provide lockless versions of callback registration functions").

  The first three commits in the series introduce the API and document
  it and the rest simply goes through the users of CPU hotplug notifiers
  and converts them to using the new method"

* tag 'cpu-hotplug-3.15-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (52 commits)
  net/iucv/iucv.c: Fix CPU hotplug callback registration
  net/core/flow.c: Fix CPU hotplug callback registration
  mm, zswap: Fix CPU hotplug callback registration
  mm, vmstat: Fix CPU hotplug callback registration
  profile: Fix CPU hotplug callback registration
  trace, ring-buffer: Fix CPU hotplug callback registration
  xen, balloon: Fix CPU hotplug callback registration
  hwmon, via-cputemp: Fix CPU hotplug callback registration
  hwmon, coretemp: Fix CPU hotplug callback registration
  thermal, x86-pkg-temp: Fix CPU hotplug callback registration
  octeon, watchdog: Fix CPU hotplug callback registration
  oprofile, nmi-timer: Fix CPU hotplug callback registration
  intel-idle: Fix CPU hotplug callback registration
  clocksource, dummy-timer: Fix CPU hotplug callback registration
  drivers/base/topology.c: Fix CPU hotplug callback registration
  acpi-cpufreq: Fix CPU hotplug callback registration
  zsmalloc: Fix CPU hotplug callback registration
  scsi, fcoe: Fix CPU hotplug callback registration
  scsi, bnx2fc: Fix CPU hotplug callback registration
  scsi, bnx2i: Fix CPU hotplug callback registration
  ...
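The conversion pattern applied across these commits is the one documented in
Documentation/cpu-hotplug.txt further down in this diff: bracket the init-time
loop over already-online CPUs and the notifier registration with
cpu_notifier_register_begin()/cpu_notifier_register_done(), and call the
double-underscored registration variant inside that section instead of taking
get_online_cpus(). A minimal before/after sketch, using the placeholder
foobar_* names from that documentation (not a real driver):

	/* Before: prone to ABBA deadlock between cpu_add_remove_lock
	 * and cpu_hotplug.lock */
	static int __init foobar_init(void)
	{
		int cpu;

		get_online_cpus();
		for_each_online_cpu(cpu)
			foobar_cpu_callback(&foobar_cpu_notifier,
					    CPU_ONLINE, (void *)(long)cpu);
		register_cpu_notifier(&foobar_cpu_notifier);
		put_online_cpus();
		return 0;
	}

	/* After: deadlock-free registration of the callback plus
	 * initialization of the CPUs that are already online */
	static int __init foobar_init(void)
	{
		int cpu;

		cpu_notifier_register_begin();
		for_each_online_cpu(cpu)
			foobar_cpu_callback(&foobar_cpu_notifier,
					    CPU_ONLINE, (void *)(long)cpu);
		__register_cpu_notifier(&foobar_cpu_notifier);
		cpu_notifier_register_done();
		return 0;
	}
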
-rw-r--r--  Documentation/cpu-hotplug.txt                   |  45
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c                 |   8
-rw-r--r--  arch/arm/kvm/arm.c                              |   7
-rw-r--r--  arch/arm64/kernel/debug-monitors.c              |   6
-rw-r--r--  arch/arm64/kernel/hw_breakpoint.c               |   7
-rw-r--r--  arch/ia64/kernel/err_inject.c                   |  15
-rw-r--r--  arch/ia64/kernel/palinfo.c                      |   6
-rw-r--r--  arch/ia64/kernel/salinfo.c                      |   6
-rw-r--r--  arch/ia64/kernel/topology.c                     |   6
-rw-r--r--  arch/powerpc/kernel/sysfs.c                     |   8
-rw-r--r--  arch/s390/kernel/cache.c                        |   5
-rw-r--r--  arch/s390/kernel/smp.c                          |  13
-rw-r--r--  arch/sparc/kernel/sysfs.c                       |   6
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c           |  13
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c                |   8
-rw-r--r--  arch/x86/kernel/cpu/mcheck/therm_throt.c        |  18
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd_ibs.c        |   6
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd_uncore.c     |   7
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_rapl.c     |   9
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_uncore.c   |   6
-rw-r--r--  arch/x86/kernel/cpuid.c                         |  15
-rw-r--r--  arch/x86/kernel/hpet.c                          |   4
-rw-r--r--  arch/x86/kernel/msr.c                           |  16
-rw-r--r--  arch/x86/kernel/vsyscall_64.c                   |   6
-rw-r--r--  arch/x86/kvm/x86.c                              |   7
-rw-r--r--  arch/x86/oprofile/nmi_int.c                     |  15
-rw-r--r--  arch/x86/pci/amd_bus.c                          |   5
-rw-r--r--  drivers/base/topology.c                         |  12
-rw-r--r--  drivers/clocksource/dummy_timer.c               |  11
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c                  |   7
-rw-r--r--  drivers/hwmon/coretemp.c                        |  14
-rw-r--r--  drivers/hwmon/via-cputemp.c                     |  14
-rw-r--r--  drivers/idle/intel_idle.c                       |  12
-rw-r--r--  drivers/oprofile/nmi_timer_int.c                |  23
-rw-r--r--  drivers/powercap/intel_rapl.c                   |  10
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_fcoe.c               |  12
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_init.c                 |  12
-rw-r--r--  drivers/scsi/fcoe/fcoe.c                        |  15
-rw-r--r--  drivers/thermal/x86_pkg_temp_thermal.c          |  14
-rw-r--r--  drivers/watchdog/octeon-wdt-main.c              |  11
-rw-r--r--  drivers/xen/balloon.c                           |  36
-rw-r--r--  include/linux/cpu.h                             |  47
-rw-r--r--  include/linux/perf_event.h                      |  16
-rw-r--r--  kernel/cpu.c                                    |  38
-rw-r--r--  kernel/profile.c                                |  20
-rw-r--r--  kernel/trace/ring_buffer.c                      |  19
-rw-r--r--  mm/vmstat.c                                     |   6
-rw-r--r--  mm/zsmalloc.c                                   |  17
-rw-r--r--  mm/zswap.c                                      |   8
-rw-r--r--  net/core/flow.c                                 |   8
-rw-r--r--  net/iucv/iucv.c                                 | 121
51 files changed, 550 insertions, 226 deletions
diff --git a/Documentation/cpu-hotplug.txt b/Documentation/cpu-hotplug.txt
index be675d2d15a7..a0b005d2bd95 100644
--- a/Documentation/cpu-hotplug.txt
+++ b/Documentation/cpu-hotplug.txt
@@ -312,12 +312,57 @@ things will happen if a notifier in path sent a BAD notify code.
 Q: I don't see my action being called for all CPUs already up and running?
 A: Yes, CPU notifiers are called only when new CPUs are on-lined or offlined.
    If you need to perform some action for each cpu already in the system, then
+   do this:
 
 	for_each_online_cpu(i) {
 		foobar_cpu_callback(&foobar_cpu_notifier, CPU_UP_PREPARE, i);
 		foobar_cpu_callback(&foobar_cpu_notifier, CPU_ONLINE, i);
 	}
 
+   However, if you want to register a hotplug callback, as well as perform
+   some initialization for CPUs that are already online, then do this:
+
+   Version 1: (Correct)
+   ---------
+
+	cpu_notifier_register_begin();
+
+	for_each_online_cpu(i) {
+		foobar_cpu_callback(&foobar_cpu_notifier,
+				    CPU_UP_PREPARE, i);
+		foobar_cpu_callback(&foobar_cpu_notifier,
+				    CPU_ONLINE, i);
+	}
+
+	/* Note the use of the double underscored version of the API */
+	__register_cpu_notifier(&foobar_cpu_notifier);
+
+	cpu_notifier_register_done();
+
+   Note that the following code is *NOT* the right way to achieve this,
+   because it is prone to an ABBA deadlock between the cpu_add_remove_lock
+   and the cpu_hotplug.lock.
+
+   Version 2: (Wrong!)
+   ---------
+
+	get_online_cpus();
+
+	for_each_online_cpu(i) {
+		foobar_cpu_callback(&foobar_cpu_notifier,
+				    CPU_UP_PREPARE, i);
+		foobar_cpu_callback(&foobar_cpu_notifier,
+				    CPU_ONLINE, i);
+	}
+
+	register_cpu_notifier(&foobar_cpu_notifier);
+
+	put_online_cpus();
+
+   So always use the first version shown above when you want to register
+   callbacks as well as initialize the already online CPUs.
+
+
 Q: If i would like to develop cpu hotplug support for a new architecture,
    what do i need at a minimum?
 A: The following are what is required for CPU hotplug infrastructure to work
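The teardown side follows the same discipline, as the module-exit paths
converted later in this diff (cpuid, msr, coretemp, the SCSI drivers, etc.)
show: the per-CPU cleanup loop and the notifier unregistration are wrapped in
the same begin/done section, using the double-underscored unregister variant.
A minimal sketch, again with placeholder foobar_* names rather than any real
driver's helpers:

	static void __exit foobar_exit(void)
	{
		int cpu;

		cpu_notifier_register_begin();

		for_each_online_cpu(cpu)
			foobar_cleanup_cpu(cpu);	/* placeholder per-cpu teardown */

		__unregister_cpu_notifier(&foobar_cpu_notifier);

		cpu_notifier_register_done();
	}
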
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 9da35c6d3411..4d963fb66e3f 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -1073,6 +1073,8 @@ static int __init arch_hw_breakpoint_init(void)
1073 core_num_brps = get_num_brps(); 1073 core_num_brps = get_num_brps();
1074 core_num_wrps = get_num_wrps(); 1074 core_num_wrps = get_num_wrps();
1075 1075
1076 cpu_notifier_register_begin();
1077
1076 /* 1078 /*
1077 * We need to tread carefully here because DBGSWENABLE may be 1079 * We need to tread carefully here because DBGSWENABLE may be
1078 * driven low on this core and there isn't an architected way to 1080 * driven low on this core and there isn't an architected way to
@@ -1089,6 +1091,7 @@ static int __init arch_hw_breakpoint_init(void)
1089 if (!cpumask_empty(&debug_err_mask)) { 1091 if (!cpumask_empty(&debug_err_mask)) {
1090 core_num_brps = 0; 1092 core_num_brps = 0;
1091 core_num_wrps = 0; 1093 core_num_wrps = 0;
1094 cpu_notifier_register_done();
1092 return 0; 1095 return 0;
1093 } 1096 }
1094 1097
@@ -1108,7 +1111,10 @@ static int __init arch_hw_breakpoint_init(void)
1108 TRAP_HWBKPT, "breakpoint debug exception"); 1111 TRAP_HWBKPT, "breakpoint debug exception");
1109 1112
1110 /* Register hotplug and PM notifiers. */ 1113 /* Register hotplug and PM notifiers. */
1111 register_cpu_notifier(&dbg_reset_nb); 1114 __register_cpu_notifier(&dbg_reset_nb);
1115
1116 cpu_notifier_register_done();
1117
1112 pm_init(); 1118 pm_init();
1113 return 0; 1119 return 0;
1114} 1120}
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index bd18bb8b2770..f0e50a0f3a65 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -1051,21 +1051,26 @@ int kvm_arch_init(void *opaque)
1051 } 1051 }
1052 } 1052 }
1053 1053
1054 cpu_notifier_register_begin();
1055
1054 err = init_hyp_mode(); 1056 err = init_hyp_mode();
1055 if (err) 1057 if (err)
1056 goto out_err; 1058 goto out_err;
1057 1059
1058 err = register_cpu_notifier(&hyp_init_cpu_nb); 1060 err = __register_cpu_notifier(&hyp_init_cpu_nb);
1059 if (err) { 1061 if (err) {
1060 kvm_err("Cannot register HYP init CPU notifier (%d)\n", err); 1062 kvm_err("Cannot register HYP init CPU notifier (%d)\n", err);
1061 goto out_err; 1063 goto out_err;
1062 } 1064 }
1063 1065
1066 cpu_notifier_register_done();
1067
1064 hyp_cpu_pm_init(); 1068 hyp_cpu_pm_init();
1065 1069
1066 kvm_coproc_table_init(); 1070 kvm_coproc_table_init();
1067 return 0; 1071 return 0;
1068out_err: 1072out_err:
1073 cpu_notifier_register_done();
1069 return err; 1074 return err;
1070} 1075}
1071 1076
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 14ba23c61153..ed3955a95747 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -154,13 +154,17 @@ static struct notifier_block os_lock_nb = {
154 154
155static int debug_monitors_init(void) 155static int debug_monitors_init(void)
156{ 156{
157 cpu_notifier_register_begin();
158
157 /* Clear the OS lock. */ 159 /* Clear the OS lock. */
158 on_each_cpu(clear_os_lock, NULL, 1); 160 on_each_cpu(clear_os_lock, NULL, 1);
159 isb(); 161 isb();
160 local_dbg_enable(); 162 local_dbg_enable();
161 163
162 /* Register hotplug handler. */ 164 /* Register hotplug handler. */
163 register_cpu_notifier(&os_lock_nb); 165 __register_cpu_notifier(&os_lock_nb);
166
167 cpu_notifier_register_done();
164 return 0; 168 return 0;
165} 169}
166postcore_initcall(debug_monitors_init); 170postcore_initcall(debug_monitors_init);
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index f17f581116fc..bee789757806 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -913,6 +913,8 @@ static int __init arch_hw_breakpoint_init(void)
913 pr_info("found %d breakpoint and %d watchpoint registers.\n", 913 pr_info("found %d breakpoint and %d watchpoint registers.\n",
914 core_num_brps, core_num_wrps); 914 core_num_brps, core_num_wrps);
915 915
916 cpu_notifier_register_begin();
917
916 /* 918 /*
917 * Reset the breakpoint resources. We assume that a halting 919 * Reset the breakpoint resources. We assume that a halting
918 * debugger will leave the world in a nice state for us. 920 * debugger will leave the world in a nice state for us.
@@ -927,7 +929,10 @@ static int __init arch_hw_breakpoint_init(void)
927 TRAP_HWBKPT, "hw-watchpoint handler"); 929 TRAP_HWBKPT, "hw-watchpoint handler");
928 930
929 /* Register hotplug notifier. */ 931 /* Register hotplug notifier. */
930 register_cpu_notifier(&hw_breakpoint_reset_nb); 932 __register_cpu_notifier(&hw_breakpoint_reset_nb);
933
934 cpu_notifier_register_done();
935
931 /* Register cpu_suspend hw breakpoint restore hook */ 936 /* Register cpu_suspend hw breakpoint restore hook */
932 cpu_suspend_set_dbg_restorer(hw_breakpoint_reset); 937 cpu_suspend_set_dbg_restorer(hw_breakpoint_reset);
933 938
diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
index f59c0b844e88..0c161ed6d18e 100644
--- a/arch/ia64/kernel/err_inject.c
+++ b/arch/ia64/kernel/err_inject.c
@@ -269,12 +269,17 @@ err_inject_init(void)
269#ifdef ERR_INJ_DEBUG 269#ifdef ERR_INJ_DEBUG
270 printk(KERN_INFO "Enter error injection driver.\n"); 270 printk(KERN_INFO "Enter error injection driver.\n");
271#endif 271#endif
272
273 cpu_notifier_register_begin();
274
272 for_each_online_cpu(i) { 275 for_each_online_cpu(i) {
273 err_inject_cpu_callback(&err_inject_cpu_notifier, CPU_ONLINE, 276 err_inject_cpu_callback(&err_inject_cpu_notifier, CPU_ONLINE,
274 (void *)(long)i); 277 (void *)(long)i);
275 } 278 }
276 279
277 register_hotcpu_notifier(&err_inject_cpu_notifier); 280 __register_hotcpu_notifier(&err_inject_cpu_notifier);
281
282 cpu_notifier_register_done();
278 283
279 return 0; 284 return 0;
280} 285}
@@ -288,11 +293,17 @@ err_inject_exit(void)
288#ifdef ERR_INJ_DEBUG 293#ifdef ERR_INJ_DEBUG
289 printk(KERN_INFO "Exit error injection driver.\n"); 294 printk(KERN_INFO "Exit error injection driver.\n");
290#endif 295#endif
296
297 cpu_notifier_register_begin();
298
291 for_each_online_cpu(i) { 299 for_each_online_cpu(i) {
292 sys_dev = get_cpu_device(i); 300 sys_dev = get_cpu_device(i);
293 sysfs_remove_group(&sys_dev->kobj, &err_inject_attr_group); 301 sysfs_remove_group(&sys_dev->kobj, &err_inject_attr_group);
294 } 302 }
295 unregister_hotcpu_notifier(&err_inject_cpu_notifier); 303
304 __unregister_hotcpu_notifier(&err_inject_cpu_notifier);
305
306 cpu_notifier_register_done();
296} 307}
297 308
298module_init(err_inject_init); 309module_init(err_inject_init);
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index ab333284f4b2..c39c3cd3ac34 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -996,13 +996,17 @@ palinfo_init(void)
996 if (!palinfo_dir) 996 if (!palinfo_dir)
997 return -ENOMEM; 997 return -ENOMEM;
998 998
999 cpu_notifier_register_begin();
1000
999 /* Create palinfo dirs in /proc for all online cpus */ 1001 /* Create palinfo dirs in /proc for all online cpus */
1000 for_each_online_cpu(i) { 1002 for_each_online_cpu(i) {
1001 create_palinfo_proc_entries(i); 1003 create_palinfo_proc_entries(i);
1002 } 1004 }
1003 1005
1004 /* Register for future delivery via notify registration */ 1006 /* Register for future delivery via notify registration */
1005 register_hotcpu_notifier(&palinfo_cpu_notifier); 1007 __register_hotcpu_notifier(&palinfo_cpu_notifier);
1008
1009 cpu_notifier_register_done();
1006 1010
1007 return 0; 1011 return 0;
1008} 1012}
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index 960a396f5929..ee9719eebb1e 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -635,6 +635,8 @@ salinfo_init(void)
635 (void *)salinfo_entries[i].feature); 635 (void *)salinfo_entries[i].feature);
636 } 636 }
637 637
638 cpu_notifier_register_begin();
639
638 for (i = 0; i < ARRAY_SIZE(salinfo_log_name); i++) { 640 for (i = 0; i < ARRAY_SIZE(salinfo_log_name); i++) {
639 data = salinfo_data + i; 641 data = salinfo_data + i;
640 data->type = i; 642 data->type = i;
@@ -669,7 +671,9 @@ salinfo_init(void)
669 salinfo_timer.function = &salinfo_timeout; 671 salinfo_timer.function = &salinfo_timeout;
670 add_timer(&salinfo_timer); 672 add_timer(&salinfo_timer);
671 673
672 register_hotcpu_notifier(&salinfo_cpu_notifier); 674 __register_hotcpu_notifier(&salinfo_cpu_notifier);
675
676 cpu_notifier_register_done();
673 677
674 return 0; 678 return 0;
675} 679}
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index ca69a5a96dcc..f295f9abba4b 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -454,12 +454,16 @@ static int __init cache_sysfs_init(void)
454{ 454{
455 int i; 455 int i;
456 456
457 cpu_notifier_register_begin();
458
457 for_each_online_cpu(i) { 459 for_each_online_cpu(i) {
458 struct device *sys_dev = get_cpu_device((unsigned int)i); 460 struct device *sys_dev = get_cpu_device((unsigned int)i);
459 cache_add_dev(sys_dev); 461 cache_add_dev(sys_dev);
460 } 462 }
461 463
462 register_hotcpu_notifier(&cache_cpu_notifier); 464 __register_hotcpu_notifier(&cache_cpu_notifier);
465
466 cpu_notifier_register_done();
463 467
464 return 0; 468 return 0;
465} 469}
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 97e1dc917683..d90d4b7810d6 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -975,7 +975,8 @@ static int __init topology_init(void)
975 int cpu; 975 int cpu;
976 976
977 register_nodes(); 977 register_nodes();
978 register_cpu_notifier(&sysfs_cpu_nb); 978
979 cpu_notifier_register_begin();
979 980
980 for_each_possible_cpu(cpu) { 981 for_each_possible_cpu(cpu) {
981 struct cpu *c = &per_cpu(cpu_devices, cpu); 982 struct cpu *c = &per_cpu(cpu_devices, cpu);
@@ -999,6 +1000,11 @@ static int __init topology_init(void)
999 if (cpu_online(cpu)) 1000 if (cpu_online(cpu))
1000 register_cpu_online(cpu); 1001 register_cpu_online(cpu);
1001 } 1002 }
1003
1004 __register_cpu_notifier(&sysfs_cpu_nb);
1005
1006 cpu_notifier_register_done();
1007
1002#ifdef CONFIG_PPC64 1008#ifdef CONFIG_PPC64
1003 sysfs_create_dscr_default(); 1009 sysfs_create_dscr_default();
1004#endif /* CONFIG_PPC64 */ 1010#endif /* CONFIG_PPC64 */
diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c
index 3a414c0f93ed..c0b03c28d157 100644
--- a/arch/s390/kernel/cache.c
+++ b/arch/s390/kernel/cache.c
@@ -378,9 +378,12 @@ static int __init cache_init(void)
378 if (!test_facility(34)) 378 if (!test_facility(34))
379 return 0; 379 return 0;
380 cache_build_info(); 380 cache_build_info();
381
382 cpu_notifier_register_begin();
381 for_each_online_cpu(cpu) 383 for_each_online_cpu(cpu)
382 cache_add_cpu(cpu); 384 cache_add_cpu(cpu);
383 hotcpu_notifier(cache_hotplug, 0); 385 __hotcpu_notifier(cache_hotplug, 0);
386 cpu_notifier_register_done();
384 return 0; 387 return 0;
385} 388}
386device_initcall(cache_init); 389device_initcall(cache_init);
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 8827883310dd..5a640b395bd4 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -1057,19 +1057,24 @@ static DEVICE_ATTR(rescan, 0200, NULL, rescan_store);
1057 1057
1058static int __init s390_smp_init(void) 1058static int __init s390_smp_init(void)
1059{ 1059{
1060 int cpu, rc; 1060 int cpu, rc = 0;
1061 1061
1062 hotcpu_notifier(smp_cpu_notify, 0);
1063#ifdef CONFIG_HOTPLUG_CPU 1062#ifdef CONFIG_HOTPLUG_CPU
1064 rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan); 1063 rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
1065 if (rc) 1064 if (rc)
1066 return rc; 1065 return rc;
1067#endif 1066#endif
1067 cpu_notifier_register_begin();
1068 for_each_present_cpu(cpu) { 1068 for_each_present_cpu(cpu) {
1069 rc = smp_add_present_cpu(cpu); 1069 rc = smp_add_present_cpu(cpu);
1070 if (rc) 1070 if (rc)
1071 return rc; 1071 goto out;
1072 } 1072 }
1073 return 0; 1073
1074 __hotcpu_notifier(smp_cpu_notify, 0);
1075
1076out:
1077 cpu_notifier_register_done();
1078 return rc;
1074} 1079}
1075subsys_initcall(s390_smp_init); 1080subsys_initcall(s390_smp_init);
diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
index c21c673e5f7c..a364000ca1aa 100644
--- a/arch/sparc/kernel/sysfs.c
+++ b/arch/sparc/kernel/sysfs.c
@@ -300,7 +300,7 @@ static int __init topology_init(void)
300 300
301 check_mmu_stats(); 301 check_mmu_stats();
302 302
303 register_cpu_notifier(&sysfs_cpu_nb); 303 cpu_notifier_register_begin();
304 304
305 for_each_possible_cpu(cpu) { 305 for_each_possible_cpu(cpu) {
306 struct cpu *c = &per_cpu(cpu_devices, cpu); 306 struct cpu *c = &per_cpu(cpu_devices, cpu);
@@ -310,6 +310,10 @@ static int __init topology_init(void)
310 register_cpu_online(cpu); 310 register_cpu_online(cpu);
311 } 311 }
312 312
313 __register_cpu_notifier(&sysfs_cpu_nb);
314
315 cpu_notifier_register_done();
316
313 return 0; 317 return 0;
314} 318}
315 319
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 0641113e2965..a952e9c85b6f 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -1225,21 +1225,24 @@ static struct notifier_block cacheinfo_cpu_notifier = {
1225 1225
1226static int __init cache_sysfs_init(void) 1226static int __init cache_sysfs_init(void)
1227{ 1227{
1228 int i; 1228 int i, err = 0;
1229 1229
1230 if (num_cache_leaves == 0) 1230 if (num_cache_leaves == 0)
1231 return 0; 1231 return 0;
1232 1232
1233 cpu_notifier_register_begin();
1233 for_each_online_cpu(i) { 1234 for_each_online_cpu(i) {
1234 int err;
1235 struct device *dev = get_cpu_device(i); 1235 struct device *dev = get_cpu_device(i);
1236 1236
1237 err = cache_add_dev(dev); 1237 err = cache_add_dev(dev);
1238 if (err) 1238 if (err)
1239 return err; 1239 goto out;
1240 } 1240 }
1241 register_hotcpu_notifier(&cacheinfo_cpu_notifier); 1241 __register_hotcpu_notifier(&cacheinfo_cpu_notifier);
1242 return 0; 1242
1243out:
1244 cpu_notifier_register_done();
1245 return err;
1243} 1246}
1244 1247
1245device_initcall(cache_sysfs_init); 1248device_initcall(cache_sysfs_init);
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 4d5419b249da..9b7734b1f975 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -2434,14 +2434,18 @@ static __init int mcheck_init_device(void)
2434 if (err) 2434 if (err)
2435 return err; 2435 return err;
2436 2436
2437 cpu_notifier_register_begin();
2437 for_each_online_cpu(i) { 2438 for_each_online_cpu(i) {
2438 err = mce_device_create(i); 2439 err = mce_device_create(i);
2439 if (err) 2440 if (err) {
2441 cpu_notifier_register_done();
2440 return err; 2442 return err;
2443 }
2441 } 2444 }
2442 2445
2443 register_syscore_ops(&mce_syscore_ops); 2446 register_syscore_ops(&mce_syscore_ops);
2444 register_hotcpu_notifier(&mce_cpu_notifier); 2447 __register_hotcpu_notifier(&mce_cpu_notifier);
2448 cpu_notifier_register_done();
2445 2449
2446 /* register character device /dev/mcelog */ 2450 /* register character device /dev/mcelog */
2447 misc_register(&mce_chrdev_device); 2451 misc_register(&mce_chrdev_device);
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 3eec7de76efb..d921b7ee6595 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -271,9 +271,6 @@ static void thermal_throttle_remove_dev(struct device *dev)
271 sysfs_remove_group(&dev->kobj, &thermal_attr_group); 271 sysfs_remove_group(&dev->kobj, &thermal_attr_group);
272} 272}
273 273
274/* Mutex protecting device creation against CPU hotplug: */
275static DEFINE_MUTEX(therm_cpu_lock);
276
277/* Get notified when a cpu comes on/off. Be hotplug friendly. */ 274/* Get notified when a cpu comes on/off. Be hotplug friendly. */
278static int 275static int
279thermal_throttle_cpu_callback(struct notifier_block *nfb, 276thermal_throttle_cpu_callback(struct notifier_block *nfb,
@@ -289,18 +286,14 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
289 switch (action) { 286 switch (action) {
290 case CPU_UP_PREPARE: 287 case CPU_UP_PREPARE:
291 case CPU_UP_PREPARE_FROZEN: 288 case CPU_UP_PREPARE_FROZEN:
292 mutex_lock(&therm_cpu_lock);
293 err = thermal_throttle_add_dev(dev, cpu); 289 err = thermal_throttle_add_dev(dev, cpu);
294 mutex_unlock(&therm_cpu_lock);
295 WARN_ON(err); 290 WARN_ON(err);
296 break; 291 break;
297 case CPU_UP_CANCELED: 292 case CPU_UP_CANCELED:
298 case CPU_UP_CANCELED_FROZEN: 293 case CPU_UP_CANCELED_FROZEN:
299 case CPU_DEAD: 294 case CPU_DEAD:
300 case CPU_DEAD_FROZEN: 295 case CPU_DEAD_FROZEN:
301 mutex_lock(&therm_cpu_lock);
302 thermal_throttle_remove_dev(dev); 296 thermal_throttle_remove_dev(dev);
303 mutex_unlock(&therm_cpu_lock);
304 break; 297 break;
305 } 298 }
306 return notifier_from_errno(err); 299 return notifier_from_errno(err);
@@ -319,19 +312,16 @@ static __init int thermal_throttle_init_device(void)
319 if (!atomic_read(&therm_throt_en)) 312 if (!atomic_read(&therm_throt_en))
320 return 0; 313 return 0;
321 314
322 register_hotcpu_notifier(&thermal_throttle_cpu_notifier); 315 cpu_notifier_register_begin();
323 316
324#ifdef CONFIG_HOTPLUG_CPU
325 mutex_lock(&therm_cpu_lock);
326#endif
327 /* connect live CPUs to sysfs */ 317 /* connect live CPUs to sysfs */
328 for_each_online_cpu(cpu) { 318 for_each_online_cpu(cpu) {
329 err = thermal_throttle_add_dev(get_cpu_device(cpu), cpu); 319 err = thermal_throttle_add_dev(get_cpu_device(cpu), cpu);
330 WARN_ON(err); 320 WARN_ON(err);
331 } 321 }
332#ifdef CONFIG_HOTPLUG_CPU 322
333 mutex_unlock(&therm_cpu_lock); 323 __register_hotcpu_notifier(&thermal_throttle_cpu_notifier);
334#endif 324 cpu_notifier_register_done();
335 325
336 return 0; 326 return 0;
337} 327}
diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
index 4b8e4d3cd6ea..4c36bbe3173a 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
@@ -926,13 +926,13 @@ static __init int amd_ibs_init(void)
926 goto out; 926 goto out;
927 927
928 perf_ibs_pm_init(); 928 perf_ibs_pm_init();
929 get_online_cpus(); 929 cpu_notifier_register_begin();
930 ibs_caps = caps; 930 ibs_caps = caps;
931 /* make ibs_caps visible to other cpus: */ 931 /* make ibs_caps visible to other cpus: */
932 smp_mb(); 932 smp_mb();
933 perf_cpu_notifier(perf_ibs_cpu_notifier);
934 smp_call_function(setup_APIC_ibs, NULL, 1); 933 smp_call_function(setup_APIC_ibs, NULL, 1);
935 put_online_cpus(); 934 __perf_cpu_notifier(perf_ibs_cpu_notifier);
935 cpu_notifier_register_done();
936 936
937 ret = perf_event_ibs_init(); 937 ret = perf_event_ibs_init();
938out: 938out:
diff --git a/arch/x86/kernel/cpu/perf_event_amd_uncore.c b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
index 754291adec33..3bbdf4cd38b9 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
@@ -531,15 +531,16 @@ static int __init amd_uncore_init(void)
531 if (ret) 531 if (ret)
532 return -ENODEV; 532 return -ENODEV;
533 533
534 get_online_cpus(); 534 cpu_notifier_register_begin();
535
535 /* init cpus already online before registering for hotplug notifier */ 536 /* init cpus already online before registering for hotplug notifier */
536 for_each_online_cpu(cpu) { 537 for_each_online_cpu(cpu) {
537 amd_uncore_cpu_up_prepare(cpu); 538 amd_uncore_cpu_up_prepare(cpu);
538 smp_call_function_single(cpu, init_cpu_already_online, NULL, 1); 539 smp_call_function_single(cpu, init_cpu_already_online, NULL, 1);
539 } 540 }
540 541
541 register_cpu_notifier(&amd_uncore_cpu_notifier_block); 542 __register_cpu_notifier(&amd_uncore_cpu_notifier_block);
542 put_online_cpus(); 543 cpu_notifier_register_done();
543 544
544 return 0; 545 return 0;
545} 546}
diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
index 5ad35ad94d0f..059218ed5208 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
@@ -646,19 +646,20 @@ static int __init rapl_pmu_init(void)
646 /* unsupported */ 646 /* unsupported */
647 return 0; 647 return 0;
648 } 648 }
649 get_online_cpus(); 649
650 cpu_notifier_register_begin();
650 651
651 for_each_online_cpu(cpu) { 652 for_each_online_cpu(cpu) {
652 rapl_cpu_prepare(cpu); 653 rapl_cpu_prepare(cpu);
653 rapl_cpu_init(cpu); 654 rapl_cpu_init(cpu);
654 } 655 }
655 656
656 perf_cpu_notifier(rapl_cpu_notifier); 657 __perf_cpu_notifier(rapl_cpu_notifier);
657 658
658 ret = perf_pmu_register(&rapl_pmu_class, "power", -1); 659 ret = perf_pmu_register(&rapl_pmu_class, "power", -1);
659 if (WARN_ON(ret)) { 660 if (WARN_ON(ret)) {
660 pr_info("RAPL PMU detected, registration failed (%d), RAPL PMU disabled\n", ret); 661 pr_info("RAPL PMU detected, registration failed (%d), RAPL PMU disabled\n", ret);
661 put_online_cpus(); 662 cpu_notifier_register_done();
662 return -1; 663 return -1;
663 } 664 }
664 665
@@ -672,7 +673,7 @@ static int __init rapl_pmu_init(void)
672 hweight32(rapl_cntr_mask), 673 hweight32(rapl_cntr_mask),
673 ktime_to_ms(pmu->timer_interval)); 674 ktime_to_ms(pmu->timer_interval));
674 675
675 put_online_cpus(); 676 cpu_notifier_register_done();
676 677
677 return 0; 678 return 0;
678} 679}
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index bd2253d40cff..65bbbea38b9c 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -4244,7 +4244,7 @@ static void __init uncore_cpumask_init(void)
4244 if (!cpumask_empty(&uncore_cpu_mask)) 4244 if (!cpumask_empty(&uncore_cpu_mask))
4245 return; 4245 return;
4246 4246
4247 get_online_cpus(); 4247 cpu_notifier_register_begin();
4248 4248
4249 for_each_online_cpu(cpu) { 4249 for_each_online_cpu(cpu) {
4250 int i, phys_id = topology_physical_package_id(cpu); 4250 int i, phys_id = topology_physical_package_id(cpu);
@@ -4263,9 +4263,9 @@ static void __init uncore_cpumask_init(void)
4263 } 4263 }
4264 on_each_cpu(uncore_cpu_setup, NULL, 1); 4264 on_each_cpu(uncore_cpu_setup, NULL, 1);
4265 4265
4266 register_cpu_notifier(&uncore_cpu_nb); 4266 __register_cpu_notifier(&uncore_cpu_nb);
4267 4267
4268 put_online_cpus(); 4268 cpu_notifier_register_done();
4269} 4269}
4270 4270
4271 4271
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 7d9481c743f8..3225ae6c5180 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -198,14 +198,15 @@ static int __init cpuid_init(void)
198 goto out_chrdev; 198 goto out_chrdev;
199 } 199 }
200 cpuid_class->devnode = cpuid_devnode; 200 cpuid_class->devnode = cpuid_devnode;
201 get_online_cpus(); 201
202 cpu_notifier_register_begin();
202 for_each_online_cpu(i) { 203 for_each_online_cpu(i) {
203 err = cpuid_device_create(i); 204 err = cpuid_device_create(i);
204 if (err != 0) 205 if (err != 0)
205 goto out_class; 206 goto out_class;
206 } 207 }
207 register_hotcpu_notifier(&cpuid_class_cpu_notifier); 208 __register_hotcpu_notifier(&cpuid_class_cpu_notifier);
208 put_online_cpus(); 209 cpu_notifier_register_done();
209 210
210 err = 0; 211 err = 0;
211 goto out; 212 goto out;
@@ -215,7 +216,7 @@ out_class:
215 for_each_online_cpu(i) { 216 for_each_online_cpu(i) {
216 cpuid_device_destroy(i); 217 cpuid_device_destroy(i);
217 } 218 }
218 put_online_cpus(); 219 cpu_notifier_register_done();
219 class_destroy(cpuid_class); 220 class_destroy(cpuid_class);
220out_chrdev: 221out_chrdev:
221 __unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid"); 222 __unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid");
@@ -227,13 +228,13 @@ static void __exit cpuid_exit(void)
227{ 228{
228 int cpu = 0; 229 int cpu = 0;
229 230
230 get_online_cpus(); 231 cpu_notifier_register_begin();
231 for_each_online_cpu(cpu) 232 for_each_online_cpu(cpu)
232 cpuid_device_destroy(cpu); 233 cpuid_device_destroy(cpu);
233 class_destroy(cpuid_class); 234 class_destroy(cpuid_class);
234 __unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid"); 235 __unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid");
235 unregister_hotcpu_notifier(&cpuid_class_cpu_notifier); 236 __unregister_hotcpu_notifier(&cpuid_class_cpu_notifier);
236 put_online_cpus(); 237 cpu_notifier_register_done();
237} 238}
238 239
239module_init(cpuid_init); 240module_init(cpuid_init);
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 93eed15a8fd4..8d80ae011603 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -941,12 +941,14 @@ static __init int hpet_late_init(void)
941 if (boot_cpu_has(X86_FEATURE_ARAT)) 941 if (boot_cpu_has(X86_FEATURE_ARAT))
942 return 0; 942 return 0;
943 943
944 cpu_notifier_register_begin();
944 for_each_online_cpu(cpu) { 945 for_each_online_cpu(cpu) {
945 hpet_cpuhp_notify(NULL, CPU_ONLINE, (void *)(long)cpu); 946 hpet_cpuhp_notify(NULL, CPU_ONLINE, (void *)(long)cpu);
946 } 947 }
947 948
948 /* This notifier should be called after workqueue is ready */ 949 /* This notifier should be called after workqueue is ready */
949 hotcpu_notifier(hpet_cpuhp_notify, -20); 950 __hotcpu_notifier(hpet_cpuhp_notify, -20);
951 cpu_notifier_register_done();
950 952
951 return 0; 953 return 0;
952} 954}
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 05266b5aae22..c9603ac80de5 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -259,14 +259,15 @@ static int __init msr_init(void)
259 goto out_chrdev; 259 goto out_chrdev;
260 } 260 }
261 msr_class->devnode = msr_devnode; 261 msr_class->devnode = msr_devnode;
262 get_online_cpus(); 262
263 cpu_notifier_register_begin();
263 for_each_online_cpu(i) { 264 for_each_online_cpu(i) {
264 err = msr_device_create(i); 265 err = msr_device_create(i);
265 if (err != 0) 266 if (err != 0)
266 goto out_class; 267 goto out_class;
267 } 268 }
268 register_hotcpu_notifier(&msr_class_cpu_notifier); 269 __register_hotcpu_notifier(&msr_class_cpu_notifier);
269 put_online_cpus(); 270 cpu_notifier_register_done();
270 271
271 err = 0; 272 err = 0;
272 goto out; 273 goto out;
@@ -275,7 +276,7 @@ out_class:
275 i = 0; 276 i = 0;
276 for_each_online_cpu(i) 277 for_each_online_cpu(i)
277 msr_device_destroy(i); 278 msr_device_destroy(i);
278 put_online_cpus(); 279 cpu_notifier_register_done();
279 class_destroy(msr_class); 280 class_destroy(msr_class);
280out_chrdev: 281out_chrdev:
281 __unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr"); 282 __unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr");
@@ -286,13 +287,14 @@ out:
286static void __exit msr_exit(void) 287static void __exit msr_exit(void)
287{ 288{
288 int cpu = 0; 289 int cpu = 0;
289 get_online_cpus(); 290
291 cpu_notifier_register_begin();
290 for_each_online_cpu(cpu) 292 for_each_online_cpu(cpu)
291 msr_device_destroy(cpu); 293 msr_device_destroy(cpu);
292 class_destroy(msr_class); 294 class_destroy(msr_class);
293 __unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr"); 295 __unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr");
294 unregister_hotcpu_notifier(&msr_class_cpu_notifier); 296 __unregister_hotcpu_notifier(&msr_class_cpu_notifier);
295 put_online_cpus(); 297 cpu_notifier_register_done();
296} 298}
297 299
298module_init(msr_init); 300module_init(msr_init);
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 9ea287666c65..8b3b3eb3cead 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -348,9 +348,13 @@ static int __init vsyscall_init(void)
348{ 348{
349 BUG_ON(VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE)); 349 BUG_ON(VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE));
350 350
351 cpu_notifier_register_begin();
352
351 on_each_cpu(cpu_vsyscall_init, NULL, 1); 353 on_each_cpu(cpu_vsyscall_init, NULL, 1);
352 /* notifier priority > KVM */ 354 /* notifier priority > KVM */
353 hotcpu_notifier(cpu_vsyscall_notifier, 30); 355 __hotcpu_notifier(cpu_vsyscall_notifier, 30);
356
357 cpu_notifier_register_done();
354 358
355 return 0; 359 return 0;
356} 360}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index d1c55f8722c6..9d1b5cd4d34c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5422,7 +5422,8 @@ static void kvm_timer_init(void)
5422 int cpu; 5422 int cpu;
5423 5423
5424 max_tsc_khz = tsc_khz; 5424 max_tsc_khz = tsc_khz;
5425 register_hotcpu_notifier(&kvmclock_cpu_notifier_block); 5425
5426 cpu_notifier_register_begin();
5426 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { 5427 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
5427#ifdef CONFIG_CPU_FREQ 5428#ifdef CONFIG_CPU_FREQ
5428 struct cpufreq_policy policy; 5429 struct cpufreq_policy policy;
@@ -5439,6 +5440,10 @@ static void kvm_timer_init(void)
5439 pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz); 5440 pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz);
5440 for_each_online_cpu(cpu) 5441 for_each_online_cpu(cpu)
5441 smp_call_function_single(cpu, tsc_khz_changed, NULL, 1); 5442 smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
5443
5444 __register_hotcpu_notifier(&kvmclock_cpu_notifier_block);
5445 cpu_notifier_register_done();
5446
5442} 5447}
5443 5448
5444static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu); 5449static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 6890d8498e0b..379e8bd0deea 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -494,14 +494,19 @@ static int nmi_setup(void)
494 if (err) 494 if (err)
495 goto fail; 495 goto fail;
496 496
497 cpu_notifier_register_begin();
498
499 /* Use get/put_online_cpus() to protect 'nmi_enabled' */
497 get_online_cpus(); 500 get_online_cpus();
498 register_cpu_notifier(&oprofile_cpu_nb);
499 nmi_enabled = 1; 501 nmi_enabled = 1;
500 /* make nmi_enabled visible to the nmi handler: */ 502 /* make nmi_enabled visible to the nmi handler: */
501 smp_mb(); 503 smp_mb();
502 on_each_cpu(nmi_cpu_setup, NULL, 1); 504 on_each_cpu(nmi_cpu_setup, NULL, 1);
505 __register_cpu_notifier(&oprofile_cpu_nb);
503 put_online_cpus(); 506 put_online_cpus();
504 507
508 cpu_notifier_register_done();
509
505 return 0; 510 return 0;
506fail: 511fail:
507 free_msrs(); 512 free_msrs();
@@ -512,12 +517,18 @@ static void nmi_shutdown(void)
512{ 517{
513 struct op_msrs *msrs; 518 struct op_msrs *msrs;
514 519
520 cpu_notifier_register_begin();
521
522 /* Use get/put_online_cpus() to protect 'nmi_enabled' & 'ctr_running' */
515 get_online_cpus(); 523 get_online_cpus();
516 unregister_cpu_notifier(&oprofile_cpu_nb);
517 on_each_cpu(nmi_cpu_shutdown, NULL, 1); 524 on_each_cpu(nmi_cpu_shutdown, NULL, 1);
518 nmi_enabled = 0; 525 nmi_enabled = 0;
519 ctr_running = 0; 526 ctr_running = 0;
527 __unregister_cpu_notifier(&oprofile_cpu_nb);
520 put_online_cpus(); 528 put_online_cpus();
529
530 cpu_notifier_register_done();
531
521 /* make variables visible to the nmi handler: */ 532 /* make variables visible to the nmi handler: */
522 smp_mb(); 533 smp_mb();
523 unregister_nmi_handler(NMI_LOCAL, "oprofile"); 534 unregister_nmi_handler(NMI_LOCAL, "oprofile");
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index a313a7fb6b86..e88f4c53d7f6 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -370,10 +370,13 @@ static int __init pci_io_ecs_init(void)
370 if (early_pci_allowed()) 370 if (early_pci_allowed())
371 pci_enable_pci_io_ecs(); 371 pci_enable_pci_io_ecs();
372 372
373 register_cpu_notifier(&amd_cpu_notifier); 373 cpu_notifier_register_begin();
374 for_each_online_cpu(cpu) 374 for_each_online_cpu(cpu)
375 amd_cpu_notify(&amd_cpu_notifier, (unsigned long)CPU_ONLINE, 375 amd_cpu_notify(&amd_cpu_notifier, (unsigned long)CPU_ONLINE,
376 (void *)(long)cpu); 376 (void *)(long)cpu);
377 __register_cpu_notifier(&amd_cpu_notifier);
378 cpu_notifier_register_done();
379
377 pci_probe |= PCI_HAS_IO_ECS; 380 pci_probe |= PCI_HAS_IO_ECS;
378 381
379 return 0; 382 return 0;
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index ad9d17762664..bbcbd3c43926 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -160,16 +160,20 @@ static int topology_cpu_callback(struct notifier_block *nfb,
160static int topology_sysfs_init(void) 160static int topology_sysfs_init(void)
161{ 161{
162 int cpu; 162 int cpu;
163 int rc; 163 int rc = 0;
164
165 cpu_notifier_register_begin();
164 166
165 for_each_online_cpu(cpu) { 167 for_each_online_cpu(cpu) {
166 rc = topology_add_dev(cpu); 168 rc = topology_add_dev(cpu);
167 if (rc) 169 if (rc)
168 return rc; 170 goto out;
169 } 171 }
170 hotcpu_notifier(topology_cpu_callback, 0); 172 __hotcpu_notifier(topology_cpu_callback, 0);
171 173
172 return 0; 174out:
175 cpu_notifier_register_done();
176 return rc;
173} 177}
174 178
175device_initcall(topology_sysfs_init); 179device_initcall(topology_sysfs_init);
diff --git a/drivers/clocksource/dummy_timer.c b/drivers/clocksource/dummy_timer.c
index b3eb582d6a6f..ad3572541728 100644
--- a/drivers/clocksource/dummy_timer.c
+++ b/drivers/clocksource/dummy_timer.c
@@ -56,14 +56,19 @@ static struct notifier_block dummy_timer_cpu_nb = {
56 56
57static int __init dummy_timer_register(void) 57static int __init dummy_timer_register(void)
58{ 58{
59 int err = register_cpu_notifier(&dummy_timer_cpu_nb); 59 int err = 0;
60
61 cpu_notifier_register_begin();
62 err = __register_cpu_notifier(&dummy_timer_cpu_nb);
60 if (err) 63 if (err)
61 return err; 64 goto out;
62 65
63 /* We won't get a call on the boot CPU, so register immediately */ 66 /* We won't get a call on the boot CPU, so register immediately */
64 if (num_possible_cpus() > 1) 67 if (num_possible_cpus() > 1)
65 dummy_timer_setup(); 68 dummy_timer_setup();
66 69
67 return 0; 70out:
71 cpu_notifier_register_done();
72 return err;
68} 73}
69early_initcall(dummy_timer_register); 74early_initcall(dummy_timer_register);
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 822ca03a87f7..d5eaedbe464f 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -906,15 +906,16 @@ static void __init acpi_cpufreq_boost_init(void)
906 906
907 acpi_cpufreq_driver.boost_supported = true; 907 acpi_cpufreq_driver.boost_supported = true;
908 acpi_cpufreq_driver.boost_enabled = boost_state(0); 908 acpi_cpufreq_driver.boost_enabled = boost_state(0);
909 get_online_cpus(); 909
910 cpu_notifier_register_begin();
910 911
911 /* Force all MSRs to the same value */ 912 /* Force all MSRs to the same value */
912 boost_set_msrs(acpi_cpufreq_driver.boost_enabled, 913 boost_set_msrs(acpi_cpufreq_driver.boost_enabled,
913 cpu_online_mask); 914 cpu_online_mask);
914 915
915 register_cpu_notifier(&boost_nb); 916 __register_cpu_notifier(&boost_nb);
916 917
917 put_online_cpus(); 918 cpu_notifier_register_done();
918 } 919 }
919} 920}
920 921
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index f31bc4c48644..6d02e3b06375 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -810,20 +810,20 @@ static int __init coretemp_init(void)
810 if (err) 810 if (err)
811 goto exit; 811 goto exit;
812 812
813 get_online_cpus(); 813 cpu_notifier_register_begin();
814 for_each_online_cpu(i) 814 for_each_online_cpu(i)
815 get_core_online(i); 815 get_core_online(i);
816 816
817#ifndef CONFIG_HOTPLUG_CPU 817#ifndef CONFIG_HOTPLUG_CPU
818 if (list_empty(&pdev_list)) { 818 if (list_empty(&pdev_list)) {
819 put_online_cpus(); 819 cpu_notifier_register_done();
820 err = -ENODEV; 820 err = -ENODEV;
821 goto exit_driver_unreg; 821 goto exit_driver_unreg;
822 } 822 }
823#endif 823#endif
824 824
825 register_hotcpu_notifier(&coretemp_cpu_notifier); 825 __register_hotcpu_notifier(&coretemp_cpu_notifier);
826 put_online_cpus(); 826 cpu_notifier_register_done();
827 return 0; 827 return 0;
828 828
829#ifndef CONFIG_HOTPLUG_CPU 829#ifndef CONFIG_HOTPLUG_CPU
@@ -838,8 +838,8 @@ static void __exit coretemp_exit(void)
838{ 838{
839 struct pdev_entry *p, *n; 839 struct pdev_entry *p, *n;
840 840
841 get_online_cpus(); 841 cpu_notifier_register_begin();
842 unregister_hotcpu_notifier(&coretemp_cpu_notifier); 842 __unregister_hotcpu_notifier(&coretemp_cpu_notifier);
843 mutex_lock(&pdev_list_mutex); 843 mutex_lock(&pdev_list_mutex);
844 list_for_each_entry_safe(p, n, &pdev_list, list) { 844 list_for_each_entry_safe(p, n, &pdev_list, list) {
845 platform_device_unregister(p->pdev); 845 platform_device_unregister(p->pdev);
@@ -847,7 +847,7 @@ static void __exit coretemp_exit(void)
847 kfree(p); 847 kfree(p);
848 } 848 }
849 mutex_unlock(&pdev_list_mutex); 849 mutex_unlock(&pdev_list_mutex);
850 put_online_cpus(); 850 cpu_notifier_register_done();
851 platform_driver_unregister(&coretemp_driver); 851 platform_driver_unregister(&coretemp_driver);
852} 852}
853 853
diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
index 38944e94f65f..8df43c51de2c 100644
--- a/drivers/hwmon/via-cputemp.c
+++ b/drivers/hwmon/via-cputemp.c
@@ -319,7 +319,7 @@ static int __init via_cputemp_init(void)
319 if (err) 319 if (err)
320 goto exit; 320 goto exit;
321 321
322 get_online_cpus(); 322 cpu_notifier_register_begin();
323 for_each_online_cpu(i) { 323 for_each_online_cpu(i) {
324 struct cpuinfo_x86 *c = &cpu_data(i); 324 struct cpuinfo_x86 *c = &cpu_data(i);
325 325
@@ -339,14 +339,14 @@ static int __init via_cputemp_init(void)
339 339
340#ifndef CONFIG_HOTPLUG_CPU 340#ifndef CONFIG_HOTPLUG_CPU
341 if (list_empty(&pdev_list)) { 341 if (list_empty(&pdev_list)) {
342 put_online_cpus(); 342 cpu_notifier_register_done();
343 err = -ENODEV; 343 err = -ENODEV;
344 goto exit_driver_unreg; 344 goto exit_driver_unreg;
345 } 345 }
346#endif 346#endif
347 347
348 register_hotcpu_notifier(&via_cputemp_cpu_notifier); 348 __register_hotcpu_notifier(&via_cputemp_cpu_notifier);
349 put_online_cpus(); 349 cpu_notifier_register_done();
350 return 0; 350 return 0;
351 351
352#ifndef CONFIG_HOTPLUG_CPU 352#ifndef CONFIG_HOTPLUG_CPU
@@ -361,8 +361,8 @@ static void __exit via_cputemp_exit(void)
361{ 361{
362 struct pdev_entry *p, *n; 362 struct pdev_entry *p, *n;
363 363
364 get_online_cpus(); 364 cpu_notifier_register_begin();
365 unregister_hotcpu_notifier(&via_cputemp_cpu_notifier); 365 __unregister_hotcpu_notifier(&via_cputemp_cpu_notifier);
366 mutex_lock(&pdev_list_mutex); 366 mutex_lock(&pdev_list_mutex);
367 list_for_each_entry_safe(p, n, &pdev_list, list) { 367 list_for_each_entry_safe(p, n, &pdev_list, list) {
368 platform_device_unregister(p->pdev); 368 platform_device_unregister(p->pdev);
@@ -370,7 +370,7 @@ static void __exit via_cputemp_exit(void)
370 kfree(p); 370 kfree(p);
371 } 371 }
372 mutex_unlock(&pdev_list_mutex); 372 mutex_unlock(&pdev_list_mutex);
373 put_online_cpus(); 373 cpu_notifier_register_done();
374 platform_driver_unregister(&via_cputemp_driver); 374 platform_driver_unregister(&via_cputemp_driver);
375} 375}
376 376
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 8e1939f564f4..51493ed4643b 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -681,14 +681,19 @@ static int __init intel_idle_init(void)
681 if (intel_idle_cpuidle_devices == NULL) 681 if (intel_idle_cpuidle_devices == NULL)
682 return -ENOMEM; 682 return -ENOMEM;
683 683
684 cpu_notifier_register_begin();
685
684 for_each_online_cpu(i) { 686 for_each_online_cpu(i) {
685 retval = intel_idle_cpu_init(i); 687 retval = intel_idle_cpu_init(i);
686 if (retval) { 688 if (retval) {
689 cpu_notifier_register_done();
687 cpuidle_unregister_driver(&intel_idle_driver); 690 cpuidle_unregister_driver(&intel_idle_driver);
688 return retval; 691 return retval;
689 } 692 }
690 } 693 }
691 register_cpu_notifier(&cpu_hotplug_notifier); 694 __register_cpu_notifier(&cpu_hotplug_notifier);
695
696 cpu_notifier_register_done();
692 697
693 return 0; 698 return 0;
694} 699}
@@ -698,10 +703,13 @@ static void __exit intel_idle_exit(void)
698 intel_idle_cpuidle_devices_uninit(); 703 intel_idle_cpuidle_devices_uninit();
699 cpuidle_unregister_driver(&intel_idle_driver); 704 cpuidle_unregister_driver(&intel_idle_driver);
700 705
706 cpu_notifier_register_begin();
701 707
702 if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) 708 if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
703 on_each_cpu(__setup_broadcast_timer, (void *)false, 1); 709 on_each_cpu(__setup_broadcast_timer, (void *)false, 1);
704 unregister_cpu_notifier(&cpu_hotplug_notifier); 710 __unregister_cpu_notifier(&cpu_hotplug_notifier);
711
712 cpu_notifier_register_done();
705 713
706 return; 714 return;
707} 715}
diff --git a/drivers/oprofile/nmi_timer_int.c b/drivers/oprofile/nmi_timer_int.c
index 76f1c9357f39..9559829fb234 100644
--- a/drivers/oprofile/nmi_timer_int.c
+++ b/drivers/oprofile/nmi_timer_int.c
@@ -108,8 +108,8 @@ static void nmi_timer_shutdown(void)
108 struct perf_event *event; 108 struct perf_event *event;
109 int cpu; 109 int cpu;
110 110
111 get_online_cpus(); 111 cpu_notifier_register_begin();
112 unregister_cpu_notifier(&nmi_timer_cpu_nb); 112 __unregister_cpu_notifier(&nmi_timer_cpu_nb);
113 for_each_possible_cpu(cpu) { 113 for_each_possible_cpu(cpu) {
114 event = per_cpu(nmi_timer_events, cpu); 114 event = per_cpu(nmi_timer_events, cpu);
115 if (!event) 115 if (!event)
@@ -119,7 +119,7 @@ static void nmi_timer_shutdown(void)
119 perf_event_release_kernel(event); 119 perf_event_release_kernel(event);
120 } 120 }
121 121
122 put_online_cpus(); 122 cpu_notifier_register_done();
123} 123}
124 124
125static int nmi_timer_setup(void) 125static int nmi_timer_setup(void)
@@ -132,20 +132,23 @@ static int nmi_timer_setup(void)
132 do_div(period, HZ); 132 do_div(period, HZ);
133 nmi_timer_attr.sample_period = period; 133 nmi_timer_attr.sample_period = period;
134 134
135 get_online_cpus(); 135 cpu_notifier_register_begin();
136 err = register_cpu_notifier(&nmi_timer_cpu_nb); 136 err = __register_cpu_notifier(&nmi_timer_cpu_nb);
137 if (err) 137 if (err)
138 goto out; 138 goto out;
139
139 /* can't attach events to offline cpus: */ 140 /* can't attach events to offline cpus: */
140 for_each_online_cpu(cpu) { 141 for_each_online_cpu(cpu) {
141 err = nmi_timer_start_cpu(cpu); 142 err = nmi_timer_start_cpu(cpu);
142 if (err) 143 if (err) {
143 break; 144 cpu_notifier_register_done();
145 nmi_timer_shutdown();
146 return err;
147 }
144 } 148 }
145 if (err) 149
146 nmi_timer_shutdown();
147out: 150out:
148 put_online_cpus(); 151 cpu_notifier_register_done();
149 return err; 152 return err;
150} 153}
151 154
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index 61b51e17d932..d9a0770b6c73 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -1374,6 +1374,9 @@ static int __init rapl_init(void)
1374 1374
1375 return -ENODEV; 1375 return -ENODEV;
1376 } 1376 }
1377
1378 cpu_notifier_register_begin();
1379
1377 /* prevent CPU hotplug during detection */ 1380 /* prevent CPU hotplug during detection */
1378 get_online_cpus(); 1381 get_online_cpus();
1379 ret = rapl_detect_topology(); 1382 ret = rapl_detect_topology();
@@ -1385,20 +1388,23 @@ static int __init rapl_init(void)
1385 ret = -ENODEV; 1388 ret = -ENODEV;
1386 goto done; 1389 goto done;
1387 } 1390 }
1388 register_hotcpu_notifier(&rapl_cpu_notifier); 1391 __register_hotcpu_notifier(&rapl_cpu_notifier);
1389done: 1392done:
1390 put_online_cpus(); 1393 put_online_cpus();
1394 cpu_notifier_register_done();
1391 1395
1392 return ret; 1396 return ret;
1393} 1397}
1394 1398
1395static void __exit rapl_exit(void) 1399static void __exit rapl_exit(void)
1396{ 1400{
1401 cpu_notifier_register_begin();
1397 get_online_cpus(); 1402 get_online_cpus();
1398 unregister_hotcpu_notifier(&rapl_cpu_notifier); 1403 __unregister_hotcpu_notifier(&rapl_cpu_notifier);
1399 rapl_unregister_powercap(); 1404 rapl_unregister_powercap();
1400 rapl_cleanup_data(); 1405 rapl_cleanup_data();
1401 put_online_cpus(); 1406 put_online_cpus();
1407 cpu_notifier_register_done();
1402} 1408}
1403 1409
1404module_init(rapl_init); 1410module_init(rapl_init);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 6287f6a8b79d..1d41f4b9114f 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -2592,12 +2592,16 @@ static int __init bnx2fc_mod_init(void)
2592 spin_lock_init(&p->fp_work_lock); 2592 spin_lock_init(&p->fp_work_lock);
2593 } 2593 }
2594 2594
2595 cpu_notifier_register_begin();
2596
2595 for_each_online_cpu(cpu) { 2597 for_each_online_cpu(cpu) {
2596 bnx2fc_percpu_thread_create(cpu); 2598 bnx2fc_percpu_thread_create(cpu);
2597 } 2599 }
2598 2600
2599 /* Initialize per CPU interrupt thread */ 2601 /* Initialize per CPU interrupt thread */
2600 register_hotcpu_notifier(&bnx2fc_cpu_notifier); 2602 __register_hotcpu_notifier(&bnx2fc_cpu_notifier);
2603
2604 cpu_notifier_register_done();
2601 2605
2602 cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb); 2606 cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb);
2603 2607
@@ -2662,13 +2666,17 @@ static void __exit bnx2fc_mod_exit(void)
2662 if (l2_thread) 2666 if (l2_thread)
2663 kthread_stop(l2_thread); 2667 kthread_stop(l2_thread);
2664 2668
2665 unregister_hotcpu_notifier(&bnx2fc_cpu_notifier); 2669 cpu_notifier_register_begin();
2666 2670
2667 /* Destroy per cpu threads */ 2671 /* Destroy per cpu threads */
2668 for_each_online_cpu(cpu) { 2672 for_each_online_cpu(cpu) {
2669 bnx2fc_percpu_thread_destroy(cpu); 2673 bnx2fc_percpu_thread_destroy(cpu);
2670 } 2674 }
2671 2675
2676 __unregister_hotcpu_notifier(&bnx2fc_cpu_notifier);
2677
2678 cpu_notifier_register_done();
2679
2672 destroy_workqueue(bnx2fc_wq); 2680 destroy_workqueue(bnx2fc_wq);
2673 /* 2681 /*
2674 * detach from scsi transport 2682 * detach from scsi transport
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 34c294b42c84..80c03b452d61 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -537,11 +537,15 @@ static int __init bnx2i_mod_init(void)
537 p->iothread = NULL; 537 p->iothread = NULL;
538 } 538 }
539 539
540 cpu_notifier_register_begin();
541
540 for_each_online_cpu(cpu) 542 for_each_online_cpu(cpu)
541 bnx2i_percpu_thread_create(cpu); 543 bnx2i_percpu_thread_create(cpu);
542 544
543 /* Initialize per CPU interrupt thread */ 545 /* Initialize per CPU interrupt thread */
544 register_hotcpu_notifier(&bnx2i_cpu_notifier); 546 __register_hotcpu_notifier(&bnx2i_cpu_notifier);
547
548 cpu_notifier_register_done();
545 549
546 return 0; 550 return 0;
547 551
@@ -581,11 +585,15 @@ static void __exit bnx2i_mod_exit(void)
581 } 585 }
582 mutex_unlock(&bnx2i_dev_lock); 586 mutex_unlock(&bnx2i_dev_lock);
583 587
584 unregister_hotcpu_notifier(&bnx2i_cpu_notifier); 588 cpu_notifier_register_begin();
585 589
586 for_each_online_cpu(cpu) 590 for_each_online_cpu(cpu)
587 bnx2i_percpu_thread_destroy(cpu); 591 bnx2i_percpu_thread_destroy(cpu);
588 592
593 __unregister_hotcpu_notifier(&bnx2i_cpu_notifier);
594
595 cpu_notifier_register_done();
596
589 iscsi_unregister_transport(&bnx2i_iscsi_transport); 597 iscsi_unregister_transport(&bnx2i_iscsi_transport);
590 cnic_unregister_driver(CNIC_ULP_ISCSI); 598 cnic_unregister_driver(CNIC_ULP_ISCSI);
591} 599}
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index f3170008ae71..d5e105b173f0 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -2633,14 +2633,18 @@ static int __init fcoe_init(void)
2633 skb_queue_head_init(&p->fcoe_rx_list); 2633 skb_queue_head_init(&p->fcoe_rx_list);
2634 } 2634 }
2635 2635
2636 cpu_notifier_register_begin();
2637
2636 for_each_online_cpu(cpu) 2638 for_each_online_cpu(cpu)
2637 fcoe_percpu_thread_create(cpu); 2639 fcoe_percpu_thread_create(cpu);
2638 2640
2639 /* Initialize per CPU interrupt thread */ 2641 /* Initialize per CPU interrupt thread */
2640 rc = register_hotcpu_notifier(&fcoe_cpu_notifier); 2642 rc = __register_hotcpu_notifier(&fcoe_cpu_notifier);
2641 if (rc) 2643 if (rc)
2642 goto out_free; 2644 goto out_free;
2643 2645
2646 cpu_notifier_register_done();
2647
2644 /* Setup link change notification */ 2648 /* Setup link change notification */
2645 fcoe_dev_setup(); 2649 fcoe_dev_setup();
2646 2650
@@ -2655,6 +2659,9 @@ out_free:
2655 for_each_online_cpu(cpu) { 2659 for_each_online_cpu(cpu) {
2656 fcoe_percpu_thread_destroy(cpu); 2660 fcoe_percpu_thread_destroy(cpu);
2657 } 2661 }
2662
2663 cpu_notifier_register_done();
2664
2658 mutex_unlock(&fcoe_config_mutex); 2665 mutex_unlock(&fcoe_config_mutex);
2659 destroy_workqueue(fcoe_wq); 2666 destroy_workqueue(fcoe_wq);
2660 return rc; 2667 return rc;
@@ -2687,11 +2694,15 @@ static void __exit fcoe_exit(void)
2687 } 2694 }
2688 rtnl_unlock(); 2695 rtnl_unlock();
2689 2696
2690 unregister_hotcpu_notifier(&fcoe_cpu_notifier); 2697 cpu_notifier_register_begin();
2691 2698
2692 for_each_online_cpu(cpu) 2699 for_each_online_cpu(cpu)
2693 fcoe_percpu_thread_destroy(cpu); 2700 fcoe_percpu_thread_destroy(cpu);
2694 2701
2702 __unregister_hotcpu_notifier(&fcoe_cpu_notifier);
2703
2704 cpu_notifier_register_done();
2705
2695 mutex_unlock(&fcoe_config_mutex); 2706 mutex_unlock(&fcoe_config_mutex);
2696 2707
2697 /* 2708 /*
diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
index 081fd7e6a9f0..9ea3d9d49ffc 100644
--- a/drivers/thermal/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/x86_pkg_temp_thermal.c
@@ -590,12 +590,12 @@ static int __init pkg_temp_thermal_init(void)
590 platform_thermal_package_rate_control = 590 platform_thermal_package_rate_control =
591 pkg_temp_thermal_platform_thermal_rate_control; 591 pkg_temp_thermal_platform_thermal_rate_control;
592 592
593 get_online_cpus(); 593 cpu_notifier_register_begin();
594 for_each_online_cpu(i) 594 for_each_online_cpu(i)
595 if (get_core_online(i)) 595 if (get_core_online(i))
596 goto err_ret; 596 goto err_ret;
597 register_hotcpu_notifier(&pkg_temp_thermal_notifier); 597 __register_hotcpu_notifier(&pkg_temp_thermal_notifier);
598 put_online_cpus(); 598 cpu_notifier_register_done();
599 599
600 pkg_temp_debugfs_init(); /* Don't care if fails */ 600 pkg_temp_debugfs_init(); /* Don't care if fails */
601 601
@@ -604,7 +604,7 @@ static int __init pkg_temp_thermal_init(void)
604err_ret: 604err_ret:
605 for_each_online_cpu(i) 605 for_each_online_cpu(i)
606 put_core_offline(i); 606 put_core_offline(i);
607 put_online_cpus(); 607 cpu_notifier_register_done();
608 kfree(pkg_work_scheduled); 608 kfree(pkg_work_scheduled);
609 platform_thermal_package_notify = NULL; 609 platform_thermal_package_notify = NULL;
610 platform_thermal_package_rate_control = NULL; 610 platform_thermal_package_rate_control = NULL;
@@ -617,8 +617,8 @@ static void __exit pkg_temp_thermal_exit(void)
617 struct phy_dev_entry *phdev, *n; 617 struct phy_dev_entry *phdev, *n;
618 int i; 618 int i;
619 619
620 get_online_cpus(); 620 cpu_notifier_register_begin();
621 unregister_hotcpu_notifier(&pkg_temp_thermal_notifier); 621 __unregister_hotcpu_notifier(&pkg_temp_thermal_notifier);
622 mutex_lock(&phy_dev_list_mutex); 622 mutex_lock(&phy_dev_list_mutex);
623 list_for_each_entry_safe(phdev, n, &phy_dev_list, list) { 623 list_for_each_entry_safe(phdev, n, &phy_dev_list, list) {
624 /* Retore old MSR value for package thermal interrupt */ 624 /* Retore old MSR value for package thermal interrupt */
@@ -636,7 +636,7 @@ static void __exit pkg_temp_thermal_exit(void)
636 for_each_online_cpu(i) 636 for_each_online_cpu(i)
637 cancel_delayed_work_sync( 637 cancel_delayed_work_sync(
638 &per_cpu(pkg_temp_thermal_threshold_work, i)); 638 &per_cpu(pkg_temp_thermal_threshold_work, i));
639 put_online_cpus(); 639 cpu_notifier_register_done();
640 640
641 kfree(pkg_work_scheduled); 641 kfree(pkg_work_scheduled);
642 642
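
x86_pkg_temp_thermal shows the other common starting point: code that protected the online-CPU walk with get_online_cpus()/put_online_cpus() and then called register_hotcpu_notifier(). That ordering read-holds cpu_hotplug.lock while register_cpu_notifier() goes on to take cpu_add_remove_lock, whereas cpu_down()/cpu_up() take cpu_add_remove_lock first and then wait for get_online_cpus() readers to drain, so the two paths can deadlock. Replacing the get/put pair with cpu_notifier_register_begin()/done() takes only cpu_add_remove_lock, which also keeps hotplug out for the duration. A hedged before/after sketch (foo_setup() and foo_nb are illustrative placeholders):

#include <linux/cpu.h>
#include <linux/notifier.h>

static struct notifier_block foo_nb;    /* .notifier_call set up elsewhere (hypothetical) */

/* Deadlock-prone ordering that this series removes: */
static int __init foo_init_old(void)
{
        int cpu;

        get_online_cpus();                      /* read-holds cpu_hotplug.lock */
        for_each_online_cpu(cpu)
                foo_setup(cpu);                 /* hypothetical per-CPU setup */
        register_hotcpu_notifier(&foo_nb);      /* also takes cpu_add_remove_lock */
        put_online_cpus();
        return 0;
}

/* Converted ordering: a single lock that also excludes hotplug: */
static int __init foo_init_new(void)
{
        int cpu;

        cpu_notifier_register_begin();
        for_each_online_cpu(cpu)
                foo_setup(cpu);
        __register_hotcpu_notifier(&foo_nb);
        cpu_notifier_register_done();
        return 0;
}
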
diff --git a/drivers/watchdog/octeon-wdt-main.c b/drivers/watchdog/octeon-wdt-main.c
index 461208831428..4baf2d788920 100644
--- a/drivers/watchdog/octeon-wdt-main.c
+++ b/drivers/watchdog/octeon-wdt-main.c
@@ -708,10 +708,13 @@ static int __init octeon_wdt_init(void)
708 708
709 cpumask_clear(&irq_enabled_cpus); 709 cpumask_clear(&irq_enabled_cpus);
710 710
711 cpu_notifier_register_begin();
711 for_each_online_cpu(cpu) 712 for_each_online_cpu(cpu)
712 octeon_wdt_setup_interrupt(cpu); 713 octeon_wdt_setup_interrupt(cpu);
713 714
714 register_hotcpu_notifier(&octeon_wdt_cpu_notifier); 715 __register_hotcpu_notifier(&octeon_wdt_cpu_notifier);
716 cpu_notifier_register_done();
717
715out: 718out:
716 return ret; 719 return ret;
717} 720}
@@ -725,7 +728,8 @@ static void __exit octeon_wdt_cleanup(void)
725 728
726 misc_deregister(&octeon_wdt_miscdev); 729 misc_deregister(&octeon_wdt_miscdev);
727 730
728 unregister_hotcpu_notifier(&octeon_wdt_cpu_notifier); 731 cpu_notifier_register_begin();
732 __unregister_hotcpu_notifier(&octeon_wdt_cpu_notifier);
729 733
730 for_each_online_cpu(cpu) { 734 for_each_online_cpu(cpu) {
731 int core = cpu2core(cpu); 735 int core = cpu2core(cpu);
@@ -734,6 +738,9 @@ static void __exit octeon_wdt_cleanup(void)
734 /* Free the interrupt handler */ 738 /* Free the interrupt handler */
735 free_irq(OCTEON_IRQ_WDOG0 + core, octeon_wdt_poke_irq); 739 free_irq(OCTEON_IRQ_WDOG0 + core, octeon_wdt_poke_irq);
736 } 740 }
741
742 cpu_notifier_register_done();
743
737 /* 744 /*
738 * Disable the boot-bus memory, the code it points to is soon 745 * Disable the boot-bus memory, the code it points to is soon
739 * to go missing. 746 * to go missing.
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 61a6ac8fa8fc..b7a506f2bb14 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -604,19 +604,29 @@ static void __init balloon_add_region(unsigned long start_pfn,
604 } 604 }
605} 605}
606 606
607static int alloc_balloon_scratch_page(int cpu)
608{
609 if (per_cpu(balloon_scratch_page, cpu) != NULL)
610 return 0;
611
612 per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
613 if (per_cpu(balloon_scratch_page, cpu) == NULL) {
614 pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
615 return -ENOMEM;
616 }
617
618 return 0;
619}
620
621
607static int balloon_cpu_notify(struct notifier_block *self, 622static int balloon_cpu_notify(struct notifier_block *self,
608 unsigned long action, void *hcpu) 623 unsigned long action, void *hcpu)
609{ 624{
610 int cpu = (long)hcpu; 625 int cpu = (long)hcpu;
611 switch (action) { 626 switch (action) {
612 case CPU_UP_PREPARE: 627 case CPU_UP_PREPARE:
613 if (per_cpu(balloon_scratch_page, cpu) != NULL) 628 if (alloc_balloon_scratch_page(cpu))
614 break;
615 per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
616 if (per_cpu(balloon_scratch_page, cpu) == NULL) {
617 pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
618 return NOTIFY_BAD; 629 return NOTIFY_BAD;
619 }
620 break; 630 break;
621 default: 631 default:
622 break; 632 break;
@@ -636,15 +646,17 @@ static int __init balloon_init(void)
636 return -ENODEV; 646 return -ENODEV;
637 647
638 if (!xen_feature(XENFEAT_auto_translated_physmap)) { 648 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
639 for_each_online_cpu(cpu) 649 register_cpu_notifier(&balloon_cpu_notifier);
640 { 650
641 per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL); 651 get_online_cpus();
642 if (per_cpu(balloon_scratch_page, cpu) == NULL) { 652 for_each_online_cpu(cpu) {
643 pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu); 653 if (alloc_balloon_scratch_page(cpu)) {
654 put_online_cpus();
655 unregister_cpu_notifier(&balloon_cpu_notifier);
644 return -ENOMEM; 656 return -ENOMEM;
645 } 657 }
646 } 658 }
647 register_cpu_notifier(&balloon_cpu_notifier); 659 put_online_cpus();
648 } 660 }
649 661
650 pr_info("Initialising balloon driver\n"); 662 pr_info("Initialising balloon driver\n");
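
The Xen balloon driver is converted differently: it keeps the locking register_cpu_notifier()/unregister_cpu_notifier() pair and simply registers before taking get_online_cpus(), so the two locks are never acquired in the inverted order. This works because alloc_balloon_scratch_page() is idempotent, so a CPU that the freshly registered notifier already handled is harmless to visit again in the init loop. A sketch of that alternative ordering (foo_alloc_percpu() and the other foo_* names are hypothetical):

#include <linux/cpu.h>
#include <linux/notifier.h>

static int foo_cpu_notify(struct notifier_block *nb,
                          unsigned long action, void *hcpu)
{
        if (action == CPU_UP_PREPARE && foo_alloc_percpu((long)hcpu))
                return NOTIFY_BAD;
        return NOTIFY_OK;
}

static struct notifier_block foo_cpu_notifier = {
        .notifier_call = foo_cpu_notify,
};

static int __init foo_init(void)
{
        int cpu;

        /* Register first: only cpu_add_remove_lock is taken here. */
        register_cpu_notifier(&foo_cpu_notifier);

        /*
         * Walk the online CPUs under the hotplug read lock.  A CPU that
         * came online in between was already handled by the notifier,
         * so foo_alloc_percpu() must tolerate being called twice.
         */
        get_online_cpus();
        for_each_online_cpu(cpu) {
                if (foo_alloc_percpu(cpu)) {
                        put_online_cpus();
                        unregister_cpu_notifier(&foo_cpu_notifier);
                        return -ENOMEM;
                }
        }
        put_online_cpus();

        return 0;
}
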
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 03e962e23eaf..81887120395c 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -115,26 +115,46 @@ enum {
115 { .notifier_call = fn, .priority = pri }; \ 115 { .notifier_call = fn, .priority = pri }; \
116 register_cpu_notifier(&fn##_nb); \ 116 register_cpu_notifier(&fn##_nb); \
117} 117}
118
119#define __cpu_notifier(fn, pri) { \
120 static struct notifier_block fn##_nb = \
121 { .notifier_call = fn, .priority = pri }; \
122 __register_cpu_notifier(&fn##_nb); \
123}
118#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */ 124#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
119#define cpu_notifier(fn, pri) do { (void)(fn); } while (0) 125#define cpu_notifier(fn, pri) do { (void)(fn); } while (0)
126#define __cpu_notifier(fn, pri) do { (void)(fn); } while (0)
120#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */ 127#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
128
121#ifdef CONFIG_HOTPLUG_CPU 129#ifdef CONFIG_HOTPLUG_CPU
122extern int register_cpu_notifier(struct notifier_block *nb); 130extern int register_cpu_notifier(struct notifier_block *nb);
131extern int __register_cpu_notifier(struct notifier_block *nb);
123extern void unregister_cpu_notifier(struct notifier_block *nb); 132extern void unregister_cpu_notifier(struct notifier_block *nb);
133extern void __unregister_cpu_notifier(struct notifier_block *nb);
124#else 134#else
125 135
126#ifndef MODULE 136#ifndef MODULE
127extern int register_cpu_notifier(struct notifier_block *nb); 137extern int register_cpu_notifier(struct notifier_block *nb);
138extern int __register_cpu_notifier(struct notifier_block *nb);
128#else 139#else
129static inline int register_cpu_notifier(struct notifier_block *nb) 140static inline int register_cpu_notifier(struct notifier_block *nb)
130{ 141{
131 return 0; 142 return 0;
132} 143}
144
145static inline int __register_cpu_notifier(struct notifier_block *nb)
146{
147 return 0;
148}
133#endif 149#endif
134 150
135static inline void unregister_cpu_notifier(struct notifier_block *nb) 151static inline void unregister_cpu_notifier(struct notifier_block *nb)
136{ 152{
137} 153}
154
155static inline void __unregister_cpu_notifier(struct notifier_block *nb)
156{
157}
138#endif 158#endif
139 159
140int cpu_up(unsigned int cpu); 160int cpu_up(unsigned int cpu);
@@ -142,19 +162,32 @@ void notify_cpu_starting(unsigned int cpu);
142extern void cpu_maps_update_begin(void); 162extern void cpu_maps_update_begin(void);
143extern void cpu_maps_update_done(void); 163extern void cpu_maps_update_done(void);
144 164
165#define cpu_notifier_register_begin cpu_maps_update_begin
166#define cpu_notifier_register_done cpu_maps_update_done
167
145#else /* CONFIG_SMP */ 168#else /* CONFIG_SMP */
146 169
147#define cpu_notifier(fn, pri) do { (void)(fn); } while (0) 170#define cpu_notifier(fn, pri) do { (void)(fn); } while (0)
171#define __cpu_notifier(fn, pri) do { (void)(fn); } while (0)
148 172
149static inline int register_cpu_notifier(struct notifier_block *nb) 173static inline int register_cpu_notifier(struct notifier_block *nb)
150{ 174{
151 return 0; 175 return 0;
152} 176}
153 177
178static inline int __register_cpu_notifier(struct notifier_block *nb)
179{
180 return 0;
181}
182
154static inline void unregister_cpu_notifier(struct notifier_block *nb) 183static inline void unregister_cpu_notifier(struct notifier_block *nb)
155{ 184{
156} 185}
157 186
187static inline void __unregister_cpu_notifier(struct notifier_block *nb)
188{
189}
190
158static inline void cpu_maps_update_begin(void) 191static inline void cpu_maps_update_begin(void)
159{ 192{
160} 193}
@@ -163,6 +196,14 @@ static inline void cpu_maps_update_done(void)
163{ 196{
164} 197}
165 198
199static inline void cpu_notifier_register_begin(void)
200{
201}
202
203static inline void cpu_notifier_register_done(void)
204{
205}
206
166#endif /* CONFIG_SMP */ 207#endif /* CONFIG_SMP */
167extern struct bus_type cpu_subsys; 208extern struct bus_type cpu_subsys;
168 209
@@ -176,8 +217,11 @@ extern void put_online_cpus(void);
176extern void cpu_hotplug_disable(void); 217extern void cpu_hotplug_disable(void);
177extern void cpu_hotplug_enable(void); 218extern void cpu_hotplug_enable(void);
178#define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri) 219#define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri)
220#define __hotcpu_notifier(fn, pri) __cpu_notifier(fn, pri)
179#define register_hotcpu_notifier(nb) register_cpu_notifier(nb) 221#define register_hotcpu_notifier(nb) register_cpu_notifier(nb)
222#define __register_hotcpu_notifier(nb) __register_cpu_notifier(nb)
180#define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb) 223#define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb)
224#define __unregister_hotcpu_notifier(nb) __unregister_cpu_notifier(nb)
181void clear_tasks_mm_cpumask(int cpu); 225void clear_tasks_mm_cpumask(int cpu);
182int cpu_down(unsigned int cpu); 226int cpu_down(unsigned int cpu);
183 227
@@ -190,9 +234,12 @@ static inline void cpu_hotplug_done(void) {}
190#define cpu_hotplug_disable() do { } while (0) 234#define cpu_hotplug_disable() do { } while (0)
191#define cpu_hotplug_enable() do { } while (0) 235#define cpu_hotplug_enable() do { } while (0)
192#define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0) 236#define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
237#define __hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
193/* These aren't inline functions due to a GCC bug. */ 238/* These aren't inline functions due to a GCC bug. */
194#define register_hotcpu_notifier(nb) ({ (void)(nb); 0; }) 239#define register_hotcpu_notifier(nb) ({ (void)(nb); 0; })
240#define __register_hotcpu_notifier(nb) ({ (void)(nb); 0; })
195#define unregister_hotcpu_notifier(nb) ({ (void)(nb); }) 241#define unregister_hotcpu_notifier(nb) ({ (void)(nb); })
242#define __unregister_hotcpu_notifier(nb) ({ (void)(nb); })
196#endif /* CONFIG_HOTPLUG_CPU */ 243#endif /* CONFIG_HOTPLUG_CPU */
197 244
198#ifdef CONFIG_PM_SLEEP_SMP 245#ifdef CONFIG_PM_SLEEP_SMP
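
The header additions give every existing registration interface a double-underscore, lock-free counterpart, and define cpu_notifier_register_begin()/done() as aliases for cpu_maps_update_begin()/done() on CONFIG_SMP, with empty inline stubs otherwise, so callers need no #ifdefs of their own. Callers that use the macro form rather than an explicit notifier_block convert the same way, as kernel/profile.c does further down; a sketch with a hypothetical foo_cpu_callback():

#include <linux/cpu.h>
#include <linux/notifier.h>

static int foo_cpu_callback(struct notifier_block *nb,
                            unsigned long action, void *hcpu)
{
        /* hotplug handling for CPU (long)hcpu */
        return NOTIFY_OK;
}

static int __init foo_init(void)
{
        cpu_notifier_register_begin();

        /* ... bring the already-online CPUs up to date here ... */

        /*
         * Macro form: defines a static notifier_block for the callback
         * and registers it via the lock-free __register_cpu_notifier().
         */
        __hotcpu_notifier(foo_cpu_callback, 0);

        cpu_notifier_register_done();
        return 0;
}
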
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index e56b07f5c9b6..3356abcfff18 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -835,6 +835,8 @@ do { \
835 { .notifier_call = fn, .priority = CPU_PRI_PERF }; \ 835 { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
836 unsigned long cpu = smp_processor_id(); \ 836 unsigned long cpu = smp_processor_id(); \
837 unsigned long flags; \ 837 unsigned long flags; \
838 \
839 cpu_notifier_register_begin(); \
838 fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \ 840 fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \
839 (void *)(unsigned long)cpu); \ 841 (void *)(unsigned long)cpu); \
840 local_irq_save(flags); \ 842 local_irq_save(flags); \
@@ -843,9 +845,21 @@ do { \
843 local_irq_restore(flags); \ 845 local_irq_restore(flags); \
844 fn(&fn##_nb, (unsigned long)CPU_ONLINE, \ 846 fn(&fn##_nb, (unsigned long)CPU_ONLINE, \
845 (void *)(unsigned long)cpu); \ 847 (void *)(unsigned long)cpu); \
846 register_cpu_notifier(&fn##_nb); \ 848 __register_cpu_notifier(&fn##_nb); \
849 cpu_notifier_register_done(); \
847} while (0) 850} while (0)
848 851
852/*
853 * Bare-bones version of perf_cpu_notifier(), which doesn't invoke the
854 * callback for already online CPUs.
855 */
856#define __perf_cpu_notifier(fn) \
857do { \
858 static struct notifier_block fn##_nb = \
859 { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
860 \
861 __register_cpu_notifier(&fn##_nb); \
862} while (0)
849 863
850struct perf_pmu_events_attr { 864struct perf_pmu_events_attr {
851 struct device_attribute attr; 865 struct device_attribute attr;
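
perf_cpu_notifier() replays CPU_UP_PREPARE/CPU_STARTING/CPU_ONLINE for the current CPU before registering; the new __perf_cpu_notifier() skips that replay and only does the lock-free registration, for PMU drivers that initialize the already-online CPUs themselves. The caller is responsible for the surrounding cpu_notifier_register_begin()/done() section. A sketch, with foo_pmu_setup_cpu() as an illustrative placeholder:

#include <linux/cpu.h>
#include <linux/perf_event.h>

static int foo_pmu_cpu_notify(struct notifier_block *nb,
                              unsigned long action, void *hcpu)
{
        /* hotplug handling for a hypothetical PMU driver */
        return NOTIFY_OK;
}

static int __init foo_pmu_init(void)
{
        int cpu;

        cpu_notifier_register_begin();

        /* The driver sets up the already-online CPUs itself ... */
        for_each_online_cpu(cpu)
                foo_pmu_setup_cpu(cpu);         /* hypothetical */

        /* ... so the bare-bones macro, which skips the replay, is enough. */
        __perf_cpu_notifier(foo_pmu_cpu_notify);

        cpu_notifier_register_done();
        return 0;
}
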
diff --git a/kernel/cpu.c b/kernel/cpu.c
index deff2e693766..a9e710eef0e2 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -19,6 +19,7 @@
19#include <linux/mutex.h> 19#include <linux/mutex.h>
20#include <linux/gfp.h> 20#include <linux/gfp.h>
21#include <linux/suspend.h> 21#include <linux/suspend.h>
22#include <linux/lockdep.h>
22 23
23#include "smpboot.h" 24#include "smpboot.h"
24 25
@@ -27,18 +28,23 @@
27static DEFINE_MUTEX(cpu_add_remove_lock); 28static DEFINE_MUTEX(cpu_add_remove_lock);
28 29
29/* 30/*
30 * The following two API's must be used when attempting 31 * The following two APIs (cpu_maps_update_begin/done) must be used when
31 * to serialize the updates to cpu_online_mask, cpu_present_mask. 32 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
33 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
34 * hotplug callback (un)registration performed using __register_cpu_notifier()
35 * or __unregister_cpu_notifier().
32 */ 36 */
33void cpu_maps_update_begin(void) 37void cpu_maps_update_begin(void)
34{ 38{
35 mutex_lock(&cpu_add_remove_lock); 39 mutex_lock(&cpu_add_remove_lock);
36} 40}
41EXPORT_SYMBOL(cpu_notifier_register_begin);
37 42
38void cpu_maps_update_done(void) 43void cpu_maps_update_done(void)
39{ 44{
40 mutex_unlock(&cpu_add_remove_lock); 45 mutex_unlock(&cpu_add_remove_lock);
41} 46}
47EXPORT_SYMBOL(cpu_notifier_register_done);
42 48
43static RAW_NOTIFIER_HEAD(cpu_chain); 49static RAW_NOTIFIER_HEAD(cpu_chain);
44 50
@@ -57,17 +63,30 @@ static struct {
57 * an ongoing cpu hotplug operation. 63 * an ongoing cpu hotplug operation.
58 */ 64 */
59 int refcount; 65 int refcount;
66
67#ifdef CONFIG_DEBUG_LOCK_ALLOC
68 struct lockdep_map dep_map;
69#endif
60} cpu_hotplug = { 70} cpu_hotplug = {
61 .active_writer = NULL, 71 .active_writer = NULL,
62 .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock), 72 .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
63 .refcount = 0, 73 .refcount = 0,
74#ifdef CONFIG_DEBUG_LOCK_ALLOC
75 .dep_map = {.name = "cpu_hotplug.lock" },
76#endif
64}; 77};
65 78
79/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
80#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
81#define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map)
82#define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map)
83
66void get_online_cpus(void) 84void get_online_cpus(void)
67{ 85{
68 might_sleep(); 86 might_sleep();
69 if (cpu_hotplug.active_writer == current) 87 if (cpu_hotplug.active_writer == current)
70 return; 88 return;
89 cpuhp_lock_acquire_read();
71 mutex_lock(&cpu_hotplug.lock); 90 mutex_lock(&cpu_hotplug.lock);
72 cpu_hotplug.refcount++; 91 cpu_hotplug.refcount++;
73 mutex_unlock(&cpu_hotplug.lock); 92 mutex_unlock(&cpu_hotplug.lock);
@@ -87,6 +106,7 @@ void put_online_cpus(void)
87 if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer)) 106 if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
88 wake_up_process(cpu_hotplug.active_writer); 107 wake_up_process(cpu_hotplug.active_writer);
89 mutex_unlock(&cpu_hotplug.lock); 108 mutex_unlock(&cpu_hotplug.lock);
109 cpuhp_lock_release();
90 110
91} 111}
92EXPORT_SYMBOL_GPL(put_online_cpus); 112EXPORT_SYMBOL_GPL(put_online_cpus);
@@ -117,6 +137,7 @@ void cpu_hotplug_begin(void)
117{ 137{
118 cpu_hotplug.active_writer = current; 138 cpu_hotplug.active_writer = current;
119 139
140 cpuhp_lock_acquire();
120 for (;;) { 141 for (;;) {
121 mutex_lock(&cpu_hotplug.lock); 142 mutex_lock(&cpu_hotplug.lock);
122 if (likely(!cpu_hotplug.refcount)) 143 if (likely(!cpu_hotplug.refcount))
@@ -131,6 +152,7 @@ void cpu_hotplug_done(void)
131{ 152{
132 cpu_hotplug.active_writer = NULL; 153 cpu_hotplug.active_writer = NULL;
133 mutex_unlock(&cpu_hotplug.lock); 154 mutex_unlock(&cpu_hotplug.lock);
155 cpuhp_lock_release();
134} 156}
135 157
136/* 158/*
@@ -166,6 +188,11 @@ int __ref register_cpu_notifier(struct notifier_block *nb)
166 return ret; 188 return ret;
167} 189}
168 190
191int __ref __register_cpu_notifier(struct notifier_block *nb)
192{
193 return raw_notifier_chain_register(&cpu_chain, nb);
194}
195
169static int __cpu_notify(unsigned long val, void *v, int nr_to_call, 196static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
170 int *nr_calls) 197 int *nr_calls)
171{ 198{
@@ -189,6 +216,7 @@ static void cpu_notify_nofail(unsigned long val, void *v)
189 BUG_ON(cpu_notify(val, v)); 216 BUG_ON(cpu_notify(val, v));
190} 217}
191EXPORT_SYMBOL(register_cpu_notifier); 218EXPORT_SYMBOL(register_cpu_notifier);
219EXPORT_SYMBOL(__register_cpu_notifier);
192 220
193void __ref unregister_cpu_notifier(struct notifier_block *nb) 221void __ref unregister_cpu_notifier(struct notifier_block *nb)
194{ 222{
@@ -198,6 +226,12 @@ void __ref unregister_cpu_notifier(struct notifier_block *nb)
198} 226}
199EXPORT_SYMBOL(unregister_cpu_notifier); 227EXPORT_SYMBOL(unregister_cpu_notifier);
200 228
229void __ref __unregister_cpu_notifier(struct notifier_block *nb)
230{
231 raw_notifier_chain_unregister(&cpu_chain, nb);
232}
233EXPORT_SYMBOL(__unregister_cpu_notifier);
234
201/** 235/**
202 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU 236 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
203 * @cpu: a CPU id 237 * @cpu: a CPU id
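
Besides the new (un)registration helpers, kernel/cpu.c gains a lockdep_map for the open-coded cpu_hotplug lock: get_online_cpus()/put_online_cpus() annotate it as a read acquisition and cpu_hotplug_begin()/done() as the write side, so the old "get_online_cpus() then register_cpu_notifier()" ordering can now be reported by lockdep as a lock inversion instead of only hanging under a real race. The same technique applies to any hand-rolled refcounted lock; a generic sketch assuming CONFIG_DEBUG_LOCK_ALLOC (foo_* names illustrative):

#include <linux/lockdep.h>
#include <linux/mutex.h>

static struct {
        struct mutex            lock;
        int                     refcount;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map      dep_map;        /* what lockdep tracks */
#endif
} foo_lock = {
        .lock           = __MUTEX_INITIALIZER(foo_lock.lock),
        .refcount       = 0,
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        .dep_map        = { .name = "foo_lock" },
#endif
};

static void foo_read_lock(void)         /* analogue of get_online_cpus() */
{
        lock_map_acquire_read(&foo_lock.dep_map);
        mutex_lock(&foo_lock.lock);
        foo_lock.refcount++;
        mutex_unlock(&foo_lock.lock);
}

static void foo_read_unlock(void)       /* analogue of put_online_cpus() */
{
        mutex_lock(&foo_lock.lock);
        foo_lock.refcount--;
        mutex_unlock(&foo_lock.lock);
        lock_map_release(&foo_lock.dep_map);
}
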
diff --git a/kernel/profile.c b/kernel/profile.c
index 1b266dbe755a..cb980f0c731b 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -591,18 +591,28 @@ out_cleanup:
591int __ref create_proc_profile(void) /* false positive from hotcpu_notifier */ 591int __ref create_proc_profile(void) /* false positive from hotcpu_notifier */
592{ 592{
593 struct proc_dir_entry *entry; 593 struct proc_dir_entry *entry;
594 int err = 0;
594 595
595 if (!prof_on) 596 if (!prof_on)
596 return 0; 597 return 0;
597 if (create_hash_tables()) 598
598 return -ENOMEM; 599 cpu_notifier_register_begin();
600
601 if (create_hash_tables()) {
602 err = -ENOMEM;
603 goto out;
604 }
605
599 entry = proc_create("profile", S_IWUSR | S_IRUGO, 606 entry = proc_create("profile", S_IWUSR | S_IRUGO,
600 NULL, &proc_profile_operations); 607 NULL, &proc_profile_operations);
601 if (!entry) 608 if (!entry)
602 return 0; 609 goto out;
603 proc_set_size(entry, (1 + prof_len) * sizeof(atomic_t)); 610 proc_set_size(entry, (1 + prof_len) * sizeof(atomic_t));
604 hotcpu_notifier(profile_cpu_callback, 0); 611 __hotcpu_notifier(profile_cpu_callback, 0);
605 return 0; 612
613out:
614 cpu_notifier_register_done();
615 return err;
606} 616}
607subsys_initcall(create_proc_profile); 617subsys_initcall(create_proc_profile);
608#endif /* CONFIG_PROC_FS */ 618#endif /* CONFIG_PROC_FS */
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index fc4da2d97f9b..c634868c2921 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1301,7 +1301,7 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
1301 * In that off case, we need to allocate for all possible cpus. 1301 * In that off case, we need to allocate for all possible cpus.
1302 */ 1302 */
1303#ifdef CONFIG_HOTPLUG_CPU 1303#ifdef CONFIG_HOTPLUG_CPU
1304 get_online_cpus(); 1304 cpu_notifier_register_begin();
1305 cpumask_copy(buffer->cpumask, cpu_online_mask); 1305 cpumask_copy(buffer->cpumask, cpu_online_mask);
1306#else 1306#else
1307 cpumask_copy(buffer->cpumask, cpu_possible_mask); 1307 cpumask_copy(buffer->cpumask, cpu_possible_mask);
@@ -1324,10 +1324,10 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
1324#ifdef CONFIG_HOTPLUG_CPU 1324#ifdef CONFIG_HOTPLUG_CPU
1325 buffer->cpu_notify.notifier_call = rb_cpu_notify; 1325 buffer->cpu_notify.notifier_call = rb_cpu_notify;
1326 buffer->cpu_notify.priority = 0; 1326 buffer->cpu_notify.priority = 0;
1327 register_cpu_notifier(&buffer->cpu_notify); 1327 __register_cpu_notifier(&buffer->cpu_notify);
1328 cpu_notifier_register_done();
1328#endif 1329#endif
1329 1330
1330 put_online_cpus();
1331 mutex_init(&buffer->mutex); 1331 mutex_init(&buffer->mutex);
1332 1332
1333 return buffer; 1333 return buffer;
@@ -1341,7 +1341,9 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
1341 1341
1342 fail_free_cpumask: 1342 fail_free_cpumask:
1343 free_cpumask_var(buffer->cpumask); 1343 free_cpumask_var(buffer->cpumask);
1344 put_online_cpus(); 1344#ifdef CONFIG_HOTPLUG_CPU
1345 cpu_notifier_register_done();
1346#endif
1345 1347
1346 fail_free_buffer: 1348 fail_free_buffer:
1347 kfree(buffer); 1349 kfree(buffer);
@@ -1358,16 +1360,17 @@ ring_buffer_free(struct ring_buffer *buffer)
1358{ 1360{
1359 int cpu; 1361 int cpu;
1360 1362
1361 get_online_cpus();
1362
1363#ifdef CONFIG_HOTPLUG_CPU 1363#ifdef CONFIG_HOTPLUG_CPU
1364 unregister_cpu_notifier(&buffer->cpu_notify); 1364 cpu_notifier_register_begin();
1365 __unregister_cpu_notifier(&buffer->cpu_notify);
1365#endif 1366#endif
1366 1367
1367 for_each_buffer_cpu(buffer, cpu) 1368 for_each_buffer_cpu(buffer, cpu)
1368 rb_free_cpu_buffer(buffer->buffers[cpu]); 1369 rb_free_cpu_buffer(buffer->buffers[cpu]);
1369 1370
1370 put_online_cpus(); 1371#ifdef CONFIG_HOTPLUG_CPU
1372 cpu_notifier_register_done();
1373#endif
1371 1374
1372 kfree(buffer->buffers); 1375 kfree(buffer->buffers);
1373 free_cpumask_var(buffer->cpumask); 1376 free_cpumask_var(buffer->cpumask);
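
ring_buffer.c differs in that the notifier_block lives inside each dynamically allocated buffer and the registration only exists on CONFIG_HOTPLUG_CPU kernels, so the begin()/done() pair moves under the same #ifdef as the __register/__unregister calls. A sketch of that shape for a hypothetical per-object notifier (foo_* names illustrative):

#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/slab.h>

struct foo_buffer {
        struct cpumask          cpumask;
#ifdef CONFIG_HOTPLUG_CPU
        struct notifier_block   cpu_notify;
#endif
};

static int foo_cpu_notify(struct notifier_block *nb,
                          unsigned long action, void *hcpu)
{
        /* per-buffer hotplug handling would go here */
        return NOTIFY_OK;
}

struct foo_buffer *foo_buffer_alloc(void)
{
        struct foo_buffer *buf = kzalloc(sizeof(*buf), GFP_KERNEL);

        if (!buf)
                return NULL;

#ifdef CONFIG_HOTPLUG_CPU
        /* Registration only exists on hotplug kernels, so the locked
         * section sits under the same #ifdef. */
        cpu_notifier_register_begin();
        cpumask_copy(&buf->cpumask, cpu_online_mask);
#else
        cpumask_copy(&buf->cpumask, cpu_possible_mask);
#endif

        /* ... allocate per-CPU state for each CPU in buf->cpumask ... */

#ifdef CONFIG_HOTPLUG_CPU
        buf->cpu_notify.notifier_call = foo_cpu_notify;
        __register_cpu_notifier(&buf->cpu_notify);
        cpu_notifier_register_done();
#endif
        return buf;
}
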
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 197b4c4a9587..302dd076b8bf 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1298,14 +1298,14 @@ static int __init setup_vmstat(void)
1298#ifdef CONFIG_SMP 1298#ifdef CONFIG_SMP
1299 int cpu; 1299 int cpu;
1300 1300
1301 register_cpu_notifier(&vmstat_notifier); 1301 cpu_notifier_register_begin();
1302 __register_cpu_notifier(&vmstat_notifier);
1302 1303
1303 get_online_cpus();
1304 for_each_online_cpu(cpu) { 1304 for_each_online_cpu(cpu) {
1305 start_cpu_timer(cpu); 1305 start_cpu_timer(cpu);
1306 node_set_state(cpu_to_node(cpu), N_CPU); 1306 node_set_state(cpu_to_node(cpu), N_CPU);
1307 } 1307 }
1308 put_online_cpus(); 1308 cpu_notifier_register_done();
1309#endif 1309#endif
1310#ifdef CONFIG_PROC_FS 1310#ifdef CONFIG_PROC_FS
1311 proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations); 1311 proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index c03ca5e9fe15..36b4591a7a2d 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -814,21 +814,32 @@ static void zs_exit(void)
814{ 814{
815 int cpu; 815 int cpu;
816 816
817 cpu_notifier_register_begin();
818
817 for_each_online_cpu(cpu) 819 for_each_online_cpu(cpu)
818 zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu); 820 zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu);
819 unregister_cpu_notifier(&zs_cpu_nb); 821 __unregister_cpu_notifier(&zs_cpu_nb);
822
823 cpu_notifier_register_done();
820} 824}
821 825
822static int zs_init(void) 826static int zs_init(void)
823{ 827{
824 int cpu, ret; 828 int cpu, ret;
825 829
826 register_cpu_notifier(&zs_cpu_nb); 830 cpu_notifier_register_begin();
831
832 __register_cpu_notifier(&zs_cpu_nb);
827 for_each_online_cpu(cpu) { 833 for_each_online_cpu(cpu) {
828 ret = zs_cpu_notifier(NULL, CPU_UP_PREPARE, (void *)(long)cpu); 834 ret = zs_cpu_notifier(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
829 if (notifier_to_errno(ret)) 835 if (notifier_to_errno(ret)) {
836 cpu_notifier_register_done();
830 goto fail; 837 goto fail;
838 }
831 } 839 }
840
841 cpu_notifier_register_done();
842
832 return 0; 843 return 0;
833fail: 844fail:
834 zs_exit(); 845 zs_exit();
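
One detail visible in the zsmalloc error path: cpu_notifier_register_begin()/done() is an ordinary mutex section and does not nest, so zs_init() must call cpu_notifier_register_done() before jumping to zs_exit(), which takes the section again. In sketch form (foo_cpu_notifier_fn and foo_cpu_nb are illustrative placeholders):

static void foo_exit(void)
{
        int cpu;

        cpu_notifier_register_begin();
        for_each_online_cpu(cpu)
                foo_cpu_notifier_fn(NULL, CPU_DEAD, (void *)(long)cpu);
        __unregister_cpu_notifier(&foo_cpu_nb);
        cpu_notifier_register_done();
}

static int foo_init(void)
{
        int cpu, ret;

        cpu_notifier_register_begin();
        __register_cpu_notifier(&foo_cpu_nb);
        for_each_online_cpu(cpu) {
                ret = foo_cpu_notifier_fn(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
                if (notifier_to_errno(ret)) {
                        /* Leave the section first: foo_exit() re-enters it. */
                        cpu_notifier_register_done();
                        goto fail;
                }
        }
        cpu_notifier_register_done();
        return 0;

fail:
        foo_exit();
        return notifier_to_errno(ret);
}
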
diff --git a/mm/zswap.c b/mm/zswap.c
index e55bab9dc41f..d7337fbf6605 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -387,18 +387,18 @@ static int zswap_cpu_init(void)
387{ 387{
388 unsigned long cpu; 388 unsigned long cpu;
389 389
390 get_online_cpus(); 390 cpu_notifier_register_begin();
391 for_each_online_cpu(cpu) 391 for_each_online_cpu(cpu)
392 if (__zswap_cpu_notifier(CPU_UP_PREPARE, cpu) != NOTIFY_OK) 392 if (__zswap_cpu_notifier(CPU_UP_PREPARE, cpu) != NOTIFY_OK)
393 goto cleanup; 393 goto cleanup;
394 register_cpu_notifier(&zswap_cpu_notifier_block); 394 __register_cpu_notifier(&zswap_cpu_notifier_block);
395 put_online_cpus(); 395 cpu_notifier_register_done();
396 return 0; 396 return 0;
397 397
398cleanup: 398cleanup:
399 for_each_online_cpu(cpu) 399 for_each_online_cpu(cpu)
400 __zswap_cpu_notifier(CPU_UP_CANCELED, cpu); 400 __zswap_cpu_notifier(CPU_UP_CANCELED, cpu);
401 put_online_cpus(); 401 cpu_notifier_register_done();
402 return -ENOMEM; 402 return -ENOMEM;
403} 403}
404 404
diff --git a/net/core/flow.c b/net/core/flow.c
index 31cfb365e0c6..a0348fde1fdf 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -455,6 +455,8 @@ int flow_cache_init(struct net *net)
455 if (!fc->percpu) 455 if (!fc->percpu)
456 return -ENOMEM; 456 return -ENOMEM;
457 457
458 cpu_notifier_register_begin();
459
458 for_each_online_cpu(i) { 460 for_each_online_cpu(i) {
459 if (flow_cache_cpu_prepare(fc, i)) 461 if (flow_cache_cpu_prepare(fc, i))
460 goto err; 462 goto err;
@@ -462,7 +464,9 @@ int flow_cache_init(struct net *net)
462 fc->hotcpu_notifier = (struct notifier_block){ 464 fc->hotcpu_notifier = (struct notifier_block){
463 .notifier_call = flow_cache_cpu, 465 .notifier_call = flow_cache_cpu,
464 }; 466 };
465 register_hotcpu_notifier(&fc->hotcpu_notifier); 467 __register_hotcpu_notifier(&fc->hotcpu_notifier);
468
469 cpu_notifier_register_done();
466 470
467 setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd, 471 setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
468 (unsigned long) fc); 472 (unsigned long) fc);
@@ -478,6 +482,8 @@ err:
478 fcp->hash_table = NULL; 482 fcp->hash_table = NULL;
479 } 483 }
480 484
485 cpu_notifier_register_done();
486
481 free_percpu(fc->percpu); 487 free_percpu(fc->percpu);
482 fc->percpu = NULL; 488 fc->percpu = NULL;
483 489
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index cd5b8ec9be04..79a0ce95799f 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -621,6 +621,42 @@ static void iucv_disable(void)
621 put_online_cpus(); 621 put_online_cpus();
622} 622}
623 623
624static void free_iucv_data(int cpu)
625{
626 kfree(iucv_param_irq[cpu]);
627 iucv_param_irq[cpu] = NULL;
628 kfree(iucv_param[cpu]);
629 iucv_param[cpu] = NULL;
630 kfree(iucv_irq_data[cpu]);
631 iucv_irq_data[cpu] = NULL;
632}
633
634static int alloc_iucv_data(int cpu)
635{
636 /* Note: GFP_DMA used to get memory below 2G */
637 iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data),
638 GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
639 if (!iucv_irq_data[cpu])
640 goto out_free;
641
642 /* Allocate parameter blocks. */
643 iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
644 GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
645 if (!iucv_param[cpu])
646 goto out_free;
647
648 iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
649 GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
650 if (!iucv_param_irq[cpu])
651 goto out_free;
652
653 return 0;
654
655out_free:
656 free_iucv_data(cpu);
657 return -ENOMEM;
658}
659
624static int iucv_cpu_notify(struct notifier_block *self, 660static int iucv_cpu_notify(struct notifier_block *self,
625 unsigned long action, void *hcpu) 661 unsigned long action, void *hcpu)
626{ 662{
@@ -630,38 +666,14 @@ static int iucv_cpu_notify(struct notifier_block *self,
630 switch (action) { 666 switch (action) {
631 case CPU_UP_PREPARE: 667 case CPU_UP_PREPARE:
632 case CPU_UP_PREPARE_FROZEN: 668 case CPU_UP_PREPARE_FROZEN:
633 iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data), 669 if (alloc_iucv_data(cpu))
634 GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
635 if (!iucv_irq_data[cpu])
636 return notifier_from_errno(-ENOMEM);
637
638 iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
639 GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
640 if (!iucv_param[cpu]) {
641 kfree(iucv_irq_data[cpu]);
642 iucv_irq_data[cpu] = NULL;
643 return notifier_from_errno(-ENOMEM); 670 return notifier_from_errno(-ENOMEM);
644 }
645 iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
646 GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
647 if (!iucv_param_irq[cpu]) {
648 kfree(iucv_param[cpu]);
649 iucv_param[cpu] = NULL;
650 kfree(iucv_irq_data[cpu]);
651 iucv_irq_data[cpu] = NULL;
652 return notifier_from_errno(-ENOMEM);
653 }
654 break; 671 break;
655 case CPU_UP_CANCELED: 672 case CPU_UP_CANCELED:
656 case CPU_UP_CANCELED_FROZEN: 673 case CPU_UP_CANCELED_FROZEN:
657 case CPU_DEAD: 674 case CPU_DEAD:
658 case CPU_DEAD_FROZEN: 675 case CPU_DEAD_FROZEN:
659 kfree(iucv_param_irq[cpu]); 676 free_iucv_data(cpu);
660 iucv_param_irq[cpu] = NULL;
661 kfree(iucv_param[cpu]);
662 iucv_param[cpu] = NULL;
663 kfree(iucv_irq_data[cpu]);
664 iucv_irq_data[cpu] = NULL;
665 break; 677 break;
666 case CPU_ONLINE: 678 case CPU_ONLINE:
667 case CPU_ONLINE_FROZEN: 679 case CPU_ONLINE_FROZEN:
@@ -2025,33 +2037,20 @@ static int __init iucv_init(void)
2025 goto out_int; 2037 goto out_int;
2026 } 2038 }
2027 2039
2028 for_each_online_cpu(cpu) { 2040 cpu_notifier_register_begin();
2029 /* Note: GFP_DMA used to get memory below 2G */
2030 iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data),
2031 GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
2032 if (!iucv_irq_data[cpu]) {
2033 rc = -ENOMEM;
2034 goto out_free;
2035 }
2036 2041
2037 /* Allocate parameter blocks. */ 2042 for_each_online_cpu(cpu) {
2038 iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param), 2043 if (alloc_iucv_data(cpu)) {
2039 GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
2040 if (!iucv_param[cpu]) {
2041 rc = -ENOMEM;
2042 goto out_free;
2043 }
2044 iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
2045 GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
2046 if (!iucv_param_irq[cpu]) {
2047 rc = -ENOMEM; 2044 rc = -ENOMEM;
2048 goto out_free; 2045 goto out_free;
2049 } 2046 }
2050
2051 } 2047 }
2052 rc = register_hotcpu_notifier(&iucv_cpu_notifier); 2048 rc = __register_hotcpu_notifier(&iucv_cpu_notifier);
2053 if (rc) 2049 if (rc)
2054 goto out_free; 2050 goto out_free;
2051
2052 cpu_notifier_register_done();
2053
2055 rc = register_reboot_notifier(&iucv_reboot_notifier); 2054 rc = register_reboot_notifier(&iucv_reboot_notifier);
2056 if (rc) 2055 if (rc)
2057 goto out_cpu; 2056 goto out_cpu;
@@ -2069,16 +2068,14 @@ static int __init iucv_init(void)
2069out_reboot: 2068out_reboot:
2070 unregister_reboot_notifier(&iucv_reboot_notifier); 2069 unregister_reboot_notifier(&iucv_reboot_notifier);
2071out_cpu: 2070out_cpu:
2072 unregister_hotcpu_notifier(&iucv_cpu_notifier); 2071 cpu_notifier_register_begin();
2072 __unregister_hotcpu_notifier(&iucv_cpu_notifier);
2073out_free: 2073out_free:
2074 for_each_possible_cpu(cpu) { 2074 for_each_possible_cpu(cpu)
2075 kfree(iucv_param_irq[cpu]); 2075 free_iucv_data(cpu);
2076 iucv_param_irq[cpu] = NULL; 2076
2077 kfree(iucv_param[cpu]); 2077 cpu_notifier_register_done();
2078 iucv_param[cpu] = NULL; 2078
2079 kfree(iucv_irq_data[cpu]);
2080 iucv_irq_data[cpu] = NULL;
2081 }
2082 root_device_unregister(iucv_root); 2079 root_device_unregister(iucv_root);
2083out_int: 2080out_int:
2084 unregister_external_interrupt(0x4000, iucv_external_interrupt); 2081 unregister_external_interrupt(0x4000, iucv_external_interrupt);
@@ -2105,15 +2102,11 @@ static void __exit iucv_exit(void)
2105 kfree(p); 2102 kfree(p);
2106 spin_unlock_irq(&iucv_queue_lock); 2103 spin_unlock_irq(&iucv_queue_lock);
2107 unregister_reboot_notifier(&iucv_reboot_notifier); 2104 unregister_reboot_notifier(&iucv_reboot_notifier);
2108 unregister_hotcpu_notifier(&iucv_cpu_notifier); 2105 cpu_notifier_register_begin();
2109 for_each_possible_cpu(cpu) { 2106 __unregister_hotcpu_notifier(&iucv_cpu_notifier);
2110 kfree(iucv_param_irq[cpu]); 2107 for_each_possible_cpu(cpu)
2111 iucv_param_irq[cpu] = NULL; 2108 free_iucv_data(cpu);
2112 kfree(iucv_param[cpu]); 2109 cpu_notifier_register_done();
2113 iucv_param[cpu] = NULL;
2114 kfree(iucv_irq_data[cpu]);
2115 iucv_irq_data[cpu] = NULL;
2116 }
2117 root_device_unregister(iucv_root); 2110 root_device_unregister(iucv_root);
2118 bus_unregister(&iucv_bus); 2111 bus_unregister(&iucv_bus);
2119 unregister_external_interrupt(0x4000, iucv_external_interrupt); 2112 unregister_external_interrupt(0x4000, iucv_external_interrupt);
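
The iucv conversion also shows how error unwinding works once the registration section has been closed: failure paths that run after cpu_notifier_register_done() (the reboot-notifier and device registration steps) re-enter the section before calling __unregister_hotcpu_notifier() and freeing the per-CPU data. A condensed sketch of that shape, with foo_* placeholders standing in for the iucv helpers:

static int __init foo_init(void)
{
        int cpu, rc;

        cpu_notifier_register_begin();

        for_each_online_cpu(cpu) {
                if (foo_alloc_percpu(cpu)) {            /* hypothetical */
                        rc = -ENOMEM;
                        goto out_free;
                }
        }

        rc = __register_hotcpu_notifier(&foo_cpu_notifier);
        if (rc)
                goto out_free;

        cpu_notifier_register_done();

        rc = foo_register_other_notifiers();            /* hypothetical */
        if (rc)
                goto out_cpu;

        return 0;

out_cpu:
        /* Past _done(): re-enter the section before unregistering. */
        cpu_notifier_register_begin();
        __unregister_hotcpu_notifier(&foo_cpu_notifier);
out_free:
        for_each_possible_cpu(cpu)
                foo_free_percpu(cpu);                   /* hypothetical */
        cpu_notifier_register_done();

        return rc;
}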