aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorSrivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>2014-03-10 16:38:09 -0400
committerRafael J. Wysocki <rafael.j.wysocki@intel.com>2014-03-20 08:43:43 -0400
commitfd537e56f65c61bb4cece99ac3a1a1bef6676df9 (patch)
tree0ec3fd6e59ae1d4d67d24c361d072dbeee0b957d
parent8c60ea146499b9d2a81ceb5e3e0bd215ef0b6287 (diff)
x86, intel, rapl: Fix CPU hotplug callback registration
Subsystems that want to register CPU hotplug callbacks, as well as perform initialization for the CPUs that are already online, often do it as shown below: get_online_cpus(); for_each_online_cpu(cpu) init_cpu(cpu); register_cpu_notifier(&foobar_cpu_notifier); put_online_cpus(); This is wrong, since it is prone to ABBA deadlocks involving the cpu_add_remove_lock and the cpu_hotplug.lock (when running concurrently with CPU hotplug operations). Instead, the correct and race-free way of performing the callback registration is: cpu_notifier_register_begin(); for_each_online_cpu(cpu) init_cpu(cpu); /* Note the use of the double underscored version of the API */ __register_cpu_notifier(&foobar_cpu_notifier); cpu_notifier_register_done(); Fix the intel rapl code in x86 by using this latter form of callback registration. Cc: Peter Zijlstra <peterz@infradead.org> Cc: Paul Mackerras <paulus@samba.org> Cc: Ingo Molnar <mingo@kernel.org> Cc: Arnaldo Carvalho de Melo <acme@ghostprotocols.net> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: "H. Peter Anvin" <hpa@zytor.com> Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com> Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_rapl.c9
1 files changed, 5 insertions, 4 deletions
diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
index 5ad35ad94d0f..059218ed5208 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
@@ -646,19 +646,20 @@ static int __init rapl_pmu_init(void)
 		/* unsupported */
 		return 0;
 	}
-	get_online_cpus();
+
+	cpu_notifier_register_begin();
 
 	for_each_online_cpu(cpu) {
 		rapl_cpu_prepare(cpu);
 		rapl_cpu_init(cpu);
 	}
 
-	perf_cpu_notifier(rapl_cpu_notifier);
+	__perf_cpu_notifier(rapl_cpu_notifier);
 
 	ret = perf_pmu_register(&rapl_pmu_class, "power", -1);
 	if (WARN_ON(ret)) {
 		pr_info("RAPL PMU detected, registration failed (%d), RAPL PMU disabled\n", ret);
-		put_online_cpus();
+		cpu_notifier_register_done();
 		return -1;
 	}
 
@@ -672,7 +673,7 @@ static int __init rapl_pmu_init(void)
 		hweight32(rapl_cntr_mask),
 		ktime_to_ms(pmu->timer_interval));
 
-	put_online_cpus();
+	cpu_notifier_register_done();
 
 	return 0;
 }