aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorSrivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>2014-03-10 16:40:36 -0400
committerRafael J. Wysocki <rafael.j.wysocki@intel.com>2014-03-20 08:43:46 -0400
commit180d86463257812dc17e5df912f3bddcc96abb00 (patch)
tree12a3ef30ebfa8bea3554b26e379403a8bcd495d3
parent07494d547e92bde6857522d2a92ff70896aecadb (diff)
oprofile, nmi-timer: Fix CPU hotplug callback registration
Subsystems that want to register CPU hotplug callbacks, as well as perform
initialization for the CPUs that are already online, often do it as shown
below:

	get_online_cpus();

	for_each_online_cpu(cpu)
		init_cpu(cpu);

	register_cpu_notifier(&foobar_cpu_notifier);

	put_online_cpus();

This is wrong, since it is prone to ABBA deadlocks involving the
cpu_add_remove_lock and the cpu_hotplug.lock (when running concurrently
with CPU hotplug operations).

Instead, the correct and race-free way of performing the callback
registration is:

	cpu_notifier_register_begin();

	for_each_online_cpu(cpu)
		init_cpu(cpu);

	/* Note the use of the double underscored version of the API */
	__register_cpu_notifier(&foobar_cpu_notifier);

	cpu_notifier_register_done();

Fix the nmi-timer code in oprofile by using this latter form of callback
registration.

Cc: Robert Richter <rric@kernel.org>
Cc: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
-rw-r--r--drivers/oprofile/nmi_timer_int.c23
1 files changed, 13 insertions, 10 deletions
diff --git a/drivers/oprofile/nmi_timer_int.c b/drivers/oprofile/nmi_timer_int.c
index 76f1c9357f39..9559829fb234 100644
--- a/drivers/oprofile/nmi_timer_int.c
+++ b/drivers/oprofile/nmi_timer_int.c
@@ -108,8 +108,8 @@ static void nmi_timer_shutdown(void)
108 struct perf_event *event; 108 struct perf_event *event;
109 int cpu; 109 int cpu;
110 110
111 get_online_cpus(); 111 cpu_notifier_register_begin();
112 unregister_cpu_notifier(&nmi_timer_cpu_nb); 112 __unregister_cpu_notifier(&nmi_timer_cpu_nb);
113 for_each_possible_cpu(cpu) { 113 for_each_possible_cpu(cpu) {
114 event = per_cpu(nmi_timer_events, cpu); 114 event = per_cpu(nmi_timer_events, cpu);
115 if (!event) 115 if (!event)
@@ -119,7 +119,7 @@ static void nmi_timer_shutdown(void)
119 perf_event_release_kernel(event); 119 perf_event_release_kernel(event);
120 } 120 }
121 121
122 put_online_cpus(); 122 cpu_notifier_register_done();
123} 123}
124 124
125static int nmi_timer_setup(void) 125static int nmi_timer_setup(void)
@@ -132,20 +132,23 @@ static int nmi_timer_setup(void)
132 do_div(period, HZ); 132 do_div(period, HZ);
133 nmi_timer_attr.sample_period = period; 133 nmi_timer_attr.sample_period = period;
134 134
135 get_online_cpus(); 135 cpu_notifier_register_begin();
136 err = register_cpu_notifier(&nmi_timer_cpu_nb); 136 err = __register_cpu_notifier(&nmi_timer_cpu_nb);
137 if (err) 137 if (err)
138 goto out; 138 goto out;
139
139 /* can't attach events to offline cpus: */ 140 /* can't attach events to offline cpus: */
140 for_each_online_cpu(cpu) { 141 for_each_online_cpu(cpu) {
141 err = nmi_timer_start_cpu(cpu); 142 err = nmi_timer_start_cpu(cpu);
142 if (err) 143 if (err) {
143 break; 144 cpu_notifier_register_done();
145 nmi_timer_shutdown();
146 return err;
147 }
144 } 148 }
145 if (err) 149
146 nmi_timer_shutdown();
147out: 150out:
148 put_online_cpus(); 151 cpu_notifier_register_done();
149 return err; 152 return err;
150} 153}
151 154