aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/perf_counter.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r--  kernel/perf_counter.c | 12
1 files changed, 6 insertions, 6 deletions
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 69d4de815963..08584c16049f 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -208,18 +208,17 @@ static void __perf_counter_remove_from_context(void *info)
 		return;
 
 	spin_lock_irqsave(&ctx->lock, flags);
+	/*
+	 * Protect the list operation against NMI by disabling the
+	 * counters on a global level.
+	 */
+	perf_disable();
 
 	counter_sched_out(counter, cpuctx, ctx);
 
 	counter->task = NULL;
 
-	/*
-	 * Protect the list operation against NMI by disabling the
-	 * counters on a global level. NOP for non NMI based counters.
-	 */
-	perf_disable();
 	list_del_counter(counter, ctx);
-	perf_enable();
 
 	if (!ctx->task) {
 		/*
@@ -231,6 +230,7 @@ static void __perf_counter_remove_from_context(void *info)
 			perf_max_counters - perf_reserved_percpu);
 	}
 
+	perf_enable();
 	spin_unlock_irqrestore(&ctx->lock, flags);
 }
 