aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--arch/x86/kernel/cpu/perf_counter.c1
-rw-r--r--kernel/perf_counter.c12
2 files changed, 7 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index c109819c2cb9..6cc1660db8d6 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -740,6 +740,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
 again:
 	if (++loops > 100) {
 		WARN_ONCE(1, "perfcounters: irq loop stuck!\n");
+		perf_counter_print_debug();
 		return 1;
 	}
 
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 69d4de815963..08584c16049f 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -208,18 +208,17 @@ static void __perf_counter_remove_from_context(void *info)
 		return;
 
 	spin_lock_irqsave(&ctx->lock, flags);
+	/*
+	 * Protect the list operation against NMI by disabling the
+	 * counters on a global level.
+	 */
+	perf_disable();
 
 	counter_sched_out(counter, cpuctx, ctx);
 
 	counter->task = NULL;
 
-	/*
-	 * Protect the list operation against NMI by disabling the
-	 * counters on a global level. NOP for non NMI based counters.
-	 */
-	perf_disable();
 	list_del_counter(counter, ctx);
-	perf_enable();
 
 	if (!ctx->task) {
 		/*
@@ -231,6 +230,7 @@ static void __perf_counter_remove_from_context(void *info)
 			perf_max_counters - perf_reserved_percpu);
 	}
 
+	perf_enable();
 	spin_unlock_irqrestore(&ctx->lock, flags);
 }
 