author		Ingo Molnar <mingo@elte.hu>	2009-05-26 02:10:00 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-05-26 03:49:28 -0400
commit		79202ba9ff8cf570a75596f42e011167734d1c4b
tree		a7a2ebd002db9a212319da0bdc82609fe47e49e1 /arch/x86
parent		8a7b8cb91f26a671f22cedc7fd54508667f2d9b9
perf_counter, x86: Fix APIC NMI programming
My Nehalem box locks up in certain situations (with an
always-asserted NMI) if the PMU LVT entry is reprogrammed
between NMI and IRQ delivery mode at high frequency.

Standardize exclusively on NMIs instead.
[ Impact: fix lockup ]
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
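
For context, the lockup described above comes from flipping the local APIC's
performance-counter LVT entry (LVTPC) between NMI delivery and a regular local
interrupt vector. The following is only an illustrative sketch of that toggling
pattern, loosely modeled on the perf_counters_lapic_init() helper of this era:
the function name is invented, APIC_LVTPC and APIC_DM_NMI are real apicdef.h
constants, and LOCAL_PERF_VECTOR stands in for the fixed vector used on the
IRQ path.

#include <asm/apic.h>	/* apic_write(), APIC_LVTPC, APIC_DM_NMI */

/*
 * Illustrative sketch only (not part of this commit): program the PMU
 * LVT entry either for NMI delivery or for a normal local vector.
 * Rapidly flipping between these two modes is what the commit message
 * identifies as the lockup trigger; after this change the NMI form is
 * used unconditionally.
 */
static void sketch_program_lvtpc(int nmi)
{
	if (nmi)
		apic_write(APIC_LVTPC, APIC_DM_NMI);
	else
		apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);	/* fixed IRQ vector */
}

With the patch below only the APIC_DM_NMI form is ever programmed, so the LVT
entry no longer oscillates between delivery modes.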
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/kernel/cpu/perf_counter.c	16
1 file changed, 3 insertions(+), 13 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 189bf9d7cdab..ece3813c7a3c 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -285,14 +285,10 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 		hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
 
 	/*
-	 * If privileged enough, allow NMI events:
+	 * Use NMI events all the time:
 	 */
-	hwc->nmi	= 0;
-	if (hw_event->nmi) {
-		if (sysctl_perf_counter_priv && !capable(CAP_SYS_ADMIN))
-			return -EACCES;
-		hwc->nmi = 1;
-	}
+	hwc->nmi	= 1;
+	hw_event->nmi	= 1;
 
 	if (!hwc->irq_period)
 		hwc->irq_period = x86_pmu.max_period;
@@ -553,9 +549,6 @@ fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
 	if (!x86_pmu.num_counters_fixed)
 		return -1;
 
-	if (unlikely(hwc->nmi))
-		return -1;
-
 	event = hwc->config & ARCH_PERFMON_EVENT_MASK;
 
 	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_INSTRUCTIONS)))
@@ -806,9 +799,6 @@ static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi)
 		counter = cpuc->counters[idx];
 		hwc = &counter->hw;
 
-		if (counter->hw_event.nmi != nmi)
-			continue;
-
 		val = x86_perf_counter_update(counter, hwc, idx);
 		if (val & (1ULL << (x86_pmu.counter_bits - 1)))
 			continue;
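
With NMIs used unconditionally, the two removed guards become dead logic:
fixed_mode_idx() no longer has to refuse a fixed counter just because the
counter is NMI-driven, and amd_pmu_handle_irq() no longer skips counters whose
hw_event.nmi flag does not match how the handler was entered. A hypothetical
sanity check, not part of this commit, could confirm that the LVT entry really
stays in NMI delivery mode; the function name is invented, APIC_DM_NMI comes
from apicdef.h, and 0x700 masks the LVT delivery-mode field.

#include <linux/kernel.h>
#include <asm/apic.h>	/* apic_read(), APIC_LVTPC, APIC_DM_NMI */

/*
 * Hypothetical debug helper (not from this commit): warn if the PMU LVT
 * entry has been left in something other than NMI delivery mode.
 */
static void sketch_check_lvtpc_nmi(void)
{
	u32 lvtpc = apic_read(APIC_LVTPC);

	/* Bits 8-10 of an LVT entry select the delivery mode. */
	if ((lvtpc & 0x700) != APIC_DM_NMI)
		printk(KERN_WARNING "LVTPC not in NMI mode: 0x%08x\n", lvtpc);
}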