diff options
author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2009-05-14 08:52:17 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-05-15 03:47:03 -0400 |
commit | a4016a79fcbd139e7378944c0d86a39fdbc70ecc (patch) | |
tree | 5cb378852a6714d84c6e5db1936bfff14c5d9c0d /arch | |
parent | 9e35ad388bea89f7d6f375af4c0ae98803688666 (diff) |
perf_counter: x86: Robustify interrupt handling
Two consecutive NMIs could daze and confuse the machine when the
first would handle the overflow of both counters.
[ Impact: fix false-positive syslog messages under multi-session profiling ]
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch')
-rw-r--r-- | arch/x86/kernel/cpu/perf_counter.c | 16 |
1 file changed, 13 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 313638cecbb5..1dcf67057f16 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c | |||
@@ -783,6 +783,10 @@ static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi) | |||
783 | 783 | ||
784 | counter = cpuc->counters[idx]; | 784 | counter = cpuc->counters[idx]; |
785 | hwc = &counter->hw; | 785 | hwc = &counter->hw; |
786 | |||
787 | if (counter->hw_event.nmi != nmi) | ||
788 | goto next; | ||
789 | |||
786 | val = x86_perf_counter_update(counter, hwc, idx); | 790 | val = x86_perf_counter_update(counter, hwc, idx); |
787 | if (val & (1ULL << (x86_pmu.counter_bits - 1))) | 791 | if (val & (1ULL << (x86_pmu.counter_bits - 1))) |
788 | goto next; | 792 | goto next; |
@@ -869,7 +873,6 @@ perf_counter_nmi_handler(struct notifier_block *self, | |||
869 | { | 873 | { |
870 | struct die_args *args = __args; | 874 | struct die_args *args = __args; |
871 | struct pt_regs *regs; | 875 | struct pt_regs *regs; |
872 | int ret; | ||
873 | 876 | ||
874 | if (!atomic_read(&active_counters)) | 877 | if (!atomic_read(&active_counters)) |
875 | return NOTIFY_DONE; | 878 | return NOTIFY_DONE; |
@@ -886,9 +889,16 @@ perf_counter_nmi_handler(struct notifier_block *self, | |||
886 | regs = args->regs; | 889 | regs = args->regs; |
887 | 890 | ||
888 | apic_write(APIC_LVTPC, APIC_DM_NMI); | 891 | apic_write(APIC_LVTPC, APIC_DM_NMI); |
889 | ret = x86_pmu.handle_irq(regs, 1); | 892 | /* |
893 | * Can't rely on the handled return value to say it was our NMI, two | ||
894 | * counters could trigger 'simultaneously' raising two back-to-back NMIs. | ||
895 | * | ||
896 | * If the first NMI handles both, the latter will be empty and daze | ||
897 | * the CPU. | ||
898 | */ | ||
899 | x86_pmu.handle_irq(regs, 1); | ||
890 | 900 | ||
891 | return ret ? NOTIFY_STOP : NOTIFY_OK; | 901 | return NOTIFY_STOP; |
892 | } | 902 | } |
893 | 903 | ||
894 | static __read_mostly struct notifier_block perf_counter_nmi_notifier = { | 904 | static __read_mostly struct notifier_block perf_counter_nmi_notifier = { |