author		Robert Richter <robert.richter@amd.com>	2010-09-15 12:20:34 -0400
committer	Ingo Molnar <mingo@elte.hu>	2010-09-24 06:21:41 -0400
commit		63e6be6d98e1a2bcdca86872b67052e51ab6afa1 (patch)
tree		c7aa3de767ab2f5441c66bdd3ad38cf3b60306ee /arch/x86/kernel/cpu
parent		258af47479980d8238a04568b94a4e55aa1cb537 (diff)
perf, x86: Catch spurious interrupts after disabling counters
Some cpus still deliver spurious interrupts after a counter has
been disabled, which caused 'undelivered NMI' messages. This patch
catches those interrupts. Introduced by:
4177c42: perf, x86: Try to handle unknown nmis with an enabled PMU
Reported-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: Don Zickus <dzickus@redhat.com>
Cc: gorcunov@gmail.com <gorcunov@gmail.com>
Cc: fweisbec@gmail.com <fweisbec@gmail.com>
Cc: ying.huang@intel.com <ying.huang@intel.com>
Cc: ming.m.lin@intel.com <ming.m.lin@intel.com>
Cc: yinghai@kernel.org <yinghai@kernel.org>
Cc: andi@firstfloor.org <andi@firstfloor.org>
Cc: eranian@google.com <eranian@google.com>
Cc: Peter Zijlstra <peterz@infradead.org>
LKML-Reference: <20100915162034.GO13563@erda.amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r--	arch/x86/kernel/cpu/perf_event.c	12
1 file changed, 11 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 3efdf2870a35..03a5b0385ad6 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -102,6 +102,7 @@ struct cpu_hw_events {
 	 */
 	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
 	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+	unsigned long		running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
 	int			enabled;
 
 	int			n_events;
@@ -1010,6 +1011,7 @@ static int x86_pmu_start(struct perf_event *event)
 	x86_perf_event_set_period(event);
 	cpuc->events[idx] = event;
 	__set_bit(idx, cpuc->active_mask);
+	__set_bit(idx, cpuc->running);
 	x86_pmu.enable(event);
 	perf_event_update_userpage(event);
 
@@ -1141,8 +1143,16 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
 	cpuc = &__get_cpu_var(cpu_hw_events);
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
-		if (!test_bit(idx, cpuc->active_mask))
+		if (!test_bit(idx, cpuc->active_mask)) {
+			/*
+			 * Though we deactivated the counter some cpus
+			 * might still deliver spurious interrupts still
+			 * in flight. Catch them:
+			 */
+			if (__test_and_clear_bit(idx, cpuc->running))
+				handled++;
 			continue;
+		}
 
 		event = cpuc->events[idx];
 		hwc = &event->hw;
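
The patch above reduces to a small bookkeeping pattern: alongside active_mask,
keep a second bitmask that remembers a counter was ever started, clear it only
from the interrupt handler, and use it to claim interrupts that arrive after
the counter has been disabled. To show the pattern in isolation, here is a
minimal user-space C sketch; the bit helpers, NUM_COUNTERS, and the simulated
overflow mask are illustrative stand-ins, not the kernel's implementations.

/*
 * User-space sketch of the 'running' bitmask pattern. The helpers
 * below are simplified stand-ins for the kernel's test_bit() and
 * __test_and_clear_bit(); the overflow mask passed to handle_irq()
 * simulates which counters raised an interrupt.
 */
#include <stdio.h>
#include <stdbool.h>

#define NUM_COUNTERS 4

static unsigned long active_mask;	/* counter is currently enabled */
static unsigned long running;		/* counter was enabled at some point */

static bool test_bit(int idx, unsigned long mask)
{
	return (mask >> idx) & 1UL;
}

static bool test_and_clear_bit(int idx, unsigned long *mask)
{
	bool old = (*mask >> idx) & 1UL;

	*mask &= ~(1UL << idx);
	return old;
}

static void counter_start(int idx)
{
	active_mask |= 1UL << idx;
	running	    |= 1UL << idx;	/* remember it ran; survives stop */
}

static void counter_stop(int idx)
{
	active_mask &= ~(1UL << idx);	/* the 'running' bit stays set */
}

/* Returns the number of interrupts this handler accounted for. */
static int handle_irq(unsigned long overflow_mask)
{
	int idx, handled = 0;

	for (idx = 0; idx < NUM_COUNTERS; idx++) {
		if (!test_bit(idx, overflow_mask))
			continue;
		if (!test_bit(idx, active_mask)) {
			/*
			 * The counter is disabled, but an interrupt that
			 * was already in flight may still arrive. If the
			 * counter ever ran, claim the interrupt instead
			 * of letting it look like an unknown NMI.
			 */
			if (test_and_clear_bit(idx, &running))
				handled++;
			continue;
		}
		handled++;	/* normal case: active counter overflowed */
	}
	return handled;
}

int main(void)
{
	counter_start(2);
	counter_stop(2);
	/* A late interrupt for counter 2 arrives after it was stopped: */
	printf("handled = %d\n", handle_irq(1UL << 2));	/* prints 1 */
	printf("handled = %d\n", handle_irq(1UL << 2));	/* prints 0 */
	return 0;
}

The asymmetry is what makes this work: counter_stop() clears only
active_mask, so the running bit survives until the first late interrupt
consumes it via test_and_clear_bit(). Further interrupts for the same
stopped counter are again left unclaimed, so a genuinely misbehaving
interrupt source can still surface as an unknown NMI.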