author	Ingo Molnar <mingo@elte.hu>	2008-12-09 05:40:46 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-12-09 13:28:48 -0500
commit	7e2ae34749edf19e76e594b9c4b2cdde1066afc5 (patch)
tree	165879c7b508cac9b3238750bb8a3786e018b352 /arch/x86/kernel/cpu/perf_counter.c
parent	4c59e4676dc95f6f58a2cff5390b2699fa5b5549 (diff)
perfcounters, x86: simplify disable/enable of counters
Impact: fix spurious missed counter wakeups
In the case of NMI events, close a race window that can occur if an NMI
hits counter code that temporarily disables+enables a counter, and the NMI
leaks into the disabled section.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
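The race being closed can be illustrated with a rough user-space sketch (this is not kernel code; names such as shadow_config, hw_eventsel and the handler functions are invented for illustration, and the real window also involves the counter period and wakeup bookkeeping). The old NMI path trusted the software copy of the EVENTSEL enable bit, which is transiently clear while the normal path reprograms a counter; the patch instead reads the live control register (rdmsrl_safe() in the diff below) before deciding whether to re-enable:

/*
 * Minimal user-space sketch of the bug class, NOT kernel code.
 * All identifiers here are made up for illustration.
 */
#include <stdio.h>

#define ENABLE_BIT	0x1UL

static volatile unsigned long hw_eventsel;		/* stands in for the EVENTSEL MSR */
static unsigned long shadow_config = ENABLE_BIT;	/* software copy of the config */

/*
 * Old scheme: the normal path transiently clears the enable bit in the
 * software copy while reprogramming the counter.
 */
static void reprogram_counter(void)
{
	shadow_config &= ~ENABLE_BIT;
	hw_eventsel = shadow_config;
	/* ...an NMI arriving here sees ENABLE_BIT clear in shadow_config... */
	shadow_config |= ENABLE_BIT;
	hw_eventsel = shadow_config;
}

/*
 * Old NMI path: consults the software copy, so inside the window above it
 * skips the re-enable and the counter stays off -> missed wakeups.
 */
static void nmi_handler_old(void)
{
	if (shadow_config & ENABLE_BIT)
		hw_eventsel = shadow_config;		/* re-arm the counter */
}

/*
 * New scheme (what the patch does): read the real control register
 * (rdmsrl_safe() in the kernel) so the decision matches hardware state.
 */
static void nmi_handler_new(void)
{
	unsigned long pmc_ctrl = hw_eventsel;		/* authoritative state */

	if (pmc_ctrl & ENABLE_BIT)
		hw_eventsel = shadow_config | ENABLE_BIT;
}

int main(void)
{
	/*
	 * This only exercises both paths once; the racy interleaving occurs
	 * when nmi_handler_old() runs between the two writes in
	 * reprogram_counter().
	 */
	reprogram_counter();
	nmi_handler_old();
	nmi_handler_new();
	printf("hw_eventsel = %#lx\n", hw_eventsel);
	return 0;
}

In the same spirit, the diff below splits the per-counter work into __hw_perf_counter_disable(), __hw_perf_counter_set_period() and __hw_perf_counter_enable() helpers, so both the normal path and the NMI path (perf_save_and_restart) drive the counter from the actual hardware state rather than a software copy of the enable bit.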
Diffstat (limited to 'arch/x86/kernel/cpu/perf_counter.c')
-rw-r--r--	arch/x86/kernel/cpu/perf_counter.c	40
1 files changed, 28 insertions, 12 deletions
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 615e953208e..7d528ffc2d2 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -136,14 +136,25 @@ void hw_perf_disable_all(void)
 	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0);
 }
 
+static inline void
+__hw_perf_counter_disable(struct hw_perf_counter *hwc, unsigned int idx)
+{
+	wrmsr(hwc->config_base + idx, hwc->config, 0);
+}
+
 static DEFINE_PER_CPU(u64, prev_next_count[MAX_HW_COUNTERS]);
 
-static void __hw_perf_counter_enable(struct hw_perf_counter *hwc, int idx)
+static void __hw_perf_counter_set_period(struct hw_perf_counter *hwc, int idx)
 {
 	per_cpu(prev_next_count[idx], smp_processor_id()) = hwc->next_count;
 
 	wrmsr(hwc->counter_base + idx, hwc->next_count, 0);
-	wrmsr(hwc->config_base + idx, hwc->config, 0);
+}
+
+static void __hw_perf_counter_enable(struct hw_perf_counter *hwc, int idx)
+{
+	wrmsr(hwc->config_base + idx,
+	      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE, 0);
 }
 
 void hw_perf_counter_enable(struct perf_counter *counter)
@@ -161,11 +172,11 @@ void hw_perf_counter_enable(struct perf_counter *counter)
 
 	perf_counters_lapic_init(hwc->nmi);
 
-	wrmsr(hwc->config_base + idx,
-	      hwc->config & ~ARCH_PERFMON_EVENTSEL0_ENABLE, 0);
+	__hw_perf_counter_disable(hwc, idx);
 
 	cpuc->counters[idx] = counter;
-	counter->hw.config |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+
+	__hw_perf_counter_set_period(hwc, idx);
 	__hw_perf_counter_enable(hwc, idx);
 }
 
@@ -286,8 +297,7 @@ void hw_perf_counter_disable(struct perf_counter *counter)
 	struct hw_perf_counter *hwc = &counter->hw;
 	unsigned int idx = hwc->idx;
 
-	counter->hw.config &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
-	wrmsr(hwc->config_base + idx, hwc->config, 0);
+	__hw_perf_counter_disable(hwc, idx);
 
 	clear_bit(idx, cpuc->used);
 	cpuc->counters[idx] = NULL;
@@ -328,18 +338,24 @@ static void perf_store_irq_data(struct perf_counter *counter, u64 data)
 	}
 }
 
+/*
+ * NMI-safe enable method:
+ */
 static void perf_save_and_restart(struct perf_counter *counter)
 {
 	struct hw_perf_counter *hwc = &counter->hw;
 	int idx = hwc->idx;
+	u64 pmc_ctrl;
+	int err;
 
-	wrmsr(hwc->config_base + idx,
-	      hwc->config & ~ARCH_PERFMON_EVENTSEL0_ENABLE, 0);
+	err = rdmsrl_safe(MSR_ARCH_PERFMON_EVENTSEL0 + idx, &pmc_ctrl);
+	WARN_ON_ONCE(err);
 
-	if (hwc->config & ARCH_PERFMON_EVENTSEL0_ENABLE) {
-		__hw_perf_save_counter(counter, hwc, idx);
+	__hw_perf_save_counter(counter, hwc, idx);
+	__hw_perf_counter_set_period(hwc, idx);
+
+	if (pmc_ctrl & ARCH_PERFMON_EVENTSEL0_ENABLE)
 		__hw_perf_counter_enable(hwc, idx);
-	}
 }
 
 static void