-rw-r--r-- arch/x86/kernel/cpu/perf_counter.c | 40 ++++++++++++++++++++++++++++------------
1 file changed, 28 insertions(+), 12 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 615e953208e..7d528ffc2d2 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -136,14 +136,25 @@ void hw_perf_disable_all(void)
 	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0);
 }
 
+static inline void
+__hw_perf_counter_disable(struct hw_perf_counter *hwc, unsigned int idx)
+{
+	wrmsr(hwc->config_base + idx, hwc->config, 0);
+}
+
 static DEFINE_PER_CPU(u64, prev_next_count[MAX_HW_COUNTERS]);
 
-static void __hw_perf_counter_enable(struct hw_perf_counter *hwc, int idx)
+static void __hw_perf_counter_set_period(struct hw_perf_counter *hwc, int idx)
 {
 	per_cpu(prev_next_count[idx], smp_processor_id()) = hwc->next_count;
 
 	wrmsr(hwc->counter_base + idx, hwc->next_count, 0);
-	wrmsr(hwc->config_base + idx, hwc->config, 0);
+}
+
+static void __hw_perf_counter_enable(struct hw_perf_counter *hwc, int idx)
+{
+	wrmsr(hwc->config_base + idx,
+	      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE, 0);
 }
 
 void hw_perf_counter_enable(struct perf_counter *counter)
@@ -161,11 +172,11 @@ void hw_perf_counter_enable(struct perf_counter *counter)
 
 	perf_counters_lapic_init(hwc->nmi);
 
-	wrmsr(hwc->config_base + idx,
-	      hwc->config & ~ARCH_PERFMON_EVENTSEL0_ENABLE, 0);
+	__hw_perf_counter_disable(hwc, idx);
 
 	cpuc->counters[idx] = counter;
-	counter->hw.config |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+
+	__hw_perf_counter_set_period(hwc, idx);
 	__hw_perf_counter_enable(hwc, idx);
 }
 
@@ -286,8 +297,7 @@ void hw_perf_counter_disable(struct perf_counter *counter)
 	struct hw_perf_counter *hwc = &counter->hw;
 	unsigned int idx = hwc->idx;
 
-	counter->hw.config &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
-	wrmsr(hwc->config_base + idx, hwc->config, 0);
+	__hw_perf_counter_disable(hwc, idx);
 
 	clear_bit(idx, cpuc->used);
 	cpuc->counters[idx] = NULL;
@@ -328,18 +338,24 @@ static void perf_store_irq_data(struct perf_counter *counter, u64 data)
 	}
 }
 
+/*
+ * NMI-safe enable method:
+ */
 static void perf_save_and_restart(struct perf_counter *counter)
 {
 	struct hw_perf_counter *hwc = &counter->hw;
 	int idx = hwc->idx;
+	u64 pmc_ctrl;
+	int err;
 
-	wrmsr(hwc->config_base + idx,
-	      hwc->config & ~ARCH_PERFMON_EVENTSEL0_ENABLE, 0);
+	err = rdmsrl_safe(MSR_ARCH_PERFMON_EVENTSEL0 + idx, &pmc_ctrl);
+	WARN_ON_ONCE(err);
 
-	if (hwc->config & ARCH_PERFMON_EVENTSEL0_ENABLE) {
-		__hw_perf_save_counter(counter, hwc, idx);
+	__hw_perf_save_counter(counter, hwc, idx);
+	__hw_perf_counter_set_period(hwc, idx);
+
+	if (pmc_ctrl & ARCH_PERFMON_EVENTSEL0_ENABLE)
 		__hw_perf_counter_enable(hwc, idx);
-	}
 }
 
 static void
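
For context beyond the patch text: the change splits the old __hw_perf_counter_enable(), which programmed both the period reload value and the event-selector MSR, into three single-purpose helpers, and the NMI path now consults the live EVENTSEL MSR via rdmsrl_safe() instead of a software copy of the config that an NMI could observe mid-update. Below is a minimal stand-alone sketch of that helper split; the in-memory msr[] array, the simplified two-argument wrmsr()/rdmsr() (the kernel versions take separate low/high halves), and the main() driver are illustrative assumptions so the sketch compiles in user space; they are not kernel API.

/*
 * Stand-alone model of the disable / set-period / enable split.
 * MSR writes go to a fake in-memory MSR file instead of hardware.
 */
#include <stdio.h>
#include <stdint.h>

#define ARCH_PERFMON_EVENTSEL0_ENABLE	(1ULL << 22)
#define CONFIG_BASE	0x186	/* MSR_ARCH_PERFMON_EVENTSEL0 */
#define COUNTER_BASE	0x0c1	/* MSR_ARCH_PERFMON_PERFCTR0  */

static uint64_t msr[0x200];	/* fake MSR file (model only) */

static void wrmsr(unsigned int reg, uint64_t val) { msr[reg] = val; }
static uint64_t rdmsr(unsigned int reg) { return msr[reg]; }

struct hw_perf_counter {
	unsigned int	config_base;
	unsigned int	counter_base;
	uint64_t	config;		/* event selector, ENABLE bit kept clear */
	uint64_t	next_count;	/* period reload value */
};

/* Write the selector without the enable bit: the counter stops. */
static void __hw_perf_counter_disable(struct hw_perf_counter *hwc, unsigned int idx)
{
	wrmsr(hwc->config_base + idx, hwc->config);
}

/* Program only the reload value; the selector is untouched. */
static void __hw_perf_counter_set_period(struct hw_perf_counter *hwc, unsigned int idx)
{
	wrmsr(hwc->counter_base + idx, hwc->next_count);
}

/* OR in the enable bit at the last moment: the counter starts. */
static void __hw_perf_counter_enable(struct hw_perf_counter *hwc, unsigned int idx)
{
	wrmsr(hwc->config_base + idx,
	      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
}

int main(void)
{
	struct hw_perf_counter hwc = {
		.config_base  = CONFIG_BASE,
		.counter_base = COUNTER_BASE,
		.config       = 0x41,		/* some event code */
		.next_count   = -10000ULL,	/* overflow after 10000 events */
	};
	unsigned int idx = 0;

	/* Same ordering the patched hw_perf_counter_enable() uses: */
	__hw_perf_counter_disable(&hwc, idx);
	__hw_perf_counter_set_period(&hwc, idx);
	__hw_perf_counter_enable(&hwc, idx);

	/* Like the patched NMI path, check the live MSR, not hwc->config: */
	if (rdmsr(CONFIG_BASE + idx) & ARCH_PERFMON_EVENTSEL0_ENABLE)
		printf("counter %u is enabled in hardware\n", idx);
	return 0;
}

The design point the sketch mirrors is that hwc->config no longer carries ARCH_PERFMON_EVENTSEL0_ENABLE at all: the bit is OR-ed in only by the final write in the enable helper, so disable, set-period, and enable can be sequenced without a racy read-modify-write of software state, and perf_save_and_restart() can decide whether to re-enable purely from what the hardware register says.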