author		Peter Zijlstra <peterz@infradead.org>	2013-09-12 06:53:44 -0400
committer	Ingo Molnar <mingo@kernel.org>	2013-09-12 13:13:37 -0400
commit		2b9e344df384e595db24ac61ae5f780e9b024878 (patch)
tree		8fdbf56c0a5f2700a104f546ac1871382cf45e0c
parent		4b2c4f1f1b71fe18381f089c501ac21cd2167dfa (diff)
perf/x86/intel: Clean up checkpoint-interrupt bits
Clean up the weird CP interrupt exception code by keeping a CP mask.

Andi suggested this implementation but, weirdly, didn't actually implement it himself; do so now because it removes the conditional in the interrupt handler and avoids the assumption that it's only on cnt2.

Suggested-by: Andi Kleen <andi@firstfloor.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/n/tip-dvb4q0rydkfp00kqat4p5bah@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--	arch/x86/kernel/cpu/perf_event.h	5
-rw-r--r--	arch/x86/kernel/cpu/perf_event_intel.c	22
2 files changed, 18 insertions(+), 9 deletions(-)
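
Before the diff itself, a minimal stand-alone sketch of the idea the changelog describes may help: checkpointed events are recorded in a per-CPU bitmask keyed by counter index, and the PMI handler ORs that mask into the hardware overflow status instead of special-casing counter 2. This is illustration only, not the kernel code; the struct and helper names (cpu_state, sketch_enable_event, sketch_disable_event, sketch_merge_status) are invented for the example.

/*
 * Illustrative sketch only -- not the kernel implementation.
 */
#include <stdbool.h>
#include <stdint.h>

struct cpu_state {
        uint64_t intel_cp_status;       /* one bit per hardware counter index */
};

/* On enable, remember which counter indices back a checkpointed event. */
static void sketch_enable_event(struct cpu_state *cpuc, int idx, bool checkpointed)
{
        if (checkpointed)
                cpuc->intel_cp_status |= 1ull << idx;
}

/* On disable, clear that counter's bit again. */
static void sketch_disable_event(struct cpu_state *cpuc, int idx)
{
        cpuc->intel_cp_status &= ~(1ull << idx);
}

/*
 * PMI path: a checkpointed counter may have rolled back and cleared its
 * overflow bit, so force-probe those counters by merging the mask into
 * the overflow status read from hardware -- no per-counter conditional.
 */
static uint64_t sketch_merge_status(const struct cpu_state *cpuc, uint64_t hw_status)
{
        return hw_status | cpuc->intel_cp_status;
}
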
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index cc16faae0538..ce84edeeae27 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -164,6 +164,11 @@ struct cpu_hw_events {
 	struct perf_guest_switch_msr	guest_switch_msrs[X86_PMC_IDX_MAX];
 
 	/*
+	 * Intel checkpoint mask
+	 */
+	u64				intel_cp_status;
+
+	/*
 	 * manage shared (per-core, per-cpu) registers
 	 * used on Intel NHM/WSM/SNB
 	 */
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index dd1d4f3e18e6..ec70d0cce555 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1184,6 +1184,11 @@ static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
 	wrmsrl(hwc->config_base, ctrl_val);
 }
 
+static inline bool event_is_checkpointed(struct perf_event *event)
+{
+	return (event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
+}
+
 static void intel_pmu_disable_event(struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
@@ -1197,6 +1202,7 @@ static void intel_pmu_disable_event(struct perf_event *event)
 
 	cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
 	cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
+	cpuc->intel_cp_status &= ~(1ull << hwc->idx);
 
 	/*
 	 * must disable before any actual event
@@ -1271,6 +1277,9 @@ static void intel_pmu_enable_event(struct perf_event *event)
 	if (event->attr.exclude_guest)
 		cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);
 
+	if (unlikely(event_is_checkpointed(event)))
+		cpuc->intel_cp_status |= (1ull << hwc->idx);
+
 	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
 		intel_pmu_enable_fixed(hwc);
 		return;
@@ -1282,11 +1291,6 @@ static void intel_pmu_enable_event(struct perf_event *event)
 	__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
 }
 
-static inline bool event_is_checkpointed(struct perf_event *event)
-{
-	return (event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
-}
-
 /*
  * Save and restart an expired event. Called by NMI contexts,
  * so it has to be careful about preempting normal event ops:
@@ -1389,11 +1393,11 @@ again:
 	}
 
 	/*
-	 * To avoid spurious interrupts with perf stat always reset checkpointed
-	 * counters.
+	 * Checkpointed counters can lead to 'spurious' PMIs because the
+	 * rollback caused by the PMI will have cleared the overflow status
+	 * bit. Therefore always force probe these counters.
 	 */
-	if (cpuc->events[2] && event_is_checkpointed(cpuc->events[2]))
-		status |= (1ULL << 2);
+	status |= cpuc->intel_cp_status;
 
 	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
 		struct perf_event *event = cpuc->events[bit];