-rw-r--r--  arch/x86/kernel/cpu/perf_event.h        |  5
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c  | 22
2 files changed, 18 insertions, 9 deletions
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index cc16faae0538..ce84edeeae27 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -164,6 +164,11 @@ struct cpu_hw_events {
 	struct perf_guest_switch_msr	guest_switch_msrs[X86_PMC_IDX_MAX];
 
 	/*
+	 * Intel checkpoint mask
+	 */
+	u64				intel_cp_status;
+
+	/*
 	 * manage shared (per-core, per-cpu) registers
 	 * used on Intel NHM/WSM/SNB
 	 */
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index dd1d4f3e18e6..ec70d0cce555 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1184,6 +1184,11 @@ static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
 	wrmsrl(hwc->config_base, ctrl_val);
 }
 
+static inline bool event_is_checkpointed(struct perf_event *event)
+{
+	return (event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
+}
+
 static void intel_pmu_disable_event(struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
@@ -1197,6 +1202,7 @@ static void intel_pmu_disable_event(struct perf_event *event)
 
 	cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
 	cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
+	cpuc->intel_cp_status &= ~(1ull << hwc->idx);
 
 	/*
 	 * must disable before any actual event
@@ -1271,6 +1277,9 @@ static void intel_pmu_enable_event(struct perf_event *event)
 	if (event->attr.exclude_guest)
 		cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);
 
+	if (unlikely(event_is_checkpointed(event)))
+		cpuc->intel_cp_status |= (1ull << hwc->idx);
+
 	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
 		intel_pmu_enable_fixed(hwc);
 		return;
@@ -1282,11 +1291,6 @@ static void intel_pmu_enable_event(struct perf_event *event)
 	__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
 }
 
-static inline bool event_is_checkpointed(struct perf_event *event)
-{
-	return (event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
-}
-
 /*
  * Save and restart an expired event. Called by NMI contexts,
  * so it has to be careful about preempting normal event ops:
@@ -1389,11 +1393,11 @@ again:
 	}
 
 	/*
-	 * To avoid spurious interrupts with perf stat always reset checkpointed
-	 * counters.
+	 * Checkpointed counters can lead to 'spurious' PMIs because the
+	 * rollback caused by the PMI will have cleared the overflow status
+	 * bit. Therefore always force probe these counters.
 	 */
-	if (cpuc->events[2] && event_is_checkpointed(cpuc->events[2]))
-		status |= (1ULL << 2);
+	status |= cpuc->intel_cp_status;
 
 	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
 		struct perf_event *event = cpuc->events[bit];
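
For readers following along, below is a minimal, self-contained C sketch of the scheme this patch switches to: keep a per-CPU bitmask of which counters are checkpointed, update it when events are enabled or disabled, and have the PMI handler OR that mask into the overflow status so checkpointed counters are always probed even after a transaction rollback has cleared their overflow bit. This is an illustration, not the kernel code; all names here (fake_cpu_hw_events, fake_event, handle_pmi, FAKE_IN_TX_CHECKPOINTED) are hypothetical stand-ins for the kernel's cpu_hw_events, perf_event, the PMI status loop, and HSW_IN_TX_CHECKPOINTED.

/* Standalone illustration only -- none of these names exist in the kernel. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FAKE_IN_TX_CHECKPOINTED	(1ULL << 33)	/* stand-in for HSW_IN_TX_CHECKPOINTED */
#define FAKE_MAX_COUNTERS	64

struct fake_event {
	uint64_t config;	/* event configuration bits */
	int idx;		/* hardware counter index */
};

struct fake_cpu_hw_events {
	uint64_t intel_cp_status;	/* mask of enabled checkpointed counters */
};

static bool event_is_checkpointed(const struct fake_event *event)
{
	return (event->config & FAKE_IN_TX_CHECKPOINTED) != 0;
}

static void enable_event(struct fake_cpu_hw_events *cpuc, const struct fake_event *event)
{
	if (event_is_checkpointed(event))
		cpuc->intel_cp_status |= (1ULL << event->idx);
}

static void disable_event(struct fake_cpu_hw_events *cpuc, const struct fake_event *event)
{
	cpuc->intel_cp_status &= ~(1ULL << event->idx);
}

/*
 * PMI handler: a checkpointed counter may have rolled back and cleared its
 * overflow bit, so force-probe every counter in the checkpoint mask.
 */
static void handle_pmi(struct fake_cpu_hw_events *cpuc, uint64_t status)
{
	int bit;

	status |= cpuc->intel_cp_status;

	for (bit = 0; bit < FAKE_MAX_COUNTERS; bit++) {
		if (status & (1ULL << bit))
			printf("probing counter %d\n", bit);
	}
}

int main(void)
{
	struct fake_cpu_hw_events cpuc = { 0 };
	struct fake_event ev = { .config = FAKE_IN_TX_CHECKPOINTED, .idx = 2 };

	enable_event(&cpuc, &ev);
	handle_pmi(&cpuc, 0);	/* counter 2 is probed even though its overflow bit is clear */
	disable_event(&cpuc, &ev);
	return 0;
}

Compared with the old code, which hard-coded counter 2 and only checked cpuc->events[2], the mask generalizes the force-probe to any counter that happens to host a checkpointed event.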