author		Andi Kleen <ak@linux.intel.com>	2015-03-20 13:11:23 -0400
committer	Ingo Molnar <mingo@kernel.org>	2015-04-02 11:33:19 -0400
commit		1a78d93750bb5f61abdc59a91fc3bd06a214542a (patch)
tree		e8307794f69c68367d42c7a0cef8c9a1d5b89461 /arch/x86/kernel/cpu/perf_event_intel.c
parent		15fde1101a1aed11958e0d86bc360f01866a74b1 (diff)
perf/x86/intel: Streamline LBR MSR handling in PMI
The perf PMI currently does unnecessary MSR accesses when LBRs are
enabled. We use LBR freezing, or when in callstack mode force the
LBRs to only filter on ring 3.

So there is no need to disable the LBRs explicitly in the PMI handler.

Also we always unnecessarily rewrite LBR_SELECT in the LBR handler,
even though it can never change.

 5)               |  /* write_msr: MSR_LBR_SELECT(1c8), value 0 */
 5)               |  /* read_msr: MSR_IA32_DEBUGCTLMSR(1d9), value 1801 */
 5)               |  /* write_msr: MSR_IA32_DEBUGCTLMSR(1d9), value 1801 */
 5)               |  /* write_msr: MSR_CORE_PERF_GLOBAL_CTRL(38f), value 70000000f */
 5)               |  /* write_msr: MSR_CORE_PERF_GLOBAL_CTRL(38f), value 0 */
 5)               |  /* write_msr: MSR_LBR_SELECT(1c8), value 0 */
 5)               |  /* read_msr: MSR_IA32_DEBUGCTLMSR(1d9), value 1801 */
 5)               |  /* write_msr: MSR_IA32_DEBUGCTLMSR(1d9), value 1801 */

This patch:

- Avoids disabling already frozen LBRs unnecessarily in the PMI
- Avoids changing LBR_SELECT in the PMI

Signed-off-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: eranian@google.com
Link: http://lkml.kernel.org/r/1426871484-21285-1-git-send-email-andi@firstfloor.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
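The LBR-side half of the change is not visible below, since the diffstat on this page is limited to perf_event_intel.c. As a rough sketch only: assuming the new 'pmi' flag is threaded through to the LBR enable path in perf_event_intel_lbr.c roughly as follows (the function name __intel_pmu_lbr_enable and the cpuc->lbr_sel check are assumptions, not shown in this diff), the PMI case can skip the redundant MSR_LBR_SELECT rewrite:

/*
 * Sketch only -- the real counterpart lives in perf_event_intel_lbr.c,
 * outside this diffstat view.
 */
static void __intel_pmu_lbr_enable(bool pmi)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	u64 debugctl, orig_debugctl;

	/*
	 * LBR_SELECT cannot have changed while the LBRs were frozen,
	 * so only reprogram it on the normal (non-PMI) enable path.
	 */
	if (cpuc->lbr_sel && !pmi)
		wrmsrl(MSR_LBR_SELECT, cpuc->lbr_sel->config);

	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	orig_debugctl = debugctl;
	debugctl |= (DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
	/* Skip the DEBUGCTL write as well when nothing changed. */
	if (debugctl != orig_debugctl)
		wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}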
Diffstat (limited to 'arch/x86/kernel/cpu/perf_event_intel.c')
-rw-r--r--	arch/x86/kernel/cpu/perf_event_intel.c	23
1 file changed, 18 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 59994602bb94..9da2400c2ec3 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1244,7 +1244,10 @@ static __initconst const u64 slm_hw_cache_event_ids
 	},
 };
 
-static void intel_pmu_disable_all(void)
+/*
+ * Use from PMIs where the LBRs are already disabled.
+ */
+static void __intel_pmu_disable_all(void)
 {
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
@@ -1256,15 +1259,20 @@ static void intel_pmu_disable_all(void)
 	intel_bts_disable_local();
 
 	intel_pmu_pebs_disable_all();
+}
+
+static void intel_pmu_disable_all(void)
+{
+	__intel_pmu_disable_all();
 	intel_pmu_lbr_disable_all();
 }
 
-static void intel_pmu_enable_all(int added)
+static void __intel_pmu_enable_all(int added, bool pmi)
 {
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
 	intel_pmu_pebs_enable_all();
-	intel_pmu_lbr_enable_all();
+	intel_pmu_lbr_enable_all(pmi);
 	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
 			x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
 
@@ -1280,6 +1288,11 @@ static void intel_pmu_enable_all(int added)
 	intel_bts_enable_local();
 }
 
+static void intel_pmu_enable_all(int added)
+{
+	__intel_pmu_enable_all(added, false);
+}
+
 /*
  * Workaround for:
  *   Intel Errata AAK100 (model 26)
@@ -1573,7 +1586,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 	 */
 	if (!x86_pmu.late_ack)
 		apic_write(APIC_LVTPC, APIC_DM_NMI);
-	intel_pmu_disable_all();
+	__intel_pmu_disable_all();
 	handled = intel_pmu_drain_bts_buffer();
 	handled += intel_bts_interrupt();
 	status = intel_pmu_get_status();
@@ -1658,7 +1671,7 @@ again:
 	goto again;
 
 done:
-	intel_pmu_enable_all(0);
+	__intel_pmu_enable_all(0, true);
 	/*
 	 * Only unmask the NMI after the overflow counters
 	 * have been reset. This avoids spurious NMIs on