Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c | 20 +++-----------------
1 file changed, 3 insertions(+), 17 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index a485ba124476..9f1dd18fa395 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1029,20 +1029,6 @@ static __initconst const u64 slm_hw_cache_event_ids
 	},
 };
 
-static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event)
-{
-	/* user explicitly requested branch sampling */
-	if (has_branch_stack(event))
-		return true;
-
-	/* implicit branch sampling to correct PEBS skid */
-	if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1 &&
-	    x86_pmu.intel_cap.pebs_format < 2)
-		return true;
-
-	return false;
-}
-
 static void intel_pmu_disable_all(void)
 {
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
@@ -1207,7 +1193,7 @@ static void intel_pmu_disable_event(struct perf_event *event)
 	 * must disable before any actual event
 	 * because any event may be combined with LBR
 	 */
-	if (intel_pmu_needs_lbr_smpl(event))
+	if (needs_branch_stack(event))
 		intel_pmu_lbr_disable(event);
 
 	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
@@ -1268,7 +1254,7 @@ static void intel_pmu_enable_event(struct perf_event *event)
 	 * must enabled before any actual event
 	 * because any event may be combined with LBR
 	 */
-	if (intel_pmu_needs_lbr_smpl(event))
+	if (needs_branch_stack(event))
 		intel_pmu_lbr_enable(event);
 
 	if (event->attr.exclude_host)
@@ -1747,7 +1733,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
 	if (event->attr.precise_ip && x86_pmu.pebs_aliases)
 		x86_pmu.pebs_aliases(event);
 
-	if (intel_pmu_needs_lbr_smpl(event)) {
+	if (needs_branch_stack(event)) {
 		ret = intel_pmu_setup_lbr_filter(event);
 		if (ret)
 			return ret;
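
For reference, needs_branch_stack() is not defined in this file: the open-coded intel_pmu_needs_lbr_smpl() check removed above is replaced at each call site by a generic helper from core perf. A minimal sketch of that helper follows, assuming it lives in include/linux/perf_event.h and simply tests whether any branch sampling type was requested; its actual body is not part of this diff.

/*
 * Sketch only -- assumed shape of the generic helper, not part of this patch.
 * The idea: an event needs the LBR machinery whenever any branch_sample_type
 * bit is set, independently of whether the branch stack is also written into
 * the sample buffer (PERF_SAMPLE_BRANCH_STACK in attr.sample_type, which is
 * what has_branch_stack() tests).
 */
static inline bool needs_branch_stack(struct perf_event *event)
{
	return event->attr.branch_sample_type != 0;
}

Centralising the check in one generic helper keeps core perf and the x86 PMU code in agreement about when LBR resources must be enabled, instead of each PMU open-coding its own variant such as intel_pmu_needs_lbr_smpl() above.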