author     Yan, Zheng <zheng.z.yan@intel.com>  2014-11-04 21:56:06 -0500
committer  Ingo Molnar <mingo@kernel.org>      2015-02-18 11:16:11 -0500
commit     a46a23000198d929391aa9dac8de68734efa2703 (patch)
tree       667ff791575984b15e3e6c430668125141497758 /arch
parent     76cb2c617f12a4dd53c0e899972813b805ad6cc2 (diff)
perf: Simplify the branch stack check
Use event->attr.branch_sample_type to replace intel_pmu_needs_lbr_smpl(), avoiding duplicated code that implicitly enables the LBR.

Currently, the branch stack can be enabled in two ways: the user explicitly requests branch sampling, or branch sampling is enabled implicitly to correct PEBS skid. In the explicit case, branch_sample_type is set by the user; in the PEBS case, branch_sample_type is implicitly set to PERF_SAMPLE_BRANCH_ANY in x86_pmu_hw_config.

Signed-off-by: Yan, Zheng <zheng.z.yan@intel.com>
Signed-off-by: Kan Liang <kan.liang@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: eranian@google.com
Cc: jolsa@redhat.com
Link: http://lkml.kernel.org/r/1415156173-10035-11-git-send-email-kan.liang@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
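For context: needs_branch_stack() itself is introduced outside arch/, so it does not appear in this filtered diff. Going by the commit message, both the explicit path and the implicit PEBS-skid path now leave their mark in attr.branch_sample_type, so the helper presumably reduces to a single non-zero check, roughly:

/*
 * Sketch, not the verbatim helper: assumed shape of the
 * needs_branch_stack() check used by the hunks below.
 */
static inline bool needs_branch_stack(struct perf_event *event)
{
	return event->attr.branch_sample_type != 0;
}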
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c  20
1 file changed, 3 insertions(+), 17 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index a485ba124476..9f1dd18fa395 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1029,20 +1029,6 @@ static __initconst const u64 slm_hw_cache_event_ids
 	},
 };
 
-static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event)
-{
-	/* user explicitly requested branch sampling */
-	if (has_branch_stack(event))
-		return true;
-
-	/* implicit branch sampling to correct PEBS skid */
-	if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1 &&
-	    x86_pmu.intel_cap.pebs_format < 2)
-		return true;
-
-	return false;
-}
-
 static void intel_pmu_disable_all(void)
 {
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
@@ -1207,7 +1193,7 @@ static void intel_pmu_disable_event(struct perf_event *event)
 	 * must disable before any actual event
 	 * because any event may be combined with LBR
 	 */
-	if (intel_pmu_needs_lbr_smpl(event))
+	if (needs_branch_stack(event))
 		intel_pmu_lbr_disable(event);
 
 	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
@@ -1268,7 +1254,7 @@ static void intel_pmu_enable_event(struct perf_event *event)
 	 * must enabled before any actual event
 	 * because any event may be combined with LBR
 	 */
-	if (intel_pmu_needs_lbr_smpl(event))
+	if (needs_branch_stack(event))
 		intel_pmu_lbr_enable(event);
 
 	if (event->attr.exclude_host)
@@ -1747,7 +1733,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
 	if (event->attr.precise_ip && x86_pmu.pebs_aliases)
 		x86_pmu.pebs_aliases(event);
 
-	if (intel_pmu_needs_lbr_smpl(event)) {
+	if (needs_branch_stack(event)) {
 		ret = intel_pmu_setup_lbr_filter(event);
 		if (ret)
 			return ret;
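For illustration only (not part of this patch): a hypothetical user-space sketch of the explicit path described in the commit message, where requesting branch sampling via perf_event_open() is what sets attr.branch_sample_type and thus makes needs_branch_stack() true in the kernel:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 100000;
	/* ask for branch records in each sample */
	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK;
	/* the explicit user request: branch_sample_type becomes non-zero */
	attr.branch_sample_type = PERF_SAMPLE_BRANCH_ANY | PERF_SAMPLE_BRANCH_USER;
	attr.exclude_kernel = 1;	/* easier to run unprivileged */

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	/* ... mmap the ring buffer and consume samples here ... */
	close(fd);
	return 0;
}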