author    Yan, Zheng <zheng.z.yan@intel.com>    2014-11-04 21:56:04 -0500
committer Ingo Molnar <mingo@kernel.org>        2015-02-18 11:16:09 -0500
commit    63f0c1d84196b712fe9de99a8514486ab416d517
tree      513ecf7a3e73b59fb9224b26b3a797a7738d0de3 /arch/x86
parent    e18bf526422769611e7248135e36a4cea0e4e38d
perf/x86/intel: Track number of events that use the LBR callstack
When enabling/disabling an event, check whether the event uses the LBR
callstack feature and adjust the LBR callstack usage count accordingly.
A later patch will use this count to decide whether the LBR stack
should be saved/restored.

Signed-off-by: Yan, Zheng <zheng.z.yan@intel.com>
Signed-off-by: Kan Liang <kan.liang@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: eranian@google.com
Cc: jolsa@redhat.com
Link: http://lkml.kernel.org/r/1415156173-10035-9-git-send-email-kan.liang@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
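In short, the patch reference-counts, per task context, the events whose
branch selection asks for user-space call-stack capture. Below is a
minimal userspace sketch of that counting pattern; the X86_BR_* bit
values, struct task_context, and the event_enable()/event_disable()
helpers are simplified stand-ins invented for illustration, not the
kernel's definitions.

/*
 * Toy model of the usage-count pattern this patch adds.
 * All names and values here are illustrative stand-ins.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define X86_BR_USER       (1 << 0)  /* stand-in: user-space branch filter  */
#define X86_BR_CALL_STACK (1 << 1)  /* stand-in: LBR call-stack mode bit   */

struct task_context {
	int lbr_callstack_users;  /* events in this context using LBR callstack */
};

/* Mirrors branch_user_callstack() from the patch. */
static inline bool branch_user_callstack(unsigned br_sel)
{
	return (br_sel & X86_BR_USER) && (br_sel & X86_BR_CALL_STACK);
}

static void event_enable(struct task_context *ctx, unsigned br_sel)
{
	if (branch_user_callstack(br_sel) && ctx)
		ctx->lbr_callstack_users++;
}

static void event_disable(struct task_context *ctx, unsigned br_sel)
{
	if (branch_user_callstack(br_sel) && ctx)
		ctx->lbr_callstack_users--;
}

int main(void)
{
	struct task_context ctx = { 0 };
	unsigned br_sel = X86_BR_USER | X86_BR_CALL_STACK;

	event_enable(&ctx, br_sel);   /* count goes to 1 */
	event_enable(&ctx, br_sel);   /* second event: count goes to 2 */
	event_disable(&ctx, br_sel);  /* back to 1: stack still in use */

	/* A nonzero count means the LBR stack is still needed. */
	printf("lbr_callstack_users = %d\n", ctx.lbr_callstack_users);
	assert(ctx.lbr_callstack_users == 1);
	return 0;
}

The kernel variant differs in that the count lives in
event->ctx->task_ctx_data and is only touched when that per-task data
actually exists, as the diff below shows.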
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_lbr.c | 19 +++++++++++++++++++
1 file changed, 19 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index ac8279e560a1..ac8e54200934 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -205,9 +205,15 @@ void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
 	}
 }
 
+static inline bool branch_user_callstack(unsigned br_sel)
+{
+	return (br_sel & X86_BR_USER) && (br_sel & X86_BR_CALL_STACK);
+}
+
 void intel_pmu_lbr_enable(struct perf_event *event)
 {
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+	struct x86_perf_task_context *task_ctx;
 
 	if (!x86_pmu.lbr_nr)
 		return;
@@ -222,6 +228,12 @@ void intel_pmu_lbr_enable(struct perf_event *event)
 	}
 	cpuc->br_sel = event->hw.branch_reg.reg;
 
+	if (branch_user_callstack(cpuc->br_sel) && event->ctx &&
+	    event->ctx->task_ctx_data) {
+		task_ctx = event->ctx->task_ctx_data;
+		task_ctx->lbr_callstack_users++;
+	}
+
 	cpuc->lbr_users++;
 	perf_sched_cb_inc(event->ctx->pmu);
 }
@@ -229,10 +241,17 @@ void intel_pmu_lbr_enable(struct perf_event *event)
 void intel_pmu_lbr_disable(struct perf_event *event)
 {
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+	struct x86_perf_task_context *task_ctx;
 
 	if (!x86_pmu.lbr_nr)
 		return;
 
+	if (branch_user_callstack(cpuc->br_sel) && event->ctx &&
+	    event->ctx->task_ctx_data) {
+		task_ctx = event->ctx->task_ctx_data;
+		task_ctx->lbr_callstack_users--;
+	}
+
 	cpuc->lbr_users--;
 	WARN_ON_ONCE(cpuc->lbr_users < 0);
 	perf_sched_cb_dec(event->ctx->pmu);
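The commit message defers the payoff to a later patch: the counter lets
the context-switch path skip LBR save/restore work whenever no event in
the task context uses the call stack. A hedged sketch of such a
consumer follows; lbr_save() and lbr_restore() are imaginary stand-ins
for the real save/restore machinery, and the whole routine is an
illustration, not the follow-up patch's actual code.

/*
 * Hypothetical consumer of lbr_callstack_users on a context switch.
 * All names below are illustrative stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

struct task_context {
	int lbr_callstack_users;
};

static void lbr_save(struct task_context *ctx)
{
	(void)ctx;
	puts("save LBR stack");
}

static void lbr_restore(struct task_context *ctx)
{
	(void)ctx;
	puts("restore LBR stack");
}

static void lbr_sched_task(struct task_context *ctx, bool sched_in)
{
	/* Nothing to do unless some event still uses the LBR call stack. */
	if (!ctx || ctx->lbr_callstack_users == 0)
		return;

	if (sched_in)
		lbr_restore(ctx);
	else
		lbr_save(ctx);
}

int main(void)
{
	struct task_context idle = { .lbr_callstack_users = 0 };
	struct task_context busy = { .lbr_callstack_users = 2 };

	lbr_sched_task(&idle, true);   /* prints nothing: count is zero */
	lbr_sched_task(&busy, true);   /* "restore LBR stack" */
	lbr_sched_task(&busy, false);  /* "save LBR stack" */
	return 0;
}

The design point is cheap short-circuiting: a context switch only pays
for LBR state management while the per-task count is nonzero.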