path: root/arch/x86/kernel/cpu/perf_event_intel_lbr.c
author     Andi Kleen <ak@linux.intel.com>           2015-10-20 14:46:33 -0400
committer  Ingo Molnar <mingo@kernel.org>            2015-11-23 03:44:57 -0500
commit     b28ae9560b693bcd2e9f4d6d9c415d5380b7c3c5 (patch)
tree       44d175d2ef3b8a3a596d93510f8f31db4d3cd5e6 /arch/x86/kernel/cpu/perf_event_intel_lbr.c
parent     daecbd267daad36c1cd90254f502610f82efcb30 (diff)
perf/x86: Fix LBR call stack save/restore
This fixes a bug I added in the following commit:

  90405aa02247 ("perf/x86/intel/lbr: Limit LBR accesses to TOS in callstack mode")

The bug could lead to lost LBR call stacks. When restoring the LBR state
we need to use the TOS of the previous context, not the current context.
To do that we need to save/restore the TOS.

Signed-off-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: acme@kernel.org
Cc: jolsa@kernel.org
Link: http://lkml.kernel.org/r/1445366797-30894-1-git-send-email-andi@firstfloor.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
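The reasoning above is easier to see in a condensed, standalone sketch. Nothing below is kernel code: the struct, the hw_tos variable, the sketch_* names and the elided MSR loops are placeholders invented for illustration, and the tos field merely mirrors the one the struct definition gains elsewhere in the same commit (that hunk is not in this file, so it does not appear in this filtered diffstat).

/*
 * Standalone sketch (not the kernel source): why the restore path must use
 * the TOS saved at sched-out rather than the live hardware TOS. All names
 * are placeholders; the real change is the small diff below.
 */
#include <stdio.h>

#define SKETCH_LBR_NR 16

static unsigned int hw_tos;                 /* stand-in for the hardware TOS register */

struct sketch_task_context {
        unsigned long long lbr_from[SKETCH_LBR_NR];
        unsigned long long lbr_to[SKETCH_LBR_NR];
        unsigned int tos;                   /* saved TOS, the field this fix relies on */
};

static void sketch_lbr_save(struct sketch_task_context *ctx)
{
        unsigned int tos = hw_tos;          /* depth of this task's call stack at sched-out */

        /* ... loop reading the LBR MSRs into ctx->lbr_from/lbr_to ... */
        ctx->tos = tos;                     /* the new save step */
}

static void sketch_lbr_restore(struct sketch_task_context *ctx)
{
        /*
         * Reading hw_tos here would reproduce the bug: it still reflects the
         * task that ran last, so this task's entries would be restored
         * relative to the wrong top-of-stack. Use the saved TOS instead.
         */
        unsigned int tos = ctx->tos;

        /* ... loop writing ctx->lbr_from/lbr_to back to the LBR MSRs ... */
        hw_tos = tos;                       /* the new restore step */
}

int main(void)
{
        struct sketch_task_context ctx = { .tos = 0 };

        hw_tos = 5;                         /* 5 LBR entries live when this task schedules out */
        sketch_lbr_save(&ctx);
        hw_tos = 2;                         /* another task ran and moved the TOS */
        sketch_lbr_restore(&ctx);
        printf("restored TOS = %u (not 2)\n", hw_tos);
        return 0;
}

Running the sketch prints a restored TOS of 5: the value recorded for the task being scheduled back in, not the value left behind by whatever ran in between.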
Diffstat (limited to 'arch/x86/kernel/cpu/perf_event_intel_lbr.c')
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_lbr.c  4
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index bfd0b717e944..659f01e165d5 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -239,7 +239,7 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
 	}
 
 	mask = x86_pmu.lbr_nr - 1;
-	tos = intel_pmu_lbr_tos();
+	tos = task_ctx->tos;
 	for (i = 0; i < tos; i++) {
 		lbr_idx = (tos - i) & mask;
 		wrmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
@@ -247,6 +247,7 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
 		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
 			wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
 	}
+	wrmsrl(x86_pmu.lbr_tos, tos);
 	task_ctx->lbr_stack_state = LBR_NONE;
 }
 
@@ -270,6 +271,7 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
 		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
 			rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
 	}
+	task_ctx->tos = tos;
 	task_ctx->lbr_stack_state = LBR_VALID;
 }
 