 arch/x86/events/intel/core.c |  6 ++----
 arch/x86/events/intel/ds.c   | 14 ++++++++------
 arch/x86/events/intel/lbr.c  |  4 ++++
 3 files changed, 14 insertions(+), 10 deletions(-)
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index ede97710c2f4..98b0f0729527 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3397,10 +3397,8 @@ static void intel_pmu_cpu_dying(int cpu)
 static void intel_pmu_sched_task(struct perf_event_context *ctx,
 				 bool sched_in)
 {
-	if (x86_pmu.pebs_active)
-		intel_pmu_pebs_sched_task(ctx, sched_in);
-	if (x86_pmu.lbr_nr)
-		intel_pmu_lbr_sched_task(ctx, sched_in);
+	intel_pmu_pebs_sched_task(ctx, sched_in);
+	intel_pmu_lbr_sched_task(ctx, sched_in);
 }
 
 PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 6dc8a59e1bfb..a322fed5f8ed 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -606,12 +606,6 @@ static inline void intel_pmu_drain_pebs_buffer(void)
 	x86_pmu.drain_pebs(&regs);
 }
 
-void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in)
-{
-	if (!sched_in)
-		intel_pmu_drain_pebs_buffer();
-}
-
 /*
  * PEBS
  */
@@ -822,6 +816,14 @@ static inline bool pebs_needs_sched_cb(struct cpu_hw_events *cpuc)
 	return cpuc->n_pebs && (cpuc->n_pebs == cpuc->n_large_pebs);
 }
 
+void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in)
+{
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+
+	if (!sched_in && pebs_needs_sched_cb(cpuc))
+		intel_pmu_drain_pebs_buffer();
+}
+
 static inline void pebs_update_threshold(struct cpu_hw_events *cpuc)
 {
 	struct debug_store *ds = cpuc->ds;
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index eb261656a320..955457a30197 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -380,8 +380,12 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
 
 void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
 {
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct x86_perf_task_context *task_ctx;
 
+	if (!cpuc->lbr_users)
+		return;
+
 	/*
 	 * If LBR callstack feature is enabled and the stack was saved when
 	 * the task was scheduled out, restore the stack. Otherwise flush
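
The sketch below is not part of the patch; it is a standalone userspace illustration of the pattern the diff adopts: each sched_task callback checks its own per-CPU bookkeeping (pebs_needs_sched_cb(), lbr_users) and returns early when it has nothing to do, rather than the caller testing global feature flags. All names and the printf placeholders are illustrative only.

/*
 * Standalone sketch (not kernel code) of per-CPU gating in
 * scheduling callbacks. "this_cpu" stands in for this_cpu_ptr().
 */
#include <stdbool.h>
#include <stdio.h>

struct cpu_state {
	int lbr_users;		/* events using LBR on this CPU */
	int n_pebs;		/* PEBS events on this CPU */
	int n_large_pebs;	/* PEBS events using the large buffer */
};

static struct cpu_state this_cpu;

static bool pebs_needs_sched_cb(const struct cpu_state *c)
{
	/* drain only when every PEBS event uses the large buffer */
	return c->n_pebs && (c->n_pebs == c->n_large_pebs);
}

static void pebs_sched_task(bool sched_in)
{
	if (!sched_in && pebs_needs_sched_cb(&this_cpu))
		printf("draining PEBS buffer\n");
}

static void lbr_sched_task(bool sched_in)
{
	if (!this_cpu.lbr_users)
		return;		/* nothing to save or restore */
	printf("%s LBR stack\n", sched_in ? "restoring" : "saving");
}

int main(void)
{
	/* with no users, both callbacks are cheap no-ops */
	pebs_sched_task(false);
	lbr_sched_task(false);

	/* simulate one large-PEBS event and one LBR user */
	this_cpu.n_pebs = this_cpu.n_large_pebs = 1;
	this_cpu.lbr_users = 1;
	pebs_sched_task(false);
	lbr_sched_task(true);
	return 0;
}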