 arch/x86/kernel/cpu/perf_event.h           |  6 +++++-
 arch/x86/kernel/cpu/perf_event_intel.c     | 11 ++++++++++-
 arch/x86/kernel/cpu/perf_event_intel_ds.c  | 32 ++++++++++++++++++++++++++++++++
 arch/x86/kernel/cpu/perf_event_intel_lbr.c |  3 ---
 4 files changed, 47 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index a73dfc97226b..74089bcb6d74 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -92,9 +92,11 @@ struct amd_nb {
 /*
  * Flags PEBS can handle without an PMI.
  *
+ * TID can only be handled by flushing at context switch.
+ *
  */
 #define PEBS_FREERUNNING_FLAGS \
-	(PERF_SAMPLE_IP | PERF_SAMPLE_ADDR | \
+	(PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ADDR | \
 	PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID | \
 	PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER | \
 	PERF_SAMPLE_TRANSACTION)
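Note: the mask above is what gates free-running mode; an event qualifies only when every requested sample flag can be taken straight from the PEBS record, with no PMI per sample, and adding PERF_SAMPLE_TID is only safe because of the context-switch flush introduced below. A minimal sketch of that gating check, with a hypothetical helper name (the actual setup code is outside this patch):

	/* Hypothetical helper, illustration only: the event requests no
	 * sample flag beyond PEBS_FREERUNNING_FLAGS, so every field can
	 * be reconstructed from the record without an immediate PMI. */
	static bool can_use_freerunning_pebs(struct perf_event *event)
	{
		return !(event->attr.sample_type & ~PEBS_FREERUNNING_FLAGS);
	}
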
@@ -877,6 +879,8 @@ void intel_pmu_pebs_enable_all(void);
 
 void intel_pmu_pebs_disable_all(void);
 
+void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in);
+
 void intel_ds_init(void);
 
 void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 6985f43c5eb9..d455e2a61287 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2642,6 +2642,15 @@ static void intel_pmu_cpu_dying(int cpu)
 	fini_debug_store_on_cpu(cpu);
 }
 
+static void intel_pmu_sched_task(struct perf_event_context *ctx,
+				 bool sched_in)
+{
+	if (x86_pmu.pebs_active)
+		intel_pmu_pebs_sched_task(ctx, sched_in);
+	if (x86_pmu.lbr_nr)
+		intel_pmu_lbr_sched_task(ctx, sched_in);
+}
+
 PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
 
 PMU_FORMAT_ATTR(ldlat, "config1:0-15");
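Note: intel_pmu_sched_task() only runs while some event has registered interest through perf_sched_cb_inc(); the PEBS enable/disable paths below pair those calls. A simplified model of the assumed core-side bookkeeping (illustration only; the real implementation lives in the generic perf code in kernel/events/core.c):

	/* Simplified model: a per-CPU refcount gates the context-switch
	 * callback so PMUs with no interested events pay nothing. */
	static DEFINE_PER_CPU(int, sched_cb_refcount);

	static void sched_cb_inc_sketch(void)
	{
		this_cpu_inc(sched_cb_refcount);
	}

	static void sched_cb_dec_sketch(void)
	{
		this_cpu_dec(sched_cb_refcount);
	}

	/* On a task switch the core invokes pmu->sched_task() only when
	 * the refcount is non-zero. */
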
@@ -2731,7 +2740,7 @@ static __initconst const struct x86_pmu intel_pmu = {
 	.cpu_starting		= intel_pmu_cpu_starting,
 	.cpu_dying		= intel_pmu_cpu_dying,
 	.guest_get_msrs		= intel_guest_get_msrs,
-	.sched_task		= intel_pmu_lbr_sched_task,
+	.sched_task		= intel_pmu_sched_task,
 };
 
 static __init void intel_clovertown_quirk(void)
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 0ce455d958b8..62852470ccbb 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -546,6 +546,19 @@ int intel_pmu_drain_bts_buffer(void)
 	return 1;
 }
 
+static inline void intel_pmu_drain_pebs_buffer(void)
+{
+	struct pt_regs regs;
+
+	x86_pmu.drain_pebs(&regs);
+}
+
+void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in)
+{
+	if (!sched_in)
+		intel_pmu_drain_pebs_buffer();
+}
+
 /*
  * PEBS
  */
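Note: flushing on switch-out is what makes PERF_SAMPLE_TID trustworthy in free-running mode: buffered records are turned into samples while the outgoing task is still current (there is no interrupt frame at a context switch, hence the dummy pt_regs handed to drain_pebs()). From userspace, an event that benefits might be opened like this (illustrative only; error handling omitted):

	#include <linux/perf_event.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/* A PEBS-capable event sampling IP and TID. With this patch it
	 * can use the multi-record buffer instead of taking a PMI per
	 * sample. */
	static int open_pebs_tid_event(void)
	{
		struct perf_event_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_HARDWARE;
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		attr.sample_period = 100003;
		attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
		attr.precise_ip = 2;	/* request PEBS */

		return syscall(__NR_perf_event_open, &attr,
			       0, -1, -1, 0);	/* self, any CPU */
	}
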
@@ -711,8 +724,19 @@ void intel_pmu_pebs_enable(struct perf_event *event)
 	if (hwc->flags & PERF_X86_EVENT_FREERUNNING) {
 		threshold = ds->pebs_absolute_maximum -
 			x86_pmu.max_pebs_events * x86_pmu.pebs_record_size;
+
+		if (first_pebs)
+			perf_sched_cb_inc(event->ctx->pmu);
 	} else {
 		threshold = ds->pebs_buffer_base + x86_pmu.pebs_record_size;
+
+		/*
+		 * If not all events can use larger buffer,
+		 * roll back to threshold = 1
+		 */
+		if (!first_pebs &&
+		    (ds->pebs_interrupt_threshold > threshold))
+			perf_sched_cb_dec(event->ctx->pmu);
 	}
 
 	/* Use auto-reload if possible to save a MSR write in the PMI */
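Note: the two branches choose how far the PEBS index may advance before a PMI. Restated as a single helper with a worked example (illustration only; the concrete sizes are assumptions, not values from this patch):

	/* With, say, a 64 KiB DS area, 192-byte records and
	 * max_pebs_events == 8 (assumed numbers), the free-running
	 * threshold sits 8 * 192 = 1536 bytes below the end of the
	 * buffer, so a PMI fires only when it is nearly full; the
	 * single-record threshold sits one record past the base, so a
	 * PMI fires after every record, as before. */
	static u64 pebs_threshold_example(struct debug_store *ds,
					  bool freerunning)
	{
		if (freerunning)
			return ds->pebs_absolute_maximum -
			       x86_pmu.max_pebs_events * x86_pmu.pebs_record_size;

		return ds->pebs_buffer_base + x86_pmu.pebs_record_size;
	}
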
@@ -729,6 +753,7 @@ void intel_pmu_pebs_disable(struct perf_event *event)
 {
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
+	struct debug_store *ds = cpuc->ds;
 
 	cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
 
@@ -737,6 +762,13 @@ void intel_pmu_pebs_disable(struct perf_event *event)
 	else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
 		cpuc->pebs_enabled &= ~(1ULL << 63);
 
+	if (ds->pebs_interrupt_threshold >
+	    ds->pebs_buffer_base + x86_pmu.pebs_record_size) {
+		intel_pmu_drain_pebs_buffer();
+		if (!pebs_is_enabled(cpuc))
+			perf_sched_cb_dec(event->ctx->pmu);
+	}
+
 	if (cpuc->enabled)
 		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
 
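Note: the disable path drains before dropping the callback so no buffered records are stranded, and it relies on a pebs_is_enabled() helper that does not appear in this diff. Its assumed shape (defined elsewhere in this file or series; a guess for the reader, not code from the patch):

	/* Assumed shape, not shown in this diff: true while any of the
	 * low per-counter PEBS enable bits remain set. */
	static inline bool pebs_is_enabled(struct cpu_hw_events *cpuc)
	{
		return cpuc->pebs_enabled & ((1ULL << MAX_PEBS_EVENTS) - 1);
	}
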
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index 201e16f6655a..452a7bd2dedb 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -264,9 +264,6 @@ void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct x86_perf_task_context *task_ctx;
 
-	if (!x86_pmu.lbr_nr)
-		return;
-
 	/*
 	 * If LBR callstack feature is enabled and the stack was saved when
 	 * the task was scheduled out, restore the stack. Otherwise flush
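Note: the lbr_nr guard is not lost; it moved into intel_pmu_sched_task() in perf_event_intel.c, which now owns the capability checks for both PEBS and LBR. Any other direct caller would need its own guard, e.g. (hypothetical illustration):

	/* Hypothetical direct caller: with the guard hoisted into
	 * intel_pmu_sched_task(), checking x86_pmu.lbr_nr is now the
	 * caller's job. */
	static void lbr_sched_task_checked(struct perf_event_context *ctx,
					   bool sched_in)
	{
		if (x86_pmu.lbr_nr)
			intel_pmu_lbr_sched_task(ctx, sched_in);
	}

Keeping the dispatcher responsible for both checks mirrors the x86_pmu.pebs_active test added there and keeps the leaf functions unconditional.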