 arch/x86/kernel/cpu/perf_event.h          | 3 ++-
 arch/x86/kernel/cpu/perf_event_intel.c    | 2 ++
 arch/x86/kernel/cpu/perf_event_intel_ds.c | 8 ++++----
 3 files changed, 8 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 23b5710b174..3df3de9452a 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -69,7 +69,7 @@ struct amd_nb {
 };
 
 /* The maximal number of PEBS events: */
-#define MAX_PEBS_EVENTS		4
+#define MAX_PEBS_EVENTS		8
 
 /*
  * A debug store configuration.
@@ -378,6 +378,7 @@ struct x86_pmu {
 	void		(*drain_pebs)(struct pt_regs *regs);
 	struct event_constraint *pebs_constraints;
 	void		(*pebs_aliases)(struct perf_event *event);
+	int		max_pebs_events;
 
 	/*
 	 * Intel LBR
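
Note on the header change: raising MAX_PEBS_EVENTS from 4 to 8 only grows the compile-time ceiling used to size per-counter PEBS storage; the new max_pebs_events field is what actually bounds use at runtime. A minimal standalone sketch of that split, using hypothetical fake_* stand-ins rather than the kernel structs:

	#include <assert.h>
	#include <stdio.h>

	#define MAX_PEBS_EVENTS 8	/* compile-time ceiling, as in the header */

	/* Hypothetical mini version of the split: the ceiling sizes storage,
	 * the runtime field bounds how much of it a given PMU actually uses. */
	struct fake_debug_store {
		unsigned long long pebs_event_reset[MAX_PEBS_EVENTS];
	};

	struct fake_x86_pmu {
		int max_pebs_events;	/* runtime cap, set at init time */
	};

	int main(void)
	{
		struct fake_debug_store ds = { { 0 } };
		struct fake_x86_pmu pmu = { .max_pebs_events = 4 };	/* e.g. an older 4-counter PMU */
		int i;

		/* Invariant the patch relies on: the runtime cap never
		 * exceeds the storage the ceiling allocated. */
		assert(pmu.max_pebs_events <= MAX_PEBS_EVENTS);

		/* Only the first max_pebs_events slots are ever touched. */
		for (i = 0; i < pmu.max_pebs_events; i++)
			ds.pebs_event_reset[i] = 0;

		printf("using %d of %d PEBS slots\n", pmu.max_pebs_events,
		       MAX_PEBS_EVENTS);
		return 0;
	}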
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 187c294bc65..e23e71f2526 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1800,6 +1800,8 @@ __init int intel_pmu_init(void)
 	x86_pmu.events_maskl		= ebx.full;
 	x86_pmu.events_mask_len		= eax.split.mask_length;
 
+	x86_pmu.max_pebs_events		= min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
+
 	/*
 	 * Quirk: v2 perfmon does not report fixed-purpose events, so
 	 * assume at least 3 events:
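
min_t(type, a, b) is the kernel's type-checked minimum, so the added line clamps the PEBS ceiling to however many general-purpose counters CPUID reported for this PMU. A userspace sketch of the same clamp, with min_t swapped for a plain helper and illustrative counter counts:

	#include <stdio.h>

	#define MAX_PEBS_EVENTS 8

	/* Stand-in for min_t(unsigned, a, b): compare both as unsigned. */
	static unsigned int min_unsigned(unsigned int a, unsigned int b)
	{
		return a < b ? a : b;
	}

	int main(void)
	{
		/* num_counters comes from CPUID in the real code; these
		 * values are illustrative. */
		unsigned int counters[] = { 2, 4, 8 };
		unsigned int i;

		for (i = 0; i < sizeof(counters) / sizeof(counters[0]); i++)
			printf("num_counters=%u -> max_pebs_events=%u\n",
			       counters[i],
			       min_unsigned(MAX_PEBS_EVENTS, counters[i]));
		/* Prints 2, 4, 8: smaller PMUs keep their smaller cap,
		 * 8-counter PMUs get the new ceiling. */
		return 0;
	}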
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 35e2192df9f..026373edef7 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -620,7 +620,7 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
 	 * Should not happen, we program the threshold at 1 and do not
 	 * set a reset value.
 	 */
-	WARN_ON_ONCE(n > 1);
+	WARN_ONCE(n > 1, "bad leftover pebs %d\n", n);
 	at += n - 1;
 
 	__intel_pmu_pebs_event(event, iregs, at);
@@ -651,10 +651,10 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
 	 * Should not happen, we program the threshold at 1 and do not
 	 * set a reset value.
 	 */
-	WARN_ON_ONCE(n > MAX_PEBS_EVENTS);
+	WARN_ONCE(n > x86_pmu.max_pebs_events, "Unexpected number of pebs records %d\n", n);
 
 	for ( ; at < top; at++) {
-		for_each_set_bit(bit, (unsigned long *)&at->status, MAX_PEBS_EVENTS) {
+		for_each_set_bit(bit, (unsigned long *)&at->status, x86_pmu.max_pebs_events) {
 			event = cpuc->events[bit];
 			if (!test_bit(bit, cpuc->active_mask))
 				continue;
@@ -670,7 +670,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
 			break;
 		}
 
-		if (!event || bit >= MAX_PEBS_EVENTS)
+		if (!event || bit >= x86_pmu.max_pebs_events)
 			continue;
 
 		__intel_pmu_pebs_event(event, iregs, at);
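
The drain loop walks each PEBS record's status bitmap and dispatches to the event occupying each set counter bit; bounding the walk by x86_pmu.max_pebs_events rather than the compile-time constant keeps bits above the PMU's real counter count from being misread on CPUs with fewer than 8 counters. A standalone sketch of that bit walk, with a plain loop standing in for the kernel's for_each_set_bit and made-up event names:

	#include <stdio.h>

	#define MAX_PEBS_EVENTS 8

	struct fake_event { const char *name; };

	int main(void)
	{
		/* One registered event per counter slot; NULL = inactive. */
		struct fake_event ev1 = { "cycles" }, ev3 = { "instructions" };
		struct fake_event *events[MAX_PEBS_EVENTS] = {
			[1] = &ev1, [3] = &ev3,
		};
		unsigned long long status = 0x0a;	/* record: bits 1 and 3 fired */
		int max_pebs_events = 4;		/* runtime cap on this PMU */
		int bit;

		/* Equivalent of for_each_set_bit(bit, &status, max_pebs_events) */
		for (bit = 0; bit < max_pebs_events; bit++) {
			if (!(status & (1ULL << bit)))
				continue;
			if (!events[bit])	/* mirrors the active_mask test */
				continue;
			printf("PEBS record for counter %d: %s\n", bit,
			       events[bit]->name);
		}
		return 0;
	}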