path: root/arch/x86/kernel/cpu/perf_event_p4.c
Diffstat (limited to 'arch/x86/kernel/cpu/perf_event_p4.c')
-rw-r--r--  arch/x86/kernel/cpu/perf_event_p4.c | 41
1 file changed, 21 insertions(+), 20 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index 424fc8de68e4..ae85d69644d1 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -465,15 +465,21 @@ out:
 	return rc;
 }
 
-static inline void p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc)
+static inline int p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc)
 {
-	unsigned long dummy;
+	int overflow = 0;
+	u32 low, high;
 
-	rdmsrl(hwc->config_base + hwc->idx, dummy);
-	if (dummy & P4_CCCR_OVF) {
+	rdmsr(hwc->config_base + hwc->idx, low, high);
+
+	/* we need to check high bit for unflagged overflows */
+	if ((low & P4_CCCR_OVF) || !(high & (1 << 31))) {
+		overflow = 1;
 		(void)checking_wrmsrl(hwc->config_base + hwc->idx,
-			((u64)dummy) & ~P4_CCCR_OVF);
+			((u64)low) & ~P4_CCCR_OVF);
 	}
+
+	return overflow;
 }
 
 static inline void p4_pmu_disable_event(struct perf_event *event)
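A side note on the accessor change above: rdmsrl() hands back the MSR contents as one 64-bit value, while rdmsr() splits it into two 32-bit halves, so bit 31 of the high half is bit 63 of the whole register and the P4_CCCR_OVF flag lives in the low half. A standalone sketch of that mapping (plain user-space C, not kernel code; the MSR value is made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Made-up 64-bit MSR image, only to show the rdmsr()-style split. */
        uint64_t msr = 0x8000000080001234ULL;

        uint32_t low  = (uint32_t)msr;         /* bits  0..31, where the CCCR flags sit */
        uint32_t high = (uint32_t)(msr >> 32); /* bits 32..63 */

        /* Bit 31 of 'high' is bit 63 of the full register. */
        printf("high bit set: %d\n", !!(high & (1u << 31)));

        /* Recombining the halves gives back the rdmsrl()-style value. */
        uint64_t whole = ((uint64_t)high << 32) | low;
        printf("halves recombine correctly: %d\n", whole == msr);
        return 0;
}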
@@ -584,21 +590,15 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
 
 		WARN_ON_ONCE(hwc->idx != idx);
 
-		/*
-		 * FIXME: Redundant call, actually not needed
-		 * but just to check if we're screwed
-		 */
-		p4_pmu_clear_cccr_ovf(hwc);
+		/* it might be unflagged overflow */
+		handled = p4_pmu_clear_cccr_ovf(hwc);
 
 		val = x86_perf_event_update(event);
-		if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
+		if (!handled && (val & (1ULL << (x86_pmu.cntval_bits - 1))))
 			continue;
 
-		/*
-		 * event overflow
-		 */
-		handled = 1;
-		data.period = event->hw.last_period;
+		/* event overflow for sure */
+		data.period = event->hw.last_period;
 
 		if (!x86_perf_event_set_period(event))
 			continue;
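For readers not familiar with the overflow test retained above: the perf core arms the counter with a negative value (the negated remaining period), so the counter's top bit stays set while it is still counting up toward zero and reads as clear once it has wrapped. That is what val & (1ULL << (x86_pmu.cntval_bits - 1)) keys off, with P4 counters being 40 bits wide. A minimal user-space sketch of the convention, with the width hard-coded as an assumption:

#include <stdio.h>
#include <stdint.h>

#define CNTVAL_BITS 40                          /* assumed P4 counter width */
#define CNTVAL_MASK ((1ULL << CNTVAL_BITS) - 1)

int main(void)
{
        uint64_t period = 100000;

        /* Arm the counter with -period (truncated to the counter width),
         * so it reaches zero after 'period' events and raises the PMI. */
        uint64_t counter = (uint64_t)(-(int64_t)period) & CNTVAL_MASK;
        printf("armed:      top bit = %d\n",
               (int)((counter >> (CNTVAL_BITS - 1)) & 1));  /* 1: no overflow yet */

        /* Simulate counting past zero: the value wraps within 40 bits. */
        counter = (counter + period + 123) & CNTVAL_MASK;
        printf("overflowed: top bit = %d\n",
               (int)((counter >> (CNTVAL_BITS - 1)) & 1));  /* 0: overflow happened */

        return 0;
}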
@@ -670,7 +670,7 @@ static void p4_pmu_swap_config_ts(struct hw_perf_event *hwc, int cpu)
 
 /*
  * ESCR address hashing is tricky, ESCRs are not sequential
- * in memory but all starts from MSR_P4_BSU_ESCR0 (0x03e0) and
+ * in memory but all starts from MSR_P4_BSU_ESCR0 (0x03a0) and
  * the metric between any ESCRs is laid in range [0xa0,0xe1]
  *
  * so we make ~70% filled hashtable
@@ -735,8 +735,9 @@ static int p4_get_escr_idx(unsigned int addr)
 {
 	unsigned int idx = P4_ESCR_MSR_IDX(addr);
 
 	if (unlikely(idx >= P4_ESCR_MSR_TABLE_SIZE ||
-			!p4_escr_table[idx])) {
+			!p4_escr_table[idx] ||
+			p4_escr_table[idx] != addr)) {
 		WARN_ONCE(1, "P4 PMU: Wrong address passed: %x\n", addr);
 		return -1;
 	}
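The hardened lookup above rejects an ESCR MSR address whose table slot is empty or holds a different address, not just one whose index falls outside the table. A rough illustration of the scheme (a sketch with made-up constants and a tiny table; the kernel's own P4_ESCR_MSR_IDX() and p4_escr_table[] may differ, only the 0x3a0 base is taken from the comment above):

#include <stdio.h>

/* Assumed illustrative values: ESCR MSRs start at MSR_P4_BSU_ESCR0 (0x3a0)
 * and occupy a small, sparsely populated address window. */
#define ESCR_MSR_BASE   0x3a0
#define ESCR_TABLE_SIZE 0x42

static const unsigned int escr_table[ESCR_TABLE_SIZE] = {
        [0x3a0 - ESCR_MSR_BASE] = 0x3a0,  /* populated slots map index -> MSR address */
        [0x3c0 - ESCR_MSR_BASE] = 0x3c0,
        /* every other slot stays 0, meaning "no ESCR here" */
};

/* Same shape as the patched check: an out-of-range index, an empty slot,
 * or a slot that stores a different address all fail the lookup. */
static int escr_idx(unsigned int addr)
{
        unsigned int idx = addr - ESCR_MSR_BASE;

        if (idx >= ESCR_TABLE_SIZE || !escr_table[idx] || escr_table[idx] != addr)
                return -1;
        return idx;
}

int main(void)
{
        printf("0x3c0 -> %d\n", escr_idx(0x3c0)); /* valid ESCR */
        printf("0x3c1 -> %d\n", escr_idx(0x3c1)); /* hole in the table -> -1 */
        printf("0x100 -> %d\n", escr_idx(0x100)); /* below base, wraps out of range -> -1 */
        return 0;
}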
@@ -762,7 +763,7 @@ static int p4_pmu_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign
 {
 	unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
 	unsigned long escr_mask[BITS_TO_LONGS(P4_ESCR_MSR_TABLE_SIZE)];
-	int cpu = raw_smp_processor_id();
+	int cpu = smp_processor_id();
 	struct hw_perf_event *hwc;
 	struct p4_event_bind *bind;
 	unsigned int i, thread, num;