aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86
diff options
context:
space:
mode:
authorCyrill Gorcunov <gorcunov@openvz.org>2010-08-05 11:09:17 -0400
committerIngo Molnar <mingo@elte.hu>2010-08-08 16:53:50 -0400
commit1c250d709fdc8aa5bf42d90be99428a01a256a55 (patch)
treee71c6d304b12017a034a6ad26468abe296ea5a6c /arch/x86
parentef8f34aabf2450a9fb36b2c87fe0ea0b86a38195 (diff)
perf, x86: P4 PMU -- update nmi irq statistics and unmask lvt entry properly
In case the last active performance counter has not overflowed at the moment the NMI is triggered by another counter, the irq statistics may miss an update stage. As a more serious consequence -- the APIC quirk may not be triggered, so the APIC LVT entry stays masked. Tested-by: Lin Ming <ming.m.lin@intel.com> Signed-off-by: Cyrill Gorcunov <gorcunov@openvz.org> Cc: Stephane Eranian <eranian@google.com> Cc: Peter Zijlstra <a.p.zijlstra@chello.nl> Cc: Frederic Weisbecker <fweisbec@gmail.com> LKML-Reference: <20100805150917.GA6311@lenovo> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86')
-rw-r--r--arch/x86/kernel/cpu/perf_event_p4.c9
1 file changed, 6 insertions, 3 deletions
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index 107711bf0ee8..febb12cea795 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -656,6 +656,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
 	cpuc = &__get_cpu_var(cpu_hw_events);
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+		int overflow;
 
 		if (!test_bit(idx, cpuc->active_mask))
 			continue;
@@ -666,12 +667,14 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
 		WARN_ON_ONCE(hwc->idx != idx);
 
 		/* it might be unflagged overflow */
-		handled = p4_pmu_clear_cccr_ovf(hwc);
+		overflow = p4_pmu_clear_cccr_ovf(hwc);
 
 		val = x86_perf_event_update(event);
-		if (!handled && (val & (1ULL << (x86_pmu.cntval_bits - 1))))
+		if (!overflow && (val & (1ULL << (x86_pmu.cntval_bits - 1))))
 			continue;
 
+		handled += overflow;
+
 		/* event overflow for sure */
 		data.period = event->hw.last_period;
 
@@ -687,7 +690,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
 		inc_irq_stat(apic_perf_irqs);
 	}
 
-	return handled;
+	return handled > 0;
 }
 
 /*