about summary refs log tree commit diff stats
path: root/arch/x86/kernel/cpu/perf_counter.c
diff options
context:
space:
mode:
author	Ingo Molnar <mingo@elte.hu>	2009-05-13 06:54:01 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-05-15 03:46:56 -0400
commit	f5a5a2f6e69e88647ae12da39f0ff3a510bcf0a6 (patch)
tree	85f7777f0b306fb553a3efc825b5337d301a8e34 /arch/x86/kernel/cpu/perf_counter.c
parent	ec3232bdf8518bea8410f0027f870b24d3aa8753 (diff)
perf_counter: x86: Fix throttling
If counters are disabled globally when a perfcounter IRQ/NMI hits, and if we throttle in that case, we'll promote the '0' value to the next lapic IRQ and disable all perfcounters at that point, permanently ... Fix it. [ Impact: fix hung perfcounters under load ] Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Cc: Paul Mackerras <paulus@samba.org> Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com> LKML-Reference: <new-submission> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/cpu/perf_counter.c')
-rw-r--r--	arch/x86/kernel/cpu/perf_counter.c	20
1 files changed, 15 insertions, 5 deletions
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 3a92a2b2a80f..88ae8cebf3c1 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -765,8 +765,13 @@ out:
 	/*
 	 * Restore - do not reenable when global enable is off or throttled:
 	 */
-	if (++cpuc->interrupts < PERFMON_MAX_INTERRUPTS)
-		intel_pmu_restore_all(cpuc->throttle_ctrl);
+	if (cpuc->throttle_ctrl) {
+		if (++cpuc->interrupts < PERFMON_MAX_INTERRUPTS) {
+			intel_pmu_restore_all(cpuc->throttle_ctrl);
+		} else {
+			pr_info("CPU#%d: perfcounters: max interrupt rate exceeded! Throttle on.\n", smp_processor_id());
+		}
+	}
 
 	return ret;
 }
@@ -817,11 +822,16 @@ void perf_counter_unthrottle(void)
 
 	cpuc = &__get_cpu_var(cpu_hw_counters);
 	if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS) {
-		if (printk_ratelimit())
-			printk(KERN_WARNING "perfcounters: max interrupts exceeded!\n");
+		pr_info("CPU#%d: perfcounters: throttle off.\n", smp_processor_id());
+
+		/*
+		 * Clear them before re-enabling irqs/NMIs again:
+		 */
+		cpuc->interrupts = 0;
 		hw_perf_restore(cpuc->throttle_ctrl);
+	} else {
+		cpuc->interrupts = 0;
 	}
-	cpuc->interrupts = 0;
 }
 
 void smp_perf_counter_interrupt(struct pt_regs *regs)