author    Ingo Molnar <mingo@elte.hu>  2009-05-15 02:26:20 -0400
committer Ingo Molnar <mingo@elte.hu>  2009-05-15 03:47:06 -0400
commit    9029a5e3801f1cc7cdaab80169d82427acf928d8 (patch)
tree      e2acb45f70744ffd5c47c31be67e482df2ef17c4 /arch/x86/kernel/cpu/perf_counter.c
parent    1c80f4b598d9b075a2a0be694e28be93a6702bcc (diff)
perf_counter: x86: Protect against infinite loops in intel_pmu_handle_irq()
intel_pmu_handle_irq() can lock up in an infinite loop if the hardware
does not allow the acking of irqs. Alas, this happened in testing so
make this robust and emit a warning if it happens in the future.

Also, clean up the IRQ handlers a bit.

[ Impact: improve perfcounter irq/nmi handling robustness ]

Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
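For context, the guard added by this patch follows a common bounded-retry
pattern: handle the pending events, ack the status register, re-read it in
case new events arrived meanwhile, and loop — but cap the number of passes so
hardware that never lets the irq be acked cannot wedge the CPU in NMI context.
Below is a minimal standalone sketch of that pattern, not the patch itself:
handle_irq_bounded(), read_status(), ack_status() and process_events() are
hypothetical stand-ins for intel_pmu_handle_irq(), intel_pmu_get_status(),
intel_pmu_ack_status() and the per-bit counter loop; WARN_ONCE() is the real
kernel macro.

	/* Hypothetical helpers standing in for the PMU-specific calls. */
	static u64 read_status(void);
	static void ack_status(u64 status);
	static void process_events(u64 status);

	static int handle_irq_bounded(void)
	{
		u64 status;
		int loops = 0;

		status = read_status();
		if (!status)
			return 0;	/* nothing pending, irq was not ours */

	again:
		/* Bail out if the hardware never lets the irq be acked. */
		if (++loops > 100) {
			WARN_ONCE(1, "perfcounters: irq loop stuck!\n");
			return 1;
		}

		process_events(status);
		ack_status(status);

		/* New events may have arrived while handling this batch. */
		status = read_status();
		if (status)
			goto again;

		return 1;
	}

The cap of 100 passes is generous: a healthy PMU clears its status within a
pass or two, so tripping the warning points at stuck hardware rather than at
a busy counter.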
Diffstat (limited to 'arch/x86/kernel/cpu/perf_counter.c')
-rw-r--r--   arch/x86/kernel/cpu/perf_counter.c   25
1 file changed, 18 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 46a82d1e4cbe..5a7f718eb1e1 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -722,9 +722,13 @@ static void intel_pmu_save_and_restart(struct perf_counter *counter)
  */
 static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
 {
-	int bit, cpu = smp_processor_id();
+	struct cpu_hw_counters *cpuc;
+	struct cpu_hw_counters;
+	int bit, cpu, loops;
 	u64 ack, status;
-	struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu);
+
+	cpu = smp_processor_id();
+	cpuc = &per_cpu(cpu_hw_counters, cpu);
 
 	perf_disable();
 	status = intel_pmu_get_status();
@@ -733,7 +737,13 @@ static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
 		return 0;
 	}
 
+	loops = 0;
 again:
+	if (++loops > 100) {
+		WARN_ONCE(1, "perfcounters: irq loop stuck!\n");
+		return 1;
+	}
+
 	inc_irq_stat(apic_perf_irqs);
 	ack = status;
 	for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
@@ -765,13 +775,14 @@ again:
 
 static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi)
 {
-	int cpu = smp_processor_id();
-	struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu);
-	u64 val;
-	int handled = 0;
+	int cpu, idx, throttle = 0, handled = 0;
+	struct cpu_hw_counters *cpuc;
 	struct perf_counter *counter;
 	struct hw_perf_counter *hwc;
-	int idx, throttle = 0;
+	u64 val;
+
+	cpu = smp_processor_id();
+	cpuc = &per_cpu(cpu_hw_counters, cpu);
 
 	if (++cpuc->interrupts == PERFMON_MAX_INTERRUPTS) {
 		throttle = 1;