author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-05-25 11:39:04 -0400
committer	Ingo Molnar <mingo@elte.hu>			2009-05-25 15:41:12 -0400
commit		48e22d56ecdeddd1ffb42a02fccba5c6ef42b133 (patch)
tree		43f69f34e888053a9a8b6405995a2ae9f5b173e2 /arch/x86/kernel
parent		ff99be573e02e9f7edc23b472c7f9a5ddba12795 (diff)
perf_counter: x86: Remove interrupt throttle
Remove the x86-specific interrupt throttle.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <20090525153931.616671838@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--	arch/x86/kernel/apic/apic.c		 2
-rw-r--r--	arch/x86/kernel/cpu/perf_counter.c	47
2 files changed, 5 insertions, 44 deletions
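For context, the throttle being removed worked as follows: each PMU interrupt incremented a per-CPU count, and once the count reached PERFMON_MAX_INTERRUPTS (the number of interrupts per timer tick that corresponds to 100KHz at the current HZ) the handler disabled the PMU; the local APIC timer tick then called perf_counter_unthrottle() to reset the count and re-enable it. Below is a condensed sketch of that mechanism, pieced together from the removed hunks that follow; it is not a buildable excerpt (cpu_hw_counters, perf_enable() and friends are the kernel's own, and the x86_pmu_initialized() guard is elided):

	#define PERFMON_MAX_INTERRUPTS (100000/HZ)	/* cap: ~100KHz of PMU irqs per CPU */

	/* In the PMU interrupt handler: count irqs, shut the PMU off at the cap. */
	if (++cpuc->interrupts == PERFMON_MAX_INTERRUPTS) {
		__perf_disable();
		cpuc->enabled = 0;
		barrier();
	}

	/* Once per tick, from local_apic_timer_interrupt(): reopen the window. */
	void perf_counter_unthrottle(void)
	{
		struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);

		if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS)
			perf_enable();		/* was throttled: re-enable the PMU */
		cpuc->interrupts = 0;		/* reset the per-tick budget either way */
	}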
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index b4f64402a82a..89b63b5fad33 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -763,8 +763,6 @@ static void local_apic_timer_interrupt(void)
 	inc_irq_stat(apic_timer_irqs);
 
 	evt->event_handler(evt);
-
-	perf_counter_unthrottle();
 }
 
 /*
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index c14437faf5d2..8c8177f859fe 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -719,11 +719,6 @@ static void intel_pmu_save_and_restart(struct perf_counter *counter)
 }
 
 /*
- * Maximum interrupt frequency of 100KHz per CPU
- */
-#define PERFMON_MAX_INTERRUPTS (100000/HZ)
-
-/*
  * This handler is triggered by the local APIC, so the APIC IRQ handling
  * rules apply:
  */
@@ -775,15 +770,14 @@ again:
 	if (status)
 		goto again;
 
-	if (++cpuc->interrupts != PERFMON_MAX_INTERRUPTS)
-		perf_enable();
+	perf_enable();
 
 	return 1;
 }
 
 static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi)
 {
-	int cpu, idx, throttle = 0, handled = 0;
+	int cpu, idx, handled = 0;
 	struct cpu_hw_counters *cpuc;
 	struct perf_counter *counter;
 	struct hw_perf_counter *hwc;
@@ -792,16 +786,7 @@ static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi)
 	cpu = smp_processor_id();
 	cpuc = &per_cpu(cpu_hw_counters, cpu);
 
-	if (++cpuc->interrupts == PERFMON_MAX_INTERRUPTS) {
-		throttle = 1;
-		__perf_disable();
-		cpuc->enabled = 0;
-		barrier();
-	}
-
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
-		int disable = 0;
-
 		if (!test_bit(idx, cpuc->active_mask))
 			continue;
 
@@ -809,45 +794,23 @@ static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi)
 		hwc = &counter->hw;
 
 		if (counter->hw_event.nmi != nmi)
-			goto next;
+			continue;
 
 		val = x86_perf_counter_update(counter, hwc, idx);
 		if (val & (1ULL << (x86_pmu.counter_bits - 1)))
-			goto next;
+			continue;
 
 		/* counter overflow */
 		x86_perf_counter_set_period(counter, hwc, idx);
 		handled = 1;
 		inc_irq_stat(apic_perf_irqs);
-		disable = perf_counter_overflow(counter, nmi, regs, 0);
-
-	next:
-		if (disable || throttle)
+		if (perf_counter_overflow(counter, nmi, regs, 0))
 			amd_pmu_disable_counter(hwc, idx);
 	}
 
 	return handled;
 }
 
-void perf_counter_unthrottle(void)
-{
-	struct cpu_hw_counters *cpuc;
-
-	if (!x86_pmu_initialized())
-		return;
-
-	cpuc = &__get_cpu_var(cpu_hw_counters);
-	if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS) {
-		/*
-		 * Clear them before re-enabling irqs/NMIs again:
-		 */
-		cpuc->interrupts = 0;
-		perf_enable();
-	} else {
-		cpuc->interrupts = 0;
-	}
-}
-
 void smp_perf_counter_interrupt(struct pt_regs *regs)
 {
 	irq_enter();
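With the throttle gone, the overflow path in amd_pmu_handle_irq() reads straight through. The following is condensed from the new (right-hand) side of the hunks above; the counter lookup line sits just outside the hunk context and is assumed here:

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		if (!test_bit(idx, cpuc->active_mask))
			continue;

		counter = cpuc->counters[idx];	/* assumed: lookup is above the hunk */
		hwc = &counter->hw;

		if (counter->hw_event.nmi != nmi)
			continue;

		val = x86_perf_counter_update(counter, hwc, idx);
		if (val & (1ULL << (x86_pmu.counter_bits - 1)))
			continue;		/* sign bit still set: no overflow yet */

		/* counter overflow */
		x86_perf_counter_set_period(counter, hwc, idx);
		handled = 1;
		inc_irq_stat(apic_perf_irqs);
		if (perf_counter_overflow(counter, nmi, regs, 0))
			amd_pmu_disable_counter(hwc, idx);
	}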