author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-05-13 07:21:36 -0400
committer	Ingo Molnar <mingo@elte.hu>			2009-05-15 03:47:01 -0400
commit		962bf7a66edca4d36a730a38ff8410a67f560e40 (patch)
tree		86a22c33a9daed37db6afccfa5ed01e06ea5c00e
parent		53020fe81eecd0b7be295868ce5850ef8f41074e (diff)
perf_counter: x86: Fix up the amd NMI/INT throttle
perf_counter_unthrottle() restores throttle_ctrl, but it is never set.
Also, we fail to disable all counters when throttling.
[ Impact: fix rare stuck perf-counters when they are throttled ]
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
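
[Editor's note] For readers tracing the fix outside the kernel tree, the sketch below is a minimal user-space model of the throttle bookkeeping this patch introduces: the NMI handler snapshots cpuc->enabled into cpuc->throttle_ctrl (the value the unthrottle path later restores), counts interrupts, keeps the PMU off once PERFMON_MAX_INTERRUPTS is reached, and re-enables it only when not throttled. The struct and function names mirror the kernel ones, but the MSR writes and barrier() are replaced with a plain flag and the PERFMON_MAX_INTERRUPTS value is a small demo constant, so treat it as an illustration of the state machine, not the driver code.

/*
 * Minimal user-space model of the throttle bookkeeping added by this patch.
 * Names mirror the kernel code, but MSR accesses are replaced by the
 * 'enabled' flag and PERFMON_MAX_INTERRUPTS is shrunk so the demo throttles
 * quickly -- an illustration only, not the driver itself.
 */
#include <stdio.h>

#define PERFMON_MAX_INTERRUPTS	3	/* demo value, not the kernel's */

struct cpu_hw_counters {
	int enabled;		/* is the PMU globally enabled? */
	int throttle_ctrl;	/* snapshot of 'enabled' taken in the NMI handler */
	int interrupts;		/* overflow NMIs seen since the last tick */
};

/* Overflow NMI: snapshot the enable state, count, throttle when over budget. */
static void handle_irq(struct cpu_hw_counters *cpuc)
{
	int throttle = 0;

	cpuc->throttle_ctrl = cpuc->enabled;	/* what unthrottle() will restore */
	cpuc->enabled = 0;			/* the kernel adds a barrier() here */

	if (cpuc->throttle_ctrl &&
	    ++cpuc->interrupts >= PERFMON_MAX_INTERRUPTS)
		throttle = 1;

	/* ... per-counter overflow handling would run here ... */

	/* Re-enable only when not throttled; otherwise stay off until the tick. */
	if (cpuc->throttle_ctrl && !throttle)
		cpuc->enabled = 1;
}

/* Timer tick: restore the state the NMI handler saved, reset the budget. */
static void unthrottle(struct cpu_hw_counters *cpuc)
{
	if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS) {
		printf("unthrottling\n");
		cpuc->enabled = cpuc->throttle_ctrl;
	}
	cpuc->interrupts = 0;
}

int main(void)
{
	struct cpu_hw_counters cpuc = { .enabled = 1 };
	int i;

	for (i = 0; i < 5; i++) {
		if (cpuc.enabled)	/* a disabled PMU raises no overflow NMIs */
			handle_irq(&cpuc);
		printf("after irq %d: enabled=%d interrupts=%d\n",
		       i, cpuc.enabled, cpuc.interrupts);
	}

	unthrottle(&cpuc);
	printf("after tick:  enabled=%d interrupts=%d\n",
	       cpuc.enabled, cpuc.interrupts);
	return 0;
}

In the pre-patch behaviour throttle_ctrl was never written in the interrupt handler, so the restore in the unthrottle path had nothing valid to put back and the counters could stay off, which is the "stuck perf-counters" case the changelog describes.
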
-rw-r--r--	arch/x86/kernel/cpu/perf_counter.c	38
1 file changed, 26 insertions(+), 12 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index c19e927b6979..7601c014f8f6 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -334,6 +334,8 @@ static u64 amd_pmu_save_disable_all(void)
 	 * right thing.
 	 */
 	barrier();
+	if (!enabled)
+		goto out;
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 		u64 val;
@@ -347,6 +349,7 @@ static u64 amd_pmu_save_disable_all(void)
 		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
 	}
 
+out:
 	return enabled;
 }
 
@@ -787,32 +790,43 @@ static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi)
 	int handled = 0;
 	struct perf_counter *counter;
 	struct hw_perf_counter *hwc;
-	int idx;
+	int idx, throttle = 0;
+
+	cpuc->throttle_ctrl = cpuc->enabled;
+	cpuc->enabled = 0;
+	barrier();
+
+	if (cpuc->throttle_ctrl) {
+		if (++cpuc->interrupts >= PERFMON_MAX_INTERRUPTS)
+			throttle = 1;
+	}
 
-	++cpuc->interrupts;
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+		int disable = 0;
+
 		if (!test_bit(idx, cpuc->active_mask))
 			continue;
+
 		counter = cpuc->counters[idx];
 		hwc = &counter->hw;
 		val = x86_perf_counter_update(counter, hwc, idx);
 		if (val & (1ULL << (x86_pmu.counter_bits - 1)))
-			continue;
+			goto next;
+
 		/* counter overflow */
 		x86_perf_counter_set_period(counter, hwc, idx);
 		handled = 1;
 		inc_irq_stat(apic_perf_irqs);
-		if (perf_counter_overflow(counter, nmi, regs, 0))
-			amd_pmu_disable_counter(hwc, idx);
-		else if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS)
-			/*
-			 * do not reenable when throttled, but reload
-			 * the register
-			 */
+		disable = perf_counter_overflow(counter, nmi, regs, 0);
+
+	next:
+		if (disable || throttle)
 			amd_pmu_disable_counter(hwc, idx);
-		else if (counter->state == PERF_COUNTER_STATE_ACTIVE)
-			amd_pmu_enable_counter(hwc, idx);
 	}
+
+	if (cpuc->throttle_ctrl && !throttle)
+		cpuc->enabled = 1;
+
 	return handled;
 }
 