author	Ingo Molnar <mingo@elte.hu>	2009-06-03 16:19:36 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-06-03 17:39:51 -0400
commit	128f048f0f0d2a477ad2555e7acd2ad15a1b6061 (patch)
tree	5927a26da343af2189e47b497eec38680f19294f
parent	233f0b95ca3a0d1dcbd70bc7e519069a8e10d23e (diff)
perf_counter: Fix throttling lock-up
Throttling logic is broken and we can lock up with too small
hw sampling intervals.

Make the throttling code more robust: disable counters even
if we already disabled them.

( Also clean up whitespace damage I noticed while reading
  various pieces of code related to throttling. )

Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
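To make the intent concrete, here is a minimal, standalone sketch of the decision the fixed code takes on every counter overflow (see the kernel/perf_counter.c hunk below). The names MAX_INTERRUPTS, HZ and sysctl_perf_counter_limit mirror the diff; the simplified hw_state struct, the constant values chosen, and the plain int return convention (1 = disable the counter) are assumptions made for illustration only, not kernel code.

/*
 * Hypothetical, self-contained model of the throttling decision; not
 * the kernel function itself. Returning 1 means "disable the counter".
 */
#include <stdint.h>

#define MAX_INTERRUPTS	(~0ULL)		/* sentinel meaning "throttled" */

static const uint64_t HZ = 1000;			/* assumed tick rate */
static const uint64_t sysctl_perf_counter_limit = 100000;	/* assumed sysctl value */

struct hw_state {
	uint64_t interrupts;	/* overflow interrupts seen this tick */
};

static int throttle_check(struct hw_state *hw, int throttle)
{
	if (!throttle) {
		hw->interrupts++;
		return 0;
	}

	if (hw->interrupts != MAX_INTERRUPTS) {
		hw->interrupts++;
		if (HZ * hw->interrupts > sysctl_perf_counter_limit) {
			hw->interrupts = MAX_INTERRUPTS;
			return 1;	/* rate limit hit: throttle the counter */
		}
		return 0;
	}

	/*
	 * Already throttled: keep requesting a disable in case a racing
	 * sched-in re-enabled the counter - this is the actual fix.
	 */
	return 1;
}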
-rw-r--r--	arch/x86/kernel/cpu/perf_counter.c	2
-rw-r--r--	kernel/perf_counter.c	19
2 files changed, 15 insertions, 6 deletions
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 12cc05ed9f48..8f53f3a7da29 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -91,7 +91,7 @@ static u64 intel_pmu_raw_event(u64 event)
 #define CORE_EVNTSEL_INV_MASK		0x00800000ULL
 #define CORE_EVNTSEL_COUNTER_MASK	0xFF000000ULL
 
-#define CORE_EVNTSEL_MASK 		\
+#define CORE_EVNTSEL_MASK		\
 	(CORE_EVNTSEL_EVENT_MASK |	\
 	 CORE_EVNTSEL_UNIT_MASK  |	\
 	 CORE_EVNTSEL_EDGE_MASK  |	\
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index ab4455447f84..0bb03f15a5b6 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -2822,11 +2822,20 @@ int perf_counter_overflow(struct perf_counter *counter,
 
 	if (!throttle) {
 		counter->hw.interrupts++;
-	} else if (counter->hw.interrupts != MAX_INTERRUPTS) {
-		counter->hw.interrupts++;
-		if (HZ*counter->hw.interrupts > (u64)sysctl_perf_counter_limit) {
-			counter->hw.interrupts = MAX_INTERRUPTS;
-			perf_log_throttle(counter, 0);
+	} else {
+		if (counter->hw.interrupts != MAX_INTERRUPTS) {
+			counter->hw.interrupts++;
+			if (HZ*counter->hw.interrupts > (u64)sysctl_perf_counter_limit) {
+				counter->hw.interrupts = MAX_INTERRUPTS;
+				perf_log_throttle(counter, 0);
+				ret = 1;
+			}
+		} else {
+			/*
+			 * Keep re-disabling counters even though on the previous
+			 * pass we disabled it - just in case we raced with a
+			 * sched-in and the counter got enabled again:
+			 */
 			ret = 1;
 		}
 	}
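A small usage example building on the hypothetical throttle_check() sketch given after the commit message above. It shows the behavioural change the patch makes: once the rate limit trips, every further throttled overflow still requests a disable, whereas the old else-if structure had no branch for an already-throttled counter and left ret at 0.

#include <assert.h>

int main(void)
{
	struct hw_state hw = { .interrupts = 0 };

	/* Drive throttled overflows until the rate limit trips. */
	while (!throttle_check(&hw, 1))
		;
	assert(hw.interrupts == MAX_INTERRUPTS);

	/*
	 * The old code never re-entered the throttled path, so a counter
	 * re-enabled by a racing sched-in was left running. The fixed
	 * logic keeps asking for a disable on every subsequent overflow.
	 */
	assert(throttle_check(&hw, 1) == 1);
	assert(throttle_check(&hw, 1) == 1);

	return 0;
}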