about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
authorPeter Zijlstra <a.p.zijlstra@chello.nl>2009-06-10 07:40:57 -0400
committerIngo Molnar <mingo@elte.hu>2009-06-10 10:55:26 -0400
commitbd2b5b12849a3446abad0b25e920f86f5480b309 (patch)
treeb0eacf6002f2015c0483390619a3f874bcb7e7d2 /arch
parentdc81081b2d9a6a9d64dad1bef1e5fc9fb660e53e (diff)
perf_counter: More aggressive frequency adjustment
Also employ the overflow handler to adjust the frequency; this results in a stable frequency in about 40–50 samples, instead of that many ticks.

This also means we can start sampling at a sample period of 1 without running head-first into the throttle.

It relies on sched_clock() to accurately measure the time difference between the overflow NMIs.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch')
-rw-r--r--arch/x86/kernel/cpu/perf_counter.c5
1 file changed, 3 insertions, 2 deletions
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 49f258537cbf..240ca5630632 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -696,10 +696,11 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
696 if (!attr->exclude_kernel) 696 if (!attr->exclude_kernel)
697 hwc->config |= ARCH_PERFMON_EVENTSEL_OS; 697 hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
698 698
699 if (!hwc->sample_period) 699 if (!hwc->sample_period) {
700 hwc->sample_period = x86_pmu.max_period; 700 hwc->sample_period = x86_pmu.max_period;
701 atomic64_set(&hwc->period_left, hwc->sample_period);
702 }
701 703
702 atomic64_set(&hwc->period_left, hwc->sample_period);
703 counter->destroy = hw_perf_counter_destroy; 704 counter->destroy = hw_perf_counter_destroy;
704 705
705 /* 706 /*