aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux/perf_event.h
diff options
context:
space:
mode:
authorPeter Zijlstra <a.p.zijlstra@chello.nl>2010-01-26 12:50:16 -0500
committerGreg Kroah-Hartman <gregkh@suse.de>2010-03-15 12:06:17 -0400
commit21a6adcde06e129b055caa3256e65a97a2986770 (patch)
tree56663f2682b5114b92335c7c53ce26e1449ac8cf /include/linux/perf_event.h
parent69cb5f7cdc28a5352a03c16bbaa0a92cdf31b9d4 (diff)
perf: Reimplement frequency driven sampling
commit abd50713944c8ea9e0af5b7bffa0aacae21cc91a upstream. There was a bug in the old period code that caused intel_pmu_enable_all() or native_write_msr_safe() to show up quite high in the profiles. In staring at that code it made my head hurt, so I rewrote it in a hopefully simpler fashion. Its now fully symmetric between tick and overflow driven adjustments and uses less data to boot. The only complication is that it basically wants to do a u128 division. The code approximates that in a rather simple truncate until it fits fashion, taking care to balance the terms while truncating. This version does not generate that sampling artefact. Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Signed-off-by: Ingo Molnar <mingo@elte.hu> Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'include/linux/perf_event.h')
-rw-r--r--include/linux/perf_event.h5
1 files changed, 2 insertions, 3 deletions
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index a177698d95e2..c8ea0c77a625 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -496,9 +496,8 @@ struct hw_perf_event {
496 atomic64_t period_left; 496 atomic64_t period_left;
497 u64 interrupts; 497 u64 interrupts;
498 498
499 u64 freq_count; 499 u64 freq_time_stamp;
500 u64 freq_interrupts; 500 u64 freq_count_stamp;
501 u64 freq_stamp;
502#endif 501#endif
503}; 502};
504 503