aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/perf_counter.c
diff options
context:
space:
mode:
authorPeter Zijlstra <a.p.zijlstra@chello.nl>2009-04-06 05:45:04 -0400
committerIngo Molnar <mingo@elte.hu>2009-04-07 04:48:56 -0400
commitf6c7d5fe58b4846ee0cb4b98b6042489705eced4 (patch)
treeb37bda884e0740489269da5ddc3401ffa61f076e /kernel/perf_counter.c
parentb6276f353bf490add62dcf7db0ebd75baa3e1a37 (diff)
perf_counter: there's more to overflow than writing events
Prepare for more generic overflow handling. The new perf_counter_overflow() method will handle the generic bits of the counter overflow, and can return a !0 return value, in which case the counter should be (soft) disabled, so that it won't count until it's properly disabled.

XXX: do powerpc and swcounter

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <20090406094517.812109629@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r--kernel/perf_counter.c29
1 file changed, 23 insertions(+), 6 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 0a2ade2e4f11..195e976eb07d 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -1800,8 +1800,8 @@ static void perf_output_end(struct perf_output_handle *handle)
 	rcu_read_unlock();
 }
 
-void perf_counter_output(struct perf_counter *counter,
-			 int nmi, struct pt_regs *regs)
+static void perf_counter_output(struct perf_counter *counter,
+				int nmi, struct pt_regs *regs)
 {
 	int ret;
 	u64 record_type = counter->hw_event.record_type;
@@ -2034,6 +2034,17 @@ void perf_counter_munmap(unsigned long addr, unsigned long len,
 }
 
 /*
+ * Generic counter overflow handling.
+ */
+
+int perf_counter_overflow(struct perf_counter *counter,
+			  int nmi, struct pt_regs *regs)
+{
+	perf_counter_output(counter, nmi, regs);
+	return 0;
+}
+
+/*
  * Generic software counter infrastructure
  */
 
@@ -2077,6 +2088,7 @@ static void perf_swcounter_set_period(struct perf_counter *counter)
 
 static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
 {
+	enum hrtimer_restart ret = HRTIMER_RESTART;
 	struct perf_counter *counter;
 	struct pt_regs *regs;
 
@@ -2092,12 +2104,14 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
 		    !counter->hw_event.exclude_user)
 			regs = task_pt_regs(current);
 
-	if (regs)
-		perf_counter_output(counter, 0, regs);
+	if (regs) {
+		if (perf_counter_overflow(counter, 0, regs))
+			ret = HRTIMER_NORESTART;
+	}
 
 	hrtimer_forward_now(hrtimer, ns_to_ktime(counter->hw.irq_period));
 
-	return HRTIMER_RESTART;
+	return ret;
 }
 
 static void perf_swcounter_overflow(struct perf_counter *counter,
@@ -2105,7 +2119,10 @@ static void perf_swcounter_overflow(struct perf_counter *counter,
 {
 	perf_swcounter_update(counter);
 	perf_swcounter_set_period(counter);
-	perf_counter_output(counter, nmi, regs);
+	if (perf_counter_overflow(counter, nmi, regs))
+		/* soft-disable the counter */
+		;
+
 }
 
 static int perf_swcounter_match(struct perf_counter *counter,