diff options
author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2009-03-19 15:26:17 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-04-06 03:30:16 -0400 |
commit | e077df4f439681e43f0db8255b2d215b342ebdc6 (patch) | |
tree | 2deebf630405b09cee8d43056ecd110f4bb0fb70 /kernel/perf_counter.c | |
parent | f16009527595ee562308653bc3d0039166d2ab15 (diff) |
perf_counter: hook up the tracepoint events
Impact: new perfcounters feature
Enable usage of tracepoints as perf counter events.
tracepoint event ids can be found in /debug/tracing/event/*/*/id
and (for now) are represented as -65536+id in the type field.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Orig-LKML-Reference: <20090319194233.744044174@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r-- | kernel/perf_counter.c | 43 |
1 file changed, 43 insertions(+), 0 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 97f891ffeb40..0bbe3e45ba0d 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c | |||
@@ -1152,6 +1152,9 @@ static void free_counter_rcu(struct rcu_head *head) | |||
1152 | 1152 | ||
1153 | static void free_counter(struct perf_counter *counter) | 1153 | static void free_counter(struct perf_counter *counter) |
1154 | { | 1154 | { |
1155 | if (counter->destroy) | ||
1156 | counter->destroy(counter); | ||
1157 | |||
1155 | call_rcu(&counter->rcu_head, free_counter_rcu); | 1158 | call_rcu(&counter->rcu_head, free_counter_rcu); |
1156 | } | 1159 | } |
1157 | 1160 | ||
@@ -1727,6 +1730,45 @@ static const struct hw_perf_counter_ops perf_ops_cpu_migrations = { | |||
1727 | .read = cpu_migrations_perf_counter_read, | 1730 | .read = cpu_migrations_perf_counter_read, |
1728 | }; | 1731 | }; |
1729 | 1732 | ||
1733 | #ifdef CONFIG_EVENT_PROFILE | ||
1734 | void perf_tpcounter_event(int event_id) | ||
1735 | { | ||
1736 | perf_swcounter_event(PERF_TP_EVENTS_MIN + event_id, 1, 1, | ||
1737 | task_pt_regs(current)); | ||
1738 | } | ||
1739 | |||
1740 | extern int ftrace_profile_enable(int); | ||
1741 | extern void ftrace_profile_disable(int); | ||
1742 | |||
1743 | static void tp_perf_counter_destroy(struct perf_counter *counter) | ||
1744 | { | ||
1745 | int event_id = counter->hw_event.type - PERF_TP_EVENTS_MIN; | ||
1746 | |||
1747 | ftrace_profile_disable(event_id); | ||
1748 | } | ||
1749 | |||
1750 | static const struct hw_perf_counter_ops * | ||
1751 | tp_perf_counter_init(struct perf_counter *counter) | ||
1752 | { | ||
1753 | int event_id = counter->hw_event.type - PERF_TP_EVENTS_MIN; | ||
1754 | int ret; | ||
1755 | |||
1756 | ret = ftrace_profile_enable(event_id); | ||
1757 | if (ret) | ||
1758 | return NULL; | ||
1759 | |||
1760 | counter->destroy = tp_perf_counter_destroy; | ||
1761 | |||
1762 | return &perf_ops_generic; | ||
1763 | } | ||
1764 | #else | ||
1765 | static const struct hw_perf_counter_ops * | ||
1766 | tp_perf_counter_init(struct perf_counter *counter) | ||
1767 | { | ||
1768 | return NULL; | ||
1769 | } | ||
1770 | #endif | ||
1771 | |||
1730 | static const struct hw_perf_counter_ops * | 1772 | static const struct hw_perf_counter_ops * |
1731 | sw_perf_counter_init(struct perf_counter *counter) | 1773 | sw_perf_counter_init(struct perf_counter *counter) |
1732 | { | 1774 | { |
@@ -1772,6 +1814,7 @@ sw_perf_counter_init(struct perf_counter *counter) | |||
1772 | hw_ops = &perf_ops_cpu_migrations; | 1814 | hw_ops = &perf_ops_cpu_migrations; |
1773 | break; | 1815 | break; |
1774 | default: | 1816 | default: |
1817 | hw_ops = tp_perf_counter_init(counter); | ||
1775 | break; | 1818 | break; |
1776 | } | 1819 | } |
1777 | 1820 | ||