diff options
author:    Peter Zijlstra <a.p.zijlstra@chello.nl>  2010-05-18 12:08:32 -0400
committer: Ingo Molnar <mingo@elte.hu>              2010-05-18 12:35:46 -0400
commit:    4f41c013f553957765902fb01475972f0af3e8e7 (patch)
tree:      ddaa54947cc990094a4b270f2f8b3d6da195044f /kernel/trace/trace_event_perf.c
parent:    ef4f30f54e265c2f6f9ac9eda4db158a4e16050b (diff)
perf/ftrace: Optimize perf/tracepoint interaction for single events
When we've got but a single event per tracepoint
there is no reason to try and multiplex it so don't.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Tested-by: Ingo Molnar <mingo@elte.hu>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/trace/trace_event_perf.c')
-rw-r--r--  kernel/trace/trace_event_perf.c | 11 ++++++++---
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 0565bb42566f..89b780a7c522 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -27,13 +27,15 @@ typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)]) | |||
27 | /* Count the events in use (per event id, not per instance) */ | 27 | /* Count the events in use (per event id, not per instance) */ |
28 | static int total_ref_count; | 28 | static int total_ref_count; |
29 | 29 | ||
30 | static int perf_trace_event_enable(struct ftrace_event_call *event) | 30 | static int perf_trace_event_enable(struct ftrace_event_call *event, void *data) |
31 | { | 31 | { |
32 | char *buf; | 32 | char *buf; |
33 | int ret = -ENOMEM; | 33 | int ret = -ENOMEM; |
34 | 34 | ||
35 | if (event->perf_refcount++ > 0) | 35 | if (event->perf_refcount++ > 0) { |
36 | event->perf_data = NULL; | ||
36 | return 0; | 37 | return 0; |
38 | } | ||
37 | 39 | ||
38 | if (!total_ref_count) { | 40 | if (!total_ref_count) { |
39 | buf = (char *)alloc_percpu(perf_trace_t); | 41 | buf = (char *)alloc_percpu(perf_trace_t); |
@@ -51,6 +53,7 @@ static int perf_trace_event_enable(struct ftrace_event_call *event) | |||
51 | 53 | ||
52 | ret = event->perf_event_enable(event); | 54 | ret = event->perf_event_enable(event); |
53 | if (!ret) { | 55 | if (!ret) { |
56 | event->perf_data = data; | ||
54 | total_ref_count++; | 57 | total_ref_count++; |
55 | return 0; | 58 | return 0; |
56 | } | 59 | } |
@@ -68,7 +71,7 @@ fail_buf: | |||
68 | return ret; | 71 | return ret; |
69 | } | 72 | } |
70 | 73 | ||
71 | int perf_trace_enable(int event_id) | 74 | int perf_trace_enable(int event_id, void *data) |
72 | { | 75 | { |
73 | struct ftrace_event_call *event; | 76 | struct ftrace_event_call *event; |
74 | int ret = -EINVAL; | 77 | int ret = -EINVAL; |
@@ -77,7 +80,7 @@ int perf_trace_enable(int event_id) | |||
77 | list_for_each_entry(event, &ftrace_events, list) { | 80 | list_for_each_entry(event, &ftrace_events, list) { |
78 | if (event->id == event_id && event->perf_event_enable && | 81 | if (event->id == event_id && event->perf_event_enable && |
79 | try_module_get(event->mod)) { | 82 | try_module_get(event->mod)) { |
80 | ret = perf_trace_event_enable(event); | 83 | ret = perf_trace_event_enable(event, data); |
81 | break; | 84 | break; |
82 | } | 85 | } |
83 | } | 86 | } |