| author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-09-21 12:05:47 -0400 |
| --- | --- | --- |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-09-21 12:05:47 -0400 |
| commit | bd4c3a3441144cd46d1f544046523724c5bc6e94 (patch) | |
| tree | 8b5c67249a7a163caf3f88cbcb9df5236fcc3b93 /kernel/trace/trace_event_profile.c | |
| parent | b3727c24da69971503a4ca98b3b877753c6a4393 (diff) | |
| parent | 583a22e7c154dc0a3938db522696b4bc7f098f59 (diff) | |
Merge branch 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
kernel/profile.c: Switch /proc/irq/prof_cpu_mask to seq_file
tracing: Export trace_profile_buf symbols
tracing/events: use list_for_each_entry_continue
tracing: remove max_tracer_type_len
function-graph: use ftrace_graph_funcs directly
tracing: Remove markers
tracing: Allocate the ftrace event profile buffer dynamically
tracing: Factorize the events profile accounting
Diffstat (limited to 'kernel/trace/trace_event_profile.c')
-rw-r--r-- | kernel/trace/trace_event_profile.c | 82 |
1 file changed, 80 insertions(+), 2 deletions(-)
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c
index 55a25c933d15..dd44b8768867 100644
--- a/kernel/trace/trace_event_profile.c
+++ b/kernel/trace/trace_event_profile.c
@@ -8,6 +8,57 @@
 #include <linux/module.h>
 #include "trace.h"
 
+/*
+ * We can't use a size but a type in alloc_percpu()
+ * So let's create a dummy type that matches the desired size
+ */
+typedef struct {char buf[FTRACE_MAX_PROFILE_SIZE];} profile_buf_t;
+
+char *trace_profile_buf;
+EXPORT_SYMBOL_GPL(trace_profile_buf);
+
+char *trace_profile_buf_nmi;
+EXPORT_SYMBOL_GPL(trace_profile_buf_nmi);
+
+/* Count the events in use (per event id, not per instance) */
+static int total_profile_count;
+
+static int ftrace_profile_enable_event(struct ftrace_event_call *event)
+{
+        char *buf;
+        int ret = -ENOMEM;
+
+        if (atomic_inc_return(&event->profile_count))
+                return 0;
+
+        if (!total_profile_count++) {
+                buf = (char *)alloc_percpu(profile_buf_t);
+                if (!buf)
+                        goto fail_buf;
+
+                rcu_assign_pointer(trace_profile_buf, buf);
+
+                buf = (char *)alloc_percpu(profile_buf_t);
+                if (!buf)
+                        goto fail_buf_nmi;
+
+                rcu_assign_pointer(trace_profile_buf_nmi, buf);
+        }
+
+        ret = event->profile_enable();
+        if (!ret)
+                return 0;
+
+        kfree(trace_profile_buf_nmi);
+fail_buf_nmi:
+        kfree(trace_profile_buf);
+fail_buf:
+        total_profile_count--;
+        atomic_dec(&event->profile_count);
+
+        return ret;
+}
+
 int ftrace_profile_enable(int event_id)
 {
         struct ftrace_event_call *event;
@@ -17,7 +68,7 @@ int ftrace_profile_enable(int event_id)
         list_for_each_entry(event, &ftrace_events, list) {
                 if (event->id == event_id && event->profile_enable &&
                     try_module_get(event->mod)) {
-                        ret = event->profile_enable(event);
+                        ret = ftrace_profile_enable_event(event);
                         break;
                 }
         }
@@ -26,6 +77,33 @@ int ftrace_profile_enable(int event_id)
         return ret;
 }
 
+static void ftrace_profile_disable_event(struct ftrace_event_call *event)
+{
+        char *buf, *nmi_buf;
+
+        if (!atomic_add_negative(-1, &event->profile_count))
+                return;
+
+        event->profile_disable();
+
+        if (!--total_profile_count) {
+                buf = trace_profile_buf;
+                rcu_assign_pointer(trace_profile_buf, NULL);
+
+                nmi_buf = trace_profile_buf_nmi;
+                rcu_assign_pointer(trace_profile_buf_nmi, NULL);
+
+                /*
+                 * Ensure every events in profiling have finished before
+                 * releasing the buffers
+                 */
+                synchronize_sched();
+
+                free_percpu(buf);
+                free_percpu(nmi_buf);
+        }
+}
+
 void ftrace_profile_disable(int event_id)
 {
         struct ftrace_event_call *event;
@@ -33,7 +111,7 @@ void ftrace_profile_disable(int event_id)
         mutex_lock(&event_mutex);
         list_for_each_entry(event, &ftrace_events, list) {
                 if (event->id == event_id) {
-                        event->profile_disable(event);
+                        ftrace_profile_disable_event(event);
                         module_put(event->mod);
                         break;
                 }
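The two helpers added above follow a simple ownership pattern: a global use count (total_profile_count) gates lazy allocation of the shared per-cpu buffers, so the first profiled event allocates them and the last one to be disabled releases them, with synchronize_sched() letting in-flight users drain first. Below is a minimal, hypothetical user-space sketch of that counting pattern only; it substitutes malloc()/free() and a pthread mutex for alloc_percpu(), RCU, and synchronize_sched(), and the names PROFILE_BUF_SIZE, profile_buf, profile_enable_one() and profile_disable_one() are invented for the illustration.

```c
/*
 * Hypothetical user-space sketch of the refcount-gated lazy allocation
 * shown in the patch above: the first enabler allocates the shared
 * buffer, the last disabler frees it.  A mutex stands in for
 * event_mutex; there is no RCU here, so concurrent readers are assumed
 * to take the same lock.
 */
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

#define PROFILE_BUF_SIZE 4096           /* stand-in for FTRACE_MAX_PROFILE_SIZE */

static pthread_mutex_t profile_lock = PTHREAD_MUTEX_INITIALIZER;
static char *profile_buf;               /* shared buffer, allocated on demand */
static int total_profile_count;         /* how many events are currently enabled */

static int profile_enable_one(void)
{
	int ret = 0;

	pthread_mutex_lock(&profile_lock);
	if (total_profile_count++ == 0) {
		/* first user: allocate the shared buffer */
		profile_buf = malloc(PROFILE_BUF_SIZE);
		if (!profile_buf) {
			total_profile_count--;
			ret = -1;       /* stands in for -ENOMEM */
		}
	}
	pthread_mutex_unlock(&profile_lock);
	return ret;
}

static void profile_disable_one(void)
{
	pthread_mutex_lock(&profile_lock);
	if (--total_profile_count == 0) {
		/* last user gone: release the shared buffer */
		free(profile_buf);
		profile_buf = NULL;
	}
	pthread_mutex_unlock(&profile_lock);
}

int main(void)
{
	profile_enable_one();           /* first enable allocates */
	profile_enable_one();           /* second enable only bumps the count */
	profile_disable_one();
	profile_disable_one();          /* last disable frees */
	printf("buffer is %s\n", profile_buf ? "still allocated" : "released");
	return 0;
}
```

In the kernel code itself, the per-event atomic counter (event->profile_count) plays the same gating role for an individual event's profile_enable()/profile_disable() callbacks, and the buffers are published with rcu_assign_pointer() so that interrupt and NMI context can read them without taking event_mutex; the mutex in the sketch merely stands in for that serialization.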