author     Jiri Olsa <jolsa@redhat.com>             2012-02-15 09:51:49 -0500
committer  Steven Rostedt <rostedt@goodmis.org>     2012-02-21 11:08:24 -0500
commit     ceec0b6fc7cd43b38a40c2d40223f9cd0616f0cd (patch)
tree       31e7deb76a5827883251dc578300bdc35ef62538 /kernel
parent     e248491ac283b516958ca9ab62c8e74b6718bca8 (diff)
ftrace, perf: Add open/close tracepoint perf registration actions
Add TRACE_REG_PERF_OPEN and TRACE_REG_PERF_CLOSE to differentiate the
register/unregister actions from the open/close actions.

The register/unregister actions are invoked only for the first/last
tracepoint user, i.e. when the first event on the tracepoint is opened
or its last event is closed.

The open/close actions are invoked for every tracepoint user, each time
an event is opened or closed.
Link: http://lkml.kernel.org/r/1329317514-8131-3-git-send-email-jolsa@redhat.com
Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Jiri Olsa <jolsa@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
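
To make the distinction above concrete, here is a minimal sketch (not part of the
patch) of what an event class's ->reg() callback can look like once it handles the
new actions and the extra data argument; my_enable() and my_disable() are
hypothetical stand-ins for the class-specific enable/disable work:

/* Sketch only: my_enable()/my_disable() are hypothetical helpers. */
static int my_enable(struct ftrace_event_call *call) { return 0; }
static void my_disable(struct ftrace_event_call *call) { }

static int my_event_register(struct ftrace_event_call *call,
			     enum trace_reg type, void *data)
{
	switch (type) {
	case TRACE_REG_PERF_REGISTER:	/* first perf user of the tracepoint */
		return my_enable(call);
	case TRACE_REG_PERF_UNREGISTER:	/* last perf user went away */
		my_disable(call);
		return 0;
	case TRACE_REG_PERF_OPEN:	/* every perf event opened on the tracepoint */
	case TRACE_REG_PERF_CLOSE:	/* every such perf event being closed */
		/* 'data' is the struct perf_event being opened/closed */
		return 0;
	default:
		return 0;
	}
}

With this split, the probe registration/unregistration still happens only once per
tracepoint (first user in, last user out), while per-event setup and teardown can
hook the OPEN/CLOSE actions, which receive the struct perf_event being opened or
closed.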
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/trace/trace_event_perf.c | 116
-rw-r--r--  kernel/trace/trace_events.c     |  10
-rw-r--r--  kernel/trace/trace_kprobe.c     |   6
-rw-r--r--  kernel/trace/trace_syscalls.c   |  14
4 files changed, 97 insertions(+), 49 deletions(-)
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 19a359d5e6d5..0cfcc37f63de 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -44,23 +44,17 @@ static int perf_trace_event_perm(struct ftrace_event_call *tp_event,
 	return 0;
 }
 
-static int perf_trace_event_init(struct ftrace_event_call *tp_event,
+static int perf_trace_event_reg(struct ftrace_event_call *tp_event,
 				 struct perf_event *p_event)
 {
 	struct hlist_head __percpu *list;
-	int ret;
+	int ret = -ENOMEM;
 	int cpu;
 
-	ret = perf_trace_event_perm(tp_event, p_event);
-	if (ret)
-		return ret;
-
 	p_event->tp_event = tp_event;
 	if (tp_event->perf_refcount++ > 0)
 		return 0;
 
-	ret = -ENOMEM;
-
 	list = alloc_percpu(struct hlist_head);
 	if (!list)
 		goto fail;
@@ -83,7 +77,7 @@ static int perf_trace_event_init(struct ftrace_event_call *tp_event,
 		}
 	}
 
-	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER);
+	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
 	if (ret)
 		goto fail;
 
@@ -108,6 +102,69 @@ fail:
 	return ret;
 }
 
+static void perf_trace_event_unreg(struct perf_event *p_event)
+{
+	struct ftrace_event_call *tp_event = p_event->tp_event;
+	int i;
+
+	if (--tp_event->perf_refcount > 0)
+		goto out;
+
+	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);
+
+	/*
+	 * Ensure our callback won't be called anymore. The buffers
+	 * will be freed after that.
+	 */
+	tracepoint_synchronize_unregister();
+
+	free_percpu(tp_event->perf_events);
+	tp_event->perf_events = NULL;
+
+	if (!--total_ref_count) {
+		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
+			free_percpu(perf_trace_buf[i]);
+			perf_trace_buf[i] = NULL;
+		}
+	}
+out:
+	module_put(tp_event->mod);
+}
+
+static int perf_trace_event_open(struct perf_event *p_event)
+{
+	struct ftrace_event_call *tp_event = p_event->tp_event;
+	return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
+}
+
+static void perf_trace_event_close(struct perf_event *p_event)
+{
+	struct ftrace_event_call *tp_event = p_event->tp_event;
+	tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
+}
+
+static int perf_trace_event_init(struct ftrace_event_call *tp_event,
+				 struct perf_event *p_event)
+{
+	int ret;
+
+	ret = perf_trace_event_perm(tp_event, p_event);
+	if (ret)
+		return ret;
+
+	ret = perf_trace_event_reg(tp_event, p_event);
+	if (ret)
+		return ret;
+
+	ret = perf_trace_event_open(p_event);
+	if (ret) {
+		perf_trace_event_unreg(p_event);
+		return ret;
+	}
+
+	return 0;
+}
+
 int perf_trace_init(struct perf_event *p_event)
 {
 	struct ftrace_event_call *tp_event;
@@ -130,6 +187,14 @@ int perf_trace_init(struct perf_event *p_event)
 	return ret;
 }
 
+void perf_trace_destroy(struct perf_event *p_event)
+{
+	mutex_lock(&event_mutex);
+	perf_trace_event_close(p_event);
+	perf_trace_event_unreg(p_event);
+	mutex_unlock(&event_mutex);
+}
+
 int perf_trace_add(struct perf_event *p_event, int flags)
 {
 	struct ftrace_event_call *tp_event = p_event->tp_event;
@@ -154,37 +219,6 @@ void perf_trace_del(struct perf_event *p_event, int flags)
 	hlist_del_rcu(&p_event->hlist_entry);
 }
 
-void perf_trace_destroy(struct perf_event *p_event)
-{
-	struct ftrace_event_call *tp_event = p_event->tp_event;
-	int i;
-
-	mutex_lock(&event_mutex);
-	if (--tp_event->perf_refcount > 0)
-		goto out;
-
-	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER);
-
-	/*
-	 * Ensure our callback won't be called anymore. The buffers
-	 * will be freed after that.
-	 */
-	tracepoint_synchronize_unregister();
-
-	free_percpu(tp_event->perf_events);
-	tp_event->perf_events = NULL;
-
-	if (!--total_ref_count) {
-		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
-			free_percpu(perf_trace_buf[i]);
-			perf_trace_buf[i] = NULL;
-		}
-	}
-out:
-	module_put(tp_event->mod);
-	mutex_unlock(&event_mutex);
-}
-
 __kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
 				       struct pt_regs *regs, int *rctxp)
 {
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index c212a7f934ec..5138fea37908 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -147,7 +147,8 @@ int trace_event_raw_init(struct ftrace_event_call *call)
 }
 EXPORT_SYMBOL_GPL(trace_event_raw_init);
 
-int ftrace_event_reg(struct ftrace_event_call *call, enum trace_reg type)
+int ftrace_event_reg(struct ftrace_event_call *call,
+		     enum trace_reg type, void *data)
 {
 	switch (type) {
 	case TRACE_REG_REGISTER:
@@ -170,6 +171,9 @@ int ftrace_event_reg(struct ftrace_event_call *call, enum trace_reg type)
 					    call->class->perf_probe,
 					    call);
 		return 0;
+	case TRACE_REG_PERF_OPEN:
+	case TRACE_REG_PERF_CLOSE:
+		return 0;
 #endif
 	}
 	return 0;
@@ -209,7 +213,7 @@ static int ftrace_event_enable_disable(struct ftrace_event_call *call,
 				tracing_stop_cmdline_record();
 				call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
 			}
-			call->class->reg(call, TRACE_REG_UNREGISTER);
+			call->class->reg(call, TRACE_REG_UNREGISTER, NULL);
 		}
 		break;
 	case 1:
@@ -218,7 +222,7 @@ static int ftrace_event_enable_disable(struct ftrace_event_call *call,
 				tracing_start_cmdline_record();
 				call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
 			}
-			ret = call->class->reg(call, TRACE_REG_REGISTER);
+			ret = call->class->reg(call, TRACE_REG_REGISTER, NULL);
 			if (ret) {
 				tracing_stop_cmdline_record();
 				pr_info("event trace: Could not enable event "
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 00d527c945a4..5667f8958cc9 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1892,7 +1892,8 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
 #endif	/* CONFIG_PERF_EVENTS */
 
 static __kprobes
-int kprobe_register(struct ftrace_event_call *event, enum trace_reg type)
+int kprobe_register(struct ftrace_event_call *event,
+		    enum trace_reg type, void *data)
 {
 	struct trace_probe *tp = (struct trace_probe *)event->data;
 
@@ -1909,6 +1910,9 @@ int kprobe_register(struct ftrace_event_call *event, enum trace_reg type)
 	case TRACE_REG_PERF_UNREGISTER:
 		disable_trace_probe(tp, TP_FLAG_PROFILE);
 		return 0;
+	case TRACE_REG_PERF_OPEN:
+	case TRACE_REG_PERF_CLOSE:
+		return 0;
 #endif
 	}
 	return 0;
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 43500153dd1e..e23515f51ed4 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -17,9 +17,9 @@ static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
 static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);
 
 static int syscall_enter_register(struct ftrace_event_call *event,
-				 enum trace_reg type);
+				 enum trace_reg type, void *data);
 static int syscall_exit_register(struct ftrace_event_call *event,
-				 enum trace_reg type);
+				 enum trace_reg type, void *data);
 
 static int syscall_enter_define_fields(struct ftrace_event_call *call);
 static int syscall_exit_define_fields(struct ftrace_event_call *call);
@@ -649,7 +649,7 @@ void perf_sysexit_disable(struct ftrace_event_call *call)
 #endif /* CONFIG_PERF_EVENTS */
 
 static int syscall_enter_register(struct ftrace_event_call *event,
-				 enum trace_reg type)
+				 enum trace_reg type, void *data)
 {
 	switch (type) {
 	case TRACE_REG_REGISTER:
@@ -664,13 +664,16 @@ static int syscall_enter_register(struct ftrace_event_call *event,
 	case TRACE_REG_PERF_UNREGISTER:
 		perf_sysenter_disable(event);
 		return 0;
+	case TRACE_REG_PERF_OPEN:
+	case TRACE_REG_PERF_CLOSE:
+		return 0;
 #endif
 	}
 	return 0;
 }
 
 static int syscall_exit_register(struct ftrace_event_call *event,
-				 enum trace_reg type)
+				 enum trace_reg type, void *data)
 {
 	switch (type) {
 	case TRACE_REG_REGISTER:
@@ -685,6 +688,9 @@ static int syscall_exit_register(struct ftrace_event_call *event,
 	case TRACE_REG_PERF_UNREGISTER:
 		perf_sysexit_disable(event);
 		return 0;
+	case TRACE_REG_PERF_OPEN:
+	case TRACE_REG_PERF_CLOSE:
+		return 0;
 #endif
 	}
 	return 0;