41 files changed, 1024 insertions, 1075 deletions
diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h
index ce463a9cc8fb..5a24b846a1cb 100644
--- a/arch/x86/kvm/mmutrace.h
+++ b/arch/x86/kvm/mmutrace.h
@@ -2,7 +2,7 @@
 #define _TRACE_KVMMMU_H
 
 #include <linux/tracepoint.h>
-#include <linux/ftrace_event.h>
+#include <linux/trace_events.h>
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvmmmu
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 851a9a1c6dfc..602b974a60a6 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -29,7 +29,7 @@
 #include <linux/vmalloc.h>
 #include <linux/highmem.h>
 #include <linux/sched.h>
-#include <linux/ftrace_event.h>
+#include <linux/trace_events.h>
 #include <linux/slab.h>
 
 #include <asm/perf_event.h>
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index ab53d80b0f64..e856dd566f4c 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -28,7 +28,7 @@
 #include <linux/sched.h>
 #include <linux/moduleparam.h>
 #include <linux/mod_devicetable.h>
-#include <linux/ftrace_event.h>
+#include <linux/trace_events.h>
 #include <linux/slab.h>
 #include <linux/tboot.h>
 #include <linux/hrtimer.h>
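The three hunks above are the same mechanical change: every user of the old <linux/ftrace_event.h> header now pulls in <linux/trace_events.h> instead. A minimal sketch of what adapting dependent code looks like (the helper below is hypothetical and not part of this patch; it only illustrates the renamed header and the trace_event_name() accessor, which is renamed further down in this same patch):

#include <linux/printk.h>
#include <linux/trace_events.h>	/* was <linux/ftrace_event.h> */

/* Hypothetical helper: report an event's name through the renamed API. */
static void report_event(struct trace_event_call *call)
{
	pr_info("trace event: %s\n", trace_event_name(call));
}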
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 060dd7b61c6d..5acf5b70866d 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -533,12 +533,6 @@ bool mac_pton(const char *s, u8 *mac);
  *
  * Most likely, you want to use tracing_on/tracing_off.
  */
-#ifdef CONFIG_RING_BUFFER
-/* trace_off_permanent stops recording with no way to bring it back */
-void tracing_off_permanent(void);
-#else
-static inline void tracing_off_permanent(void) { }
-#endif
 
 enum ftrace_dump_mode {
 	DUMP_NONE,
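With tracing_off_permanent() removed, the comment's advice stands on its own: tracing_on()/tracing_off() are the remaining in-kernel switches. A hedged sketch of the usual replacement pattern (the checkpoint function is hypothetical):

#include <linux/kernel.h>

/* Freeze the ring buffer at the point of failure. Unlike the removed
 * tracing_off_permanent(), this can be undone with tracing_on() or by
 * writing 1 to /sys/kernel/debug/tracing/tracing_on. */
static void debug_checkpoint(bool failed)
{
	if (failed)
		tracing_off();
}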
diff --git a/include/linux/module.h b/include/linux/module.h
index 1e5436042eb0..255fca74de7d 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -336,7 +336,7 @@ struct module {
 	const char **trace_bprintk_fmt_start;
 #endif
 #ifdef CONFIG_EVENT_TRACING
-	struct ftrace_event_call **trace_events;
+	struct trace_event_call **trace_events;
 	unsigned int num_trace_events;
 	struct trace_enum_map **trace_enums;
 	unsigned int num_trace_enums;
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 3d80c432ede7..2027809433b3 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -484,7 +484,7 @@ struct perf_event {
 	void *overflow_handler_context;
 
 #ifdef CONFIG_EVENT_TRACING
-	struct ftrace_event_call *tp_event;
+	struct trace_event_call *tp_event;
 	struct event_filter *filter;
 #ifdef CONFIG_FUNCTION_TRACER
 	struct ftrace_ops ftrace_ops;
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index bb51becf23f8..b45c45b8c829 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -111,14 +111,14 @@ union bpf_attr;
 #define __SC_STR_ADECL(t, a)	#a
 #define __SC_STR_TDECL(t, a)	#t
 
-extern struct ftrace_event_class event_class_syscall_enter;
-extern struct ftrace_event_class event_class_syscall_exit;
+extern struct trace_event_class event_class_syscall_enter;
+extern struct trace_event_class event_class_syscall_exit;
 extern struct trace_event_functions enter_syscall_print_funcs;
 extern struct trace_event_functions exit_syscall_print_funcs;
 
 #define SYSCALL_TRACE_ENTER_EVENT(sname) \
 	static struct syscall_metadata __syscall_meta_##sname; \
-	static struct ftrace_event_call __used \
+	static struct trace_event_call __used \
 	  event_enter_##sname = { \
 		.class			= &event_class_syscall_enter, \
 		{ \
@@ -128,13 +128,13 @@ extern struct trace_event_functions exit_syscall_print_funcs;
 		.data			= (void *)&__syscall_meta_##sname,\
 		.flags			= TRACE_EVENT_FL_CAP_ANY, \
 	}; \
-	static struct ftrace_event_call __used \
+	static struct trace_event_call __used \
 	__attribute__((section("_ftrace_events"))) \
 	 *__event_enter_##sname = &event_enter_##sname;
 
 #define SYSCALL_TRACE_EXIT_EVENT(sname) \
 	static struct syscall_metadata __syscall_meta_##sname; \
-	static struct ftrace_event_call __used \
+	static struct trace_event_call __used \
 	  event_exit_##sname = { \
 		.class			= &event_class_syscall_exit, \
 		{ \
@@ -144,7 +144,7 @@ extern struct trace_event_functions exit_syscall_print_funcs;
 		.data			= (void *)&__syscall_meta_##sname,\
 		.flags			= TRACE_EVENT_FL_CAP_ANY, \
 	}; \
-	static struct ftrace_event_call __used \
+	static struct trace_event_call __used \
 	__attribute__((section("_ftrace_events"))) \
 	 *__event_exit_##sname = &event_exit_##sname;
 
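The __attribute__((section("_ftrace_events"))) pointers above rely on a linker trick: each event definition drops a pointer into the _ftrace_events section, and the kernel's linker script brackets that section with start/stop symbols so the tracing core can walk every built-in event as an array. A simplified sketch of the pattern (the macro and walker names are illustrative; the bracketing symbols match those consumed by kernel/trace/trace_events.c):

struct trace_event_call;

#define EXPORT_EVENT(ev) \
	static struct trace_event_call *__ptr_##ev \
	__attribute__((used, section("_ftrace_events"))) = &(ev)

extern struct trace_event_call *__start_ftrace_events[];
extern struct trace_event_call *__stop_ftrace_events[];

/* Walk every pointer the linker collected into the section. */
static void for_each_builtin_event(void (*fn)(struct trace_event_call *))
{
	struct trace_event_call **iter;

	for (iter = __start_ftrace_events; iter < __stop_ftrace_events; iter++)
		fn(*iter);
}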
diff --git a/include/linux/ftrace_event.h b/include/linux/trace_events.h
index f9ecf63d47f1..1063c850dbab 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/trace_events.h
@@ -1,6 +1,6 @@
 
-#ifndef _LINUX_FTRACE_EVENT_H
-#define _LINUX_FTRACE_EVENT_H
+#ifndef _LINUX_TRACE_EVENT_H
+#define _LINUX_TRACE_EVENT_H
 
 #include <linux/ring_buffer.h>
 #include <linux/trace_seq.h>
@@ -25,35 +25,35 @@ struct trace_print_flags_u64 {
 	const char		*name;
 };
 
-const char *ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
+const char *trace_print_flags_seq(struct trace_seq *p, const char *delim,
 				   unsigned long flags,
 				   const struct trace_print_flags *flag_array);
 
-const char *ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
+const char *trace_print_symbols_seq(struct trace_seq *p, unsigned long val,
 				     const struct trace_print_flags *symbol_array);
 
 #if BITS_PER_LONG == 32
-const char *ftrace_print_symbols_seq_u64(struct trace_seq *p,
+const char *trace_print_symbols_seq_u64(struct trace_seq *p,
 					 unsigned long long val,
 					 const struct trace_print_flags_u64
 					 *symbol_array);
 #endif
 
-const char *ftrace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
+const char *trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
 				     unsigned int bitmask_size);
 
-const char *ftrace_print_hex_seq(struct trace_seq *p,
+const char *trace_print_hex_seq(struct trace_seq *p,
 				 const unsigned char *buf, int len);
 
-const char *ftrace_print_array_seq(struct trace_seq *p,
+const char *trace_print_array_seq(struct trace_seq *p,
 				   const void *buf, int count,
 				   size_t el_size);
 
 struct trace_iterator;
 struct trace_event;
 
-int ftrace_raw_output_prep(struct trace_iterator *iter,
+int trace_raw_output_prep(struct trace_iterator *iter,
 			   struct trace_event *event);
 
 /*
  * The trace entry - the most basic unit of tracing. This is what
@@ -68,7 +68,7 @@ struct trace_entry {
 	int			pid;
 };
 
-#define FTRACE_MAX_EVENT						\
+#define TRACE_EVENT_TYPE_MAX						\
 	((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)
 
 /*
@@ -132,8 +132,8 @@ struct trace_event {
 	struct trace_event_functions	*funcs;
 };
 
-extern int register_ftrace_event(struct trace_event *event);
-extern int unregister_ftrace_event(struct trace_event *event);
+extern int register_trace_event(struct trace_event *event);
+extern int unregister_trace_event(struct trace_event *event);
 
 /* Return values for print_line callback */
 enum print_line_t {
@@ -157,11 +157,11 @@ static inline enum print_line_t trace_handle_return(struct trace_seq *s)
 void tracing_generic_entry_update(struct trace_entry *entry,
 				  unsigned long flags,
 				  int pc);
-struct ftrace_event_file;
+struct trace_event_file;
 
 struct ring_buffer_event *
 trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer,
-				struct ftrace_event_file *ftrace_file,
+				struct trace_event_file *trace_file,
 				int type, unsigned long len,
 				unsigned long flags, int pc);
 struct ring_buffer_event *
@@ -183,7 +183,7 @@ void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
 
 void tracing_record_cmdline(struct task_struct *tsk);
 
-int ftrace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...);
+int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...);
 
 struct event_filter;
 
@@ -200,50 +200,39 @@ enum trace_reg {
 #endif
 };
 
-struct ftrace_event_call;
+struct trace_event_call;
 
-struct ftrace_event_class {
+struct trace_event_class {
 	const char		*system;
 	void			*probe;
 #ifdef CONFIG_PERF_EVENTS
 	void			*perf_probe;
 #endif
-	int			(*reg)(struct ftrace_event_call *event,
+	int			(*reg)(struct trace_event_call *event,
 				       enum trace_reg type, void *data);
-	int			(*define_fields)(struct ftrace_event_call *);
-	struct list_head	*(*get_fields)(struct ftrace_event_call *);
+	int			(*define_fields)(struct trace_event_call *);
+	struct list_head	*(*get_fields)(struct trace_event_call *);
 	struct list_head	fields;
-	int			(*raw_init)(struct ftrace_event_call *);
+	int			(*raw_init)(struct trace_event_call *);
 };
 
-extern int ftrace_event_reg(struct ftrace_event_call *event,
+extern int trace_event_reg(struct trace_event_call *event,
 			    enum trace_reg type, void *data);
 
-int ftrace_output_event(struct trace_iterator *iter, struct ftrace_event_call *event,
-			char *fmt, ...);
-
-int ftrace_event_define_field(struct ftrace_event_call *call,
-			      char *type, int len, char *item, int offset,
-			      int field_size, int sign, int filter);
-
-struct ftrace_event_buffer {
+struct trace_event_buffer {
 	struct ring_buffer		*buffer;
 	struct ring_buffer_event	*event;
-	struct ftrace_event_file	*ftrace_file;
+	struct trace_event_file		*trace_file;
 	void				*entry;
 	unsigned long			flags;
 	int				pc;
 };
 
-void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer,
-				  struct ftrace_event_file *ftrace_file,
+void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
+				 struct trace_event_file *trace_file,
 				  unsigned long len);
 
-void ftrace_event_buffer_commit(struct ftrace_event_buffer *fbuffer);
-
-int ftrace_event_define_field(struct ftrace_event_call *call,
-			      char *type, int len, char *item, int offset,
-			      int field_size, int sign, int filter);
+void trace_event_buffer_commit(struct trace_event_buffer *fbuffer);
 
 enum {
 	TRACE_EVENT_FL_FILTERED_BIT,
@@ -261,11 +250,11 @@
  * FILTERED	  - The event has a filter attached
  * CAP_ANY	  - Any user can enable for perf
  * NO_SET_FILTER - Set when filter has error and is to be ignored
- * IGNORE_ENABLE - For ftrace internal events, do not enable with debugfs file
+ * IGNORE_ENABLE - For trace internal events, do not enable with debugfs file
 * WAS_ENABLED   - Set and stays set when an event was ever enabled
 *                    (used for module unloading, if a module event is enabled,
 *                     it is best to clear the buffers that used it).
- * USE_CALL_FILTER - For ftrace internal events, don't use file filter
+ * USE_CALL_FILTER - For trace internal events, don't use file filter
 * TRACEPOINT	- Event is a tracepoint
 * KPROBE	- Event is a kprobe
 */
@@ -280,9 +269,9 @@ enum {
 	TRACE_EVENT_FL_KPROBE		= (1 << TRACE_EVENT_FL_KPROBE_BIT),
 };
 
-struct ftrace_event_call {
+struct trace_event_call {
 	struct list_head	list;
-	struct ftrace_event_class *class;
+	struct trace_event_class *class;
 	union {
 		char			*name;
 		/* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */
@@ -297,7 +286,7 @@ struct ftrace_event_call {
 	 *   bit 0:		filter_active
 	 *   bit 1:		allow trace by non root (cap any)
 	 *   bit 2:		failed to apply filter
-	 *   bit 3:		ftrace internal event (do not enable)
+	 *   bit 3:		trace internal event (do not enable)
 	 *   bit 4:		Event was enabled by module
 	 *   bit 5:		use call filter rather than file filter
 	 *   bit 6:		Event is a tracepoint
@@ -309,13 +298,13 @@
 	struct hlist_head __percpu	*perf_events;
 	struct bpf_prog			*prog;
 
-	int	(*perf_perm)(struct ftrace_event_call *,
+	int	(*perf_perm)(struct trace_event_call *,
 			     struct perf_event *);
 #endif
 };
 
 static inline const char *
-ftrace_event_name(struct ftrace_event_call *call)
+trace_event_name(struct trace_event_call *call)
 {
 	if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
 		return call->tp ? call->tp->name : NULL;
@@ -324,21 +313,21 @@ ftrace_event_name(struct ftrace_event_call *call)
 }
 
 struct trace_array;
-struct ftrace_subsystem_dir;
+struct trace_subsystem_dir;
 
 enum {
-	FTRACE_EVENT_FL_ENABLED_BIT,
-	FTRACE_EVENT_FL_RECORDED_CMD_BIT,
-	FTRACE_EVENT_FL_FILTERED_BIT,
-	FTRACE_EVENT_FL_NO_SET_FILTER_BIT,
-	FTRACE_EVENT_FL_SOFT_MODE_BIT,
-	FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
-	FTRACE_EVENT_FL_TRIGGER_MODE_BIT,
-	FTRACE_EVENT_FL_TRIGGER_COND_BIT,
+	EVENT_FILE_FL_ENABLED_BIT,
+	EVENT_FILE_FL_RECORDED_CMD_BIT,
+	EVENT_FILE_FL_FILTERED_BIT,
+	EVENT_FILE_FL_NO_SET_FILTER_BIT,
+	EVENT_FILE_FL_SOFT_MODE_BIT,
+	EVENT_FILE_FL_SOFT_DISABLED_BIT,
+	EVENT_FILE_FL_TRIGGER_MODE_BIT,
+	EVENT_FILE_FL_TRIGGER_COND_BIT,
 };
 
 /*
- * Ftrace event file flags:
+ * Event file flags:
 *  ENABLED	  - The event is enabled
 *  RECORDED_CMD  - The comms should be recorded at sched_switch
 *  FILTERED	  - The event has a filter attached
@@ -350,23 +339,23 @@ enum {
 *  TRIGGER_COND  - When set, one or more triggers has an associated filter
 */
 enum {
-	FTRACE_EVENT_FL_ENABLED		= (1 << FTRACE_EVENT_FL_ENABLED_BIT),
-	FTRACE_EVENT_FL_RECORDED_CMD	= (1 << FTRACE_EVENT_FL_RECORDED_CMD_BIT),
-	FTRACE_EVENT_FL_FILTERED	= (1 << FTRACE_EVENT_FL_FILTERED_BIT),
-	FTRACE_EVENT_FL_NO_SET_FILTER	= (1 << FTRACE_EVENT_FL_NO_SET_FILTER_BIT),
-	FTRACE_EVENT_FL_SOFT_MODE	= (1 << FTRACE_EVENT_FL_SOFT_MODE_BIT),
-	FTRACE_EVENT_FL_SOFT_DISABLED	= (1 << FTRACE_EVENT_FL_SOFT_DISABLED_BIT),
-	FTRACE_EVENT_FL_TRIGGER_MODE	= (1 << FTRACE_EVENT_FL_TRIGGER_MODE_BIT),
-	FTRACE_EVENT_FL_TRIGGER_COND	= (1 << FTRACE_EVENT_FL_TRIGGER_COND_BIT),
+	EVENT_FILE_FL_ENABLED		= (1 << EVENT_FILE_FL_ENABLED_BIT),
+	EVENT_FILE_FL_RECORDED_CMD	= (1 << EVENT_FILE_FL_RECORDED_CMD_BIT),
+	EVENT_FILE_FL_FILTERED		= (1 << EVENT_FILE_FL_FILTERED_BIT),
+	EVENT_FILE_FL_NO_SET_FILTER	= (1 << EVENT_FILE_FL_NO_SET_FILTER_BIT),
+	EVENT_FILE_FL_SOFT_MODE		= (1 << EVENT_FILE_FL_SOFT_MODE_BIT),
+	EVENT_FILE_FL_SOFT_DISABLED	= (1 << EVENT_FILE_FL_SOFT_DISABLED_BIT),
+	EVENT_FILE_FL_TRIGGER_MODE	= (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT),
+	EVENT_FILE_FL_TRIGGER_COND	= (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
 };
 
-struct ftrace_event_file {
+struct trace_event_file {
 	struct list_head		list;
-	struct ftrace_event_call	*event_call;
+	struct trace_event_call		*event_call;
 	struct event_filter		*filter;
 	struct dentry			*dir;
 	struct trace_array		*tr;
-	struct ftrace_subsystem_dir	*system;
+	struct trace_subsystem_dir	*system;
 	struct list_head		triggers;
 
 	/*
@@ -399,7 +388,7 @@ struct ftrace_event_file {
 	early_initcall(trace_init_flags_##name);
 
 #define __TRACE_EVENT_PERF_PERM(name, expr...) \
-	static int perf_perm_##name(struct ftrace_event_call *tp_event, \
+	static int perf_perm_##name(struct trace_event_call *tp_event, \
 				    struct perf_event *p_event) \
 	{ \
 		return ({ expr; }); \
@@ -425,19 +414,19 @@ enum event_trigger_type {
 
 extern int filter_match_preds(struct event_filter *filter, void *rec);
 
-extern int filter_check_discard(struct ftrace_event_file *file, void *rec,
+extern int filter_check_discard(struct trace_event_file *file, void *rec,
 				struct ring_buffer *buffer,
 				struct ring_buffer_event *event);
-extern int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
+extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
 				     struct ring_buffer *buffer,
 				     struct ring_buffer_event *event);
-extern enum event_trigger_type event_triggers_call(struct ftrace_event_file *file,
+extern enum event_trigger_type event_triggers_call(struct trace_event_file *file,
 						   void *rec);
-extern void event_triggers_post_call(struct ftrace_event_file *file,
+extern void event_triggers_post_call(struct trace_event_file *file,
 				     enum event_trigger_type tt);
 
 /**
- * ftrace_trigger_soft_disabled - do triggers and test if soft disabled
+ * trace_trigger_soft_disabled - do triggers and test if soft disabled
 * @file: The file pointer of the event to test
 *
 * If any triggers without filters are attached to this event, they
@@ -446,14 +435,14 @@ extern void event_triggers_post_call(struct ftrace_event_file *file,
 * otherwise false.
 */
 static inline bool
-ftrace_trigger_soft_disabled(struct ftrace_event_file *file)
+trace_trigger_soft_disabled(struct trace_event_file *file)
 {
 	unsigned long eflags = file->flags;
 
-	if (!(eflags & FTRACE_EVENT_FL_TRIGGER_COND)) {
-		if (eflags & FTRACE_EVENT_FL_TRIGGER_MODE)
+	if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
+		if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
 			event_triggers_call(file, NULL);
-		if (eflags & FTRACE_EVENT_FL_SOFT_DISABLED)
+		if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
 			return true;
 	}
 	return false;
@@ -473,7 +462,7 @@ ftrace_trigger_soft_disabled(struct ftrace_event_file *file)
 * Returns true if the event is discarded, false otherwise.
 */
 static inline bool
-__event_trigger_test_discard(struct ftrace_event_file *file,
+__event_trigger_test_discard(struct trace_event_file *file,
 			     struct ring_buffer *buffer,
 			     struct ring_buffer_event *event,
 			     void *entry,
@@ -481,10 +470,10 @@ __event_trigger_test_discard(struct ftrace_event_file *file,
 {
 	unsigned long eflags = file->flags;
 
-	if (eflags & FTRACE_EVENT_FL_TRIGGER_COND)
+	if (eflags & EVENT_FILE_FL_TRIGGER_COND)
 		*tt = event_triggers_call(file, entry);
 
-	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags))
+	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags))
 		ring_buffer_discard_commit(buffer, event);
 	else if (!filter_check_discard(file, entry, buffer, event))
 		return false;
@@ -506,7 +495,7 @@ __event_trigger_test_discard(struct ftrace_event_file *file,
 * if the event is soft disabled and should be discarded.
 */
 static inline void
-event_trigger_unlock_commit(struct ftrace_event_file *file,
+event_trigger_unlock_commit(struct trace_event_file *file,
 			    struct ring_buffer *buffer,
 			    struct ring_buffer_event *event,
 			    void *entry, unsigned long irq_flags, int pc)
@@ -537,7 +526,7 @@ event_trigger_unlock_commit(struct ftrace_event_file *file,
 * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit().
 */
 static inline void
-event_trigger_unlock_commit_regs(struct ftrace_event_file *file,
+event_trigger_unlock_commit_regs(struct trace_event_file *file,
 				 struct ring_buffer *buffer,
 				 struct ring_buffer_event *event,
 				 void *entry, unsigned long irq_flags, int pc,
@@ -570,12 +559,12 @@ enum {
 	FILTER_TRACE_FN,
 };
 
-extern int trace_event_raw_init(struct ftrace_event_call *call);
-extern int trace_define_field(struct ftrace_event_call *call, const char *type,
+extern int trace_event_raw_init(struct trace_event_call *call);
+extern int trace_define_field(struct trace_event_call *call, const char *type,
 			      const char *name, int offset, int size,
 			      int is_signed, int filter_type);
-extern int trace_add_event_call(struct ftrace_event_call *call);
-extern int trace_remove_event_call(struct ftrace_event_call *call);
+extern int trace_add_event_call(struct trace_event_call *call);
+extern int trace_remove_event_call(struct trace_event_call *call);
 
 #define is_signed_type(type)	(((type)(-1)) < (type)1)
 
@@ -624,4 +613,4 @@ perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
 }
 #endif
 
-#endif /* _LINUX_FTRACE_EVENT_H */
+#endif /* _LINUX_TRACE_EVENT_H */
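Despite the renames above, the output-registration API keeps its shape: register_trace_event() (formerly register_ftrace_event()) assigns a type id to a struct trace_event and hooks its formatting callbacks into the output path. A hedged sketch of a caller under the new names (the event and formatter are hypothetical; register_trace_event() returns 0 on failure):

#include <linux/init.h>
#include <linux/trace_events.h>
#include <linux/trace_seq.h>

static enum print_line_t my_output(struct trace_iterator *iter, int flags,
				   struct trace_event *event)
{
	trace_seq_puts(&iter->seq, "my binary event\n");
	return trace_handle_return(&iter->seq);
}

static struct trace_event_functions my_funcs = {
	.trace	= my_output,
};

static struct trace_event my_event = {
	.funcs	= &my_funcs,
};

static int __init my_event_init(void)
{
	/* was register_ftrace_event() before this rename */
	return register_trace_event(&my_event) ? 0 : -ENODEV;
}
early_initcall(my_event_init);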
diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h
index 02e1003568a4..09b3880105a9 100644
--- a/include/trace/define_trace.h
+++ b/include/trace/define_trace.h
@@ -87,7 +87,8 @@
 #define DECLARE_TRACE(name, proto, args)
 
 #ifdef CONFIG_EVENT_TRACING
-#include <trace/ftrace.h>
+#include <trace/trace_events.h>
+#include <trace/perf.h>
 #endif
 
 #undef TRACE_EVENT
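The single include of <trace/ftrace.h> becomes two generators: <trace/trace_events.h> expands the ftrace side (stages 1-3 plus the raw-event probe) and the new <trace/perf.h> expands the perf probe. Both are driven by the same TRACE_EVENT() headers. A minimal such header, following the pattern of samples/trace_events (all names illustrative):

#undef TRACE_SYSTEM
#define TRACE_SYSTEM sample

#if !defined(_TRACE_SAMPLE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SAMPLE_H

#include <linux/tracepoint.h>

TRACE_EVENT(sample_event,
	TP_PROTO(int value),
	TP_ARGS(value),
	TP_STRUCT__entry(
		__field(int, value)
	),
	TP_fast_assign(
		__entry->value = value;
	),
	TP_printk("value=%d", __entry->value)
);

#endif /* _TRACE_SAMPLE_H */

/* This re-reads the header through the generators above. */
#include <trace/define_trace.h>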
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index 630d1e5e4de0..284244ebfe8d 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -7,7 +7,7 @@
 #include <linux/ktime.h>
 #include <linux/pm_qos.h>
 #include <linux/tracepoint.h>
-#include <linux/ftrace_event.h>
+#include <linux/trace_events.h>
 
 #define TPS(x)  tracepoint_string(x)
 
diff --git a/include/trace/perf.h b/include/trace/perf.h
new file mode 100644
index 000000000000..1b5443cebedc
--- /dev/null
+++ b/include/trace/perf.h
@@ -0,0 +1,350 @@
+/*
+ * Stage 4 of the trace events.
+ *
+ * Override the macros in <trace/trace_events.h> to include the following:
+ *
+ * For those macros defined with TRACE_EVENT:
+ *
+ * static struct trace_event_call event_<call>;
+ *
+ * static void trace_event_raw_event_<call>(void *__data, proto)
+ * {
+ *	struct trace_event_file *trace_file = __data;
+ *	struct trace_event_call *event_call = trace_file->event_call;
+ *	struct trace_event_data_offsets_<call> __maybe_unused __data_offsets;
+ *	unsigned long eflags = trace_file->flags;
+ *	enum event_trigger_type __tt = ETT_NONE;
+ *	struct ring_buffer_event *event;
+ *	struct trace_event_raw_<call> *entry; <-- defined in stage 1
+ *	struct ring_buffer *buffer;
+ *	unsigned long irq_flags;
+ *	int __data_size;
+ *	int pc;
+ *
+ *	if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
+ *		if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
+ *			event_triggers_call(trace_file, NULL);
+ *		if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
+ *			return;
+ *	}
+ *
+ *	local_save_flags(irq_flags);
+ *	pc = preempt_count();
+ *
+ *	__data_size = trace_event_get_offsets_<call>(&__data_offsets, args);
+ *
+ *	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
+ *				  event_<call>->event.type,
+ *				  sizeof(*entry) + __data_size,
+ *				  irq_flags, pc);
+ *	if (!event)
+ *		return;
+ *	entry = ring_buffer_event_data(event);
+ *
+ *	{ <assign>; } <-- Here we assign the entries by the __field and
+ *			  __array macros.
+ *
+ *	if (eflags & EVENT_FILE_FL_TRIGGER_COND)
+ *		__tt = event_triggers_call(trace_file, entry);
+ *
+ *	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT,
+ *		     &trace_file->flags))
+ *		ring_buffer_discard_commit(buffer, event);
+ *	else if (!filter_check_discard(trace_file, entry, buffer, event))
+ *		trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
+ *
+ *	if (__tt)
+ *		event_triggers_post_call(trace_file, __tt);
+ * }
+ *
+ * static struct trace_event ftrace_event_type_<call> = {
+ *	.trace			= trace_raw_output_<call>, <-- stage 2
+ * };
+ *
+ * static char print_fmt_<call>[] = <TP_printk>;
+ *
+ * static struct trace_event_class __used event_class_<template> = {
+ *	.system			= "<system>",
+ *	.define_fields		= trace_event_define_fields_<call>,
+ *	.fields			= LIST_HEAD_INIT(event_class_##call.fields),
+ *	.raw_init		= trace_event_raw_init,
+ *	.probe			= trace_event_raw_event_##call,
+ *	.reg			= trace_event_reg,
+ * };
+ *
+ * static struct trace_event_call event_<call> = {
+ *	.class			= event_class_<template>,
+ *	{
+ *		.tp			= &__tracepoint_<call>,
+ *	},
+ *	.event			= &ftrace_event_type_<call>,
+ *	.print_fmt		= print_fmt_<call>,
+ *	.flags			= TRACE_EVENT_FL_TRACEPOINT,
+ * };
+ * // its only safe to use pointers when doing linker tricks to
+ * // create an array.
+ * static struct trace_event_call __used
+ * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
+ *
+ */
+
+#ifdef CONFIG_PERF_EVENTS
+
+#define _TRACE_PERF_PROTO(call, proto) \
+	static notrace void \
+	perf_trace_##call(void *__data, proto);
+
+#define _TRACE_PERF_INIT(call) \
+	.perf_probe		= perf_trace_##call,
+
+#else
+#define _TRACE_PERF_PROTO(call, proto)
+#define _TRACE_PERF_INIT(call)
+#endif /* CONFIG_PERF_EVENTS */
+
+#undef __entry
+#define __entry entry
+
+#undef __field
+#define __field(type, item)
+
+#undef __field_struct
+#define __field_struct(type, item)
+
+#undef __array
+#define __array(type, item, len)
+
+#undef __dynamic_array
+#define __dynamic_array(type, item, len) \
+	__entry->__data_loc_##item = __data_offsets.item;
+
+#undef __string
+#define __string(item, src) __dynamic_array(char, item, -1)
+
+#undef __assign_str
+#define __assign_str(dst, src) \
+	strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");
+
+#undef __bitmask
+#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
+
+#undef __get_bitmask
+#define __get_bitmask(field) (char *)__get_dynamic_array(field)
+
+#undef __assign_bitmask
+#define __assign_bitmask(dst, src, nr_bits) \
+	memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))
+
+#undef TP_fast_assign
+#define TP_fast_assign(args...) args
+
+#undef __perf_addr
+#define __perf_addr(a) (a)
+
+#undef __perf_count
+#define __perf_count(c) (c)
+
+#undef __perf_task
+#define __perf_task(t) (t)
+
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+\
+static notrace void \
+trace_event_raw_event_##call(void *__data, proto) \
+{ \
+	struct trace_event_file *trace_file = __data; \
+	struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
+	struct trace_event_buffer fbuffer; \
+	struct trace_event_raw_##call *entry; \
+	int __data_size; \
+\
+	if (trace_trigger_soft_disabled(trace_file)) \
+		return; \
+\
+	__data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
+\
+	entry = trace_event_buffer_reserve(&fbuffer, trace_file, \
+			sizeof(*entry) + __data_size); \
+\
+	if (!entry) \
+		return; \
+\
+	tstruct \
+\
+	{ assign; } \
+\
+	trace_event_buffer_commit(&fbuffer); \
+}
+/*
+ * The ftrace_test_probe is compiled out, it is only here as a build time check
+ * to make sure that if the tracepoint handling changes, the ftrace probe will
+ * fail to compile unless it too is updated.
+ */
+
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, call, proto, args) \
+static inline void ftrace_test_probe_##call(void) \
+{ \
+	check_trace_callback_type_##call(trace_event_raw_event_##template); \
+}
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print)
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+#undef __entry
+#define __entry REC
+
+#undef __print_flags
+#undef __print_symbolic
+#undef __print_hex
+#undef __get_dynamic_array
+#undef __get_dynamic_array_len
+#undef __get_str
+#undef __get_bitmask
+#undef __print_array
+
+#undef TP_printk
+#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)
+
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+_TRACE_PERF_PROTO(call, PARAMS(proto)); \
+static char print_fmt_##call[] = print; \
+static struct trace_event_class __used __refdata event_class_##call = { \
+	.system			= TRACE_SYSTEM_STRING, \
+	.define_fields		= trace_event_define_fields_##call, \
+	.fields			= LIST_HEAD_INIT(event_class_##call.fields),\
+	.raw_init		= trace_event_raw_init, \
+	.probe			= trace_event_raw_event_##call, \
+	.reg			= trace_event_reg, \
+	_TRACE_PERF_INIT(call) \
+};
+
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, call, proto, args) \
+\
+static struct trace_event_call __used event_##call = { \
+	.class			= &event_class_##template, \
+	{ \
+		.tp			= &__tracepoint_##call, \
+	}, \
+	.event.funcs		= &trace_event_type_funcs_##template, \
+	.print_fmt		= print_fmt_##template, \
+	.flags			= TRACE_EVENT_FL_TRACEPOINT, \
+}; \
+static struct trace_event_call __used \
+__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
+\
+static char print_fmt_##call[] = print; \
+\
+static struct trace_event_call __used event_##call = { \
+	.class			= &event_class_##template, \
+	{ \
+		.tp			= &__tracepoint_##call, \
+	}, \
+	.event.funcs		= &trace_event_type_funcs_##call, \
+	.print_fmt		= print_fmt_##call, \
+	.flags			= TRACE_EVENT_FL_TRACEPOINT, \
+}; \
+static struct trace_event_call __used \
+__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+#undef TRACE_SYSTEM_VAR
+
+#ifdef CONFIG_PERF_EVENTS
+
+#undef __entry
+#define __entry entry
+
+#undef __get_dynamic_array
+#define __get_dynamic_array(field) \
+		((void *)__entry + (__entry->__data_loc_##field & 0xffff))
+
+#undef __get_dynamic_array_len
+#define __get_dynamic_array_len(field) \
+		((__entry->__data_loc_##field >> 16) & 0xffff)
+
+#undef __get_str
+#define __get_str(field) (char *)__get_dynamic_array(field)
+
+#undef __get_bitmask
+#define __get_bitmask(field) (char *)__get_dynamic_array(field)
+
+#undef __perf_addr
+#define __perf_addr(a) (__addr = (a))
+
+#undef __perf_count
+#define __perf_count(c) (__count = (c))
+
+#undef __perf_task
+#define __perf_task(t) (__task = (t))
+
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+static notrace void \
+perf_trace_##call(void *__data, proto) \
+{ \
+	struct trace_event_call *event_call = __data; \
+	struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
+	struct trace_event_raw_##call *entry; \
+	struct pt_regs *__regs; \
+	u64 __addr = 0, __count = 1; \
+	struct task_struct *__task = NULL; \
+	struct hlist_head *head; \
+	int __entry_size; \
+	int __data_size; \
+	int rctx; \
+\
+	__data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
+\
+	head = this_cpu_ptr(event_call->perf_events); \
+	if (__builtin_constant_p(!__task) && !__task && \
+	    hlist_empty(head)) \
+		return; \
+\
+	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
+			     sizeof(u64)); \
+	__entry_size -= sizeof(u32); \
+\
+	entry = perf_trace_buf_prepare(__entry_size, \
+			event_call->event.type, &__regs, &rctx); \
+	if (!entry) \
+		return; \
+\
+	perf_fetch_caller_regs(__regs); \
+\
+	tstruct \
+\
+	{ assign; } \
+\
+	perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
+		__count, __regs, head, __task); \
+}
+
+/*
+ * This part is compiled out, it is only here as a build time check
+ * to make sure that if the tracepoint handling changes, the
+ * perf probe will fail to compile unless it too is updated.
+ */
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, call, proto, args) \
+static inline void perf_test_probe_##call(void) \
+{ \
+	check_trace_callback_type_##call(perf_trace_##template); \
+}
+
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
+	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+#endif /* CONFIG_PERF_EVENTS */
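The DEFINE_EVENT stubs in the new file compile to nothing at runtime; they exist so that a probe whose signature drifts from its tracepoint breaks the build. A standalone illustration of that type-check trick (all names hypothetical; in the kernel the checker inline is generated per tracepoint by DECLARE_TRACE):

/* The tracepoint side defines an empty inline whose parameter type is
 * the exact probe signature. */
static inline void
check_trace_callback_type_sample_event(void (*cb)(void *__data, int value))
{
	(void)cb;	/* only the type of @cb matters */
}

static void perf_trace_sample_event(void *__data, int value)
{
	(void)__data;
	(void)value;
}

/* Passing a probe with a mismatched prototype fails to compile. */
static inline void perf_test_probe_sample_event(void)
{
	check_trace_callback_type_sample_event(perf_trace_sample_event);
}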
diff --git a/include/trace/syscall.h b/include/trace/syscall.h
index 9674145e2f6a..7434f0f5d3f6 100644
--- a/include/trace/syscall.h
+++ b/include/trace/syscall.h
@@ -3,7 +3,7 @@
 
 #include <linux/tracepoint.h>
 #include <linux/unistd.h>
-#include <linux/ftrace_event.h>
+#include <linux/trace_events.h>
 #include <linux/thread_info.h>
 
 #include <asm/ptrace.h>
@@ -29,8 +29,8 @@ struct syscall_metadata {
 	const char	**args;
 	struct list_head enter_fields;
 
-	struct ftrace_event_call *enter_event;
-	struct ftrace_event_call *exit_event;
+	struct trace_event_call *enter_event;
+	struct trace_event_call *exit_event;
 };
 
 #if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_HAVE_SYSCALL_TRACEPOINTS)
diff --git a/include/trace/ftrace.h b/include/trace/trace_events.h
index 37d4b10b111d..43be3b0e44d3 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/trace_events.h
@@ -3,7 +3,7 @@
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
- * struct ftrace_raw_<call> {
+ * struct trace_event_raw_<call> {
 *	struct trace_entry		ent;
 *	<type>				<item>;
 *	<type2>				<item2>[<len>];
@@ -16,7 +16,7 @@
 * in the structure.
 */
 
-#include <linux/ftrace_event.h>
+#include <linux/trace_events.h>
 
 #ifndef TRACE_SYSTEM_VAR
 #define TRACE_SYSTEM_VAR TRACE_SYSTEM
@@ -95,17 +95,17 @@ TRACE_MAKE_SYSTEM_STR();
 
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \
-	struct ftrace_raw_##name { \
+	struct trace_event_raw_##name { \
 		struct trace_entry ent; \
 		tstruct \
 		char __data[0]; \
 	}; \
 \
-	static struct ftrace_event_class event_class_##name;
+	static struct trace_event_class event_class_##name;
 
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, name, proto, args) \
-	static struct ftrace_event_call __used \
+	static struct trace_event_call __used \
 	__attribute__((__aligned__(4))) event_##name
 
 #undef DEFINE_EVENT_FN
@@ -138,7 +138,7 @@ TRACE_MAKE_SYSTEM_STR();
 *
 * Include the following:
 *
- * struct ftrace_data_offsets_<call> {
+ * struct trace_event_data_offsets_<call> {
 *	u32 <item1>;
 *	u32 <item2>;
 *	[...]
@@ -178,7 +178,7 @@ TRACE_MAKE_SYSTEM_STR();
 
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
-	struct ftrace_data_offsets_##call { \
+	struct trace_event_data_offsets_##call { \
 		tstruct; \
 	};
 
@@ -203,10 +203,10 @@ TRACE_MAKE_SYSTEM_STR();
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
- * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
+ * trace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *	struct trace_seq *s = &iter->seq;
- *	struct ftrace_raw_<call> *field; <-- defined in stage 1
+ *	struct trace_event_raw_<call> *field; <-- defined in stage 1
 *	struct trace_entry *entry;
 *	struct trace_seq *p = &iter->tmp_seq;
 *	int ret;
@@ -258,7 +258,7 @@ TRACE_MAKE_SYSTEM_STR();
 		void *__bitmask = __get_dynamic_array(field); \
 		unsigned int __bitmask_size; \
 		__bitmask_size = __get_dynamic_array_len(field); \
-		ftrace_print_bitmask_seq(p, __bitmask, __bitmask_size); \
+		trace_print_bitmask_seq(p, __bitmask, __bitmask_size); \
 	})
 
 #undef __print_flags
@@ -266,7 +266,7 @@ TRACE_MAKE_SYSTEM_STR();
 	({ \
 		static const struct trace_print_flags __flags[] = \
 		{ flag_array, { -1, NULL }}; \
-		ftrace_print_flags_seq(p, delim, flag, __flags); \
+		trace_print_flags_seq(p, delim, flag, __flags); \
 	})
 
 #undef __print_symbolic
@@ -274,7 +274,7 @@ TRACE_MAKE_SYSTEM_STR();
 	({ \
 		static const struct trace_print_flags symbols[] = \
 		{ symbol_array, { -1, NULL }}; \
-		ftrace_print_symbols_seq(p, value, symbols); \
+		trace_print_symbols_seq(p, value, symbols); \
 	})
 
 #undef __print_symbolic_u64
@@ -283,7 +283,7 @@ TRACE_MAKE_SYSTEM_STR();
 	({ \
 		static const struct trace_print_flags_u64 symbols[] = \
 		{ symbol_array, { -1, NULL } }; \
-		ftrace_print_symbols_seq_u64(p, value, symbols); \
+		trace_print_symbols_seq_u64(p, value, symbols); \
 	})
 #else
 #define __print_symbolic_u64(value, symbol_array...) \
@@ -291,30 +291,30 @@ TRACE_MAKE_SYSTEM_STR();
 #endif
 
 #undef __print_hex
-#define __print_hex(buf, buf_len) ftrace_print_hex_seq(p, buf, buf_len)
+#define __print_hex(buf, buf_len) trace_print_hex_seq(p, buf, buf_len)
 
 #undef __print_array
 #define __print_array(array, count, el_size) \
 	({ \
 		BUILD_BUG_ON(el_size != 1 && el_size != 2 && \
 			     el_size != 4 && el_size != 8); \
-		ftrace_print_array_seq(p, array, count, el_size); \
+		trace_print_array_seq(p, array, count, el_size); \
 	})
 
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
 static notrace enum print_line_t \
-ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
+trace_raw_output_##call(struct trace_iterator *iter, int flags, \
 			 struct trace_event *trace_event) \
 { \
 	struct trace_seq *s = &iter->seq; \
 	struct trace_seq __maybe_unused *p = &iter->tmp_seq; \
-	struct ftrace_raw_##call *field; \
+	struct trace_event_raw_##call *field; \
 	int ret; \
 \
 	field = (typeof(field))iter->ent; \
 \
-	ret = ftrace_raw_output_prep(iter, trace_event); \
+	ret = trace_raw_output_prep(iter, trace_event); \
 	if (ret != TRACE_TYPE_HANDLED) \
 		return ret; \
 \
@@ -322,17 +322,17 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
 \
 	return trace_handle_return(s); \
 } \
-static struct trace_event_functions ftrace_event_type_funcs_##call = { \
-	.trace			= ftrace_raw_output_##call, \
+static struct trace_event_functions trace_event_type_funcs_##call = { \
+	.trace			= trace_raw_output_##call, \
 };
 
 #undef DEFINE_EVENT_PRINT
 #define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
 static notrace enum print_line_t \
-ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
+trace_raw_output_##call(struct trace_iterator *iter, int flags, \
 			 struct trace_event *event) \
 { \
-	struct ftrace_raw_##template *field; \
+	struct trace_event_raw_##template *field; \
 	struct trace_entry *entry; \
 	struct trace_seq *p = &iter->tmp_seq; \
 \
@@ -346,10 +346,10 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
 	field = (typeof(field))entry; \
 \
 	trace_seq_init(p); \
-	return ftrace_output_call(iter, #call, print); \
+	return trace_output_call(iter, #call, print); \
 } \
-static struct trace_event_functions ftrace_event_type_funcs_##call = { \
-	.trace			= ftrace_raw_output_##call, \
+static struct trace_event_functions trace_event_type_funcs_##call = { \
+	.trace			= trace_raw_output_##call, \
 };
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
@@ -407,9 +407,9 @@ static struct trace_event_functions ftrace_event_type_funcs_##call = { \
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \
 static int notrace __init \
-ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
+trace_event_define_fields_##call(struct trace_event_call *event_call) \
 { \
-	struct ftrace_raw_##call field; \
+	struct trace_event_raw_##call field; \
 	int ret; \
 \
 	tstruct; \
@@ -485,12 +485,12 @@ ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
 
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
-static inline notrace int ftrace_get_offsets_##call( \
-	struct ftrace_data_offsets_##call *__data_offsets, proto) \
+static inline notrace int trace_event_get_offsets_##call( \
+	struct trace_event_data_offsets_##call *__data_offsets, proto) \
 { \
 	int __data_size = 0; \
 	int __maybe_unused __item_length; \
-	struct ftrace_raw_##call __maybe_unused *entry; \
+	struct trace_event_raw_##call __maybe_unused *entry; \
 \
 	tstruct; \
 \
@@ -506,354 +506,3 @@ static inline notrace int ftrace_get_offsets_##call( \
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-/*
- * Stage 4 of the trace events.
- *
- * Override the macros in <trace/trace_events.h> to include the following:
- *
- * For those macros defined with TRACE_EVENT:
- *
- * static struct ftrace_event_call event_<call>;
- *
- * static void ftrace_raw_event_<call>(void *__data, proto)
- * {
- *	struct ftrace_event_file *ftrace_file = __data;
- *	struct ftrace_event_call *event_call = ftrace_file->event_call;
- *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
- *	unsigned long eflags = ftrace_file->flags;
- *	enum event_trigger_type __tt = ETT_NONE;
- *	struct ring_buffer_event *event;
- *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
- *	struct ring_buffer *buffer;
- *	unsigned long irq_flags;
- *	int __data_size;
- *	int pc;
- *
- *	if (!(eflags & FTRACE_EVENT_FL_TRIGGER_COND)) {
- *		if (eflags & FTRACE_EVENT_FL_TRIGGER_MODE)
- *			event_triggers_call(ftrace_file, NULL);
- *		if (eflags & FTRACE_EVENT_FL_SOFT_DISABLED)
- *			return;
- *	}
- *
- *	local_save_flags(irq_flags);
- *	pc = preempt_count();
- *
- *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
- *
- *	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
- *				  event_<call>->event.type,
- *				  sizeof(*entry) + __data_size,
- *				  irq_flags, pc);
- *	if (!event)
- *		return;
- *	entry = ring_buffer_event_data(event);
- *
- *	{ <assign>; } <-- Here we assign the entries by the __field and
- *			  __array macros.
- *
- *	if (eflags & FTRACE_EVENT_FL_TRIGGER_COND)
- *		__tt = event_triggers_call(ftrace_file, entry);
- *
- *	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
- *		     &ftrace_file->flags))
- *		ring_buffer_discard_commit(buffer, event);
- *	else if (!filter_check_discard(ftrace_file, entry, buffer, event))
- *		trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
563 | * | ||
564 | * if (__tt) | ||
565 | * event_triggers_post_call(ftrace_file, __tt); | ||
566 | * } | ||
567 | * | ||
568 | * static struct trace_event ftrace_event_type_<call> = { | ||
569 | * .trace = ftrace_raw_output_<call>, <-- stage 2 | ||
570 | * }; | ||
571 | * | ||
572 | * static char print_fmt_<call>[] = <TP_printk>; | ||
573 | * | ||
574 | * static struct ftrace_event_class __used event_class_<template> = { | ||
575 | * .system = "<system>", | ||
576 | * .define_fields = ftrace_define_fields_<call>, | ||
577 | * .fields = LIST_HEAD_INIT(event_class_##call.fields), | ||
578 | * .raw_init = trace_event_raw_init, | ||
579 | * .probe = ftrace_raw_event_##call, | ||
580 | * .reg = ftrace_event_reg, | ||
581 | * }; | ||
582 | * | ||
583 | * static struct ftrace_event_call event_<call> = { | ||
584 | * .class = event_class_<template>, | ||
585 | * { | ||
586 | * .tp = &__tracepoint_<call>, | ||
587 | * }, | ||
588 | * .event = &ftrace_event_type_<call>, | ||
589 | * .print_fmt = print_fmt_<call>, | ||
590 | * .flags = TRACE_EVENT_FL_TRACEPOINT, | ||
591 | * }; | ||
592 | * // it's only safe to use pointers when doing linker tricks to | ||
593 | * // create an array. | ||
594 | * static struct ftrace_event_call __used | ||
595 | * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>; | ||
596 | * | ||
597 | */ | ||
598 | |||
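For orientation, a minimal TRACE_EVENT() definition that exercises the stages the removed comment above documents might look like the following sketch; the event name and fields are hypothetical, not part of this patch:

	TRACE_EVENT(sample_event,

		TP_PROTO(int value, const char *name),

		TP_ARGS(value, name),

		TP_STRUCT__entry(
			__field(int, value)
			__string(name, name)
		),

		TP_fast_assign(
			__entry->value = value;
			__assign_str(name, name);
		),

		TP_printk("name=%s value=%d", __get_str(name), __entry->value)
	);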
599 | #ifdef CONFIG_PERF_EVENTS | ||
600 | |||
601 | #define _TRACE_PERF_PROTO(call, proto) \ | ||
602 | static notrace void \ | ||
603 | perf_trace_##call(void *__data, proto); | ||
604 | |||
605 | #define _TRACE_PERF_INIT(call) \ | ||
606 | .perf_probe = perf_trace_##call, | ||
607 | |||
608 | #else | ||
609 | #define _TRACE_PERF_PROTO(call, proto) | ||
610 | #define _TRACE_PERF_INIT(call) | ||
611 | #endif /* CONFIG_PERF_EVENTS */ | ||
612 | |||
613 | #undef __entry | ||
614 | #define __entry entry | ||
615 | |||
616 | #undef __field | ||
617 | #define __field(type, item) | ||
618 | |||
619 | #undef __field_struct | ||
620 | #define __field_struct(type, item) | ||
621 | |||
622 | #undef __array | ||
623 | #define __array(type, item, len) | ||
624 | |||
625 | #undef __dynamic_array | ||
626 | #define __dynamic_array(type, item, len) \ | ||
627 | __entry->__data_loc_##item = __data_offsets.item; | ||
628 | |||
629 | #undef __string | ||
630 | #define __string(item, src) __dynamic_array(char, item, -1) | ||
631 | |||
632 | #undef __assign_str | ||
633 | #define __assign_str(dst, src) \ | ||
634 | strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)"); | ||
635 | |||
636 | #undef __bitmask | ||
637 | #define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1) | ||
638 | |||
639 | #undef __get_bitmask | ||
640 | #define __get_bitmask(field) (char *)__get_dynamic_array(field) | ||
641 | |||
642 | #undef __assign_bitmask | ||
643 | #define __assign_bitmask(dst, src, nr_bits) \ | ||
644 | memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits)) | ||
645 | |||
646 | #undef TP_fast_assign | ||
647 | #define TP_fast_assign(args...) args | ||
648 | |||
649 | #undef __perf_addr | ||
650 | #define __perf_addr(a) (a) | ||
651 | |||
652 | #undef __perf_count | ||
653 | #define __perf_count(c) (c) | ||
654 | |||
655 | #undef __perf_task | ||
656 | #define __perf_task(t) (t) | ||
657 | |||
658 | #undef DECLARE_EVENT_CLASS | ||
659 | #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ | ||
660 | \ | ||
661 | static notrace void \ | ||
662 | ftrace_raw_event_##call(void *__data, proto) \ | ||
663 | { \ | ||
664 | struct ftrace_event_file *ftrace_file = __data; \ | ||
665 | struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ | ||
666 | struct ftrace_event_buffer fbuffer; \ | ||
667 | struct ftrace_raw_##call *entry; \ | ||
668 | int __data_size; \ | ||
669 | \ | ||
670 | if (ftrace_trigger_soft_disabled(ftrace_file)) \ | ||
671 | return; \ | ||
672 | \ | ||
673 | __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \ | ||
674 | \ | ||
675 | entry = ftrace_event_buffer_reserve(&fbuffer, ftrace_file, \ | ||
676 | sizeof(*entry) + __data_size); \ | ||
677 | \ | ||
678 | if (!entry) \ | ||
679 | return; \ | ||
680 | \ | ||
681 | tstruct \ | ||
682 | \ | ||
683 | { assign; } \ | ||
684 | \ | ||
685 | ftrace_event_buffer_commit(&fbuffer); \ | ||
686 | } | ||
687 | /* | ||
688 | * The ftrace_test_probe is compiled out; it is only here as a build-time check | ||
689 | * to make sure that if the tracepoint handling changes, the ftrace probe will | ||
690 | * fail to compile unless it too is updated. | ||
691 | */ | ||
692 | |||
693 | #undef DEFINE_EVENT | ||
694 | #define DEFINE_EVENT(template, call, proto, args) \ | ||
695 | static inline void ftrace_test_probe_##call(void) \ | ||
696 | { \ | ||
697 | check_trace_callback_type_##call(ftrace_raw_event_##template); \ | ||
698 | } | ||
699 | |||
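At a call site, firing such an event is a single tracepoint call; when the event is enabled, the ftrace_raw_event_<template> probe above runs. A sketch using the hypothetical event from earlier:

	/* illustrative call site; sample_event is hypothetical */
	trace_sample_event(42, "widget");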
700 | #undef DEFINE_EVENT_PRINT | ||
701 | #define DEFINE_EVENT_PRINT(template, name, proto, args, print) | ||
702 | |||
703 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | ||
704 | |||
705 | #undef __entry | ||
706 | #define __entry REC | ||
707 | |||
708 | #undef __print_flags | ||
709 | #undef __print_symbolic | ||
710 | #undef __print_hex | ||
711 | #undef __get_dynamic_array | ||
712 | #undef __get_dynamic_array_len | ||
713 | #undef __get_str | ||
714 | #undef __get_bitmask | ||
715 | #undef __print_array | ||
716 | |||
717 | #undef TP_printk | ||
718 | #define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args) | ||
719 | |||
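With __entry rewritten to REC and the accessor macros left unexpanded, the TP_printk() redefinition above stringifies the arguments into the print_fmt string exposed in each event's format file. For the hypothetical event, roughly:

	static char print_fmt_sample_event[] =
		"\"name=%s value=%d\", __get_str(name), REC->value";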
720 | #undef DECLARE_EVENT_CLASS | ||
721 | #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ | ||
722 | _TRACE_PERF_PROTO(call, PARAMS(proto)); \ | ||
723 | static char print_fmt_##call[] = print; \ | ||
724 | static struct ftrace_event_class __used __refdata event_class_##call = { \ | ||
725 | .system = TRACE_SYSTEM_STRING, \ | ||
726 | .define_fields = ftrace_define_fields_##call, \ | ||
727 | .fields = LIST_HEAD_INIT(event_class_##call.fields),\ | ||
728 | .raw_init = trace_event_raw_init, \ | ||
729 | .probe = ftrace_raw_event_##call, \ | ||
730 | .reg = ftrace_event_reg, \ | ||
731 | _TRACE_PERF_INIT(call) \ | ||
732 | }; | ||
733 | |||
734 | #undef DEFINE_EVENT | ||
735 | #define DEFINE_EVENT(template, call, proto, args) \ | ||
736 | \ | ||
737 | static struct ftrace_event_call __used event_##call = { \ | ||
738 | .class = &event_class_##template, \ | ||
739 | { \ | ||
740 | .tp = &__tracepoint_##call, \ | ||
741 | }, \ | ||
742 | .event.funcs = &ftrace_event_type_funcs_##template, \ | ||
743 | .print_fmt = print_fmt_##template, \ | ||
744 | .flags = TRACE_EVENT_FL_TRACEPOINT, \ | ||
745 | }; \ | ||
746 | static struct ftrace_event_call __used \ | ||
747 | __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call | ||
748 | |||
749 | #undef DEFINE_EVENT_PRINT | ||
750 | #define DEFINE_EVENT_PRINT(template, call, proto, args, print) \ | ||
751 | \ | ||
752 | static char print_fmt_##call[] = print; \ | ||
753 | \ | ||
754 | static struct ftrace_event_call __used event_##call = { \ | ||
755 | .class = &event_class_##template, \ | ||
756 | { \ | ||
757 | .tp = &__tracepoint_##call, \ | ||
758 | }, \ | ||
759 | .event.funcs = &ftrace_event_type_funcs_##call, \ | ||
760 | .print_fmt = print_fmt_##call, \ | ||
761 | .flags = TRACE_EVENT_FL_TRACEPOINT, \ | ||
762 | }; \ | ||
763 | static struct ftrace_event_call __used \ | ||
764 | __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call | ||
765 | |||
766 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | ||
767 | |||
768 | #undef TRACE_SYSTEM_VAR | ||
769 | |||
770 | #ifdef CONFIG_PERF_EVENTS | ||
771 | |||
772 | #undef __entry | ||
773 | #define __entry entry | ||
774 | |||
775 | #undef __get_dynamic_array | ||
776 | #define __get_dynamic_array(field) \ | ||
777 | ((void *)__entry + (__entry->__data_loc_##field & 0xffff)) | ||
778 | |||
779 | #undef __get_dynamic_array_len | ||
780 | #define __get_dynamic_array_len(field) \ | ||
781 | ((__entry->__data_loc_##field >> 16) & 0xffff) | ||
782 | |||
783 | #undef __get_str | ||
784 | #define __get_str(field) (char *)__get_dynamic_array(field) | ||
785 | |||
786 | #undef __get_bitmask | ||
787 | #define __get_bitmask(field) (char *)__get_dynamic_array(field) | ||
788 | |||
789 | #undef __perf_addr | ||
790 | #define __perf_addr(a) (__addr = (a)) | ||
791 | |||
792 | #undef __perf_count | ||
793 | #define __perf_count(c) (__count = (c)) | ||
794 | |||
795 | #undef __perf_task | ||
796 | #define __perf_task(t) (__task = (t)) | ||
797 | |||
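The accessors above rely on the __data_loc packing of dynamic arrays: byte offset from the entry start in the low 16 bits, length in the high 16. A standalone userspace illustration with invented values:

	#include <stdio.h>

	int main(void)
	{
		/* length 12 at byte offset 40 from the entry start */
		unsigned int data_loc = (12u << 16) | 40u;

		printf("offset = %u\n", data_loc & 0xffff);         /* 40 */
		printf("length = %u\n", (data_loc >> 16) & 0xffff); /* 12 */
		return 0;
	}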
798 | #undef DECLARE_EVENT_CLASS | ||
799 | #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ | ||
800 | static notrace void \ | ||
801 | perf_trace_##call(void *__data, proto) \ | ||
802 | { \ | ||
803 | struct ftrace_event_call *event_call = __data; \ | ||
804 | struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ | ||
805 | struct ftrace_raw_##call *entry; \ | ||
806 | struct pt_regs *__regs; \ | ||
807 | u64 __addr = 0, __count = 1; \ | ||
808 | struct task_struct *__task = NULL; \ | ||
809 | struct hlist_head *head; \ | ||
810 | int __entry_size; \ | ||
811 | int __data_size; \ | ||
812 | int rctx; \ | ||
813 | \ | ||
814 | __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \ | ||
815 | \ | ||
816 | head = this_cpu_ptr(event_call->perf_events); \ | ||
817 | if (__builtin_constant_p(!__task) && !__task && \ | ||
818 | hlist_empty(head)) \ | ||
819 | return; \ | ||
820 | \ | ||
821 | __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\ | ||
822 | sizeof(u64)); \ | ||
823 | __entry_size -= sizeof(u32); \ | ||
824 | \ | ||
825 | entry = perf_trace_buf_prepare(__entry_size, \ | ||
826 | event_call->event.type, &__regs, &rctx); \ | ||
827 | if (!entry) \ | ||
828 | return; \ | ||
829 | \ | ||
830 | perf_fetch_caller_regs(__regs); \ | ||
831 | \ | ||
832 | tstruct \ | ||
833 | \ | ||
834 | { assign; } \ | ||
835 | \ | ||
836 | perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \ | ||
837 | __count, __regs, head, __task); \ | ||
838 | } | ||
839 | |||
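The __entry_size arithmetic in the probe above rounds the record so that, once perf prepends its u32 size word, the total stays u64-aligned. A standalone check of the math, with invented sizes:

	#include <stdio.h>

	#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		unsigned long data_size = 7;	/* dynamic payload (hypothetical) */
		unsigned long entry_size = 20;	/* fixed fields (hypothetical) */
		unsigned long size;

		size = ALIGN(data_size + entry_size + sizeof(unsigned int),
			     sizeof(unsigned long long));
		size -= sizeof(unsigned int);

		/* 28: with the 4-byte perf size word that makes 32, u64-aligned */
		printf("%lu\n", size);
		return 0;
	}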
840 | /* | ||
841 | * This part is compiled out; it is only here as a build-time check | ||
842 | * to make sure that if the tracepoint handling changes, the | ||
843 | * perf probe will fail to compile unless it too is updated. | ||
844 | */ | ||
845 | #undef DEFINE_EVENT | ||
846 | #define DEFINE_EVENT(template, call, proto, args) \ | ||
847 | static inline void perf_test_probe_##call(void) \ | ||
848 | { \ | ||
849 | check_trace_callback_type_##call(perf_trace_##template); \ | ||
850 | } | ||
851 | |||
852 | |||
853 | #undef DEFINE_EVENT_PRINT | ||
854 | #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ | ||
855 | DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) | ||
856 | |||
857 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | ||
858 | #endif /* CONFIG_PERF_EVENTS */ | ||
859 | |||
diff --git a/kernel/events/core.c b/kernel/events/core.c index bc95b6a6220b..d1f37ddd1960 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
@@ -36,7 +36,7 @@ | |||
36 | #include <linux/kernel_stat.h> | 36 | #include <linux/kernel_stat.h> |
37 | #include <linux/cgroup.h> | 37 | #include <linux/cgroup.h> |
38 | #include <linux/perf_event.h> | 38 | #include <linux/perf_event.h> |
39 | #include <linux/ftrace_event.h> | 39 | #include <linux/trace_events.h> |
40 | #include <linux/hw_breakpoint.h> | 40 | #include <linux/hw_breakpoint.h> |
41 | #include <linux/mm_types.h> | 41 | #include <linux/mm_types.h> |
42 | #include <linux/module.h> | 42 | #include <linux/module.h> |
diff --git a/kernel/module.c b/kernel/module.c index cfc9e843a924..b38f96a183b5 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
@@ -18,7 +18,7 @@ | |||
18 | */ | 18 | */ |
19 | #include <linux/export.h> | 19 | #include <linux/export.h> |
20 | #include <linux/moduleloader.h> | 20 | #include <linux/moduleloader.h> |
21 | #include <linux/ftrace_event.h> | 21 | #include <linux/trace_events.h> |
22 | #include <linux/init.h> | 22 | #include <linux/init.h> |
23 | #include <linux/kallsyms.h> | 23 | #include <linux/kallsyms.h> |
24 | #include <linux/file.h> | 24 | #include <linux/file.h> |
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c index 591af0cb7b9f..c291bd65d2cb 100644 --- a/kernel/rcu/tiny.c +++ b/kernel/rcu/tiny.c | |||
@@ -35,7 +35,7 @@ | |||
35 | #include <linux/time.h> | 35 | #include <linux/time.h> |
36 | #include <linux/cpu.h> | 36 | #include <linux/cpu.h> |
37 | #include <linux/prefetch.h> | 37 | #include <linux/prefetch.h> |
38 | #include <linux/ftrace_event.h> | 38 | #include <linux/trace_events.h> |
39 | 39 | ||
40 | #include "rcu.h" | 40 | #include "rcu.h" |
41 | 41 | ||
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index add042926a66..65137bc28b2b 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c | |||
@@ -54,7 +54,7 @@ | |||
54 | #include <linux/delay.h> | 54 | #include <linux/delay.h> |
55 | #include <linux/stop_machine.h> | 55 | #include <linux/stop_machine.h> |
56 | #include <linux/random.h> | 56 | #include <linux/random.h> |
57 | #include <linux/ftrace_event.h> | 57 | #include <linux/trace_events.h> |
58 | #include <linux/suspend.h> | 58 | #include <linux/suspend.h> |
59 | 59 | ||
60 | #include "tree.h" | 60 | #include "tree.h" |
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 4eeae4674b5a..b3e6b39b6cf9 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c | |||
@@ -1448,14 +1448,14 @@ static struct trace_event trace_blk_event = { | |||
1448 | 1448 | ||
1449 | static int __init init_blk_tracer(void) | 1449 | static int __init init_blk_tracer(void) |
1450 | { | 1450 | { |
1451 | if (!register_ftrace_event(&trace_blk_event)) { | 1451 | if (!register_trace_event(&trace_blk_event)) { |
1452 | pr_warning("Warning: could not register block events\n"); | 1452 | pr_warning("Warning: could not register block events\n"); |
1453 | return 1; | 1453 | return 1; |
1454 | } | 1454 | } |
1455 | 1455 | ||
1456 | if (register_tracer(&blk_tracer) != 0) { | 1456 | if (register_tracer(&blk_tracer) != 0) { |
1457 | pr_warning("Warning: could not register the block tracer\n"); | 1457 | pr_warning("Warning: could not register the block tracer\n"); |
1458 | unregister_ftrace_event(&trace_blk_event); | 1458 | unregister_trace_event(&trace_blk_event); |
1459 | return 1; | 1459 | return 1; |
1460 | } | 1460 | } |
1461 | 1461 | ||
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 0315d43176d8..6260717c18e3 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com> | 4 | * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com> |
5 | */ | 5 | */ |
6 | #include <linux/ftrace_event.h> | 6 | #include <linux/trace_events.h> |
7 | #include <linux/ring_buffer.h> | 7 | #include <linux/ring_buffer.h> |
8 | #include <linux/trace_clock.h> | 8 | #include <linux/trace_clock.h> |
9 | #include <linux/trace_seq.h> | 9 | #include <linux/trace_seq.h> |
@@ -115,63 +115,11 @@ int ring_buffer_print_entry_header(struct trace_seq *s) | |||
115 | * | 115 | * |
116 | */ | 116 | */ |
117 | 117 | ||
118 | /* | ||
119 | * A fast way to enable or disable all ring buffers is to | ||
120 | * call tracing_on or tracing_off. Turning off the ring buffers | ||
121 | * prevents all ring buffers from being recorded to. | ||
122 | * Turning this switch on makes it OK to write to the | ||
123 | * ring buffer, if the ring buffer is enabled itself. | ||
124 | * | ||
125 | * There are three layers that must be on in order to write | ||
126 | * to the ring buffer. | ||
127 | * | ||
128 | * 1) This global flag must be set. | ||
129 | * 2) The ring buffer must be enabled for recording. | ||
130 | * 3) The per cpu buffer must be enabled for recording. | ||
131 | * | ||
132 | * In case of an anomaly, this global flag has a bit set that | ||
133 | * will permanently disable all ring buffers. | ||
134 | */ | ||
135 | |||
136 | /* | ||
137 | * Global flag to disable all recording to ring buffers | ||
138 | * This has two bits: ON, DISABLED | ||
139 | * | ||
140 | * ON DISABLED | ||
141 | * ---- ---------- | ||
142 | * 0 0 : ring buffers are off | ||
143 | * 1 0 : ring buffers are on | ||
144 | * X 1 : ring buffers are permanently disabled | ||
145 | */ | ||
146 | |||
147 | enum { | ||
148 | RB_BUFFERS_ON_BIT = 0, | ||
149 | RB_BUFFERS_DISABLED_BIT = 1, | ||
150 | }; | ||
151 | |||
152 | enum { | ||
153 | RB_BUFFERS_ON = 1 << RB_BUFFERS_ON_BIT, | ||
154 | RB_BUFFERS_DISABLED = 1 << RB_BUFFERS_DISABLED_BIT, | ||
155 | }; | ||
156 | |||
157 | static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON; | ||
158 | |||
159 | /* Used for individual buffers (after the counter) */ | 118 | /* Used for individual buffers (after the counter) */ |
160 | #define RB_BUFFER_OFF (1 << 20) | 119 | #define RB_BUFFER_OFF (1 << 20) |
161 | 120 | ||
162 | #define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data) | 121 | #define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data) |
163 | 122 | ||
164 | /** | ||
165 | * tracing_off_permanent - permanently disable ring buffers | ||
166 | * | ||
167 | * This function, once called, will disable all ring buffers | ||
168 | * permanently. | ||
169 | */ | ||
170 | void tracing_off_permanent(void) | ||
171 | { | ||
172 | set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags); | ||
173 | } | ||
174 | |||
175 | #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array)) | 123 | #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array)) |
176 | #define RB_ALIGNMENT 4U | 124 | #define RB_ALIGNMENT 4U |
177 | #define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX) | 125 | #define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX) |
@@ -452,6 +400,23 @@ struct rb_irq_work { | |||
452 | }; | 400 | }; |
453 | 401 | ||
454 | /* | 402 | /* |
403 | * Used for which event context the event is in. | ||
404 | * NMI = 0 | ||
405 | * IRQ = 1 | ||
406 | * SOFTIRQ = 2 | ||
407 | * NORMAL = 3 | ||
408 | * | ||
409 | * See trace_recursive_lock() comment below for more details. | ||
410 | */ | ||
411 | enum { | ||
412 | RB_CTX_NMI, | ||
413 | RB_CTX_IRQ, | ||
414 | RB_CTX_SOFTIRQ, | ||
415 | RB_CTX_NORMAL, | ||
416 | RB_CTX_MAX | ||
417 | }; | ||
418 | |||
419 | /* | ||
455 | * head_page == tail_page && head == tail then buffer is empty. | 420 | * head_page == tail_page && head == tail then buffer is empty. |
456 | */ | 421 | */ |
457 | struct ring_buffer_per_cpu { | 422 | struct ring_buffer_per_cpu { |
@@ -462,6 +427,7 @@ struct ring_buffer_per_cpu { | |||
462 | arch_spinlock_t lock; | 427 | arch_spinlock_t lock; |
463 | struct lock_class_key lock_key; | 428 | struct lock_class_key lock_key; |
464 | unsigned int nr_pages; | 429 | unsigned int nr_pages; |
430 | unsigned int current_context; | ||
465 | struct list_head *pages; | 431 | struct list_head *pages; |
466 | struct buffer_page *head_page; /* read from head */ | 432 | struct buffer_page *head_page; /* read from head */ |
467 | struct buffer_page *tail_page; /* write to tail */ | 433 | struct buffer_page *tail_page; /* write to tail */ |
@@ -2224,7 +2190,7 @@ static unsigned rb_calculate_event_length(unsigned length) | |||
2224 | 2190 | ||
2225 | /* zero length can cause confusions */ | 2191 | /* zero length can cause confusions */ |
2226 | if (!length) | 2192 | if (!length) |
2227 | length = 1; | 2193 | length++; |
2228 | 2194 | ||
2229 | if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) | 2195 | if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) |
2230 | length += sizeof(event.array[0]); | 2196 | length += sizeof(event.array[0]); |
@@ -2636,8 +2602,6 @@ rb_reserve_next_event(struct ring_buffer *buffer, | |||
2636 | return NULL; | 2602 | return NULL; |
2637 | } | 2603 | } |
2638 | 2604 | ||
2639 | #ifdef CONFIG_TRACING | ||
2640 | |||
2641 | /* | 2605 | /* |
2642 | * The lock and unlock are done within a preempt disable section. | 2606 | * The lock and unlock are done within a preempt disable section. |
2643 | * The current_context per_cpu variable can only be modified | 2607 | * The current_context per_cpu variable can only be modified |
@@ -2675,44 +2639,38 @@ rb_reserve_next_event(struct ring_buffer *buffer, | |||
2675 | * just so happens that it is the same bit corresponding to | 2639 | * just so happens that it is the same bit corresponding to |
2676 | * the current context. | 2640 | * the current context. |
2677 | */ | 2641 | */ |
2678 | static DEFINE_PER_CPU(unsigned int, current_context); | ||
2679 | 2642 | ||
2680 | static __always_inline int trace_recursive_lock(void) | 2643 | static __always_inline int |
2644 | trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer) | ||
2681 | { | 2645 | { |
2682 | unsigned int val = __this_cpu_read(current_context); | 2646 | unsigned int val = cpu_buffer->current_context; |
2683 | int bit; | 2647 | int bit; |
2684 | 2648 | ||
2685 | if (in_interrupt()) { | 2649 | if (in_interrupt()) { |
2686 | if (in_nmi()) | 2650 | if (in_nmi()) |
2687 | bit = 0; | 2651 | bit = RB_CTX_NMI; |
2688 | else if (in_irq()) | 2652 | else if (in_irq()) |
2689 | bit = 1; | 2653 | bit = RB_CTX_IRQ; |
2690 | else | 2654 | else |
2691 | bit = 2; | 2655 | bit = RB_CTX_SOFTIRQ; |
2692 | } else | 2656 | } else |
2693 | bit = 3; | 2657 | bit = RB_CTX_NORMAL; |
2694 | 2658 | ||
2695 | if (unlikely(val & (1 << bit))) | 2659 | if (unlikely(val & (1 << bit))) |
2696 | return 1; | 2660 | return 1; |
2697 | 2661 | ||
2698 | val |= (1 << bit); | 2662 | val |= (1 << bit); |
2699 | __this_cpu_write(current_context, val); | 2663 | cpu_buffer->current_context = val; |
2700 | 2664 | ||
2701 | return 0; | 2665 | return 0; |
2702 | } | 2666 | } |
2703 | 2667 | ||
2704 | static __always_inline void trace_recursive_unlock(void) | 2668 | static __always_inline void |
2669 | trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer) | ||
2705 | { | 2670 | { |
2706 | __this_cpu_and(current_context, __this_cpu_read(current_context) - 1); | 2671 | cpu_buffer->current_context &= cpu_buffer->current_context - 1; |
2707 | } | 2672 | } |
2708 | 2673 | ||
2709 | #else | ||
2710 | |||
2711 | #define trace_recursive_lock() (0) | ||
2712 | #define trace_recursive_unlock() do { } while (0) | ||
2713 | |||
2714 | #endif | ||
2715 | |||
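The recursion check now lives in the per-buffer current_context word instead of a per-CPU variable, but the bit game is unchanged. A standalone userspace sketch of how the RB_CTX_* bits nest and unwind:

	#include <stdio.h>

	enum { RB_CTX_NMI, RB_CTX_IRQ, RB_CTX_SOFTIRQ, RB_CTX_NORMAL };

	static unsigned int current_context; /* models cpu_buffer->current_context */

	static int lock_ctx(int bit)
	{
		if (current_context & (1U << bit))
			return 1;	/* same context re-entered: recursion */
		current_context |= 1U << bit;
		return 0;
	}

	static void unlock_ctx(void)
	{
		/* clear the lowest set bit, i.e. the innermost context */
		current_context &= current_context - 1;
	}

	int main(void)
	{
		lock_ctx(RB_CTX_NORMAL);	/* 0b1000 */
		lock_ctx(RB_CTX_IRQ);		/* IRQ preempts: 0b1010 */
		printf("recursed: %d\n", lock_ctx(RB_CTX_IRQ)); /* 1: rejected */
		unlock_ctx();			/* back to 0b1000 */
		unlock_ctx();			/* 0 */
		printf("context = %#x\n", current_context);
		return 0;
	}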
2716 | /** | 2674 | /** |
2717 | * ring_buffer_lock_reserve - reserve a part of the buffer | 2675 | * ring_buffer_lock_reserve - reserve a part of the buffer |
2718 | * @buffer: the ring buffer to reserve from | 2676 | * @buffer: the ring buffer to reserve from |
@@ -2735,41 +2693,37 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length) | |||
2735 | struct ring_buffer_event *event; | 2693 | struct ring_buffer_event *event; |
2736 | int cpu; | 2694 | int cpu; |
2737 | 2695 | ||
2738 | if (ring_buffer_flags != RB_BUFFERS_ON) | ||
2739 | return NULL; | ||
2740 | |||
2741 | /* If we are tracing schedule, we don't want to recurse */ | 2696 | /* If we are tracing schedule, we don't want to recurse */ |
2742 | preempt_disable_notrace(); | 2697 | preempt_disable_notrace(); |
2743 | 2698 | ||
2744 | if (atomic_read(&buffer->record_disabled)) | 2699 | if (unlikely(atomic_read(&buffer->record_disabled))) |
2745 | goto out_nocheck; | 2700 | goto out; |
2746 | |||
2747 | if (trace_recursive_lock()) | ||
2748 | goto out_nocheck; | ||
2749 | 2701 | ||
2750 | cpu = raw_smp_processor_id(); | 2702 | cpu = raw_smp_processor_id(); |
2751 | 2703 | ||
2752 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) | 2704 | if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask))) |
2753 | goto out; | 2705 | goto out; |
2754 | 2706 | ||
2755 | cpu_buffer = buffer->buffers[cpu]; | 2707 | cpu_buffer = buffer->buffers[cpu]; |
2756 | 2708 | ||
2757 | if (atomic_read(&cpu_buffer->record_disabled)) | 2709 | if (unlikely(atomic_read(&cpu_buffer->record_disabled))) |
2758 | goto out; | 2710 | goto out; |
2759 | 2711 | ||
2760 | if (length > BUF_MAX_DATA_SIZE) | 2712 | if (unlikely(length > BUF_MAX_DATA_SIZE)) |
2713 | goto out; | ||
2714 | |||
2715 | if (unlikely(trace_recursive_lock(cpu_buffer))) | ||
2761 | goto out; | 2716 | goto out; |
2762 | 2717 | ||
2763 | event = rb_reserve_next_event(buffer, cpu_buffer, length); | 2718 | event = rb_reserve_next_event(buffer, cpu_buffer, length); |
2764 | if (!event) | 2719 | if (!event) |
2765 | goto out; | 2720 | goto out_unlock; |
2766 | 2721 | ||
2767 | return event; | 2722 | return event; |
2768 | 2723 | ||
2724 | out_unlock: | ||
2725 | trace_recursive_unlock(cpu_buffer); | ||
2769 | out: | 2726 | out: |
2770 | trace_recursive_unlock(); | ||
2771 | |||
2772 | out_nocheck: | ||
2773 | preempt_enable_notrace(); | 2727 | preempt_enable_notrace(); |
2774 | return NULL; | 2728 | return NULL; |
2775 | } | 2729 | } |
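For reference, the write path reworked here is driven by the usual reserve/commit pairing; a sketch of a caller, with error handling trimmed and the payload invented:

	/* sketch only; assumes a valid struct ring_buffer *buffer */
	static void write_sample(struct ring_buffer *buffer, u64 value)
	{
		struct ring_buffer_event *event;
		u64 *body;

		event = ring_buffer_lock_reserve(buffer, sizeof(*body));
		if (!event)
			return;	/* disabled, recursing, or out of space */

		body = ring_buffer_event_data(event);
		*body = value;

		ring_buffer_unlock_commit(buffer, event);
	}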
@@ -2859,7 +2813,7 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer, | |||
2859 | 2813 | ||
2860 | rb_wakeups(buffer, cpu_buffer); | 2814 | rb_wakeups(buffer, cpu_buffer); |
2861 | 2815 | ||
2862 | trace_recursive_unlock(); | 2816 | trace_recursive_unlock(cpu_buffer); |
2863 | 2817 | ||
2864 | preempt_enable_notrace(); | 2818 | preempt_enable_notrace(); |
2865 | 2819 | ||
@@ -2970,7 +2924,7 @@ void ring_buffer_discard_commit(struct ring_buffer *buffer, | |||
2970 | out: | 2924 | out: |
2971 | rb_end_commit(cpu_buffer); | 2925 | rb_end_commit(cpu_buffer); |
2972 | 2926 | ||
2973 | trace_recursive_unlock(); | 2927 | trace_recursive_unlock(cpu_buffer); |
2974 | 2928 | ||
2975 | preempt_enable_notrace(); | 2929 | preempt_enable_notrace(); |
2976 | 2930 | ||
@@ -3000,9 +2954,6 @@ int ring_buffer_write(struct ring_buffer *buffer, | |||
3000 | int ret = -EBUSY; | 2954 | int ret = -EBUSY; |
3001 | int cpu; | 2955 | int cpu; |
3002 | 2956 | ||
3003 | if (ring_buffer_flags != RB_BUFFERS_ON) | ||
3004 | return -EBUSY; | ||
3005 | |||
3006 | preempt_disable_notrace(); | 2957 | preempt_disable_notrace(); |
3007 | 2958 | ||
3008 | if (atomic_read(&buffer->record_disabled)) | 2959 | if (atomic_read(&buffer->record_disabled)) |
@@ -3021,9 +2972,12 @@ int ring_buffer_write(struct ring_buffer *buffer, | |||
3021 | if (length > BUF_MAX_DATA_SIZE) | 2972 | if (length > BUF_MAX_DATA_SIZE) |
3022 | goto out; | 2973 | goto out; |
3023 | 2974 | ||
2975 | if (unlikely(trace_recursive_lock(cpu_buffer))) | ||
2976 | goto out; | ||
2977 | |||
3024 | event = rb_reserve_next_event(buffer, cpu_buffer, length); | 2978 | event = rb_reserve_next_event(buffer, cpu_buffer, length); |
3025 | if (!event) | 2979 | if (!event) |
3026 | goto out; | 2980 | goto out_unlock; |
3027 | 2981 | ||
3028 | body = rb_event_data(event); | 2982 | body = rb_event_data(event); |
3029 | 2983 | ||
@@ -3034,6 +2988,10 @@ int ring_buffer_write(struct ring_buffer *buffer, | |||
3034 | rb_wakeups(buffer, cpu_buffer); | 2988 | rb_wakeups(buffer, cpu_buffer); |
3035 | 2989 | ||
3036 | ret = 0; | 2990 | ret = 0; |
2991 | |||
2992 | out_unlock: | ||
2993 | trace_recursive_unlock(cpu_buffer); | ||
2994 | |||
3037 | out: | 2995 | out: |
3038 | preempt_enable_notrace(); | 2996 | preempt_enable_notrace(); |
3039 | 2997 | ||
@@ -3860,19 +3818,36 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts) | |||
3860 | } | 3818 | } |
3861 | EXPORT_SYMBOL_GPL(ring_buffer_iter_peek); | 3819 | EXPORT_SYMBOL_GPL(ring_buffer_iter_peek); |
3862 | 3820 | ||
3863 | static inline int rb_ok_to_lock(void) | 3821 | static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer) |
3864 | { | 3822 | { |
3823 | if (likely(!in_nmi())) { | ||
3824 | raw_spin_lock(&cpu_buffer->reader_lock); | ||
3825 | return true; | ||
3826 | } | ||
3827 | |||
3865 | /* | 3828 | /* |
3866 | * If an NMI die dumps out the content of the ring buffer | 3829 | * If an NMI die dumps out the content of the ring buffer |
3867 | * do not grab locks. We also permanently disable the ring | 3830 | * trylock must be used to prevent a deadlock if the NMI |
3868 | * buffer too. A one time deal is all you get from reading | 3831 | * preempted a task that holds the ring buffer locks. If |
3869 | * the ring buffer from an NMI. | 3832 | * we get the lock then all is fine, if not, then continue |
3833 | * to do the read, but this can corrupt the ring buffer, | ||
3834 | * so it must be permanently disabled from future writes. | ||
3835 | * Reading from NMI is a one-shot deal. | ||
3870 | */ | 3836 | */ |
3871 | if (likely(!in_nmi())) | 3837 | if (raw_spin_trylock(&cpu_buffer->reader_lock)) |
3872 | return 1; | 3838 | return true; |
3873 | 3839 | ||
3874 | tracing_off_permanent(); | 3840 | /* Continue without locking, but disable the ring buffer */ |
3875 | return 0; | 3841 | atomic_inc(&cpu_buffer->record_disabled); |
3842 | return false; | ||
3843 | } | ||
3844 | |||
3845 | static inline void | ||
3846 | rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked) | ||
3847 | { | ||
3848 | if (likely(locked)) | ||
3849 | raw_spin_unlock(&cpu_buffer->reader_lock); | ||
3850 | return; | ||
3876 | } | 3851 | } |
3877 | 3852 | ||
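The peek/consume/empty paths below then all follow one shape with the new helpers; schematically:

	local_irq_save(flags);
	dolock = rb_reader_lock(cpu_buffer);
	/* ... rb_buffer_peek() or rb_per_cpu_empty() ... */
	rb_reader_unlock(cpu_buffer, dolock);
	local_irq_restore(flags);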
3878 | /** | 3853 | /** |
@@ -3892,21 +3867,18 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts, | |||
3892 | struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; | 3867 | struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; |
3893 | struct ring_buffer_event *event; | 3868 | struct ring_buffer_event *event; |
3894 | unsigned long flags; | 3869 | unsigned long flags; |
3895 | int dolock; | 3870 | bool dolock; |
3896 | 3871 | ||
3897 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) | 3872 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
3898 | return NULL; | 3873 | return NULL; |
3899 | 3874 | ||
3900 | dolock = rb_ok_to_lock(); | ||
3901 | again: | 3875 | again: |
3902 | local_irq_save(flags); | 3876 | local_irq_save(flags); |
3903 | if (dolock) | 3877 | dolock = rb_reader_lock(cpu_buffer); |
3904 | raw_spin_lock(&cpu_buffer->reader_lock); | ||
3905 | event = rb_buffer_peek(cpu_buffer, ts, lost_events); | 3878 | event = rb_buffer_peek(cpu_buffer, ts, lost_events); |
3906 | if (event && event->type_len == RINGBUF_TYPE_PADDING) | 3879 | if (event && event->type_len == RINGBUF_TYPE_PADDING) |
3907 | rb_advance_reader(cpu_buffer); | 3880 | rb_advance_reader(cpu_buffer); |
3908 | if (dolock) | 3881 | rb_reader_unlock(cpu_buffer, dolock); |
3909 | raw_spin_unlock(&cpu_buffer->reader_lock); | ||
3910 | local_irq_restore(flags); | 3882 | local_irq_restore(flags); |
3911 | 3883 | ||
3912 | if (event && event->type_len == RINGBUF_TYPE_PADDING) | 3884 | if (event && event->type_len == RINGBUF_TYPE_PADDING) |
@@ -3959,9 +3931,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts, | |||
3959 | struct ring_buffer_per_cpu *cpu_buffer; | 3931 | struct ring_buffer_per_cpu *cpu_buffer; |
3960 | struct ring_buffer_event *event = NULL; | 3932 | struct ring_buffer_event *event = NULL; |
3961 | unsigned long flags; | 3933 | unsigned long flags; |
3962 | int dolock; | 3934 | bool dolock; |
3963 | |||
3964 | dolock = rb_ok_to_lock(); | ||
3965 | 3935 | ||
3966 | again: | 3936 | again: |
3967 | /* might be called in atomic */ | 3937 | /* might be called in atomic */ |
@@ -3972,8 +3942,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts, | |||
3972 | 3942 | ||
3973 | cpu_buffer = buffer->buffers[cpu]; | 3943 | cpu_buffer = buffer->buffers[cpu]; |
3974 | local_irq_save(flags); | 3944 | local_irq_save(flags); |
3975 | if (dolock) | 3945 | dolock = rb_reader_lock(cpu_buffer); |
3976 | raw_spin_lock(&cpu_buffer->reader_lock); | ||
3977 | 3946 | ||
3978 | event = rb_buffer_peek(cpu_buffer, ts, lost_events); | 3947 | event = rb_buffer_peek(cpu_buffer, ts, lost_events); |
3979 | if (event) { | 3948 | if (event) { |
@@ -3981,8 +3950,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts, | |||
3981 | rb_advance_reader(cpu_buffer); | 3950 | rb_advance_reader(cpu_buffer); |
3982 | } | 3951 | } |
3983 | 3952 | ||
3984 | if (dolock) | 3953 | rb_reader_unlock(cpu_buffer, dolock); |
3985 | raw_spin_unlock(&cpu_buffer->reader_lock); | ||
3986 | local_irq_restore(flags); | 3954 | local_irq_restore(flags); |
3987 | 3955 | ||
3988 | out: | 3956 | out: |
@@ -4263,21 +4231,17 @@ int ring_buffer_empty(struct ring_buffer *buffer) | |||
4263 | { | 4231 | { |
4264 | struct ring_buffer_per_cpu *cpu_buffer; | 4232 | struct ring_buffer_per_cpu *cpu_buffer; |
4265 | unsigned long flags; | 4233 | unsigned long flags; |
4266 | int dolock; | 4234 | bool dolock; |
4267 | int cpu; | 4235 | int cpu; |
4268 | int ret; | 4236 | int ret; |
4269 | 4237 | ||
4270 | dolock = rb_ok_to_lock(); | ||
4271 | |||
4272 | /* yes this is racy, but if you don't like the race, lock the buffer */ | 4238 | /* yes this is racy, but if you don't like the race, lock the buffer */ |
4273 | for_each_buffer_cpu(buffer, cpu) { | 4239 | for_each_buffer_cpu(buffer, cpu) { |
4274 | cpu_buffer = buffer->buffers[cpu]; | 4240 | cpu_buffer = buffer->buffers[cpu]; |
4275 | local_irq_save(flags); | 4241 | local_irq_save(flags); |
4276 | if (dolock) | 4242 | dolock = rb_reader_lock(cpu_buffer); |
4277 | raw_spin_lock(&cpu_buffer->reader_lock); | ||
4278 | ret = rb_per_cpu_empty(cpu_buffer); | 4243 | ret = rb_per_cpu_empty(cpu_buffer); |
4279 | if (dolock) | 4244 | rb_reader_unlock(cpu_buffer, dolock); |
4280 | raw_spin_unlock(&cpu_buffer->reader_lock); | ||
4281 | local_irq_restore(flags); | 4245 | local_irq_restore(flags); |
4282 | 4246 | ||
4283 | if (!ret) | 4247 | if (!ret) |
@@ -4297,21 +4261,17 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu) | |||
4297 | { | 4261 | { |
4298 | struct ring_buffer_per_cpu *cpu_buffer; | 4262 | struct ring_buffer_per_cpu *cpu_buffer; |
4299 | unsigned long flags; | 4263 | unsigned long flags; |
4300 | int dolock; | 4264 | bool dolock; |
4301 | int ret; | 4265 | int ret; |
4302 | 4266 | ||
4303 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) | 4267 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
4304 | return 1; | 4268 | return 1; |
4305 | 4269 | ||
4306 | dolock = rb_ok_to_lock(); | ||
4307 | |||
4308 | cpu_buffer = buffer->buffers[cpu]; | 4270 | cpu_buffer = buffer->buffers[cpu]; |
4309 | local_irq_save(flags); | 4271 | local_irq_save(flags); |
4310 | if (dolock) | 4272 | dolock = rb_reader_lock(cpu_buffer); |
4311 | raw_spin_lock(&cpu_buffer->reader_lock); | ||
4312 | ret = rb_per_cpu_empty(cpu_buffer); | 4273 | ret = rb_per_cpu_empty(cpu_buffer); |
4313 | if (dolock) | 4274 | rb_reader_unlock(cpu_buffer, dolock); |
4314 | raw_spin_unlock(&cpu_buffer->reader_lock); | ||
4315 | local_irq_restore(flags); | 4275 | local_irq_restore(flags); |
4316 | 4276 | ||
4317 | return ret; | 4277 | return ret; |
@@ -4349,9 +4309,6 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a, | |||
4349 | 4309 | ||
4350 | ret = -EAGAIN; | 4310 | ret = -EAGAIN; |
4351 | 4311 | ||
4352 | if (ring_buffer_flags != RB_BUFFERS_ON) | ||
4353 | goto out; | ||
4354 | |||
4355 | if (atomic_read(&buffer_a->record_disabled)) | 4312 | if (atomic_read(&buffer_a->record_disabled)) |
4356 | goto out; | 4313 | goto out; |
4357 | 4314 | ||
diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c index 1b28df2d9104..a1503a027ee2 100644 --- a/kernel/trace/ring_buffer_benchmark.c +++ b/kernel/trace/ring_buffer_benchmark.c | |||
@@ -32,11 +32,11 @@ static struct task_struct *producer; | |||
32 | static struct task_struct *consumer; | 32 | static struct task_struct *consumer; |
33 | static unsigned long read; | 33 | static unsigned long read; |
34 | 34 | ||
35 | static int disable_reader; | 35 | static unsigned int disable_reader; |
36 | module_param(disable_reader, uint, 0644); | 36 | module_param(disable_reader, uint, 0644); |
37 | MODULE_PARM_DESC(disable_reader, "only run producer"); | 37 | MODULE_PARM_DESC(disable_reader, "only run producer"); |
38 | 38 | ||
39 | static int write_iteration = 50; | 39 | static unsigned int write_iteration = 50; |
40 | module_param(write_iteration, uint, 0644); | 40 | module_param(write_iteration, uint, 0644); |
41 | MODULE_PARM_DESC(write_iteration, "# of writes between timestamp readings"); | 41 | MODULE_PARM_DESC(write_iteration, "# of writes between timestamp readings"); |
42 | 42 | ||
@@ -46,16 +46,16 @@ static int consumer_nice = MAX_NICE; | |||
46 | static int producer_fifo = -1; | 46 | static int producer_fifo = -1; |
47 | static int consumer_fifo = -1; | 47 | static int consumer_fifo = -1; |
48 | 48 | ||
49 | module_param(producer_nice, uint, 0644); | 49 | module_param(producer_nice, int, 0644); |
50 | MODULE_PARM_DESC(producer_nice, "nice prio for producer"); | 50 | MODULE_PARM_DESC(producer_nice, "nice prio for producer"); |
51 | 51 | ||
52 | module_param(consumer_nice, uint, 0644); | 52 | module_param(consumer_nice, int, 0644); |
53 | MODULE_PARM_DESC(consumer_nice, "nice prio for consumer"); | 53 | MODULE_PARM_DESC(consumer_nice, "nice prio for consumer"); |
54 | 54 | ||
55 | module_param(producer_fifo, uint, 0644); | 55 | module_param(producer_fifo, int, 0644); |
56 | MODULE_PARM_DESC(producer_fifo, "fifo prio for producer"); | 56 | MODULE_PARM_DESC(producer_fifo, "fifo prio for producer"); |
57 | 57 | ||
58 | module_param(consumer_fifo, uint, 0644); | 58 | module_param(consumer_fifo, int, 0644); |
59 | MODULE_PARM_DESC(consumer_fifo, "fifo prio for consumer"); | 59 | MODULE_PARM_DESC(consumer_fifo, "fifo prio for consumer"); |
60 | 60 | ||
61 | static int read_events; | 61 | static int read_events; |
@@ -263,6 +263,8 @@ static void ring_buffer_producer(void) | |||
263 | if (cnt % wakeup_interval) | 263 | if (cnt % wakeup_interval) |
264 | cond_resched(); | 264 | cond_resched(); |
265 | #endif | 265 | #endif |
266 | if (kthread_should_stop()) | ||
267 | kill_test = 1; | ||
266 | 268 | ||
267 | } while (ktime_before(end_time, timeout) && !kill_test); | 269 | } while (ktime_before(end_time, timeout) && !kill_test); |
268 | trace_printk("End ring buffer hammer\n"); | 270 | trace_printk("End ring buffer hammer\n"); |
@@ -285,7 +287,7 @@ static void ring_buffer_producer(void) | |||
285 | entries = ring_buffer_entries(buffer); | 287 | entries = ring_buffer_entries(buffer); |
286 | overruns = ring_buffer_overruns(buffer); | 288 | overruns = ring_buffer_overruns(buffer); |
287 | 289 | ||
288 | if (kill_test) | 290 | if (kill_test && !kthread_should_stop()) |
289 | trace_printk("ERROR!\n"); | 291 | trace_printk("ERROR!\n"); |
290 | 292 | ||
291 | if (!disable_reader) { | 293 | if (!disable_reader) { |
@@ -379,7 +381,7 @@ static int ring_buffer_consumer_thread(void *arg) | |||
379 | } | 381 | } |
380 | __set_current_state(TASK_RUNNING); | 382 | __set_current_state(TASK_RUNNING); |
381 | 383 | ||
382 | if (kill_test) | 384 | if (!kthread_should_stop()) |
383 | wait_to_die(); | 385 | wait_to_die(); |
384 | 386 | ||
385 | return 0; | 387 | return 0; |
@@ -399,13 +401,16 @@ static int ring_buffer_producer_thread(void *arg) | |||
399 | } | 401 | } |
400 | 402 | ||
401 | ring_buffer_producer(); | 403 | ring_buffer_producer(); |
404 | if (kill_test) | ||
405 | goto out_kill; | ||
402 | 406 | ||
403 | trace_printk("Sleeping for 10 secs\n"); | 407 | trace_printk("Sleeping for 10 secs\n"); |
404 | set_current_state(TASK_INTERRUPTIBLE); | 408 | set_current_state(TASK_INTERRUPTIBLE); |
405 | schedule_timeout(HZ * SLEEP_TIME); | 409 | schedule_timeout(HZ * SLEEP_TIME); |
406 | } | 410 | } |
407 | 411 | ||
408 | if (kill_test) | 412 | out_kill: |
413 | if (!kthread_should_stop()) | ||
409 | wait_to_die(); | 414 | wait_to_die(); |
410 | 415 | ||
411 | return 0; | 416 | return 0; |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 05330494a0df..abcbf7ff8743 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -297,11 +297,11 @@ void trace_array_put(struct trace_array *this_tr) | |||
297 | mutex_unlock(&trace_types_lock); | 297 | mutex_unlock(&trace_types_lock); |
298 | } | 298 | } |
299 | 299 | ||
300 | int filter_check_discard(struct ftrace_event_file *file, void *rec, | 300 | int filter_check_discard(struct trace_event_file *file, void *rec, |
301 | struct ring_buffer *buffer, | 301 | struct ring_buffer *buffer, |
302 | struct ring_buffer_event *event) | 302 | struct ring_buffer_event *event) |
303 | { | 303 | { |
304 | if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) && | 304 | if (unlikely(file->flags & EVENT_FILE_FL_FILTERED) && |
305 | !filter_match_preds(file->filter, rec)) { | 305 | !filter_match_preds(file->filter, rec)) { |
306 | ring_buffer_discard_commit(buffer, event); | 306 | ring_buffer_discard_commit(buffer, event); |
307 | return 1; | 307 | return 1; |
@@ -311,7 +311,7 @@ int filter_check_discard(struct ftrace_event_file *file, void *rec, | |||
311 | } | 311 | } |
312 | EXPORT_SYMBOL_GPL(filter_check_discard); | 312 | EXPORT_SYMBOL_GPL(filter_check_discard); |
313 | 313 | ||
314 | int call_filter_check_discard(struct ftrace_event_call *call, void *rec, | 314 | int call_filter_check_discard(struct trace_event_call *call, void *rec, |
315 | struct ring_buffer *buffer, | 315 | struct ring_buffer *buffer, |
316 | struct ring_buffer_event *event) | 316 | struct ring_buffer_event *event) |
317 | { | 317 | { |
@@ -876,6 +876,7 @@ static struct { | |||
876 | { trace_clock_jiffies, "uptime", 0 }, | 876 | { trace_clock_jiffies, "uptime", 0 }, |
877 | { trace_clock, "perf", 1 }, | 877 | { trace_clock, "perf", 1 }, |
878 | { ktime_get_mono_fast_ns, "mono", 1 }, | 878 | { ktime_get_mono_fast_ns, "mono", 1 }, |
879 | { ktime_get_raw_fast_ns, "mono_raw", 1 }, | ||
879 | ARCH_TRACE_CLOCKS | 880 | ARCH_TRACE_CLOCKS |
880 | }; | 881 | }; |
881 | 882 | ||
@@ -1693,13 +1694,13 @@ static struct ring_buffer *temp_buffer; | |||
1693 | 1694 | ||
1694 | struct ring_buffer_event * | 1695 | struct ring_buffer_event * |
1695 | trace_event_buffer_lock_reserve(struct ring_buffer **current_rb, | 1696 | trace_event_buffer_lock_reserve(struct ring_buffer **current_rb, |
1696 | struct ftrace_event_file *ftrace_file, | 1697 | struct trace_event_file *trace_file, |
1697 | int type, unsigned long len, | 1698 | int type, unsigned long len, |
1698 | unsigned long flags, int pc) | 1699 | unsigned long flags, int pc) |
1699 | { | 1700 | { |
1700 | struct ring_buffer_event *entry; | 1701 | struct ring_buffer_event *entry; |
1701 | 1702 | ||
1702 | *current_rb = ftrace_file->tr->trace_buffer.buffer; | 1703 | *current_rb = trace_file->tr->trace_buffer.buffer; |
1703 | entry = trace_buffer_lock_reserve(*current_rb, | 1704 | entry = trace_buffer_lock_reserve(*current_rb, |
1704 | type, len, flags, pc); | 1705 | type, len, flags, pc); |
1705 | /* | 1706 | /* |
@@ -1708,7 +1709,7 @@ trace_event_buffer_lock_reserve(struct ring_buffer **current_rb, | |||
1708 | * to store the trace event for the trigger to use. It's recursive | 1709 | * to store the trace event for the trigger to use. It's recursive |
1709 | * safe and will not be recorded anywhere. | 1710 | * safe and will not be recorded anywhere. |
1710 | */ | 1711 | */ |
1711 | if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) { | 1712 | if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) { |
1712 | *current_rb = temp_buffer; | 1713 | *current_rb = temp_buffer; |
1713 | entry = trace_buffer_lock_reserve(*current_rb, | 1714 | entry = trace_buffer_lock_reserve(*current_rb, |
1714 | type, len, flags, pc); | 1715 | type, len, flags, pc); |
@@ -1760,7 +1761,7 @@ trace_function(struct trace_array *tr, | |||
1760 | unsigned long ip, unsigned long parent_ip, unsigned long flags, | 1761 | unsigned long ip, unsigned long parent_ip, unsigned long flags, |
1761 | int pc) | 1762 | int pc) |
1762 | { | 1763 | { |
1763 | struct ftrace_event_call *call = &event_function; | 1764 | struct trace_event_call *call = &event_function; |
1764 | struct ring_buffer *buffer = tr->trace_buffer.buffer; | 1765 | struct ring_buffer *buffer = tr->trace_buffer.buffer; |
1765 | struct ring_buffer_event *event; | 1766 | struct ring_buffer_event *event; |
1766 | struct ftrace_entry *entry; | 1767 | struct ftrace_entry *entry; |
@@ -1795,7 +1796,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer, | |||
1795 | unsigned long flags, | 1796 | unsigned long flags, |
1796 | int skip, int pc, struct pt_regs *regs) | 1797 | int skip, int pc, struct pt_regs *regs) |
1797 | { | 1798 | { |
1798 | struct ftrace_event_call *call = &event_kernel_stack; | 1799 | struct trace_event_call *call = &event_kernel_stack; |
1799 | struct ring_buffer_event *event; | 1800 | struct ring_buffer_event *event; |
1800 | struct stack_entry *entry; | 1801 | struct stack_entry *entry; |
1801 | struct stack_trace trace; | 1802 | struct stack_trace trace; |
@@ -1923,7 +1924,7 @@ static DEFINE_PER_CPU(int, user_stack_count); | |||
1923 | void | 1924 | void |
1924 | ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc) | 1925 | ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc) |
1925 | { | 1926 | { |
1926 | struct ftrace_event_call *call = &event_user_stack; | 1927 | struct trace_event_call *call = &event_user_stack; |
1927 | struct ring_buffer_event *event; | 1928 | struct ring_buffer_event *event; |
1928 | struct userstack_entry *entry; | 1929 | struct userstack_entry *entry; |
1929 | struct stack_trace trace; | 1930 | struct stack_trace trace; |
@@ -2129,7 +2130,7 @@ static void trace_printk_start_stop_comm(int enabled) | |||
2129 | */ | 2130 | */ |
2130 | int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) | 2131 | int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) |
2131 | { | 2132 | { |
2132 | struct ftrace_event_call *call = &event_bprint; | 2133 | struct trace_event_call *call = &event_bprint; |
2133 | struct ring_buffer_event *event; | 2134 | struct ring_buffer_event *event; |
2134 | struct ring_buffer *buffer; | 2135 | struct ring_buffer *buffer; |
2135 | struct trace_array *tr = &global_trace; | 2136 | struct trace_array *tr = &global_trace; |
@@ -2187,7 +2188,7 @@ static int | |||
2187 | __trace_array_vprintk(struct ring_buffer *buffer, | 2188 | __trace_array_vprintk(struct ring_buffer *buffer, |
2188 | unsigned long ip, const char *fmt, va_list args) | 2189 | unsigned long ip, const char *fmt, va_list args) |
2189 | { | 2190 | { |
2190 | struct ftrace_event_call *call = &event_print; | 2191 | struct trace_event_call *call = &event_print; |
2191 | struct ring_buffer_event *event; | 2192 | struct ring_buffer_event *event; |
2192 | int len = 0, size, pc; | 2193 | int len = 0, size, pc; |
2193 | struct print_entry *entry; | 2194 | struct print_entry *entry; |
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 3d2ad5f83e94..f060716b02ae 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -12,7 +12,7 @@ | |||
12 | #include <linux/ftrace.h> | 12 | #include <linux/ftrace.h> |
13 | #include <linux/hw_breakpoint.h> | 13 | #include <linux/hw_breakpoint.h> |
14 | #include <linux/trace_seq.h> | 14 | #include <linux/trace_seq.h> |
15 | #include <linux/ftrace_event.h> | 15 | #include <linux/trace_events.h> |
16 | #include <linux/compiler.h> | 16 | #include <linux/compiler.h> |
17 | #include <linux/trace_seq.h> | 17 | #include <linux/trace_seq.h> |
18 | 18 | ||
@@ -211,8 +211,8 @@ struct trace_array { | |||
211 | #ifdef CONFIG_FTRACE_SYSCALLS | 211 | #ifdef CONFIG_FTRACE_SYSCALLS |
212 | int sys_refcount_enter; | 212 | int sys_refcount_enter; |
213 | int sys_refcount_exit; | 213 | int sys_refcount_exit; |
214 | struct ftrace_event_file __rcu *enter_syscall_files[NR_syscalls]; | 214 | struct trace_event_file __rcu *enter_syscall_files[NR_syscalls]; |
215 | struct ftrace_event_file __rcu *exit_syscall_files[NR_syscalls]; | 215 | struct trace_event_file __rcu *exit_syscall_files[NR_syscalls]; |
216 | #endif | 216 | #endif |
217 | int stop_count; | 217 | int stop_count; |
218 | int clock_id; | 218 | int clock_id; |
@@ -858,7 +858,7 @@ void ftrace_destroy_filter_files(struct ftrace_ops *ops); | |||
858 | #define ftrace_destroy_filter_files(ops) do { } while (0) | 858 | #define ftrace_destroy_filter_files(ops) do { } while (0) |
859 | #endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */ | 859 | #endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */ |
860 | 860 | ||
861 | int ftrace_event_is_function(struct ftrace_event_call *call); | 861 | int ftrace_event_is_function(struct trace_event_call *call); |
862 | 862 | ||
863 | /* | 863 | /* |
864 | * struct trace_parser - serves for reading the user input separated by spaces | 864 | * struct trace_parser - serves for reading the user input separated by spaces |
@@ -992,7 +992,7 @@ struct event_subsystem { | |||
992 | int ref_count; | 992 | int ref_count; |
993 | }; | 993 | }; |
994 | 994 | ||
995 | struct ftrace_subsystem_dir { | 995 | struct trace_subsystem_dir { |
996 | struct list_head list; | 996 | struct list_head list; |
997 | struct event_subsystem *subsystem; | 997 | struct event_subsystem *subsystem; |
998 | struct trace_array *tr; | 998 | struct trace_array *tr; |
@@ -1052,30 +1052,30 @@ struct filter_pred { | |||
1052 | 1052 | ||
1053 | extern enum regex_type | 1053 | extern enum regex_type |
1054 | filter_parse_regex(char *buff, int len, char **search, int *not); | 1054 | filter_parse_regex(char *buff, int len, char **search, int *not); |
1055 | extern void print_event_filter(struct ftrace_event_file *file, | 1055 | extern void print_event_filter(struct trace_event_file *file, |
1056 | struct trace_seq *s); | 1056 | struct trace_seq *s); |
1057 | extern int apply_event_filter(struct ftrace_event_file *file, | 1057 | extern int apply_event_filter(struct trace_event_file *file, |
1058 | char *filter_string); | 1058 | char *filter_string); |
1059 | extern int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir, | 1059 | extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir, |
1060 | char *filter_string); | 1060 | char *filter_string); |
1061 | extern void print_subsystem_event_filter(struct event_subsystem *system, | 1061 | extern void print_subsystem_event_filter(struct event_subsystem *system, |
1062 | struct trace_seq *s); | 1062 | struct trace_seq *s); |
1063 | extern int filter_assign_type(const char *type); | 1063 | extern int filter_assign_type(const char *type); |
1064 | extern int create_event_filter(struct ftrace_event_call *call, | 1064 | extern int create_event_filter(struct trace_event_call *call, |
1065 | char *filter_str, bool set_str, | 1065 | char *filter_str, bool set_str, |
1066 | struct event_filter **filterp); | 1066 | struct event_filter **filterp); |
1067 | extern void free_event_filter(struct event_filter *filter); | 1067 | extern void free_event_filter(struct event_filter *filter); |
1068 | 1068 | ||
1069 | struct ftrace_event_field * | 1069 | struct ftrace_event_field * |
1070 | trace_find_event_field(struct ftrace_event_call *call, char *name); | 1070 | trace_find_event_field(struct trace_event_call *call, char *name); |
1071 | 1071 | ||
1072 | extern void trace_event_enable_cmd_record(bool enable); | 1072 | extern void trace_event_enable_cmd_record(bool enable); |
1073 | extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr); | 1073 | extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr); |
1074 | extern int event_trace_del_tracer(struct trace_array *tr); | 1074 | extern int event_trace_del_tracer(struct trace_array *tr); |
1075 | 1075 | ||
1076 | extern struct ftrace_event_file *find_event_file(struct trace_array *tr, | 1076 | extern struct trace_event_file *find_event_file(struct trace_array *tr, |
1077 | const char *system, | 1077 | const char *system, |
1078 | const char *event); | 1078 | const char *event); |
1079 | 1079 | ||
1080 | static inline void *event_file_data(struct file *filp) | 1080 | static inline void *event_file_data(struct file *filp) |
1081 | { | 1081 | { |
@@ -1180,7 +1180,7 @@ struct event_trigger_ops { | |||
1180 | * commands need to do this if they themselves log to the trace | 1180 | * commands need to do this if they themselves log to the trace |
1181 | * buffer (see the @post_trigger() member below). @trigger_type | 1181 | * buffer (see the @post_trigger() member below). @trigger_type |
1182 | * values are defined by adding new values to the trigger_type | 1182 | * values are defined by adding new values to the trigger_type |
1183 | * enum in include/linux/ftrace_event.h. | 1183 | * enum in include/linux/trace_events.h. |
1184 | * | 1184 | * |
1185 | * @post_trigger: A flag that says whether or not this command needs | 1185 | * @post_trigger: A flag that says whether or not this command needs |
1186 | * to have its action delayed until after the current event has | 1186 | * to have its action delayed until after the current event has |
@@ -1242,23 +1242,23 @@ struct event_command { | |||
1242 | enum event_trigger_type trigger_type; | 1242 | enum event_trigger_type trigger_type; |
1243 | bool post_trigger; | 1243 | bool post_trigger; |
1244 | int (*func)(struct event_command *cmd_ops, | 1244 | int (*func)(struct event_command *cmd_ops, |
1245 | struct ftrace_event_file *file, | 1245 | struct trace_event_file *file, |
1246 | char *glob, char *cmd, char *params); | 1246 | char *glob, char *cmd, char *params); |
1247 | int (*reg)(char *glob, | 1247 | int (*reg)(char *glob, |
1248 | struct event_trigger_ops *ops, | 1248 | struct event_trigger_ops *ops, |
1249 | struct event_trigger_data *data, | 1249 | struct event_trigger_data *data, |
1250 | struct ftrace_event_file *file); | 1250 | struct trace_event_file *file); |
1251 | void (*unreg)(char *glob, | 1251 | void (*unreg)(char *glob, |
1252 | struct event_trigger_ops *ops, | 1252 | struct event_trigger_ops *ops, |
1253 | struct event_trigger_data *data, | 1253 | struct event_trigger_data *data, |
1254 | struct ftrace_event_file *file); | 1254 | struct trace_event_file *file); |
1255 | int (*set_filter)(char *filter_str, | 1255 | int (*set_filter)(char *filter_str, |
1256 | struct event_trigger_data *data, | 1256 | struct event_trigger_data *data, |
1257 | struct ftrace_event_file *file); | 1257 | struct trace_event_file *file); |
1258 | struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param); | 1258 | struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param); |
1259 | }; | 1259 | }; |
1260 | 1260 | ||
1261 | extern int trace_event_enable_disable(struct ftrace_event_file *file, | 1261 | extern int trace_event_enable_disable(struct trace_event_file *file, |
1262 | int enable, int soft_disable); | 1262 | int enable, int soft_disable); |
1263 | extern int tracing_alloc_snapshot(void); | 1263 | extern int tracing_alloc_snapshot(void); |
1264 | 1264 | ||
@@ -1286,7 +1286,7 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled); | |||
1286 | 1286 | ||
1287 | #undef FTRACE_ENTRY | 1287 | #undef FTRACE_ENTRY |
1288 | #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \ | 1288 | #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \ |
1289 | extern struct ftrace_event_call \ | 1289 | extern struct trace_event_call \ |
1290 | __aligned(4) event_##call; | 1290 | __aligned(4) event_##call; |
1291 | #undef FTRACE_ENTRY_DUP | 1291 | #undef FTRACE_ENTRY_DUP |
1292 | #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \ | 1292 | #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \ |
@@ -1295,7 +1295,7 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled); | |||
1295 | #include "trace_entries.h" | 1295 | #include "trace_entries.h" |
1296 | 1296 | ||
1297 | #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER) | 1297 | #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER) |
1298 | int perf_ftrace_event_register(struct ftrace_event_call *call, | 1298 | int perf_ftrace_event_register(struct trace_event_call *call, |
1299 | enum trace_reg type, void *data); | 1299 | enum trace_reg type, void *data); |
1300 | #else | 1300 | #else |
1301 | #define perf_ftrace_event_register NULL | 1301 | #define perf_ftrace_event_register NULL |
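The FTRACE_ENTRY() stanza above changes only the declared type: each entry in trace_entries.h still expands to an extern declaration of a per-event object. For instance, the function-trace entry, FTRACE_ENTRY(function, ftrace_entry, TRACE_FN, ...), now yields roughly:

    /* Expansion sketch for the "function" entry in trace_entries.h: */
    extern struct trace_event_call __aligned(4) event_function;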
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c index 57cbf1efdd44..a87b43f49eb4 100644 --- a/kernel/trace/trace_branch.c +++ b/kernel/trace/trace_branch.c | |||
@@ -29,7 +29,7 @@ static struct trace_array *branch_tracer; | |||
29 | static void | 29 | static void |
30 | probe_likely_condition(struct ftrace_branch_data *f, int val, int expect) | 30 | probe_likely_condition(struct ftrace_branch_data *f, int val, int expect) |
31 | { | 31 | { |
32 | struct ftrace_event_call *call = &event_branch; | 32 | struct trace_event_call *call = &event_branch; |
33 | struct trace_array *tr = branch_tracer; | 33 | struct trace_array *tr = branch_tracer; |
34 | struct trace_array_cpu *data; | 34 | struct trace_array_cpu *data; |
35 | struct ring_buffer_event *event; | 35 | struct ring_buffer_event *event; |
@@ -191,7 +191,7 @@ __init static int init_branch_tracer(void) | |||
191 | { | 191 | { |
192 | int ret; | 192 | int ret; |
193 | 193 | ||
194 | ret = register_ftrace_event(&trace_branch_event); | 194 | ret = register_trace_event(&trace_branch_event); |
195 | if (!ret) { | 195 | if (!ret) { |
196 | printk(KERN_WARNING "Warning: could not register " | 196 | printk(KERN_WARNING "Warning: could not register " |
197 | "branch events\n"); | 197 | "branch events\n"); |
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c index 57b67b1f24d1..0f06532a755b 100644 --- a/kernel/trace/trace_clock.c +++ b/kernel/trace/trace_clock.c | |||
@@ -56,6 +56,7 @@ u64 notrace trace_clock(void) | |||
56 | { | 56 | { |
57 | return local_clock(); | 57 | return local_clock(); |
58 | } | 58 | } |
59 | EXPORT_SYMBOL_GPL(trace_clock); | ||
59 | 60 | ||
60 | /* | 61 | /* |
61 | * trace_jiffy_clock(): Simply use jiffies as a clock counter. | 62 | * trace_jiffy_clock(): Simply use jiffies as a clock counter. |
@@ -68,6 +69,7 @@ u64 notrace trace_clock_jiffies(void) | |||
68 | { | 69 | { |
69 | return jiffies_64_to_clock_t(jiffies_64 - INITIAL_JIFFIES); | 70 | return jiffies_64_to_clock_t(jiffies_64 - INITIAL_JIFFIES); |
70 | } | 71 | } |
72 | EXPORT_SYMBOL_GPL(trace_clock_jiffies); | ||
71 | 73 | ||
72 | /* | 74 | /* |
73 | * trace_clock_global(): special globally coherent trace clock | 75 | * trace_clock_global(): special globally coherent trace clock |
@@ -123,6 +125,7 @@ u64 notrace trace_clock_global(void) | |||
123 | 125 | ||
124 | return now; | 126 | return now; |
125 | } | 127 | } |
128 | EXPORT_SYMBOL_GPL(trace_clock_global); | ||
126 | 129 | ||
127 | static atomic64_t trace_counter; | 130 | static atomic64_t trace_counter; |
128 | 131 | ||
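These three EXPORT_SYMBOL_GPL() additions are the only functional change in this file: they let modular code call the trace clocks directly instead of restricting them to built-in users. A sketch of a GPL module sampling them, assuming the usual declarations in <linux/trace_clock.h>:

    #include <linux/module.h>
    #include <linux/trace_clock.h>

    static int __init clock_sample_init(void)
    {
            u64 local  = trace_clock();             /* local_clock() based */
            u64 jiffy  = trace_clock_jiffies();     /* jiffies based */
            u64 global = trace_clock_global();      /* globally coherent */

            pr_info("trace clocks: %llu %llu %llu\n", local, jiffy, global);
            return 0;
    }
    module_init(clock_sample_init);

    MODULE_LICENSE("GPL");  /* needed to link against _GPL exports */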
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index 6fa484de2ba1..abfc903e741e 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c | |||
@@ -21,7 +21,7 @@ typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)]) | |||
21 | /* Count the events in use (per event id, not per instance) */ | 21 | /* Count the events in use (per event id, not per instance) */ |
22 | static int total_ref_count; | 22 | static int total_ref_count; |
23 | 23 | ||
24 | static int perf_trace_event_perm(struct ftrace_event_call *tp_event, | 24 | static int perf_trace_event_perm(struct trace_event_call *tp_event, |
25 | struct perf_event *p_event) | 25 | struct perf_event *p_event) |
26 | { | 26 | { |
27 | if (tp_event->perf_perm) { | 27 | if (tp_event->perf_perm) { |
@@ -83,7 +83,7 @@ static int perf_trace_event_perm(struct ftrace_event_call *tp_event, | |||
83 | return 0; | 83 | return 0; |
84 | } | 84 | } |
85 | 85 | ||
86 | static int perf_trace_event_reg(struct ftrace_event_call *tp_event, | 86 | static int perf_trace_event_reg(struct trace_event_call *tp_event, |
87 | struct perf_event *p_event) | 87 | struct perf_event *p_event) |
88 | { | 88 | { |
89 | struct hlist_head __percpu *list; | 89 | struct hlist_head __percpu *list; |
@@ -143,7 +143,7 @@ fail: | |||
143 | 143 | ||
144 | static void perf_trace_event_unreg(struct perf_event *p_event) | 144 | static void perf_trace_event_unreg(struct perf_event *p_event) |
145 | { | 145 | { |
146 | struct ftrace_event_call *tp_event = p_event->tp_event; | 146 | struct trace_event_call *tp_event = p_event->tp_event; |
147 | int i; | 147 | int i; |
148 | 148 | ||
149 | if (--tp_event->perf_refcount > 0) | 149 | if (--tp_event->perf_refcount > 0) |
@@ -172,17 +172,17 @@ out: | |||
172 | 172 | ||
173 | static int perf_trace_event_open(struct perf_event *p_event) | 173 | static int perf_trace_event_open(struct perf_event *p_event) |
174 | { | 174 | { |
175 | struct ftrace_event_call *tp_event = p_event->tp_event; | 175 | struct trace_event_call *tp_event = p_event->tp_event; |
176 | return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event); | 176 | return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event); |
177 | } | 177 | } |
178 | 178 | ||
179 | static void perf_trace_event_close(struct perf_event *p_event) | 179 | static void perf_trace_event_close(struct perf_event *p_event) |
180 | { | 180 | { |
181 | struct ftrace_event_call *tp_event = p_event->tp_event; | 181 | struct trace_event_call *tp_event = p_event->tp_event; |
182 | tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event); | 182 | tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event); |
183 | } | 183 | } |
184 | 184 | ||
185 | static int perf_trace_event_init(struct ftrace_event_call *tp_event, | 185 | static int perf_trace_event_init(struct trace_event_call *tp_event, |
186 | struct perf_event *p_event) | 186 | struct perf_event *p_event) |
187 | { | 187 | { |
188 | int ret; | 188 | int ret; |
@@ -206,7 +206,7 @@ static int perf_trace_event_init(struct ftrace_event_call *tp_event, | |||
206 | 206 | ||
207 | int perf_trace_init(struct perf_event *p_event) | 207 | int perf_trace_init(struct perf_event *p_event) |
208 | { | 208 | { |
209 | struct ftrace_event_call *tp_event; | 209 | struct trace_event_call *tp_event; |
210 | u64 event_id = p_event->attr.config; | 210 | u64 event_id = p_event->attr.config; |
211 | int ret = -EINVAL; | 211 | int ret = -EINVAL; |
212 | 212 | ||
@@ -236,7 +236,7 @@ void perf_trace_destroy(struct perf_event *p_event) | |||
236 | 236 | ||
237 | int perf_trace_add(struct perf_event *p_event, int flags) | 237 | int perf_trace_add(struct perf_event *p_event, int flags) |
238 | { | 238 | { |
239 | struct ftrace_event_call *tp_event = p_event->tp_event; | 239 | struct trace_event_call *tp_event = p_event->tp_event; |
240 | struct hlist_head __percpu *pcpu_list; | 240 | struct hlist_head __percpu *pcpu_list; |
241 | struct hlist_head *list; | 241 | struct hlist_head *list; |
242 | 242 | ||
@@ -255,7 +255,7 @@ int perf_trace_add(struct perf_event *p_event, int flags) | |||
255 | 255 | ||
256 | void perf_trace_del(struct perf_event *p_event, int flags) | 256 | void perf_trace_del(struct perf_event *p_event, int flags) |
257 | { | 257 | { |
258 | struct ftrace_event_call *tp_event = p_event->tp_event; | 258 | struct trace_event_call *tp_event = p_event->tp_event; |
259 | hlist_del_rcu(&p_event->hlist_entry); | 259 | hlist_del_rcu(&p_event->hlist_entry); |
260 | tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event); | 260 | tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event); |
261 | } | 261 | } |
@@ -357,7 +357,7 @@ static void perf_ftrace_function_disable(struct perf_event *event) | |||
357 | ftrace_function_local_disable(&event->ftrace_ops); | 357 | ftrace_function_local_disable(&event->ftrace_ops); |
358 | } | 358 | } |
359 | 359 | ||
360 | int perf_ftrace_event_register(struct ftrace_event_call *call, | 360 | int perf_ftrace_event_register(struct trace_event_call *call, |
361 | enum trace_reg type, void *data) | 361 | enum trace_reg type, void *data) |
362 | { | 362 | { |
363 | switch (type) { | 363 | switch (type) { |
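The hunk above cuts off at the switch statement: perf_ftrace_event_register() dispatches on the same TRACE_REG_* ops that an event class's ->reg() callback handles. A sketch of the general shape such a callback takes after the rename; the probe function is a placeholder, call->tp assumes a tracepoint-backed event, and real tracepoint events get this behavior from trace_event_reg() in trace_events.c:

    static void sample_perf_probe(void *data);      /* placeholder probe */

    static int sample_event_reg(struct trace_event_call *call,
                                enum trace_reg type, void *data)
    {
            switch (type) {
            case TRACE_REG_PERF_REGISTER:
                    /* attach the probe to the event's tracepoint */
                    return tracepoint_probe_register(call->tp,
                                                     (void *)sample_perf_probe,
                                                     call);
            case TRACE_REG_PERF_UNREGISTER:
                    tracepoint_probe_unregister(call->tp,
                                                (void *)sample_perf_probe,
                                                call);
                    return 0;
            case TRACE_REG_PERF_OPEN:
            case TRACE_REG_PERF_CLOSE:
            case TRACE_REG_PERF_ADD:
            case TRACE_REG_PERF_DEL:
            default:
                    return 0;       /* nothing per-instance in this sketch */
            }
    }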
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index c4de47fc5cca..404a372ad85a 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c | |||
@@ -61,14 +61,14 @@ static int system_refcount_dec(struct event_subsystem *system) | |||
61 | 61 | ||
62 | #define do_for_each_event_file_safe(tr, file) \ | 62 | #define do_for_each_event_file_safe(tr, file) \ |
63 | list_for_each_entry(tr, &ftrace_trace_arrays, list) { \ | 63 | list_for_each_entry(tr, &ftrace_trace_arrays, list) { \ |
64 | struct ftrace_event_file *___n; \ | 64 | struct trace_event_file *___n; \ |
65 | list_for_each_entry_safe(file, ___n, &tr->events, list) | 65 | list_for_each_entry_safe(file, ___n, &tr->events, list) |
66 | 66 | ||
67 | #define while_for_each_event_file() \ | 67 | #define while_for_each_event_file() \ |
68 | } | 68 | } |
69 | 69 | ||
70 | static struct list_head * | 70 | static struct list_head * |
71 | trace_get_fields(struct ftrace_event_call *event_call) | 71 | trace_get_fields(struct trace_event_call *event_call) |
72 | { | 72 | { |
73 | if (!event_call->class->get_fields) | 73 | if (!event_call->class->get_fields) |
74 | return &event_call->class->fields; | 74 | return &event_call->class->fields; |
@@ -89,7 +89,7 @@ __find_event_field(struct list_head *head, char *name) | |||
89 | } | 89 | } |
90 | 90 | ||
91 | struct ftrace_event_field * | 91 | struct ftrace_event_field * |
92 | trace_find_event_field(struct ftrace_event_call *call, char *name) | 92 | trace_find_event_field(struct trace_event_call *call, char *name) |
93 | { | 93 | { |
94 | struct ftrace_event_field *field; | 94 | struct ftrace_event_field *field; |
95 | struct list_head *head; | 95 | struct list_head *head; |
@@ -129,7 +129,7 @@ static int __trace_define_field(struct list_head *head, const char *type, | |||
129 | return 0; | 129 | return 0; |
130 | } | 130 | } |
131 | 131 | ||
132 | int trace_define_field(struct ftrace_event_call *call, const char *type, | 132 | int trace_define_field(struct trace_event_call *call, const char *type, |
133 | const char *name, int offset, int size, int is_signed, | 133 | const char *name, int offset, int size, int is_signed, |
134 | int filter_type) | 134 | int filter_type) |
135 | { | 135 | { |
@@ -166,7 +166,7 @@ static int trace_define_common_fields(void) | |||
166 | return ret; | 166 | return ret; |
167 | } | 167 | } |
168 | 168 | ||
169 | static void trace_destroy_fields(struct ftrace_event_call *call) | 169 | static void trace_destroy_fields(struct trace_event_call *call) |
170 | { | 170 | { |
171 | struct ftrace_event_field *field, *next; | 171 | struct ftrace_event_field *field, *next; |
172 | struct list_head *head; | 172 | struct list_head *head; |
@@ -178,11 +178,11 @@ static void trace_destroy_fields(struct ftrace_event_call *call) | |||
178 | } | 178 | } |
179 | } | 179 | } |
180 | 180 | ||
181 | int trace_event_raw_init(struct ftrace_event_call *call) | 181 | int trace_event_raw_init(struct trace_event_call *call) |
182 | { | 182 | { |
183 | int id; | 183 | int id; |
184 | 184 | ||
185 | id = register_ftrace_event(&call->event); | 185 | id = register_trace_event(&call->event); |
186 | if (!id) | 186 | if (!id) |
187 | return -ENODEV; | 187 | return -ENODEV; |
188 | 188 | ||
@@ -190,18 +190,18 @@ int trace_event_raw_init(struct ftrace_event_call *call) | |||
190 | } | 190 | } |
191 | EXPORT_SYMBOL_GPL(trace_event_raw_init); | 191 | EXPORT_SYMBOL_GPL(trace_event_raw_init); |
192 | 192 | ||
193 | void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer, | 193 | void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer, |
194 | struct ftrace_event_file *ftrace_file, | 194 | struct trace_event_file *trace_file, |
195 | unsigned long len) | 195 | unsigned long len) |
196 | { | 196 | { |
197 | struct ftrace_event_call *event_call = ftrace_file->event_call; | 197 | struct trace_event_call *event_call = trace_file->event_call; |
198 | 198 | ||
199 | local_save_flags(fbuffer->flags); | 199 | local_save_flags(fbuffer->flags); |
200 | fbuffer->pc = preempt_count(); | 200 | fbuffer->pc = preempt_count(); |
201 | fbuffer->ftrace_file = ftrace_file; | 201 | fbuffer->trace_file = trace_file; |
202 | 202 | ||
203 | fbuffer->event = | 203 | fbuffer->event = |
204 | trace_event_buffer_lock_reserve(&fbuffer->buffer, ftrace_file, | 204 | trace_event_buffer_lock_reserve(&fbuffer->buffer, trace_file, |
205 | event_call->event.type, len, | 205 | event_call->event.type, len, |
206 | fbuffer->flags, fbuffer->pc); | 206 | fbuffer->flags, fbuffer->pc); |
207 | if (!fbuffer->event) | 207 | if (!fbuffer->event) |
@@ -210,13 +210,13 @@ void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer, | |||
210 | fbuffer->entry = ring_buffer_event_data(fbuffer->event); | 210 | fbuffer->entry = ring_buffer_event_data(fbuffer->event); |
211 | return fbuffer->entry; | 211 | return fbuffer->entry; |
212 | } | 212 | } |
213 | EXPORT_SYMBOL_GPL(ftrace_event_buffer_reserve); | 213 | EXPORT_SYMBOL_GPL(trace_event_buffer_reserve); |
214 | 214 | ||
215 | static DEFINE_SPINLOCK(tracepoint_iter_lock); | 215 | static DEFINE_SPINLOCK(tracepoint_iter_lock); |
216 | 216 | ||
217 | static void output_printk(struct ftrace_event_buffer *fbuffer) | 217 | static void output_printk(struct trace_event_buffer *fbuffer) |
218 | { | 218 | { |
219 | struct ftrace_event_call *event_call; | 219 | struct trace_event_call *event_call; |
220 | struct trace_event *event; | 220 | struct trace_event *event; |
221 | unsigned long flags; | 221 | unsigned long flags; |
222 | struct trace_iterator *iter = tracepoint_print_iter; | 222 | struct trace_iterator *iter = tracepoint_print_iter; |
@@ -224,12 +224,12 @@ static void output_printk(struct ftrace_event_buffer *fbuffer) | |||
224 | if (!iter) | 224 | if (!iter) |
225 | return; | 225 | return; |
226 | 226 | ||
227 | event_call = fbuffer->ftrace_file->event_call; | 227 | event_call = fbuffer->trace_file->event_call; |
228 | if (!event_call || !event_call->event.funcs || | 228 | if (!event_call || !event_call->event.funcs || |
229 | !event_call->event.funcs->trace) | 229 | !event_call->event.funcs->trace) |
230 | return; | 230 | return; |
231 | 231 | ||
232 | event = &fbuffer->ftrace_file->event_call->event; | 232 | event = &fbuffer->trace_file->event_call->event; |
233 | 233 | ||
234 | spin_lock_irqsave(&tracepoint_iter_lock, flags); | 234 | spin_lock_irqsave(&tracepoint_iter_lock, flags); |
235 | trace_seq_init(&iter->seq); | 235 | trace_seq_init(&iter->seq); |
@@ -241,21 +241,21 @@ static void output_printk(struct ftrace_event_buffer *fbuffer) | |||
241 | spin_unlock_irqrestore(&tracepoint_iter_lock, flags); | 241 | spin_unlock_irqrestore(&tracepoint_iter_lock, flags); |
242 | } | 242 | } |
243 | 243 | ||
244 | void ftrace_event_buffer_commit(struct ftrace_event_buffer *fbuffer) | 244 | void trace_event_buffer_commit(struct trace_event_buffer *fbuffer) |
245 | { | 245 | { |
246 | if (tracepoint_printk) | 246 | if (tracepoint_printk) |
247 | output_printk(fbuffer); | 247 | output_printk(fbuffer); |
248 | 248 | ||
249 | event_trigger_unlock_commit(fbuffer->ftrace_file, fbuffer->buffer, | 249 | event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer, |
250 | fbuffer->event, fbuffer->entry, | 250 | fbuffer->event, fbuffer->entry, |
251 | fbuffer->flags, fbuffer->pc); | 251 | fbuffer->flags, fbuffer->pc); |
252 | } | 252 | } |
253 | EXPORT_SYMBOL_GPL(ftrace_event_buffer_commit); | 253 | EXPORT_SYMBOL_GPL(trace_event_buffer_commit); |
254 | 254 | ||
255 | int ftrace_event_reg(struct ftrace_event_call *call, | 255 | int trace_event_reg(struct trace_event_call *call, |
256 | enum trace_reg type, void *data) | 256 | enum trace_reg type, void *data) |
257 | { | 257 | { |
258 | struct ftrace_event_file *file = data; | 258 | struct trace_event_file *file = data; |
259 | 259 | ||
260 | WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT)); | 260 | WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT)); |
261 | switch (type) { | 261 | switch (type) { |
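The renamed reserve/commit pair above is the path generated trace-event code takes into the ring buffer (the registration switch resumes in the next hunk). A condensed sketch of a caller, with a hypothetical entry layout:

    struct sample_entry {
            struct trace_entry      ent;    /* common fields come first */
            unsigned long           value;  /* hypothetical payload */
    };

    static void sample_record(struct trace_event_file *trace_file,
                              unsigned long value)
    {
            struct trace_event_buffer fbuffer;
            struct sample_entry *entry;

            entry = trace_event_buffer_reserve(&fbuffer, trace_file,
                                               sizeof(*entry));
            if (!entry)
                    return;         /* buffer full or event filtered off */

            entry->value = value;
            trace_event_buffer_commit(&fbuffer);
    }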
@@ -288,34 +288,34 @@ int ftrace_event_reg(struct ftrace_event_call *call, | |||
288 | } | 288 | } |
289 | return 0; | 289 | return 0; |
290 | } | 290 | } |
291 | EXPORT_SYMBOL_GPL(ftrace_event_reg); | 291 | EXPORT_SYMBOL_GPL(trace_event_reg); |
292 | 292 | ||
293 | void trace_event_enable_cmd_record(bool enable) | 293 | void trace_event_enable_cmd_record(bool enable) |
294 | { | 294 | { |
295 | struct ftrace_event_file *file; | 295 | struct trace_event_file *file; |
296 | struct trace_array *tr; | 296 | struct trace_array *tr; |
297 | 297 | ||
298 | mutex_lock(&event_mutex); | 298 | mutex_lock(&event_mutex); |
299 | do_for_each_event_file(tr, file) { | 299 | do_for_each_event_file(tr, file) { |
300 | 300 | ||
301 | if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) | 301 | if (!(file->flags & EVENT_FILE_FL_ENABLED)) |
302 | continue; | 302 | continue; |
303 | 303 | ||
304 | if (enable) { | 304 | if (enable) { |
305 | tracing_start_cmdline_record(); | 305 | tracing_start_cmdline_record(); |
306 | set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags); | 306 | set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags); |
307 | } else { | 307 | } else { |
308 | tracing_stop_cmdline_record(); | 308 | tracing_stop_cmdline_record(); |
309 | clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags); | 309 | clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags); |
310 | } | 310 | } |
311 | } while_for_each_event_file(); | 311 | } while_for_each_event_file(); |
312 | mutex_unlock(&event_mutex); | 312 | mutex_unlock(&event_mutex); |
313 | } | 313 | } |
314 | 314 | ||
315 | static int __ftrace_event_enable_disable(struct ftrace_event_file *file, | 315 | static int __ftrace_event_enable_disable(struct trace_event_file *file, |
316 | int enable, int soft_disable) | 316 | int enable, int soft_disable) |
317 | { | 317 | { |
318 | struct ftrace_event_call *call = file->event_call; | 318 | struct trace_event_call *call = file->event_call; |
319 | int ret = 0; | 319 | int ret = 0; |
320 | int disable; | 320 | int disable; |
321 | 321 | ||
@@ -337,24 +337,24 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file, | |||
337 | if (soft_disable) { | 337 | if (soft_disable) { |
338 | if (atomic_dec_return(&file->sm_ref) > 0) | 338 | if (atomic_dec_return(&file->sm_ref) > 0) |
339 | break; | 339 | break; |
340 | disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED; | 340 | disable = file->flags & EVENT_FILE_FL_SOFT_DISABLED; |
341 | clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags); | 341 | clear_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags); |
342 | } else | 342 | } else |
343 | disable = !(file->flags & FTRACE_EVENT_FL_SOFT_MODE); | 343 | disable = !(file->flags & EVENT_FILE_FL_SOFT_MODE); |
344 | 344 | ||
345 | if (disable && (file->flags & FTRACE_EVENT_FL_ENABLED)) { | 345 | if (disable && (file->flags & EVENT_FILE_FL_ENABLED)) { |
346 | clear_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags); | 346 | clear_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags); |
347 | if (file->flags & FTRACE_EVENT_FL_RECORDED_CMD) { | 347 | if (file->flags & EVENT_FILE_FL_RECORDED_CMD) { |
348 | tracing_stop_cmdline_record(); | 348 | tracing_stop_cmdline_record(); |
349 | clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags); | 349 | clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags); |
350 | } | 350 | } |
351 | call->class->reg(call, TRACE_REG_UNREGISTER, file); | 351 | call->class->reg(call, TRACE_REG_UNREGISTER, file); |
352 | } | 352 | } |
353 | /* If in SOFT_MODE, just set the SOFT_DISABLE_BIT, else clear it */ | 353 | /* If in SOFT_MODE, just set the SOFT_DISABLE_BIT, else clear it */ |
354 | if (file->flags & FTRACE_EVENT_FL_SOFT_MODE) | 354 | if (file->flags & EVENT_FILE_FL_SOFT_MODE) |
355 | set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags); | 355 | set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags); |
356 | else | 356 | else |
357 | clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags); | 357 | clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags); |
358 | break; | 358 | break; |
359 | case 1: | 359 | case 1: |
360 | /* | 360 | /* |
@@ -366,31 +366,31 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file, | |||
366 | * it still seems to be disabled. | 366 | * it still seems to be disabled. |
367 | */ | 367 | */ |
368 | if (!soft_disable) | 368 | if (!soft_disable) |
369 | clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags); | 369 | clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags); |
370 | else { | 370 | else { |
371 | if (atomic_inc_return(&file->sm_ref) > 1) | 371 | if (atomic_inc_return(&file->sm_ref) > 1) |
372 | break; | 372 | break; |
373 | set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags); | 373 | set_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags); |
374 | } | 374 | } |
375 | 375 | ||
376 | if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) { | 376 | if (!(file->flags & EVENT_FILE_FL_ENABLED)) { |
377 | 377 | ||
378 | /* Keep the event disabled, when going to SOFT_MODE. */ | 378 | /* Keep the event disabled, when going to SOFT_MODE. */ |
379 | if (soft_disable) | 379 | if (soft_disable) |
380 | set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags); | 380 | set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags); |
381 | 381 | ||
382 | if (trace_flags & TRACE_ITER_RECORD_CMD) { | 382 | if (trace_flags & TRACE_ITER_RECORD_CMD) { |
383 | tracing_start_cmdline_record(); | 383 | tracing_start_cmdline_record(); |
384 | set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags); | 384 | set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags); |
385 | } | 385 | } |
386 | ret = call->class->reg(call, TRACE_REG_REGISTER, file); | 386 | ret = call->class->reg(call, TRACE_REG_REGISTER, file); |
387 | if (ret) { | 387 | if (ret) { |
388 | tracing_stop_cmdline_record(); | 388 | tracing_stop_cmdline_record(); |
389 | pr_info("event trace: Could not enable event " | 389 | pr_info("event trace: Could not enable event " |
390 | "%s\n", ftrace_event_name(call)); | 390 | "%s\n", trace_event_name(call)); |
391 | break; | 391 | break; |
392 | } | 392 | } |
393 | set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags); | 393 | set_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags); |
394 | 394 | ||
395 | /* WAS_ENABLED gets set but never cleared. */ | 395 | /* WAS_ENABLED gets set but never cleared. */ |
396 | call->flags |= TRACE_EVENT_FL_WAS_ENABLED; | 396 | call->flags |= TRACE_EVENT_FL_WAS_ENABLED; |
@@ -401,13 +401,13 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file, | |||
401 | return ret; | 401 | return ret; |
402 | } | 402 | } |
403 | 403 | ||
404 | int trace_event_enable_disable(struct ftrace_event_file *file, | 404 | int trace_event_enable_disable(struct trace_event_file *file, |
405 | int enable, int soft_disable) | 405 | int enable, int soft_disable) |
406 | { | 406 | { |
407 | return __ftrace_event_enable_disable(file, enable, soft_disable); | 407 | return __ftrace_event_enable_disable(file, enable, soft_disable); |
408 | } | 408 | } |
409 | 409 | ||
410 | static int ftrace_event_enable_disable(struct ftrace_event_file *file, | 410 | static int ftrace_event_enable_disable(struct trace_event_file *file, |
411 | int enable) | 411 | int enable) |
412 | { | 412 | { |
413 | return __ftrace_event_enable_disable(file, enable, 0); | 413 | return __ftrace_event_enable_disable(file, enable, 0); |
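Both wrappers above funnel into __ftrace_event_enable_disable(), which keeps a three-state model under the renamed flags: ENABLED means the probe is registered, SOFT_MODE means something (typically a trigger) still needs the probe while tracing is off, and SOFT_DISABLED means the probe fires but must not record. The guard a probe applies before recording becomes, in the new spelling, a sketch like:

    /* Drop the record when the event is only soft-enabled for a trigger. */
    if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
            return;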
@@ -415,7 +415,7 @@ static int ftrace_event_enable_disable(struct ftrace_event_file *file, | |||
415 | 415 | ||
416 | static void ftrace_clear_events(struct trace_array *tr) | 416 | static void ftrace_clear_events(struct trace_array *tr) |
417 | { | 417 | { |
418 | struct ftrace_event_file *file; | 418 | struct trace_event_file *file; |
419 | 419 | ||
420 | mutex_lock(&event_mutex); | 420 | mutex_lock(&event_mutex); |
421 | list_for_each_entry(file, &tr->events, list) { | 421 | list_for_each_entry(file, &tr->events, list) { |
@@ -449,14 +449,14 @@ static void __get_system(struct event_subsystem *system) | |||
449 | system_refcount_inc(system); | 449 | system_refcount_inc(system); |
450 | } | 450 | } |
451 | 451 | ||
452 | static void __get_system_dir(struct ftrace_subsystem_dir *dir) | 452 | static void __get_system_dir(struct trace_subsystem_dir *dir) |
453 | { | 453 | { |
454 | WARN_ON_ONCE(dir->ref_count == 0); | 454 | WARN_ON_ONCE(dir->ref_count == 0); |
455 | dir->ref_count++; | 455 | dir->ref_count++; |
456 | __get_system(dir->subsystem); | 456 | __get_system(dir->subsystem); |
457 | } | 457 | } |
458 | 458 | ||
459 | static void __put_system_dir(struct ftrace_subsystem_dir *dir) | 459 | static void __put_system_dir(struct trace_subsystem_dir *dir) |
460 | { | 460 | { |
461 | WARN_ON_ONCE(dir->ref_count == 0); | 461 | WARN_ON_ONCE(dir->ref_count == 0); |
462 | /* If the subsystem is about to be freed, the dir must be too */ | 462 | /* If the subsystem is about to be freed, the dir must be too */ |
@@ -467,14 +467,14 @@ static void __put_system_dir(struct ftrace_subsystem_dir *dir) | |||
467 | kfree(dir); | 467 | kfree(dir); |
468 | } | 468 | } |
469 | 469 | ||
470 | static void put_system(struct ftrace_subsystem_dir *dir) | 470 | static void put_system(struct trace_subsystem_dir *dir) |
471 | { | 471 | { |
472 | mutex_lock(&event_mutex); | 472 | mutex_lock(&event_mutex); |
473 | __put_system_dir(dir); | 473 | __put_system_dir(dir); |
474 | mutex_unlock(&event_mutex); | 474 | mutex_unlock(&event_mutex); |
475 | } | 475 | } |
476 | 476 | ||
477 | static void remove_subsystem(struct ftrace_subsystem_dir *dir) | 477 | static void remove_subsystem(struct trace_subsystem_dir *dir) |
478 | { | 478 | { |
479 | if (!dir) | 479 | if (!dir) |
480 | return; | 480 | return; |
@@ -486,7 +486,7 @@ static void remove_subsystem(struct ftrace_subsystem_dir *dir) | |||
486 | } | 486 | } |
487 | } | 487 | } |
488 | 488 | ||
489 | static void remove_event_file_dir(struct ftrace_event_file *file) | 489 | static void remove_event_file_dir(struct trace_event_file *file) |
490 | { | 490 | { |
491 | struct dentry *dir = file->dir; | 491 | struct dentry *dir = file->dir; |
492 | struct dentry *child; | 492 | struct dentry *child; |
@@ -515,15 +515,15 @@ static int | |||
515 | __ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match, | 515 | __ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match, |
516 | const char *sub, const char *event, int set) | 516 | const char *sub, const char *event, int set) |
517 | { | 517 | { |
518 | struct ftrace_event_file *file; | 518 | struct trace_event_file *file; |
519 | struct ftrace_event_call *call; | 519 | struct trace_event_call *call; |
520 | const char *name; | 520 | const char *name; |
521 | int ret = -EINVAL; | 521 | int ret = -EINVAL; |
522 | 522 | ||
523 | list_for_each_entry(file, &tr->events, list) { | 523 | list_for_each_entry(file, &tr->events, list) { |
524 | 524 | ||
525 | call = file->event_call; | 525 | call = file->event_call; |
526 | name = ftrace_event_name(call); | 526 | name = trace_event_name(call); |
527 | 527 | ||
528 | if (!name || !call->class || !call->class->reg) | 528 | if (!name || !call->class || !call->class->reg) |
529 | continue; | 529 | continue; |
@@ -671,8 +671,8 @@ ftrace_event_write(struct file *file, const char __user *ubuf, | |||
671 | static void * | 671 | static void * |
672 | t_next(struct seq_file *m, void *v, loff_t *pos) | 672 | t_next(struct seq_file *m, void *v, loff_t *pos) |
673 | { | 673 | { |
674 | struct ftrace_event_file *file = v; | 674 | struct trace_event_file *file = v; |
675 | struct ftrace_event_call *call; | 675 | struct trace_event_call *call; |
676 | struct trace_array *tr = m->private; | 676 | struct trace_array *tr = m->private; |
677 | 677 | ||
678 | (*pos)++; | 678 | (*pos)++; |
@@ -692,13 +692,13 @@ t_next(struct seq_file *m, void *v, loff_t *pos) | |||
692 | 692 | ||
693 | static void *t_start(struct seq_file *m, loff_t *pos) | 693 | static void *t_start(struct seq_file *m, loff_t *pos) |
694 | { | 694 | { |
695 | struct ftrace_event_file *file; | 695 | struct trace_event_file *file; |
696 | struct trace_array *tr = m->private; | 696 | struct trace_array *tr = m->private; |
697 | loff_t l; | 697 | loff_t l; |
698 | 698 | ||
699 | mutex_lock(&event_mutex); | 699 | mutex_lock(&event_mutex); |
700 | 700 | ||
701 | file = list_entry(&tr->events, struct ftrace_event_file, list); | 701 | file = list_entry(&tr->events, struct trace_event_file, list); |
702 | for (l = 0; l <= *pos; ) { | 702 | for (l = 0; l <= *pos; ) { |
703 | file = t_next(m, file, &l); | 703 | file = t_next(m, file, &l); |
704 | if (!file) | 704 | if (!file) |
@@ -710,13 +710,13 @@ static void *t_start(struct seq_file *m, loff_t *pos) | |||
710 | static void * | 710 | static void * |
711 | s_next(struct seq_file *m, void *v, loff_t *pos) | 711 | s_next(struct seq_file *m, void *v, loff_t *pos) |
712 | { | 712 | { |
713 | struct ftrace_event_file *file = v; | 713 | struct trace_event_file *file = v; |
714 | struct trace_array *tr = m->private; | 714 | struct trace_array *tr = m->private; |
715 | 715 | ||
716 | (*pos)++; | 716 | (*pos)++; |
717 | 717 | ||
718 | list_for_each_entry_continue(file, &tr->events, list) { | 718 | list_for_each_entry_continue(file, &tr->events, list) { |
719 | if (file->flags & FTRACE_EVENT_FL_ENABLED) | 719 | if (file->flags & EVENT_FILE_FL_ENABLED) |
720 | return file; | 720 | return file; |
721 | } | 721 | } |
722 | 722 | ||
@@ -725,13 +725,13 @@ s_next(struct seq_file *m, void *v, loff_t *pos) | |||
725 | 725 | ||
726 | static void *s_start(struct seq_file *m, loff_t *pos) | 726 | static void *s_start(struct seq_file *m, loff_t *pos) |
727 | { | 727 | { |
728 | struct ftrace_event_file *file; | 728 | struct trace_event_file *file; |
729 | struct trace_array *tr = m->private; | 729 | struct trace_array *tr = m->private; |
730 | loff_t l; | 730 | loff_t l; |
731 | 731 | ||
732 | mutex_lock(&event_mutex); | 732 | mutex_lock(&event_mutex); |
733 | 733 | ||
734 | file = list_entry(&tr->events, struct ftrace_event_file, list); | 734 | file = list_entry(&tr->events, struct trace_event_file, list); |
735 | for (l = 0; l <= *pos; ) { | 735 | for (l = 0; l <= *pos; ) { |
736 | file = s_next(m, file, &l); | 736 | file = s_next(m, file, &l); |
737 | if (!file) | 737 | if (!file) |
@@ -742,12 +742,12 @@ static void *s_start(struct seq_file *m, loff_t *pos) | |||
742 | 742 | ||
743 | static int t_show(struct seq_file *m, void *v) | 743 | static int t_show(struct seq_file *m, void *v) |
744 | { | 744 | { |
745 | struct ftrace_event_file *file = v; | 745 | struct trace_event_file *file = v; |
746 | struct ftrace_event_call *call = file->event_call; | 746 | struct trace_event_call *call = file->event_call; |
747 | 747 | ||
748 | if (strcmp(call->class->system, TRACE_SYSTEM) != 0) | 748 | if (strcmp(call->class->system, TRACE_SYSTEM) != 0) |
749 | seq_printf(m, "%s:", call->class->system); | 749 | seq_printf(m, "%s:", call->class->system); |
750 | seq_printf(m, "%s\n", ftrace_event_name(call)); | 750 | seq_printf(m, "%s\n", trace_event_name(call)); |
751 | 751 | ||
752 | return 0; | 752 | return 0; |
753 | } | 753 | } |
@@ -761,7 +761,7 @@ static ssize_t | |||
761 | event_enable_read(struct file *filp, char __user *ubuf, size_t cnt, | 761 | event_enable_read(struct file *filp, char __user *ubuf, size_t cnt, |
762 | loff_t *ppos) | 762 | loff_t *ppos) |
763 | { | 763 | { |
764 | struct ftrace_event_file *file; | 764 | struct trace_event_file *file; |
765 | unsigned long flags; | 765 | unsigned long flags; |
766 | char buf[4] = "0"; | 766 | char buf[4] = "0"; |
767 | 767 | ||
@@ -774,12 +774,12 @@ event_enable_read(struct file *filp, char __user *ubuf, size_t cnt, | |||
774 | if (!file) | 774 | if (!file) |
775 | return -ENODEV; | 775 | return -ENODEV; |
776 | 776 | ||
777 | if (flags & FTRACE_EVENT_FL_ENABLED && | 777 | if (flags & EVENT_FILE_FL_ENABLED && |
778 | !(flags & FTRACE_EVENT_FL_SOFT_DISABLED)) | 778 | !(flags & EVENT_FILE_FL_SOFT_DISABLED)) |
779 | strcpy(buf, "1"); | 779 | strcpy(buf, "1"); |
780 | 780 | ||
781 | if (flags & FTRACE_EVENT_FL_SOFT_DISABLED || | 781 | if (flags & EVENT_FILE_FL_SOFT_DISABLED || |
782 | flags & FTRACE_EVENT_FL_SOFT_MODE) | 782 | flags & EVENT_FILE_FL_SOFT_MODE) |
783 | strcat(buf, "*"); | 783 | strcat(buf, "*"); |
784 | 784 | ||
785 | strcat(buf, "\n"); | 785 | strcat(buf, "\n"); |
@@ -791,7 +791,7 @@ static ssize_t | |||
791 | event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, | 791 | event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, |
792 | loff_t *ppos) | 792 | loff_t *ppos) |
793 | { | 793 | { |
794 | struct ftrace_event_file *file; | 794 | struct trace_event_file *file; |
795 | unsigned long val; | 795 | unsigned long val; |
796 | int ret; | 796 | int ret; |
797 | 797 | ||
@@ -828,10 +828,10 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt, | |||
828 | loff_t *ppos) | 828 | loff_t *ppos) |
829 | { | 829 | { |
830 | const char set_to_char[4] = { '?', '0', '1', 'X' }; | 830 | const char set_to_char[4] = { '?', '0', '1', 'X' }; |
831 | struct ftrace_subsystem_dir *dir = filp->private_data; | 831 | struct trace_subsystem_dir *dir = filp->private_data; |
832 | struct event_subsystem *system = dir->subsystem; | 832 | struct event_subsystem *system = dir->subsystem; |
833 | struct ftrace_event_call *call; | 833 | struct trace_event_call *call; |
834 | struct ftrace_event_file *file; | 834 | struct trace_event_file *file; |
835 | struct trace_array *tr = dir->tr; | 835 | struct trace_array *tr = dir->tr; |
836 | char buf[2]; | 836 | char buf[2]; |
837 | int set = 0; | 837 | int set = 0; |
@@ -840,7 +840,7 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt, | |||
840 | mutex_lock(&event_mutex); | 840 | mutex_lock(&event_mutex); |
841 | list_for_each_entry(file, &tr->events, list) { | 841 | list_for_each_entry(file, &tr->events, list) { |
842 | call = file->event_call; | 842 | call = file->event_call; |
843 | if (!ftrace_event_name(call) || !call->class || !call->class->reg) | 843 | if (!trace_event_name(call) || !call->class || !call->class->reg) |
844 | continue; | 844 | continue; |
845 | 845 | ||
846 | if (system && strcmp(call->class->system, system->name) != 0) | 846 | if (system && strcmp(call->class->system, system->name) != 0) |
@@ -851,7 +851,7 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt, | |||
851 | * or if all events are cleared, or if we have | 851 | * or if all events are cleared, or if we have |
852 | * a mixture. | 852 | * a mixture. |
853 | */ | 853 | */ |
854 | set |= (1 << !!(file->flags & FTRACE_EVENT_FL_ENABLED)); | 854 | set |= (1 << !!(file->flags & EVENT_FILE_FL_ENABLED)); |
855 | 855 | ||
856 | /* | 856 | /* |
857 | * If we have a mixture, no need to look further. | 857 | * If we have a mixture, no need to look further. |
@@ -873,7 +873,7 @@ static ssize_t | |||
873 | system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, | 873 | system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, |
874 | loff_t *ppos) | 874 | loff_t *ppos) |
875 | { | 875 | { |
876 | struct ftrace_subsystem_dir *dir = filp->private_data; | 876 | struct trace_subsystem_dir *dir = filp->private_data; |
877 | struct event_subsystem *system = dir->subsystem; | 877 | struct event_subsystem *system = dir->subsystem; |
878 | const char *name = NULL; | 878 | const char *name = NULL; |
879 | unsigned long val; | 879 | unsigned long val; |
@@ -917,7 +917,7 @@ enum { | |||
917 | 917 | ||
918 | static void *f_next(struct seq_file *m, void *v, loff_t *pos) | 918 | static void *f_next(struct seq_file *m, void *v, loff_t *pos) |
919 | { | 919 | { |
920 | struct ftrace_event_call *call = event_file_data(m->private); | 920 | struct trace_event_call *call = event_file_data(m->private); |
921 | struct list_head *common_head = &ftrace_common_fields; | 921 | struct list_head *common_head = &ftrace_common_fields; |
922 | struct list_head *head = trace_get_fields(call); | 922 | struct list_head *head = trace_get_fields(call); |
923 | struct list_head *node = v; | 923 | struct list_head *node = v; |
@@ -949,13 +949,13 @@ static void *f_next(struct seq_file *m, void *v, loff_t *pos) | |||
949 | 949 | ||
950 | static int f_show(struct seq_file *m, void *v) | 950 | static int f_show(struct seq_file *m, void *v) |
951 | { | 951 | { |
952 | struct ftrace_event_call *call = event_file_data(m->private); | 952 | struct trace_event_call *call = event_file_data(m->private); |
953 | struct ftrace_event_field *field; | 953 | struct ftrace_event_field *field; |
954 | const char *array_descriptor; | 954 | const char *array_descriptor; |
955 | 955 | ||
956 | switch ((unsigned long)v) { | 956 | switch ((unsigned long)v) { |
957 | case FORMAT_HEADER: | 957 | case FORMAT_HEADER: |
958 | seq_printf(m, "name: %s\n", ftrace_event_name(call)); | 958 | seq_printf(m, "name: %s\n", trace_event_name(call)); |
959 | seq_printf(m, "ID: %d\n", call->event.type); | 959 | seq_printf(m, "ID: %d\n", call->event.type); |
960 | seq_puts(m, "format:\n"); | 960 | seq_puts(m, "format:\n"); |
961 | return 0; | 961 | return 0; |
@@ -1062,7 +1062,7 @@ static ssize_t | |||
1062 | event_filter_read(struct file *filp, char __user *ubuf, size_t cnt, | 1062 | event_filter_read(struct file *filp, char __user *ubuf, size_t cnt, |
1063 | loff_t *ppos) | 1063 | loff_t *ppos) |
1064 | { | 1064 | { |
1065 | struct ftrace_event_file *file; | 1065 | struct trace_event_file *file; |
1066 | struct trace_seq *s; | 1066 | struct trace_seq *s; |
1067 | int r = -ENODEV; | 1067 | int r = -ENODEV; |
1068 | 1068 | ||
@@ -1095,7 +1095,7 @@ static ssize_t | |||
1095 | event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, | 1095 | event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, |
1096 | loff_t *ppos) | 1096 | loff_t *ppos) |
1097 | { | 1097 | { |
1098 | struct ftrace_event_file *file; | 1098 | struct trace_event_file *file; |
1099 | char *buf; | 1099 | char *buf; |
1100 | int err = -ENODEV; | 1100 | int err = -ENODEV; |
1101 | 1101 | ||
@@ -1132,7 +1132,7 @@ static LIST_HEAD(event_subsystems); | |||
1132 | static int subsystem_open(struct inode *inode, struct file *filp) | 1132 | static int subsystem_open(struct inode *inode, struct file *filp) |
1133 | { | 1133 | { |
1134 | struct event_subsystem *system = NULL; | 1134 | struct event_subsystem *system = NULL; |
1135 | struct ftrace_subsystem_dir *dir = NULL; /* Initialize for gcc */ | 1135 | struct trace_subsystem_dir *dir = NULL; /* Initialize for gcc */ |
1136 | struct trace_array *tr; | 1136 | struct trace_array *tr; |
1137 | int ret; | 1137 | int ret; |
1138 | 1138 | ||
@@ -1181,7 +1181,7 @@ static int subsystem_open(struct inode *inode, struct file *filp) | |||
1181 | 1181 | ||
1182 | static int system_tr_open(struct inode *inode, struct file *filp) | 1182 | static int system_tr_open(struct inode *inode, struct file *filp) |
1183 | { | 1183 | { |
1184 | struct ftrace_subsystem_dir *dir; | 1184 | struct trace_subsystem_dir *dir; |
1185 | struct trace_array *tr = inode->i_private; | 1185 | struct trace_array *tr = inode->i_private; |
1186 | int ret; | 1186 | int ret; |
1187 | 1187 | ||
@@ -1214,7 +1214,7 @@ static int system_tr_open(struct inode *inode, struct file *filp) | |||
1214 | 1214 | ||
1215 | static int subsystem_release(struct inode *inode, struct file *file) | 1215 | static int subsystem_release(struct inode *inode, struct file *file) |
1216 | { | 1216 | { |
1217 | struct ftrace_subsystem_dir *dir = file->private_data; | 1217 | struct trace_subsystem_dir *dir = file->private_data; |
1218 | 1218 | ||
1219 | trace_array_put(dir->tr); | 1219 | trace_array_put(dir->tr); |
1220 | 1220 | ||
@@ -1235,7 +1235,7 @@ static ssize_t | |||
1235 | subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt, | 1235 | subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt, |
1236 | loff_t *ppos) | 1236 | loff_t *ppos) |
1237 | { | 1237 | { |
1238 | struct ftrace_subsystem_dir *dir = filp->private_data; | 1238 | struct trace_subsystem_dir *dir = filp->private_data; |
1239 | struct event_subsystem *system = dir->subsystem; | 1239 | struct event_subsystem *system = dir->subsystem; |
1240 | struct trace_seq *s; | 1240 | struct trace_seq *s; |
1241 | int r; | 1241 | int r; |
@@ -1262,7 +1262,7 @@ static ssize_t | |||
1262 | subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, | 1262 | subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, |
1263 | loff_t *ppos) | 1263 | loff_t *ppos) |
1264 | { | 1264 | { |
1265 | struct ftrace_subsystem_dir *dir = filp->private_data; | 1265 | struct trace_subsystem_dir *dir = filp->private_data; |
1266 | char *buf; | 1266 | char *buf; |
1267 | int err; | 1267 | int err; |
1268 | 1268 | ||
@@ -1497,9 +1497,9 @@ create_new_subsystem(const char *name) | |||
1497 | 1497 | ||
1498 | static struct dentry * | 1498 | static struct dentry * |
1499 | event_subsystem_dir(struct trace_array *tr, const char *name, | 1499 | event_subsystem_dir(struct trace_array *tr, const char *name, |
1500 | struct ftrace_event_file *file, struct dentry *parent) | 1500 | struct trace_event_file *file, struct dentry *parent) |
1501 | { | 1501 | { |
1502 | struct ftrace_subsystem_dir *dir; | 1502 | struct trace_subsystem_dir *dir; |
1503 | struct event_subsystem *system; | 1503 | struct event_subsystem *system; |
1504 | struct dentry *entry; | 1504 | struct dentry *entry; |
1505 | 1505 | ||
@@ -1571,9 +1571,9 @@ event_subsystem_dir(struct trace_array *tr, const char *name, | |||
1571 | } | 1571 | } |
1572 | 1572 | ||
1573 | static int | 1573 | static int |
1574 | event_create_dir(struct dentry *parent, struct ftrace_event_file *file) | 1574 | event_create_dir(struct dentry *parent, struct trace_event_file *file) |
1575 | { | 1575 | { |
1576 | struct ftrace_event_call *call = file->event_call; | 1576 | struct trace_event_call *call = file->event_call; |
1577 | struct trace_array *tr = file->tr; | 1577 | struct trace_array *tr = file->tr; |
1578 | struct list_head *head; | 1578 | struct list_head *head; |
1579 | struct dentry *d_events; | 1579 | struct dentry *d_events; |
@@ -1591,7 +1591,7 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file) | |||
1591 | } else | 1591 | } else |
1592 | d_events = parent; | 1592 | d_events = parent; |
1593 | 1593 | ||
1594 | name = ftrace_event_name(call); | 1594 | name = trace_event_name(call); |
1595 | file->dir = tracefs_create_dir(name, d_events); | 1595 | file->dir = tracefs_create_dir(name, d_events); |
1596 | if (!file->dir) { | 1596 | if (!file->dir) { |
1597 | pr_warn("Could not create tracefs '%s' directory\n", name); | 1597 | pr_warn("Could not create tracefs '%s' directory\n", name); |
@@ -1634,9 +1634,9 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file) | |||
1634 | return 0; | 1634 | return 0; |
1635 | } | 1635 | } |
1636 | 1636 | ||
1637 | static void remove_event_from_tracers(struct ftrace_event_call *call) | 1637 | static void remove_event_from_tracers(struct trace_event_call *call) |
1638 | { | 1638 | { |
1639 | struct ftrace_event_file *file; | 1639 | struct trace_event_file *file; |
1640 | struct trace_array *tr; | 1640 | struct trace_array *tr; |
1641 | 1641 | ||
1642 | do_for_each_event_file_safe(tr, file) { | 1642 | do_for_each_event_file_safe(tr, file) { |
@@ -1654,10 +1654,10 @@ static void remove_event_from_tracers(struct ftrace_event_call *call) | |||
1654 | } while_for_each_event_file(); | 1654 | } while_for_each_event_file(); |
1655 | } | 1655 | } |
1656 | 1656 | ||
1657 | static void event_remove(struct ftrace_event_call *call) | 1657 | static void event_remove(struct trace_event_call *call) |
1658 | { | 1658 | { |
1659 | struct trace_array *tr; | 1659 | struct trace_array *tr; |
1660 | struct ftrace_event_file *file; | 1660 | struct trace_event_file *file; |
1661 | 1661 | ||
1662 | do_for_each_event_file(tr, file) { | 1662 | do_for_each_event_file(tr, file) { |
1663 | if (file->event_call != call) | 1663 | if (file->event_call != call) |
@@ -1673,17 +1673,17 @@ static void event_remove(struct ftrace_event_call *call) | |||
1673 | } while_for_each_event_file(); | 1673 | } while_for_each_event_file(); |
1674 | 1674 | ||
1675 | if (call->event.funcs) | 1675 | if (call->event.funcs) |
1676 | __unregister_ftrace_event(&call->event); | 1676 | __unregister_trace_event(&call->event); |
1677 | remove_event_from_tracers(call); | 1677 | remove_event_from_tracers(call); |
1678 | list_del(&call->list); | 1678 | list_del(&call->list); |
1679 | } | 1679 | } |
1680 | 1680 | ||
1681 | static int event_init(struct ftrace_event_call *call) | 1681 | static int event_init(struct trace_event_call *call) |
1682 | { | 1682 | { |
1683 | int ret = 0; | 1683 | int ret = 0; |
1684 | const char *name; | 1684 | const char *name; |
1685 | 1685 | ||
1686 | name = ftrace_event_name(call); | 1686 | name = trace_event_name(call); |
1687 | if (WARN_ON(!name)) | 1687 | if (WARN_ON(!name)) |
1688 | return -EINVAL; | 1688 | return -EINVAL; |
1689 | 1689 | ||
@@ -1697,7 +1697,7 @@ static int event_init(struct ftrace_event_call *call) | |||
1697 | } | 1697 | } |
1698 | 1698 | ||
1699 | static int | 1699 | static int |
1700 | __register_event(struct ftrace_event_call *call, struct module *mod) | 1700 | __register_event(struct trace_event_call *call, struct module *mod) |
1701 | { | 1701 | { |
1702 | int ret; | 1702 | int ret; |
1703 | 1703 | ||
@@ -1733,7 +1733,7 @@ static char *enum_replace(char *ptr, struct trace_enum_map *map, int len) | |||
1733 | return ptr + elen; | 1733 | return ptr + elen; |
1734 | } | 1734 | } |
1735 | 1735 | ||
1736 | static void update_event_printk(struct ftrace_event_call *call, | 1736 | static void update_event_printk(struct trace_event_call *call, |
1737 | struct trace_enum_map *map) | 1737 | struct trace_enum_map *map) |
1738 | { | 1738 | { |
1739 | char *ptr; | 1739 | char *ptr; |
@@ -1811,7 +1811,7 @@ static void update_event_printk(struct ftrace_event_call *call, | |||
1811 | 1811 | ||
1812 | void trace_event_enum_update(struct trace_enum_map **map, int len) | 1812 | void trace_event_enum_update(struct trace_enum_map **map, int len) |
1813 | { | 1813 | { |
1814 | struct ftrace_event_call *call, *p; | 1814 | struct trace_event_call *call, *p; |
1815 | const char *last_system = NULL; | 1815 | const char *last_system = NULL; |
1816 | int last_i; | 1816 | int last_i; |
1817 | int i; | 1817 | int i; |
@@ -1836,11 +1836,11 @@ void trace_event_enum_update(struct trace_enum_map **map, int len) | |||
1836 | up_write(&trace_event_sem); | 1836 | up_write(&trace_event_sem); |
1837 | } | 1837 | } |
1838 | 1838 | ||
1839 | static struct ftrace_event_file * | 1839 | static struct trace_event_file * |
1840 | trace_create_new_event(struct ftrace_event_call *call, | 1840 | trace_create_new_event(struct trace_event_call *call, |
1841 | struct trace_array *tr) | 1841 | struct trace_array *tr) |
1842 | { | 1842 | { |
1843 | struct ftrace_event_file *file; | 1843 | struct trace_event_file *file; |
1844 | 1844 | ||
1845 | file = kmem_cache_alloc(file_cachep, GFP_TRACE); | 1845 | file = kmem_cache_alloc(file_cachep, GFP_TRACE); |
1846 | if (!file) | 1846 | if (!file) |
@@ -1858,9 +1858,9 @@ trace_create_new_event(struct ftrace_event_call *call, | |||
1858 | 1858 | ||
1859 | /* Add an event to a trace directory */ | 1859 | /* Add an event to a trace directory */ |
1860 | static int | 1860 | static int |
1861 | __trace_add_new_event(struct ftrace_event_call *call, struct trace_array *tr) | 1861 | __trace_add_new_event(struct trace_event_call *call, struct trace_array *tr) |
1862 | { | 1862 | { |
1863 | struct ftrace_event_file *file; | 1863 | struct trace_event_file *file; |
1864 | 1864 | ||
1865 | file = trace_create_new_event(call, tr); | 1865 | file = trace_create_new_event(call, tr); |
1866 | if (!file) | 1866 | if (!file) |
@@ -1875,10 +1875,10 @@ __trace_add_new_event(struct ftrace_event_call *call, struct trace_array *tr) | |||
1875 | * the filesystem is initialized. | 1875 | * the filesystem is initialized. |
1876 | */ | 1876 | */ |
1877 | static __init int | 1877 | static __init int |
1878 | __trace_early_add_new_event(struct ftrace_event_call *call, | 1878 | __trace_early_add_new_event(struct trace_event_call *call, |
1879 | struct trace_array *tr) | 1879 | struct trace_array *tr) |
1880 | { | 1880 | { |
1881 | struct ftrace_event_file *file; | 1881 | struct trace_event_file *file; |
1882 | 1882 | ||
1883 | file = trace_create_new_event(call, tr); | 1883 | file = trace_create_new_event(call, tr); |
1884 | if (!file) | 1884 | if (!file) |
@@ -1888,10 +1888,10 @@ __trace_early_add_new_event(struct ftrace_event_call *call, | |||
1888 | } | 1888 | } |
1889 | 1889 | ||
1890 | struct ftrace_module_file_ops; | 1890 | struct ftrace_module_file_ops; |
1891 | static void __add_event_to_tracers(struct ftrace_event_call *call); | 1891 | static void __add_event_to_tracers(struct trace_event_call *call); |
1892 | 1892 | ||
1893 | /* Add an additional event_call dynamically */ | 1893 | /* Add an additional event_call dynamically */ |
1894 | int trace_add_event_call(struct ftrace_event_call *call) | 1894 | int trace_add_event_call(struct trace_event_call *call) |
1895 | { | 1895 | { |
1896 | int ret; | 1896 | int ret; |
1897 | mutex_lock(&trace_types_lock); | 1897 | mutex_lock(&trace_types_lock); |
@@ -1910,7 +1910,7 @@ int trace_add_event_call(struct ftrace_event_call *call) | |||
1910 | * Must be called under locking of trace_types_lock, event_mutex and | 1910 | * Must be called under locking of trace_types_lock, event_mutex and |
1911 | * trace_event_sem. | 1911 | * trace_event_sem. |
1912 | */ | 1912 | */ |
1913 | static void __trace_remove_event_call(struct ftrace_event_call *call) | 1913 | static void __trace_remove_event_call(struct trace_event_call *call) |
1914 | { | 1914 | { |
1915 | event_remove(call); | 1915 | event_remove(call); |
1916 | trace_destroy_fields(call); | 1916 | trace_destroy_fields(call); |
@@ -1918,10 +1918,10 @@ static void __trace_remove_event_call(struct ftrace_event_call *call) | |||
1918 | call->filter = NULL; | 1918 | call->filter = NULL; |
1919 | } | 1919 | } |
1920 | 1920 | ||
1921 | static int probe_remove_event_call(struct ftrace_event_call *call) | 1921 | static int probe_remove_event_call(struct trace_event_call *call) |
1922 | { | 1922 | { |
1923 | struct trace_array *tr; | 1923 | struct trace_array *tr; |
1924 | struct ftrace_event_file *file; | 1924 | struct trace_event_file *file; |
1925 | 1925 | ||
1926 | #ifdef CONFIG_PERF_EVENTS | 1926 | #ifdef CONFIG_PERF_EVENTS |
1927 | if (call->perf_refcount) | 1927 | if (call->perf_refcount) |
@@ -1932,10 +1932,10 @@ static int probe_remove_event_call(struct ftrace_event_call *call) | |||
1932 | continue; | 1932 | continue; |
1933 | /* | 1933 | /* |
1934 | * We can't rely on ftrace_event_enable_disable(enable => 0) | 1934 | * We can't rely on ftrace_event_enable_disable(enable => 0) |
1935 | * we are going to do, FTRACE_EVENT_FL_SOFT_MODE can suppress | 1935 | * we are going to do, EVENT_FILE_FL_SOFT_MODE can suppress |
1936 | * TRACE_REG_UNREGISTER. | 1936 | * TRACE_REG_UNREGISTER. |
1937 | */ | 1937 | */ |
1938 | if (file->flags & FTRACE_EVENT_FL_ENABLED) | 1938 | if (file->flags & EVENT_FILE_FL_ENABLED) |
1939 | return -EBUSY; | 1939 | return -EBUSY; |
1940 | /* | 1940 | /* |
1941 | * The do_for_each_event_file_safe() is | 1941 | * The do_for_each_event_file_safe() is |
@@ -1952,7 +1952,7 @@ static int probe_remove_event_call(struct ftrace_event_call *call) | |||
1952 | } | 1952 | } |
1953 | 1953 | ||
1954 | /* Remove an event_call */ | 1954 | /* Remove an event_call */ |
1955 | int trace_remove_event_call(struct ftrace_event_call *call) | 1955 | int trace_remove_event_call(struct trace_event_call *call) |
1956 | { | 1956 | { |
1957 | int ret; | 1957 | int ret; |
1958 | 1958 | ||
@@ -1976,7 +1976,7 @@ int trace_remove_event_call(struct ftrace_event_call *call) | |||
1976 | 1976 | ||
1977 | static void trace_module_add_events(struct module *mod) | 1977 | static void trace_module_add_events(struct module *mod) |
1978 | { | 1978 | { |
1979 | struct ftrace_event_call **call, **start, **end; | 1979 | struct trace_event_call **call, **start, **end; |
1980 | 1980 | ||
1981 | if (!mod->num_trace_events) | 1981 | if (!mod->num_trace_events) |
1982 | return; | 1982 | return; |
@@ -1999,7 +1999,7 @@ static void trace_module_add_events(struct module *mod) | |||
1999 | 1999 | ||
2000 | static void trace_module_remove_events(struct module *mod) | 2000 | static void trace_module_remove_events(struct module *mod) |
2001 | { | 2001 | { |
2002 | struct ftrace_event_call *call, *p; | 2002 | struct trace_event_call *call, *p; |
2003 | bool clear_trace = false; | 2003 | bool clear_trace = false; |
2004 | 2004 | ||
2005 | down_write(&trace_event_sem); | 2005 | down_write(&trace_event_sem); |
@@ -2055,28 +2055,28 @@ static struct notifier_block trace_module_nb = { | |||
2055 | static void | 2055 | static void |
2056 | __trace_add_event_dirs(struct trace_array *tr) | 2056 | __trace_add_event_dirs(struct trace_array *tr) |
2057 | { | 2057 | { |
2058 | struct ftrace_event_call *call; | 2058 | struct trace_event_call *call; |
2059 | int ret; | 2059 | int ret; |
2060 | 2060 | ||
2061 | list_for_each_entry(call, &ftrace_events, list) { | 2061 | list_for_each_entry(call, &ftrace_events, list) { |
2062 | ret = __trace_add_new_event(call, tr); | 2062 | ret = __trace_add_new_event(call, tr); |
2063 | if (ret < 0) | 2063 | if (ret < 0) |
2064 | pr_warn("Could not create directory for event %s\n", | 2064 | pr_warn("Could not create directory for event %s\n", |
2065 | ftrace_event_name(call)); | 2065 | trace_event_name(call)); |
2066 | } | 2066 | } |
2067 | } | 2067 | } |
2068 | 2068 | ||
2069 | struct ftrace_event_file * | 2069 | struct trace_event_file * |
2070 | find_event_file(struct trace_array *tr, const char *system, const char *event) | 2070 | find_event_file(struct trace_array *tr, const char *system, const char *event) |
2071 | { | 2071 | { |
2072 | struct ftrace_event_file *file; | 2072 | struct trace_event_file *file; |
2073 | struct ftrace_event_call *call; | 2073 | struct trace_event_call *call; |
2074 | const char *name; | 2074 | const char *name; |
2075 | 2075 | ||
2076 | list_for_each_entry(file, &tr->events, list) { | 2076 | list_for_each_entry(file, &tr->events, list) { |
2077 | 2077 | ||
2078 | call = file->event_call; | 2078 | call = file->event_call; |
2079 | name = ftrace_event_name(call); | 2079 | name = trace_event_name(call); |
2080 | 2080 | ||
2081 | if (!name || !call->class || !call->class->reg) | 2081 | if (!name || !call->class || !call->class->reg) |
2082 | continue; | 2082 | continue; |
@@ -2098,7 +2098,7 @@ find_event_file(struct trace_array *tr, const char *system, const char *event) | |||
2098 | #define DISABLE_EVENT_STR "disable_event" | 2098 | #define DISABLE_EVENT_STR "disable_event" |
2099 | 2099 | ||
2100 | struct event_probe_data { | 2100 | struct event_probe_data { |
2101 | struct ftrace_event_file *file; | 2101 | struct trace_event_file *file; |
2102 | unsigned long count; | 2102 | unsigned long count; |
2103 | int ref; | 2103 | int ref; |
2104 | bool enable; | 2104 | bool enable; |
@@ -2114,9 +2114,9 @@ event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data) | |||
2114 | return; | 2114 | return; |
2115 | 2115 | ||
2116 | if (data->enable) | 2116 | if (data->enable) |
2117 | clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags); | 2117 | clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags); |
2118 | else | 2118 | else |
2119 | set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags); | 2119 | set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags); |
2120 | } | 2120 | } |
2121 | 2121 | ||
2122 | static void | 2122 | static void |
@@ -2132,7 +2132,7 @@ event_enable_count_probe(unsigned long ip, unsigned long parent_ip, void **_data | |||
2132 | return; | 2132 | return; |
2133 | 2133 | ||
2134 | /* Skip if the event is in a state we want to switch to */ | 2134 | /* Skip if the event is in a state we want to switch to */ |
2135 | if (data->enable == !(data->file->flags & FTRACE_EVENT_FL_SOFT_DISABLED)) | 2135 | if (data->enable == !(data->file->flags & EVENT_FILE_FL_SOFT_DISABLED)) |
2136 | return; | 2136 | return; |
2137 | 2137 | ||
2138 | if (data->count != -1) | 2138 | if (data->count != -1) |
@@ -2152,7 +2152,7 @@ event_enable_print(struct seq_file *m, unsigned long ip, | |||
2152 | seq_printf(m, "%s:%s:%s", | 2152 | seq_printf(m, "%s:%s:%s", |
2153 | data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR, | 2153 | data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR, |
2154 | data->file->event_call->class->system, | 2154 | data->file->event_call->class->system, |
2155 | ftrace_event_name(data->file->event_call)); | 2155 | trace_event_name(data->file->event_call)); |
2156 | 2156 | ||
2157 | if (data->count == -1) | 2157 | if (data->count == -1) |
2158 | seq_puts(m, ":unlimited\n"); | 2158 | seq_puts(m, ":unlimited\n"); |
@@ -2226,7 +2226,7 @@ event_enable_func(struct ftrace_hash *hash, | |||
2226 | char *glob, char *cmd, char *param, int enabled) | 2226 | char *glob, char *cmd, char *param, int enabled) |
2227 | { | 2227 | { |
2228 | struct trace_array *tr = top_trace_array(); | 2228 | struct trace_array *tr = top_trace_array(); |
2229 | struct ftrace_event_file *file; | 2229 | struct trace_event_file *file; |
2230 | struct ftrace_probe_ops *ops; | 2230 | struct ftrace_probe_ops *ops; |
2231 | struct event_probe_data *data; | 2231 | struct event_probe_data *data; |
2232 | const char *system; | 2232 | const char *system; |
@@ -2358,7 +2358,7 @@ static inline int register_event_cmds(void) { return 0; } | |||
2358 | #endif /* CONFIG_DYNAMIC_FTRACE */ | 2358 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
2359 | 2359 | ||
2360 | /* | 2360 | /* |
2361 | * The top level array has already had its ftrace_event_file | 2361 | * The top level array has already had its trace_event_file |
2362 | * descriptors created in order to allow for early events to | 2362 | * descriptors created in order to allow for early events to |
2363 | * be recorded. This function is called after the tracefs has been | 2363 | * be recorded. This function is called after the tracefs has been |
2364 | * initialized, and we now have to create the files associated | 2364 | * initialized, and we now have to create the files associated |
@@ -2367,7 +2367,7 @@ static inline int register_event_cmds(void) { return 0; } | |||
2367 | static __init void | 2367 | static __init void |
2368 | __trace_early_add_event_dirs(struct trace_array *tr) | 2368 | __trace_early_add_event_dirs(struct trace_array *tr) |
2369 | { | 2369 | { |
2370 | struct ftrace_event_file *file; | 2370 | struct trace_event_file *file; |
2371 | int ret; | 2371 | int ret; |
2372 | 2372 | ||
2373 | 2373 | ||
@@ -2375,7 +2375,7 @@ __trace_early_add_event_dirs(struct trace_array *tr) | |||
2375 | ret = event_create_dir(tr->event_dir, file); | 2375 | ret = event_create_dir(tr->event_dir, file); |
2376 | if (ret < 0) | 2376 | if (ret < 0) |
2377 | pr_warn("Could not create directory for event %s\n", | 2377 | pr_warn("Could not create directory for event %s\n", |
2378 | ftrace_event_name(file->event_call)); | 2378 | trace_event_name(file->event_call)); |
2379 | } | 2379 | } |
2380 | } | 2380 | } |
2381 | 2381 | ||
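
The comment preceding __trace_early_add_event_dirs() describes a two-phase bring-up: trace_event_file descriptors for the top-level array exist before tracefs is initialized, and the directories are only materialized afterwards. A rough user-space analogue of that register-early, materialize-later split (all names invented):

    #include <stdio.h>

    struct event { const char *name; int has_dir; };

    static struct event events[8];
    static int nr_events;

    /* Phase 1: record the event early, before any "filesystem" exists. */
    static void early_add_event(const char *name)
    {
        events[nr_events++] = (struct event){ .name = name, .has_dir = 0 };
    }

    /* Phase 2: once the backing store is up, create what was deferred. */
    static void create_deferred_dirs(void)
    {
        for (int i = 0; i < nr_events; i++) {
            events[i].has_dir = 1;       /* stand-in for event_create_dir() */
            printf("created dir for %s\n", events[i].name);
        }
    }

    int main(void)
    {
        early_add_event("sched_switch");      /* recorded before "boot" ends */
        early_add_event("irq_handler_entry");
        create_deferred_dirs();               /* runs once the fs is available */
        return 0;
    }
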
@@ -2388,7 +2388,7 @@ __trace_early_add_event_dirs(struct trace_array *tr) | |||
2388 | static __init void | 2388 | static __init void |
2389 | __trace_early_add_events(struct trace_array *tr) | 2389 | __trace_early_add_events(struct trace_array *tr) |
2390 | { | 2390 | { |
2391 | struct ftrace_event_call *call; | 2391 | struct trace_event_call *call; |
2392 | int ret; | 2392 | int ret; |
2393 | 2393 | ||
2394 | list_for_each_entry(call, &ftrace_events, list) { | 2394 | list_for_each_entry(call, &ftrace_events, list) { |
@@ -2399,7 +2399,7 @@ __trace_early_add_events(struct trace_array *tr) | |||
2399 | ret = __trace_early_add_new_event(call, tr); | 2399 | ret = __trace_early_add_new_event(call, tr); |
2400 | if (ret < 0) | 2400 | if (ret < 0) |
2401 | pr_warn("Could not create early event %s\n", | 2401 | pr_warn("Could not create early event %s\n", |
2402 | ftrace_event_name(call)); | 2402 | trace_event_name(call)); |
2403 | } | 2403 | } |
2404 | } | 2404 | } |
2405 | 2405 | ||
@@ -2407,13 +2407,13 @@ __trace_early_add_events(struct trace_array *tr) | |||
2407 | static void | 2407 | static void |
2408 | __trace_remove_event_dirs(struct trace_array *tr) | 2408 | __trace_remove_event_dirs(struct trace_array *tr) |
2409 | { | 2409 | { |
2410 | struct ftrace_event_file *file, *next; | 2410 | struct trace_event_file *file, *next; |
2411 | 2411 | ||
2412 | list_for_each_entry_safe(file, next, &tr->events, list) | 2412 | list_for_each_entry_safe(file, next, &tr->events, list) |
2413 | remove_event_file_dir(file); | 2413 | remove_event_file_dir(file); |
2414 | } | 2414 | } |
2415 | 2415 | ||
2416 | static void __add_event_to_tracers(struct ftrace_event_call *call) | 2416 | static void __add_event_to_tracers(struct trace_event_call *call) |
2417 | { | 2417 | { |
2418 | struct trace_array *tr; | 2418 | struct trace_array *tr; |
2419 | 2419 | ||
@@ -2421,8 +2421,8 @@ static void __add_event_to_tracers(struct ftrace_event_call *call) | |||
2421 | __trace_add_new_event(call, tr); | 2421 | __trace_add_new_event(call, tr); |
2422 | } | 2422 | } |
2423 | 2423 | ||
2424 | extern struct ftrace_event_call *__start_ftrace_events[]; | 2424 | extern struct trace_event_call *__start_ftrace_events[]; |
2425 | extern struct ftrace_event_call *__stop_ftrace_events[]; | 2425 | extern struct trace_event_call *__stop_ftrace_events[]; |
2426 | 2426 | ||
2427 | static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata; | 2427 | static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata; |
2428 | 2428 | ||
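
The __start_ftrace_events/__stop_ftrace_events externs above are not defined anywhere in C: they are boundary symbols the linker synthesizes around the _ftrace_events ELF section that every trace_event_call pointer is placed into, and boot code simply walks the span between them. The same trick works in user space with GCC or Clang on an ELF target, as in this self-contained sketch (the section name "myevents" is made up):

    #include <stdio.h>

    struct evt { const char *name; };

    /* Place each registration in a dedicated ELF section; the linker then
     * provides __start_myevents / __stop_myevents boundary symbols. */
    #define REGISTER_EVT(n)                                           \
        static struct evt evt_##n = { .name = #n };                   \
        static struct evt *ptr_##n                                    \
            __attribute__((used, section("myevents"))) = &evt_##n

    REGISTER_EVT(alpha);
    REGISTER_EVT(beta);

    extern struct evt *__start_myevents[];
    extern struct evt *__stop_myevents[];

    int main(void)
    {
        /* Walk the section the way event_trace_enable() walks _ftrace_events. */
        for (struct evt **p = __start_myevents; p < __stop_myevents; p++)
            printf("found event %s\n", (*p)->name);
        return 0;
    }
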
@@ -2557,7 +2557,7 @@ int event_trace_del_tracer(struct trace_array *tr) | |||
2557 | static __init int event_trace_memsetup(void) | 2557 | static __init int event_trace_memsetup(void) |
2558 | { | 2558 | { |
2559 | field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC); | 2559 | field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC); |
2560 | file_cachep = KMEM_CACHE(ftrace_event_file, SLAB_PANIC); | 2560 | file_cachep = KMEM_CACHE(trace_event_file, SLAB_PANIC); |
2561 | return 0; | 2561 | return 0; |
2562 | } | 2562 | } |
2563 | 2563 | ||
@@ -2593,7 +2593,7 @@ early_enable_events(struct trace_array *tr, bool disable_first) | |||
2593 | static __init int event_trace_enable(void) | 2593 | static __init int event_trace_enable(void) |
2594 | { | 2594 | { |
2595 | struct trace_array *tr = top_trace_array(); | 2595 | struct trace_array *tr = top_trace_array(); |
2596 | struct ftrace_event_call **iter, *call; | 2596 | struct trace_event_call **iter, *call; |
2597 | int ret; | 2597 | int ret; |
2598 | 2598 | ||
2599 | if (!tr) | 2599 | if (!tr) |
@@ -2754,9 +2754,9 @@ static __init void event_test_stuff(void) | |||
2754 | */ | 2754 | */ |
2755 | static __init void event_trace_self_tests(void) | 2755 | static __init void event_trace_self_tests(void) |
2756 | { | 2756 | { |
2757 | struct ftrace_subsystem_dir *dir; | 2757 | struct trace_subsystem_dir *dir; |
2758 | struct ftrace_event_file *file; | 2758 | struct trace_event_file *file; |
2759 | struct ftrace_event_call *call; | 2759 | struct trace_event_call *call; |
2760 | struct event_subsystem *system; | 2760 | struct event_subsystem *system; |
2761 | struct trace_array *tr; | 2761 | struct trace_array *tr; |
2762 | int ret; | 2762 | int ret; |
@@ -2787,13 +2787,13 @@ static __init void event_trace_self_tests(void) | |||
2787 | continue; | 2787 | continue; |
2788 | #endif | 2788 | #endif |
2789 | 2789 | ||
2790 | pr_info("Testing event %s: ", ftrace_event_name(call)); | 2790 | pr_info("Testing event %s: ", trace_event_name(call)); |
2791 | 2791 | ||
2792 | /* | 2792 | /* |
2793 | * If an event is already enabled, someone is using | 2793 | * If an event is already enabled, someone is using |
2794 | * it and the self test should not be on. | 2794 | * it and the self test should not be on. |
2795 | */ | 2795 | */ |
2796 | if (file->flags & FTRACE_EVENT_FL_ENABLED) { | 2796 | if (file->flags & EVENT_FILE_FL_ENABLED) { |
2797 | pr_warn("Enabled event during self test!\n"); | 2797 | pr_warn("Enabled event during self test!\n"); |
2798 | WARN_ON_ONCE(1); | 2798 | WARN_ON_ONCE(1); |
2799 | continue; | 2799 | continue; |
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 1c92dfa1dd17..d81d6f302b14 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c | |||
@@ -643,7 +643,7 @@ static void append_filter_err(struct filter_parse_state *ps, | |||
643 | free_page((unsigned long) buf); | 643 | free_page((unsigned long) buf); |
644 | } | 644 | } |
645 | 645 | ||
646 | static inline struct event_filter *event_filter(struct ftrace_event_file *file) | 646 | static inline struct event_filter *event_filter(struct trace_event_file *file) |
647 | { | 647 | { |
648 | if (file->event_call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) | 648 | if (file->event_call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) |
649 | return file->event_call->filter; | 649 | return file->event_call->filter; |
@@ -652,7 +652,7 @@ static inline struct event_filter *event_filter(struct ftrace_event_file *file) | |||
652 | } | 652 | } |
653 | 653 | ||
654 | /* caller must hold event_mutex */ | 654 | /* caller must hold event_mutex */ |
655 | void print_event_filter(struct ftrace_event_file *file, struct trace_seq *s) | 655 | void print_event_filter(struct trace_event_file *file, struct trace_seq *s) |
656 | { | 656 | { |
657 | struct event_filter *filter = event_filter(file); | 657 | struct event_filter *filter = event_filter(file); |
658 | 658 | ||
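
event_filter() above encodes the split behind TRACE_EVENT_FL_USE_CALL_FILTER: events carrying that flag share one filter on the trace_event_call, while everything else holds a filter per trace_event_file, i.e. per trace instance. A compressed illustration of the selection, with invented field names:

    #include <stdio.h>

    #define USE_CALL_FILTER 0x1

    struct call { unsigned flags; const char *call_filter; };
    struct file { struct call *call; const char *file_filter; };

    /* Pick the shared per-call filter or the per-instance file filter. */
    static const char *filter_of(const struct file *f)
    {
        if (f->call->flags & USE_CALL_FILTER)
            return f->call->call_filter;
        return f->file_filter;
    }

    int main(void)
    {
        struct call shared = { USE_CALL_FILTER, "pid == 1" };
        struct file a = { &shared, "pid == 2" };
        printf("%s\n", filter_of(&a));   /* "pid == 1": call filter wins */
        shared.flags = 0;
        printf("%s\n", filter_of(&a));   /* "pid == 2": per-file filter */
        return 0;
    }
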
@@ -780,14 +780,14 @@ static void __free_preds(struct event_filter *filter) | |||
780 | filter->n_preds = 0; | 780 | filter->n_preds = 0; |
781 | } | 781 | } |
782 | 782 | ||
783 | static void filter_disable(struct ftrace_event_file *file) | 783 | static void filter_disable(struct trace_event_file *file) |
784 | { | 784 | { |
785 | struct ftrace_event_call *call = file->event_call; | 785 | struct trace_event_call *call = file->event_call; |
786 | 786 | ||
787 | if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) | 787 | if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) |
788 | call->flags &= ~TRACE_EVENT_FL_FILTERED; | 788 | call->flags &= ~TRACE_EVENT_FL_FILTERED; |
789 | else | 789 | else |
790 | file->flags &= ~FTRACE_EVENT_FL_FILTERED; | 790 | file->flags &= ~EVENT_FILE_FL_FILTERED; |
791 | } | 791 | } |
792 | 792 | ||
793 | static void __free_filter(struct event_filter *filter) | 793 | static void __free_filter(struct event_filter *filter) |
@@ -837,9 +837,9 @@ static int __alloc_preds(struct event_filter *filter, int n_preds) | |||
837 | return 0; | 837 | return 0; |
838 | } | 838 | } |
839 | 839 | ||
840 | static inline void __remove_filter(struct ftrace_event_file *file) | 840 | static inline void __remove_filter(struct trace_event_file *file) |
841 | { | 841 | { |
842 | struct ftrace_event_call *call = file->event_call; | 842 | struct trace_event_call *call = file->event_call; |
843 | 843 | ||
844 | filter_disable(file); | 844 | filter_disable(file); |
845 | if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) | 845 | if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) |
@@ -848,10 +848,10 @@ static inline void __remove_filter(struct ftrace_event_file *file) | |||
848 | remove_filter_string(file->filter); | 848 | remove_filter_string(file->filter); |
849 | } | 849 | } |
850 | 850 | ||
851 | static void filter_free_subsystem_preds(struct ftrace_subsystem_dir *dir, | 851 | static void filter_free_subsystem_preds(struct trace_subsystem_dir *dir, |
852 | struct trace_array *tr) | 852 | struct trace_array *tr) |
853 | { | 853 | { |
854 | struct ftrace_event_file *file; | 854 | struct trace_event_file *file; |
855 | 855 | ||
856 | list_for_each_entry(file, &tr->events, list) { | 856 | list_for_each_entry(file, &tr->events, list) { |
857 | if (file->system != dir) | 857 | if (file->system != dir) |
@@ -860,9 +860,9 @@ static void filter_free_subsystem_preds(struct ftrace_subsystem_dir *dir, | |||
860 | } | 860 | } |
861 | } | 861 | } |
862 | 862 | ||
863 | static inline void __free_subsystem_filter(struct ftrace_event_file *file) | 863 | static inline void __free_subsystem_filter(struct trace_event_file *file) |
864 | { | 864 | { |
865 | struct ftrace_event_call *call = file->event_call; | 865 | struct trace_event_call *call = file->event_call; |
866 | 866 | ||
867 | if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) { | 867 | if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) { |
868 | __free_filter(call->filter); | 868 | __free_filter(call->filter); |
@@ -873,10 +873,10 @@ static inline void __free_subsystem_filter(struct ftrace_event_file *file) | |||
873 | } | 873 | } |
874 | } | 874 | } |
875 | 875 | ||
876 | static void filter_free_subsystem_filters(struct ftrace_subsystem_dir *dir, | 876 | static void filter_free_subsystem_filters(struct trace_subsystem_dir *dir, |
877 | struct trace_array *tr) | 877 | struct trace_array *tr) |
878 | { | 878 | { |
879 | struct ftrace_event_file *file; | 879 | struct trace_event_file *file; |
880 | 880 | ||
881 | list_for_each_entry(file, &tr->events, list) { | 881 | list_for_each_entry(file, &tr->events, list) { |
882 | if (file->system != dir) | 882 | if (file->system != dir) |
@@ -1342,7 +1342,7 @@ parse_operand: | |||
1342 | } | 1342 | } |
1343 | 1343 | ||
1344 | static struct filter_pred *create_pred(struct filter_parse_state *ps, | 1344 | static struct filter_pred *create_pred(struct filter_parse_state *ps, |
1345 | struct ftrace_event_call *call, | 1345 | struct trace_event_call *call, |
1346 | int op, char *operand1, char *operand2) | 1346 | int op, char *operand1, char *operand2) |
1347 | { | 1347 | { |
1348 | struct ftrace_event_field *field; | 1348 | struct ftrace_event_field *field; |
@@ -1564,7 +1564,7 @@ static int fold_pred_tree(struct event_filter *filter, | |||
1564 | filter->preds); | 1564 | filter->preds); |
1565 | } | 1565 | } |
1566 | 1566 | ||
1567 | static int replace_preds(struct ftrace_event_call *call, | 1567 | static int replace_preds(struct trace_event_call *call, |
1568 | struct event_filter *filter, | 1568 | struct event_filter *filter, |
1569 | struct filter_parse_state *ps, | 1569 | struct filter_parse_state *ps, |
1570 | bool dry_run) | 1570 | bool dry_run) |
@@ -1677,20 +1677,20 @@ fail: | |||
1677 | return err; | 1677 | return err; |
1678 | } | 1678 | } |
1679 | 1679 | ||
1680 | static inline void event_set_filtered_flag(struct ftrace_event_file *file) | 1680 | static inline void event_set_filtered_flag(struct trace_event_file *file) |
1681 | { | 1681 | { |
1682 | struct ftrace_event_call *call = file->event_call; | 1682 | struct trace_event_call *call = file->event_call; |
1683 | 1683 | ||
1684 | if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) | 1684 | if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) |
1685 | call->flags |= TRACE_EVENT_FL_FILTERED; | 1685 | call->flags |= TRACE_EVENT_FL_FILTERED; |
1686 | else | 1686 | else |
1687 | file->flags |= FTRACE_EVENT_FL_FILTERED; | 1687 | file->flags |= EVENT_FILE_FL_FILTERED; |
1688 | } | 1688 | } |
1689 | 1689 | ||
1690 | static inline void event_set_filter(struct ftrace_event_file *file, | 1690 | static inline void event_set_filter(struct trace_event_file *file, |
1691 | struct event_filter *filter) | 1691 | struct event_filter *filter) |
1692 | { | 1692 | { |
1693 | struct ftrace_event_call *call = file->event_call; | 1693 | struct trace_event_call *call = file->event_call; |
1694 | 1694 | ||
1695 | if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) | 1695 | if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) |
1696 | rcu_assign_pointer(call->filter, filter); | 1696 | rcu_assign_pointer(call->filter, filter); |
@@ -1698,9 +1698,9 @@ static inline void event_set_filter(struct ftrace_event_file *file, | |||
1698 | rcu_assign_pointer(file->filter, filter); | 1698 | rcu_assign_pointer(file->filter, filter); |
1699 | } | 1699 | } |
1700 | 1700 | ||
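
event_set_filter() publishes the new filter with rcu_assign_pointer(), which orders the filter's initialization before the pointer store so concurrent readers never observe a half-built filter. In portable C11 the closest stand-in is a release store paired with an acquire load (real RCU readers get away with the cheaper rcu_dereference(), so this sketch is deliberately conservative):

    #include <stdatomic.h>
    #include <stdio.h>

    struct filter { int n_preds; };

    static _Atomic(struct filter *) current_filter;

    /* Publish with release semantics, as rcu_assign_pointer() does, so a
     * reader that sees the pointer also sees the initialized fields. */
    static void publish(struct filter *f)
    {
        atomic_store_explicit(&current_filter, f, memory_order_release);
    }

    static struct filter *snapshot(void)
    {
        /* Acquire pairs with the release above; a stand-in for
         * rcu_dereference(), which is weaker but sufficient in the kernel. */
        return atomic_load_explicit(&current_filter, memory_order_acquire);
    }

    int main(void)
    {
        static struct filter f = { .n_preds = 3 };
        publish(&f);
        struct filter *cur = snapshot();
        if (cur)
            printf("n_preds = %d\n", cur->n_preds);
        return 0;
    }
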
1701 | static inline void event_clear_filter(struct ftrace_event_file *file) | 1701 | static inline void event_clear_filter(struct trace_event_file *file) |
1702 | { | 1702 | { |
1703 | struct ftrace_event_call *call = file->event_call; | 1703 | struct trace_event_call *call = file->event_call; |
1704 | 1704 | ||
1705 | if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) | 1705 | if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) |
1706 | RCU_INIT_POINTER(call->filter, NULL); | 1706 | RCU_INIT_POINTER(call->filter, NULL); |
@@ -1709,33 +1709,33 @@ static inline void event_clear_filter(struct ftrace_event_file *file) | |||
1709 | } | 1709 | } |
1710 | 1710 | ||
1711 | static inline void | 1711 | static inline void |
1712 | event_set_no_set_filter_flag(struct ftrace_event_file *file) | 1712 | event_set_no_set_filter_flag(struct trace_event_file *file) |
1713 | { | 1713 | { |
1714 | struct ftrace_event_call *call = file->event_call; | 1714 | struct trace_event_call *call = file->event_call; |
1715 | 1715 | ||
1716 | if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) | 1716 | if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) |
1717 | call->flags |= TRACE_EVENT_FL_NO_SET_FILTER; | 1717 | call->flags |= TRACE_EVENT_FL_NO_SET_FILTER; |
1718 | else | 1718 | else |
1719 | file->flags |= FTRACE_EVENT_FL_NO_SET_FILTER; | 1719 | file->flags |= EVENT_FILE_FL_NO_SET_FILTER; |
1720 | } | 1720 | } |
1721 | 1721 | ||
1722 | static inline void | 1722 | static inline void |
1723 | event_clear_no_set_filter_flag(struct ftrace_event_file *file) | 1723 | event_clear_no_set_filter_flag(struct trace_event_file *file) |
1724 | { | 1724 | { |
1725 | struct ftrace_event_call *call = file->event_call; | 1725 | struct trace_event_call *call = file->event_call; |
1726 | 1726 | ||
1727 | if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) | 1727 | if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) |
1728 | call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER; | 1728 | call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER; |
1729 | else | 1729 | else |
1730 | file->flags &= ~FTRACE_EVENT_FL_NO_SET_FILTER; | 1730 | file->flags &= ~EVENT_FILE_FL_NO_SET_FILTER; |
1731 | } | 1731 | } |
1732 | 1732 | ||
1733 | static inline bool | 1733 | static inline bool |
1734 | event_no_set_filter_flag(struct ftrace_event_file *file) | 1734 | event_no_set_filter_flag(struct trace_event_file *file) |
1735 | { | 1735 | { |
1736 | struct ftrace_event_call *call = file->event_call; | 1736 | struct trace_event_call *call = file->event_call; |
1737 | 1737 | ||
1738 | if (file->flags & FTRACE_EVENT_FL_NO_SET_FILTER) | 1738 | if (file->flags & EVENT_FILE_FL_NO_SET_FILTER) |
1739 | return true; | 1739 | return true; |
1740 | 1740 | ||
1741 | if ((call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) && | 1741 | if ((call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) && |
@@ -1750,12 +1750,12 @@ struct filter_list { | |||
1750 | struct event_filter *filter; | 1750 | struct event_filter *filter; |
1751 | }; | 1751 | }; |
1752 | 1752 | ||
1753 | static int replace_system_preds(struct ftrace_subsystem_dir *dir, | 1753 | static int replace_system_preds(struct trace_subsystem_dir *dir, |
1754 | struct trace_array *tr, | 1754 | struct trace_array *tr, |
1755 | struct filter_parse_state *ps, | 1755 | struct filter_parse_state *ps, |
1756 | char *filter_string) | 1756 | char *filter_string) |
1757 | { | 1757 | { |
1758 | struct ftrace_event_file *file; | 1758 | struct trace_event_file *file; |
1759 | struct filter_list *filter_item; | 1759 | struct filter_list *filter_item; |
1760 | struct filter_list *tmp; | 1760 | struct filter_list *tmp; |
1761 | LIST_HEAD(filter_list); | 1761 | LIST_HEAD(filter_list); |
@@ -1899,8 +1899,8 @@ static void create_filter_finish(struct filter_parse_state *ps) | |||
1899 | } | 1899 | } |
1900 | 1900 | ||
1901 | /** | 1901 | /** |
1902 | * create_filter - create a filter for a ftrace_event_call | 1902 | * create_filter - create a filter for a trace_event_call |
1903 | * @call: ftrace_event_call to create a filter for | 1903 | * @call: trace_event_call to create a filter for |
1904 | * @filter_str: filter string | 1904 | * @filter_str: filter string |
1905 | * @set_str: remember @filter_str and enable detailed error in filter | 1905 | * @set_str: remember @filter_str and enable detailed error in filter |
1906 | * @filterp: out param for created filter (always updated on return) | 1906 | * @filterp: out param for created filter (always updated on return) |
@@ -1914,7 +1914,7 @@ static void create_filter_finish(struct filter_parse_state *ps) | |||
1914 | * information if @set_str is %true and the caller is responsible for | 1914 | * information if @set_str is %true and the caller is responsible for |
1915 | * freeing it. | 1915 | * freeing it. |
1916 | */ | 1916 | */ |
1917 | static int create_filter(struct ftrace_event_call *call, | 1917 | static int create_filter(struct trace_event_call *call, |
1918 | char *filter_str, bool set_str, | 1918 | char *filter_str, bool set_str, |
1919 | struct event_filter **filterp) | 1919 | struct event_filter **filterp) |
1920 | { | 1920 | { |
@@ -1934,7 +1934,7 @@ static int create_filter(struct ftrace_event_call *call, | |||
1934 | return err; | 1934 | return err; |
1935 | } | 1935 | } |
1936 | 1936 | ||
1937 | int create_event_filter(struct ftrace_event_call *call, | 1937 | int create_event_filter(struct trace_event_call *call, |
1938 | char *filter_str, bool set_str, | 1938 | char *filter_str, bool set_str, |
1939 | struct event_filter **filterp) | 1939 | struct event_filter **filterp) |
1940 | { | 1940 | { |
@@ -1950,7 +1950,7 @@ int create_event_filter(struct ftrace_event_call *call, | |||
1950 | * Identical to create_filter() except that it creates a subsystem filter | 1950 | * Identical to create_filter() except that it creates a subsystem filter |
1951 | * and always remembers @filter_str. | 1951 | * and always remembers @filter_str. |
1952 | */ | 1952 | */ |
1953 | static int create_system_filter(struct ftrace_subsystem_dir *dir, | 1953 | static int create_system_filter(struct trace_subsystem_dir *dir, |
1954 | struct trace_array *tr, | 1954 | struct trace_array *tr, |
1955 | char *filter_str, struct event_filter **filterp) | 1955 | char *filter_str, struct event_filter **filterp) |
1956 | { | 1956 | { |
@@ -1976,9 +1976,9 @@ static int create_system_filter(struct ftrace_subsystem_dir *dir, | |||
1976 | } | 1976 | } |
1977 | 1977 | ||
1978 | /* caller must hold event_mutex */ | 1978 | /* caller must hold event_mutex */ |
1979 | int apply_event_filter(struct ftrace_event_file *file, char *filter_string) | 1979 | int apply_event_filter(struct trace_event_file *file, char *filter_string) |
1980 | { | 1980 | { |
1981 | struct ftrace_event_call *call = file->event_call; | 1981 | struct trace_event_call *call = file->event_call; |
1982 | struct event_filter *filter; | 1982 | struct event_filter *filter; |
1983 | int err; | 1983 | int err; |
1984 | 1984 | ||
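
The create_filter() kerneldoc above pins down an easy-to-miss contract: @filterp is written on every return path, and on a parse failure with @set_str the half-built filter is still handed back so its stored error string can be reported, leaving the caller to free it in both outcomes. A toy version of that error-carrying out parameter (names hypothetical):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct toy_filter { char *err; /* parse diagnostics, if any */ };

    /* Always updates *out, even on failure, so the caller can report err. */
    static int toy_create_filter(const char *str, struct toy_filter **out)
    {
        struct toy_filter *f = calloc(1, sizeof(*f));
        *out = f;                              /* updated on every path */
        if (strchr(str, '(') && !strchr(str, ')')) {
            f->err = strdup("unbalanced parenthesis");
            return -1;                         /* caller still owns *out */
        }
        return 0;
    }

    int main(void)
    {
        struct toy_filter *f;
        if (toy_create_filter("(pid == 1", &f) < 0)
            fprintf(stderr, "parse error: %s\n", f->err);
        free(f->err);
        free(f);                               /* caller frees in both cases */
        return 0;
    }
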
@@ -2027,7 +2027,7 @@ int apply_event_filter(struct ftrace_event_file *file, char *filter_string) | |||
2027 | return err; | 2027 | return err; |
2028 | } | 2028 | } |
2029 | 2029 | ||
2030 | int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir, | 2030 | int apply_subsystem_event_filter(struct trace_subsystem_dir *dir, |
2031 | char *filter_string) | 2031 | char *filter_string) |
2032 | { | 2032 | { |
2033 | struct event_subsystem *system = dir->subsystem; | 2033 | struct event_subsystem *system = dir->subsystem; |
@@ -2226,7 +2226,7 @@ int ftrace_profile_set_filter(struct perf_event *event, int event_id, | |||
2226 | { | 2226 | { |
2227 | int err; | 2227 | int err; |
2228 | struct event_filter *filter; | 2228 | struct event_filter *filter; |
2229 | struct ftrace_event_call *call; | 2229 | struct trace_event_call *call; |
2230 | 2230 | ||
2231 | mutex_lock(&event_mutex); | 2231 | mutex_lock(&event_mutex); |
2232 | 2232 | ||
@@ -2282,7 +2282,7 @@ out_unlock: | |||
2282 | 2282 | ||
2283 | static struct test_filter_data_t { | 2283 | static struct test_filter_data_t { |
2284 | char *filter; | 2284 | char *filter; |
2285 | struct ftrace_raw_ftrace_test_filter rec; | 2285 | struct trace_event_raw_ftrace_test_filter rec; |
2286 | int match; | 2286 | int match; |
2287 | char *not_visited; | 2287 | char *not_visited; |
2288 | } test_filter_data[] = { | 2288 | } test_filter_data[] = { |
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c index 8712df9decb4..42a4009fd75a 100644 --- a/kernel/trace/trace_events_trigger.c +++ b/kernel/trace/trace_events_trigger.c | |||
@@ -40,7 +40,7 @@ trigger_data_free(struct event_trigger_data *data) | |||
40 | 40 | ||
41 | /** | 41 | /** |
42 | * event_triggers_call - Call triggers associated with a trace event | 42 | * event_triggers_call - Call triggers associated with a trace event |
43 | * @file: The ftrace_event_file associated with the event | 43 | * @file: The trace_event_file associated with the event |
44 | * @rec: The trace entry for the event, NULL for unconditional invocation | 44 | * @rec: The trace entry for the event, NULL for unconditional invocation |
45 | * | 45 | * |
46 | * For each trigger associated with an event, invoke the trigger | 46 | * For each trigger associated with an event, invoke the trigger |
@@ -63,7 +63,7 @@ trigger_data_free(struct event_trigger_data *data) | |||
63 | * any trigger that should be deferred, ETT_NONE if nothing to defer. | 63 | * any trigger that should be deferred, ETT_NONE if nothing to defer. |
64 | */ | 64 | */ |
65 | enum event_trigger_type | 65 | enum event_trigger_type |
66 | event_triggers_call(struct ftrace_event_file *file, void *rec) | 66 | event_triggers_call(struct trace_event_file *file, void *rec) |
67 | { | 67 | { |
68 | struct event_trigger_data *data; | 68 | struct event_trigger_data *data; |
69 | enum event_trigger_type tt = ETT_NONE; | 69 | enum event_trigger_type tt = ETT_NONE; |
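
The kerneldoc above describes a two-pass trigger protocol: event_triggers_call() fires whatever it safely can from the tracepoint handler and returns an event_trigger_type bitmask naming triggers that must wait, typically because they filter on the record or need it committed, and event_triggers_post_call() in the next hunk later invokes exactly that set. A hedged sketch of the defer-by-bitmask idea:

    #include <stdio.h>

    enum tt { TT_NONE = 0, TT_SNAPSHOT = 1 << 0, TT_STACKTRACE = 1 << 1 };

    struct trigger { enum tt type; int post; /* needs the committed record */ };

    /* First pass: fire immediate triggers, collect deferred ones as bits. */
    static enum tt triggers_call(const struct trigger *t, int n)
    {
        enum tt deferred = TT_NONE;
        for (int i = 0; i < n; i++) {
            if (t[i].post)
                deferred |= t[i].type;
            else
                printf("firing trigger %d now\n", t[i].type);
        }
        return deferred;
    }

    /* Second pass, after commit: fire only the triggers named in the mask. */
    static void triggers_post_call(const struct trigger *t, int n, enum tt mask)
    {
        for (int i = 0; i < n; i++)
            if (t[i].type & mask)
                printf("firing deferred trigger %d\n", t[i].type);
    }

    int main(void)
    {
        struct trigger t[] = { { TT_SNAPSHOT, 0 }, { TT_STACKTRACE, 1 } };
        enum tt mask = triggers_call(t, 2);
        /* ...the record would be written and committed here... */
        triggers_post_call(t, 2, mask);
        return 0;
    }
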
@@ -92,7 +92,7 @@ EXPORT_SYMBOL_GPL(event_triggers_call); | |||
92 | 92 | ||
93 | /** | 93 | /** |
94 | * event_triggers_post_call - Call 'post_triggers' for a trace event | 94 | * event_triggers_post_call - Call 'post_triggers' for a trace event |
95 | * @file: The ftrace_event_file associated with the event | 95 | * @file: The trace_event_file associated with the event |
96 | * @tt: enum event_trigger_type containing a set bit for each trigger to invoke | 96 | * @tt: enum event_trigger_type containing a set bit for each trigger to invoke |
97 | * | 97 | * |
98 | * For each trigger associated with an event, invoke the trigger | 98 | * For each trigger associated with an event, invoke the trigger |
@@ -103,7 +103,7 @@ EXPORT_SYMBOL_GPL(event_triggers_call); | |||
103 | * Called from tracepoint handlers (with rcu_read_lock_sched() held). | 103 | * Called from tracepoint handlers (with rcu_read_lock_sched() held). |
104 | */ | 104 | */ |
105 | void | 105 | void |
106 | event_triggers_post_call(struct ftrace_event_file *file, | 106 | event_triggers_post_call(struct trace_event_file *file, |
107 | enum event_trigger_type tt) | 107 | enum event_trigger_type tt) |
108 | { | 108 | { |
109 | struct event_trigger_data *data; | 109 | struct event_trigger_data *data; |
@@ -119,7 +119,7 @@ EXPORT_SYMBOL_GPL(event_triggers_post_call); | |||
119 | 119 | ||
120 | static void *trigger_next(struct seq_file *m, void *t, loff_t *pos) | 120 | static void *trigger_next(struct seq_file *m, void *t, loff_t *pos) |
121 | { | 121 | { |
122 | struct ftrace_event_file *event_file = event_file_data(m->private); | 122 | struct trace_event_file *event_file = event_file_data(m->private); |
123 | 123 | ||
124 | if (t == SHOW_AVAILABLE_TRIGGERS) | 124 | if (t == SHOW_AVAILABLE_TRIGGERS) |
125 | return NULL; | 125 | return NULL; |
@@ -129,7 +129,7 @@ static void *trigger_next(struct seq_file *m, void *t, loff_t *pos) | |||
129 | 129 | ||
130 | static void *trigger_start(struct seq_file *m, loff_t *pos) | 130 | static void *trigger_start(struct seq_file *m, loff_t *pos) |
131 | { | 131 | { |
132 | struct ftrace_event_file *event_file; | 132 | struct trace_event_file *event_file; |
133 | 133 | ||
134 | /* ->stop() is called even if ->start() fails */ | 134 | /* ->stop() is called even if ->start() fails */ |
135 | mutex_lock(&event_mutex); | 135 | mutex_lock(&event_mutex); |
@@ -201,7 +201,7 @@ static int event_trigger_regex_open(struct inode *inode, struct file *file) | |||
201 | return ret; | 201 | return ret; |
202 | } | 202 | } |
203 | 203 | ||
204 | static int trigger_process_regex(struct ftrace_event_file *file, char *buff) | 204 | static int trigger_process_regex(struct trace_event_file *file, char *buff) |
205 | { | 205 | { |
206 | char *command, *next = buff; | 206 | char *command, *next = buff; |
207 | struct event_command *p; | 207 | struct event_command *p; |
@@ -227,7 +227,7 @@ static ssize_t event_trigger_regex_write(struct file *file, | |||
227 | const char __user *ubuf, | 227 | const char __user *ubuf, |
228 | size_t cnt, loff_t *ppos) | 228 | size_t cnt, loff_t *ppos) |
229 | { | 229 | { |
230 | struct ftrace_event_file *event_file; | 230 | struct trace_event_file *event_file; |
231 | ssize_t ret; | 231 | ssize_t ret; |
232 | char *buf; | 232 | char *buf; |
233 | 233 | ||
@@ -430,7 +430,7 @@ event_trigger_free(struct event_trigger_ops *ops, | |||
430 | trigger_data_free(data); | 430 | trigger_data_free(data); |
431 | } | 431 | } |
432 | 432 | ||
433 | static int trace_event_trigger_enable_disable(struct ftrace_event_file *file, | 433 | static int trace_event_trigger_enable_disable(struct trace_event_file *file, |
434 | int trigger_enable) | 434 | int trigger_enable) |
435 | { | 435 | { |
436 | int ret = 0; | 436 | int ret = 0; |
@@ -438,12 +438,12 @@ static int trace_event_trigger_enable_disable(struct ftrace_event_file *file, | |||
438 | if (trigger_enable) { | 438 | if (trigger_enable) { |
439 | if (atomic_inc_return(&file->tm_ref) > 1) | 439 | if (atomic_inc_return(&file->tm_ref) > 1) |
440 | return ret; | 440 | return ret; |
441 | set_bit(FTRACE_EVENT_FL_TRIGGER_MODE_BIT, &file->flags); | 441 | set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags); |
442 | ret = trace_event_enable_disable(file, 1, 1); | 442 | ret = trace_event_enable_disable(file, 1, 1); |
443 | } else { | 443 | } else { |
444 | if (atomic_dec_return(&file->tm_ref) > 0) | 444 | if (atomic_dec_return(&file->tm_ref) > 0) |
445 | return ret; | 445 | return ret; |
446 | clear_bit(FTRACE_EVENT_FL_TRIGGER_MODE_BIT, &file->flags); | 446 | clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags); |
447 | ret = trace_event_enable_disable(file, 0, 1); | 447 | ret = trace_event_enable_disable(file, 0, 1); |
448 | } | 448 | } |
449 | 449 | ||
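
trace_event_trigger_enable_disable() above is refcounted so that only the 0->1 and 1->0 transitions of tm_ref flip the TRIGGER_MODE bit and actually enable or disable the event; every caller in between merely moves the counter. The same first-in/last-out pattern in portable C11 atomics:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_int tm_ref;
    static bool trigger_mode;

    static void trigger_ref(bool enable)
    {
        if (enable) {
            /* fetch_add returns the old value; old + 1 mimics
             * the kernel's atomic_inc_return(). Only the first
             * reference flips the mode on. */
            if (atomic_fetch_add(&tm_ref, 1) + 1 > 1)
                return;
            trigger_mode = true;
            puts("enabled trigger mode");
        } else {
            /* Only the last reference flips it back off. */
            if (atomic_fetch_sub(&tm_ref, 1) - 1 > 0)
                return;
            trigger_mode = false;
            puts("disabled trigger mode");
        }
    }

    int main(void)
    {
        trigger_ref(true);   /* enables */
        trigger_ref(true);   /* refcount only */
        trigger_ref(false);  /* refcount only */
        trigger_ref(false);  /* disables */
        return 0;
    }
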
@@ -466,7 +466,7 @@ static int trace_event_trigger_enable_disable(struct ftrace_event_file *file, | |||
466 | void | 466 | void |
467 | clear_event_triggers(struct trace_array *tr) | 467 | clear_event_triggers(struct trace_array *tr) |
468 | { | 468 | { |
469 | struct ftrace_event_file *file; | 469 | struct trace_event_file *file; |
470 | 470 | ||
471 | list_for_each_entry(file, &tr->events, list) { | 471 | list_for_each_entry(file, &tr->events, list) { |
472 | struct event_trigger_data *data; | 472 | struct event_trigger_data *data; |
@@ -480,7 +480,7 @@ clear_event_triggers(struct trace_array *tr) | |||
480 | 480 | ||
481 | /** | 481 | /** |
482 | * update_cond_flag - Set or reset the TRIGGER_COND bit | 482 | * update_cond_flag - Set or reset the TRIGGER_COND bit |
483 | * @file: The ftrace_event_file associated with the event | 483 | * @file: The trace_event_file associated with the event |
484 | * | 484 | * |
485 | * If an event has triggers and any of those triggers has a filter or | 485 | * If an event has triggers and any of those triggers has a filter or |
486 | * a post_trigger, trigger invocation needs to be deferred until after | 486 | * a post_trigger, trigger invocation needs to be deferred until after |
@@ -488,7 +488,7 @@ clear_event_triggers(struct trace_array *tr) | |||
488 | * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be | 488 | * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be |
489 | * cleared. | 489 | * cleared. |
490 | */ | 490 | */ |
491 | static void update_cond_flag(struct ftrace_event_file *file) | 491 | static void update_cond_flag(struct trace_event_file *file) |
492 | { | 492 | { |
493 | struct event_trigger_data *data; | 493 | struct event_trigger_data *data; |
494 | bool set_cond = false; | 494 | bool set_cond = false; |
@@ -501,9 +501,9 @@ static void update_cond_flag(struct ftrace_event_file *file) | |||
501 | } | 501 | } |
502 | 502 | ||
503 | if (set_cond) | 503 | if (set_cond) |
504 | set_bit(FTRACE_EVENT_FL_TRIGGER_COND_BIT, &file->flags); | 504 | set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags); |
505 | else | 505 | else |
506 | clear_bit(FTRACE_EVENT_FL_TRIGGER_COND_BIT, &file->flags); | 506 | clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags); |
507 | } | 507 | } |
508 | 508 | ||
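
update_cond_flag() above derives one cached bit from the whole trigger list: if any attached trigger has a filter or a post_trigger, invocation must be deferred until the record exists, and TRIGGER_COND is set so the hot path pays only a single flag test. Recomputing the summary after each attach or detach keeps that test trivially cheap; compactly:

    #include <stdbool.h>
    #include <stdio.h>

    struct trig { bool has_filter, post_trigger; };

    /* Recompute the cached "needs conditional handling" bit from scratch. */
    static bool update_cond_flag(const struct trig *t, int n)
    {
        for (int i = 0; i < n; i++)
            if (t[i].has_filter || t[i].post_trigger)
                return true;   /* at least one trigger must see the record */
        return false;
    }

    int main(void)
    {
        struct trig t[] = { { false, false }, { true, false } };
        printf("TRIGGER_COND = %d\n", update_cond_flag(t, 2)); /* 1 */
        return 0;
    }
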
509 | /** | 509 | /** |
@@ -511,7 +511,7 @@ static void update_cond_flag(struct ftrace_event_file *file) | |||
511 | * @glob: The raw string used to register the trigger | 511 | * @glob: The raw string used to register the trigger |
512 | * @ops: The trigger ops associated with the trigger | 512 | * @ops: The trigger ops associated with the trigger |
513 | * @data: Trigger-specific data to associate with the trigger | 513 | * @data: Trigger-specific data to associate with the trigger |
514 | * @file: The ftrace_event_file associated with the event | 514 | * @file: The trace_event_file associated with the event |
515 | * | 515 | * |
516 | * Common implementation for event trigger registration. | 516 | * Common implementation for event trigger registration. |
517 | * | 517 | * |
@@ -522,7 +522,7 @@ static void update_cond_flag(struct ftrace_event_file *file) | |||
522 | */ | 522 | */ |
523 | static int register_trigger(char *glob, struct event_trigger_ops *ops, | 523 | static int register_trigger(char *glob, struct event_trigger_ops *ops, |
524 | struct event_trigger_data *data, | 524 | struct event_trigger_data *data, |
525 | struct ftrace_event_file *file) | 525 | struct trace_event_file *file) |
526 | { | 526 | { |
527 | struct event_trigger_data *test; | 527 | struct event_trigger_data *test; |
528 | int ret = 0; | 528 | int ret = 0; |
@@ -557,7 +557,7 @@ out: | |||
557 | * @glob: The raw string used to register the trigger | 557 | * @glob: The raw string used to register the trigger |
558 | * @ops: The trigger ops associated with the trigger | 558 | * @ops: The trigger ops associated with the trigger |
559 | * @test: Trigger-specific data used to find the trigger to remove | 559 | * @test: Trigger-specific data used to find the trigger to remove |
560 | * @file: The ftrace_event_file associated with the event | 560 | * @file: The trace_event_file associated with the event |
561 | * | 561 | * |
562 | * Common implementation for event trigger unregistration. | 562 | * Common implementation for event trigger unregistration. |
563 | * | 563 | * |
@@ -566,7 +566,7 @@ out: | |||
566 | */ | 566 | */ |
567 | static void unregister_trigger(char *glob, struct event_trigger_ops *ops, | 567 | static void unregister_trigger(char *glob, struct event_trigger_ops *ops, |
568 | struct event_trigger_data *test, | 568 | struct event_trigger_data *test, |
569 | struct ftrace_event_file *file) | 569 | struct trace_event_file *file) |
570 | { | 570 | { |
571 | struct event_trigger_data *data; | 571 | struct event_trigger_data *data; |
572 | bool unregistered = false; | 572 | bool unregistered = false; |
@@ -588,7 +588,7 @@ static void unregister_trigger(char *glob, struct event_trigger_ops *ops, | |||
588 | /** | 588 | /** |
589 | * event_trigger_callback - Generic event_command @func implementation | 589 | * event_trigger_callback - Generic event_command @func implementation |
590 | * @cmd_ops: The command ops, used for trigger registration | 590 | * @cmd_ops: The command ops, used for trigger registration |
591 | * @file: The ftrace_event_file associated with the event | 591 | * @file: The trace_event_file associated with the event |
592 | * @glob: The raw string used to register the trigger | 592 | * @glob: The raw string used to register the trigger |
593 | * @cmd: The cmd portion of the string used to register the trigger | 593 | * @cmd: The cmd portion of the string used to register the trigger |
594 | * @param: The params portion of the string used to register the trigger | 594 | * @param: The params portion of the string used to register the trigger |
@@ -603,7 +603,7 @@ static void unregister_trigger(char *glob, struct event_trigger_ops *ops, | |||
603 | */ | 603 | */ |
604 | static int | 604 | static int |
605 | event_trigger_callback(struct event_command *cmd_ops, | 605 | event_trigger_callback(struct event_command *cmd_ops, |
606 | struct ftrace_event_file *file, | 606 | struct trace_event_file *file, |
607 | char *glob, char *cmd, char *param) | 607 | char *glob, char *cmd, char *param) |
608 | { | 608 | { |
609 | struct event_trigger_data *trigger_data; | 609 | struct event_trigger_data *trigger_data; |
@@ -688,7 +688,7 @@ event_trigger_callback(struct event_command *cmd_ops, | |||
688 | * set_trigger_filter - Generic event_command @set_filter implementation | 688 | * set_trigger_filter - Generic event_command @set_filter implementation |
689 | * @filter_str: The filter string for the trigger, NULL to remove filter | 689 | * @filter_str: The filter string for the trigger, NULL to remove filter |
690 | * @trigger_data: Trigger-specific data | 690 | * @trigger_data: Trigger-specific data |
691 | * @file: The ftrace_event_file associated with the event | 691 | * @file: The trace_event_file associated with the event |
692 | * | 692 | * |
693 | * Common implementation for event command filter parsing and filter | 693 | * Common implementation for event command filter parsing and filter |
694 | * instantiation. | 694 | * instantiation. |
@@ -702,7 +702,7 @@ event_trigger_callback(struct event_command *cmd_ops, | |||
702 | */ | 702 | */ |
703 | static int set_trigger_filter(char *filter_str, | 703 | static int set_trigger_filter(char *filter_str, |
704 | struct event_trigger_data *trigger_data, | 704 | struct event_trigger_data *trigger_data, |
705 | struct ftrace_event_file *file) | 705 | struct trace_event_file *file) |
706 | { | 706 | { |
707 | struct event_trigger_data *data = trigger_data; | 707 | struct event_trigger_data *data = trigger_data; |
708 | struct event_filter *filter = NULL, *tmp; | 708 | struct event_filter *filter = NULL, *tmp; |
@@ -900,7 +900,7 @@ snapshot_count_trigger(struct event_trigger_data *data) | |||
900 | static int | 900 | static int |
901 | register_snapshot_trigger(char *glob, struct event_trigger_ops *ops, | 901 | register_snapshot_trigger(char *glob, struct event_trigger_ops *ops, |
902 | struct event_trigger_data *data, | 902 | struct event_trigger_data *data, |
903 | struct ftrace_event_file *file) | 903 | struct trace_event_file *file) |
904 | { | 904 | { |
905 | int ret = register_trigger(glob, ops, data, file); | 905 | int ret = register_trigger(glob, ops, data, file); |
906 | 906 | ||
@@ -968,7 +968,7 @@ static __init int register_trigger_snapshot_cmd(void) { return 0; } | |||
968 | * Skip 3: | 968 | * Skip 3: |
969 | * stacktrace_trigger() | 969 | * stacktrace_trigger() |
970 | * event_triggers_post_call() | 970 | * event_triggers_post_call() |
971 | * ftrace_raw_event_xxx() | 971 | * trace_event_raw_event_xxx() |
972 | */ | 972 | */ |
973 | #define STACK_SKIP 3 | 973 | #define STACK_SKIP 3 |
974 | 974 | ||
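
STACK_SKIP above exists because the three innermost frames (stacktrace_trigger(), event_triggers_post_call(), and the renamed trace_event_raw_event_xxx() caller) are plumbing rather than payload, so the unwinder is told to drop them before recording. The same frame-skipping idea in user space with glibc's backtrace(3), where the skip count is just as sensitive to the exact call chain:

    /* Build with -O0 -rdynamic so frames are not inlined away and
     * backtrace_symbols() can resolve names. */
    #include <execinfo.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define SKIP 1   /* drop capture_stack() itself, keep its callers */

    static void capture_stack(void)
    {
        void *frames[16];
        int n = backtrace(frames, 16);
        char **syms = backtrace_symbols(frames, n);
        if (!syms)
            return;
        /* Start printing past the plumbing frame, as STACK_SKIP does. */
        for (int i = SKIP; i < n; i++)
            printf("%s\n", syms[i]);
        free(syms);
    }

    static void interesting_function(void) { capture_stack(); }

    int main(void)
    {
        interesting_function();   /* trace starts at interesting_function() */
        return 0;
    }
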
@@ -1053,7 +1053,7 @@ static __init void unregister_trigger_traceon_traceoff_cmds(void) | |||
1053 | #define DISABLE_EVENT_STR "disable_event" | 1053 | #define DISABLE_EVENT_STR "disable_event" |
1054 | 1054 | ||
1055 | struct enable_trigger_data { | 1055 | struct enable_trigger_data { |
1056 | struct ftrace_event_file *file; | 1056 | struct trace_event_file *file; |
1057 | bool enable; | 1057 | bool enable; |
1058 | }; | 1058 | }; |
1059 | 1059 | ||
@@ -1063,9 +1063,9 @@ event_enable_trigger(struct event_trigger_data *data) | |||
1063 | struct enable_trigger_data *enable_data = data->private_data; | 1063 | struct enable_trigger_data *enable_data = data->private_data; |
1064 | 1064 | ||
1065 | if (enable_data->enable) | 1065 | if (enable_data->enable) |
1066 | clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &enable_data->file->flags); | 1066 | clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags); |
1067 | else | 1067 | else |
1068 | set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &enable_data->file->flags); | 1068 | set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags); |
1069 | } | 1069 | } |
1070 | 1070 | ||
1071 | static void | 1071 | static void |
@@ -1077,7 +1077,7 @@ event_enable_count_trigger(struct event_trigger_data *data) | |||
1077 | return; | 1077 | return; |
1078 | 1078 | ||
1079 | /* Skip if the event is in a state we want to switch to */ | 1079 | /* Skip if the event is in a state we want to switch to */ |
1080 | if (enable_data->enable == !(enable_data->file->flags & FTRACE_EVENT_FL_SOFT_DISABLED)) | 1080 | if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED)) |
1081 | return; | 1081 | return; |
1082 | 1082 | ||
1083 | if (data->count != -1) | 1083 | if (data->count != -1) |
@@ -1095,7 +1095,7 @@ event_enable_trigger_print(struct seq_file *m, struct event_trigger_ops *ops, | |||
1095 | seq_printf(m, "%s:%s:%s", | 1095 | seq_printf(m, "%s:%s:%s", |
1096 | enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR, | 1096 | enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR, |
1097 | enable_data->file->event_call->class->system, | 1097 | enable_data->file->event_call->class->system, |
1098 | ftrace_event_name(enable_data->file->event_call)); | 1098 | trace_event_name(enable_data->file->event_call)); |
1099 | 1099 | ||
1100 | if (data->count == -1) | 1100 | if (data->count == -1) |
1101 | seq_puts(m, ":unlimited"); | 1101 | seq_puts(m, ":unlimited"); |
@@ -1159,10 +1159,10 @@ static struct event_trigger_ops event_disable_count_trigger_ops = { | |||
1159 | 1159 | ||
1160 | static int | 1160 | static int |
1161 | event_enable_trigger_func(struct event_command *cmd_ops, | 1161 | event_enable_trigger_func(struct event_command *cmd_ops, |
1162 | struct ftrace_event_file *file, | 1162 | struct trace_event_file *file, |
1163 | char *glob, char *cmd, char *param) | 1163 | char *glob, char *cmd, char *param) |
1164 | { | 1164 | { |
1165 | struct ftrace_event_file *event_enable_file; | 1165 | struct trace_event_file *event_enable_file; |
1166 | struct enable_trigger_data *enable_data; | 1166 | struct enable_trigger_data *enable_data; |
1167 | struct event_trigger_data *trigger_data; | 1167 | struct event_trigger_data *trigger_data; |
1168 | struct event_trigger_ops *trigger_ops; | 1168 | struct event_trigger_ops *trigger_ops; |
@@ -1294,7 +1294,7 @@ event_enable_trigger_func(struct event_command *cmd_ops, | |||
1294 | static int event_enable_register_trigger(char *glob, | 1294 | static int event_enable_register_trigger(char *glob, |
1295 | struct event_trigger_ops *ops, | 1295 | struct event_trigger_ops *ops, |
1296 | struct event_trigger_data *data, | 1296 | struct event_trigger_data *data, |
1297 | struct ftrace_event_file *file) | 1297 | struct trace_event_file *file) |
1298 | { | 1298 | { |
1299 | struct enable_trigger_data *enable_data = data->private_data; | 1299 | struct enable_trigger_data *enable_data = data->private_data; |
1300 | struct enable_trigger_data *test_enable_data; | 1300 | struct enable_trigger_data *test_enable_data; |
@@ -1331,7 +1331,7 @@ out: | |||
1331 | static void event_enable_unregister_trigger(char *glob, | 1331 | static void event_enable_unregister_trigger(char *glob, |
1332 | struct event_trigger_ops *ops, | 1332 | struct event_trigger_ops *ops, |
1333 | struct event_trigger_data *test, | 1333 | struct event_trigger_data *test, |
1334 | struct ftrace_event_file *file) | 1334 | struct trace_event_file *file) |
1335 | { | 1335 | { |
1336 | struct enable_trigger_data *test_enable_data = test->private_data; | 1336 | struct enable_trigger_data *test_enable_data = test->private_data; |
1337 | struct enable_trigger_data *enable_data; | 1337 | struct enable_trigger_data *enable_data; |
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c index 174a6a71146c..adabf7da9113 100644 --- a/kernel/trace/trace_export.c +++ b/kernel/trace/trace_export.c | |||
@@ -125,7 +125,7 @@ static void __always_unused ____ftrace_check_##name(void) \ | |||
125 | #undef FTRACE_ENTRY | 125 | #undef FTRACE_ENTRY |
126 | #define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter) \ | 126 | #define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter) \ |
127 | static int __init \ | 127 | static int __init \ |
128 | ftrace_define_fields_##name(struct ftrace_event_call *event_call) \ | 128 | ftrace_define_fields_##name(struct trace_event_call *event_call) \ |
129 | { \ | 129 | { \ |
130 | struct struct_name field; \ | 130 | struct struct_name field; \ |
131 | int ret; \ | 131 | int ret; \ |
@@ -163,14 +163,14 @@ ftrace_define_fields_##name(struct ftrace_event_call *event_call) \ | |||
163 | #define FTRACE_ENTRY_REG(call, struct_name, etype, tstruct, print, filter,\ | 163 | #define FTRACE_ENTRY_REG(call, struct_name, etype, tstruct, print, filter,\ |
164 | regfn) \ | 164 | regfn) \ |
165 | \ | 165 | \ |
166 | struct ftrace_event_class __refdata event_class_ftrace_##call = { \ | 166 | struct trace_event_class __refdata event_class_ftrace_##call = { \ |
167 | .system = __stringify(TRACE_SYSTEM), \ | 167 | .system = __stringify(TRACE_SYSTEM), \ |
168 | .define_fields = ftrace_define_fields_##call, \ | 168 | .define_fields = ftrace_define_fields_##call, \ |
169 | .fields = LIST_HEAD_INIT(event_class_ftrace_##call.fields),\ | 169 | .fields = LIST_HEAD_INIT(event_class_ftrace_##call.fields),\ |
170 | .reg = regfn, \ | 170 | .reg = regfn, \ |
171 | }; \ | 171 | }; \ |
172 | \ | 172 | \ |
173 | struct ftrace_event_call __used event_##call = { \ | 173 | struct trace_event_call __used event_##call = { \ |
174 | .class = &event_class_ftrace_##call, \ | 174 | .class = &event_class_ftrace_##call, \ |
175 | { \ | 175 | { \ |
176 | .name = #call, \ | 176 | .name = #call, \ |
@@ -179,7 +179,7 @@ struct ftrace_event_call __used event_##call = { \ | |||
179 | .print_fmt = print, \ | 179 | .print_fmt = print, \ |
180 | .flags = TRACE_EVENT_FL_IGNORE_ENABLE, \ | 180 | .flags = TRACE_EVENT_FL_IGNORE_ENABLE, \ |
181 | }; \ | 181 | }; \ |
182 | struct ftrace_event_call __used \ | 182 | struct trace_event_call __used \ |
183 | __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call; | 183 | __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call; |
184 | 184 | ||
185 | #undef FTRACE_ENTRY | 185 | #undef FTRACE_ENTRY |
@@ -187,7 +187,7 @@ __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call; | |||
187 | FTRACE_ENTRY_REG(call, struct_name, etype, \ | 187 | FTRACE_ENTRY_REG(call, struct_name, etype, \ |
188 | PARAMS(tstruct), PARAMS(print), filter, NULL) | 188 | PARAMS(tstruct), PARAMS(print), filter, NULL) |
189 | 189 | ||
190 | int ftrace_event_is_function(struct ftrace_event_call *call) | 190 | int ftrace_event_is_function(struct trace_event_call *call) |
191 | { | 191 | { |
192 | return call == &event_function; | 192 | return call == &event_function; |
193 | } | 193 | } |
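
trace_export.c above is one large X-macro: each FTRACE_ENTRY() description is re-expanded under successive #define bodies to emit a ftrace_define_fields_*() function, a trace_event_class, and a section-placed trace_event_call, all from a single source of truth. The generative pattern reduced to a user-space toy:

    #include <stdio.h>

    /* One description list, expanded several times with different bodies. */
    #define EVENT_LIST(X) \
        X(function)       \
        X(funcgraph_entry)

    /* Expansion 1: emit a handler per event. */
    #define DEFINE_HANDLER(name) \
        static void handle_##name(void) { printf("handling %s\n", #name); }
    EVENT_LIST(DEFINE_HANDLER)
    #undef DEFINE_HANDLER

    /* Expansion 2: emit a registration table referencing those handlers. */
    #define TABLE_ENTRY(name) { #name, handle_##name },
    static const struct { const char *name; void (*fn)(void); } events[] = {
        EVENT_LIST(TABLE_ENTRY)
    };
    #undef TABLE_ENTRY

    int main(void)
    {
        for (unsigned i = 0; i < sizeof(events) / sizeof(events[0]); i++)
            events[i].fn();
        return 0;
    }
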
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index a51e79688455..8968bf720c12 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c | |||
@@ -278,7 +278,7 @@ int __trace_graph_entry(struct trace_array *tr, | |||
278 | unsigned long flags, | 278 | unsigned long flags, |
279 | int pc) | 279 | int pc) |
280 | { | 280 | { |
281 | struct ftrace_event_call *call = &event_funcgraph_entry; | 281 | struct trace_event_call *call = &event_funcgraph_entry; |
282 | struct ring_buffer_event *event; | 282 | struct ring_buffer_event *event; |
283 | struct ring_buffer *buffer = tr->trace_buffer.buffer; | 283 | struct ring_buffer *buffer = tr->trace_buffer.buffer; |
284 | struct ftrace_graph_ent_entry *entry; | 284 | struct ftrace_graph_ent_entry *entry; |
@@ -393,7 +393,7 @@ void __trace_graph_return(struct trace_array *tr, | |||
393 | unsigned long flags, | 393 | unsigned long flags, |
394 | int pc) | 394 | int pc) |
395 | { | 395 | { |
396 | struct ftrace_event_call *call = &event_funcgraph_exit; | 396 | struct trace_event_call *call = &event_funcgraph_exit; |
397 | struct ring_buffer_event *event; | 397 | struct ring_buffer_event *event; |
398 | struct ring_buffer *buffer = tr->trace_buffer.buffer; | 398 | struct ring_buffer *buffer = tr->trace_buffer.buffer; |
399 | struct ftrace_graph_ret_entry *entry; | 399 | struct ftrace_graph_ret_entry *entry; |
@@ -1454,12 +1454,12 @@ static __init int init_graph_trace(void) | |||
1454 | { | 1454 | { |
1455 | max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1); | 1455 | max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1); |
1456 | 1456 | ||
1457 | if (!register_ftrace_event(&graph_trace_entry_event)) { | 1457 | if (!register_trace_event(&graph_trace_entry_event)) { |
1458 | pr_warning("Warning: could not register graph trace events\n"); | 1458 | pr_warning("Warning: could not register graph trace events\n"); |
1459 | return 1; | 1459 | return 1; |
1460 | } | 1460 | } |
1461 | 1461 | ||
1462 | if (!register_ftrace_event(&graph_trace_ret_event)) { | 1462 | if (!register_trace_event(&graph_trace_ret_event)) { |
1463 | pr_warning("Warning: could not register graph trace events\n"); | 1463 | pr_warning("Warning: could not register graph trace events\n"); |
1464 | return 1; | 1464 | return 1; |
1465 | } | 1465 | } |
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index d0ce590f06e1..b7d0cdd9906c 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c | |||
@@ -348,7 +348,7 @@ static struct trace_kprobe *find_trace_kprobe(const char *event, | |||
348 | struct trace_kprobe *tk; | 348 | struct trace_kprobe *tk; |
349 | 349 | ||
350 | list_for_each_entry(tk, &probe_list, list) | 350 | list_for_each_entry(tk, &probe_list, list) |
351 | if (strcmp(ftrace_event_name(&tk->tp.call), event) == 0 && | 351 | if (strcmp(trace_event_name(&tk->tp.call), event) == 0 && |
352 | strcmp(tk->tp.call.class->system, group) == 0) | 352 | strcmp(tk->tp.call.class->system, group) == 0) |
353 | return tk; | 353 | return tk; |
354 | return NULL; | 354 | return NULL; |
@@ -359,7 +359,7 @@ static struct trace_kprobe *find_trace_kprobe(const char *event, | |||
359 | * if the file is NULL, enable "perf" handler, or enable "trace" handler. | 359 | * if the file is NULL, enable "perf" handler, or enable "trace" handler. |
360 | */ | 360 | */ |
361 | static int | 361 | static int |
362 | enable_trace_kprobe(struct trace_kprobe *tk, struct ftrace_event_file *file) | 362 | enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file) |
363 | { | 363 | { |
364 | int ret = 0; | 364 | int ret = 0; |
365 | 365 | ||
@@ -394,7 +394,7 @@ enable_trace_kprobe(struct trace_kprobe *tk, struct ftrace_event_file *file) | |||
394 | * if the file is NULL, disable "perf" handler, or disable "trace" handler. | 394 | * if the file is NULL, disable "perf" handler, or disable "trace" handler. |
395 | */ | 395 | */ |
396 | static int | 396 | static int |
397 | disable_trace_kprobe(struct trace_kprobe *tk, struct ftrace_event_file *file) | 397 | disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file) |
398 | { | 398 | { |
399 | struct event_file_link *link = NULL; | 399 | struct event_file_link *link = NULL; |
400 | int wait = 0; | 400 | int wait = 0; |
@@ -523,7 +523,7 @@ static int register_trace_kprobe(struct trace_kprobe *tk) | |||
523 | mutex_lock(&probe_lock); | 523 | mutex_lock(&probe_lock); |
524 | 524 | ||
525 | /* Delete old (same name) event if exist */ | 525 | /* Delete old (same name) event if exist */ |
526 | old_tk = find_trace_kprobe(ftrace_event_name(&tk->tp.call), | 526 | old_tk = find_trace_kprobe(trace_event_name(&tk->tp.call), |
527 | tk->tp.call.class->system); | 527 | tk->tp.call.class->system); |
528 | if (old_tk) { | 528 | if (old_tk) { |
529 | ret = unregister_trace_kprobe(old_tk); | 529 | ret = unregister_trace_kprobe(old_tk); |
@@ -572,7 +572,7 @@ static int trace_kprobe_module_callback(struct notifier_block *nb, | |||
572 | if (ret) | 572 | if (ret) |
573 | pr_warning("Failed to re-register probe %s on" | 573 | pr_warning("Failed to re-register probe %s on" |
574 | "%s: %d\n", | 574 | "%s: %d\n", |
575 | ftrace_event_name(&tk->tp.call), | 575 | trace_event_name(&tk->tp.call), |
576 | mod->name, ret); | 576 | mod->name, ret); |
577 | } | 577 | } |
578 | } | 578 | } |
@@ -829,7 +829,7 @@ static int probes_seq_show(struct seq_file *m, void *v) | |||
829 | 829 | ||
830 | seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p'); | 830 | seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p'); |
831 | seq_printf(m, ":%s/%s", tk->tp.call.class->system, | 831 | seq_printf(m, ":%s/%s", tk->tp.call.class->system, |
832 | ftrace_event_name(&tk->tp.call)); | 832 | trace_event_name(&tk->tp.call)); |
833 | 833 | ||
834 | if (!tk->symbol) | 834 | if (!tk->symbol) |
835 | seq_printf(m, " 0x%p", tk->rp.kp.addr); | 835 | seq_printf(m, " 0x%p", tk->rp.kp.addr); |
@@ -888,7 +888,7 @@ static int probes_profile_seq_show(struct seq_file *m, void *v) | |||
888 | struct trace_kprobe *tk = v; | 888 | struct trace_kprobe *tk = v; |
889 | 889 | ||
890 | seq_printf(m, " %-44s %15lu %15lu\n", | 890 | seq_printf(m, " %-44s %15lu %15lu\n", |
891 | ftrace_event_name(&tk->tp.call), tk->nhit, | 891 | trace_event_name(&tk->tp.call), tk->nhit, |
892 | tk->rp.kp.nmissed); | 892 | tk->rp.kp.nmissed); |
893 | 893 | ||
894 | return 0; | 894 | return 0; |
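
The enable/disable paths commented above are deliberately overloaded on their file argument: a NULL trace_event_file selects the perf handler, while a non-NULL one links the file and arms the ftrace handler, so one probe serves both consumers through a single entry point. Roughly, with invented names:

    #include <stddef.h>
    #include <stdio.h>

    struct event_file { const char *instance; };

    /* NULL file => perf consumer; otherwise arm the ftrace consumer. */
    static int enable_probe(const struct event_file *file)
    {
        if (file == NULL)
            puts("enabling perf handler");
        else
            printf("linking file %s and enabling trace handler\n",
                   file->instance);
        return 0;   /* real code would also register the kprobe if needed */
    }

    int main(void)
    {
        struct event_file f = { "instances/foo" };
        enable_probe(NULL);   /* perf path */
        enable_probe(&f);     /* ftrace path */
        return 0;
    }
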
@@ -917,18 +917,18 @@ static const struct file_operations kprobe_profile_ops = { | |||
917 | /* Kprobe handler */ | 917 | /* Kprobe handler */ |
918 | static nokprobe_inline void | 918 | static nokprobe_inline void |
919 | __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs, | 919 | __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs, |
920 | struct ftrace_event_file *ftrace_file) | 920 | struct trace_event_file *trace_file) |
921 | { | 921 | { |
922 | struct kprobe_trace_entry_head *entry; | 922 | struct kprobe_trace_entry_head *entry; |
923 | struct ring_buffer_event *event; | 923 | struct ring_buffer_event *event; |
924 | struct ring_buffer *buffer; | 924 | struct ring_buffer *buffer; |
925 | int size, dsize, pc; | 925 | int size, dsize, pc; |
926 | unsigned long irq_flags; | 926 | unsigned long irq_flags; |
927 | struct ftrace_event_call *call = &tk->tp.call; | 927 | struct trace_event_call *call = &tk->tp.call; |
928 | 928 | ||
929 | WARN_ON(call != ftrace_file->event_call); | 929 | WARN_ON(call != trace_file->event_call); |
930 | 930 | ||
931 | if (ftrace_trigger_soft_disabled(ftrace_file)) | 931 | if (trace_trigger_soft_disabled(trace_file)) |
932 | return; | 932 | return; |
933 | 933 | ||
934 | local_save_flags(irq_flags); | 934 | local_save_flags(irq_flags); |
@@ -937,7 +937,7 @@ __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs, | |||
937 | dsize = __get_data_size(&tk->tp, regs); | 937 | dsize = __get_data_size(&tk->tp, regs); |
938 | size = sizeof(*entry) + tk->tp.size + dsize; | 938 | size = sizeof(*entry) + tk->tp.size + dsize; |
939 | 939 | ||
940 | event = trace_event_buffer_lock_reserve(&buffer, ftrace_file, | 940 | event = trace_event_buffer_lock_reserve(&buffer, trace_file, |
941 | call->event.type, | 941 | call->event.type, |
942 | size, irq_flags, pc); | 942 | size, irq_flags, pc); |
943 | if (!event) | 943 | if (!event) |
@@ -947,7 +947,7 @@ __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs, | |||
947 | entry->ip = (unsigned long)tk->rp.kp.addr; | 947 | entry->ip = (unsigned long)tk->rp.kp.addr; |
948 | store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize); | 948 | store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize); |
949 | 949 | ||
950 | event_trigger_unlock_commit_regs(ftrace_file, buffer, event, | 950 | event_trigger_unlock_commit_regs(trace_file, buffer, event, |
951 | entry, irq_flags, pc, regs); | 951 | entry, irq_flags, pc, regs); |
952 | } | 952 | } |
953 | 953 | ||
@@ -965,18 +965,18 @@ NOKPROBE_SYMBOL(kprobe_trace_func); | |||
965 | static nokprobe_inline void | 965 | static nokprobe_inline void |
966 | __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri, | 966 | __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri, |
967 | struct pt_regs *regs, | 967 | struct pt_regs *regs, |
968 | struct ftrace_event_file *ftrace_file) | 968 | struct trace_event_file *trace_file) |
969 | { | 969 | { |
970 | struct kretprobe_trace_entry_head *entry; | 970 | struct kretprobe_trace_entry_head *entry; |
971 | struct ring_buffer_event *event; | 971 | struct ring_buffer_event *event; |
972 | struct ring_buffer *buffer; | 972 | struct ring_buffer *buffer; |
973 | int size, pc, dsize; | 973 | int size, pc, dsize; |
974 | unsigned long irq_flags; | 974 | unsigned long irq_flags; |
975 | struct ftrace_event_call *call = &tk->tp.call; | 975 | struct trace_event_call *call = &tk->tp.call; |
976 | 976 | ||
977 | WARN_ON(call != ftrace_file->event_call); | 977 | WARN_ON(call != trace_file->event_call); |
978 | 978 | ||
979 | if (ftrace_trigger_soft_disabled(ftrace_file)) | 979 | if (trace_trigger_soft_disabled(trace_file)) |
980 | return; | 980 | return; |
981 | 981 | ||
982 | local_save_flags(irq_flags); | 982 | local_save_flags(irq_flags); |
@@ -985,7 +985,7 @@ __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri, | |||
985 | dsize = __get_data_size(&tk->tp, regs); | 985 | dsize = __get_data_size(&tk->tp, regs); |
986 | size = sizeof(*entry) + tk->tp.size + dsize; | 986 | size = sizeof(*entry) + tk->tp.size + dsize; |
987 | 987 | ||
988 | event = trace_event_buffer_lock_reserve(&buffer, ftrace_file, | 988 | event = trace_event_buffer_lock_reserve(&buffer, trace_file, |
989 | call->event.type, | 989 | call->event.type, |
990 | size, irq_flags, pc); | 990 | size, irq_flags, pc); |
991 | if (!event) | 991 | if (!event) |
@@ -996,7 +996,7 @@ __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri, | |||
996 | entry->ret_ip = (unsigned long)ri->ret_addr; | 996 | entry->ret_ip = (unsigned long)ri->ret_addr; |
997 | store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize); | 997 | store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize); |
998 | 998 | ||
999 | event_trigger_unlock_commit_regs(ftrace_file, buffer, event, | 999 | event_trigger_unlock_commit_regs(trace_file, buffer, event, |
1000 | entry, irq_flags, pc, regs); | 1000 | entry, irq_flags, pc, regs); |
1001 | } | 1001 | } |
1002 | 1002 | ||
@@ -1025,7 +1025,7 @@ print_kprobe_event(struct trace_iterator *iter, int flags, | |||
1025 | field = (struct kprobe_trace_entry_head *)iter->ent; | 1025 | field = (struct kprobe_trace_entry_head *)iter->ent; |
1026 | tp = container_of(event, struct trace_probe, call.event); | 1026 | tp = container_of(event, struct trace_probe, call.event); |
1027 | 1027 | ||
1028 | trace_seq_printf(s, "%s: (", ftrace_event_name(&tp->call)); | 1028 | trace_seq_printf(s, "%s: (", trace_event_name(&tp->call)); |
1029 | 1029 | ||
1030 | if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET)) | 1030 | if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET)) |
1031 | goto out; | 1031 | goto out; |
@@ -1056,7 +1056,7 @@ print_kretprobe_event(struct trace_iterator *iter, int flags, | |||
1056 | field = (struct kretprobe_trace_entry_head *)iter->ent; | 1056 | field = (struct kretprobe_trace_entry_head *)iter->ent; |
1057 | tp = container_of(event, struct trace_probe, call.event); | 1057 | tp = container_of(event, struct trace_probe, call.event); |
1058 | 1058 | ||
1059 | trace_seq_printf(s, "%s: (", ftrace_event_name(&tp->call)); | 1059 | trace_seq_printf(s, "%s: (", trace_event_name(&tp->call)); |
1060 | 1060 | ||
1061 | if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET)) | 1061 | if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET)) |
1062 | goto out; | 1062 | goto out; |
@@ -1081,7 +1081,7 @@ print_kretprobe_event(struct trace_iterator *iter, int flags, | |||
1081 | } | 1081 | } |
1082 | 1082 | ||
1083 | 1083 | ||
1084 | static int kprobe_event_define_fields(struct ftrace_event_call *event_call) | 1084 | static int kprobe_event_define_fields(struct trace_event_call *event_call) |
1085 | { | 1085 | { |
1086 | int ret, i; | 1086 | int ret, i; |
1087 | struct kprobe_trace_entry_head field; | 1087 | struct kprobe_trace_entry_head field; |
@@ -1104,7 +1104,7 @@ static int kprobe_event_define_fields(struct ftrace_event_call *event_call) | |||
1104 | return 0; | 1104 | return 0; |
1105 | } | 1105 | } |
1106 | 1106 | ||
1107 | static int kretprobe_event_define_fields(struct ftrace_event_call *event_call) | 1107 | static int kretprobe_event_define_fields(struct trace_event_call *event_call) |
1108 | { | 1108 | { |
1109 | int ret, i; | 1109 | int ret, i; |
1110 | struct kretprobe_trace_entry_head field; | 1110 | struct kretprobe_trace_entry_head field; |
@@ -1134,7 +1134,7 @@ static int kretprobe_event_define_fields(struct ftrace_event_call *event_call) | |||
1134 | static void | 1134 | static void |
1135 | kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs) | 1135 | kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs) |
1136 | { | 1136 | { |
1137 | struct ftrace_event_call *call = &tk->tp.call; | 1137 | struct trace_event_call *call = &tk->tp.call; |
1138 | struct bpf_prog *prog = call->prog; | 1138 | struct bpf_prog *prog = call->prog; |
1139 | struct kprobe_trace_entry_head *entry; | 1139 | struct kprobe_trace_entry_head *entry; |
1140 | struct hlist_head *head; | 1140 | struct hlist_head *head; |
@@ -1169,7 +1169,7 @@ static void | |||
1169 | kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri, | 1169 | kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri, |
1170 | struct pt_regs *regs) | 1170 | struct pt_regs *regs) |
1171 | { | 1171 | { |
1172 | struct ftrace_event_call *call = &tk->tp.call; | 1172 | struct trace_event_call *call = &tk->tp.call; |
1173 | struct bpf_prog *prog = call->prog; | 1173 | struct bpf_prog *prog = call->prog; |
1174 | struct kretprobe_trace_entry_head *entry; | 1174 | struct kretprobe_trace_entry_head *entry; |
1175 | struct hlist_head *head; | 1175 | struct hlist_head *head; |
@@ -1206,11 +1206,11 @@ NOKPROBE_SYMBOL(kretprobe_perf_func); | |||
1206 | * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe | 1206 | * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe |
1207 | * lockless, but we can't race with this __init function. | 1207 | * lockless, but we can't race with this __init function. |
1208 | */ | 1208 | */ |
1209 | static int kprobe_register(struct ftrace_event_call *event, | 1209 | static int kprobe_register(struct trace_event_call *event, |
1210 | enum trace_reg type, void *data) | 1210 | enum trace_reg type, void *data) |
1211 | { | 1211 | { |
1212 | struct trace_kprobe *tk = (struct trace_kprobe *)event->data; | 1212 | struct trace_kprobe *tk = (struct trace_kprobe *)event->data; |
1213 | struct ftrace_event_file *file = data; | 1213 | struct trace_event_file *file = data; |
1214 | 1214 | ||
1215 | switch (type) { | 1215 | switch (type) { |
1216 | case TRACE_REG_REGISTER: | 1216 | case TRACE_REG_REGISTER: |
@@ -1276,10 +1276,10 @@ static struct trace_event_functions kprobe_funcs = { | |||
1276 | 1276 | ||
1277 | static int register_kprobe_event(struct trace_kprobe *tk) | 1277 | static int register_kprobe_event(struct trace_kprobe *tk) |
1278 | { | 1278 | { |
1279 | struct ftrace_event_call *call = &tk->tp.call; | 1279 | struct trace_event_call *call = &tk->tp.call; |
1280 | int ret; | 1280 | int ret; |
1281 | 1281 | ||
1282 | /* Initialize ftrace_event_call */ | 1282 | /* Initialize trace_event_call */ |
1283 | INIT_LIST_HEAD(&call->class->fields); | 1283 | INIT_LIST_HEAD(&call->class->fields); |
1284 | if (trace_kprobe_is_return(tk)) { | 1284 | if (trace_kprobe_is_return(tk)) { |
1285 | call->event.funcs = &kretprobe_funcs; | 1285 | call->event.funcs = &kretprobe_funcs; |
@@ -1290,7 +1290,7 @@ static int register_kprobe_event(struct trace_kprobe *tk) | |||
1290 | } | 1290 | } |
1291 | if (set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0) | 1291 | if (set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0) |
1292 | return -ENOMEM; | 1292 | return -ENOMEM; |
1293 | ret = register_ftrace_event(&call->event); | 1293 | ret = register_trace_event(&call->event); |
1294 | if (!ret) { | 1294 | if (!ret) { |
1295 | kfree(call->print_fmt); | 1295 | kfree(call->print_fmt); |
1296 | return -ENODEV; | 1296 | return -ENODEV; |
@@ -1301,9 +1301,9 @@ static int register_kprobe_event(struct trace_kprobe *tk) | |||
1301 | ret = trace_add_event_call(call); | 1301 | ret = trace_add_event_call(call); |
1302 | if (ret) { | 1302 | if (ret) { |
1303 | pr_info("Failed to register kprobe event: %s\n", | 1303 | pr_info("Failed to register kprobe event: %s\n", |
1304 | ftrace_event_name(call)); | 1304 | trace_event_name(call)); |
1305 | kfree(call->print_fmt); | 1305 | kfree(call->print_fmt); |
1306 | unregister_ftrace_event(&call->event); | 1306 | unregister_trace_event(&call->event); |
1307 | } | 1307 | } |
1308 | return ret; | 1308 | return ret; |
1309 | } | 1309 | } |
@@ -1364,10 +1364,10 @@ static __used int kprobe_trace_selftest_target(int a1, int a2, int a3, | |||
1364 | return a1 + a2 + a3 + a4 + a5 + a6; | 1364 | return a1 + a2 + a3 + a4 + a5 + a6; |
1365 | } | 1365 | } |
1366 | 1366 | ||
1367 | static struct ftrace_event_file * | 1367 | static struct trace_event_file * |
1368 | find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr) | 1368 | find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr) |
1369 | { | 1369 | { |
1370 | struct ftrace_event_file *file; | 1370 | struct trace_event_file *file; |
1371 | 1371 | ||
1372 | list_for_each_entry(file, &tr->events, list) | 1372 | list_for_each_entry(file, &tr->events, list) |
1373 | if (file->event_call == &tk->tp.call) | 1373 | if (file->event_call == &tk->tp.call) |
@@ -1385,7 +1385,7 @@ static __init int kprobe_trace_self_tests_init(void) | |||
1385 | int ret, warn = 0; | 1385 | int ret, warn = 0; |
1386 | int (*target)(int, int, int, int, int, int); | 1386 | int (*target)(int, int, int, int, int, int); |
1387 | struct trace_kprobe *tk; | 1387 | struct trace_kprobe *tk; |
1388 | struct ftrace_event_file *file; | 1388 | struct trace_event_file *file; |
1389 | 1389 | ||
1390 | if (tracing_is_disabled()) | 1390 | if (tracing_is_disabled()) |
1391 | return -ENODEV; | 1391 | return -ENODEV; |
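
The trace_kprobe.c hunks are mechanical: the file pointer threaded through the
probe handlers changes type from struct ftrace_event_file to struct
trace_event_file, and every helper it flows through is renamed to match. For
context, the dispatch wrapper feeding __kprobe_trace_func() sits just outside
these hunks; a minimal sketch of it, assuming the 4.1-era shape of
kprobe_trace_func():

	/* Sketch: deliver one kprobe hit to every trace instance the probe
	 * is enabled in, one trace_event_file per instance. */
	static void kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
	{
		struct event_file_link *link;

		list_for_each_entry_rcu(link, &tk->tp.files, list)
			__kprobe_trace_func(tk, regs, link->file);
	}

This is why the handlers above take a trace_event_file argument at all: the
same probe can be live in several instances, each with its own buffer and
trigger state.
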
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index 7a9ba62e9fef..638e110c5bfd 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -298,7 +298,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
 				struct trace_array_cpu *data,
 				struct mmiotrace_rw *rw)
 {
-	struct ftrace_event_call *call = &event_mmiotrace_rw;
+	struct trace_event_call *call = &event_mmiotrace_rw;
 	struct ring_buffer *buffer = tr->trace_buffer.buffer;
 	struct ring_buffer_event *event;
 	struct trace_mmiotrace_rw *entry;
@@ -328,7 +328,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
 				struct trace_array_cpu *data,
 				struct mmiotrace_map *map)
 {
-	struct ftrace_event_call *call = &event_mmiotrace_map;
+	struct trace_event_call *call = &event_mmiotrace_map;
 	struct ring_buffer *buffer = tr->trace_buffer.buffer;
 	struct ring_buffer_event *event;
 	struct trace_mmiotrace_map *entry;
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 25a086bcb700..dfab253727dc 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -60,9 +60,9 @@ enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
 }
 
 const char *
-ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
+trace_print_flags_seq(struct trace_seq *p, const char *delim,
		       unsigned long flags,
		       const struct trace_print_flags *flag_array)
 {
 	unsigned long mask;
 	const char *str;
@@ -95,11 +95,11 @@ ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
 
 	return ret;
 }
-EXPORT_SYMBOL(ftrace_print_flags_seq);
+EXPORT_SYMBOL(trace_print_flags_seq);
 
 const char *
-ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
+trace_print_symbols_seq(struct trace_seq *p, unsigned long val,
			 const struct trace_print_flags *symbol_array)
 {
 	int i;
 	const char *ret = trace_seq_buffer_ptr(p);
@@ -120,11 +120,11 @@ ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
 
 	return ret;
 }
-EXPORT_SYMBOL(ftrace_print_symbols_seq);
+EXPORT_SYMBOL(trace_print_symbols_seq);
 
 #if BITS_PER_LONG == 32
 const char *
-ftrace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val,
+trace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val,
			     const struct trace_print_flags_u64 *symbol_array)
 {
 	int i;
@@ -146,12 +146,12 @@ ftrace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val,
 
 	return ret;
 }
-EXPORT_SYMBOL(ftrace_print_symbols_seq_u64);
+EXPORT_SYMBOL(trace_print_symbols_seq_u64);
 #endif
 
 const char *
-ftrace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
+trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
			 unsigned int bitmask_size)
 {
 	const char *ret = trace_seq_buffer_ptr(p);
 
@@ -160,10 +160,10 @@ ftrace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(ftrace_print_bitmask_seq);
+EXPORT_SYMBOL_GPL(trace_print_bitmask_seq);
 
 const char *
-ftrace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len)
+trace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len)
 {
 	int i;
 	const char *ret = trace_seq_buffer_ptr(p);
@@ -175,11 +175,11 @@ ftrace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len)
 
 	return ret;
 }
-EXPORT_SYMBOL(ftrace_print_hex_seq);
+EXPORT_SYMBOL(trace_print_hex_seq);
 
 const char *
-ftrace_print_array_seq(struct trace_seq *p, const void *buf, int count,
+trace_print_array_seq(struct trace_seq *p, const void *buf, int count,
		       size_t el_size)
 {
 	const char *ret = trace_seq_buffer_ptr(p);
 	const char *prefix = "";
@@ -220,17 +220,17 @@ ftrace_print_array_seq(struct trace_seq *p, const void *buf, int count,
 
 	return ret;
 }
-EXPORT_SYMBOL(ftrace_print_array_seq);
+EXPORT_SYMBOL(trace_print_array_seq);
 
-int ftrace_raw_output_prep(struct trace_iterator *iter,
+int trace_raw_output_prep(struct trace_iterator *iter,
			   struct trace_event *trace_event)
 {
-	struct ftrace_event_call *event;
+	struct trace_event_call *event;
 	struct trace_seq *s = &iter->seq;
 	struct trace_seq *p = &iter->tmp_seq;
 	struct trace_entry *entry;
 
-	event = container_of(trace_event, struct ftrace_event_call, event);
+	event = container_of(trace_event, struct trace_event_call, event);
 	entry = iter->ent;
 
 	if (entry->type != event->event.type) {
@@ -239,14 +239,14 @@ int ftrace_raw_output_prep(struct trace_iterator *iter,
 	}
 
 	trace_seq_init(p);
-	trace_seq_printf(s, "%s: ", ftrace_event_name(event));
+	trace_seq_printf(s, "%s: ", trace_event_name(event));
 
 	return trace_handle_return(s);
 }
-EXPORT_SYMBOL(ftrace_raw_output_prep);
+EXPORT_SYMBOL(trace_raw_output_prep);
 
-static int ftrace_output_raw(struct trace_iterator *iter, char *name,
+static int trace_output_raw(struct trace_iterator *iter, char *name,
			     char *fmt, va_list ap)
 {
 	struct trace_seq *s = &iter->seq;
 
@@ -256,18 +256,18 @@ static int ftrace_output_raw(struct trace_iterator *iter, char *name,
 	return trace_handle_return(s);
 }
 
-int ftrace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...)
+int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...)
 {
 	va_list ap;
 	int ret;
 
 	va_start(ap, fmt);
-	ret = ftrace_output_raw(iter, name, fmt, ap);
+	ret = trace_output_raw(iter, name, fmt, ap);
 	va_end(ap);
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(ftrace_output_call);
+EXPORT_SYMBOL_GPL(trace_output_call);
 
 #ifdef CONFIG_KRETPROBES
 static inline const char *kretprobed(const char *name)
@@ -675,7 +675,7 @@ static int trace_search_list(struct list_head **list)
 	}
 
 	/* Did we used up all 65 thousand events??? */
-	if ((last + 1) > FTRACE_MAX_EVENT)
+	if ((last + 1) > TRACE_EVENT_TYPE_MAX)
		return 0;
 
 	*list = &e->list;
@@ -693,7 +693,7 @@ void trace_event_read_unlock(void)
 }
 
 /**
- * register_ftrace_event - register output for an event type
+ * register_trace_event - register output for an event type
 * @event: the event type to register
 *
 * Event types are stored in a hash and this hash is used to
@@ -707,7 +707,7 @@ void trace_event_read_unlock(void)
 *
 * Returns the event type number or zero on error.
 */
-int register_ftrace_event(struct trace_event *event)
+int register_trace_event(struct trace_event *event)
 {
 	unsigned key;
 	int ret = 0;
@@ -725,7 +725,7 @@ int register_ftrace_event(struct trace_event *event)
 	if (!event->type) {
 		struct list_head *list = NULL;
 
-		if (next_event_type > FTRACE_MAX_EVENT) {
+		if (next_event_type > TRACE_EVENT_TYPE_MAX) {
 
			event->type = trace_search_list(&list);
			if (!event->type)
@@ -771,12 +771,12 @@ int register_ftrace_event(struct trace_event *event)
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(register_ftrace_event);
+EXPORT_SYMBOL_GPL(register_trace_event);
 
 /*
 * Used by module code with the trace_event_sem held for write.
 */
-int __unregister_ftrace_event(struct trace_event *event)
+int __unregister_trace_event(struct trace_event *event)
 {
 	hlist_del(&event->node);
 	list_del(&event->list);
@@ -784,18 +784,18 @@ int __unregister_ftrace_event(struct trace_event *event)
 }
 
 /**
- * unregister_ftrace_event - remove a no longer used event
+ * unregister_trace_event - remove a no longer used event
 * @event: the event to remove
 */
-int unregister_ftrace_event(struct trace_event *event)
+int unregister_trace_event(struct trace_event *event)
 {
 	down_write(&trace_event_sem);
-	__unregister_ftrace_event(event);
+	__unregister_trace_event(event);
 	up_write(&trace_event_sem);
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(unregister_ftrace_event);
+EXPORT_SYMBOL_GPL(unregister_trace_event);
 
 /*
 * Standard events
@@ -1243,7 +1243,7 @@ __init static int init_events(void)
 	for (i = 0; events[i]; i++) {
 		event = events[i];
 
-		ret = register_ftrace_event(event);
+		ret = register_trace_event(event);
 		if (!ret) {
			printk(KERN_WARNING "event %d failed to register\n",
			       event->type);
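
The exported helpers renamed here are the back ends of the __print_flags(),
__print_symbolic(), __print_hex() and related helpers used in TP_printk()
formats, so every TRACE_EVENT() definition keeps working without edits. A
sketch of such a definition (the event and its fields are hypothetical, not
part of this patch):

	/* Hypothetical event: __print_symbolic() below expands into a call
	 * to trace_print_symbols_seq() (ftrace_print_symbols_seq() before
	 * this patch). */
	TRACE_EVENT(sample_state_change,
		TP_PROTO(int state),
		TP_ARGS(state),
		TP_STRUCT__entry(
			__field(int, state)
		),
		TP_fast_assign(
			__entry->state = state;
		),
		TP_printk("state=%s",
			  __print_symbolic(__entry->state,
					   { 0, "IDLE" },
					   { 1, "RUNNING" },
					   { 2, "BLOCKED" }))
	);
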
diff --git a/kernel/trace/trace_output.h b/kernel/trace/trace_output.h
index 8ef2c40efb3c..4cbfe85b99c8 100644
--- a/kernel/trace/trace_output.h
+++ b/kernel/trace/trace_output.h
@@ -32,7 +32,7 @@ extern int
 trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry);
 
 /* used by module unregistering */
-extern int __unregister_ftrace_event(struct trace_event *event);
+extern int __unregister_trace_event(struct trace_event *event);
 extern struct rw_semaphore trace_event_sem;
 
 #define SEQ_PUT_FIELD(s, x)	\
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
index ab283e146b70..b98dee914542 100644
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -272,8 +272,8 @@ struct probe_arg {
 
 struct trace_probe {
 	unsigned int			flags;	/* For TP_FLAG_* */
-	struct ftrace_event_class	class;
-	struct ftrace_event_call	call;
+	struct trace_event_class	class;
+	struct trace_event_call		call;
 	struct list_head		files;
 	ssize_t				size;	/* trace entry size */
 	unsigned int			nr_args;
@@ -281,7 +281,7 @@ struct trace_probe {
 };
 
 struct event_file_link {
-	struct ftrace_event_file	*file;
+	struct trace_event_file		*file;
 	struct list_head		list;
 };
 
@@ -314,7 +314,7 @@ static inline int is_good_name(const char *name)
 }
 
 static inline struct event_file_link *
-find_event_file_link(struct trace_probe *tp, struct ftrace_event_file *file)
+find_event_file_link(struct trace_probe *tp, struct trace_event_file *file)
 {
 	struct event_file_link *link;
 
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index d6e1003724e9..9b33dd117f3f 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -369,7 +369,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
			   struct task_struct *next,
			   unsigned long flags, int pc)
 {
-	struct ftrace_event_call *call = &event_context_switch;
+	struct trace_event_call *call = &event_context_switch;
 	struct ring_buffer *buffer = tr->trace_buffer.buffer;
 	struct ring_buffer_event *event;
 	struct ctx_switch_entry *entry;
@@ -397,7 +397,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
			   struct task_struct *curr,
			   unsigned long flags, int pc)
 {
-	struct ftrace_event_call *call = &event_wakeup;
+	struct trace_event_call *call = &event_wakeup;
 	struct ring_buffer_event *event;
 	struct ctx_switch_entry *entry;
 	struct ring_buffer *buffer = tr->trace_buffer.buffer;
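
In both functions the renamed call pointer exists only so the event can be
run through the event filter before it is committed; the lines that use it
fall outside the hunks. A sketch of the usual pattern, assuming the 4.1-era
body of tracing_sched_switch_trace():

	/* Sketch (assumed context, not part of this patch): reserve, fill,
	 * filter-check against the trace_event_call, then commit. */
	event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	/* ... fill *entry from prev/next ... */
	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(buffer, event, flags, pc);
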
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index f97f6e3a676c..7d567a4b9fa7 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -13,13 +13,13 @@
 
 static DEFINE_MUTEX(syscall_trace_lock);
 
-static int syscall_enter_register(struct ftrace_event_call *event,
+static int syscall_enter_register(struct trace_event_call *event,
				 enum trace_reg type, void *data);
-static int syscall_exit_register(struct ftrace_event_call *event,
+static int syscall_exit_register(struct trace_event_call *event,
				 enum trace_reg type, void *data);
 
 static struct list_head *
-syscall_get_enter_fields(struct ftrace_event_call *call)
+syscall_get_enter_fields(struct trace_event_call *call)
 {
 	struct syscall_metadata *entry = call->data;
 
@@ -219,7 +219,7 @@ __set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
 	return pos;
 }
 
-static int __init set_syscall_print_fmt(struct ftrace_event_call *call)
+static int __init set_syscall_print_fmt(struct trace_event_call *call)
 {
 	char *print_fmt;
 	int len;
@@ -244,7 +244,7 @@ static int __init set_syscall_print_fmt(struct ftrace_event_call *call)
 	return 0;
 }
 
-static void __init free_syscall_print_fmt(struct ftrace_event_call *call)
+static void __init free_syscall_print_fmt(struct trace_event_call *call)
 {
 	struct syscall_metadata *entry = call->data;
 
@@ -252,7 +252,7 @@ static void __init free_syscall_print_fmt(struct ftrace_event_call *call)
		kfree(call->print_fmt);
 }
 
-static int __init syscall_enter_define_fields(struct ftrace_event_call *call)
+static int __init syscall_enter_define_fields(struct trace_event_call *call)
 {
 	struct syscall_trace_enter trace;
 	struct syscall_metadata *meta = call->data;
@@ -275,7 +275,7 @@ static int __init syscall_enter_define_fields(struct ftrace_event_call *call)
 	return ret;
 }
 
-static int __init syscall_exit_define_fields(struct ftrace_event_call *call)
+static int __init syscall_exit_define_fields(struct trace_event_call *call)
 {
 	struct syscall_trace_exit trace;
 	int ret;
@@ -293,7 +293,7 @@ static int __init syscall_exit_define_fields(struct ftrace_event_call *call)
 static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
 {
 	struct trace_array *tr = data;
-	struct ftrace_event_file *ftrace_file;
+	struct trace_event_file *trace_file;
 	struct syscall_trace_enter *entry;
 	struct syscall_metadata *sys_data;
 	struct ring_buffer_event *event;
@@ -308,11 +308,11 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
		return;
 
 	/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */
-	ftrace_file = rcu_dereference_sched(tr->enter_syscall_files[syscall_nr]);
-	if (!ftrace_file)
+	trace_file = rcu_dereference_sched(tr->enter_syscall_files[syscall_nr]);
+	if (!trace_file)
		return;
 
-	if (ftrace_trigger_soft_disabled(ftrace_file))
+	if (trace_trigger_soft_disabled(trace_file))
		return;
 
 	sys_data = syscall_nr_to_meta(syscall_nr);
@@ -334,14 +334,14 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
 	entry->nr = syscall_nr;
 	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);
 
-	event_trigger_unlock_commit(ftrace_file, buffer, event, entry,
+	event_trigger_unlock_commit(trace_file, buffer, event, entry,
				    irq_flags, pc);
 }
 
 static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
 {
 	struct trace_array *tr = data;
-	struct ftrace_event_file *ftrace_file;
+	struct trace_event_file *trace_file;
 	struct syscall_trace_exit *entry;
 	struct syscall_metadata *sys_data;
 	struct ring_buffer_event *event;
@@ -355,11 +355,11 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
		return;
 
 	/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */
-	ftrace_file = rcu_dereference_sched(tr->exit_syscall_files[syscall_nr]);
-	if (!ftrace_file)
+	trace_file = rcu_dereference_sched(tr->exit_syscall_files[syscall_nr]);
+	if (!trace_file)
		return;
 
-	if (ftrace_trigger_soft_disabled(ftrace_file))
+	if (trace_trigger_soft_disabled(trace_file))
		return;
 
 	sys_data = syscall_nr_to_meta(syscall_nr);
@@ -380,12 +380,12 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
 	entry->nr = syscall_nr;
 	entry->ret = syscall_get_return_value(current, regs);
 
-	event_trigger_unlock_commit(ftrace_file, buffer, event, entry,
+	event_trigger_unlock_commit(trace_file, buffer, event, entry,
				    irq_flags, pc);
 }
 
-static int reg_event_syscall_enter(struct ftrace_event_file *file,
-				   struct ftrace_event_call *call)
+static int reg_event_syscall_enter(struct trace_event_file *file,
+				   struct trace_event_call *call)
 {
 	struct trace_array *tr = file->tr;
 	int ret = 0;
@@ -405,8 +405,8 @@ static int reg_event_syscall_enter(struct ftrace_event_file *file,
 	return ret;
 }
 
-static void unreg_event_syscall_enter(struct ftrace_event_file *file,
-				      struct ftrace_event_call *call)
+static void unreg_event_syscall_enter(struct trace_event_file *file,
+				      struct trace_event_call *call)
 {
 	struct trace_array *tr = file->tr;
 	int num;
@@ -422,8 +422,8 @@ static void unreg_event_syscall_enter(struct ftrace_event_file *file,
 	mutex_unlock(&syscall_trace_lock);
 }
 
-static int reg_event_syscall_exit(struct ftrace_event_file *file,
-				  struct ftrace_event_call *call)
+static int reg_event_syscall_exit(struct trace_event_file *file,
+				  struct trace_event_call *call)
 {
 	struct trace_array *tr = file->tr;
 	int ret = 0;
@@ -443,8 +443,8 @@ static int reg_event_syscall_exit(struct ftrace_event_file *file,
 	return ret;
 }
 
-static void unreg_event_syscall_exit(struct ftrace_event_file *file,
-				     struct ftrace_event_call *call)
+static void unreg_event_syscall_exit(struct trace_event_file *file,
+				     struct trace_event_call *call)
 {
 	struct trace_array *tr = file->tr;
 	int num;
@@ -460,7 +460,7 @@ static void unreg_event_syscall_exit(struct ftrace_event_file *file,
 	mutex_unlock(&syscall_trace_lock);
 }
 
-static int __init init_syscall_trace(struct ftrace_event_call *call)
+static int __init init_syscall_trace(struct trace_event_call *call)
 {
 	int id;
 	int num;
@@ -493,7 +493,7 @@ struct trace_event_functions exit_syscall_print_funcs = {
 	.trace		= print_syscall_exit,
 };
 
-struct ftrace_event_class __refdata event_class_syscall_enter = {
+struct trace_event_class __refdata event_class_syscall_enter = {
 	.system		= "syscalls",
 	.reg		= syscall_enter_register,
 	.define_fields	= syscall_enter_define_fields,
@@ -501,7 +501,7 @@ struct ftrace_event_class __refdata event_class_syscall_enter = {
 	.raw_init	= init_syscall_trace,
 };
 
-struct ftrace_event_class __refdata event_class_syscall_exit = {
+struct trace_event_class __refdata event_class_syscall_exit = {
 	.system		= "syscalls",
 	.reg		= syscall_exit_register,
 	.define_fields	= syscall_exit_define_fields,
@@ -584,7 +584,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
 	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
 }
 
-static int perf_sysenter_enable(struct ftrace_event_call *call)
+static int perf_sysenter_enable(struct trace_event_call *call)
 {
 	int ret = 0;
 	int num;
@@ -605,7 +605,7 @@ static int perf_sysenter_enable(struct ftrace_event_call *call)
 	return ret;
 }
 
-static void perf_sysenter_disable(struct ftrace_event_call *call)
+static void perf_sysenter_disable(struct trace_event_call *call)
 {
 	int num;
 
@@ -656,7 +656,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
 	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
 }
 
-static int perf_sysexit_enable(struct ftrace_event_call *call)
+static int perf_sysexit_enable(struct trace_event_call *call)
 {
 	int ret = 0;
 	int num;
@@ -677,7 +677,7 @@ static int perf_sysexit_enable(struct ftrace_event_call *call)
 	return ret;
 }
 
-static void perf_sysexit_disable(struct ftrace_event_call *call)
+static void perf_sysexit_disable(struct trace_event_call *call)
 {
 	int num;
 
@@ -693,10 +693,10 @@ static void perf_sysexit_disable(struct ftrace_event_call *call)
 
 #endif /* CONFIG_PERF_EVENTS */
 
-static int syscall_enter_register(struct ftrace_event_call *event,
+static int syscall_enter_register(struct trace_event_call *event,
				 enum trace_reg type, void *data)
 {
-	struct ftrace_event_file *file = data;
+	struct trace_event_file *file = data;
 
 	switch (type) {
 	case TRACE_REG_REGISTER:
@@ -721,10 +721,10 @@ static int syscall_enter_register(struct ftrace_event_call *event,
 	return 0;
 }
 
-static int syscall_exit_register(struct ftrace_event_call *event,
+static int syscall_exit_register(struct trace_event_call *event,
				 enum trace_reg type, void *data)
 {
-	struct ftrace_event_file *file = data;
+	struct trace_event_file *file = data;
 
 	switch (type) {
 	case TRACE_REG_REGISTER:
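
The .reg callbacks renamed above implement the trace_event_class registration
contract: the core hands them a trace_event_file (for ftrace registration) or
just the call (for perf) plus an enum trace_reg selector, and the hunk context
truncates the switch. A sketch of its full shape, assuming the 4.1-era body of
syscall_enter_register():

	/* Sketch (assumed continuation of the truncated switch above). */
	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_enter(file, event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_enter(file, event);
		return 0;
	#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysenter_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysenter_disable(event);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
	#endif
	}
	return 0;
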
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 6dd022c7b5bc..aa1ea7b36fa8 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -293,7 +293,7 @@ static struct trace_uprobe *find_probe_event(const char *event, const char *grou
 	struct trace_uprobe *tu;
 
 	list_for_each_entry(tu, &uprobe_list, list)
-		if (strcmp(ftrace_event_name(&tu->tp.call), event) == 0 &&
+		if (strcmp(trace_event_name(&tu->tp.call), event) == 0 &&
		    strcmp(tu->tp.call.class->system, group) == 0)
			return tu;
 
@@ -323,7 +323,7 @@ static int register_trace_uprobe(struct trace_uprobe *tu)
 	mutex_lock(&uprobe_lock);
 
 	/* register as an event */
-	old_tu = find_probe_event(ftrace_event_name(&tu->tp.call),
+	old_tu = find_probe_event(trace_event_name(&tu->tp.call),
				  tu->tp.call.class->system);
 	if (old_tu) {
		/* delete old event */
@@ -600,7 +600,7 @@ static int probes_seq_show(struct seq_file *m, void *v)
 	int i;
 
 	seq_printf(m, "%c:%s/%s", c, tu->tp.call.class->system,
-			ftrace_event_name(&tu->tp.call));
+			trace_event_name(&tu->tp.call));
 	seq_printf(m, " %s:0x%p", tu->filename, (void *)tu->offset);
 
 	for (i = 0; i < tu->tp.nr_args; i++)
@@ -651,7 +651,7 @@ static int probes_profile_seq_show(struct seq_file *m, void *v)
 	struct trace_uprobe *tu = v;
 
 	seq_printf(m, "  %s %-44s %15lu\n", tu->filename,
-			ftrace_event_name(&tu->tp.call), tu->nhit);
+			trace_event_name(&tu->tp.call), tu->nhit);
 	return 0;
 }
 
@@ -770,26 +770,26 @@ static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
 static void __uprobe_trace_func(struct trace_uprobe *tu,
				unsigned long func, struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize,
-				struct ftrace_event_file *ftrace_file)
+				struct trace_event_file *trace_file)
 {
 	struct uprobe_trace_entry_head *entry;
 	struct ring_buffer_event *event;
 	struct ring_buffer *buffer;
 	void *data;
 	int size, esize;
-	struct ftrace_event_call *call = &tu->tp.call;
+	struct trace_event_call *call = &tu->tp.call;
 
-	WARN_ON(call != ftrace_file->event_call);
+	WARN_ON(call != trace_file->event_call);
 
 	if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
		return;
 
-	if (ftrace_trigger_soft_disabled(ftrace_file))
+	if (trace_trigger_soft_disabled(trace_file))
		return;
 
 	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
 	size = esize + tu->tp.size + dsize;
-	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
+	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type, size, 0, 0);
 	if (!event)
		return;
@@ -806,7 +806,7 @@ static void __uprobe_trace_func(struct trace_uprobe *tu,
 
 	memcpy(data, ucb->buf, tu->tp.size + dsize);
 
-	event_trigger_unlock_commit(ftrace_file, buffer, event, entry, 0, 0);
+	event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
 }
 
 /* uprobe handler */
@@ -853,12 +853,12 @@ print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *e
 
 	if (is_ret_probe(tu)) {
		trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
-				 ftrace_event_name(&tu->tp.call),
+				 trace_event_name(&tu->tp.call),
				 entry->vaddr[1], entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, true);
 	} else {
		trace_seq_printf(s, "%s: (0x%lx)",
-				 ftrace_event_name(&tu->tp.call),
+				 trace_event_name(&tu->tp.call),
				 entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, false);
 	}
@@ -881,7 +881,7 @@ typedef bool (*filter_func_t)(struct uprobe_consumer *self,
			      struct mm_struct *mm);
 
 static int
-probe_event_enable(struct trace_uprobe *tu, struct ftrace_event_file *file,
+probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file,
		   filter_func_t filter)
 {
 	bool enabled = trace_probe_is_enabled(&tu->tp);
@@ -938,7 +938,7 @@ probe_event_enable(struct trace_uprobe *tu, struct ftrace_event_file *file,
 }
 
 static void
-probe_event_disable(struct trace_uprobe *tu, struct ftrace_event_file *file)
+probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
 {
 	if (!trace_probe_is_enabled(&tu->tp))
		return;
@@ -967,7 +967,7 @@ probe_event_disable(struct trace_uprobe *tu, struct ftrace_event_file *file)
 	uprobe_buffer_disable();
 }
 
-static int uprobe_event_define_fields(struct ftrace_event_call *event_call)
+static int uprobe_event_define_fields(struct trace_event_call *event_call)
 {
 	int ret, i, size;
 	struct uprobe_trace_entry_head field;
@@ -1093,7 +1093,7 @@ static void __uprobe_perf_func(struct trace_uprobe *tu,
			       unsigned long func, struct pt_regs *regs,
			       struct uprobe_cpu_buffer *ucb, int dsize)
 {
-	struct ftrace_event_call *call = &tu->tp.call;
+	struct trace_event_call *call = &tu->tp.call;
 	struct uprobe_trace_entry_head *entry;
 	struct hlist_head *head;
 	void *data;
@@ -1159,11 +1159,11 @@ static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
 #endif /* CONFIG_PERF_EVENTS */
 
 static int
-trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type,
+trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
		      void *data)
 {
 	struct trace_uprobe *tu = event->data;
-	struct ftrace_event_file *file = data;
+	struct trace_event_file *file = data;
 
 	switch (type) {
 	case TRACE_REG_REGISTER:
@@ -1272,10 +1272,10 @@ static struct trace_event_functions uprobe_funcs = {
 
 static int register_uprobe_event(struct trace_uprobe *tu)
 {
-	struct ftrace_event_call *call = &tu->tp.call;
+	struct trace_event_call *call = &tu->tp.call;
 	int ret;
 
-	/* Initialize ftrace_event_call */
+	/* Initialize trace_event_call */
 	INIT_LIST_HEAD(&call->class->fields);
 	call->event.funcs = &uprobe_funcs;
 	call->class->define_fields = uprobe_event_define_fields;
@@ -1283,7 +1283,7 @@ static int register_uprobe_event(struct trace_uprobe *tu)
 	if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0)
		return -ENOMEM;
 
-	ret = register_ftrace_event(&call->event);
+	ret = register_trace_event(&call->event);
 	if (!ret) {
		kfree(call->print_fmt);
		return -ENODEV;
@@ -1295,9 +1295,9 @@ static int register_uprobe_event(struct trace_uprobe *tu)
 
 	if (ret) {
		pr_info("Failed to register uprobe event: %s\n",
-			ftrace_event_name(call));
+			trace_event_name(call));
		kfree(call->print_fmt);
-		unregister_ftrace_event(&call->event);
+		unregister_trace_event(&call->event);
 	}
 
 	return ret;
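
Both probe flavors register their events in the same two steps, and the
unwind order matters: register_trace_event() hands out the event type number
and output callbacks, then trace_add_event_call() exposes the event under
tracefs. Condensed from register_uprobe_event() above:

	/* Step 1: claim a type number and hook up the formatter.
	 * A return of 0 means no type number was available. */
	ret = register_trace_event(&call->event);
	if (!ret)
		return -ENODEV;

	/* Step 2: create the tracefs event; undo step 1 on failure. */
	ret = trace_add_event_call(call);
	if (ret)
		unregister_trace_event(&call->event);
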
diff --git a/mm/debug.c b/mm/debug.c
index 3eb3ac2fcee7..76089ddf99ea 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -7,7 +7,7 @@
 
 #include <linux/kernel.h>
 #include <linux/mm.h>
-#include <linux/ftrace_event.h>
+#include <linux/trace_events.h>
 #include <linux/memcontrol.h>
 
 static const struct trace_print_flags pageflag_names[] = {
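
mm/debug.c needs the renamed header only for struct trace_print_flags, which
it uses as a plain { mask, name } table for dumping page flags. The table
itself is unchanged by this patch; abbreviated, it has this shape:

	static const struct trace_print_flags pageflag_names[] = {
		{ 1UL << PG_locked,	"locked"	},
		{ 1UL << PG_dirty,	"dirty"		},
		{ 1UL << PG_lru,	"lru"		},
		/* ... one entry per PG_* flag ... */
	};
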
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index 430b5d27828e..1bd593bbf7a5 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -55,10 +55,10 @@ void xs_init(pTHX)
 
 INTERP my_perl;
 
-#define FTRACE_MAX_EVENT				\
+#define TRACE_EVENT_TYPE_MAX				\
	((1 << (sizeof(unsigned short) * 8)) - 1)
 
-static DECLARE_BITMAP(events_defined, FTRACE_MAX_EVENT);
+static DECLARE_BITMAP(events_defined, TRACE_EVENT_TYPE_MAX);
 
 extern struct scripting_context *scripting_context;
 
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 5544b8cdd1ee..ace2484985cb 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -44,10 +44,10 @@
 
 PyMODINIT_FUNC initperf_trace_context(void);
 
-#define FTRACE_MAX_EVENT				\
+#define TRACE_EVENT_TYPE_MAX				\
	((1 << (sizeof(unsigned short) * 8)) - 1)
 
-static DECLARE_BITMAP(events_defined, FTRACE_MAX_EVENT);
+static DECLARE_BITMAP(events_defined, TRACE_EVENT_TYPE_MAX);
 
 #define MAX_FIELDS	64
 #define N_COMMON_FIELDS	7
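
In both scripting engines the renamed constant only sizes a seen-before
bitmap: event type numbers fit in an unsigned short, so one bit per possible
type records whether that event's script-side symbols were already defined.
A sketch of the pattern (the function name here is illustrative, not the
exact tools/perf handler):

	/* Sketch: define each event's symbols for the script at most once. */
	static void handle_event_once(struct event_format *event)
	{
		if (!test_and_set_bit(event->id, events_defined))
			define_event_symbols(event, event->name,
					     event->print_fmt.args);
	}
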