 include/linux/ftrace_event.h        |  22
 include/linux/syscalls.h            |   8
 include/linux/tracepoint.h          |  49
 include/trace/events/syscalls.h     |   3
 include/trace/ftrace.h              |  15
 kernel/trace/trace.c                |   2
 kernel/trace/trace_events.c         |  55
 kernel/trace/trace_events_trigger.c |   2
 kernel/trace/trace_export.c         |   6
 kernel/trace/trace_kprobe.c         |  21
 kernel/trace/trace_output.c         |   2
 kernel/trace/trace_uprobe.c         |  20
 kernel/tracepoint.c                 | 516
 13 files changed, 358 insertions(+), 363 deletions(-)
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index cdc30111d2f8..d16da3e53bc7 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
| @@ -7,6 +7,7 @@ | |||
| 7 | #include <linux/percpu.h> | 7 | #include <linux/percpu.h> |
| 8 | #include <linux/hardirq.h> | 8 | #include <linux/hardirq.h> |
| 9 | #include <linux/perf_event.h> | 9 | #include <linux/perf_event.h> |
| 10 | #include <linux/tracepoint.h> | ||
| 10 | 11 | ||
| 11 | struct trace_array; | 12 | struct trace_array; |
| 12 | struct trace_buffer; | 13 | struct trace_buffer; |
| @@ -232,6 +233,7 @@ enum { | |||
| 232 | TRACE_EVENT_FL_IGNORE_ENABLE_BIT, | 233 | TRACE_EVENT_FL_IGNORE_ENABLE_BIT, |
| 233 | TRACE_EVENT_FL_WAS_ENABLED_BIT, | 234 | TRACE_EVENT_FL_WAS_ENABLED_BIT, |
| 234 | TRACE_EVENT_FL_USE_CALL_FILTER_BIT, | 235 | TRACE_EVENT_FL_USE_CALL_FILTER_BIT, |
| 236 | TRACE_EVENT_FL_TRACEPOINT_BIT, | ||
| 235 | }; | 237 | }; |
| 236 | 238 | ||
| 237 | /* | 239 | /* |
| @@ -244,6 +246,7 @@ enum { | |||
| 244 | * (used for module unloading, if a module event is enabled, | 246 | * (used for module unloading, if a module event is enabled, |
| 245 | * it is best to clear the buffers that used it). | 247 | * it is best to clear the buffers that used it). |
| 246 | * USE_CALL_FILTER - For ftrace internal events, don't use file filter | 248 | * USE_CALL_FILTER - For ftrace internal events, don't use file filter |
| 249 | * TRACEPOINT - Event is a tracepoint | ||
| 247 | */ | 250 | */ |
| 248 | enum { | 251 | enum { |
| 249 | TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT), | 252 | TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT), |
| @@ -252,12 +255,17 @@ enum { | |||
| 252 | TRACE_EVENT_FL_IGNORE_ENABLE = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT), | 255 | TRACE_EVENT_FL_IGNORE_ENABLE = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT), |
| 253 | TRACE_EVENT_FL_WAS_ENABLED = (1 << TRACE_EVENT_FL_WAS_ENABLED_BIT), | 256 | TRACE_EVENT_FL_WAS_ENABLED = (1 << TRACE_EVENT_FL_WAS_ENABLED_BIT), |
| 254 | TRACE_EVENT_FL_USE_CALL_FILTER = (1 << TRACE_EVENT_FL_USE_CALL_FILTER_BIT), | 257 | TRACE_EVENT_FL_USE_CALL_FILTER = (1 << TRACE_EVENT_FL_USE_CALL_FILTER_BIT), |
| 258 | TRACE_EVENT_FL_TRACEPOINT = (1 << TRACE_EVENT_FL_TRACEPOINT_BIT), | ||
| 255 | }; | 259 | }; |
| 256 | 260 | ||
| 257 | struct ftrace_event_call { | 261 | struct ftrace_event_call { |
| 258 | struct list_head list; | 262 | struct list_head list; |
| 259 | struct ftrace_event_class *class; | 263 | struct ftrace_event_class *class; |
| 260 | char *name; | 264 | union { |
| 265 | char *name; | ||
| 266 | /* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */ | ||
| 267 | struct tracepoint *tp; | ||
| 268 | }; | ||
| 261 | struct trace_event event; | 269 | struct trace_event event; |
| 262 | const char *print_fmt; | 270 | const char *print_fmt; |
| 263 | struct event_filter *filter; | 271 | struct event_filter *filter; |
| @@ -271,6 +279,7 @@ struct ftrace_event_call { | |||
| 271 | * bit 3: ftrace internal event (do not enable) | 279 | * bit 3: ftrace internal event (do not enable) |
| 272 | * bit 4: Event was enabled by module | 280 | * bit 4: Event was enabled by module |
| 273 | * bit 5: use call filter rather than file filter | 281 | * bit 5: use call filter rather than file filter |
| 282 | * bit 6: Event is a tracepoint | ||
| 274 | */ | 283 | */ |
| 275 | int flags; /* static flags of different events */ | 284 | int flags; /* static flags of different events */ |
| 276 | 285 | ||
| @@ -283,6 +292,15 @@ struct ftrace_event_call { | |||
| 283 | #endif | 292 | #endif |
| 284 | }; | 293 | }; |
| 285 | 294 | ||
| 295 | static inline const char * | ||
| 296 | ftrace_event_name(struct ftrace_event_call *call) | ||
| 297 | { | ||
| 298 | if (call->flags & TRACE_EVENT_FL_TRACEPOINT) | ||
| 299 | return call->tp ? call->tp->name : NULL; | ||
| 300 | else | ||
| 301 | return call->name; | ||
| 302 | } | ||
| 303 | |||
| 286 | struct trace_array; | 304 | struct trace_array; |
| 287 | struct ftrace_subsystem_dir; | 305 | struct ftrace_subsystem_dir; |
| 288 | 306 | ||
| @@ -353,7 +371,7 @@ struct ftrace_event_file { | |||
| 353 | #define __TRACE_EVENT_FLAGS(name, value) \ | 371 | #define __TRACE_EVENT_FLAGS(name, value) \ |
| 354 | static int __init trace_init_flags_##name(void) \ | 372 | static int __init trace_init_flags_##name(void) \ |
| 355 | { \ | 373 | { \ |
| 356 | event_##name.flags = value; \ | 374 | event_##name.flags |= value; \ |
| 357 | return 0; \ | 375 | return 0; \ |
| 358 | } \ | 376 | } \ |
| 359 | early_initcall(trace_init_flags_##name); | 377 | early_initcall(trace_init_flags_##name); |
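
Note: since "name" now shares storage with "tp" in an anonymous union, code outside this header should go through the new ftrace_event_name() accessor rather than reading call->name directly; the callers later in this diff are converted accordingly. A minimal sketch of a caller that only has a struct ftrace_event_call pointer (the helper and event name below are illustrative, not part of the patch):

#include <linux/ftrace_event.h>
#include <linux/string.h>

/* Sketch: match an event by name whether the call carries a literal
 * name (syscall and ftrace internal events) or a struct tracepoint
 * pointer (TRACE_EVENT/DEFINE_EVENT events). */
static bool event_matches(struct ftrace_event_call *call, const char *want)
{
        const char *name = ftrace_event_name(call);

        /* ftrace_event_name() can return NULL for a tracepoint event
         * whose tp pointer is not set, so check before comparing. */
        return name && strcmp(name, want) == 0;
}
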
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 697ceb70a9a9..a4a0588c5397 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
| @@ -119,8 +119,10 @@ extern struct trace_event_functions exit_syscall_print_funcs; | |||
| 119 | static struct syscall_metadata __syscall_meta_##sname; \ | 119 | static struct syscall_metadata __syscall_meta_##sname; \ |
| 120 | static struct ftrace_event_call __used \ | 120 | static struct ftrace_event_call __used \ |
| 121 | event_enter_##sname = { \ | 121 | event_enter_##sname = { \ |
| 122 | .name = "sys_enter"#sname, \ | ||
| 123 | .class = &event_class_syscall_enter, \ | 122 | .class = &event_class_syscall_enter, \ |
| 123 | { \ | ||
| 124 | .name = "sys_enter"#sname, \ | ||
| 125 | }, \ | ||
| 124 | .event.funcs = &enter_syscall_print_funcs, \ | 126 | .event.funcs = &enter_syscall_print_funcs, \ |
| 125 | .data = (void *)&__syscall_meta_##sname,\ | 127 | .data = (void *)&__syscall_meta_##sname,\ |
| 126 | .flags = TRACE_EVENT_FL_CAP_ANY, \ | 128 | .flags = TRACE_EVENT_FL_CAP_ANY, \ |
| @@ -133,8 +135,10 @@ extern struct trace_event_functions exit_syscall_print_funcs; | |||
| 133 | static struct syscall_metadata __syscall_meta_##sname; \ | 135 | static struct syscall_metadata __syscall_meta_##sname; \ |
| 134 | static struct ftrace_event_call __used \ | 136 | static struct ftrace_event_call __used \ |
| 135 | event_exit_##sname = { \ | 137 | event_exit_##sname = { \ |
| 136 | .name = "sys_exit"#sname, \ | ||
| 137 | .class = &event_class_syscall_exit, \ | 138 | .class = &event_class_syscall_exit, \ |
| 139 | { \ | ||
| 140 | .name = "sys_exit"#sname, \ | ||
| 141 | }, \ | ||
| 138 | .event.funcs = &exit_syscall_print_funcs, \ | 142 | .event.funcs = &exit_syscall_print_funcs, \ |
| 139 | .data = (void *)&__syscall_meta_##sname,\ | 143 | .data = (void *)&__syscall_meta_##sname,\ |
| 140 | .flags = TRACE_EVENT_FL_CAP_ANY, \ | 144 | .flags = TRACE_EVENT_FL_CAP_ANY, \ |
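
Note: the extra brace pair around ".name" is needed because name is now a member of an anonymous union inside struct ftrace_event_call: the un-designated braces positionally initialize the union (the member that follows .class), and the designator inside them selects which union member is set. A standalone sketch of the same initializer shape, with made-up struct and field names:

#include <stdio.h>

struct call {
        int class_id;
        union {                 /* anonymous union, as in ftrace_event_call */
                const char *name;
                void *tp;
        };
        int flags;
};

/* ".class_id" is designated; the following brace pair falls through
 * positionally to the next member (the anonymous union), and ".name"
 * picks the union member being initialized. */
static struct call demo = {
        .class_id = 1,
        {
                .name = "sys_enter_openat",
        },
        .flags = 0,
};

int main(void)
{
        printf("%s\n", demo.name);
        return 0;
}
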
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 812b2553dfd8..9d30ee469c2a 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
| @@ -6,7 +6,7 @@ | |||
| 6 | * | 6 | * |
| 7 | * See Documentation/trace/tracepoints.txt. | 7 | * See Documentation/trace/tracepoints.txt. |
| 8 | * | 8 | * |
| 9 | * (C) Copyright 2008 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca> | 9 | * Copyright (C) 2008-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com> |
| 10 | * | 10 | * |
| 11 | * Heavily inspired from the Linux Kernel Markers. | 11 | * Heavily inspired from the Linux Kernel Markers. |
| 12 | * | 12 | * |
| @@ -21,6 +21,7 @@ | |||
| 21 | 21 | ||
| 22 | struct module; | 22 | struct module; |
| 23 | struct tracepoint; | 23 | struct tracepoint; |
| 24 | struct notifier_block; | ||
| 24 | 25 | ||
| 25 | struct tracepoint_func { | 26 | struct tracepoint_func { |
| 26 | void *func; | 27 | void *func; |
| @@ -35,31 +36,38 @@ struct tracepoint { | |||
| 35 | struct tracepoint_func __rcu *funcs; | 36 | struct tracepoint_func __rcu *funcs; |
| 36 | }; | 37 | }; |
| 37 | 38 | ||
| 38 | /* | ||
| 39 | * Connect a probe to a tracepoint. | ||
| 40 | * Internal API, should not be used directly. | ||
| 41 | */ | ||
| 42 | extern int tracepoint_probe_register(const char *name, void *probe, void *data); | ||
| 43 | |||
| 44 | /* | ||
| 45 | * Disconnect a probe from a tracepoint. | ||
| 46 | * Internal API, should not be used directly. | ||
| 47 | */ | ||
| 48 | extern int | 39 | extern int |
| 49 | tracepoint_probe_unregister(const char *name, void *probe, void *data); | 40 | tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data); |
| 41 | extern int | ||
| 42 | tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data); | ||
| 43 | extern void | ||
| 44 | for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv), | ||
| 45 | void *priv); | ||
| 50 | 46 | ||
| 51 | #ifdef CONFIG_MODULES | 47 | #ifdef CONFIG_MODULES |
| 52 | struct tp_module { | 48 | struct tp_module { |
| 53 | struct list_head list; | 49 | struct list_head list; |
| 54 | unsigned int num_tracepoints; | 50 | struct module *mod; |
| 55 | struct tracepoint * const *tracepoints_ptrs; | ||
| 56 | }; | 51 | }; |
| 52 | |||
| 57 | bool trace_module_has_bad_taint(struct module *mod); | 53 | bool trace_module_has_bad_taint(struct module *mod); |
| 54 | extern int register_tracepoint_module_notifier(struct notifier_block *nb); | ||
| 55 | extern int unregister_tracepoint_module_notifier(struct notifier_block *nb); | ||
| 58 | #else | 56 | #else |
| 59 | static inline bool trace_module_has_bad_taint(struct module *mod) | 57 | static inline bool trace_module_has_bad_taint(struct module *mod) |
| 60 | { | 58 | { |
| 61 | return false; | 59 | return false; |
| 62 | } | 60 | } |
| 61 | static inline | ||
| 62 | int register_tracepoint_module_notifier(struct notifier_block *nb) | ||
| 63 | { | ||
| 64 | return 0; | ||
| 65 | } | ||
| 66 | static inline | ||
| 67 | int unregister_tracepoint_module_notifier(struct notifier_block *nb) | ||
| 68 | { | ||
| 69 | return 0; | ||
| 70 | } | ||
| 63 | #endif /* CONFIG_MODULES */ | 71 | #endif /* CONFIG_MODULES */ |
| 64 | 72 | ||
| 65 | /* | 73 | /* |
| @@ -72,6 +80,11 @@ static inline void tracepoint_synchronize_unregister(void) | |||
| 72 | synchronize_sched(); | 80 | synchronize_sched(); |
| 73 | } | 81 | } |
| 74 | 82 | ||
| 83 | #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS | ||
| 84 | extern void syscall_regfunc(void); | ||
| 85 | extern void syscall_unregfunc(void); | ||
| 86 | #endif /* CONFIG_HAVE_SYSCALL_TRACEPOINTS */ | ||
| 87 | |||
| 75 | #define PARAMS(args...) args | 88 | #define PARAMS(args...) args |
| 76 | 89 | ||
| 77 | #endif /* _LINUX_TRACEPOINT_H */ | 90 | #endif /* _LINUX_TRACEPOINT_H */ |
| @@ -160,14 +173,14 @@ static inline void tracepoint_synchronize_unregister(void) | |||
| 160 | static inline int \ | 173 | static inline int \ |
| 161 | register_trace_##name(void (*probe)(data_proto), void *data) \ | 174 | register_trace_##name(void (*probe)(data_proto), void *data) \ |
| 162 | { \ | 175 | { \ |
| 163 | return tracepoint_probe_register(#name, (void *)probe, \ | 176 | return tracepoint_probe_register(&__tracepoint_##name, \ |
| 164 | data); \ | 177 | (void *)probe, data); \ |
| 165 | } \ | 178 | } \ |
| 166 | static inline int \ | 179 | static inline int \ |
| 167 | unregister_trace_##name(void (*probe)(data_proto), void *data) \ | 180 | unregister_trace_##name(void (*probe)(data_proto), void *data) \ |
| 168 | { \ | 181 | { \ |
| 169 | return tracepoint_probe_unregister(#name, (void *)probe, \ | 182 | return tracepoint_probe_unregister(&__tracepoint_##name,\ |
| 170 | data); \ | 183 | (void *)probe, data); \ |
| 171 | } \ | 184 | } \ |
| 172 | static inline void \ | 185 | static inline void \ |
| 173 | check_trace_callback_type_##name(void (*cb)(data_proto)) \ | 186 | check_trace_callback_type_##name(void (*cb)(data_proto)) \ |
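
Note: the public register/unregister entry points now take the struct tracepoint itself instead of a name string, and the generated register_trace_##name()/unregister_trace_##name() wrappers pass &__tracepoint_##name underneath. A minimal module-side sketch, assuming a tracepoint declared elsewhere as DECLARE_TRACE(foo, TP_PROTO(int val), TP_ARGS(val)); the "foo" event, its header, and its prototype are purely illustrative:

#include <linux/module.h>
#include <linux/tracepoint.h>
#include <trace/events/foo.h>   /* hypothetical header providing the foo tracepoint */

/* Probes receive the private data pointer first, then the tracepoint's
 * own arguments. */
static void foo_probe(void *data, int val)
{
        pr_info("foo fired: val=%d\n", val);
}

static int __init foo_demo_init(void)
{
        /* Expands to tracepoint_probe_register(&__tracepoint_foo, ...) */
        return register_trace_foo(foo_probe, NULL);
}

static void __exit foo_demo_exit(void)
{
        unregister_trace_foo(foo_probe, NULL);
        /* Wait out in-flight probe calls before the module text goes away. */
        tracepoint_synchronize_unregister();
}

module_init(foo_demo_init);
module_exit(foo_demo_exit);
MODULE_LICENSE("GPL");
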
diff --git a/include/trace/events/syscalls.h b/include/trace/events/syscalls.h
index 5a4c04a75b3d..14e49c798135 100644
--- a/include/trace/events/syscalls.h
+++ b/include/trace/events/syscalls.h
| @@ -13,9 +13,6 @@ | |||
| 13 | 13 | ||
| 14 | #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS | 14 | #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS |
| 15 | 15 | ||
| 16 | extern void syscall_regfunc(void); | ||
| 17 | extern void syscall_unregfunc(void); | ||
| 18 | |||
| 19 | TRACE_EVENT_FN(sys_enter, | 16 | TRACE_EVENT_FN(sys_enter, |
| 20 | 17 | ||
| 21 | TP_PROTO(struct pt_regs *regs, long id), | 18 | TP_PROTO(struct pt_regs *regs, long id), |
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 8765126b328c..0a1a4f7caf09 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
| @@ -470,10 +470,13 @@ static inline notrace int ftrace_get_offsets_##call( \ | |||
| 470 | * }; | 470 | * }; |
| 471 | * | 471 | * |
| 472 | * static struct ftrace_event_call event_<call> = { | 472 | * static struct ftrace_event_call event_<call> = { |
| 473 | * .name = "<call>", | ||
| 474 | * .class = event_class_<template>, | 473 | * .class = event_class_<template>, |
| 474 | * { | ||
| 475 | * .tp = &__tracepoint_<call>, | ||
| 476 | * }, | ||
| 475 | * .event = &ftrace_event_type_<call>, | 477 | * .event = &ftrace_event_type_<call>, |
| 476 | * .print_fmt = print_fmt_<call>, | 478 | * .print_fmt = print_fmt_<call>, |
| 479 | * .flags = TRACE_EVENT_FL_TRACEPOINT, | ||
| 477 | * }; | 480 | * }; |
| 478 | * // its only safe to use pointers when doing linker tricks to | 481 | * // its only safe to use pointers when doing linker tricks to |
| 479 | * // create an array. | 482 | * // create an array. |
| @@ -605,10 +608,13 @@ static struct ftrace_event_class __used __refdata event_class_##call = { \ | |||
| 605 | #define DEFINE_EVENT(template, call, proto, args) \ | 608 | #define DEFINE_EVENT(template, call, proto, args) \ |
| 606 | \ | 609 | \ |
| 607 | static struct ftrace_event_call __used event_##call = { \ | 610 | static struct ftrace_event_call __used event_##call = { \ |
| 608 | .name = #call, \ | ||
| 609 | .class = &event_class_##template, \ | 611 | .class = &event_class_##template, \ |
| 612 | { \ | ||
| 613 | .tp = &__tracepoint_##call, \ | ||
| 614 | }, \ | ||
| 610 | .event.funcs = &ftrace_event_type_funcs_##template, \ | 615 | .event.funcs = &ftrace_event_type_funcs_##template, \ |
| 611 | .print_fmt = print_fmt_##template, \ | 616 | .print_fmt = print_fmt_##template, \ |
| 617 | .flags = TRACE_EVENT_FL_TRACEPOINT, \ | ||
| 612 | }; \ | 618 | }; \ |
| 613 | static struct ftrace_event_call __used \ | 619 | static struct ftrace_event_call __used \ |
| 614 | __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call | 620 | __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call |
| @@ -619,10 +625,13 @@ __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call | |||
| 619 | static const char print_fmt_##call[] = print; \ | 625 | static const char print_fmt_##call[] = print; \ |
| 620 | \ | 626 | \ |
| 621 | static struct ftrace_event_call __used event_##call = { \ | 627 | static struct ftrace_event_call __used event_##call = { \ |
| 622 | .name = #call, \ | ||
| 623 | .class = &event_class_##template, \ | 628 | .class = &event_class_##template, \ |
| 629 | { \ | ||
| 630 | .tp = &__tracepoint_##call, \ | ||
| 631 | }, \ | ||
| 624 | .event.funcs = &ftrace_event_type_funcs_##call, \ | 632 | .event.funcs = &ftrace_event_type_funcs_##call, \ |
| 625 | .print_fmt = print_fmt_##call, \ | 633 | .print_fmt = print_fmt_##call, \ |
| 634 | .flags = TRACE_EVENT_FL_TRACEPOINT, \ | ||
| 626 | }; \ | 635 | }; \ |
| 627 | static struct ftrace_event_call __used \ | 636 | static struct ftrace_event_call __used \ |
| 628 | __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call | 637 | __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 9be67c5e5b0f..e3e665685ee5 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
| @@ -3611,6 +3611,8 @@ static const char readme_msg[] = | |||
| 3611 | #ifdef CONFIG_TRACER_SNAPSHOT | 3611 | #ifdef CONFIG_TRACER_SNAPSHOT |
| 3612 | "\t\t snapshot\n" | 3612 | "\t\t snapshot\n" |
| 3613 | #endif | 3613 | #endif |
| 3614 | "\t\t dump\n" | ||
| 3615 | "\t\t cpudump\n" | ||
| 3614 | "\t example: echo do_fault:traceoff > set_ftrace_filter\n" | 3616 | "\t example: echo do_fault:traceoff > set_ftrace_filter\n" |
| 3615 | "\t echo do_trap:traceoff:3 > set_ftrace_filter\n" | 3617 | "\t echo do_trap:traceoff:3 > set_ftrace_filter\n" |
| 3616 | "\t The first one will disable tracing every time do_fault is hit\n" | 3618 | "\t The first one will disable tracing every time do_fault is hit\n" |
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 83a4378dc5e0..3ddfd8f62c05 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
| @@ -223,24 +223,25 @@ int ftrace_event_reg(struct ftrace_event_call *call, | |||
| 223 | { | 223 | { |
| 224 | struct ftrace_event_file *file = data; | 224 | struct ftrace_event_file *file = data; |
| 225 | 225 | ||
| 226 | WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT)); | ||
| 226 | switch (type) { | 227 | switch (type) { |
| 227 | case TRACE_REG_REGISTER: | 228 | case TRACE_REG_REGISTER: |
| 228 | return tracepoint_probe_register(call->name, | 229 | return tracepoint_probe_register(call->tp, |
| 229 | call->class->probe, | 230 | call->class->probe, |
| 230 | file); | 231 | file); |
| 231 | case TRACE_REG_UNREGISTER: | 232 | case TRACE_REG_UNREGISTER: |
| 232 | tracepoint_probe_unregister(call->name, | 233 | tracepoint_probe_unregister(call->tp, |
| 233 | call->class->probe, | 234 | call->class->probe, |
| 234 | file); | 235 | file); |
| 235 | return 0; | 236 | return 0; |
| 236 | 237 | ||
| 237 | #ifdef CONFIG_PERF_EVENTS | 238 | #ifdef CONFIG_PERF_EVENTS |
| 238 | case TRACE_REG_PERF_REGISTER: | 239 | case TRACE_REG_PERF_REGISTER: |
| 239 | return tracepoint_probe_register(call->name, | 240 | return tracepoint_probe_register(call->tp, |
| 240 | call->class->perf_probe, | 241 | call->class->perf_probe, |
| 241 | call); | 242 | call); |
| 242 | case TRACE_REG_PERF_UNREGISTER: | 243 | case TRACE_REG_PERF_UNREGISTER: |
| 243 | tracepoint_probe_unregister(call->name, | 244 | tracepoint_probe_unregister(call->tp, |
| 244 | call->class->perf_probe, | 245 | call->class->perf_probe, |
| 245 | call); | 246 | call); |
| 246 | return 0; | 247 | return 0; |
| @@ -352,7 +353,7 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file, | |||
| 352 | if (ret) { | 353 | if (ret) { |
| 353 | tracing_stop_cmdline_record(); | 354 | tracing_stop_cmdline_record(); |
| 354 | pr_info("event trace: Could not enable event " | 355 | pr_info("event trace: Could not enable event " |
| 355 | "%s\n", call->name); | 356 | "%s\n", ftrace_event_name(call)); |
| 356 | break; | 357 | break; |
| 357 | } | 358 | } |
| 358 | set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags); | 359 | set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags); |
| @@ -481,27 +482,29 @@ __ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match, | |||
| 481 | { | 482 | { |
| 482 | struct ftrace_event_file *file; | 483 | struct ftrace_event_file *file; |
| 483 | struct ftrace_event_call *call; | 484 | struct ftrace_event_call *call; |
| 485 | const char *name; | ||
| 484 | int ret = -EINVAL; | 486 | int ret = -EINVAL; |
| 485 | 487 | ||
| 486 | list_for_each_entry(file, &tr->events, list) { | 488 | list_for_each_entry(file, &tr->events, list) { |
| 487 | 489 | ||
| 488 | call = file->event_call; | 490 | call = file->event_call; |
| 491 | name = ftrace_event_name(call); | ||
| 489 | 492 | ||
| 490 | if (!call->name || !call->class || !call->class->reg) | 493 | if (!name || !call->class || !call->class->reg) |
| 491 | continue; | 494 | continue; |
| 492 | 495 | ||
| 493 | if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) | 496 | if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) |
| 494 | continue; | 497 | continue; |
| 495 | 498 | ||
| 496 | if (match && | 499 | if (match && |
| 497 | strcmp(match, call->name) != 0 && | 500 | strcmp(match, name) != 0 && |
| 498 | strcmp(match, call->class->system) != 0) | 501 | strcmp(match, call->class->system) != 0) |
| 499 | continue; | 502 | continue; |
| 500 | 503 | ||
| 501 | if (sub && strcmp(sub, call->class->system) != 0) | 504 | if (sub && strcmp(sub, call->class->system) != 0) |
| 502 | continue; | 505 | continue; |
| 503 | 506 | ||
| 504 | if (event && strcmp(event, call->name) != 0) | 507 | if (event && strcmp(event, name) != 0) |
| 505 | continue; | 508 | continue; |
| 506 | 509 | ||
| 507 | ftrace_event_enable_disable(file, set); | 510 | ftrace_event_enable_disable(file, set); |
| @@ -699,7 +702,7 @@ static int t_show(struct seq_file *m, void *v) | |||
| 699 | 702 | ||
| 700 | if (strcmp(call->class->system, TRACE_SYSTEM) != 0) | 703 | if (strcmp(call->class->system, TRACE_SYSTEM) != 0) |
| 701 | seq_printf(m, "%s:", call->class->system); | 704 | seq_printf(m, "%s:", call->class->system); |
| 702 | seq_printf(m, "%s\n", call->name); | 705 | seq_printf(m, "%s\n", ftrace_event_name(call)); |
| 703 | 706 | ||
| 704 | return 0; | 707 | return 0; |
| 705 | } | 708 | } |
| @@ -792,7 +795,7 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt, | |||
| 792 | mutex_lock(&event_mutex); | 795 | mutex_lock(&event_mutex); |
| 793 | list_for_each_entry(file, &tr->events, list) { | 796 | list_for_each_entry(file, &tr->events, list) { |
| 794 | call = file->event_call; | 797 | call = file->event_call; |
| 795 | if (!call->name || !call->class || !call->class->reg) | 798 | if (!ftrace_event_name(call) || !call->class || !call->class->reg) |
| 796 | continue; | 799 | continue; |
| 797 | 800 | ||
| 798 | if (system && strcmp(call->class->system, system->name) != 0) | 801 | if (system && strcmp(call->class->system, system->name) != 0) |
| @@ -907,7 +910,7 @@ static int f_show(struct seq_file *m, void *v) | |||
| 907 | 910 | ||
| 908 | switch ((unsigned long)v) { | 911 | switch ((unsigned long)v) { |
| 909 | case FORMAT_HEADER: | 912 | case FORMAT_HEADER: |
| 910 | seq_printf(m, "name: %s\n", call->name); | 913 | seq_printf(m, "name: %s\n", ftrace_event_name(call)); |
| 911 | seq_printf(m, "ID: %d\n", call->event.type); | 914 | seq_printf(m, "ID: %d\n", call->event.type); |
| 912 | seq_printf(m, "format:\n"); | 915 | seq_printf(m, "format:\n"); |
| 913 | return 0; | 916 | return 0; |
| @@ -1527,6 +1530,7 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file) | |||
| 1527 | struct trace_array *tr = file->tr; | 1530 | struct trace_array *tr = file->tr; |
| 1528 | struct list_head *head; | 1531 | struct list_head *head; |
| 1529 | struct dentry *d_events; | 1532 | struct dentry *d_events; |
| 1533 | const char *name; | ||
| 1530 | int ret; | 1534 | int ret; |
| 1531 | 1535 | ||
| 1532 | /* | 1536 | /* |
| @@ -1540,10 +1544,11 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file) | |||
| 1540 | } else | 1544 | } else |
| 1541 | d_events = parent; | 1545 | d_events = parent; |
| 1542 | 1546 | ||
| 1543 | file->dir = debugfs_create_dir(call->name, d_events); | 1547 | name = ftrace_event_name(call); |
| 1548 | file->dir = debugfs_create_dir(name, d_events); | ||
| 1544 | if (!file->dir) { | 1549 | if (!file->dir) { |
| 1545 | pr_warning("Could not create debugfs '%s' directory\n", | 1550 | pr_warning("Could not create debugfs '%s' directory\n", |
| 1546 | call->name); | 1551 | name); |
| 1547 | return -1; | 1552 | return -1; |
| 1548 | } | 1553 | } |
| 1549 | 1554 | ||
| @@ -1567,7 +1572,7 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file) | |||
| 1567 | ret = call->class->define_fields(call); | 1572 | ret = call->class->define_fields(call); |
| 1568 | if (ret < 0) { | 1573 | if (ret < 0) { |
| 1569 | pr_warning("Could not initialize trace point" | 1574 | pr_warning("Could not initialize trace point" |
| 1570 | " events/%s\n", call->name); | 1575 | " events/%s\n", name); |
| 1571 | return -1; | 1576 | return -1; |
| 1572 | } | 1577 | } |
| 1573 | } | 1578 | } |
| @@ -1631,15 +1636,17 @@ static void event_remove(struct ftrace_event_call *call) | |||
| 1631 | static int event_init(struct ftrace_event_call *call) | 1636 | static int event_init(struct ftrace_event_call *call) |
| 1632 | { | 1637 | { |
| 1633 | int ret = 0; | 1638 | int ret = 0; |
| 1639 | const char *name; | ||
| 1634 | 1640 | ||
| 1635 | if (WARN_ON(!call->name)) | 1641 | name = ftrace_event_name(call); |
| 1642 | if (WARN_ON(!name)) | ||
| 1636 | return -EINVAL; | 1643 | return -EINVAL; |
| 1637 | 1644 | ||
| 1638 | if (call->class->raw_init) { | 1645 | if (call->class->raw_init) { |
| 1639 | ret = call->class->raw_init(call); | 1646 | ret = call->class->raw_init(call); |
| 1640 | if (ret < 0 && ret != -ENOSYS) | 1647 | if (ret < 0 && ret != -ENOSYS) |
| 1641 | pr_warn("Could not initialize trace events/%s\n", | 1648 | pr_warn("Could not initialize trace events/%s\n", |
| 1642 | call->name); | 1649 | name); |
| 1643 | } | 1650 | } |
| 1644 | 1651 | ||
| 1645 | return ret; | 1652 | return ret; |
| @@ -1885,7 +1892,7 @@ __trace_add_event_dirs(struct trace_array *tr) | |||
| 1885 | ret = __trace_add_new_event(call, tr); | 1892 | ret = __trace_add_new_event(call, tr); |
| 1886 | if (ret < 0) | 1893 | if (ret < 0) |
| 1887 | pr_warning("Could not create directory for event %s\n", | 1894 | pr_warning("Could not create directory for event %s\n", |
| 1888 | call->name); | 1895 | ftrace_event_name(call)); |
| 1889 | } | 1896 | } |
| 1890 | } | 1897 | } |
| 1891 | 1898 | ||
| @@ -1894,18 +1901,20 @@ find_event_file(struct trace_array *tr, const char *system, const char *event) | |||
| 1894 | { | 1901 | { |
| 1895 | struct ftrace_event_file *file; | 1902 | struct ftrace_event_file *file; |
| 1896 | struct ftrace_event_call *call; | 1903 | struct ftrace_event_call *call; |
| 1904 | const char *name; | ||
| 1897 | 1905 | ||
| 1898 | list_for_each_entry(file, &tr->events, list) { | 1906 | list_for_each_entry(file, &tr->events, list) { |
| 1899 | 1907 | ||
| 1900 | call = file->event_call; | 1908 | call = file->event_call; |
| 1909 | name = ftrace_event_name(call); | ||
| 1901 | 1910 | ||
| 1902 | if (!call->name || !call->class || !call->class->reg) | 1911 | if (!name || !call->class || !call->class->reg) |
| 1903 | continue; | 1912 | continue; |
| 1904 | 1913 | ||
| 1905 | if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) | 1914 | if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) |
| 1906 | continue; | 1915 | continue; |
| 1907 | 1916 | ||
| 1908 | if (strcmp(event, call->name) == 0 && | 1917 | if (strcmp(event, name) == 0 && |
| 1909 | strcmp(system, call->class->system) == 0) | 1918 | strcmp(system, call->class->system) == 0) |
| 1910 | return file; | 1919 | return file; |
| 1911 | } | 1920 | } |
| @@ -1973,7 +1982,7 @@ event_enable_print(struct seq_file *m, unsigned long ip, | |||
| 1973 | seq_printf(m, "%s:%s:%s", | 1982 | seq_printf(m, "%s:%s:%s", |
| 1974 | data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR, | 1983 | data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR, |
| 1975 | data->file->event_call->class->system, | 1984 | data->file->event_call->class->system, |
| 1976 | data->file->event_call->name); | 1985 | ftrace_event_name(data->file->event_call)); |
| 1977 | 1986 | ||
| 1978 | if (data->count == -1) | 1987 | if (data->count == -1) |
| 1979 | seq_printf(m, ":unlimited\n"); | 1988 | seq_printf(m, ":unlimited\n"); |
| @@ -2193,7 +2202,7 @@ __trace_early_add_event_dirs(struct trace_array *tr) | |||
| 2193 | ret = event_create_dir(tr->event_dir, file); | 2202 | ret = event_create_dir(tr->event_dir, file); |
| 2194 | if (ret < 0) | 2203 | if (ret < 0) |
| 2195 | pr_warning("Could not create directory for event %s\n", | 2204 | pr_warning("Could not create directory for event %s\n", |
| 2196 | file->event_call->name); | 2205 | ftrace_event_name(file->event_call)); |
| 2197 | } | 2206 | } |
| 2198 | } | 2207 | } |
| 2199 | 2208 | ||
| @@ -2217,7 +2226,7 @@ __trace_early_add_events(struct trace_array *tr) | |||
| 2217 | ret = __trace_early_add_new_event(call, tr); | 2226 | ret = __trace_early_add_new_event(call, tr); |
| 2218 | if (ret < 0) | 2227 | if (ret < 0) |
| 2219 | pr_warning("Could not create early event %s\n", | 2228 | pr_warning("Could not create early event %s\n", |
| 2220 | call->name); | 2229 | ftrace_event_name(call)); |
| 2221 | } | 2230 | } |
| 2222 | } | 2231 | } |
| 2223 | 2232 | ||
| @@ -2549,7 +2558,7 @@ static __init void event_trace_self_tests(void) | |||
| 2549 | continue; | 2558 | continue; |
| 2550 | #endif | 2559 | #endif |
| 2551 | 2560 | ||
| 2552 | pr_info("Testing event %s: ", call->name); | 2561 | pr_info("Testing event %s: ", ftrace_event_name(call)); |
| 2553 | 2562 | ||
| 2554 | /* | 2563 | /* |
| 2555 | * If an event is already enabled, someone is using | 2564 | * If an event is already enabled, someone is using |
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index 8efbb69b04f0..925f537f07d1 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
| @@ -1095,7 +1095,7 @@ event_enable_trigger_print(struct seq_file *m, struct event_trigger_ops *ops, | |||
| 1095 | seq_printf(m, "%s:%s:%s", | 1095 | seq_printf(m, "%s:%s:%s", |
| 1096 | enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR, | 1096 | enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR, |
| 1097 | enable_data->file->event_call->class->system, | 1097 | enable_data->file->event_call->class->system, |
| 1098 | enable_data->file->event_call->name); | 1098 | ftrace_event_name(enable_data->file->event_call)); |
| 1099 | 1099 | ||
| 1100 | if (data->count == -1) | 1100 | if (data->count == -1) |
| 1101 | seq_puts(m, ":unlimited"); | 1101 | seq_puts(m, ":unlimited"); |
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index ee0a5098ac43..d4ddde28a81a 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
| @@ -173,9 +173,11 @@ struct ftrace_event_class __refdata event_class_ftrace_##call = { \ | |||
| 173 | }; \ | 173 | }; \ |
| 174 | \ | 174 | \ |
| 175 | struct ftrace_event_call __used event_##call = { \ | 175 | struct ftrace_event_call __used event_##call = { \ |
| 176 | .name = #call, \ | ||
| 177 | .event.type = etype, \ | ||
| 178 | .class = &event_class_ftrace_##call, \ | 176 | .class = &event_class_ftrace_##call, \ |
| 177 | { \ | ||
| 178 | .name = #call, \ | ||
| 179 | }, \ | ||
| 180 | .event.type = etype, \ | ||
| 179 | .print_fmt = print, \ | 181 | .print_fmt = print, \ |
| 180 | .flags = TRACE_EVENT_FL_IGNORE_ENABLE | TRACE_EVENT_FL_USE_CALL_FILTER, \ | 182 | .flags = TRACE_EVENT_FL_IGNORE_ENABLE | TRACE_EVENT_FL_USE_CALL_FILTER, \ |
| 181 | }; \ | 183 | }; \ |
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index d021d21dd150..903ae28962be 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
| @@ -341,7 +341,7 @@ static struct trace_kprobe *find_trace_kprobe(const char *event, | |||
| 341 | struct trace_kprobe *tk; | 341 | struct trace_kprobe *tk; |
| 342 | 342 | ||
| 343 | list_for_each_entry(tk, &probe_list, list) | 343 | list_for_each_entry(tk, &probe_list, list) |
| 344 | if (strcmp(tk->tp.call.name, event) == 0 && | 344 | if (strcmp(ftrace_event_name(&tk->tp.call), event) == 0 && |
| 345 | strcmp(tk->tp.call.class->system, group) == 0) | 345 | strcmp(tk->tp.call.class->system, group) == 0) |
| 346 | return tk; | 346 | return tk; |
| 347 | return NULL; | 347 | return NULL; |
| @@ -516,7 +516,8 @@ static int register_trace_kprobe(struct trace_kprobe *tk) | |||
| 516 | mutex_lock(&probe_lock); | 516 | mutex_lock(&probe_lock); |
| 517 | 517 | ||
| 518 | /* Delete old (same name) event if exist */ | 518 | /* Delete old (same name) event if exist */ |
| 519 | old_tk = find_trace_kprobe(tk->tp.call.name, tk->tp.call.class->system); | 519 | old_tk = find_trace_kprobe(ftrace_event_name(&tk->tp.call), |
| 520 | tk->tp.call.class->system); | ||
| 520 | if (old_tk) { | 521 | if (old_tk) { |
| 521 | ret = unregister_trace_kprobe(old_tk); | 522 | ret = unregister_trace_kprobe(old_tk); |
| 522 | if (ret < 0) | 523 | if (ret < 0) |
| @@ -564,7 +565,8 @@ static int trace_kprobe_module_callback(struct notifier_block *nb, | |||
| 564 | if (ret) | 565 | if (ret) |
| 565 | pr_warning("Failed to re-register probe %s on" | 566 | pr_warning("Failed to re-register probe %s on" |
| 566 | "%s: %d\n", | 567 | "%s: %d\n", |
| 567 | tk->tp.call.name, mod->name, ret); | 568 | ftrace_event_name(&tk->tp.call), |
| 569 | mod->name, ret); | ||
| 568 | } | 570 | } |
| 569 | } | 571 | } |
| 570 | mutex_unlock(&probe_lock); | 572 | mutex_unlock(&probe_lock); |
| @@ -818,7 +820,8 @@ static int probes_seq_show(struct seq_file *m, void *v) | |||
| 818 | int i; | 820 | int i; |
| 819 | 821 | ||
| 820 | seq_printf(m, "%c", trace_kprobe_is_return(tk) ? 'r' : 'p'); | 822 | seq_printf(m, "%c", trace_kprobe_is_return(tk) ? 'r' : 'p'); |
| 821 | seq_printf(m, ":%s/%s", tk->tp.call.class->system, tk->tp.call.name); | 823 | seq_printf(m, ":%s/%s", tk->tp.call.class->system, |
| 824 | ftrace_event_name(&tk->tp.call)); | ||
| 822 | 825 | ||
| 823 | if (!tk->symbol) | 826 | if (!tk->symbol) |
| 824 | seq_printf(m, " 0x%p", tk->rp.kp.addr); | 827 | seq_printf(m, " 0x%p", tk->rp.kp.addr); |
| @@ -876,7 +879,8 @@ static int probes_profile_seq_show(struct seq_file *m, void *v) | |||
| 876 | { | 879 | { |
| 877 | struct trace_kprobe *tk = v; | 880 | struct trace_kprobe *tk = v; |
| 878 | 881 | ||
| 879 | seq_printf(m, " %-44s %15lu %15lu\n", tk->tp.call.name, tk->nhit, | 882 | seq_printf(m, " %-44s %15lu %15lu\n", |
| 883 | ftrace_event_name(&tk->tp.call), tk->nhit, | ||
| 880 | tk->rp.kp.nmissed); | 884 | tk->rp.kp.nmissed); |
| 881 | 885 | ||
| 882 | return 0; | 886 | return 0; |
| @@ -1011,7 +1015,7 @@ print_kprobe_event(struct trace_iterator *iter, int flags, | |||
| 1011 | field = (struct kprobe_trace_entry_head *)iter->ent; | 1015 | field = (struct kprobe_trace_entry_head *)iter->ent; |
| 1012 | tp = container_of(event, struct trace_probe, call.event); | 1016 | tp = container_of(event, struct trace_probe, call.event); |
| 1013 | 1017 | ||
| 1014 | if (!trace_seq_printf(s, "%s: (", tp->call.name)) | 1018 | if (!trace_seq_printf(s, "%s: (", ftrace_event_name(&tp->call))) |
| 1015 | goto partial; | 1019 | goto partial; |
| 1016 | 1020 | ||
| 1017 | if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET)) | 1021 | if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET)) |
| @@ -1047,7 +1051,7 @@ print_kretprobe_event(struct trace_iterator *iter, int flags, | |||
| 1047 | field = (struct kretprobe_trace_entry_head *)iter->ent; | 1051 | field = (struct kretprobe_trace_entry_head *)iter->ent; |
| 1048 | tp = container_of(event, struct trace_probe, call.event); | 1052 | tp = container_of(event, struct trace_probe, call.event); |
| 1049 | 1053 | ||
| 1050 | if (!trace_seq_printf(s, "%s: (", tp->call.name)) | 1054 | if (!trace_seq_printf(s, "%s: (", ftrace_event_name(&tp->call))) |
| 1051 | goto partial; | 1055 | goto partial; |
| 1052 | 1056 | ||
| 1053 | if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET)) | 1057 | if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET)) |
| @@ -1286,7 +1290,8 @@ static int register_kprobe_event(struct trace_kprobe *tk) | |||
| 1286 | call->data = tk; | 1290 | call->data = tk; |
| 1287 | ret = trace_add_event_call(call); | 1291 | ret = trace_add_event_call(call); |
| 1288 | if (ret) { | 1292 | if (ret) { |
| 1289 | pr_info("Failed to register kprobe event: %s\n", call->name); | 1293 | pr_info("Failed to register kprobe event: %s\n", |
| 1294 | ftrace_event_name(call)); | ||
| 1290 | kfree(call->print_fmt); | 1295 | kfree(call->print_fmt); |
| 1291 | unregister_ftrace_event(&call->event); | 1296 | unregister_ftrace_event(&call->event); |
| 1292 | } | 1297 | } |
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index ca0e79e2abaa..a436de18aa99 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
| @@ -431,7 +431,7 @@ int ftrace_raw_output_prep(struct trace_iterator *iter, | |||
| 431 | } | 431 | } |
| 432 | 432 | ||
| 433 | trace_seq_init(p); | 433 | trace_seq_init(p); |
| 434 | ret = trace_seq_printf(s, "%s: ", event->name); | 434 | ret = trace_seq_printf(s, "%s: ", ftrace_event_name(event)); |
| 435 | if (!ret) | 435 | if (!ret) |
| 436 | return TRACE_TYPE_PARTIAL_LINE; | 436 | return TRACE_TYPE_PARTIAL_LINE; |
| 437 | 437 | ||
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index e4473367e7a4..930e51462dc8 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
| @@ -294,7 +294,7 @@ static struct trace_uprobe *find_probe_event(const char *event, const char *grou | |||
| 294 | struct trace_uprobe *tu; | 294 | struct trace_uprobe *tu; |
| 295 | 295 | ||
| 296 | list_for_each_entry(tu, &uprobe_list, list) | 296 | list_for_each_entry(tu, &uprobe_list, list) |
| 297 | if (strcmp(tu->tp.call.name, event) == 0 && | 297 | if (strcmp(ftrace_event_name(&tu->tp.call), event) == 0 && |
| 298 | strcmp(tu->tp.call.class->system, group) == 0) | 298 | strcmp(tu->tp.call.class->system, group) == 0) |
| 299 | return tu; | 299 | return tu; |
| 300 | 300 | ||
| @@ -324,7 +324,8 @@ static int register_trace_uprobe(struct trace_uprobe *tu) | |||
| 324 | mutex_lock(&uprobe_lock); | 324 | mutex_lock(&uprobe_lock); |
| 325 | 325 | ||
| 326 | /* register as an event */ | 326 | /* register as an event */ |
| 327 | old_tu = find_probe_event(tu->tp.call.name, tu->tp.call.class->system); | 327 | old_tu = find_probe_event(ftrace_event_name(&tu->tp.call), |
| 328 | tu->tp.call.class->system); | ||
| 328 | if (old_tu) { | 329 | if (old_tu) { |
| 329 | /* delete old event */ | 330 | /* delete old event */ |
| 330 | ret = unregister_trace_uprobe(old_tu); | 331 | ret = unregister_trace_uprobe(old_tu); |
| @@ -599,7 +600,8 @@ static int probes_seq_show(struct seq_file *m, void *v) | |||
| 599 | char c = is_ret_probe(tu) ? 'r' : 'p'; | 600 | char c = is_ret_probe(tu) ? 'r' : 'p'; |
| 600 | int i; | 601 | int i; |
| 601 | 602 | ||
| 602 | seq_printf(m, "%c:%s/%s", c, tu->tp.call.class->system, tu->tp.call.name); | 603 | seq_printf(m, "%c:%s/%s", c, tu->tp.call.class->system, |
| 604 | ftrace_event_name(&tu->tp.call)); | ||
| 603 | seq_printf(m, " %s:0x%p", tu->filename, (void *)tu->offset); | 605 | seq_printf(m, " %s:0x%p", tu->filename, (void *)tu->offset); |
| 604 | 606 | ||
| 605 | for (i = 0; i < tu->tp.nr_args; i++) | 607 | for (i = 0; i < tu->tp.nr_args; i++) |
| @@ -649,7 +651,8 @@ static int probes_profile_seq_show(struct seq_file *m, void *v) | |||
| 649 | { | 651 | { |
| 650 | struct trace_uprobe *tu = v; | 652 | struct trace_uprobe *tu = v; |
| 651 | 653 | ||
| 652 | seq_printf(m, " %s %-44s %15lu\n", tu->filename, tu->tp.call.name, tu->nhit); | 654 | seq_printf(m, " %s %-44s %15lu\n", tu->filename, |
| 655 | ftrace_event_name(&tu->tp.call), tu->nhit); | ||
| 653 | return 0; | 656 | return 0; |
| 654 | } | 657 | } |
| 655 | 658 | ||
| @@ -844,12 +847,14 @@ print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *e | |||
| 844 | tu = container_of(event, struct trace_uprobe, tp.call.event); | 847 | tu = container_of(event, struct trace_uprobe, tp.call.event); |
| 845 | 848 | ||
| 846 | if (is_ret_probe(tu)) { | 849 | if (is_ret_probe(tu)) { |
| 847 | if (!trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)", tu->tp.call.name, | 850 | if (!trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)", |
| 851 | ftrace_event_name(&tu->tp.call), | ||
| 848 | entry->vaddr[1], entry->vaddr[0])) | 852 | entry->vaddr[1], entry->vaddr[0])) |
| 849 | goto partial; | 853 | goto partial; |
| 850 | data = DATAOF_TRACE_ENTRY(entry, true); | 854 | data = DATAOF_TRACE_ENTRY(entry, true); |
| 851 | } else { | 855 | } else { |
| 852 | if (!trace_seq_printf(s, "%s: (0x%lx)", tu->tp.call.name, | 856 | if (!trace_seq_printf(s, "%s: (0x%lx)", |
| 857 | ftrace_event_name(&tu->tp.call), | ||
| 853 | entry->vaddr[0])) | 858 | entry->vaddr[0])) |
| 854 | goto partial; | 859 | goto partial; |
| 855 | data = DATAOF_TRACE_ENTRY(entry, false); | 860 | data = DATAOF_TRACE_ENTRY(entry, false); |
| @@ -1275,7 +1280,8 @@ static int register_uprobe_event(struct trace_uprobe *tu) | |||
| 1275 | ret = trace_add_event_call(call); | 1280 | ret = trace_add_event_call(call); |
| 1276 | 1281 | ||
| 1277 | if (ret) { | 1282 | if (ret) { |
| 1278 | pr_info("Failed to register uprobe event: %s\n", call->name); | 1283 | pr_info("Failed to register uprobe event: %s\n", |
| 1284 | ftrace_event_name(call)); | ||
| 1279 | kfree(call->print_fmt); | 1285 | kfree(call->print_fmt); |
| 1280 | unregister_ftrace_event(&call->event); | 1286 | unregister_ftrace_event(&call->event); |
| 1281 | } | 1287 | } |
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index fb0a38a26555..ac5b23cf7212 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
| @@ -1,5 +1,5 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Copyright (C) 2008 Mathieu Desnoyers | 2 | * Copyright (C) 2008-2014 Mathieu Desnoyers |
| 3 | * | 3 | * |
| 4 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
| 5 | * it under the terms of the GNU General Public License as published by | 5 | * it under the terms of the GNU General Public License as published by |
| @@ -33,39 +33,27 @@ extern struct tracepoint * const __stop___tracepoints_ptrs[]; | |||
| 33 | /* Set to 1 to enable tracepoint debug output */ | 33 | /* Set to 1 to enable tracepoint debug output */ |
| 34 | static const int tracepoint_debug; | 34 | static const int tracepoint_debug; |
| 35 | 35 | ||
| 36 | #ifdef CONFIG_MODULES | ||
| 36 | /* | 37 | /* |
| 37 | * Tracepoints mutex protects the builtin and module tracepoints and the hash | 38 | * Tracepoint module list mutex protects the local module list. |
| 38 | * table, as well as the local module list. | ||
| 39 | */ | 39 | */ |
| 40 | static DEFINE_MUTEX(tracepoints_mutex); | 40 | static DEFINE_MUTEX(tracepoint_module_list_mutex); |
| 41 | 41 | ||
| 42 | #ifdef CONFIG_MODULES | 42 | /* Local list of struct tp_module */ |
| 43 | /* Local list of struct module */ | ||
| 44 | static LIST_HEAD(tracepoint_module_list); | 43 | static LIST_HEAD(tracepoint_module_list); |
| 45 | #endif /* CONFIG_MODULES */ | 44 | #endif /* CONFIG_MODULES */ |
| 46 | 45 | ||
| 47 | /* | 46 | /* |
| 48 | * Tracepoint hash table, containing the active tracepoints. | 47 | * tracepoints_mutex protects the builtin and module tracepoints. |
| 49 | * Protected by tracepoints_mutex. | 48 | * tracepoints_mutex nests inside tracepoint_module_list_mutex. |
| 50 | */ | 49 | */ |
| 51 | #define TRACEPOINT_HASH_BITS 6 | 50 | static DEFINE_MUTEX(tracepoints_mutex); |
| 52 | #define TRACEPOINT_TABLE_SIZE (1 << TRACEPOINT_HASH_BITS) | ||
| 53 | static struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE]; | ||
| 54 | 51 | ||
| 55 | /* | 52 | /* |
| 56 | * Note about RCU : | 53 | * Note about RCU : |
| 57 | * It is used to delay the free of multiple probes array until a quiescent | 54 | * It is used to delay the free of multiple probes array until a quiescent |
| 58 | * state is reached. | 55 | * state is reached. |
| 59 | * Tracepoint entries modifications are protected by the tracepoints_mutex. | ||
| 60 | */ | 56 | */ |
| 61 | struct tracepoint_entry { | ||
| 62 | struct hlist_node hlist; | ||
| 63 | struct tracepoint_func *funcs; | ||
| 64 | int refcount; /* Number of times armed. 0 if disarmed. */ | ||
| 65 | int enabled; /* Tracepoint enabled */ | ||
| 66 | char name[0]; | ||
| 67 | }; | ||
| 68 | |||
| 69 | struct tp_probes { | 57 | struct tp_probes { |
| 70 | struct rcu_head rcu; | 58 | struct rcu_head rcu; |
| 71 | struct tracepoint_func probes[0]; | 59 | struct tracepoint_func probes[0]; |
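
Note: with the name hash table and struct tracepoint_entry removed, there is no longer a by-name registration path; code that only knows a tracepoint's name is expected to walk the core-kernel tracepoint section with for_each_kernel_tracepoint() and compare names itself. A hedged sketch of such a lookup (the helper and struct names are made up for illustration):

#include <linux/tracepoint.h>
#include <linux/string.h>

struct tp_lookup {
        const char *name;
        struct tracepoint *found;
};

static void tp_lookup_cb(struct tracepoint *tp, void *priv)
{
        struct tp_lookup *lookup = priv;

        if (!lookup->found && !strcmp(tp->name, lookup->name))
                lookup->found = tp;
}

/* Returns the built-in tracepoint with the given name, or NULL; module
 * tracepoints are not covered by this iterator. */
static struct tracepoint *find_kernel_tracepoint(const char *name)
{
        struct tp_lookup lookup = { .name = name, .found = NULL };

        for_each_kernel_tracepoint(tp_lookup_cb, &lookup);
        return lookup.found;
}
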
| @@ -92,34 +80,33 @@ static inline void release_probes(struct tracepoint_func *old) | |||
| 92 | } | 80 | } |
| 93 | } | 81 | } |
| 94 | 82 | ||
| 95 | static void debug_print_probes(struct tracepoint_entry *entry) | 83 | static void debug_print_probes(struct tracepoint_func *funcs) |
| 96 | { | 84 | { |
| 97 | int i; | 85 | int i; |
| 98 | 86 | ||
| 99 | if (!tracepoint_debug || !entry->funcs) | 87 | if (!tracepoint_debug || !funcs) |
| 100 | return; | 88 | return; |
| 101 | 89 | ||
| 102 | for (i = 0; entry->funcs[i].func; i++) | 90 | for (i = 0; funcs[i].func; i++) |
| 103 | printk(KERN_DEBUG "Probe %d : %p\n", i, entry->funcs[i].func); | 91 | printk(KERN_DEBUG "Probe %d : %p\n", i, funcs[i].func); |
| 104 | } | 92 | } |
| 105 | 93 | ||
| 106 | static struct tracepoint_func * | 94 | static struct tracepoint_func *func_add(struct tracepoint_func **funcs, |
| 107 | tracepoint_entry_add_probe(struct tracepoint_entry *entry, | 95 | struct tracepoint_func *tp_func) |
| 108 | void *probe, void *data) | ||
| 109 | { | 96 | { |
| 110 | int nr_probes = 0; | 97 | int nr_probes = 0; |
| 111 | struct tracepoint_func *old, *new; | 98 | struct tracepoint_func *old, *new; |
| 112 | 99 | ||
| 113 | if (WARN_ON(!probe)) | 100 | if (WARN_ON(!tp_func->func)) |
| 114 | return ERR_PTR(-EINVAL); | 101 | return ERR_PTR(-EINVAL); |
| 115 | 102 | ||
| 116 | debug_print_probes(entry); | 103 | debug_print_probes(*funcs); |
| 117 | old = entry->funcs; | 104 | old = *funcs; |
| 118 | if (old) { | 105 | if (old) { |
| 119 | /* (N -> N+1), (N != 0, 1) probes */ | 106 | /* (N -> N+1), (N != 0, 1) probes */ |
| 120 | for (nr_probes = 0; old[nr_probes].func; nr_probes++) | 107 | for (nr_probes = 0; old[nr_probes].func; nr_probes++) |
| 121 | if (old[nr_probes].func == probe && | 108 | if (old[nr_probes].func == tp_func->func && |
| 122 | old[nr_probes].data == data) | 109 | old[nr_probes].data == tp_func->data) |
| 123 | return ERR_PTR(-EEXIST); | 110 | return ERR_PTR(-EEXIST); |
| 124 | } | 111 | } |
| 125 | /* + 2 : one for new probe, one for NULL func */ | 112 | /* + 2 : one for new probe, one for NULL func */ |
| @@ -128,33 +115,30 @@ tracepoint_entry_add_probe(struct tracepoint_entry *entry, | |||
| 128 | return ERR_PTR(-ENOMEM); | 115 | return ERR_PTR(-ENOMEM); |
| 129 | if (old) | 116 | if (old) |
| 130 | memcpy(new, old, nr_probes * sizeof(struct tracepoint_func)); | 117 | memcpy(new, old, nr_probes * sizeof(struct tracepoint_func)); |
| 131 | new[nr_probes].func = probe; | 118 | new[nr_probes] = *tp_func; |
| 132 | new[nr_probes].data = data; | ||
| 133 | new[nr_probes + 1].func = NULL; | 119 | new[nr_probes + 1].func = NULL; |
| 134 | entry->refcount = nr_probes + 1; | 120 | *funcs = new; |
| 135 | entry->funcs = new; | 121 | debug_print_probes(*funcs); |
| 136 | debug_print_probes(entry); | ||
| 137 | return old; | 122 | return old; |
| 138 | } | 123 | } |
| 139 | 124 | ||
| 140 | static void * | 125 | static void *func_remove(struct tracepoint_func **funcs, |
| 141 | tracepoint_entry_remove_probe(struct tracepoint_entry *entry, | 126 | struct tracepoint_func *tp_func) |
| 142 | void *probe, void *data) | ||
| 143 | { | 127 | { |
| 144 | int nr_probes = 0, nr_del = 0, i; | 128 | int nr_probes = 0, nr_del = 0, i; |
| 145 | struct tracepoint_func *old, *new; | 129 | struct tracepoint_func *old, *new; |
| 146 | 130 | ||
| 147 | old = entry->funcs; | 131 | old = *funcs; |
| 148 | 132 | ||
| 149 | if (!old) | 133 | if (!old) |
| 150 | return ERR_PTR(-ENOENT); | 134 | return ERR_PTR(-ENOENT); |
| 151 | 135 | ||
| 152 | debug_print_probes(entry); | 136 | debug_print_probes(*funcs); |
| 153 | /* (N -> M), (N > 1, M >= 0) probes */ | 137 | /* (N -> M), (N > 1, M >= 0) probes */ |
| 154 | if (probe) { | 138 | if (tp_func->func) { |
| 155 | for (nr_probes = 0; old[nr_probes].func; nr_probes++) { | 139 | for (nr_probes = 0; old[nr_probes].func; nr_probes++) { |
| 156 | if (old[nr_probes].func == probe && | 140 | if (old[nr_probes].func == tp_func->func && |
| 157 | old[nr_probes].data == data) | 141 | old[nr_probes].data == tp_func->data) |
| 158 | nr_del++; | 142 | nr_del++; |
| 159 | } | 143 | } |
| 160 | } | 144 | } |
| @@ -165,9 +149,8 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry, | |||
| 165 | */ | 149 | */ |
| 166 | if (nr_probes - nr_del == 0) { | 150 | if (nr_probes - nr_del == 0) { |
| 167 | /* N -> 0, (N > 1) */ | 151 | /* N -> 0, (N > 1) */ |
| 168 | entry->funcs = NULL; | 152 | *funcs = NULL; |
| 169 | entry->refcount = 0; | 153 | debug_print_probes(*funcs); |
| 170 | debug_print_probes(entry); | ||
| 171 | return old; | 154 | return old; |
| 172 | } else { | 155 | } else { |
| 173 | int j = 0; | 156 | int j = 0; |
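
Note: the probe arrays that func_add()/func_remove() maintain are consumed lock-free by the tracepoint call site, which is why removal defers freeing through release_probes() and RCU above. A simplified, hedged sketch of that consumer loop, paraphrased from the __DO_TRACE() machinery in include/linux/tracepoint.h, with a concrete int argument standing in for the real prototype:

#include <linux/tracepoint.h>
#include <linux/rcupdate.h>

/* Roughly what an enabled trace_foo(int val) call site does; the static
 * key check and the real macro plumbing are omitted. */
static inline void trace_foo_sketch(struct tracepoint *tp, int val)
{
        struct tracepoint_func *it_func_ptr;
        void *it_func;
        void *__data;

        rcu_read_lock_sched_notrace();
        it_func_ptr = rcu_dereference_sched(tp->funcs);
        if (it_func_ptr) {
                do {
                        it_func = (it_func_ptr)->func;
                        __data = (it_func_ptr)->data;
                        ((void (*)(void *, int))it_func)(__data, val);
                } while ((++it_func_ptr)->func);
        }
        rcu_read_unlock_sched_notrace();
}
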
| @@ -177,91 +160,35 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry, | |||
| 177 | if (new == NULL) | 160 | if (new == NULL) |
| 178 | return ERR_PTR(-ENOMEM); | 161 | return ERR_PTR(-ENOMEM); |
| 179 | for (i = 0; old[i].func; i++) | 162 | for (i = 0; old[i].func; i++) |
| 180 | if (old[i].func != probe || old[i].data != data) | 163 | if (old[i].func != tp_func->func |
| 164 | || old[i].data != tp_func->data) | ||
| 181 | new[j++] = old[i]; | 165 | new[j++] = old[i]; |
| 182 | new[nr_probes - nr_del].func = NULL; | 166 | new[nr_probes - nr_del].func = NULL; |
| 183 | entry->refcount = nr_probes - nr_del; | 167 | *funcs = new; |
| 184 | entry->funcs = new; | ||
| 185 | } | 168 | } |
| 186 | debug_print_probes(entry); | 169 | debug_print_probes(*funcs); |
| 187 | return old; | 170 | return old; |
| 188 | } | 171 | } |
| 189 | 172 | ||
| 190 | /* | 173 | /* |
| 191 | * Get tracepoint if the tracepoint is present in the tracepoint hash table. | 174 | * Add the probe function to a tracepoint. |
| 192 | * Must be called with tracepoints_mutex held. | ||
| 193 | * Returns NULL if not present. | ||
| 194 | */ | 175 | */ |
| 195 | static struct tracepoint_entry *get_tracepoint(const char *name) | 176 | static int tracepoint_add_func(struct tracepoint *tp, |
| 177 | struct tracepoint_func *func) | ||
| 196 | { | 178 | { |
| 197 | struct hlist_head *head; | 179 | struct tracepoint_func *old, *tp_funcs; |
| 198 | struct tracepoint_entry *e; | ||
| 199 | u32 hash = jhash(name, strlen(name), 0); | ||
| 200 | |||
| 201 | head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)]; | ||
| 202 | hlist_for_each_entry(e, head, hlist) { | ||
| 203 | if (!strcmp(name, e->name)) | ||
| 204 | return e; | ||
| 205 | } | ||
| 206 | return NULL; | ||
| 207 | } | ||
| 208 | 180 | ||
| 209 | /* | 181 | if (tp->regfunc && !static_key_enabled(&tp->key)) |
| 210 | * Add the tracepoint to the tracepoint hash table. Must be called with | 182 | tp->regfunc(); |
| 211 | * tracepoints_mutex held. | ||
| 212 | */ | ||
| 213 | static struct tracepoint_entry *add_tracepoint(const char *name) | ||
| 214 | { | ||
| 215 | struct hlist_head *head; | ||
| 216 | struct tracepoint_entry *e; | ||
| 217 | size_t name_len = strlen(name) + 1; | ||
| 218 | u32 hash = jhash(name, name_len-1, 0); | ||
| 219 | |||
| 220 | head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)]; | ||
| 221 | hlist_for_each_entry(e, head, hlist) { | ||
| 222 | if (!strcmp(name, e->name)) { | ||
| 223 | printk(KERN_NOTICE | ||
| 224 | "tracepoint %s busy\n", name); | ||
| 225 | return ERR_PTR(-EEXIST); /* Already there */ | ||
| 226 | } | ||
| 227 | } | ||
| 228 | /* | ||
| 229 | * Using kmalloc here to allocate a variable length element. Could | ||
| 230 | * cause some memory fragmentation if overused. | ||
| 231 | */ | ||
| 232 | e = kmalloc(sizeof(struct tracepoint_entry) + name_len, GFP_KERNEL); | ||
| 233 | if (!e) | ||
| 234 | return ERR_PTR(-ENOMEM); | ||
| 235 | memcpy(&e->name[0], name, name_len); | ||
| 236 | e->funcs = NULL; | ||
| 237 | e->refcount = 0; | ||
| 238 | e->enabled = 0; | ||
| 239 | hlist_add_head(&e->hlist, head); | ||
| 240 | return e; | ||
| 241 | } | ||
| 242 | 183 | ||
| 243 | /* | 184 | tp_funcs = rcu_dereference_protected(tp->funcs, |
| 244 | * Remove the tracepoint from the tracepoint hash table. Must be called with | 185 | lockdep_is_held(&tracepoints_mutex)); |
| 245 | * mutex_lock held. | 186 | old = func_add(&tp_funcs, func); |
| 246 | */ | 187 | if (IS_ERR(old)) { |
| 247 | static inline void remove_tracepoint(struct tracepoint_entry *e) | 188 | WARN_ON_ONCE(1); |
| 248 | { | 189 | return PTR_ERR(old); |
| 249 | hlist_del(&e->hlist); | 190 | } |
| 250 | kfree(e); | 191 | release_probes(old); |
| 251 | } | ||
| 252 | |||
| 253 | /* | ||
| 254 | * Sets the probe callback corresponding to one tracepoint. | ||
| 255 | */ | ||
| 256 | static void set_tracepoint(struct tracepoint_entry **entry, | ||
| 257 | struct tracepoint *elem, int active) | ||
| 258 | { | ||
| 259 | WARN_ON(strcmp((*entry)->name, elem->name) != 0); | ||
| 260 | |||
| 261 | if (elem->regfunc && !static_key_enabled(&elem->key) && active) | ||
| 262 | elem->regfunc(); | ||
| 263 | else if (elem->unregfunc && static_key_enabled(&elem->key) && !active) | ||
| 264 | elem->unregfunc(); | ||
| 265 | 192 | ||
| 266 | /* | 193 | /* |
| 267 | * rcu_assign_pointer has a smp_wmb() which makes sure that the new | 194 | * rcu_assign_pointer has a smp_wmb() which makes sure that the new |
| @@ -270,193 +197,90 @@ static void set_tracepoint(struct tracepoint_entry **entry, | |||
| 270 | * include/linux/tracepoints.h. A matching smp_read_barrier_depends() | 197 | * include/linux/tracepoints.h. A matching smp_read_barrier_depends() |
| 271 | * is used. | 198 | * is used. |
| 272 | */ | 199 | */ |
| 273 | rcu_assign_pointer(elem->funcs, (*entry)->funcs); | 200 | rcu_assign_pointer(tp->funcs, tp_funcs); |
| 274 | if (active && !static_key_enabled(&elem->key)) | 201 | if (!static_key_enabled(&tp->key)) |
| 275 | static_key_slow_inc(&elem->key); | 202 | static_key_slow_inc(&tp->key); |
| 276 | else if (!active && static_key_enabled(&elem->key)) | 203 | return 0; |
| 277 | static_key_slow_dec(&elem->key); | ||
| 278 | } | 204 | } |
| 279 | 205 | ||
| 280 | /* | 206 | /* |
| 281 | * Disable a tracepoint and its probe callback. | 207 | * Remove a probe function from a tracepoint. |
| 282 | * Note: only waiting an RCU period after setting elem->call to the empty | 208 | * Note: only waiting an RCU period after setting elem->call to the empty |
| 283 | * function insures that the original callback is not used anymore. This insured | 209 | * function insures that the original callback is not used anymore. This insured |
| 284 | * by preempt_disable around the call site. | 210 | * by preempt_disable around the call site. |
| 285 | */ | 211 | */ |
| 286 | static void disable_tracepoint(struct tracepoint *elem) | 212 | static int tracepoint_remove_func(struct tracepoint *tp, |
| 213 | struct tracepoint_func *func) | ||
| 287 | { | 214 | { |
| 288 | if (elem->unregfunc && static_key_enabled(&elem->key)) | 215 | struct tracepoint_func *old, *tp_funcs; |
| 289 | elem->unregfunc(); | ||
| 290 | |||
| 291 | if (static_key_enabled(&elem->key)) | ||
| 292 | static_key_slow_dec(&elem->key); | ||
| 293 | rcu_assign_pointer(elem->funcs, NULL); | ||
| 294 | } | ||
| 295 | 216 | ||
| 296 | /** | 217 | tp_funcs = rcu_dereference_protected(tp->funcs, |
| 297 | * tracepoint_update_probe_range - Update a probe range | 218 | lockdep_is_held(&tracepoints_mutex)); |
| 298 | * @begin: beginning of the range | 219 | old = func_remove(&tp_funcs, func); |
| 299 | * @end: end of the range | 220 | if (IS_ERR(old)) { |
| 300 | * | 221 | WARN_ON_ONCE(1); |
| 301 | * Updates the probe callback corresponding to a range of tracepoints. | 222 | return PTR_ERR(old); |
| 302 | * Called with tracepoints_mutex held. | ||
| 303 | */ | ||
| 304 | static void tracepoint_update_probe_range(struct tracepoint * const *begin, | ||
| 305 | struct tracepoint * const *end) | ||
| 306 | { | ||
| 307 | struct tracepoint * const *iter; | ||
| 308 | struct tracepoint_entry *mark_entry; | ||
| 309 | |||
| 310 | if (!begin) | ||
| 311 | return; | ||
| 312 | |||
| 313 | for (iter = begin; iter < end; iter++) { | ||
| 314 | mark_entry = get_tracepoint((*iter)->name); | ||
| 315 | if (mark_entry) { | ||
| 316 | set_tracepoint(&mark_entry, *iter, | ||
| 317 | !!mark_entry->refcount); | ||
| 318 | mark_entry->enabled = !!mark_entry->refcount; | ||
| 319 | } else { | ||
| 320 | disable_tracepoint(*iter); | ||
| 321 | } | ||
| 322 | } | 223 | } |
| 323 | } | 224 | release_probes(old); |
| 324 | |||
| 325 | #ifdef CONFIG_MODULES | ||
| 326 | void module_update_tracepoints(void) | ||
| 327 | { | ||
| 328 | struct tp_module *tp_mod; | ||
| 329 | |||
| 330 | list_for_each_entry(tp_mod, &tracepoint_module_list, list) | ||
| 331 | tracepoint_update_probe_range(tp_mod->tracepoints_ptrs, | ||
| 332 | tp_mod->tracepoints_ptrs + tp_mod->num_tracepoints); | ||
| 333 | } | ||
| 334 | #else /* CONFIG_MODULES */ | ||
| 335 | void module_update_tracepoints(void) | ||
| 336 | { | ||
| 337 | } | ||
| 338 | #endif /* CONFIG_MODULES */ | ||
| 339 | 225 | ||
| 226 | if (!tp_funcs) { | ||
| 227 | /* Removed last function */ | ||
| 228 | if (tp->unregfunc && static_key_enabled(&tp->key)) | ||
| 229 | tp->unregfunc(); | ||
| 340 | 230 | ||
| 341 | /* | 231 | if (static_key_enabled(&tp->key)) |
| 342 | * Update probes, removing the faulty probes. | 232 | static_key_slow_dec(&tp->key); |
| 343 | * Called with tracepoints_mutex held. | ||
| 344 | */ | ||
| 345 | static void tracepoint_update_probes(void) | ||
| 346 | { | ||
| 347 | /* Core kernel tracepoints */ | ||
| 348 | tracepoint_update_probe_range(__start___tracepoints_ptrs, | ||
| 349 | __stop___tracepoints_ptrs); | ||
| 350 | /* tracepoints in modules. */ | ||
| 351 | module_update_tracepoints(); | ||
| 352 | } | ||
| 353 | |||
| 354 | static struct tracepoint_func * | ||
| 355 | tracepoint_add_probe(const char *name, void *probe, void *data) | ||
| 356 | { | ||
| 357 | struct tracepoint_entry *entry; | ||
| 358 | struct tracepoint_func *old; | ||
| 359 | |||
| 360 | entry = get_tracepoint(name); | ||
| 361 | if (!entry) { | ||
| 362 | entry = add_tracepoint(name); | ||
| 363 | if (IS_ERR(entry)) | ||
| 364 | return (struct tracepoint_func *)entry; | ||
| 365 | } | 233 | } |
| 366 | old = tracepoint_entry_add_probe(entry, probe, data); | 234 | rcu_assign_pointer(tp->funcs, tp_funcs); |
| 367 | if (IS_ERR(old) && !entry->refcount) | 235 | return 0; |
| 368 | remove_tracepoint(entry); | ||
| 369 | return old; | ||
| 370 | } | 236 | } |
| 371 | 237 | ||
| 372 | /** | 238 | /** |
| 373 | * tracepoint_probe_register - Connect a probe to a tracepoint | 239 | * tracepoint_probe_register - Connect a probe to a tracepoint |
| 374 | * @name: tracepoint name | 240 | * @tp: tracepoint |
| 375 | * @probe: probe handler | 241 | * @probe: probe handler |
| 376 | * @data: probe private data | ||
| 377 | * | ||
| 378 | * Returns: | ||
| 379 | * - 0 if the probe was successfully registered, and tracepoint | ||
| 380 | * callsites are currently loaded for that probe, | ||
| 381 | * - -ENODEV if the probe was successfully registered, but no tracepoint | ||
| 382 | * callsite is currently loaded for that probe, | ||
| 383 | * - other negative error value on error. | ||
| 384 | * | ||
| 385 | * When tracepoint_probe_register() returns either 0 or -ENODEV, | ||
| 386 | * parameters @name, @probe, and @data may be used by the tracepoint | ||
| 387 | * infrastructure until the probe is unregistered. | ||
| 388 | * | 242 | * |
| 389 | * The probe address must at least be aligned on the architecture pointer size. | 243 | * Returns 0 if ok, error value on error. |
| 244 | * Note: if @tp is within a module, the caller is responsible for | ||
| 245 | * unregistering the probe before the module is gone. This can be | ||
| 246 | * performed either with a tracepoint module going notifier, or from | ||
| 247 | * within module exit functions. | ||
| 390 | */ | 248 | */ |
| 391 | int tracepoint_probe_register(const char *name, void *probe, void *data) | 249 | int tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data) |
| 392 | { | 250 | { |
| 393 | struct tracepoint_func *old; | 251 | struct tracepoint_func tp_func; |
| 394 | struct tracepoint_entry *entry; | 252 | int ret; |
| 395 | int ret = 0; | ||
| 396 | 253 | ||
| 397 | mutex_lock(&tracepoints_mutex); | 254 | mutex_lock(&tracepoints_mutex); |
| 398 | old = tracepoint_add_probe(name, probe, data); | 255 | tp_func.func = probe; |
| 399 | if (IS_ERR(old)) { | 256 | tp_func.data = data; |
| 400 | mutex_unlock(&tracepoints_mutex); | 257 | ret = tracepoint_add_func(tp, &tp_func); |
| 401 | return PTR_ERR(old); | ||
| 402 | } | ||
| 403 | tracepoint_update_probes(); /* may update entry */ | ||
| 404 | entry = get_tracepoint(name); | ||
| 405 | /* Make sure the entry was enabled */ | ||
| 406 | if (!entry || !entry->enabled) | ||
| 407 | ret = -ENODEV; | ||
| 408 | mutex_unlock(&tracepoints_mutex); | 258 | mutex_unlock(&tracepoints_mutex); |
| 409 | release_probes(old); | ||
| 410 | return ret; | 259 | return ret; |
| 411 | } | 260 | } |
| 412 | EXPORT_SYMBOL_GPL(tracepoint_probe_register); | 261 | EXPORT_SYMBOL_GPL(tracepoint_probe_register); |
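With the name-based lookup gone, callers hand the struct tracepoint itself to the API. A minimal registration sketch under the new signature (the tracepoint "my_subsys_event", its int payload, and my_probe are hypothetical; DEFINE_TRACE() is what emits the __tracepoint_my_subsys_event symbol):

#include <linux/kernel.h>
#include <linux/tracepoint.h>

/* Probe prototype: registered data pointer first, then the event payload. */
static void my_probe(void *data, int value)
{
	pr_info("my_subsys_event fired: %d\n", value);
}

static int my_attach(void)
{
	/* the API takes the probe as an opaque pointer, hence the cast */
	return tracepoint_probe_register(&__tracepoint_my_subsys_event,
					 (void *)my_probe, NULL);
}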
| 413 | 262 | ||
| 414 | static struct tracepoint_func * | ||
| 415 | tracepoint_remove_probe(const char *name, void *probe, void *data) | ||
| 416 | { | ||
| 417 | struct tracepoint_entry *entry; | ||
| 418 | struct tracepoint_func *old; | ||
| 419 | |||
| 420 | entry = get_tracepoint(name); | ||
| 421 | if (!entry) | ||
| 422 | return ERR_PTR(-ENOENT); | ||
| 423 | old = tracepoint_entry_remove_probe(entry, probe, data); | ||
| 424 | if (IS_ERR(old)) | ||
| 425 | return old; | ||
| 426 | if (!entry->refcount) | ||
| 427 | remove_tracepoint(entry); | ||
| 428 | return old; | ||
| 429 | } | ||
| 430 | |||
| 431 | /** | 263 | /** |
| 432 | * tracepoint_probe_unregister - Disconnect a probe from a tracepoint | 264 | * tracepoint_probe_unregister - Disconnect a probe from a tracepoint |
| 433 | * @name: tracepoint name | 265 | * @tp: tracepoint |
| 434 | * @probe: probe function pointer | 266 | * @probe: probe function pointer |
| 435 | * @data: probe private data | ||
| 436 | * | 267 | * |
| 437 | * We do not need to call a synchronize_sched to make sure the probes have | 268 | * Returns 0 if ok, error value on error. |
| 438 | * finished running before doing a module unload, because the module unload | ||
| 439 | * itself uses stop_machine(), which insures that every preempt disabled section | ||
| 440 | * have finished. | ||
| 441 | */ | 269 | */ |
| 442 | int tracepoint_probe_unregister(const char *name, void *probe, void *data) | 270 | int tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data) |
| 443 | { | 271 | { |
| 444 | struct tracepoint_func *old; | 272 | struct tracepoint_func tp_func; |
| 273 | int ret; | ||
| 445 | 274 | ||
| 446 | mutex_lock(&tracepoints_mutex); | 275 | mutex_lock(&tracepoints_mutex); |
| 447 | old = tracepoint_remove_probe(name, probe, data); | 276 | tp_func.func = probe; |
| 448 | if (IS_ERR(old)) { | 277 | tp_func.data = data; |
| 449 | mutex_unlock(&tracepoints_mutex); | 278 | ret = tracepoint_remove_func(tp, &tp_func); |
| 450 | return PTR_ERR(old); | ||
| 451 | } | ||
| 452 | tracepoint_update_probes(); /* may update entry */ | ||
| 453 | mutex_unlock(&tracepoints_mutex); | 279 | mutex_unlock(&tracepoints_mutex); |
| 454 | release_probes(old); | 280 | return ret; |
| 455 | return 0; | ||
| 456 | } | 281 | } |
| 457 | EXPORT_SYMBOL_GPL(tracepoint_probe_unregister); | 282 | EXPORT_SYMBOL_GPL(tracepoint_probe_unregister); |
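Per the note on tracepoint_probe_register(), a module that attached probes must detach them before it disappears. A matching teardown sketch, reusing the hypothetical names from the registration example:

static void my_detach(void)
{
	tracepoint_probe_unregister(&__tracepoint_my_subsys_event,
				    (void *)my_probe, NULL);
	/* wait for in-flight probe calls before freeing any probe data */
	tracepoint_synchronize_unregister();
}

tracepoint_synchronize_unregister() is the existing helper for waiting out the sched-RCU grace period that protects probe callers.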
| 458 | 283 | ||
| 459 | |||
| 460 | #ifdef CONFIG_MODULES | 284 | #ifdef CONFIG_MODULES |
| 461 | bool trace_module_has_bad_taint(struct module *mod) | 285 | bool trace_module_has_bad_taint(struct module *mod) |
| 462 | { | 286 | { |
| @@ -464,6 +288,74 @@ bool trace_module_has_bad_taint(struct module *mod) | |||
| 464 | (1 << TAINT_UNSIGNED_MODULE)); | 288 | (1 << TAINT_UNSIGNED_MODULE)); |
| 465 | } | 289 | } |
| 466 | 290 | ||
| 291 | static BLOCKING_NOTIFIER_HEAD(tracepoint_notify_list); | ||
| 292 | |||
| 293 | /** | ||
| 294 | * register_tracepoint_module_notifier - register tracepoint coming/going notifier | ||
| 295 | * @nb: notifier block | ||
| 296 | * | ||
| 297 | * Notifiers registered with this function are called on module | ||
| 298 | * coming/going with the tracepoint_module_list_mutex held. | ||
| 299 | * The notifier block callback should expect a "struct tp_module" data | ||
| 300 | * pointer. | ||
| 301 | */ | ||
| 302 | int register_tracepoint_module_notifier(struct notifier_block *nb) | ||
| 303 | { | ||
| 304 | struct tp_module *tp_mod; | ||
| 305 | int ret; | ||
| 306 | |||
| 307 | mutex_lock(&tracepoint_module_list_mutex); | ||
| 308 | ret = blocking_notifier_chain_register(&tracepoint_notify_list, nb); | ||
| 309 | if (ret) | ||
| 310 | goto end; | ||
| 311 | list_for_each_entry(tp_mod, &tracepoint_module_list, list) | ||
| 312 | (void) nb->notifier_call(nb, MODULE_STATE_COMING, tp_mod); | ||
| 313 | end: | ||
| 314 | mutex_unlock(&tracepoint_module_list_mutex); | ||
| 315 | return ret; | ||
| 316 | } | ||
| 317 | EXPORT_SYMBOL_GPL(register_tracepoint_module_notifier); | ||
| 318 | |||
| 319 | /** | ||
| 320 | * unregister_tracepoint_module_notifier - unregister tracepoint coming/going notifier | ||
| 321 | * @nb: notifier block | ||
| 322 | * | ||
| 323 | * The notifier block callback should expect a "struct tp_module" data | ||
| 324 | * pointer. | ||
| 325 | */ | ||
| 326 | int unregister_tracepoint_module_notifier(struct notifier_block *nb) | ||
| 327 | { | ||
| 328 | struct tp_module *tp_mod; | ||
| 329 | int ret; | ||
| 330 | |||
| 331 | mutex_lock(&tracepoint_module_list_mutex); | ||
| 332 | ret = blocking_notifier_chain_unregister(&tracepoint_notify_list, nb); | ||
| 333 | if (ret) | ||
| 334 | goto end; | ||
| 335 | list_for_each_entry(tp_mod, &tracepoint_module_list, list) | ||
| 336 | (void) nb->notifier_call(nb, MODULE_STATE_GOING, tp_mod); | ||
| 337 | end: | ||
| 338 | mutex_unlock(&tracepoint_module_list_mutex); | ||
| 339 | return ret; | ||
| 340 | |||
| 341 | } | ||
| 342 | EXPORT_SYMBOL_GPL(unregister_tracepoint_module_notifier); | ||
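A tracer that tracks per-module tracepoints would typically hook these notifiers roughly as below (the callback and notifier_block names are illustrative):

static int my_tp_module_cb(struct notifier_block *nb,
			   unsigned long val, void *data)
{
	struct tp_module *tp_mod = data;

	switch (val) {
	case MODULE_STATE_COMING:
		/* tp_mod->mod identifies the module whose tracepoints arrived */
		break;
	case MODULE_STATE_GOING:
		/* detach any probes still hooked to that module's tracepoints */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_tp_module_nb = {
	.notifier_call = my_tp_module_cb,
};

/* somewhere in tracer init: register_tracepoint_module_notifier(&my_tp_module_nb); */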
| 343 | |||
| 344 | /* | ||
| 345 | * Ensure the tracer unregistered the module's probes before the module | ||
| 346 | * teardown is performed. Prevents leaks of probe and data pointers. | ||
| 347 | */ | ||
| 348 | static void tp_module_going_check_quiescent(struct tracepoint * const *begin, | ||
| 349 | struct tracepoint * const *end) | ||
| 350 | { | ||
| 351 | struct tracepoint * const *iter; | ||
| 352 | |||
| 353 | if (!begin) | ||
| 354 | return; | ||
| 355 | for (iter = begin; iter < end; iter++) | ||
| 356 | WARN_ON_ONCE((*iter)->funcs); | ||
| 357 | } | ||
| 358 | |||
| 467 | static int tracepoint_module_coming(struct module *mod) | 359 | static int tracepoint_module_coming(struct module *mod) |
| 468 | { | 360 | { |
| 469 | struct tp_module *tp_mod; | 361 | struct tp_module *tp_mod; |
| @@ -479,36 +371,41 @@ static int tracepoint_module_coming(struct module *mod) | |||
| 479 | */ | 371 | */ |
| 480 | if (trace_module_has_bad_taint(mod)) | 372 | if (trace_module_has_bad_taint(mod)) |
| 481 | return 0; | 373 | return 0; |
| 482 | mutex_lock(&tracepoints_mutex); | 374 | mutex_lock(&tracepoint_module_list_mutex); |
| 483 | tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL); | 375 | tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL); |
| 484 | if (!tp_mod) { | 376 | if (!tp_mod) { |
| 485 | ret = -ENOMEM; | 377 | ret = -ENOMEM; |
| 486 | goto end; | 378 | goto end; |
| 487 | } | 379 | } |
| 488 | tp_mod->num_tracepoints = mod->num_tracepoints; | 380 | tp_mod->mod = mod; |
| 489 | tp_mod->tracepoints_ptrs = mod->tracepoints_ptrs; | ||
| 490 | list_add_tail(&tp_mod->list, &tracepoint_module_list); | 381 | list_add_tail(&tp_mod->list, &tracepoint_module_list); |
| 491 | tracepoint_update_probe_range(mod->tracepoints_ptrs, | 382 | blocking_notifier_call_chain(&tracepoint_notify_list, |
| 492 | mod->tracepoints_ptrs + mod->num_tracepoints); | 383 | MODULE_STATE_COMING, tp_mod); |
| 493 | end: | 384 | end: |
| 494 | mutex_unlock(&tracepoints_mutex); | 385 | mutex_unlock(&tracepoint_module_list_mutex); |
| 495 | return ret; | 386 | return ret; |
| 496 | } | 387 | } |
| 497 | 388 | ||
| 498 | static int tracepoint_module_going(struct module *mod) | 389 | static void tracepoint_module_going(struct module *mod) |
| 499 | { | 390 | { |
| 500 | struct tp_module *pos; | 391 | struct tp_module *tp_mod; |
| 501 | 392 | ||
| 502 | if (!mod->num_tracepoints) | 393 | if (!mod->num_tracepoints) |
| 503 | return 0; | 394 | return; |
| 504 | 395 | ||
| 505 | mutex_lock(&tracepoints_mutex); | 396 | mutex_lock(&tracepoint_module_list_mutex); |
| 506 | tracepoint_update_probe_range(mod->tracepoints_ptrs, | 397 | list_for_each_entry(tp_mod, &tracepoint_module_list, list) { |
| 507 | mod->tracepoints_ptrs + mod->num_tracepoints); | 398 | if (tp_mod->mod == mod) { |
| 508 | list_for_each_entry(pos, &tracepoint_module_list, list) { | 399 | blocking_notifier_call_chain(&tracepoint_notify_list, |
| 509 | if (pos->tracepoints_ptrs == mod->tracepoints_ptrs) { | 400 | MODULE_STATE_GOING, tp_mod); |
| 510 | list_del(&pos->list); | 401 | list_del(&tp_mod->list); |
| 511 | kfree(pos); | 402 | kfree(tp_mod); |
| 403 | /* | ||
| 404 | * Call the going notifier before checking for | ||
| 405 | * quiescence. | ||
| 406 | */ | ||
| 407 | tp_module_going_check_quiescent(mod->tracepoints_ptrs, | ||
| 408 | mod->tracepoints_ptrs + mod->num_tracepoints); | ||
| 512 | break; | 409 | break; |
| 513 | } | 410 | } |
| 514 | } | 411 | } |
| @@ -518,12 +415,11 @@ static int tracepoint_module_going(struct module *mod) | |||
| 518 | * flag on "going", in case a module taints the kernel only after being | 415 | * flag on "going", in case a module taints the kernel only after being |
| 519 | * loaded. | 416 | * loaded. |
| 520 | */ | 417 | */ |
| 521 | mutex_unlock(&tracepoints_mutex); | 418 | mutex_unlock(&tracepoint_module_list_mutex); |
| 522 | return 0; | ||
| 523 | } | 419 | } |
| 524 | 420 | ||
| 525 | int tracepoint_module_notify(struct notifier_block *self, | 421 | static int tracepoint_module_notify(struct notifier_block *self, |
| 526 | unsigned long val, void *data) | 422 | unsigned long val, void *data) |
| 527 | { | 423 | { |
| 528 | struct module *mod = data; | 424 | struct module *mod = data; |
| 529 | int ret = 0; | 425 | int ret = 0; |
| @@ -535,24 +431,58 @@ int tracepoint_module_notify(struct notifier_block *self, | |||
| 535 | case MODULE_STATE_LIVE: | 431 | case MODULE_STATE_LIVE: |
| 536 | break; | 432 | break; |
| 537 | case MODULE_STATE_GOING: | 433 | case MODULE_STATE_GOING: |
| 538 | ret = tracepoint_module_going(mod); | 434 | tracepoint_module_going(mod); |
| 435 | break; | ||
| 436 | case MODULE_STATE_UNFORMED: | ||
| 539 | break; | 437 | break; |
| 540 | } | 438 | } |
| 541 | return ret; | 439 | return ret; |
| 542 | } | 440 | } |
| 543 | 441 | ||
| 544 | struct notifier_block tracepoint_module_nb = { | 442 | static struct notifier_block tracepoint_module_nb = { |
| 545 | .notifier_call = tracepoint_module_notify, | 443 | .notifier_call = tracepoint_module_notify, |
| 546 | .priority = 0, | 444 | .priority = 0, |
| 547 | }; | 445 | }; |
| 548 | 446 | ||
| 549 | static int init_tracepoints(void) | 447 | static __init int init_tracepoints(void) |
| 550 | { | 448 | { |
| 551 | return register_module_notifier(&tracepoint_module_nb); | 449 | int ret; |
| 450 | |||
| 451 | ret = register_module_notifier(&tracepoint_module_nb); | ||
| 452 | if (ret) | ||
| 453 | pr_warning("Failed to register tracepoint module enter notifier\n"); | ||
| 454 | |||
| 455 | return ret; | ||
| 552 | } | 456 | } |
| 553 | __initcall(init_tracepoints); | 457 | __initcall(init_tracepoints); |
| 554 | #endif /* CONFIG_MODULES */ | 458 | #endif /* CONFIG_MODULES */ |
| 555 | 459 | ||
| 460 | static void for_each_tracepoint_range(struct tracepoint * const *begin, | ||
| 461 | struct tracepoint * const *end, | ||
| 462 | void (*fct)(struct tracepoint *tp, void *priv), | ||
| 463 | void *priv) | ||
| 464 | { | ||
| 465 | struct tracepoint * const *iter; | ||
| 466 | |||
| 467 | if (!begin) | ||
| 468 | return; | ||
| 469 | for (iter = begin; iter < end; iter++) | ||
| 470 | fct(*iter, priv); | ||
| 471 | } | ||
| 472 | |||
| 473 | /** | ||
| 474 | * for_each_kernel_tracepoint - iteration on all kernel tracepoints | ||
| 475 | * @fct: callback | ||
| 476 | * @priv: private data | ||
| 477 | */ | ||
| 478 | void for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv), | ||
| 479 | void *priv) | ||
| 480 | { | ||
| 481 | for_each_tracepoint_range(__start___tracepoints_ptrs, | ||
| 482 | __stop___tracepoints_ptrs, fct, priv); | ||
| 483 | } | ||
| 484 | EXPORT_SYMBOL_GPL(for_each_kernel_tracepoint); | ||
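Because probe registration no longer resolves names, a tracer that only knows a tracepoint by name can recover the struct tracepoint with this iterator. A minimal lookup sketch (the my_* helpers are illustrative):

#include <linux/string.h>
#include <linux/tracepoint.h>

struct my_tp_lookup {
	const char *name;
	struct tracepoint *found;
};

static void my_match_tp(struct tracepoint *tp, void *priv)
{
	struct my_tp_lookup *lookup = priv;

	if (!strcmp(tp->name, lookup->name))
		lookup->found = tp;
}

static struct tracepoint *my_find_tracepoint(const char *name)
{
	struct my_tp_lookup lookup = { .name = name, .found = NULL };

	for_each_kernel_tracepoint(my_match_tp, &lookup);
	return lookup.found;	/* NULL if no core-kernel tracepoint matches */
}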
| 485 | |||
| 556 | #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS | 486 | #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS |
| 557 | 487 | ||
| 558 | /* NB: reg/unreg are called while guarded with the tracepoints_mutex */ | 488 | /* NB: reg/unreg are called while guarded with the tracepoints_mutex */ |
