author     Linus Torvalds <torvalds@linux-foundation.org>  2017-05-03 21:41:21 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-05-03 21:41:21 -0400
commit     4c174688ee92805aa5df6e06e5b625a3286e415c (patch)
tree       78e18b242b31a3a50eda41bfdd4705e07f13647a
parent     9c35baf6cee9a5745d55de6f9995916dde642517 (diff)
parent     73a757e63114dfd765f1c5d1ff7e994f123d0234 (diff)
Merge tag 'trace-v4.12' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:
"New features for this release:
- Pretty much a full rewrite of the processing of function plugins.
i.e. echo do_IRQ:stacktrace > set_ftrace_filter
- The rewrite was needed to allow plugins to be unique to tracing
instances. i.e. mkdir instance/foo; cd instances/foo; echo
do_IRQ:stacktrace > set_ftrace_filter. The old implementation was very
hacky; the rewrite removes a lot of those hacks.
- New "function-fork" tracing option. When set, processes whose pids
are listed in the set_ftrace_pid file will have their children's pids
added to that file when they fork.
- Exposure of "maxactive" for kretprobe in kprobe_events
- Allow for builtin init functions to be traced by the function
tracer (via the kernel command line). Module init function tracing
will come in the next release.
- Added more selftests, and have selftests also test in an instance"
* tag 'trace-v4.12' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (60 commits)
ring-buffer: Return reader page back into existing ring buffer
selftests: ftrace: Allow some event trigger tests to run in an instance
selftests: ftrace: Have some basic tests run in a tracing instance too
selftests: ftrace: Have event tests also run in an tracing instance
selftests: ftrace: Make func_event_triggers and func_traceonoff_triggers tests do instances
selftests: ftrace: Allow some tests to be run in a tracing instance
tracing/ftrace: Allow for instances to trigger their own stacktrace probes
tracing/ftrace: Allow for the traceonoff probe be unique to instances
tracing/ftrace: Enable snapshot function trigger to work with instances
tracing/ftrace: Allow instances to have their own function probes
tracing/ftrace: Add a better way to pass data via the probe functions
ftrace: Dynamically create the probe ftrace_ops for the trace_array
tracing: Pass the trace_array into ftrace_probe_ops functions
tracing: Have the trace_array hold the list of registered func probes
ftrace: If the hash for a probe fails to update then free what was initialized
ftrace: Have the function probes call their own function
ftrace: Have each function probe use its own ftrace_ops
ftrace: Have unregister_ftrace_function_probe_func() return a value
ftrace: Add helper function ftrace_hash_move_and_update_ops()
ftrace: Remove data field from ftrace_func_probe structure
...
40 files changed, 1902 insertions, 656 deletions
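
The core of the rework is that function probes are now owned by a trace_array (a tracing instance) and the probe callbacks receive that instance. The new struct definition itself is not part of this excerpt, but its shape can be read off the call sites further down (function_trace_probe_call() and t_probe_show()); the fragment below is only a sketch inferred from those calls, with the previous init/free members omitted because they are not visible here:

	/*
	 * Sketch only, inferred from the call sites in this diff:
	 *   probe_ops->func(ip, parent_ip, probe->tr, probe_ops, probe->data);
	 *   probe_ops->print(m, probe_entry->ip, probe_ops, probe->data);
	 */
	struct ftrace_probe_ops {
		void	(*func)(unsigned long ip, unsigned long parent_ip,
				struct trace_array *tr,
				struct ftrace_probe_ops *ops, void *data);
		int	(*print)(struct seq_file *m, unsigned long ip,
				 struct ftrace_probe_ops *ops, void *data);
		/* init/free members not shown in this excerpt */
	};

Each probe implementation (stacktrace, snapshot, traceon/traceoff) in the series above is adapted to this per-instance callback shape.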
diff --git a/Documentation/trace/kprobetrace.txt b/Documentation/trace/kprobetrace.txt
index 5ea85059db3b..1a3a3d6bc2a8 100644
--- a/Documentation/trace/kprobetrace.txt
+++ b/Documentation/trace/kprobetrace.txt
@@ -24,7 +24,7 @@ current_tracer. Instead of that, add probe points via | |||
24 | Synopsis of kprobe_events | 24 | Synopsis of kprobe_events |
25 | ------------------------- | 25 | ------------------------- |
26 | p[:[GRP/]EVENT] [MOD:]SYM[+offs]|MEMADDR [FETCHARGS] : Set a probe | 26 | p[:[GRP/]EVENT] [MOD:]SYM[+offs]|MEMADDR [FETCHARGS] : Set a probe |
27 | r[:[GRP/]EVENT] [MOD:]SYM[+0] [FETCHARGS] : Set a return probe | 27 | r[MAXACTIVE][:[GRP/]EVENT] [MOD:]SYM[+0] [FETCHARGS] : Set a return probe |
28 | -:[GRP/]EVENT : Clear a probe | 28 | -:[GRP/]EVENT : Clear a probe |
29 | 29 | ||
30 | GRP : Group name. If omitted, use "kprobes" for it. | 30 | GRP : Group name. If omitted, use "kprobes" for it. |
@@ -33,6 +33,9 @@ Synopsis of kprobe_events | |||
33 | MOD : Module name which has given SYM. | 33 | MOD : Module name which has given SYM. |
34 | SYM[+offs] : Symbol+offset where the probe is inserted. | 34 | SYM[+offs] : Symbol+offset where the probe is inserted. |
35 | MEMADDR : Address where the probe is inserted. | 35 | MEMADDR : Address where the probe is inserted. |
36 | MAXACTIVE : Maximum number of instances of the specified function that | ||
37 | can be probed simultaneously, or 0 for the default value | ||
38 | as defined in Documentation/kprobes.txt section 1.3.1. | ||
36 | 39 | ||
37 | FETCHARGS : Arguments. Each probe can have up to 128 args. | 40 | FETCHARGS : Arguments. Each probe can have up to 128 args. |
38 | %REG : Fetch register REG | 41 | %REG : Fetch register REG |
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 5b7153540727..8ee76dce9140 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -533,7 +533,13 @@ static void do_sync_core(void *data) | |||
533 | 533 | ||
534 | static void run_sync(void) | 534 | static void run_sync(void) |
535 | { | 535 | { |
536 | int enable_irqs = irqs_disabled(); | 536 | int enable_irqs; |
537 | |||
538 | /* No need to sync if there's only one CPU */ | ||
539 | if (num_online_cpus() == 1) | ||
540 | return; | ||
541 | |||
542 | enable_irqs = irqs_disabled(); | ||
537 | 543 | ||
538 | /* We may be called with interrupts disabled (on bootup). */ | 544 | /* We may be called with interrupts disabled (on bootup). */ |
539 | if (enable_irqs) | 545 | if (enable_irqs) |
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 3633e8beff39..6d2a63e4ea52 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -42,8 +42,10 @@ | |||
42 | /* Main tracing buffer and events set up */ | 42 | /* Main tracing buffer and events set up */ |
43 | #ifdef CONFIG_TRACING | 43 | #ifdef CONFIG_TRACING |
44 | void trace_init(void); | 44 | void trace_init(void); |
45 | void early_trace_init(void); | ||
45 | #else | 46 | #else |
46 | static inline void trace_init(void) { } | 47 | static inline void trace_init(void) { } |
48 | static inline void early_trace_init(void) { } | ||
47 | #endif | 49 | #endif |
48 | 50 | ||
49 | struct module; | 51 | struct module; |
@@ -144,6 +146,10 @@ struct ftrace_ops_hash { | |||
144 | struct ftrace_hash *filter_hash; | 146 | struct ftrace_hash *filter_hash; |
145 | struct mutex regex_lock; | 147 | struct mutex regex_lock; |
146 | }; | 148 | }; |
149 | |||
150 | void ftrace_free_init_mem(void); | ||
151 | #else | ||
152 | static inline void ftrace_free_init_mem(void) { } | ||
147 | #endif | 153 | #endif |
148 | 154 | ||
149 | /* | 155 | /* |
@@ -260,6 +266,7 @@ static inline int ftrace_nr_registered_ops(void) | |||
260 | } | 266 | } |
261 | static inline void clear_ftrace_function(void) { } | 267 | static inline void clear_ftrace_function(void) { } |
262 | static inline void ftrace_kill(void) { } | 268 | static inline void ftrace_kill(void) { } |
269 | static inline void ftrace_free_init_mem(void) { } | ||
263 | #endif /* CONFIG_FUNCTION_TRACER */ | 270 | #endif /* CONFIG_FUNCTION_TRACER */ |
264 | 271 | ||
265 | #ifdef CONFIG_STACK_TRACER | 272 | #ifdef CONFIG_STACK_TRACER |
@@ -279,15 +286,45 @@ int | |||
279 | stack_trace_sysctl(struct ctl_table *table, int write, | 286 | stack_trace_sysctl(struct ctl_table *table, int write, |
280 | void __user *buffer, size_t *lenp, | 287 | void __user *buffer, size_t *lenp, |
281 | loff_t *ppos); | 288 | loff_t *ppos); |
282 | #endif | ||
283 | 289 | ||
284 | struct ftrace_func_command { | 290 | /* DO NOT MODIFY THIS VARIABLE DIRECTLY! */ |
285 | struct list_head list; | 291 | DECLARE_PER_CPU(int, disable_stack_tracer); |
286 | char *name; | 292 | |
287 | int (*func)(struct ftrace_hash *hash, | 293 | /** |
288 | char *func, char *cmd, | 294 | * stack_tracer_disable - temporarily disable the stack tracer |
289 | char *params, int enable); | 295 | * |
290 | }; | 296 | * There's a few locations (namely in RCU) where stack tracing |
297 | * cannot be executed. This function is used to disable stack | ||
298 | * tracing during those critical sections. | ||
299 | * | ||
300 | * This function must be called with preemption or interrupts | ||
301 | * disabled and stack_tracer_enable() must be called shortly after | ||
302 | * while preemption or interrupts are still disabled. | ||
303 | */ | ||
304 | static inline void stack_tracer_disable(void) | ||
305 | { | ||
306 | /* Preemption or interupts must be disabled */ | ||
307 | if (IS_ENABLED(CONFIG_PREEMPT_DEBUG)) | ||
308 | WARN_ON_ONCE(!preempt_count() || !irqs_disabled()); | ||
309 | this_cpu_inc(disable_stack_tracer); | ||
310 | } | ||
311 | |||
312 | /** | ||
313 | * stack_tracer_enable - re-enable the stack tracer | ||
314 | * | ||
315 | * After stack_tracer_disable() is called, stack_tracer_enable() | ||
316 | * must be called shortly afterward. | ||
317 | */ | ||
318 | static inline void stack_tracer_enable(void) | ||
319 | { | ||
320 | if (IS_ENABLED(CONFIG_PREEMPT_DEBUG)) | ||
321 | WARN_ON_ONCE(!preempt_count() || !irqs_disabled()); | ||
322 | this_cpu_dec(disable_stack_tracer); | ||
323 | } | ||
324 | #else | ||
325 | static inline void stack_tracer_disable(void) { } | ||
326 | static inline void stack_tracer_enable(void) { } | ||
327 | #endif | ||
291 | 328 | ||
292 | #ifdef CONFIG_DYNAMIC_FTRACE | 329 | #ifdef CONFIG_DYNAMIC_FTRACE |
293 | 330 | ||
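
The new stack_tracer_disable()/stack_tracer_enable() pair documented in the hunk above must be used inside a region where preemption or interrupts are already off, and re-enabled shortly after. A minimal, hypothetical caller (not taken from this series) illustrating that contract:

	#include <linux/ftrace.h>
	#include <linux/preempt.h>

	/* Hypothetical caller: shows only the documented pairing rules. */
	static void example_no_stack_trace_region(void)
	{
		preempt_disable();
		stack_tracer_disable();

		/* ... work the stack tracer must not sample, e.g. RCU eqs entry ... */

		stack_tracer_enable();
		preempt_enable();
	}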
@@ -315,30 +352,6 @@ void ftrace_bug(int err, struct dyn_ftrace *rec); | |||
315 | 352 | ||
316 | struct seq_file; | 353 | struct seq_file; |
317 | 354 | ||
318 | struct ftrace_probe_ops { | ||
319 | void (*func)(unsigned long ip, | ||
320 | unsigned long parent_ip, | ||
321 | void **data); | ||
322 | int (*init)(struct ftrace_probe_ops *ops, | ||
323 | unsigned long ip, void **data); | ||
324 | void (*free)(struct ftrace_probe_ops *ops, | ||
325 | unsigned long ip, void **data); | ||
326 | int (*print)(struct seq_file *m, | ||
327 | unsigned long ip, | ||
328 | struct ftrace_probe_ops *ops, | ||
329 | void *data); | ||
330 | }; | ||
331 | |||
332 | extern int | ||
333 | register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | ||
334 | void *data); | ||
335 | extern void | ||
336 | unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | ||
337 | void *data); | ||
338 | extern void | ||
339 | unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops); | ||
340 | extern void unregister_ftrace_function_probe_all(char *glob); | ||
341 | |||
342 | extern int ftrace_text_reserved(const void *start, const void *end); | 355 | extern int ftrace_text_reserved(const void *start, const void *end); |
343 | 356 | ||
344 | extern int ftrace_nr_registered_ops(void); | 357 | extern int ftrace_nr_registered_ops(void); |
@@ -400,9 +413,6 @@ void ftrace_set_global_notrace(unsigned char *buf, int len, int reset); | |||
400 | void ftrace_free_filter(struct ftrace_ops *ops); | 413 | void ftrace_free_filter(struct ftrace_ops *ops); |
401 | void ftrace_ops_set_global_filter(struct ftrace_ops *ops); | 414 | void ftrace_ops_set_global_filter(struct ftrace_ops *ops); |
402 | 415 | ||
403 | int register_ftrace_command(struct ftrace_func_command *cmd); | ||
404 | int unregister_ftrace_command(struct ftrace_func_command *cmd); | ||
405 | |||
406 | enum { | 416 | enum { |
407 | FTRACE_UPDATE_CALLS = (1 << 0), | 417 | FTRACE_UPDATE_CALLS = (1 << 0), |
408 | FTRACE_DISABLE_CALLS = (1 << 1), | 418 | FTRACE_DISABLE_CALLS = (1 << 1), |
@@ -433,8 +443,8 @@ enum { | |||
433 | FTRACE_ITER_FILTER = (1 << 0), | 443 | FTRACE_ITER_FILTER = (1 << 0), |
434 | FTRACE_ITER_NOTRACE = (1 << 1), | 444 | FTRACE_ITER_NOTRACE = (1 << 1), |
435 | FTRACE_ITER_PRINTALL = (1 << 2), | 445 | FTRACE_ITER_PRINTALL = (1 << 2), |
436 | FTRACE_ITER_DO_HASH = (1 << 3), | 446 | FTRACE_ITER_DO_PROBES = (1 << 3), |
437 | FTRACE_ITER_HASH = (1 << 4), | 447 | FTRACE_ITER_PROBE = (1 << 4), |
438 | FTRACE_ITER_ENABLED = (1 << 5), | 448 | FTRACE_ITER_ENABLED = (1 << 5), |
439 | }; | 449 | }; |
440 | 450 | ||
@@ -618,14 +628,6 @@ static inline void ftrace_enable_daemon(void) { } | |||
618 | static inline void ftrace_module_init(struct module *mod) { } | 628 | static inline void ftrace_module_init(struct module *mod) { } |
619 | static inline void ftrace_module_enable(struct module *mod) { } | 629 | static inline void ftrace_module_enable(struct module *mod) { } |
620 | static inline void ftrace_release_mod(struct module *mod) { } | 630 | static inline void ftrace_release_mod(struct module *mod) { } |
621 | static inline __init int register_ftrace_command(struct ftrace_func_command *cmd) | ||
622 | { | ||
623 | return -EINVAL; | ||
624 | } | ||
625 | static inline __init int unregister_ftrace_command(char *cmd_name) | ||
626 | { | ||
627 | return -EINVAL; | ||
628 | } | ||
629 | static inline int ftrace_text_reserved(const void *start, const void *end) | 631 | static inline int ftrace_text_reserved(const void *start, const void *end) |
630 | { | 632 | { |
631 | return 0; | 633 | return 0; |
diff --git a/include/linux/init.h b/include/linux/init.h
index 79af0962fd52..94769d687cf0 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -39,7 +39,7 @@ | |||
39 | 39 | ||
40 | /* These are for everybody (although not all archs will actually | 40 | /* These are for everybody (although not all archs will actually |
41 | discard it in modules) */ | 41 | discard it in modules) */ |
42 | #define __init __section(.init.text) __cold notrace __latent_entropy | 42 | #define __init __section(.init.text) __cold __inittrace __latent_entropy |
43 | #define __initdata __section(.init.data) | 43 | #define __initdata __section(.init.data) |
44 | #define __initconst __section(.init.rodata) | 44 | #define __initconst __section(.init.rodata) |
45 | #define __exitdata __section(.exit.data) | 45 | #define __exitdata __section(.exit.data) |
@@ -68,8 +68,10 @@ | |||
68 | 68 | ||
69 | #ifdef MODULE | 69 | #ifdef MODULE |
70 | #define __exitused | 70 | #define __exitused |
71 | #define __inittrace notrace | ||
71 | #else | 72 | #else |
72 | #define __exitused __used | 73 | #define __exitused __used |
74 | #define __inittrace | ||
73 | #endif | 75 | #endif |
74 | 76 | ||
75 | #define __exit __section(.exit.text) __exitused __cold notrace | 77 | #define __exit __section(.exit.text) __exitused __cold notrace |
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index de88b33c0974..dea8f17b2fe3 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -97,6 +97,7 @@ void do_trace_rcu_torture_read(const char *rcutorturename, | |||
97 | unsigned long secs, | 97 | unsigned long secs, |
98 | unsigned long c_old, | 98 | unsigned long c_old, |
99 | unsigned long c); | 99 | unsigned long c); |
100 | bool rcu_irq_enter_disabled(void); | ||
100 | #else | 101 | #else |
101 | static inline void rcutorture_get_gp_data(enum rcutorture_type test_type, | 102 | static inline void rcutorture_get_gp_data(enum rcutorture_type test_type, |
102 | int *flags, | 103 | int *flags, |
@@ -113,6 +114,10 @@ static inline void rcutorture_record_test_transition(void) | |||
113 | static inline void rcutorture_record_progress(unsigned long vernum) | 114 | static inline void rcutorture_record_progress(unsigned long vernum) |
114 | { | 115 | { |
115 | } | 116 | } |
117 | static inline bool rcu_irq_enter_disabled(void) | ||
118 | { | ||
119 | return false; | ||
120 | } | ||
116 | #ifdef CONFIG_RCU_TRACE | 121 | #ifdef CONFIG_RCU_TRACE |
117 | void do_trace_rcu_torture_read(const char *rcutorturename, | 122 | void do_trace_rcu_torture_read(const char *rcutorturename, |
118 | struct rcu_head *rhp, | 123 | struct rcu_head *rhp, |
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index b6d4568795a7..ee9b461af095 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -185,7 +185,7 @@ size_t ring_buffer_page_len(void *page); | |||
185 | 185 | ||
186 | 186 | ||
187 | void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu); | 187 | void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu); |
188 | void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data); | 188 | void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data); |
189 | int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page, | 189 | int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page, |
190 | size_t len, int cpu, int full); | 190 | size_t len, int cpu, int full); |
191 | 191 | ||
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 0af63c4381b9..a556805eff8a 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -138,16 +138,7 @@ enum print_line_t { | |||
138 | TRACE_TYPE_NO_CONSUME = 3 /* Handled but ask to not consume */ | 138 | TRACE_TYPE_NO_CONSUME = 3 /* Handled but ask to not consume */ |
139 | }; | 139 | }; |
140 | 140 | ||
141 | /* | 141 | enum print_line_t trace_handle_return(struct trace_seq *s); |
142 | * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq | ||
143 | * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function | ||
144 | * simplifies those functions and keeps them in sync. | ||
145 | */ | ||
146 | static inline enum print_line_t trace_handle_return(struct trace_seq *s) | ||
147 | { | ||
148 | return trace_seq_has_overflowed(s) ? | ||
149 | TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED; | ||
150 | } | ||
151 | 142 | ||
152 | void tracing_generic_entry_update(struct trace_entry *entry, | 143 | void tracing_generic_entry_update(struct trace_entry *entry, |
153 | unsigned long flags, | 144 | unsigned long flags, |
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index f72fcfe0e66a..cc48cb2ce209 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -128,7 +128,7 @@ extern void syscall_unregfunc(void); | |||
128 | * as "(void *, void)". The DECLARE_TRACE_NOARGS() will pass in just | 128 | * as "(void *, void)". The DECLARE_TRACE_NOARGS() will pass in just |
129 | * "void *data", where as the DECLARE_TRACE() will pass in "void *data, proto". | 129 | * "void *data", where as the DECLARE_TRACE() will pass in "void *data, proto". |
130 | */ | 130 | */ |
131 | #define __DO_TRACE(tp, proto, args, cond, prercu, postrcu) \ | 131 | #define __DO_TRACE(tp, proto, args, cond, rcucheck) \ |
132 | do { \ | 132 | do { \ |
133 | struct tracepoint_func *it_func_ptr; \ | 133 | struct tracepoint_func *it_func_ptr; \ |
134 | void *it_func; \ | 134 | void *it_func; \ |
@@ -136,7 +136,11 @@ extern void syscall_unregfunc(void); | |||
136 | \ | 136 | \ |
137 | if (!(cond)) \ | 137 | if (!(cond)) \ |
138 | return; \ | 138 | return; \ |
139 | prercu; \ | 139 | if (rcucheck) { \ |
140 | if (WARN_ON_ONCE(rcu_irq_enter_disabled())) \ | ||
141 | return; \ | ||
142 | rcu_irq_enter_irqson(); \ | ||
143 | } \ | ||
140 | rcu_read_lock_sched_notrace(); \ | 144 | rcu_read_lock_sched_notrace(); \ |
141 | it_func_ptr = rcu_dereference_sched((tp)->funcs); \ | 145 | it_func_ptr = rcu_dereference_sched((tp)->funcs); \ |
142 | if (it_func_ptr) { \ | 146 | if (it_func_ptr) { \ |
@@ -147,20 +151,19 @@ extern void syscall_unregfunc(void); | |||
147 | } while ((++it_func_ptr)->func); \ | 151 | } while ((++it_func_ptr)->func); \ |
148 | } \ | 152 | } \ |
149 | rcu_read_unlock_sched_notrace(); \ | 153 | rcu_read_unlock_sched_notrace(); \ |
150 | postrcu; \ | 154 | if (rcucheck) \ |
155 | rcu_irq_exit_irqson(); \ | ||
151 | } while (0) | 156 | } while (0) |
152 | 157 | ||
153 | #ifndef MODULE | 158 | #ifndef MODULE |
154 | #define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args) \ | 159 | #define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args) \ |
155 | static inline void trace_##name##_rcuidle(proto) \ | 160 | static inline void trace_##name##_rcuidle(proto) \ |
156 | { \ | 161 | { \ |
157 | if (static_key_false(&__tracepoint_##name.key)) \ | 162 | if (static_key_false(&__tracepoint_##name.key)) \ |
158 | __DO_TRACE(&__tracepoint_##name, \ | 163 | __DO_TRACE(&__tracepoint_##name, \ |
159 | TP_PROTO(data_proto), \ | 164 | TP_PROTO(data_proto), \ |
160 | TP_ARGS(data_args), \ | 165 | TP_ARGS(data_args), \ |
161 | TP_CONDITION(cond), \ | 166 | TP_CONDITION(cond), 1); \ |
162 | rcu_irq_enter_irqson(), \ | ||
163 | rcu_irq_exit_irqson()); \ | ||
164 | } | 167 | } |
165 | #else | 168 | #else |
166 | #define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args) | 169 | #define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args) |
@@ -186,7 +189,7 @@ extern void syscall_unregfunc(void); | |||
186 | __DO_TRACE(&__tracepoint_##name, \ | 189 | __DO_TRACE(&__tracepoint_##name, \ |
187 | TP_PROTO(data_proto), \ | 190 | TP_PROTO(data_proto), \ |
188 | TP_ARGS(data_args), \ | 191 | TP_ARGS(data_args), \ |
189 | TP_CONDITION(cond),,); \ | 192 | TP_CONDITION(cond), 0); \ |
190 | if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) { \ | 193 | if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) { \ |
191 | rcu_read_lock_sched_notrace(); \ | 194 | rcu_read_lock_sched_notrace(); \ |
192 | rcu_dereference_sched(__tracepoint_##name.funcs);\ | 195 | rcu_dereference_sched(__tracepoint_##name.funcs);\ |
diff --git a/init/main.c b/init/main.c
index b1b9dbf7622f..cc48053bb39f 100644
--- a/init/main.c
+++ b/init/main.c
@@ -545,6 +545,11 @@ asmlinkage __visible void __init start_kernel(void) | |||
545 | trap_init(); | 545 | trap_init(); |
546 | mm_init(); | 546 | mm_init(); |
547 | 547 | ||
548 | ftrace_init(); | ||
549 | |||
550 | /* trace_printk can be enabled here */ | ||
551 | early_trace_init(); | ||
552 | |||
548 | /* | 553 | /* |
549 | * Set up the scheduler prior starting any interrupts (such as the | 554 | * Set up the scheduler prior starting any interrupts (such as the |
550 | * timer interrupt). Full topology setup happens at smp_init() | 555 | * timer interrupt). Full topology setup happens at smp_init() |
@@ -570,7 +575,7 @@ asmlinkage __visible void __init start_kernel(void) | |||
570 | 575 | ||
571 | rcu_init(); | 576 | rcu_init(); |
572 | 577 | ||
573 | /* trace_printk() and trace points may be used after this */ | 578 | /* Trace events are available after this */ |
574 | trace_init(); | 579 | trace_init(); |
575 | 580 | ||
576 | context_tracking_init(); | 581 | context_tracking_init(); |
@@ -670,8 +675,6 @@ asmlinkage __visible void __init start_kernel(void) | |||
670 | efi_free_boot_services(); | 675 | efi_free_boot_services(); |
671 | } | 676 | } |
672 | 677 | ||
673 | ftrace_init(); | ||
674 | |||
675 | /* Do the rest non-__init'ed, we're now alive */ | 678 | /* Do the rest non-__init'ed, we're now alive */ |
676 | rest_init(); | 679 | rest_init(); |
677 | } | 680 | } |
@@ -959,6 +962,7 @@ static int __ref kernel_init(void *unused) | |||
959 | kernel_init_freeable(); | 962 | kernel_init_freeable(); |
960 | /* need to finish all async __init code before freeing the memory */ | 963 | /* need to finish all async __init code before freeing the memory */ |
961 | async_synchronize_full(); | 964 | async_synchronize_full(); |
965 | ftrace_free_init_mem(); | ||
962 | free_initmem(); | 966 | free_initmem(); |
963 | mark_readonly(); | 967 | mark_readonly(); |
964 | system_state = SYSTEM_RUNNING; | 968 | system_state = SYSTEM_RUNNING; |
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 50fee7689e71..a6dcf3bd244f 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -57,6 +57,7 @@ | |||
57 | #include <linux/random.h> | 57 | #include <linux/random.h> |
58 | #include <linux/trace_events.h> | 58 | #include <linux/trace_events.h> |
59 | #include <linux/suspend.h> | 59 | #include <linux/suspend.h> |
60 | #include <linux/ftrace.h> | ||
60 | 61 | ||
61 | #include "tree.h" | 62 | #include "tree.h" |
62 | #include "rcu.h" | 63 | #include "rcu.h" |
@@ -284,6 +285,20 @@ static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = { | |||
284 | }; | 285 | }; |
285 | 286 | ||
286 | /* | 287 | /* |
288 | * There's a few places, currently just in the tracing infrastructure, | ||
289 | * that uses rcu_irq_enter() to make sure RCU is watching. But there's | ||
290 | * a small location where that will not even work. In those cases | ||
291 | * rcu_irq_enter_disabled() needs to be checked to make sure rcu_irq_enter() | ||
292 | * can be called. | ||
293 | */ | ||
294 | static DEFINE_PER_CPU(bool, disable_rcu_irq_enter); | ||
295 | |||
296 | bool rcu_irq_enter_disabled(void) | ||
297 | { | ||
298 | return this_cpu_read(disable_rcu_irq_enter); | ||
299 | } | ||
300 | |||
301 | /* | ||
287 | * Record entry into an extended quiescent state. This is only to be | 302 | * Record entry into an extended quiescent state. This is only to be |
288 | * called when not already in an extended quiescent state. | 303 | * called when not already in an extended quiescent state. |
289 | */ | 304 | */ |
@@ -771,25 +786,24 @@ cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp) | |||
771 | } | 786 | } |
772 | 787 | ||
773 | /* | 788 | /* |
774 | * rcu_eqs_enter_common - current CPU is moving towards extended quiescent state | 789 | * rcu_eqs_enter_common - current CPU is entering an extended quiescent state |
775 | * | 790 | * |
776 | * If the new value of the ->dynticks_nesting counter now is zero, | 791 | * Enter idle, doing appropriate accounting. The caller must have |
777 | * we really have entered idle, and must do the appropriate accounting. | 792 | * disabled interrupts. |
778 | * The caller must have disabled interrupts. | ||
779 | */ | 793 | */ |
780 | static void rcu_eqs_enter_common(long long oldval, bool user) | 794 | static void rcu_eqs_enter_common(bool user) |
781 | { | 795 | { |
782 | struct rcu_state *rsp; | 796 | struct rcu_state *rsp; |
783 | struct rcu_data *rdp; | 797 | struct rcu_data *rdp; |
784 | RCU_TRACE(struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);) | 798 | struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); |
785 | 799 | ||
786 | trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting); | 800 | trace_rcu_dyntick(TPS("Start"), rdtp->dynticks_nesting, 0); |
787 | if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && | 801 | if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && |
788 | !user && !is_idle_task(current)) { | 802 | !user && !is_idle_task(current)) { |
789 | struct task_struct *idle __maybe_unused = | 803 | struct task_struct *idle __maybe_unused = |
790 | idle_task(smp_processor_id()); | 804 | idle_task(smp_processor_id()); |
791 | 805 | ||
792 | trace_rcu_dyntick(TPS("Error on entry: not idle task"), oldval, 0); | 806 | trace_rcu_dyntick(TPS("Error on entry: not idle task"), rdtp->dynticks_nesting, 0); |
793 | rcu_ftrace_dump(DUMP_ORIG); | 807 | rcu_ftrace_dump(DUMP_ORIG); |
794 | WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s", | 808 | WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s", |
795 | current->pid, current->comm, | 809 | current->pid, current->comm, |
@@ -800,7 +814,10 @@ static void rcu_eqs_enter_common(long long oldval, bool user) | |||
800 | do_nocb_deferred_wakeup(rdp); | 814 | do_nocb_deferred_wakeup(rdp); |
801 | } | 815 | } |
802 | rcu_prepare_for_idle(); | 816 | rcu_prepare_for_idle(); |
803 | rcu_dynticks_eqs_enter(); | 817 | __this_cpu_inc(disable_rcu_irq_enter); |
818 | rdtp->dynticks_nesting = 0; /* Breaks tracing momentarily. */ | ||
819 | rcu_dynticks_eqs_enter(); /* After this, tracing works again. */ | ||
820 | __this_cpu_dec(disable_rcu_irq_enter); | ||
804 | rcu_dynticks_task_enter(); | 821 | rcu_dynticks_task_enter(); |
805 | 822 | ||
806 | /* | 823 | /* |
@@ -821,19 +838,15 @@ static void rcu_eqs_enter_common(long long oldval, bool user) | |||
821 | */ | 838 | */ |
822 | static void rcu_eqs_enter(bool user) | 839 | static void rcu_eqs_enter(bool user) |
823 | { | 840 | { |
824 | long long oldval; | ||
825 | struct rcu_dynticks *rdtp; | 841 | struct rcu_dynticks *rdtp; |
826 | 842 | ||
827 | rdtp = this_cpu_ptr(&rcu_dynticks); | 843 | rdtp = this_cpu_ptr(&rcu_dynticks); |
828 | oldval = rdtp->dynticks_nesting; | ||
829 | WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && | 844 | WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && |
830 | (oldval & DYNTICK_TASK_NEST_MASK) == 0); | 845 | (rdtp->dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0); |
831 | if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE) { | 846 | if ((rdtp->dynticks_nesting & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE) |
832 | rdtp->dynticks_nesting = 0; | 847 | rcu_eqs_enter_common(user); |
833 | rcu_eqs_enter_common(oldval, user); | 848 | else |
834 | } else { | ||
835 | rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE; | 849 | rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE; |
836 | } | ||
837 | } | 850 | } |
838 | 851 | ||
839 | /** | 852 | /** |
@@ -892,19 +905,18 @@ void rcu_user_enter(void) | |||
892 | */ | 905 | */ |
893 | void rcu_irq_exit(void) | 906 | void rcu_irq_exit(void) |
894 | { | 907 | { |
895 | long long oldval; | ||
896 | struct rcu_dynticks *rdtp; | 908 | struct rcu_dynticks *rdtp; |
897 | 909 | ||
898 | RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_exit() invoked with irqs enabled!!!"); | 910 | RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_exit() invoked with irqs enabled!!!"); |
899 | rdtp = this_cpu_ptr(&rcu_dynticks); | 911 | rdtp = this_cpu_ptr(&rcu_dynticks); |
900 | oldval = rdtp->dynticks_nesting; | ||
901 | rdtp->dynticks_nesting--; | ||
902 | WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && | 912 | WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && |
903 | rdtp->dynticks_nesting < 0); | 913 | rdtp->dynticks_nesting < 1); |
904 | if (rdtp->dynticks_nesting) | 914 | if (rdtp->dynticks_nesting <= 1) { |
905 | trace_rcu_dyntick(TPS("--="), oldval, rdtp->dynticks_nesting); | 915 | rcu_eqs_enter_common(true); |
906 | else | 916 | } else { |
907 | rcu_eqs_enter_common(oldval, true); | 917 | trace_rcu_dyntick(TPS("--="), rdtp->dynticks_nesting, rdtp->dynticks_nesting - 1); |
918 | rdtp->dynticks_nesting--; | ||
919 | } | ||
908 | rcu_sysidle_enter(1); | 920 | rcu_sysidle_enter(1); |
909 | } | 921 | } |
910 | 922 | ||
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 9619b5768e4b..7e06f04e98fe 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -134,7 +134,8 @@ config FUNCTION_TRACER | |||
134 | select KALLSYMS | 134 | select KALLSYMS |
135 | select GENERIC_TRACER | 135 | select GENERIC_TRACER |
136 | select CONTEXT_SWITCH_TRACER | 136 | select CONTEXT_SWITCH_TRACER |
137 | select GLOB | 137 | select GLOB |
138 | select TASKS_RCU if PREEMPT | ||
138 | help | 139 | help |
139 | Enable the kernel to trace every kernel function. This is done | 140 | Enable the kernel to trace every kernel function. This is done |
140 | by using a compiler feature to insert a small, 5-byte No-Operation | 141 | by using a compiler feature to insert a small, 5-byte No-Operation |
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index dd3e91d68dc7..00077a57b746 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -36,6 +36,7 @@ | |||
36 | 36 | ||
37 | #include <trace/events/sched.h> | 37 | #include <trace/events/sched.h> |
38 | 38 | ||
39 | #include <asm/sections.h> | ||
39 | #include <asm/setup.h> | 40 | #include <asm/setup.h> |
40 | 41 | ||
41 | #include "trace_output.h" | 42 | #include "trace_output.h" |
@@ -1095,22 +1096,20 @@ static bool update_all_ops; | |||
1095 | # error Dynamic ftrace depends on MCOUNT_RECORD | 1096 | # error Dynamic ftrace depends on MCOUNT_RECORD |
1096 | #endif | 1097 | #endif |
1097 | 1098 | ||
1098 | static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly; | ||
1099 | |||
1100 | struct ftrace_func_probe { | ||
1101 | struct hlist_node node; | ||
1102 | struct ftrace_probe_ops *ops; | ||
1103 | unsigned long flags; | ||
1104 | unsigned long ip; | ||
1105 | void *data; | ||
1106 | struct list_head free_list; | ||
1107 | }; | ||
1108 | |||
1109 | struct ftrace_func_entry { | 1099 | struct ftrace_func_entry { |
1110 | struct hlist_node hlist; | 1100 | struct hlist_node hlist; |
1111 | unsigned long ip; | 1101 | unsigned long ip; |
1112 | }; | 1102 | }; |
1113 | 1103 | ||
1104 | struct ftrace_func_probe { | ||
1105 | struct ftrace_probe_ops *probe_ops; | ||
1106 | struct ftrace_ops ops; | ||
1107 | struct trace_array *tr; | ||
1108 | struct list_head list; | ||
1109 | void *data; | ||
1110 | int ref; | ||
1111 | }; | ||
1112 | |||
1114 | /* | 1113 | /* |
1115 | * We make these constant because no one should touch them, | 1114 | * We make these constant because no one should touch them, |
1116 | * but they are used as the default "empty hash", to avoid allocating | 1115 | * but they are used as the default "empty hash", to avoid allocating |
@@ -1271,7 +1270,7 @@ static void | |||
1271 | remove_hash_entry(struct ftrace_hash *hash, | 1270 | remove_hash_entry(struct ftrace_hash *hash, |
1272 | struct ftrace_func_entry *entry) | 1271 | struct ftrace_func_entry *entry) |
1273 | { | 1272 | { |
1274 | hlist_del(&entry->hlist); | 1273 | hlist_del_rcu(&entry->hlist); |
1275 | hash->count--; | 1274 | hash->count--; |
1276 | } | 1275 | } |
1277 | 1276 | ||
@@ -2807,18 +2806,28 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command) | |||
2807 | * callers are done before leaving this function. | 2806 | * callers are done before leaving this function. |
2808 | * The same goes for freeing the per_cpu data of the per_cpu | 2807 | * The same goes for freeing the per_cpu data of the per_cpu |
2809 | * ops. | 2808 | * ops. |
2810 | * | ||
2811 | * Again, normal synchronize_sched() is not good enough. | ||
2812 | * We need to do a hard force of sched synchronization. | ||
2813 | * This is because we use preempt_disable() to do RCU, but | ||
2814 | * the function tracers can be called where RCU is not watching | ||
2815 | * (like before user_exit()). We can not rely on the RCU | ||
2816 | * infrastructure to do the synchronization, thus we must do it | ||
2817 | * ourselves. | ||
2818 | */ | 2809 | */ |
2819 | if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU)) { | 2810 | if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU)) { |
2811 | /* | ||
2812 | * We need to do a hard force of sched synchronization. | ||
2813 | * This is because we use preempt_disable() to do RCU, but | ||
2814 | * the function tracers can be called where RCU is not watching | ||
2815 | * (like before user_exit()). We can not rely on the RCU | ||
2816 | * infrastructure to do the synchronization, thus we must do it | ||
2817 | * ourselves. | ||
2818 | */ | ||
2820 | schedule_on_each_cpu(ftrace_sync); | 2819 | schedule_on_each_cpu(ftrace_sync); |
2821 | 2820 | ||
2821 | /* | ||
2822 | * When the kernel is preeptive, tasks can be preempted | ||
2823 | * while on a ftrace trampoline. Just scheduling a task on | ||
2824 | * a CPU is not good enough to flush them. Calling | ||
2825 | * synchornize_rcu_tasks() will wait for those tasks to | ||
2826 | * execute and either schedule voluntarily or enter user space. | ||
2827 | */ | ||
2828 | if (IS_ENABLED(CONFIG_PREEMPT)) | ||
2829 | synchronize_rcu_tasks(); | ||
2830 | |||
2822 | arch_ftrace_trampoline_free(ops); | 2831 | arch_ftrace_trampoline_free(ops); |
2823 | 2832 | ||
2824 | if (ops->flags & FTRACE_OPS_FL_PER_CPU) | 2833 | if (ops->flags & FTRACE_OPS_FL_PER_CPU) |
@@ -3055,34 +3064,63 @@ struct ftrace_iterator { | |||
3055 | struct ftrace_page *pg; | 3064 | struct ftrace_page *pg; |
3056 | struct dyn_ftrace *func; | 3065 | struct dyn_ftrace *func; |
3057 | struct ftrace_func_probe *probe; | 3066 | struct ftrace_func_probe *probe; |
3067 | struct ftrace_func_entry *probe_entry; | ||
3058 | struct trace_parser parser; | 3068 | struct trace_parser parser; |
3059 | struct ftrace_hash *hash; | 3069 | struct ftrace_hash *hash; |
3060 | struct ftrace_ops *ops; | 3070 | struct ftrace_ops *ops; |
3061 | int hidx; | 3071 | int pidx; |
3062 | int idx; | 3072 | int idx; |
3063 | unsigned flags; | 3073 | unsigned flags; |
3064 | }; | 3074 | }; |
3065 | 3075 | ||
3066 | static void * | 3076 | static void * |
3067 | t_hash_next(struct seq_file *m, loff_t *pos) | 3077 | t_probe_next(struct seq_file *m, loff_t *pos) |
3068 | { | 3078 | { |
3069 | struct ftrace_iterator *iter = m->private; | 3079 | struct ftrace_iterator *iter = m->private; |
3080 | struct trace_array *tr = iter->ops->private; | ||
3081 | struct list_head *func_probes; | ||
3082 | struct ftrace_hash *hash; | ||
3083 | struct list_head *next; | ||
3070 | struct hlist_node *hnd = NULL; | 3084 | struct hlist_node *hnd = NULL; |
3071 | struct hlist_head *hhd; | 3085 | struct hlist_head *hhd; |
3086 | int size; | ||
3072 | 3087 | ||
3073 | (*pos)++; | 3088 | (*pos)++; |
3074 | iter->pos = *pos; | 3089 | iter->pos = *pos; |
3075 | 3090 | ||
3076 | if (iter->probe) | 3091 | if (!tr) |
3077 | hnd = &iter->probe->node; | ||
3078 | retry: | ||
3079 | if (iter->hidx >= FTRACE_FUNC_HASHSIZE) | ||
3080 | return NULL; | 3092 | return NULL; |
3081 | 3093 | ||
3082 | hhd = &ftrace_func_hash[iter->hidx]; | 3094 | func_probes = &tr->func_probes; |
3095 | if (list_empty(func_probes)) | ||
3096 | return NULL; | ||
3097 | |||
3098 | if (!iter->probe) { | ||
3099 | next = func_probes->next; | ||
3100 | iter->probe = list_entry(next, struct ftrace_func_probe, list); | ||
3101 | } | ||
3102 | |||
3103 | if (iter->probe_entry) | ||
3104 | hnd = &iter->probe_entry->hlist; | ||
3105 | |||
3106 | hash = iter->probe->ops.func_hash->filter_hash; | ||
3107 | size = 1 << hash->size_bits; | ||
3108 | |||
3109 | retry: | ||
3110 | if (iter->pidx >= size) { | ||
3111 | if (iter->probe->list.next == func_probes) | ||
3112 | return NULL; | ||
3113 | next = iter->probe->list.next; | ||
3114 | iter->probe = list_entry(next, struct ftrace_func_probe, list); | ||
3115 | hash = iter->probe->ops.func_hash->filter_hash; | ||
3116 | size = 1 << hash->size_bits; | ||
3117 | iter->pidx = 0; | ||
3118 | } | ||
3119 | |||
3120 | hhd = &hash->buckets[iter->pidx]; | ||
3083 | 3121 | ||
3084 | if (hlist_empty(hhd)) { | 3122 | if (hlist_empty(hhd)) { |
3085 | iter->hidx++; | 3123 | iter->pidx++; |
3086 | hnd = NULL; | 3124 | hnd = NULL; |
3087 | goto retry; | 3125 | goto retry; |
3088 | } | 3126 | } |
@@ -3092,7 +3130,7 @@ t_hash_next(struct seq_file *m, loff_t *pos) | |||
3092 | else { | 3130 | else { |
3093 | hnd = hnd->next; | 3131 | hnd = hnd->next; |
3094 | if (!hnd) { | 3132 | if (!hnd) { |
3095 | iter->hidx++; | 3133 | iter->pidx++; |
3096 | goto retry; | 3134 | goto retry; |
3097 | } | 3135 | } |
3098 | } | 3136 | } |
@@ -3100,26 +3138,28 @@ t_hash_next(struct seq_file *m, loff_t *pos) | |||
3100 | if (WARN_ON_ONCE(!hnd)) | 3138 | if (WARN_ON_ONCE(!hnd)) |
3101 | return NULL; | 3139 | return NULL; |
3102 | 3140 | ||
3103 | iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node); | 3141 | iter->probe_entry = hlist_entry(hnd, struct ftrace_func_entry, hlist); |
3104 | 3142 | ||
3105 | return iter; | 3143 | return iter; |
3106 | } | 3144 | } |
3107 | 3145 | ||
3108 | static void *t_hash_start(struct seq_file *m, loff_t *pos) | 3146 | static void *t_probe_start(struct seq_file *m, loff_t *pos) |
3109 | { | 3147 | { |
3110 | struct ftrace_iterator *iter = m->private; | 3148 | struct ftrace_iterator *iter = m->private; |
3111 | void *p = NULL; | 3149 | void *p = NULL; |
3112 | loff_t l; | 3150 | loff_t l; |
3113 | 3151 | ||
3114 | if (!(iter->flags & FTRACE_ITER_DO_HASH)) | 3152 | if (!(iter->flags & FTRACE_ITER_DO_PROBES)) |
3115 | return NULL; | 3153 | return NULL; |
3116 | 3154 | ||
3117 | if (iter->func_pos > *pos) | 3155 | if (iter->func_pos > *pos) |
3118 | return NULL; | 3156 | return NULL; |
3119 | 3157 | ||
3120 | iter->hidx = 0; | 3158 | iter->probe = NULL; |
3159 | iter->probe_entry = NULL; | ||
3160 | iter->pidx = 0; | ||
3121 | for (l = 0; l <= (*pos - iter->func_pos); ) { | 3161 | for (l = 0; l <= (*pos - iter->func_pos); ) { |
3122 | p = t_hash_next(m, &l); | 3162 | p = t_probe_next(m, &l); |
3123 | if (!p) | 3163 | if (!p) |
3124 | break; | 3164 | break; |
3125 | } | 3165 | } |
@@ -3127,50 +3167,42 @@ static void *t_hash_start(struct seq_file *m, loff_t *pos) | |||
3127 | return NULL; | 3167 | return NULL; |
3128 | 3168 | ||
3129 | /* Only set this if we have an item */ | 3169 | /* Only set this if we have an item */ |
3130 | iter->flags |= FTRACE_ITER_HASH; | 3170 | iter->flags |= FTRACE_ITER_PROBE; |
3131 | 3171 | ||
3132 | return iter; | 3172 | return iter; |
3133 | } | 3173 | } |
3134 | 3174 | ||
3135 | static int | 3175 | static int |
3136 | t_hash_show(struct seq_file *m, struct ftrace_iterator *iter) | 3176 | t_probe_show(struct seq_file *m, struct ftrace_iterator *iter) |
3137 | { | 3177 | { |
3138 | struct ftrace_func_probe *rec; | 3178 | struct ftrace_func_entry *probe_entry; |
3179 | struct ftrace_probe_ops *probe_ops; | ||
3180 | struct ftrace_func_probe *probe; | ||
3181 | |||
3182 | probe = iter->probe; | ||
3183 | probe_entry = iter->probe_entry; | ||
3139 | 3184 | ||
3140 | rec = iter->probe; | 3185 | if (WARN_ON_ONCE(!probe || !probe_entry)) |
3141 | if (WARN_ON_ONCE(!rec)) | ||
3142 | return -EIO; | 3186 | return -EIO; |
3143 | 3187 | ||
3144 | if (rec->ops->print) | 3188 | probe_ops = probe->probe_ops; |
3145 | return rec->ops->print(m, rec->ip, rec->ops, rec->data); | ||
3146 | 3189 | ||
3147 | seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func); | 3190 | if (probe_ops->print) |
3191 | return probe_ops->print(m, probe_entry->ip, probe_ops, probe->data); | ||
3148 | 3192 | ||
3149 | if (rec->data) | 3193 | seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip, |
3150 | seq_printf(m, ":%p", rec->data); | 3194 | (void *)probe_ops->func); |
3151 | seq_putc(m, '\n'); | ||
3152 | 3195 | ||
3153 | return 0; | 3196 | return 0; |
3154 | } | 3197 | } |
3155 | 3198 | ||
3156 | static void * | 3199 | static void * |
3157 | t_next(struct seq_file *m, void *v, loff_t *pos) | 3200 | t_func_next(struct seq_file *m, loff_t *pos) |
3158 | { | 3201 | { |
3159 | struct ftrace_iterator *iter = m->private; | 3202 | struct ftrace_iterator *iter = m->private; |
3160 | struct ftrace_ops *ops = iter->ops; | ||
3161 | struct dyn_ftrace *rec = NULL; | 3203 | struct dyn_ftrace *rec = NULL; |
3162 | 3204 | ||
3163 | if (unlikely(ftrace_disabled)) | ||
3164 | return NULL; | ||
3165 | |||
3166 | if (iter->flags & FTRACE_ITER_HASH) | ||
3167 | return t_hash_next(m, pos); | ||
3168 | |||
3169 | (*pos)++; | 3205 | (*pos)++; |
3170 | iter->pos = iter->func_pos = *pos; | ||
3171 | |||
3172 | if (iter->flags & FTRACE_ITER_PRINTALL) | ||
3173 | return t_hash_start(m, pos); | ||
3174 | 3206 | ||
3175 | retry: | 3207 | retry: |
3176 | if (iter->idx >= iter->pg->index) { | 3208 | if (iter->idx >= iter->pg->index) { |
@@ -3181,11 +3213,8 @@ t_next(struct seq_file *m, void *v, loff_t *pos) | |||
3181 | } | 3213 | } |
3182 | } else { | 3214 | } else { |
3183 | rec = &iter->pg->records[iter->idx++]; | 3215 | rec = &iter->pg->records[iter->idx++]; |
3184 | if (((iter->flags & FTRACE_ITER_FILTER) && | 3216 | if (((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) && |
3185 | !(ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))) || | 3217 | !ftrace_lookup_ip(iter->hash, rec->ip)) || |
3186 | |||
3187 | ((iter->flags & FTRACE_ITER_NOTRACE) && | ||
3188 | !ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip)) || | ||
3189 | 3218 | ||
3190 | ((iter->flags & FTRACE_ITER_ENABLED) && | 3219 | ((iter->flags & FTRACE_ITER_ENABLED) && |
3191 | !(rec->flags & FTRACE_FL_ENABLED))) { | 3220 | !(rec->flags & FTRACE_FL_ENABLED))) { |
@@ -3196,24 +3225,51 @@ t_next(struct seq_file *m, void *v, loff_t *pos) | |||
3196 | } | 3225 | } |
3197 | 3226 | ||
3198 | if (!rec) | 3227 | if (!rec) |
3199 | return t_hash_start(m, pos); | 3228 | return NULL; |
3200 | 3229 | ||
3230 | iter->pos = iter->func_pos = *pos; | ||
3201 | iter->func = rec; | 3231 | iter->func = rec; |
3202 | 3232 | ||
3203 | return iter; | 3233 | return iter; |
3204 | } | 3234 | } |
3205 | 3235 | ||
3236 | static void * | ||
3237 | t_next(struct seq_file *m, void *v, loff_t *pos) | ||
3238 | { | ||
3239 | struct ftrace_iterator *iter = m->private; | ||
3240 | loff_t l = *pos; /* t_hash_start() must use original pos */ | ||
3241 | void *ret; | ||
3242 | |||
3243 | if (unlikely(ftrace_disabled)) | ||
3244 | return NULL; | ||
3245 | |||
3246 | if (iter->flags & FTRACE_ITER_PROBE) | ||
3247 | return t_probe_next(m, pos); | ||
3248 | |||
3249 | if (iter->flags & FTRACE_ITER_PRINTALL) { | ||
3250 | /* next must increment pos, and t_probe_start does not */ | ||
3251 | (*pos)++; | ||
3252 | return t_probe_start(m, &l); | ||
3253 | } | ||
3254 | |||
3255 | ret = t_func_next(m, pos); | ||
3256 | |||
3257 | if (!ret) | ||
3258 | return t_probe_start(m, &l); | ||
3259 | |||
3260 | return ret; | ||
3261 | } | ||
3262 | |||
3206 | static void reset_iter_read(struct ftrace_iterator *iter) | 3263 | static void reset_iter_read(struct ftrace_iterator *iter) |
3207 | { | 3264 | { |
3208 | iter->pos = 0; | 3265 | iter->pos = 0; |
3209 | iter->func_pos = 0; | 3266 | iter->func_pos = 0; |
3210 | iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH); | 3267 | iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE); |
3211 | } | 3268 | } |
3212 | 3269 | ||
3213 | static void *t_start(struct seq_file *m, loff_t *pos) | 3270 | static void *t_start(struct seq_file *m, loff_t *pos) |
3214 | { | 3271 | { |
3215 | struct ftrace_iterator *iter = m->private; | 3272 | struct ftrace_iterator *iter = m->private; |
3216 | struct ftrace_ops *ops = iter->ops; | ||
3217 | void *p = NULL; | 3273 | void *p = NULL; |
3218 | loff_t l; | 3274 | loff_t l; |
3219 | 3275 | ||
@@ -3233,20 +3289,19 @@ static void *t_start(struct seq_file *m, loff_t *pos) | |||
3233 | * off, we can short cut and just print out that all | 3289 | * off, we can short cut and just print out that all |
3234 | * functions are enabled. | 3290 | * functions are enabled. |
3235 | */ | 3291 | */ |
3236 | if ((iter->flags & FTRACE_ITER_FILTER && | 3292 | if ((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) && |
3237 | ftrace_hash_empty(ops->func_hash->filter_hash)) || | 3293 | ftrace_hash_empty(iter->hash)) { |
3238 | (iter->flags & FTRACE_ITER_NOTRACE && | 3294 | iter->func_pos = 1; /* Account for the message */ |
3239 | ftrace_hash_empty(ops->func_hash->notrace_hash))) { | ||
3240 | if (*pos > 0) | 3295 | if (*pos > 0) |
3241 | return t_hash_start(m, pos); | 3296 | return t_probe_start(m, pos); |
3242 | iter->flags |= FTRACE_ITER_PRINTALL; | 3297 | iter->flags |= FTRACE_ITER_PRINTALL; |
3243 | /* reset in case of seek/pread */ | 3298 | /* reset in case of seek/pread */ |
3244 | iter->flags &= ~FTRACE_ITER_HASH; | 3299 | iter->flags &= ~FTRACE_ITER_PROBE; |
3245 | return iter; | 3300 | return iter; |
3246 | } | 3301 | } |
3247 | 3302 | ||
3248 | if (iter->flags & FTRACE_ITER_HASH) | 3303 | if (iter->flags & FTRACE_ITER_PROBE) |
3249 | return t_hash_start(m, pos); | 3304 | return t_probe_start(m, pos); |
3250 | 3305 | ||
3251 | /* | 3306 | /* |
3252 | * Unfortunately, we need to restart at ftrace_pages_start | 3307 | * Unfortunately, we need to restart at ftrace_pages_start |
@@ -3256,13 +3311,13 @@ static void *t_start(struct seq_file *m, loff_t *pos) | |||
3256 | iter->pg = ftrace_pages_start; | 3311 | iter->pg = ftrace_pages_start; |
3257 | iter->idx = 0; | 3312 | iter->idx = 0; |
3258 | for (l = 0; l <= *pos; ) { | 3313 | for (l = 0; l <= *pos; ) { |
3259 | p = t_next(m, p, &l); | 3314 | p = t_func_next(m, &l); |
3260 | if (!p) | 3315 | if (!p) |
3261 | break; | 3316 | break; |
3262 | } | 3317 | } |
3263 | 3318 | ||
3264 | if (!p) | 3319 | if (!p) |
3265 | return t_hash_start(m, pos); | 3320 | return t_probe_start(m, pos); |
3266 | 3321 | ||
3267 | return iter; | 3322 | return iter; |
3268 | } | 3323 | } |
@@ -3293,8 +3348,8 @@ static int t_show(struct seq_file *m, void *v) | |||
3293 | struct ftrace_iterator *iter = m->private; | 3348 | struct ftrace_iterator *iter = m->private; |
3294 | struct dyn_ftrace *rec; | 3349 | struct dyn_ftrace *rec; |
3295 | 3350 | ||
3296 | if (iter->flags & FTRACE_ITER_HASH) | 3351 | if (iter->flags & FTRACE_ITER_PROBE) |
3297 | return t_hash_show(m, iter); | 3352 | return t_probe_show(m, iter); |
3298 | 3353 | ||
3299 | if (iter->flags & FTRACE_ITER_PRINTALL) { | 3354 | if (iter->flags & FTRACE_ITER_PRINTALL) { |
3300 | if (iter->flags & FTRACE_ITER_NOTRACE) | 3355 | if (iter->flags & FTRACE_ITER_NOTRACE) |
@@ -3355,12 +3410,13 @@ ftrace_avail_open(struct inode *inode, struct file *file) | |||
3355 | return -ENODEV; | 3410 | return -ENODEV; |
3356 | 3411 | ||
3357 | iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); | 3412 | iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); |
3358 | if (iter) { | 3413 | if (!iter) |
3359 | iter->pg = ftrace_pages_start; | 3414 | return -ENOMEM; |
3360 | iter->ops = &global_ops; | ||
3361 | } | ||
3362 | 3415 | ||
3363 | return iter ? 0 : -ENOMEM; | 3416 | iter->pg = ftrace_pages_start; |
3417 | iter->ops = &global_ops; | ||
3418 | |||
3419 | return 0; | ||
3364 | } | 3420 | } |
3365 | 3421 | ||
3366 | static int | 3422 | static int |
@@ -3369,13 +3425,14 @@ ftrace_enabled_open(struct inode *inode, struct file *file) | |||
3369 | struct ftrace_iterator *iter; | 3425 | struct ftrace_iterator *iter; |
3370 | 3426 | ||
3371 | iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); | 3427 | iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); |
3372 | if (iter) { | 3428 | if (!iter) |
3373 | iter->pg = ftrace_pages_start; | 3429 | return -ENOMEM; |
3374 | iter->flags = FTRACE_ITER_ENABLED; | ||
3375 | iter->ops = &global_ops; | ||
3376 | } | ||
3377 | 3430 | ||
3378 | return iter ? 0 : -ENOMEM; | 3431 | iter->pg = ftrace_pages_start; |
3432 | iter->flags = FTRACE_ITER_ENABLED; | ||
3433 | iter->ops = &global_ops; | ||
3434 | |||
3435 | return 0; | ||
3379 | } | 3436 | } |
3380 | 3437 | ||
3381 | /** | 3438 | /** |
@@ -3440,7 +3497,8 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag, | |||
3440 | ret = -ENOMEM; | 3497 | ret = -ENOMEM; |
3441 | goto out_unlock; | 3498 | goto out_unlock; |
3442 | } | 3499 | } |
3443 | } | 3500 | } else |
3501 | iter->hash = hash; | ||
3444 | 3502 | ||
3445 | if (file->f_mode & FMODE_READ) { | 3503 | if (file->f_mode & FMODE_READ) { |
3446 | iter->pg = ftrace_pages_start; | 3504 | iter->pg = ftrace_pages_start; |
@@ -3470,7 +3528,7 @@ ftrace_filter_open(struct inode *inode, struct file *file) | |||
3470 | struct ftrace_ops *ops = inode->i_private; | 3528 | struct ftrace_ops *ops = inode->i_private; |
3471 | 3529 | ||
3472 | return ftrace_regex_open(ops, | 3530 | return ftrace_regex_open(ops, |
3473 | FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH, | 3531 | FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES, |
3474 | inode, file); | 3532 | inode, file); |
3475 | } | 3533 | } |
3476 | 3534 | ||
@@ -3654,6 +3712,56 @@ ftrace_match_records(struct ftrace_hash *hash, char *buff, int len) | |||
3654 | return match_records(hash, buff, len, NULL); | 3712 | return match_records(hash, buff, len, NULL); |
3655 | } | 3713 | } |
3656 | 3714 | ||
3715 | static void ftrace_ops_update_code(struct ftrace_ops *ops, | ||
3716 | struct ftrace_ops_hash *old_hash) | ||
3717 | { | ||
3718 | struct ftrace_ops *op; | ||
3719 | |||
3720 | if (!ftrace_enabled) | ||
3721 | return; | ||
3722 | |||
3723 | if (ops->flags & FTRACE_OPS_FL_ENABLED) { | ||
3724 | ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash); | ||
3725 | return; | ||
3726 | } | ||
3727 | |||
3728 | /* | ||
3729 | * If this is the shared global_ops filter, then we need to | ||
3730 | * check if there is another ops that shares it, is enabled. | ||
3731 | * If so, we still need to run the modify code. | ||
3732 | */ | ||
3733 | if (ops->func_hash != &global_ops.local_hash) | ||
3734 | return; | ||
3735 | |||
3736 | do_for_each_ftrace_op(op, ftrace_ops_list) { | ||
3737 | if (op->func_hash == &global_ops.local_hash && | ||
3738 | op->flags & FTRACE_OPS_FL_ENABLED) { | ||
3739 | ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash); | ||
3740 | /* Only need to do this once */ | ||
3741 | return; | ||
3742 | } | ||
3743 | } while_for_each_ftrace_op(op); | ||
3744 | } | ||
3745 | |||
3746 | static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops, | ||
3747 | struct ftrace_hash **orig_hash, | ||
3748 | struct ftrace_hash *hash, | ||
3749 | int enable) | ||
3750 | { | ||
3751 | struct ftrace_ops_hash old_hash_ops; | ||
3752 | struct ftrace_hash *old_hash; | ||
3753 | int ret; | ||
3754 | |||
3755 | old_hash = *orig_hash; | ||
3756 | old_hash_ops.filter_hash = ops->func_hash->filter_hash; | ||
3757 | old_hash_ops.notrace_hash = ops->func_hash->notrace_hash; | ||
3758 | ret = ftrace_hash_move(ops, enable, orig_hash, hash); | ||
3759 | if (!ret) { | ||
3760 | ftrace_ops_update_code(ops, &old_hash_ops); | ||
3761 | free_ftrace_hash_rcu(old_hash); | ||
3762 | } | ||
3763 | return ret; | ||
3764 | } | ||
3657 | 3765 | ||
3658 | /* | 3766 | /* |
3659 | * We register the module command as a template to show others how | 3767 | * We register the module command as a template to show others how |
@@ -3661,7 +3769,7 @@ ftrace_match_records(struct ftrace_hash *hash, char *buff, int len) | |||
3661 | */ | 3769 | */ |
3662 | 3770 | ||
3663 | static int | 3771 | static int |
3664 | ftrace_mod_callback(struct ftrace_hash *hash, | 3772 | ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash, |
3665 | char *func, char *cmd, char *module, int enable) | 3773 | char *func, char *cmd, char *module, int enable) |
3666 | { | 3774 | { |
3667 | int ret; | 3775 | int ret; |
@@ -3695,16 +3803,11 @@ core_initcall(ftrace_mod_cmd_init); | |||
3695 | static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip, | 3803 | static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip, |
3696 | struct ftrace_ops *op, struct pt_regs *pt_regs) | 3804 | struct ftrace_ops *op, struct pt_regs *pt_regs) |
3697 | { | 3805 | { |
3698 | struct ftrace_func_probe *entry; | 3806 | struct ftrace_probe_ops *probe_ops; |
3699 | struct hlist_head *hhd; | 3807 | struct ftrace_func_probe *probe; |
3700 | unsigned long key; | ||
3701 | 3808 | ||
3702 | key = hash_long(ip, FTRACE_HASH_BITS); | 3809 | probe = container_of(op, struct ftrace_func_probe, ops); |
3703 | 3810 | probe_ops = probe->probe_ops; | |
3704 | hhd = &ftrace_func_hash[key]; | ||
3705 | |||
3706 | if (hlist_empty(hhd)) | ||
3707 | return; | ||
3708 | 3811 | ||
3709 | /* | 3812 | /* |
3710 | * Disable preemption for these calls to prevent a RCU grace | 3813 | * Disable preemption for these calls to prevent a RCU grace |
@@ -3712,209 +3815,336 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip, | |||
3712 | * on the hash. rcu_read_lock is too dangerous here. | 3815 | * on the hash. rcu_read_lock is too dangerous here. |
3713 | */ | 3816 | */ |
3714 | preempt_disable_notrace(); | 3817 | preempt_disable_notrace(); |
3715 | hlist_for_each_entry_rcu_notrace(entry, hhd, node) { | 3818 | probe_ops->func(ip, parent_ip, probe->tr, probe_ops, probe->data); |
3716 | if (entry->ip == ip) | ||
3717 | entry->ops->func(ip, parent_ip, &entry->data); | ||
3718 | } | ||
3719 | preempt_enable_notrace(); | 3819 | preempt_enable_notrace(); |
3720 | } | 3820 | } |
3721 | 3821 | ||
3722 | static struct ftrace_ops trace_probe_ops __read_mostly = | 3822 | struct ftrace_func_map { |
3723 | { | 3823 | struct ftrace_func_entry entry; |
3724 | .func = function_trace_probe_call, | 3824 | void *data; |
3725 | .flags = FTRACE_OPS_FL_INITIALIZED, | ||
3726 | INIT_OPS_HASH(trace_probe_ops) | ||
3727 | }; | 3825 | }; |
3728 | 3826 | ||
3729 | static int ftrace_probe_registered; | 3827 | struct ftrace_func_mapper { |
3828 | struct ftrace_hash hash; | ||
3829 | }; | ||
3730 | 3830 | ||
3731 | static void __enable_ftrace_function_probe(struct ftrace_ops_hash *old_hash) | 3831 | /** |
3832 | * allocate_ftrace_func_mapper - allocate a new ftrace_func_mapper | ||
3833 | * | ||
3834 | * Returns a ftrace_func_mapper descriptor that can be used to map ips to data. | ||
3835 | */ | ||
3836 | struct ftrace_func_mapper *allocate_ftrace_func_mapper(void) | ||
3732 | { | 3837 | { |
3733 | int ret; | 3838 | struct ftrace_hash *hash; |
3734 | int i; | ||
3735 | 3839 | ||
3736 | if (ftrace_probe_registered) { | 3840 | /* |
3737 | /* still need to update the function call sites */ | 3841 | * The mapper is simply a ftrace_hash, but since the entries |
3738 | if (ftrace_enabled) | 3842 | * in the hash are not ftrace_func_entry type, we define it |
3739 | ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS, | 3843 | * as a separate structure. |
3740 | old_hash); | 3844 | */ |
3741 | return; | 3845 | hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); |
3742 | } | 3846 | return (struct ftrace_func_mapper *)hash; |
3847 | } | ||
3743 | 3848 | ||
3744 | for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) { | 3849 | /** |
3745 | struct hlist_head *hhd = &ftrace_func_hash[i]; | 3850 | * ftrace_func_mapper_find_ip - Find some data mapped to an ip |
3746 | if (hhd->first) | 3851 | * @mapper: The mapper that has the ip maps |
3747 | break; | 3852 | * @ip: the instruction pointer to find the data for |
3748 | } | 3853 | * |
3750 | /* Nothing registered? */ | 3854 | * Returns the data mapped to @ip if found, otherwise NULL. The return |
3750 | if (i == FTRACE_FUNC_HASHSIZE) | 3855 | * is actually the address of the mapper data pointer. The address is |
3751 | return; | 3856 | * returned for use cases where the data is no bigger than a long, and |
3857 | * the user can use the data pointer as its data instead of having to | ||
3858 | * allocate more memory for the reference. | ||
3859 | */ | ||
3860 | void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper, | ||
3861 | unsigned long ip) | ||
3862 | { | ||
3863 | struct ftrace_func_entry *entry; | ||
3864 | struct ftrace_func_map *map; | ||
3752 | 3865 | ||
3753 | ret = ftrace_startup(&trace_probe_ops, 0); | 3866 | entry = ftrace_lookup_ip(&mapper->hash, ip); |
3867 | if (!entry) | ||
3868 | return NULL; | ||
3754 | 3869 | ||
3755 | ftrace_probe_registered = 1; | 3870 | map = (struct ftrace_func_map *)entry; |
3871 | return &map->data; | ||
3756 | } | 3872 | } |
3757 | 3873 | ||
3758 | static bool __disable_ftrace_function_probe(void) | 3874 | /** |
3875 | * ftrace_func_mapper_add_ip - Map some data to an ip | ||
3876 | * @mapper: The mapper that has the ip maps | ||
3877 | * @ip: The instruction pointer address to map @data to | ||
3878 | * @data: The data to map to @ip | ||
3879 | * | ||
3880 | * Returns 0 on success, otherwise an error. | ||
3881 | */ | ||
3882 | int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper, | ||
3883 | unsigned long ip, void *data) | ||
3759 | { | 3884 | { |
3760 | int i; | 3885 | struct ftrace_func_entry *entry; |
3886 | struct ftrace_func_map *map; | ||
3761 | 3887 | ||
3762 | if (!ftrace_probe_registered) | 3888 | entry = ftrace_lookup_ip(&mapper->hash, ip); |
3763 | return false; | 3889 | if (entry) |
3890 | return -EBUSY; | ||
3764 | 3891 | ||
3765 | for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) { | 3892 | map = kmalloc(sizeof(*map), GFP_KERNEL); |
3766 | struct hlist_head *hhd = &ftrace_func_hash[i]; | 3893 | if (!map) |
3767 | if (hhd->first) | 3894 | return -ENOMEM; |
3768 | return false; | ||
3769 | } | ||
3770 | 3895 | ||
3771 | /* no more funcs left */ | 3896 | map->entry.ip = ip; |
3772 | ftrace_shutdown(&trace_probe_ops, 0); | 3897 | map->data = data; |
3773 | 3898 | ||
3774 | ftrace_probe_registered = 0; | 3899 | __add_hash_entry(&mapper->hash, &map->entry); |
3775 | return true; | ||
3776 | } | ||
3777 | 3900 | ||
3901 | return 0; | ||
3902 | } | ||
3778 | 3903 | ||
3779 | static void ftrace_free_entry(struct ftrace_func_probe *entry) | 3904 | /** |
3905 | * ftrace_func_mapper_remove_ip - Remove an ip from the mapping | ||
3906 | * @mapper: The mapper that has the ip maps | ||
3907 | * @ip: The instruction pointer address to remove the data from | ||
3908 | * | ||
3909 | * Returns the data if it is found, otherwise NULL. | ||
3910 | * Note, if the data pointer is used as the data itself (see | ||
3911 | * ftrace_func_mapper_find_ip()), then the return value may be meaningless | ||
3912 | * if the data pointer was set to zero. | ||
3913 | */ | ||
3914 | void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper, | ||
3915 | unsigned long ip) | ||
3780 | { | 3916 | { |
3781 | if (entry->ops->free) | 3917 | struct ftrace_func_entry *entry; |
3782 | entry->ops->free(entry->ops, entry->ip, &entry->data); | 3918 | struct ftrace_func_map *map; |
3919 | void *data; | ||
3920 | |||
3921 | entry = ftrace_lookup_ip(&mapper->hash, ip); | ||
3922 | if (!entry) | ||
3923 | return NULL; | ||
3924 | |||
3925 | map = (struct ftrace_func_map *)entry; | ||
3926 | data = map->data; | ||
3927 | |||
3928 | remove_hash_entry(&mapper->hash, entry); | ||
3783 | kfree(entry); | 3929 | kfree(entry); |
3930 | |||
3931 | return data; | ||
3932 | } | ||
3933 | |||
3934 | /** | ||
3935 | * free_ftrace_func_mapper - free a mapping of ips and data | ||
3936 | * @mapper: The mapper that has the ip maps | ||
3937 | * @free_func: A function to be called on each data item. | ||
3938 | * | ||
3939 | * This is used to free the function mapper. The @free_func is optional | ||
3940 | * and can be used if the data needs to be freed as well. | ||
3941 | */ | ||
3942 | void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper, | ||
3943 | ftrace_mapper_func free_func) | ||
3944 | { | ||
3945 | struct ftrace_func_entry *entry; | ||
3946 | struct ftrace_func_map *map; | ||
3947 | struct hlist_head *hhd; | ||
3948 | int size = 1 << mapper->hash.size_bits; | ||
3949 | int i; | ||
3950 | |||
3951 | if (free_func && mapper->hash.count) { | ||
3952 | for (i = 0; i < size; i++) { | ||
3953 | hhd = &mapper->hash.buckets[i]; | ||
3954 | hlist_for_each_entry(entry, hhd, hlist) { | ||
3955 | map = (struct ftrace_func_map *)entry; | ||
3956 | free_func(map); | ||
3957 | } | ||
3958 | } | ||
3959 | } | ||
3960 | free_ftrace_hash(&mapper->hash); | ||
3961 | } | ||
3962 | |||
3963 | static void release_probe(struct ftrace_func_probe *probe) | ||
3964 | { | ||
3965 | struct ftrace_probe_ops *probe_ops; | ||
3966 | |||
3967 | mutex_lock(&ftrace_lock); | ||
3968 | |||
3969 | WARN_ON(probe->ref <= 0); | ||
3970 | |||
3971 | /* Subtract the ref that was used to protect this instance */ | ||
3972 | probe->ref--; | ||
3973 | |||
3974 | if (!probe->ref) { | ||
3975 | probe_ops = probe->probe_ops; | ||
3976 | /* | ||
3977 | * Sending zero as ip tells probe_ops to free | ||
3978 | * the probe->data itself | ||
3979 | */ | ||
3980 | if (probe_ops->free) | ||
3981 | probe_ops->free(probe_ops, probe->tr, 0, probe->data); | ||
3982 | list_del(&probe->list); | ||
3983 | kfree(probe); | ||
3984 | } | ||
3985 | mutex_unlock(&ftrace_lock); | ||
3986 | } | ||
3987 | |||
3988 | static void acquire_probe_locked(struct ftrace_func_probe *probe) | ||
3989 | { | ||
3990 | /* | ||
3991 | * Add one ref to keep it from being freed when releasing the | ||
3992 | * ftrace_lock mutex. | ||
3993 | */ | ||
3994 | probe->ref++; | ||
3784 | } | 3995 | } |
3785 | 3996 | ||
3786 | int | 3997 | int |
3787 | register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | 3998 | register_ftrace_function_probe(char *glob, struct trace_array *tr, |
3788 | void *data) | 3999 | struct ftrace_probe_ops *probe_ops, |
4000 | void *data) | ||
3789 | { | 4001 | { |
3790 | struct ftrace_ops_hash old_hash_ops; | 4002 | struct ftrace_func_entry *entry; |
3791 | struct ftrace_func_probe *entry; | 4003 | struct ftrace_func_probe *probe; |
3792 | struct ftrace_glob func_g; | 4004 | struct ftrace_hash **orig_hash; |
3793 | struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash; | 4005 | struct ftrace_hash *old_hash; |
3794 | struct ftrace_hash *old_hash = *orig_hash; | ||
3795 | struct ftrace_hash *hash; | 4006 | struct ftrace_hash *hash; |
3796 | struct ftrace_page *pg; | ||
3797 | struct dyn_ftrace *rec; | ||
3798 | int not; | ||
3799 | unsigned long key; | ||
3800 | int count = 0; | 4007 | int count = 0; |
4008 | int size; | ||
3801 | int ret; | 4009 | int ret; |
4010 | int i; | ||
3802 | 4011 | ||
3803 | func_g.type = filter_parse_regex(glob, strlen(glob), | 4012 | if (WARN_ON(!tr)) |
3804 | &func_g.search, ¬); | ||
3805 | func_g.len = strlen(func_g.search); | ||
3806 | |||
3807 | /* we do not support '!' for function probes */ | ||
3808 | if (WARN_ON(not)) | ||
3809 | return -EINVAL; | 4013 | return -EINVAL; |
3810 | 4014 | ||
3811 | mutex_lock(&trace_probe_ops.func_hash->regex_lock); | 4015 | /* We do not support '!' for function probes */ |
4016 | if (WARN_ON(glob[0] == '!')) | ||
4017 | return -EINVAL; | ||
3812 | 4018 | ||
3813 | old_hash_ops.filter_hash = old_hash; | ||
3814 | /* Probes only have filters */ | ||
3815 | old_hash_ops.notrace_hash = NULL; | ||
3816 | 4019 | ||
3817 | hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash); | 4020 | mutex_lock(&ftrace_lock); |
3818 | if (!hash) { | 4021 | /* Check if the probe_ops is already registered */ |
3819 | count = -ENOMEM; | 4022 | list_for_each_entry(probe, &tr->func_probes, list) { |
3820 | goto out; | 4023 | if (probe->probe_ops == probe_ops) |
4024 | break; | ||
3821 | } | 4025 | } |
3822 | 4026 | if (&probe->list == &tr->func_probes) { | |
3823 | if (unlikely(ftrace_disabled)) { | 4027 | probe = kzalloc(sizeof(*probe), GFP_KERNEL); |
3824 | count = -ENODEV; | 4028 | if (!probe) { |
3825 | goto out; | 4029 | mutex_unlock(&ftrace_lock); |
4030 | return -ENOMEM; | ||
4031 | } | ||
4032 | probe->probe_ops = probe_ops; | ||
4033 | probe->ops.func = function_trace_probe_call; | ||
4034 | probe->tr = tr; | ||
4035 | ftrace_ops_init(&probe->ops); | ||
4036 | list_add(&probe->list, &tr->func_probes); | ||
3826 | } | 4037 | } |
3827 | 4038 | ||
3828 | mutex_lock(&ftrace_lock); | 4039 | acquire_probe_locked(probe); |
3829 | 4040 | ||
3830 | do_for_each_ftrace_rec(pg, rec) { | 4041 | mutex_unlock(&ftrace_lock); |
3831 | 4042 | ||
3832 | if (rec->flags & FTRACE_FL_DISABLED) | 4043 | mutex_lock(&probe->ops.func_hash->regex_lock); |
3833 | continue; | ||
3834 | 4044 | ||
3835 | if (!ftrace_match_record(rec, &func_g, NULL, 0)) | 4045 | orig_hash = &probe->ops.func_hash->filter_hash; |
3836 | continue; | 4046 | old_hash = *orig_hash; |
4047 | hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash); | ||
3837 | 4048 | ||
3838 | entry = kmalloc(sizeof(*entry), GFP_KERNEL); | 4049 | ret = ftrace_match_records(hash, glob, strlen(glob)); |
3839 | if (!entry) { | ||
3840 | /* If we did not process any, then return error */ | ||
3841 | if (!count) | ||
3842 | count = -ENOMEM; | ||
3843 | goto out_unlock; | ||
3844 | } | ||
3845 | 4050 | ||
3846 | count++; | 4051 | /* Nothing found? */ |
4052 | if (!ret) | ||
4053 | ret = -EINVAL; | ||
3847 | 4054 | ||
3848 | entry->data = data; | 4055 | if (ret < 0) |
4056 | goto out; | ||
3849 | 4057 | ||
3850 | /* | 4058 | size = 1 << hash->size_bits; |
3851 | * The caller might want to do something special | 4059 | for (i = 0; i < size; i++) { |
3852 | * for each function we find. We call the callback | 4060 | hlist_for_each_entry(entry, &hash->buckets[i], hlist) { |
3853 | * to give the caller an opportunity to do so. | 4061 | if (ftrace_lookup_ip(old_hash, entry->ip)) |
3854 | */ | ||
3855 | if (ops->init) { | ||
3856 | if (ops->init(ops, rec->ip, &entry->data) < 0) { | ||
3857 | /* caller does not like this func */ | ||
3858 | kfree(entry); | ||
3859 | continue; | 4062 | continue; |
4063 | /* | ||
4064 | * The caller might want to do something special | ||
4065 | * for each function we find. We call the callback | ||
4066 | * to give the caller an opportunity to do so. | ||
4067 | */ | ||
4068 | if (probe_ops->init) { | ||
4069 | ret = probe_ops->init(probe_ops, tr, | ||
4070 | entry->ip, data, | ||
4071 | &probe->data); | ||
4072 | if (ret < 0) { | ||
4073 | if (probe_ops->free && count) | ||
4074 | probe_ops->free(probe_ops, tr, | ||
4075 | 0, probe->data); | ||
4076 | probe->data = NULL; | ||
4077 | goto out; | ||
4078 | } | ||
3860 | } | 4079 | } |
4080 | count++; | ||
3861 | } | 4081 | } |
4082 | } | ||
3862 | 4083 | ||
3863 | ret = enter_record(hash, rec, 0); | 4084 | mutex_lock(&ftrace_lock); |
3864 | if (ret < 0) { | ||
3865 | kfree(entry); | ||
3866 | count = ret; | ||
3867 | goto out_unlock; | ||
3868 | } | ||
3869 | |||
3870 | entry->ops = ops; | ||
3871 | entry->ip = rec->ip; | ||
3872 | |||
3873 | key = hash_long(entry->ip, FTRACE_HASH_BITS); | ||
3874 | hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]); | ||
3875 | 4085 | ||
3876 | } while_for_each_ftrace_rec(); | 4086 | if (!count) { |
4087 | /* Nothing was added? */ | ||
4088 | ret = -EINVAL; | ||
4089 | goto out_unlock; | ||
4090 | } | ||
3877 | 4091 | ||
3878 | ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash); | 4092 | ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash, |
4093 | hash, 1); | ||
4094 | if (ret < 0) | ||
4095 | goto err_unlock; | ||
3879 | 4096 | ||
3880 | __enable_ftrace_function_probe(&old_hash_ops); | 4097 | /* One ref for each new function traced */ |
4098 | probe->ref += count; | ||
3881 | 4099 | ||
3882 | if (!ret) | 4100 | if (!(probe->ops.flags & FTRACE_OPS_FL_ENABLED)) |
3883 | free_ftrace_hash_rcu(old_hash); | 4101 | ret = ftrace_startup(&probe->ops, 0); |
3884 | else | ||
3885 | count = ret; | ||
3886 | 4102 | ||
3887 | out_unlock: | 4103 | out_unlock: |
3888 | mutex_unlock(&ftrace_lock); | 4104 | mutex_unlock(&ftrace_lock); |
4105 | |||
4106 | if (!ret) | ||
4107 | ret = count; | ||
3889 | out: | 4108 | out: |
3890 | mutex_unlock(&trace_probe_ops.func_hash->regex_lock); | 4109 | mutex_unlock(&probe->ops.func_hash->regex_lock); |
3891 | free_ftrace_hash(hash); | 4110 | free_ftrace_hash(hash); |
3892 | 4111 | ||
3893 | return count; | 4112 | release_probe(probe); |
3894 | } | ||
3895 | 4113 | ||
3896 | enum { | 4114 | return ret; |
3897 | PROBE_TEST_FUNC = 1, | ||
3898 | PROBE_TEST_DATA = 2 | ||
3899 | }; | ||
3900 | 4115 | ||
3901 | static void | 4116 | err_unlock: |
3902 | __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | 4117 | if (!probe_ops->free || !count) |
3903 | void *data, int flags) | 4118 | goto out_unlock; |
4119 | |||
4120 | /* Failed to do the move, need to call the free functions */ | ||
4121 | for (i = 0; i < size; i++) { | ||
4122 | hlist_for_each_entry(entry, &hash->buckets[i], hlist) { | ||
4123 | if (ftrace_lookup_ip(old_hash, entry->ip)) | ||
4124 | continue; | ||
4125 | probe_ops->free(probe_ops, tr, entry->ip, probe->data); | ||
4126 | } | ||
4127 | } | ||
4128 | goto out_unlock; | ||
4129 | } | ||
4130 | |||
4131 | int | ||
4132 | unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr, | ||
4133 | struct ftrace_probe_ops *probe_ops) | ||
3904 | { | 4134 | { |
3905 | struct ftrace_ops_hash old_hash_ops; | 4135 | struct ftrace_ops_hash old_hash_ops; |
3906 | struct ftrace_func_entry *rec_entry; | 4136 | struct ftrace_func_entry *entry; |
3907 | struct ftrace_func_probe *entry; | 4137 | struct ftrace_func_probe *probe; |
3908 | struct ftrace_func_probe *p; | ||
3909 | struct ftrace_glob func_g; | 4138 | struct ftrace_glob func_g; |
3910 | struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash; | 4139 | struct ftrace_hash **orig_hash; |
3911 | struct ftrace_hash *old_hash = *orig_hash; | 4140 | struct ftrace_hash *old_hash; |
3912 | struct list_head free_list; | 4141 | struct ftrace_hash *hash = NULL; |
3913 | struct ftrace_hash *hash; | ||
3914 | struct hlist_node *tmp; | 4142 | struct hlist_node *tmp; |
4143 | struct hlist_head hhd; | ||
3915 | char str[KSYM_SYMBOL_LEN]; | 4144 | char str[KSYM_SYMBOL_LEN]; |
3916 | int i, ret; | 4145 | int count = 0; |
3917 | bool disabled; | 4146 | int i, ret = -ENODEV; |
4147 | int size; | ||
3918 | 4148 | ||
3919 | if (glob && (strcmp(glob, "*") == 0 || !strlen(glob))) | 4149 | if (glob && (strcmp(glob, "*") == 0 || !strlen(glob))) |
3920 | func_g.search = NULL; | 4150 | func_g.search = NULL; |
@@ -3928,95 +4158,104 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | |||
3928 | 4158 | ||
3929 | /* we do not support '!' for function probes */ | 4159 | /* we do not support '!' for function probes */ |
3930 | if (WARN_ON(not)) | 4160 | if (WARN_ON(not)) |
3931 | return; | 4161 | return -EINVAL; |
4162 | } | ||
4163 | |||
4164 | mutex_lock(&ftrace_lock); | ||
4165 | /* Check if the probe_ops is already registered */ | ||
4166 | list_for_each_entry(probe, &tr->func_probes, list) { | ||
4167 | if (probe->probe_ops == probe_ops) | ||
4168 | break; | ||
3932 | } | 4169 | } |
4170 | if (&probe->list == &tr->func_probes) | ||
4171 | goto err_unlock_ftrace; | ||
4172 | |||
4173 | ret = -EINVAL; | ||
4174 | if (!(probe->ops.flags & FTRACE_OPS_FL_INITIALIZED)) | ||
4175 | goto err_unlock_ftrace; | ||
4176 | |||
4177 | acquire_probe_locked(probe); | ||
4178 | |||
4179 | mutex_unlock(&ftrace_lock); | ||
3933 | 4180 | ||
3934 | mutex_lock(&trace_probe_ops.func_hash->regex_lock); | 4181 | mutex_lock(&probe->ops.func_hash->regex_lock); |
4182 | |||
4183 | orig_hash = &probe->ops.func_hash->filter_hash; | ||
4184 | old_hash = *orig_hash; | ||
4185 | |||
4186 | if (ftrace_hash_empty(old_hash)) | ||
4187 | goto out_unlock; | ||
3935 | 4188 | ||
3936 | old_hash_ops.filter_hash = old_hash; | 4189 | old_hash_ops.filter_hash = old_hash; |
3937 | /* Probes only have filters */ | 4190 | /* Probes only have filters */ |
3938 | old_hash_ops.notrace_hash = NULL; | 4191 | old_hash_ops.notrace_hash = NULL; |
3939 | 4192 | ||
3940 | hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); | 4193 | ret = -ENOMEM; |
4194 | hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash); | ||
3941 | if (!hash) | 4195 | if (!hash) |
3942 | /* Hmm, should report this somehow */ | ||
3943 | goto out_unlock; | 4196 | goto out_unlock; |
3944 | 4197 | ||
3945 | INIT_LIST_HEAD(&free_list); | 4198 | INIT_HLIST_HEAD(&hhd); |
3946 | |||
3947 | for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) { | ||
3948 | struct hlist_head *hhd = &ftrace_func_hash[i]; | ||
3949 | 4199 | ||
3950 | hlist_for_each_entry_safe(entry, tmp, hhd, node) { | 4200 | size = 1 << hash->size_bits; |
3951 | 4201 | for (i = 0; i < size; i++) { | |
3952 | /* break up if statements for readability */ | 4202 | hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) { |
3953 | if ((flags & PROBE_TEST_FUNC) && entry->ops != ops) | ||
3954 | continue; | ||
3955 | |||
3956 | if ((flags & PROBE_TEST_DATA) && entry->data != data) | ||
3957 | continue; | ||
3958 | 4203 | ||
3959 | /* do this last, since it is the most expensive */ | ||
3960 | if (func_g.search) { | 4204 | if (func_g.search) { |
3961 | kallsyms_lookup(entry->ip, NULL, NULL, | 4205 | kallsyms_lookup(entry->ip, NULL, NULL, |
3962 | NULL, str); | 4206 | NULL, str); |
3963 | if (!ftrace_match(str, &func_g)) | 4207 | if (!ftrace_match(str, &func_g)) |
3964 | continue; | 4208 | continue; |
3965 | } | 4209 | } |
3966 | 4210 | count++; | |
3967 | rec_entry = ftrace_lookup_ip(hash, entry->ip); | 4211 | remove_hash_entry(hash, entry); |
3968 | /* It is possible more than one entry had this ip */ | 4212 | hlist_add_head(&entry->hlist, &hhd); |
3969 | if (rec_entry) | ||
3970 | free_hash_entry(hash, rec_entry); | ||
3971 | |||
3972 | hlist_del_rcu(&entry->node); | ||
3973 | list_add(&entry->free_list, &free_list); | ||
3974 | } | 4213 | } |
3975 | } | 4214 | } |
4215 | |||
4216 | /* Nothing found? */ | ||
4217 | if (!count) { | ||
4218 | ret = -EINVAL; | ||
4219 | goto out_unlock; | ||
4220 | } | ||
4221 | |||
3976 | mutex_lock(&ftrace_lock); | 4222 | mutex_lock(&ftrace_lock); |
3977 | disabled = __disable_ftrace_function_probe(); | 4223 | |
3978 | /* | 4224 | WARN_ON(probe->ref < count); |
3979 | * Remove after the disable is called. Otherwise, if the last | 4225 | |
3980 | * probe is removed, a null hash means *all enabled*. | 4226 | probe->ref -= count; |
3981 | */ | 4227 | |
3982 | ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash); | 4228 | if (ftrace_hash_empty(hash)) |
4229 | ftrace_shutdown(&probe->ops, 0); | ||
4230 | |||
4231 | ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash, | ||
4232 | hash, 1); | ||
3983 | 4233 | ||
3984 | /* still need to update the function call sites */ | 4234 | /* still need to update the function call sites */ |
3985 | if (ftrace_enabled && !disabled) | 4235 | if (ftrace_enabled && !ftrace_hash_empty(hash)) |
3986 | ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS, | 4236 | ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS, |
3987 | &old_hash_ops); | 4237 | &old_hash_ops); |
3988 | synchronize_sched(); | 4238 | synchronize_sched(); |
3989 | if (!ret) | ||
3990 | free_ftrace_hash_rcu(old_hash); | ||
3991 | 4239 | ||
3992 | list_for_each_entry_safe(entry, p, &free_list, free_list) { | 4240 | hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) { |
3993 | list_del(&entry->free_list); | 4241 | hlist_del(&entry->hlist); |
3994 | ftrace_free_entry(entry); | 4242 | if (probe_ops->free) |
4243 | probe_ops->free(probe_ops, tr, entry->ip, probe->data); | ||
4244 | kfree(entry); | ||
3995 | } | 4245 | } |
3996 | mutex_unlock(&ftrace_lock); | 4246 | mutex_unlock(&ftrace_lock); |
3997 | 4247 | ||
3998 | out_unlock: | 4248 | out_unlock: |
3999 | mutex_unlock(&trace_probe_ops.func_hash->regex_lock); | 4249 | mutex_unlock(&probe->ops.func_hash->regex_lock); |
4000 | free_ftrace_hash(hash); | 4250 | free_ftrace_hash(hash); |
4001 | } | ||
4002 | 4251 | ||
4003 | void | 4252 | release_probe(probe); |
4004 | unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | ||
4005 | void *data) | ||
4006 | { | ||
4007 | __unregister_ftrace_function_probe(glob, ops, data, | ||
4008 | PROBE_TEST_FUNC | PROBE_TEST_DATA); | ||
4009 | } | ||
4010 | 4253 | ||
4011 | void | 4254 | return ret; |
4012 | unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops) | ||
4013 | { | ||
4014 | __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC); | ||
4015 | } | ||
4016 | 4255 | ||
4017 | void unregister_ftrace_function_probe_all(char *glob) | 4256 | err_unlock_ftrace: |
4018 | { | 4257 | mutex_unlock(&ftrace_lock); |
4019 | __unregister_ftrace_function_probe(glob, NULL, NULL, 0); | 4258 | return ret; |
4020 | } | 4259 | } |
4021 | 4260 | ||
4022 | static LIST_HEAD(ftrace_commands); | 4261 | static LIST_HEAD(ftrace_commands); |
@@ -4068,9 +4307,11 @@ __init int unregister_ftrace_command(struct ftrace_func_command *cmd) | |||
4068 | return ret; | 4307 | return ret; |
4069 | } | 4308 | } |
4070 | 4309 | ||
4071 | static int ftrace_process_regex(struct ftrace_hash *hash, | 4310 | static int ftrace_process_regex(struct ftrace_iterator *iter, |
4072 | char *buff, int len, int enable) | 4311 | char *buff, int len, int enable) |
4073 | { | 4312 | { |
4313 | struct ftrace_hash *hash = iter->hash; | ||
4314 | struct trace_array *tr = iter->ops->private; | ||
4074 | char *func, *command, *next = buff; | 4315 | char *func, *command, *next = buff; |
4075 | struct ftrace_func_command *p; | 4316 | struct ftrace_func_command *p; |
4076 | int ret = -EINVAL; | 4317 | int ret = -EINVAL; |
@@ -4090,10 +4331,13 @@ static int ftrace_process_regex(struct ftrace_hash *hash, | |||
4090 | 4331 | ||
4091 | command = strsep(&next, ":"); | 4332 | command = strsep(&next, ":"); |
4092 | 4333 | ||
4334 | if (WARN_ON_ONCE(!tr)) | ||
4335 | return -EINVAL; | ||
4336 | |||
4093 | mutex_lock(&ftrace_cmd_mutex); | 4337 | mutex_lock(&ftrace_cmd_mutex); |
4094 | list_for_each_entry(p, &ftrace_commands, list) { | 4338 | list_for_each_entry(p, &ftrace_commands, list) { |
4095 | if (strcmp(p->name, command) == 0) { | 4339 | if (strcmp(p->name, command) == 0) { |
4096 | ret = p->func(hash, func, command, next, enable); | 4340 | ret = p->func(tr, hash, func, command, next, enable); |
4097 | goto out_unlock; | 4341 | goto out_unlock; |
4098 | } | 4342 | } |
4099 | } | 4343 | } |
@@ -4130,7 +4374,7 @@ ftrace_regex_write(struct file *file, const char __user *ubuf, | |||
4130 | 4374 | ||
4131 | if (read >= 0 && trace_parser_loaded(parser) && | 4375 | if (read >= 0 && trace_parser_loaded(parser) && |
4132 | !trace_parser_cont(parser)) { | 4376 | !trace_parser_cont(parser)) { |
4133 | ret = ftrace_process_regex(iter->hash, parser->buffer, | 4377 | ret = ftrace_process_regex(iter, parser->buffer, |
4134 | parser->idx, enable); | 4378 | parser->idx, enable); |
4135 | trace_parser_clear(parser); | 4379 | trace_parser_clear(parser); |
4136 | if (ret < 0) | 4380 | if (ret < 0) |
@@ -4175,44 +4419,11 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove) | |||
4175 | return add_hash_entry(hash, ip); | 4419 | return add_hash_entry(hash, ip); |
4176 | } | 4420 | } |
4177 | 4421 | ||
4178 | static void ftrace_ops_update_code(struct ftrace_ops *ops, | ||
4179 | struct ftrace_ops_hash *old_hash) | ||
4180 | { | ||
4181 | struct ftrace_ops *op; | ||
4182 | |||
4183 | if (!ftrace_enabled) | ||
4184 | return; | ||
4185 | |||
4186 | if (ops->flags & FTRACE_OPS_FL_ENABLED) { | ||
4187 | ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash); | ||
4188 | return; | ||
4189 | } | ||
4190 | |||
4191 | /* | ||
4192 | * If this is the shared global_ops filter, then we need to | ||
4193 | * check if there is another ops that shares it, is enabled. | ||
4194 | * If so, we still need to run the modify code. | ||
4195 | */ | ||
4196 | if (ops->func_hash != &global_ops.local_hash) | ||
4197 | return; | ||
4198 | |||
4199 | do_for_each_ftrace_op(op, ftrace_ops_list) { | ||
4200 | if (op->func_hash == &global_ops.local_hash && | ||
4201 | op->flags & FTRACE_OPS_FL_ENABLED) { | ||
4202 | ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash); | ||
4203 | /* Only need to do this once */ | ||
4204 | return; | ||
4205 | } | ||
4206 | } while_for_each_ftrace_op(op); | ||
4207 | } | ||
4208 | |||
4209 | static int | 4422 | static int |
4210 | ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, | 4423 | ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, |
4211 | unsigned long ip, int remove, int reset, int enable) | 4424 | unsigned long ip, int remove, int reset, int enable) |
4212 | { | 4425 | { |
4213 | struct ftrace_hash **orig_hash; | 4426 | struct ftrace_hash **orig_hash; |
4214 | struct ftrace_ops_hash old_hash_ops; | ||
4215 | struct ftrace_hash *old_hash; | ||
4216 | struct ftrace_hash *hash; | 4427 | struct ftrace_hash *hash; |
4217 | int ret; | 4428 | int ret; |
4218 | 4429 | ||
@@ -4247,14 +4458,7 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, | |||
4247 | } | 4458 | } |
4248 | 4459 | ||
4249 | mutex_lock(&ftrace_lock); | 4460 | mutex_lock(&ftrace_lock); |
4250 | old_hash = *orig_hash; | 4461 | ret = ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable); |
4251 | old_hash_ops.filter_hash = ops->func_hash->filter_hash; | ||
4252 | old_hash_ops.notrace_hash = ops->func_hash->notrace_hash; | ||
4253 | ret = ftrace_hash_move(ops, enable, orig_hash, hash); | ||
4254 | if (!ret) { | ||
4255 | ftrace_ops_update_code(ops, &old_hash_ops); | ||
4256 | free_ftrace_hash_rcu(old_hash); | ||
4257 | } | ||
4258 | mutex_unlock(&ftrace_lock); | 4462 | mutex_unlock(&ftrace_lock); |
4259 | 4463 | ||
4260 | out_regex_unlock: | 4464 | out_regex_unlock: |
@@ -4493,10 +4697,8 @@ static void __init set_ftrace_early_filters(void) | |||
4493 | int ftrace_regex_release(struct inode *inode, struct file *file) | 4697 | int ftrace_regex_release(struct inode *inode, struct file *file) |
4494 | { | 4698 | { |
4495 | struct seq_file *m = (struct seq_file *)file->private_data; | 4699 | struct seq_file *m = (struct seq_file *)file->private_data; |
4496 | struct ftrace_ops_hash old_hash_ops; | ||
4497 | struct ftrace_iterator *iter; | 4700 | struct ftrace_iterator *iter; |
4498 | struct ftrace_hash **orig_hash; | 4701 | struct ftrace_hash **orig_hash; |
4499 | struct ftrace_hash *old_hash; | ||
4500 | struct trace_parser *parser; | 4702 | struct trace_parser *parser; |
4501 | int filter_hash; | 4703 | int filter_hash; |
4502 | int ret; | 4704 | int ret; |
@@ -4526,16 +4728,12 @@ int ftrace_regex_release(struct inode *inode, struct file *file) | |||
4526 | orig_hash = &iter->ops->func_hash->notrace_hash; | 4728 | orig_hash = &iter->ops->func_hash->notrace_hash; |
4527 | 4729 | ||
4528 | mutex_lock(&ftrace_lock); | 4730 | mutex_lock(&ftrace_lock); |
4529 | old_hash = *orig_hash; | 4731 | ret = ftrace_hash_move_and_update_ops(iter->ops, orig_hash, |
4530 | old_hash_ops.filter_hash = iter->ops->func_hash->filter_hash; | 4732 | iter->hash, filter_hash); |
4531 | old_hash_ops.notrace_hash = iter->ops->func_hash->notrace_hash; | ||
4532 | ret = ftrace_hash_move(iter->ops, filter_hash, | ||
4533 | orig_hash, iter->hash); | ||
4534 | if (!ret) { | ||
4535 | ftrace_ops_update_code(iter->ops, &old_hash_ops); | ||
4536 | free_ftrace_hash_rcu(old_hash); | ||
4537 | } | ||
4538 | mutex_unlock(&ftrace_lock); | 4733 | mutex_unlock(&ftrace_lock); |
4734 | } else { | ||
4735 | /* For read only, the hash is the ops hash */ | ||
4736 | iter->hash = NULL; | ||
4539 | } | 4737 | } |
4540 | 4738 | ||
4541 | mutex_unlock(&iter->ops->func_hash->regex_lock); | 4739 | mutex_unlock(&iter->ops->func_hash->regex_lock); |
@@ -5274,6 +5472,50 @@ void ftrace_module_init(struct module *mod) | |||
5274 | } | 5472 | } |
5275 | #endif /* CONFIG_MODULES */ | 5473 | #endif /* CONFIG_MODULES */ |
5276 | 5474 | ||
5475 | void __init ftrace_free_init_mem(void) | ||
5476 | { | ||
5477 | unsigned long start = (unsigned long)(&__init_begin); | ||
5478 | unsigned long end = (unsigned long)(&__init_end); | ||
5479 | struct ftrace_page **last_pg = &ftrace_pages_start; | ||
5480 | struct ftrace_page *pg; | ||
5481 | struct dyn_ftrace *rec; | ||
5482 | struct dyn_ftrace key; | ||
5483 | int order; | ||
5484 | |||
5485 | key.ip = start; | ||
5486 | key.flags = end; /* overload flags, as it is unsigned long */ | ||
5487 | |||
5488 | mutex_lock(&ftrace_lock); | ||
5489 | |||
5490 | for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) { | ||
5491 | if (end < pg->records[0].ip || | ||
5492 | start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE)) | ||
5493 | continue; | ||
5494 | again: | ||
5495 | rec = bsearch(&key, pg->records, pg->index, | ||
5496 | sizeof(struct dyn_ftrace), | ||
5497 | ftrace_cmp_recs); | ||
5498 | if (!rec) | ||
5499 | continue; | ||
5500 | pg->index--; | ||
5501 | if (!pg->index) { | ||
5502 | *last_pg = pg->next; | ||
5503 | order = get_count_order(pg->size / ENTRIES_PER_PAGE); | ||
5504 | free_pages((unsigned long)pg->records, order); | ||
5505 | kfree(pg); | ||
5506 | pg = container_of(last_pg, struct ftrace_page, next); | ||
5507 | if (!(*last_pg)) | ||
5508 | ftrace_pages = pg; | ||
5509 | continue; | ||
5510 | } | ||
5511 | memmove(rec, rec + 1, | ||
5512 | (pg->index - (rec - pg->records)) * sizeof(*rec)); | ||
5513 | /* More than one function may be in this block */ | ||
5514 | goto again; | ||
5515 | } | ||
5516 | mutex_unlock(&ftrace_lock); | ||
5517 | } | ||
5518 | |||
5277 | void __init ftrace_init(void) | 5519 | void __init ftrace_init(void) |
5278 | { | 5520 | { |
5279 | extern unsigned long __start_mcount_loc[]; | 5521 | extern unsigned long __start_mcount_loc[]; |
@@ -5316,25 +5558,13 @@ void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops) | |||
5316 | 5558 | ||
5317 | static void ftrace_update_trampoline(struct ftrace_ops *ops) | 5559 | static void ftrace_update_trampoline(struct ftrace_ops *ops) |
5318 | { | 5560 | { |
5319 | |||
5320 | /* | ||
5321 | * Currently there's no safe way to free a trampoline when the kernel | ||
5322 | * is configured with PREEMPT. That is because a task could be preempted | ||
5323 | * when it jumped to the trampoline, it may be preempted for a long time | ||
5324 | * depending on the system load, and currently there's no way to know | ||
5325 | * when it will be off the trampoline. If the trampoline is freed | ||
5326 | * too early, when the task runs again, it will be executing on freed | ||
5327 | * memory and crash. | ||
5328 | */ | ||
5329 | #ifdef CONFIG_PREEMPT | ||
5330 | /* Currently, only non dynamic ops can have a trampoline */ | ||
5331 | if (ops->flags & FTRACE_OPS_FL_DYNAMIC) | ||
5332 | return; | ||
5333 | #endif | ||
5334 | |||
5335 | arch_ftrace_update_trampoline(ops); | 5561 | arch_ftrace_update_trampoline(ops); |
5336 | } | 5562 | } |
5337 | 5563 | ||
5564 | void ftrace_init_trace_array(struct trace_array *tr) | ||
5565 | { | ||
5566 | INIT_LIST_HEAD(&tr->func_probes); | ||
5567 | } | ||
5338 | #else | 5568 | #else |
5339 | 5569 | ||
5340 | static struct ftrace_ops global_ops = { | 5570 | static struct ftrace_ops global_ops = { |
@@ -5389,6 +5619,7 @@ __init void ftrace_init_global_array_ops(struct trace_array *tr) | |||
5389 | { | 5619 | { |
5390 | tr->ops = &global_ops; | 5620 | tr->ops = &global_ops; |
5391 | tr->ops->private = tr; | 5621 | tr->ops->private = tr; |
5622 | ftrace_init_trace_array(tr); | ||
5392 | } | 5623 | } |
5393 | 5624 | ||
5394 | void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func) | 5625 | void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func) |
@@ -5543,6 +5774,43 @@ ftrace_filter_pid_sched_switch_probe(void *data, bool preempt, | |||
5543 | trace_ignore_this_task(pid_list, next)); | 5774 | trace_ignore_this_task(pid_list, next)); |
5544 | } | 5775 | } |
5545 | 5776 | ||
5777 | static void | ||
5778 | ftrace_pid_follow_sched_process_fork(void *data, | ||
5779 | struct task_struct *self, | ||
5780 | struct task_struct *task) | ||
5781 | { | ||
5782 | struct trace_pid_list *pid_list; | ||
5783 | struct trace_array *tr = data; | ||
5784 | |||
5785 | pid_list = rcu_dereference_sched(tr->function_pids); | ||
5786 | trace_filter_add_remove_task(pid_list, self, task); | ||
5787 | } | ||
5788 | |||
5789 | static void | ||
5790 | ftrace_pid_follow_sched_process_exit(void *data, struct task_struct *task) | ||
5791 | { | ||
5792 | struct trace_pid_list *pid_list; | ||
5793 | struct trace_array *tr = data; | ||
5794 | |||
5795 | pid_list = rcu_dereference_sched(tr->function_pids); | ||
5796 | trace_filter_add_remove_task(pid_list, NULL, task); | ||
5797 | } | ||
5798 | |||
5799 | void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) | ||
5800 | { | ||
5801 | if (enable) { | ||
5802 | register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork, | ||
5803 | tr); | ||
5804 | register_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit, | ||
5805 | tr); | ||
5806 | } else { | ||
5807 | unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork, | ||
5808 | tr); | ||
5809 | unregister_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit, | ||
5810 | tr); | ||
5811 | } | ||
5812 | } | ||
5813 | |||
5546 | static void clear_ftrace_pids(struct trace_array *tr) | 5814 | static void clear_ftrace_pids(struct trace_array *tr) |
5547 | { | 5815 | { |
5548 | struct trace_pid_list *pid_list; | 5816 | struct trace_pid_list *pid_list; |
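Taken together, the ftrace.c changes above replace the single global trace_probe_ops and ftrace_func_hash with one ftrace_func_probe (carrying its own ftrace_ops) per probe_ops per trace_array, and add the ftrace_func_mapper helpers so a probe can keep per-ip state behind its single data pointer. The following is a condensed, illustrative sketch of how a probe_ops implementation might use the new callback signatures and mapper helpers; it is modeled on the snapshot probe converted later in this series, and the my_probe_* names are hypothetical, not part of the patch.

/*
 * Illustrative sketch (not part of this patch): per-ip data behind the
 * per-instance probe->data pointer, using the new mapper helpers.
 */
static int
my_probe_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
	      unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;

	if (!mapper) {			/* first ip for this trace_array */
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENOMEM;
		*data = mapper;		/* shared by every ip of this probe */
	}
	/* remember init_data (e.g. a count) for this particular ip */
	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}

static void
my_probe_func(unsigned long ip, unsigned long parent_ip,
	      struct trace_array *tr, struct ftrace_probe_ops *ops,
	      void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;

	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
	if (count && *count > 0)
		(*count)--;		/* per-ip state, no global hash needed */
}

static void
my_probe_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
	      unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;

	if (!ip) {			/* ip == 0: release everything */
		if (mapper)
			free_ftrace_func_mapper(mapper, NULL);
		return;
	}
	ftrace_func_mapper_remove_ip(mapper, ip);
}

static struct ftrace_probe_ops my_probe_ops = {
	.func = my_probe_func,
	.init = my_probe_init,
	.free = my_probe_free,
};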
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index ca47a4fa2986..4ae268e687fe 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
@@ -438,6 +438,7 @@ struct ring_buffer_per_cpu { | |||
438 | raw_spinlock_t reader_lock; /* serialize readers */ | 438 | raw_spinlock_t reader_lock; /* serialize readers */ |
439 | arch_spinlock_t lock; | 439 | arch_spinlock_t lock; |
440 | struct lock_class_key lock_key; | 440 | struct lock_class_key lock_key; |
441 | struct buffer_data_page *free_page; | ||
441 | unsigned long nr_pages; | 442 | unsigned long nr_pages; |
442 | unsigned int current_context; | 443 | unsigned int current_context; |
443 | struct list_head *pages; | 444 | struct list_head *pages; |
@@ -4389,9 +4390,25 @@ EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu); | |||
4389 | */ | 4390 | */ |
4390 | void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu) | 4391 | void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu) |
4391 | { | 4392 | { |
4392 | struct buffer_data_page *bpage; | 4393 | struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; |
4394 | struct buffer_data_page *bpage = NULL; | ||
4395 | unsigned long flags; | ||
4393 | struct page *page; | 4396 | struct page *page; |
4394 | 4397 | ||
4398 | local_irq_save(flags); | ||
4399 | arch_spin_lock(&cpu_buffer->lock); | ||
4400 | |||
4401 | if (cpu_buffer->free_page) { | ||
4402 | bpage = cpu_buffer->free_page; | ||
4403 | cpu_buffer->free_page = NULL; | ||
4404 | } | ||
4405 | |||
4406 | arch_spin_unlock(&cpu_buffer->lock); | ||
4407 | local_irq_restore(flags); | ||
4408 | |||
4409 | if (bpage) | ||
4410 | goto out; | ||
4411 | |||
4395 | page = alloc_pages_node(cpu_to_node(cpu), | 4412 | page = alloc_pages_node(cpu_to_node(cpu), |
4396 | GFP_KERNEL | __GFP_NORETRY, 0); | 4413 | GFP_KERNEL | __GFP_NORETRY, 0); |
4397 | if (!page) | 4414 | if (!page) |
@@ -4399,6 +4416,7 @@ void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu) | |||
4399 | 4416 | ||
4400 | bpage = page_address(page); | 4417 | bpage = page_address(page); |
4401 | 4418 | ||
4419 | out: | ||
4402 | rb_init_page(bpage); | 4420 | rb_init_page(bpage); |
4403 | 4421 | ||
4404 | return bpage; | 4422 | return bpage; |
@@ -4408,13 +4426,29 @@ EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page); | |||
4408 | /** | 4426 | /** |
4409 | * ring_buffer_free_read_page - free an allocated read page | 4427 | * ring_buffer_free_read_page - free an allocated read page |
4410 | * @buffer: the buffer the page was allocated for | 4428 | * @buffer: the buffer the page was allocated for |
4429 | * @cpu: the cpu buffer the page came from | ||
4411 | * @data: the page to free | 4430 | * @data: the page to free |
4412 | * | 4431 | * |
4413 | * Free a page allocated from ring_buffer_alloc_read_page. | 4432 | * Free a page allocated from ring_buffer_alloc_read_page. |
4414 | */ | 4433 | */ |
4415 | void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data) | 4434 | void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data) |
4416 | { | 4435 | { |
4417 | free_page((unsigned long)data); | 4436 | struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; |
4437 | struct buffer_data_page *bpage = data; | ||
4438 | unsigned long flags; | ||
4439 | |||
4440 | local_irq_save(flags); | ||
4441 | arch_spin_lock(&cpu_buffer->lock); | ||
4442 | |||
4443 | if (!cpu_buffer->free_page) { | ||
4444 | cpu_buffer->free_page = bpage; | ||
4445 | bpage = NULL; | ||
4446 | } | ||
4447 | |||
4448 | arch_spin_unlock(&cpu_buffer->lock); | ||
4449 | local_irq_restore(flags); | ||
4450 | |||
4451 | free_page((unsigned long)bpage); | ||
4418 | } | 4452 | } |
4419 | EXPORT_SYMBOL_GPL(ring_buffer_free_read_page); | 4453 | EXPORT_SYMBOL_GPL(ring_buffer_free_read_page); |
4420 | 4454 | ||
diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c index c190a4d5013c..9fbcaf567886 100644 --- a/kernel/trace/ring_buffer_benchmark.c +++ b/kernel/trace/ring_buffer_benchmark.c | |||
@@ -171,7 +171,7 @@ static enum event_status read_page(int cpu) | |||
171 | } | 171 | } |
172 | } | 172 | } |
173 | } | 173 | } |
174 | ring_buffer_free_read_page(buffer, bpage); | 174 | ring_buffer_free_read_page(buffer, cpu, bpage); |
175 | 175 | ||
176 | if (ret < 0) | 176 | if (ret < 0) |
177 | return EVENT_DROPPED; | 177 | return EVENT_DROPPED; |
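The ring_buffer.c change above lets each per-cpu buffer cache one freed reader page and hand it back on the next allocation, which is why ring_buffer_free_read_page() now needs to know which cpu the page came from. A minimal sketch of the resulting caller pattern follows, mirroring the one-line benchmark update just above; consume_data() is a hypothetical consumer, not an existing API.

/*
 * Illustrative caller pattern (not part of this patch): the reader page
 * must be returned to the same per-cpu buffer it was allocated from, so
 * callers now carry the cpu alongside the page.
 */
void *page;

page = ring_buffer_alloc_read_page(buffer, cpu);	/* may reuse the cached page */
if (page) {
	if (ring_buffer_read_page(buffer, &page, PAGE_SIZE, cpu, 0) >= 0)
		consume_data(page);
	/* parked in cpu_buffer->free_page unless one is already cached */
	ring_buffer_free_read_page(buffer, cpu, page);
}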
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 0029fe62b245..80eda7d254ed 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -257,7 +257,7 @@ unsigned long long ns2usecs(u64 nsec) | |||
257 | 257 | ||
258 | /* trace_flags that are default zero for instances */ | 258 | /* trace_flags that are default zero for instances */ |
259 | #define ZEROED_TRACE_FLAGS \ | 259 | #define ZEROED_TRACE_FLAGS \ |
260 | TRACE_ITER_EVENT_FORK | 260 | (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK) |
261 | 261 | ||
262 | /* | 262 | /* |
263 | * The global_trace is the descriptor that holds the top-level tracing | 263 | * The global_trace is the descriptor that holds the top-level tracing |
@@ -757,7 +757,7 @@ __trace_buffer_lock_reserve(struct ring_buffer *buffer, | |||
757 | return event; | 757 | return event; |
758 | } | 758 | } |
759 | 759 | ||
760 | static void tracer_tracing_on(struct trace_array *tr) | 760 | void tracer_tracing_on(struct trace_array *tr) |
761 | { | 761 | { |
762 | if (tr->trace_buffer.buffer) | 762 | if (tr->trace_buffer.buffer) |
763 | ring_buffer_record_on(tr->trace_buffer.buffer); | 763 | ring_buffer_record_on(tr->trace_buffer.buffer); |
@@ -894,23 +894,8 @@ int __trace_bputs(unsigned long ip, const char *str) | |||
894 | EXPORT_SYMBOL_GPL(__trace_bputs); | 894 | EXPORT_SYMBOL_GPL(__trace_bputs); |
895 | 895 | ||
896 | #ifdef CONFIG_TRACER_SNAPSHOT | 896 | #ifdef CONFIG_TRACER_SNAPSHOT |
897 | /** | 897 | static void tracing_snapshot_instance(struct trace_array *tr) |
898 | * trace_snapshot - take a snapshot of the current buffer. | ||
899 | * | ||
900 | * This causes a swap between the snapshot buffer and the current live | ||
901 | * tracing buffer. You can use this to take snapshots of the live | ||
902 | * trace when some condition is triggered, but continue to trace. | ||
903 | * | ||
904 | * Note, make sure to allocate the snapshot with either | ||
905 | * a tracing_snapshot_alloc(), or by doing it manually | ||
906 | * with: echo 1 > /sys/kernel/debug/tracing/snapshot | ||
907 | * | ||
908 | * If the snapshot buffer is not allocated, it will stop tracing. | ||
909 | * Basically making a permanent snapshot. | ||
910 | */ | ||
911 | void tracing_snapshot(void) | ||
912 | { | 898 | { |
913 | struct trace_array *tr = &global_trace; | ||
914 | struct tracer *tracer = tr->current_trace; | 899 | struct tracer *tracer = tr->current_trace; |
915 | unsigned long flags; | 900 | unsigned long flags; |
916 | 901 | ||
@@ -938,6 +923,27 @@ void tracing_snapshot(void) | |||
938 | update_max_tr(tr, current, smp_processor_id()); | 923 | update_max_tr(tr, current, smp_processor_id()); |
939 | local_irq_restore(flags); | 924 | local_irq_restore(flags); |
940 | } | 925 | } |
926 | |||
927 | /** | ||
928 | * trace_snapshot - take a snapshot of the current buffer. | ||
929 | * | ||
930 | * This causes a swap between the snapshot buffer and the current live | ||
931 | * tracing buffer. You can use this to take snapshots of the live | ||
932 | * trace when some condition is triggered, but continue to trace. | ||
933 | * | ||
934 | * Note, make sure to allocate the snapshot with either | ||
935 | * a tracing_snapshot_alloc(), or by doing it manually | ||
936 | * with: echo 1 > /sys/kernel/debug/tracing/snapshot | ||
937 | * | ||
938 | * If the snapshot buffer is not allocated, it will stop tracing. | ||
939 | * Basically making a permanent snapshot. | ||
940 | */ | ||
941 | void tracing_snapshot(void) | ||
942 | { | ||
943 | struct trace_array *tr = &global_trace; | ||
944 | |||
945 | tracing_snapshot_instance(tr); | ||
946 | } | ||
941 | EXPORT_SYMBOL_GPL(tracing_snapshot); | 947 | EXPORT_SYMBOL_GPL(tracing_snapshot); |
942 | 948 | ||
943 | static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf, | 949 | static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf, |
@@ -1039,7 +1045,7 @@ void tracing_snapshot_alloc(void) | |||
1039 | EXPORT_SYMBOL_GPL(tracing_snapshot_alloc); | 1045 | EXPORT_SYMBOL_GPL(tracing_snapshot_alloc); |
1040 | #endif /* CONFIG_TRACER_SNAPSHOT */ | 1046 | #endif /* CONFIG_TRACER_SNAPSHOT */ |
1041 | 1047 | ||
1042 | static void tracer_tracing_off(struct trace_array *tr) | 1048 | void tracer_tracing_off(struct trace_array *tr) |
1043 | { | 1049 | { |
1044 | if (tr->trace_buffer.buffer) | 1050 | if (tr->trace_buffer.buffer) |
1045 | ring_buffer_record_off(tr->trace_buffer.buffer); | 1051 | ring_buffer_record_off(tr->trace_buffer.buffer); |
@@ -1424,6 +1430,28 @@ static int wait_on_pipe(struct trace_iterator *iter, bool full) | |||
1424 | } | 1430 | } |
1425 | 1431 | ||
1426 | #ifdef CONFIG_FTRACE_STARTUP_TEST | 1432 | #ifdef CONFIG_FTRACE_STARTUP_TEST |
1433 | static bool selftests_can_run; | ||
1434 | |||
1435 | struct trace_selftests { | ||
1436 | struct list_head list; | ||
1437 | struct tracer *type; | ||
1438 | }; | ||
1439 | |||
1440 | static LIST_HEAD(postponed_selftests); | ||
1441 | |||
1442 | static int save_selftest(struct tracer *type) | ||
1443 | { | ||
1444 | struct trace_selftests *selftest; | ||
1445 | |||
1446 | selftest = kmalloc(sizeof(*selftest), GFP_KERNEL); | ||
1447 | if (!selftest) | ||
1448 | return -ENOMEM; | ||
1449 | |||
1450 | selftest->type = type; | ||
1451 | list_add(&selftest->list, &postponed_selftests); | ||
1452 | return 0; | ||
1453 | } | ||
1454 | |||
1427 | static int run_tracer_selftest(struct tracer *type) | 1455 | static int run_tracer_selftest(struct tracer *type) |
1428 | { | 1456 | { |
1429 | struct trace_array *tr = &global_trace; | 1457 | struct trace_array *tr = &global_trace; |
@@ -1434,6 +1462,14 @@ static int run_tracer_selftest(struct tracer *type) | |||
1434 | return 0; | 1462 | return 0; |
1435 | 1463 | ||
1436 | /* | 1464 | /* |
1465 | * If a tracer registers early in boot up (before scheduling is | ||
1466 | * initialized and such), then do not run its selftests yet. | ||
1467 | * Instead, run it a little later in the boot process. | ||
1468 | */ | ||
1469 | if (!selftests_can_run) | ||
1470 | return save_selftest(type); | ||
1471 | |||
1472 | /* | ||
1437 | * Run a selftest on this tracer. | 1473 | * Run a selftest on this tracer. |
1438 | * Here we reset the trace buffer, and set the current | 1474 | * Here we reset the trace buffer, and set the current |
1439 | * tracer to be this tracer. The tracer can then run some | 1475 | * tracer to be this tracer. The tracer can then run some |
@@ -1482,6 +1518,47 @@ static int run_tracer_selftest(struct tracer *type) | |||
1482 | printk(KERN_CONT "PASSED\n"); | 1518 | printk(KERN_CONT "PASSED\n"); |
1483 | return 0; | 1519 | return 0; |
1484 | } | 1520 | } |
1521 | |||
1522 | static __init int init_trace_selftests(void) | ||
1523 | { | ||
1524 | struct trace_selftests *p, *n; | ||
1525 | struct tracer *t, **last; | ||
1526 | int ret; | ||
1527 | |||
1528 | selftests_can_run = true; | ||
1529 | |||
1530 | mutex_lock(&trace_types_lock); | ||
1531 | |||
1532 | if (list_empty(&postponed_selftests)) | ||
1533 | goto out; | ||
1534 | |||
1535 | pr_info("Running postponed tracer tests:\n"); | ||
1536 | |||
1537 | list_for_each_entry_safe(p, n, &postponed_selftests, list) { | ||
1538 | ret = run_tracer_selftest(p->type); | ||
1539 | /* If the test fails, then warn and remove from available_tracers */ | ||
1540 | if (ret < 0) { | ||
1541 | WARN(1, "tracer: %s failed selftest, disabling\n", | ||
1542 | p->type->name); | ||
1543 | last = &trace_types; | ||
1544 | for (t = trace_types; t; t = t->next) { | ||
1545 | if (t == p->type) { | ||
1546 | *last = t->next; | ||
1547 | break; | ||
1548 | } | ||
1549 | last = &t->next; | ||
1550 | } | ||
1551 | } | ||
1552 | list_del(&p->list); | ||
1553 | kfree(p); | ||
1554 | } | ||
1555 | |||
1556 | out: | ||
1557 | mutex_unlock(&trace_types_lock); | ||
1558 | |||
1559 | return 0; | ||
1560 | } | ||
1561 | early_initcall(init_trace_selftests); | ||
1485 | #else | 1562 | #else |
1486 | static inline int run_tracer_selftest(struct tracer *type) | 1563 | static inline int run_tracer_selftest(struct tracer *type) |
1487 | { | 1564 | { |
@@ -1927,6 +2004,18 @@ void tracing_record_cmdline(struct task_struct *tsk) | |||
1927 | __this_cpu_write(trace_cmdline_save, false); | 2004 | __this_cpu_write(trace_cmdline_save, false); |
1928 | } | 2005 | } |
1929 | 2006 | ||
2007 | /* | ||
2008 | * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq | ||
2009 | * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function | ||
2010 | * simplifies those functions and keeps them in sync. | ||
2011 | */ | ||
2012 | enum print_line_t trace_handle_return(struct trace_seq *s) | ||
2013 | { | ||
2014 | return trace_seq_has_overflowed(s) ? | ||
2015 | TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED; | ||
2016 | } | ||
2017 | EXPORT_SYMBOL_GPL(trace_handle_return); | ||
2018 | |||
1930 | void | 2019 | void |
1931 | tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, | 2020 | tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, |
1932 | int pc) | 2021 | int pc) |
@@ -4122,6 +4211,9 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled) | |||
4122 | if (mask == TRACE_ITER_EVENT_FORK) | 4211 | if (mask == TRACE_ITER_EVENT_FORK) |
4123 | trace_event_follow_fork(tr, enabled); | 4212 | trace_event_follow_fork(tr, enabled); |
4124 | 4213 | ||
4214 | if (mask == TRACE_ITER_FUNC_FORK) | ||
4215 | ftrace_pid_follow_fork(tr, enabled); | ||
4216 | |||
4125 | if (mask == TRACE_ITER_OVERWRITE) { | 4217 | if (mask == TRACE_ITER_OVERWRITE) { |
4126 | ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled); | 4218 | ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled); |
4127 | #ifdef CONFIG_TRACER_MAX_TRACE | 4219 | #ifdef CONFIG_TRACER_MAX_TRACE |
@@ -5962,6 +6054,7 @@ static int tracing_clock_open(struct inode *inode, struct file *file) | |||
5962 | struct ftrace_buffer_info { | 6054 | struct ftrace_buffer_info { |
5963 | struct trace_iterator iter; | 6055 | struct trace_iterator iter; |
5964 | void *spare; | 6056 | void *spare; |
6057 | unsigned int spare_cpu; | ||
5965 | unsigned int read; | 6058 | unsigned int read; |
5966 | }; | 6059 | }; |
5967 | 6060 | ||
@@ -6291,9 +6384,11 @@ tracing_buffers_read(struct file *filp, char __user *ubuf, | |||
6291 | return -EBUSY; | 6384 | return -EBUSY; |
6292 | #endif | 6385 | #endif |
6293 | 6386 | ||
6294 | if (!info->spare) | 6387 | if (!info->spare) { |
6295 | info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer, | 6388 | info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer, |
6296 | iter->cpu_file); | 6389 | iter->cpu_file); |
6390 | info->spare_cpu = iter->cpu_file; | ||
6391 | } | ||
6297 | if (!info->spare) | 6392 | if (!info->spare) |
6298 | return -ENOMEM; | 6393 | return -ENOMEM; |
6299 | 6394 | ||
@@ -6353,7 +6448,8 @@ static int tracing_buffers_release(struct inode *inode, struct file *file) | |||
6353 | __trace_array_put(iter->tr); | 6448 | __trace_array_put(iter->tr); |
6354 | 6449 | ||
6355 | if (info->spare) | 6450 | if (info->spare) |
6356 | ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare); | 6451 | ring_buffer_free_read_page(iter->trace_buffer->buffer, |
6452 | info->spare_cpu, info->spare); | ||
6357 | kfree(info); | 6453 | kfree(info); |
6358 | 6454 | ||
6359 | mutex_unlock(&trace_types_lock); | 6455 | mutex_unlock(&trace_types_lock); |
@@ -6364,6 +6460,7 @@ static int tracing_buffers_release(struct inode *inode, struct file *file) | |||
6364 | struct buffer_ref { | 6460 | struct buffer_ref { |
6365 | struct ring_buffer *buffer; | 6461 | struct ring_buffer *buffer; |
6366 | void *page; | 6462 | void *page; |
6463 | int cpu; | ||
6367 | int ref; | 6464 | int ref; |
6368 | }; | 6465 | }; |
6369 | 6466 | ||
@@ -6375,7 +6472,7 @@ static void buffer_pipe_buf_release(struct pipe_inode_info *pipe, | |||
6375 | if (--ref->ref) | 6472 | if (--ref->ref) |
6376 | return; | 6473 | return; |
6377 | 6474 | ||
6378 | ring_buffer_free_read_page(ref->buffer, ref->page); | 6475 | ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page); |
6379 | kfree(ref); | 6476 | kfree(ref); |
6380 | buf->private = 0; | 6477 | buf->private = 0; |
6381 | } | 6478 | } |
@@ -6409,7 +6506,7 @@ static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i) | |||
6409 | if (--ref->ref) | 6506 | if (--ref->ref) |
6410 | return; | 6507 | return; |
6411 | 6508 | ||
6412 | ring_buffer_free_read_page(ref->buffer, ref->page); | 6509 | ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page); |
6413 | kfree(ref); | 6510 | kfree(ref); |
6414 | spd->partial[i].private = 0; | 6511 | spd->partial[i].private = 0; |
6415 | } | 6512 | } |
@@ -6473,11 +6570,13 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, | |||
6473 | kfree(ref); | 6570 | kfree(ref); |
6474 | break; | 6571 | break; |
6475 | } | 6572 | } |
6573 | ref->cpu = iter->cpu_file; | ||
6476 | 6574 | ||
6477 | r = ring_buffer_read_page(ref->buffer, &ref->page, | 6575 | r = ring_buffer_read_page(ref->buffer, &ref->page, |
6478 | len, iter->cpu_file, 1); | 6576 | len, iter->cpu_file, 1); |
6479 | if (r < 0) { | 6577 | if (r < 0) { |
6480 | ring_buffer_free_read_page(ref->buffer, ref->page); | 6578 | ring_buffer_free_read_page(ref->buffer, ref->cpu, |
6579 | ref->page); | ||
6481 | kfree(ref); | 6580 | kfree(ref); |
6482 | break; | 6581 | break; |
6483 | } | 6582 | } |
@@ -6648,43 +6747,89 @@ static const struct file_operations tracing_dyn_info_fops = { | |||
6648 | 6747 | ||
6649 | #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) | 6748 | #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) |
6650 | static void | 6749 | static void |
6651 | ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data) | 6750 | ftrace_snapshot(unsigned long ip, unsigned long parent_ip, |
6751 | struct trace_array *tr, struct ftrace_probe_ops *ops, | ||
6752 | void *data) | ||
6652 | { | 6753 | { |
6653 | tracing_snapshot(); | 6754 | tracing_snapshot_instance(tr); |
6654 | } | 6755 | } |
6655 | 6756 | ||
6656 | static void | 6757 | static void |
6657 | ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data) | 6758 | ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, |
6759 | struct trace_array *tr, struct ftrace_probe_ops *ops, | ||
6760 | void *data) | ||
6658 | { | 6761 | { |
6659 | unsigned long *count = (long *)data; | 6762 | struct ftrace_func_mapper *mapper = data; |
6763 | long *count = NULL; | ||
6660 | 6764 | ||
6661 | if (!*count) | 6765 | if (mapper) |
6662 | return; | 6766 | count = (long *)ftrace_func_mapper_find_ip(mapper, ip); |
6767 | |||
6768 | if (count) { | ||
6769 | |||
6770 | if (*count <= 0) | ||
6771 | return; | ||
6663 | 6772 | ||
6664 | if (*count != -1) | ||
6665 | (*count)--; | 6773 | (*count)--; |
6774 | } | ||
6666 | 6775 | ||
6667 | tracing_snapshot(); | 6776 | tracing_snapshot_instance(tr); |
6668 | } | 6777 | } |
6669 | 6778 | ||
6670 | static int | 6779 | static int |
6671 | ftrace_snapshot_print(struct seq_file *m, unsigned long ip, | 6780 | ftrace_snapshot_print(struct seq_file *m, unsigned long ip, |
6672 | struct ftrace_probe_ops *ops, void *data) | 6781 | struct ftrace_probe_ops *ops, void *data) |
6673 | { | 6782 | { |
6674 | long count = (long)data; | 6783 | struct ftrace_func_mapper *mapper = data; |
6784 | long *count = NULL; | ||
6675 | 6785 | ||
6676 | seq_printf(m, "%ps:", (void *)ip); | 6786 | seq_printf(m, "%ps:", (void *)ip); |
6677 | 6787 | ||
6678 | seq_puts(m, "snapshot"); | 6788 | seq_puts(m, "snapshot"); |
6679 | 6789 | ||
6680 | if (count == -1) | 6790 | if (mapper) |
6681 | seq_puts(m, ":unlimited\n"); | 6791 | count = (long *)ftrace_func_mapper_find_ip(mapper, ip); |
6792 | |||
6793 | if (count) | ||
6794 | seq_printf(m, ":count=%ld\n", *count); | ||
6682 | else | 6795 | else |
6683 | seq_printf(m, ":count=%ld\n", count); | 6796 | seq_puts(m, ":unlimited\n"); |
6684 | 6797 | ||
6685 | return 0; | 6798 | return 0; |
6686 | } | 6799 | } |
6687 | 6800 | ||
6801 | static int | ||
6802 | ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr, | ||
6803 | unsigned long ip, void *init_data, void **data) | ||
6804 | { | ||
6805 | struct ftrace_func_mapper *mapper = *data; | ||
6806 | |||
6807 | if (!mapper) { | ||
6808 | mapper = allocate_ftrace_func_mapper(); | ||
6809 | if (!mapper) | ||
6810 | return -ENOMEM; | ||
6811 | *data = mapper; | ||
6812 | } | ||
6813 | |||
6814 | return ftrace_func_mapper_add_ip(mapper, ip, init_data); | ||
6815 | } | ||
6816 | |||
6817 | static void | ||
6818 | ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr, | ||
6819 | unsigned long ip, void *data) | ||
6820 | { | ||
6821 | struct ftrace_func_mapper *mapper = data; | ||
6822 | |||
6823 | if (!ip) { | ||
6824 | if (!mapper) | ||
6825 | return; | ||
6826 | free_ftrace_func_mapper(mapper, NULL); | ||
6827 | return; | ||
6828 | } | ||
6829 | |||
6830 | ftrace_func_mapper_remove_ip(mapper, ip); | ||
6831 | } | ||
6832 | |||
6688 | static struct ftrace_probe_ops snapshot_probe_ops = { | 6833 | static struct ftrace_probe_ops snapshot_probe_ops = { |
6689 | .func = ftrace_snapshot, | 6834 | .func = ftrace_snapshot, |
6690 | .print = ftrace_snapshot_print, | 6835 | .print = ftrace_snapshot_print, |
@@ -6693,10 +6838,12 @@ static struct ftrace_probe_ops snapshot_probe_ops = { | |||
6693 | static struct ftrace_probe_ops snapshot_count_probe_ops = { | 6838 | static struct ftrace_probe_ops snapshot_count_probe_ops = { |
6694 | .func = ftrace_count_snapshot, | 6839 | .func = ftrace_count_snapshot, |
6695 | .print = ftrace_snapshot_print, | 6840 | .print = ftrace_snapshot_print, |
6841 | .init = ftrace_snapshot_init, | ||
6842 | .free = ftrace_snapshot_free, | ||
6696 | }; | 6843 | }; |
6697 | 6844 | ||
6698 | static int | 6845 | static int |
6699 | ftrace_trace_snapshot_callback(struct ftrace_hash *hash, | 6846 | ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash, |
6700 | char *glob, char *cmd, char *param, int enable) | 6847 | char *glob, char *cmd, char *param, int enable) |
6701 | { | 6848 | { |
6702 | struct ftrace_probe_ops *ops; | 6849 | struct ftrace_probe_ops *ops; |
@@ -6710,10 +6857,8 @@ ftrace_trace_snapshot_callback(struct ftrace_hash *hash, | |||
6710 | 6857 | ||
6711 | ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops; | 6858 | ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops; |
6712 | 6859 | ||
6713 | if (glob[0] == '!') { | 6860 | if (glob[0] == '!') |
6714 | unregister_ftrace_function_probe_func(glob+1, ops); | 6861 | return unregister_ftrace_function_probe_func(glob+1, tr, ops); |
6715 | return 0; | ||
6716 | } | ||
6717 | 6862 | ||
6718 | if (!param) | 6863 | if (!param) |
6719 | goto out_reg; | 6864 | goto out_reg; |
@@ -6732,11 +6877,11 @@ ftrace_trace_snapshot_callback(struct ftrace_hash *hash, | |||
6732 | return ret; | 6877 | return ret; |
6733 | 6878 | ||
6734 | out_reg: | 6879 | out_reg: |
6735 | ret = alloc_snapshot(&global_trace); | 6880 | ret = alloc_snapshot(tr); |
6736 | if (ret < 0) | 6881 | if (ret < 0) |
6737 | goto out; | 6882 | goto out; |
6738 | 6883 | ||
6739 | ret = register_ftrace_function_probe(glob, ops, count); | 6884 | ret = register_ftrace_function_probe(glob, tr, ops, count); |
6740 | 6885 | ||
6741 | out: | 6886 | out: |
6742 | return ret < 0 ? ret : 0; | 6887 | return ret < 0 ? ret : 0; |
@@ -7347,6 +7492,8 @@ static int instance_mkdir(const char *name) | |||
7347 | goto out_free_tr; | 7492 | goto out_free_tr; |
7348 | } | 7493 | } |
7349 | 7494 | ||
7495 | ftrace_init_trace_array(tr); | ||
7496 | |||
7350 | init_tracer_tracefs(tr, tr->dir); | 7497 | init_tracer_tracefs(tr, tr->dir); |
7351 | init_trace_flags_index(tr); | 7498 | init_trace_flags_index(tr); |
7352 | __update_tracer_options(tr); | 7499 | __update_tracer_options(tr); |
@@ -7967,6 +8114,9 @@ __init static int tracer_alloc_buffers(void) | |||
7967 | 8114 | ||
7968 | register_tracer(&nop_trace); | 8115 | register_tracer(&nop_trace); |
7969 | 8116 | ||
8117 | /* Function tracing may start here (via kernel command line) */ | ||
8118 | init_function_trace(); | ||
8119 | |||
7970 | /* All seems OK, enable tracing */ | 8120 | /* All seems OK, enable tracing */ |
7971 | tracing_disabled = 0; | 8121 | tracing_disabled = 0; |
7972 | 8122 | ||
@@ -8001,7 +8151,7 @@ out: | |||
8001 | return ret; | 8151 | return ret; |
8002 | } | 8152 | } |
8003 | 8153 | ||
8004 | void __init trace_init(void) | 8154 | void __init early_trace_init(void) |
8005 | { | 8155 | { |
8006 | if (tracepoint_printk) { | 8156 | if (tracepoint_printk) { |
8007 | tracepoint_print_iter = | 8157 | tracepoint_print_iter = |
@@ -8012,6 +8162,10 @@ void __init trace_init(void) | |||
8012 | static_key_enable(&tracepoint_printk_key.key); | 8162 | static_key_enable(&tracepoint_printk_key.key); |
8013 | } | 8163 | } |
8014 | tracer_alloc_buffers(); | 8164 | tracer_alloc_buffers(); |
8165 | } | ||
8166 | |||
8167 | void __init trace_init(void) | ||
8168 | { | ||
8015 | trace_event_init(); | 8169 | trace_event_init(); |
8016 | } | 8170 | } |
8017 | 8171 | ||
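With the trace.c changes above, the snapshot function trigger is wired to the trace_array that owns the filter file: the probe calls tracing_snapshot_instance(tr), the callback allocates the snapshot buffer with alloc_snapshot(tr), and registration/unregistration carry tr. A rough usage sketch follows; the mount point, instance name, and traced function are illustrative assumptions, not taken from this merge:

    # arm a snapshot trigger inside a tracing instance (fires at most once)
    mkdir /sys/kernel/tracing/instances/foo
    echo 'schedule:snapshot:1' > /sys/kernel/tracing/instances/foo/set_ftrace_filter
    cat /sys/kernel/tracing/instances/foo/snapshot    # snapshot of this instance only
    # remove the probe again
    echo '!schedule:snapshot' > /sys/kernel/tracing/instances/foo/set_ftrace_filter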
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index d19d52d600d6..291a1bca5748 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -262,6 +262,9 @@ struct trace_array { | |||
262 | #ifdef CONFIG_FUNCTION_TRACER | 262 | #ifdef CONFIG_FUNCTION_TRACER |
263 | struct ftrace_ops *ops; | 263 | struct ftrace_ops *ops; |
264 | struct trace_pid_list __rcu *function_pids; | 264 | struct trace_pid_list __rcu *function_pids; |
265 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
266 | struct list_head func_probes; | ||
267 | #endif | ||
265 | /* function tracing enabled */ | 268 | /* function tracing enabled */ |
266 | int function_enabled; | 269 | int function_enabled; |
267 | #endif | 270 | #endif |
@@ -579,6 +582,8 @@ void tracing_reset_all_online_cpus(void); | |||
579 | int tracing_open_generic(struct inode *inode, struct file *filp); | 582 | int tracing_open_generic(struct inode *inode, struct file *filp); |
580 | bool tracing_is_disabled(void); | 583 | bool tracing_is_disabled(void); |
581 | int tracer_tracing_is_on(struct trace_array *tr); | 584 | int tracer_tracing_is_on(struct trace_array *tr); |
585 | void tracer_tracing_on(struct trace_array *tr); | ||
586 | void tracer_tracing_off(struct trace_array *tr); | ||
582 | struct dentry *trace_create_file(const char *name, | 587 | struct dentry *trace_create_file(const char *name, |
583 | umode_t mode, | 588 | umode_t mode, |
584 | struct dentry *parent, | 589 | struct dentry *parent, |
@@ -696,6 +701,9 @@ extern void trace_event_follow_fork(struct trace_array *tr, bool enable); | |||
696 | 701 | ||
697 | #ifdef CONFIG_DYNAMIC_FTRACE | 702 | #ifdef CONFIG_DYNAMIC_FTRACE |
698 | extern unsigned long ftrace_update_tot_cnt; | 703 | extern unsigned long ftrace_update_tot_cnt; |
704 | void ftrace_init_trace_array(struct trace_array *tr); | ||
705 | #else | ||
706 | static inline void ftrace_init_trace_array(struct trace_array *tr) { } | ||
699 | #endif | 707 | #endif |
700 | #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func | 708 | #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func |
701 | extern int DYN_FTRACE_TEST_NAME(void); | 709 | extern int DYN_FTRACE_TEST_NAME(void); |
@@ -880,6 +888,14 @@ print_graph_function_flags(struct trace_iterator *iter, u32 flags) | |||
880 | extern struct list_head ftrace_pids; | 888 | extern struct list_head ftrace_pids; |
881 | 889 | ||
882 | #ifdef CONFIG_FUNCTION_TRACER | 890 | #ifdef CONFIG_FUNCTION_TRACER |
891 | struct ftrace_func_command { | ||
892 | struct list_head list; | ||
893 | char *name; | ||
894 | int (*func)(struct trace_array *tr, | ||
895 | struct ftrace_hash *hash, | ||
896 | char *func, char *cmd, | ||
897 | char *params, int enable); | ||
898 | }; | ||
883 | extern bool ftrace_filter_param __initdata; | 899 | extern bool ftrace_filter_param __initdata; |
884 | static inline int ftrace_trace_task(struct trace_array *tr) | 900 | static inline int ftrace_trace_task(struct trace_array *tr) |
885 | { | 901 | { |
@@ -897,6 +913,8 @@ void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer); | |||
897 | void ftrace_init_tracefs_toplevel(struct trace_array *tr, | 913 | void ftrace_init_tracefs_toplevel(struct trace_array *tr, |
898 | struct dentry *d_tracer); | 914 | struct dentry *d_tracer); |
899 | void ftrace_clear_pids(struct trace_array *tr); | 915 | void ftrace_clear_pids(struct trace_array *tr); |
916 | int init_function_trace(void); | ||
917 | void ftrace_pid_follow_fork(struct trace_array *tr, bool enable); | ||
900 | #else | 918 | #else |
901 | static inline int ftrace_trace_task(struct trace_array *tr) | 919 | static inline int ftrace_trace_task(struct trace_array *tr) |
902 | { | 920 | { |
@@ -916,15 +934,70 @@ static inline void ftrace_reset_array_ops(struct trace_array *tr) { } | |||
916 | static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { } | 934 | static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { } |
917 | static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { } | 935 | static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { } |
918 | static inline void ftrace_clear_pids(struct trace_array *tr) { } | 936 | static inline void ftrace_clear_pids(struct trace_array *tr) { } |
937 | static inline int init_function_trace(void) { return 0; } | ||
938 | static inline void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) { } | ||
919 | /* ftrace_func_t type is not defined, use macro instead of static inline */ | 939 | /* ftrace_func_t type is not defined, use macro instead of static inline */ |
920 | #define ftrace_init_array_ops(tr, func) do { } while (0) | 940 | #define ftrace_init_array_ops(tr, func) do { } while (0) |
921 | #endif /* CONFIG_FUNCTION_TRACER */ | 941 | #endif /* CONFIG_FUNCTION_TRACER */ |
922 | 942 | ||
923 | #if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE) | 943 | #if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE) |
944 | |||
945 | struct ftrace_probe_ops { | ||
946 | void (*func)(unsigned long ip, | ||
947 | unsigned long parent_ip, | ||
948 | struct trace_array *tr, | ||
949 | struct ftrace_probe_ops *ops, | ||
950 | void *data); | ||
951 | int (*init)(struct ftrace_probe_ops *ops, | ||
952 | struct trace_array *tr, | ||
953 | unsigned long ip, void *init_data, | ||
954 | void **data); | ||
955 | void (*free)(struct ftrace_probe_ops *ops, | ||
956 | struct trace_array *tr, | ||
957 | unsigned long ip, void *data); | ||
958 | int (*print)(struct seq_file *m, | ||
959 | unsigned long ip, | ||
960 | struct ftrace_probe_ops *ops, | ||
961 | void *data); | ||
962 | }; | ||
963 | |||
964 | struct ftrace_func_mapper; | ||
965 | typedef int (*ftrace_mapper_func)(void *data); | ||
966 | |||
967 | struct ftrace_func_mapper *allocate_ftrace_func_mapper(void); | ||
968 | void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper, | ||
969 | unsigned long ip); | ||
970 | int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper, | ||
971 | unsigned long ip, void *data); | ||
972 | void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper, | ||
973 | unsigned long ip); | ||
974 | void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper, | ||
975 | ftrace_mapper_func free_func); | ||
976 | |||
977 | extern int | ||
978 | register_ftrace_function_probe(char *glob, struct trace_array *tr, | ||
979 | struct ftrace_probe_ops *ops, void *data); | ||
980 | extern int | ||
981 | unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr, | ||
982 | struct ftrace_probe_ops *ops); | ||
983 | |||
984 | int register_ftrace_command(struct ftrace_func_command *cmd); | ||
985 | int unregister_ftrace_command(struct ftrace_func_command *cmd); | ||
986 | |||
924 | void ftrace_create_filter_files(struct ftrace_ops *ops, | 987 | void ftrace_create_filter_files(struct ftrace_ops *ops, |
925 | struct dentry *parent); | 988 | struct dentry *parent); |
926 | void ftrace_destroy_filter_files(struct ftrace_ops *ops); | 989 | void ftrace_destroy_filter_files(struct ftrace_ops *ops); |
927 | #else | 990 | #else |
991 | struct ftrace_func_command; | ||
992 | |||
993 | static inline __init int register_ftrace_command(struct ftrace_func_command *cmd) | ||
994 | { | ||
995 | return -EINVAL; | ||
996 | } | ||
997 | static inline __init int unregister_ftrace_command(char *cmd_name) | ||
998 | { | ||
999 | return -EINVAL; | ||
1000 | } | ||
928 | /* | 1001 | /* |
929 | * The ops parameter passed in is usually undefined. | 1002 | * The ops parameter passed in is usually undefined. |
930 | * This must be a macro. | 1003 | * This must be a macro. |
@@ -989,11 +1062,13 @@ extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf, | |||
989 | 1062 | ||
990 | #ifdef CONFIG_FUNCTION_TRACER | 1063 | #ifdef CONFIG_FUNCTION_TRACER |
991 | # define FUNCTION_FLAGS \ | 1064 | # define FUNCTION_FLAGS \ |
992 | C(FUNCTION, "function-trace"), | 1065 | C(FUNCTION, "function-trace"), \ |
1066 | C(FUNC_FORK, "function-fork"), | ||
993 | # define FUNCTION_DEFAULT_FLAGS TRACE_ITER_FUNCTION | 1067 | # define FUNCTION_DEFAULT_FLAGS TRACE_ITER_FUNCTION |
994 | #else | 1068 | #else |
995 | # define FUNCTION_FLAGS | 1069 | # define FUNCTION_FLAGS |
996 | # define FUNCTION_DEFAULT_FLAGS 0UL | 1070 | # define FUNCTION_DEFAULT_FLAGS 0UL |
1071 | # define TRACE_ITER_FUNC_FORK 0UL | ||
997 | #endif | 1072 | #endif |
998 | 1073 | ||
999 | #ifdef CONFIG_STACKTRACE | 1074 | #ifdef CONFIG_STACKTRACE |
diff --git a/kernel/trace/trace_benchmark.c b/kernel/trace/trace_benchmark.c index e49fbe901cfc..16a8cf02eee9 100644 --- a/kernel/trace/trace_benchmark.c +++ b/kernel/trace/trace_benchmark.c | |||
@@ -153,10 +153,18 @@ static int benchmark_event_kthread(void *arg) | |||
153 | trace_do_benchmark(); | 153 | trace_do_benchmark(); |
154 | 154 | ||
155 | /* | 155 | /* |
156 | * We don't go to sleep, but let others | 156 | * We don't go to sleep, but let others run as well. |
157 | * run as well. | 157 | * This is basically a "yield()" to let any task that |
158 | * wants to run, schedule in, but if the CPU is idle, | ||
159 | * we'll keep burning cycles. | ||
160 | * | ||
161 | * Note the _rcu_qs() version of cond_resched() will | ||
162 | * notify synchronize_rcu_tasks() that this thread has | ||
163 | * passed a quiescent state for rcu_tasks. Otherwise | ||
164 | * this thread will never voluntarily schedule which would | ||
165 | * block synchronize_rcu_tasks() indefinitely. | ||
158 | */ | 166 | */ |
159 | cond_resched(); | 167 | cond_resched_rcu_qs(); |
160 | } | 168 | } |
161 | 169 | ||
162 | return 0; | 170 | return 0; |
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 93116549a284..e7973e10398c 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c | |||
@@ -2460,15 +2460,8 @@ struct event_probe_data { | |||
2460 | bool enable; | 2460 | bool enable; |
2461 | }; | 2461 | }; |
2462 | 2462 | ||
2463 | static void | 2463 | static void update_event_probe(struct event_probe_data *data) |
2464 | event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data) | ||
2465 | { | 2464 | { |
2466 | struct event_probe_data **pdata = (struct event_probe_data **)_data; | ||
2467 | struct event_probe_data *data = *pdata; | ||
2468 | |||
2469 | if (!data) | ||
2470 | return; | ||
2471 | |||
2472 | if (data->enable) | 2465 | if (data->enable) |
2473 | clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags); | 2466 | clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags); |
2474 | else | 2467 | else |
@@ -2476,77 +2469,141 @@ event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data) | |||
2476 | } | 2469 | } |
2477 | 2470 | ||
2478 | static void | 2471 | static void |
2479 | event_enable_count_probe(unsigned long ip, unsigned long parent_ip, void **_data) | 2472 | event_enable_probe(unsigned long ip, unsigned long parent_ip, |
2473 | struct trace_array *tr, struct ftrace_probe_ops *ops, | ||
2474 | void *data) | ||
2480 | { | 2475 | { |
2481 | struct event_probe_data **pdata = (struct event_probe_data **)_data; | 2476 | struct ftrace_func_mapper *mapper = data; |
2482 | struct event_probe_data *data = *pdata; | 2477 | struct event_probe_data *edata; |
2478 | void **pdata; | ||
2483 | 2479 | ||
2484 | if (!data) | 2480 | pdata = ftrace_func_mapper_find_ip(mapper, ip); |
2481 | if (!pdata || !*pdata) | ||
2482 | return; | ||
2483 | |||
2484 | edata = *pdata; | ||
2485 | update_event_probe(edata); | ||
2486 | } | ||
2487 | |||
2488 | static void | ||
2489 | event_enable_count_probe(unsigned long ip, unsigned long parent_ip, | ||
2490 | struct trace_array *tr, struct ftrace_probe_ops *ops, | ||
2491 | void *data) | ||
2492 | { | ||
2493 | struct ftrace_func_mapper *mapper = data; | ||
2494 | struct event_probe_data *edata; | ||
2495 | void **pdata; | ||
2496 | |||
2497 | pdata = ftrace_func_mapper_find_ip(mapper, ip); | ||
2498 | if (!pdata || !*pdata) | ||
2485 | return; | 2499 | return; |
2486 | 2500 | ||
2487 | if (!data->count) | 2501 | edata = *pdata; |
2502 | |||
2503 | if (!edata->count) | ||
2488 | return; | 2504 | return; |
2489 | 2505 | ||
2490 | /* Skip if the event is in a state we want to switch to */ | 2506 | /* Skip if the event is in a state we want to switch to */ |
2491 | if (data->enable == !(data->file->flags & EVENT_FILE_FL_SOFT_DISABLED)) | 2507 | if (edata->enable == !(edata->file->flags & EVENT_FILE_FL_SOFT_DISABLED)) |
2492 | return; | 2508 | return; |
2493 | 2509 | ||
2494 | if (data->count != -1) | 2510 | if (edata->count != -1) |
2495 | (data->count)--; | 2511 | (edata->count)--; |
2496 | 2512 | ||
2497 | event_enable_probe(ip, parent_ip, _data); | 2513 | update_event_probe(edata); |
2498 | } | 2514 | } |
2499 | 2515 | ||
2500 | static int | 2516 | static int |
2501 | event_enable_print(struct seq_file *m, unsigned long ip, | 2517 | event_enable_print(struct seq_file *m, unsigned long ip, |
2502 | struct ftrace_probe_ops *ops, void *_data) | 2518 | struct ftrace_probe_ops *ops, void *data) |
2503 | { | 2519 | { |
2504 | struct event_probe_data *data = _data; | 2520 | struct ftrace_func_mapper *mapper = data; |
2521 | struct event_probe_data *edata; | ||
2522 | void **pdata; | ||
2523 | |||
2524 | pdata = ftrace_func_mapper_find_ip(mapper, ip); | ||
2525 | |||
2526 | if (WARN_ON_ONCE(!pdata || !*pdata)) | ||
2527 | return 0; | ||
2528 | |||
2529 | edata = *pdata; | ||
2505 | 2530 | ||
2506 | seq_printf(m, "%ps:", (void *)ip); | 2531 | seq_printf(m, "%ps:", (void *)ip); |
2507 | 2532 | ||
2508 | seq_printf(m, "%s:%s:%s", | 2533 | seq_printf(m, "%s:%s:%s", |
2509 | data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR, | 2534 | edata->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR, |
2510 | data->file->event_call->class->system, | 2535 | edata->file->event_call->class->system, |
2511 | trace_event_name(data->file->event_call)); | 2536 | trace_event_name(edata->file->event_call)); |
2512 | 2537 | ||
2513 | if (data->count == -1) | 2538 | if (edata->count == -1) |
2514 | seq_puts(m, ":unlimited\n"); | 2539 | seq_puts(m, ":unlimited\n"); |
2515 | else | 2540 | else |
2516 | seq_printf(m, ":count=%ld\n", data->count); | 2541 | seq_printf(m, ":count=%ld\n", edata->count); |
2517 | 2542 | ||
2518 | return 0; | 2543 | return 0; |
2519 | } | 2544 | } |
2520 | 2545 | ||
2521 | static int | 2546 | static int |
2522 | event_enable_init(struct ftrace_probe_ops *ops, unsigned long ip, | 2547 | event_enable_init(struct ftrace_probe_ops *ops, struct trace_array *tr, |
2523 | void **_data) | 2548 | unsigned long ip, void *init_data, void **data) |
2524 | { | 2549 | { |
2525 | struct event_probe_data **pdata = (struct event_probe_data **)_data; | 2550 | struct ftrace_func_mapper *mapper = *data; |
2526 | struct event_probe_data *data = *pdata; | 2551 | struct event_probe_data *edata = init_data; |
2552 | int ret; | ||
2553 | |||
2554 | if (!mapper) { | ||
2555 | mapper = allocate_ftrace_func_mapper(); | ||
2556 | if (!mapper) | ||
2557 | return -ENODEV; | ||
2558 | *data = mapper; | ||
2559 | } | ||
2560 | |||
2561 | ret = ftrace_func_mapper_add_ip(mapper, ip, edata); | ||
2562 | if (ret < 0) | ||
2563 | return ret; | ||
2564 | |||
2565 | edata->ref++; | ||
2527 | 2566 | ||
2528 | data->ref++; | 2567 | return 0; |
2568 | } | ||
2569 | |||
2570 | static int free_probe_data(void *data) | ||
2571 | { | ||
2572 | struct event_probe_data *edata = data; | ||
2573 | |||
2574 | edata->ref--; | ||
2575 | if (!edata->ref) { | ||
2576 | /* Remove the SOFT_MODE flag */ | ||
2577 | __ftrace_event_enable_disable(edata->file, 0, 1); | ||
2578 | module_put(edata->file->event_call->mod); | ||
2579 | kfree(edata); | ||
2580 | } | ||
2529 | return 0; | 2581 | return 0; |
2530 | } | 2582 | } |
2531 | 2583 | ||
2532 | static void | 2584 | static void |
2533 | event_enable_free(struct ftrace_probe_ops *ops, unsigned long ip, | 2585 | event_enable_free(struct ftrace_probe_ops *ops, struct trace_array *tr, |
2534 | void **_data) | 2586 | unsigned long ip, void *data) |
2535 | { | 2587 | { |
2536 | struct event_probe_data **pdata = (struct event_probe_data **)_data; | 2588 | struct ftrace_func_mapper *mapper = data; |
2537 | struct event_probe_data *data = *pdata; | 2589 | struct event_probe_data *edata; |
2538 | 2590 | ||
2539 | if (WARN_ON_ONCE(data->ref <= 0)) | 2591 | if (!ip) { |
2592 | if (!mapper) | ||
2593 | return; | ||
2594 | free_ftrace_func_mapper(mapper, free_probe_data); | ||
2540 | return; | 2595 | return; |
2541 | |||
2542 | data->ref--; | ||
2543 | if (!data->ref) { | ||
2544 | /* Remove the SOFT_MODE flag */ | ||
2545 | __ftrace_event_enable_disable(data->file, 0, 1); | ||
2546 | module_put(data->file->event_call->mod); | ||
2547 | kfree(data); | ||
2548 | } | 2596 | } |
2549 | *pdata = NULL; | 2597 | |
2598 | edata = ftrace_func_mapper_remove_ip(mapper, ip); | ||
2599 | |||
2600 | if (WARN_ON_ONCE(!edata)) | ||
2601 | return; | ||
2602 | |||
2603 | if (WARN_ON_ONCE(edata->ref <= 0)) | ||
2604 | return; | ||
2605 | |||
2606 | free_probe_data(edata); | ||
2550 | } | 2607 | } |
2551 | 2608 | ||
2552 | static struct ftrace_probe_ops event_enable_probe_ops = { | 2609 | static struct ftrace_probe_ops event_enable_probe_ops = { |
@@ -2578,10 +2635,9 @@ static struct ftrace_probe_ops event_disable_count_probe_ops = { | |||
2578 | }; | 2635 | }; |
2579 | 2636 | ||
2580 | static int | 2637 | static int |
2581 | event_enable_func(struct ftrace_hash *hash, | 2638 | event_enable_func(struct trace_array *tr, struct ftrace_hash *hash, |
2582 | char *glob, char *cmd, char *param, int enabled) | 2639 | char *glob, char *cmd, char *param, int enabled) |
2583 | { | 2640 | { |
2584 | struct trace_array *tr = top_trace_array(); | ||
2585 | struct trace_event_file *file; | 2641 | struct trace_event_file *file; |
2586 | struct ftrace_probe_ops *ops; | 2642 | struct ftrace_probe_ops *ops; |
2587 | struct event_probe_data *data; | 2643 | struct event_probe_data *data; |
@@ -2619,12 +2675,12 @@ event_enable_func(struct ftrace_hash *hash, | |||
2619 | ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops; | 2675 | ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops; |
2620 | 2676 | ||
2621 | if (glob[0] == '!') { | 2677 | if (glob[0] == '!') { |
2622 | unregister_ftrace_function_probe_func(glob+1, ops); | 2678 | ret = unregister_ftrace_function_probe_func(glob+1, tr, ops); |
2623 | ret = 0; | ||
2624 | goto out; | 2679 | goto out; |
2625 | } | 2680 | } |
2626 | 2681 | ||
2627 | ret = -ENOMEM; | 2682 | ret = -ENOMEM; |
2683 | |||
2628 | data = kzalloc(sizeof(*data), GFP_KERNEL); | 2684 | data = kzalloc(sizeof(*data), GFP_KERNEL); |
2629 | if (!data) | 2685 | if (!data) |
2630 | goto out; | 2686 | goto out; |
@@ -2661,7 +2717,8 @@ event_enable_func(struct ftrace_hash *hash, | |||
2661 | ret = __ftrace_event_enable_disable(file, 1, 1); | 2717 | ret = __ftrace_event_enable_disable(file, 1, 1); |
2662 | if (ret < 0) | 2718 | if (ret < 0) |
2663 | goto out_put; | 2719 | goto out_put; |
2664 | ret = register_ftrace_function_probe(glob, ops, data); | 2720 | |
2721 | ret = register_ftrace_function_probe(glob, tr, ops, data); | ||
2665 | /* | 2722 | /* |
2666 | * The above returns on success the # of functions enabled, | 2723 | * The above returns on success the # of functions enabled, |
2667 | * but if it didn't find any functions it returns zero. | 2724 | * but if it didn't find any functions it returns zero. |
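The trace_events.c rework above stores the per-address event_probe_data behind an ftrace_func_mapper and hands the owning trace_array into event_enable_func(), so the event enable/disable function triggers no longer hard-code top_trace_array(). A hedged sketch of the trigger syntax this code services (the spelled-out command names, function, and event are assumptions based on the long-standing ftrace interface, not shown verbatim in this diff):

    # soft-enable sched:sched_switch for the next 5 hits of schedule()
    echo 'schedule:enable_event:sched:sched_switch:5' > set_ftrace_filter
    # tear the probe down
    echo '!schedule:enable_event:sched:sched_switch' > set_ftrace_filter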
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index 0efa00d80623..a3bddbfd0874 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c | |||
@@ -267,10 +267,14 @@ static struct tracer function_trace __tracer_data = | |||
267 | }; | 267 | }; |
268 | 268 | ||
269 | #ifdef CONFIG_DYNAMIC_FTRACE | 269 | #ifdef CONFIG_DYNAMIC_FTRACE |
270 | static void update_traceon_count(void **data, bool on) | 270 | static void update_traceon_count(struct ftrace_probe_ops *ops, |
271 | unsigned long ip, | ||
272 | struct trace_array *tr, bool on, | ||
273 | void *data) | ||
271 | { | 274 | { |
272 | long *count = (long *)data; | 275 | struct ftrace_func_mapper *mapper = data; |
273 | long old_count = *count; | 276 | long *count; |
277 | long old_count; | ||
274 | 278 | ||
275 | /* | 279 | /* |
276 | * Tracing gets disabled (or enabled) once per count. | 280 | * Tracing gets disabled (or enabled) once per count. |
@@ -301,23 +305,22 @@ static void update_traceon_count(void **data, bool on) | |||
301 | * setting the tracing_on file. But we currently don't care | 305 | * setting the tracing_on file. But we currently don't care |
302 | * about that. | 306 | * about that. |
303 | */ | 307 | */ |
304 | if (!old_count) | 308 | count = (long *)ftrace_func_mapper_find_ip(mapper, ip); |
309 | old_count = *count; | ||
310 | |||
311 | if (old_count <= 0) | ||
305 | return; | 312 | return; |
306 | 313 | ||
307 | /* Make sure we see count before checking tracing state */ | 314 | /* Make sure we see count before checking tracing state */ |
308 | smp_rmb(); | 315 | smp_rmb(); |
309 | 316 | ||
310 | if (on == !!tracing_is_on()) | 317 | if (on == !!tracer_tracing_is_on(tr)) |
311 | return; | 318 | return; |
312 | 319 | ||
313 | if (on) | 320 | if (on) |
314 | tracing_on(); | 321 | tracer_tracing_on(tr); |
315 | else | 322 | else |
316 | tracing_off(); | 323 | tracer_tracing_off(tr); |
317 | |||
318 | /* unlimited? */ | ||
319 | if (old_count == -1) | ||
320 | return; | ||
321 | 324 | ||
322 | /* Make sure tracing state is visible before updating count */ | 325 | /* Make sure tracing state is visible before updating count */ |
323 | smp_wmb(); | 326 | smp_wmb(); |
@@ -326,33 +329,41 @@ static void update_traceon_count(void **data, bool on) | |||
326 | } | 329 | } |
327 | 330 | ||
328 | static void | 331 | static void |
329 | ftrace_traceon_count(unsigned long ip, unsigned long parent_ip, void **data) | 332 | ftrace_traceon_count(unsigned long ip, unsigned long parent_ip, |
333 | struct trace_array *tr, struct ftrace_probe_ops *ops, | ||
334 | void *data) | ||
330 | { | 335 | { |
331 | update_traceon_count(data, 1); | 336 | update_traceon_count(ops, ip, tr, 1, data); |
332 | } | 337 | } |
333 | 338 | ||
334 | static void | 339 | static void |
335 | ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip, void **data) | 340 | ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip, |
341 | struct trace_array *tr, struct ftrace_probe_ops *ops, | ||
342 | void *data) | ||
336 | { | 343 | { |
337 | update_traceon_count(data, 0); | 344 | update_traceon_count(ops, ip, tr, 0, data); |
338 | } | 345 | } |
339 | 346 | ||
340 | static void | 347 | static void |
341 | ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data) | 348 | ftrace_traceon(unsigned long ip, unsigned long parent_ip, |
349 | struct trace_array *tr, struct ftrace_probe_ops *ops, | ||
350 | void *data) | ||
342 | { | 351 | { |
343 | if (tracing_is_on()) | 352 | if (tracer_tracing_is_on(tr)) |
344 | return; | 353 | return; |
345 | 354 | ||
346 | tracing_on(); | 355 | tracer_tracing_on(tr); |
347 | } | 356 | } |
348 | 357 | ||
349 | static void | 358 | static void |
350 | ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data) | 359 | ftrace_traceoff(unsigned long ip, unsigned long parent_ip, |
360 | struct trace_array *tr, struct ftrace_probe_ops *ops, | ||
361 | void *data) | ||
351 | { | 362 | { |
352 | if (!tracing_is_on()) | 363 | if (!tracer_tracing_is_on(tr)) |
353 | return; | 364 | return; |
354 | 365 | ||
355 | tracing_off(); | 366 | tracer_tracing_off(tr); |
356 | } | 367 | } |
357 | 368 | ||
358 | /* | 369 | /* |
@@ -364,144 +375,218 @@ ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data) | |||
364 | */ | 375 | */ |
365 | #define STACK_SKIP 4 | 376 | #define STACK_SKIP 4 |
366 | 377 | ||
378 | static __always_inline void trace_stack(struct trace_array *tr) | ||
379 | { | ||
380 | unsigned long flags; | ||
381 | int pc; | ||
382 | |||
383 | local_save_flags(flags); | ||
384 | pc = preempt_count(); | ||
385 | |||
386 | __trace_stack(tr, flags, STACK_SKIP, pc); | ||
387 | } | ||
388 | |||
367 | static void | 389 | static void |
368 | ftrace_stacktrace(unsigned long ip, unsigned long parent_ip, void **data) | 390 | ftrace_stacktrace(unsigned long ip, unsigned long parent_ip, |
391 | struct trace_array *tr, struct ftrace_probe_ops *ops, | ||
392 | void *data) | ||
369 | { | 393 | { |
370 | trace_dump_stack(STACK_SKIP); | 394 | trace_stack(tr); |
371 | } | 395 | } |
372 | 396 | ||
373 | static void | 397 | static void |
374 | ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, void **data) | 398 | ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, |
399 | struct trace_array *tr, struct ftrace_probe_ops *ops, | ||
400 | void *data) | ||
375 | { | 401 | { |
376 | long *count = (long *)data; | 402 | struct ftrace_func_mapper *mapper = data; |
403 | long *count; | ||
377 | long old_count; | 404 | long old_count; |
378 | long new_count; | 405 | long new_count; |
379 | 406 | ||
407 | if (!tracing_is_on()) | ||
408 | return; | ||
409 | |||
410 | /* unlimited? */ | ||
411 | if (!mapper) { | ||
412 | trace_stack(tr); | ||
413 | return; | ||
414 | } | ||
415 | |||
416 | count = (long *)ftrace_func_mapper_find_ip(mapper, ip); | ||
417 | |||
380 | /* | 418 | /* |
381 | * Stack traces should only execute the number of times the | 419 | * Stack traces should only execute the number of times the |
382 | * user specified in the counter. | 420 | * user specified in the counter. |
383 | */ | 421 | */ |
384 | do { | 422 | do { |
385 | |||
386 | if (!tracing_is_on()) | ||
387 | return; | ||
388 | |||
389 | old_count = *count; | 423 | old_count = *count; |
390 | 424 | ||
391 | if (!old_count) | 425 | if (!old_count) |
392 | return; | 426 | return; |
393 | 427 | ||
394 | /* unlimited? */ | ||
395 | if (old_count == -1) { | ||
396 | trace_dump_stack(STACK_SKIP); | ||
397 | return; | ||
398 | } | ||
399 | |||
400 | new_count = old_count - 1; | 428 | new_count = old_count - 1; |
401 | new_count = cmpxchg(count, old_count, new_count); | 429 | new_count = cmpxchg(count, old_count, new_count); |
402 | if (new_count == old_count) | 430 | if (new_count == old_count) |
403 | trace_dump_stack(STACK_SKIP); | 431 | trace_stack(tr); |
432 | |||
433 | if (!tracing_is_on()) | ||
434 | return; | ||
404 | 435 | ||
405 | } while (new_count != old_count); | 436 | } while (new_count != old_count); |
406 | } | 437 | } |
407 | 438 | ||
408 | static int update_count(void **data) | 439 | static int update_count(struct ftrace_probe_ops *ops, unsigned long ip, |
440 | void *data) | ||
409 | { | 441 | { |
410 | unsigned long *count = (long *)data; | 442 | struct ftrace_func_mapper *mapper = data; |
443 | long *count = NULL; | ||
411 | 444 | ||
412 | if (!*count) | 445 | if (mapper) |
413 | return 0; | 446 | count = (long *)ftrace_func_mapper_find_ip(mapper, ip); |
414 | 447 | ||
415 | if (*count != -1) | 448 | if (count) { |
449 | if (*count <= 0) | ||
450 | return 0; | ||
416 | (*count)--; | 451 | (*count)--; |
452 | } | ||
417 | 453 | ||
418 | return 1; | 454 | return 1; |
419 | } | 455 | } |
420 | 456 | ||
421 | static void | 457 | static void |
422 | ftrace_dump_probe(unsigned long ip, unsigned long parent_ip, void **data) | 458 | ftrace_dump_probe(unsigned long ip, unsigned long parent_ip, |
459 | struct trace_array *tr, struct ftrace_probe_ops *ops, | ||
460 | void *data) | ||
423 | { | 461 | { |
424 | if (update_count(data)) | 462 | if (update_count(ops, ip, data)) |
425 | ftrace_dump(DUMP_ALL); | 463 | ftrace_dump(DUMP_ALL); |
426 | } | 464 | } |
427 | 465 | ||
428 | /* Only dump the current CPU buffer. */ | 466 | /* Only dump the current CPU buffer. */ |
429 | static void | 467 | static void |
430 | ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip, void **data) | 468 | ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip, |
469 | struct trace_array *tr, struct ftrace_probe_ops *ops, | ||
470 | void *data) | ||
431 | { | 471 | { |
432 | if (update_count(data)) | 472 | if (update_count(ops, ip, data)) |
433 | ftrace_dump(DUMP_ORIG); | 473 | ftrace_dump(DUMP_ORIG); |
434 | } | 474 | } |
435 | 475 | ||
436 | static int | 476 | static int |
437 | ftrace_probe_print(const char *name, struct seq_file *m, | 477 | ftrace_probe_print(const char *name, struct seq_file *m, |
438 | unsigned long ip, void *data) | 478 | unsigned long ip, struct ftrace_probe_ops *ops, |
479 | void *data) | ||
439 | { | 480 | { |
440 | long count = (long)data; | 481 | struct ftrace_func_mapper *mapper = data; |
482 | long *count = NULL; | ||
441 | 483 | ||
442 | seq_printf(m, "%ps:%s", (void *)ip, name); | 484 | seq_printf(m, "%ps:%s", (void *)ip, name); |
443 | 485 | ||
444 | if (count == -1) | 486 | if (mapper) |
445 | seq_puts(m, ":unlimited\n"); | 487 | count = (long *)ftrace_func_mapper_find_ip(mapper, ip); |
488 | |||
489 | if (count) | ||
490 | seq_printf(m, ":count=%ld\n", *count); | ||
446 | else | 491 | else |
447 | seq_printf(m, ":count=%ld\n", count); | 492 | seq_puts(m, ":unlimited\n"); |
448 | 493 | ||
449 | return 0; | 494 | return 0; |
450 | } | 495 | } |
451 | 496 | ||
452 | static int | 497 | static int |
453 | ftrace_traceon_print(struct seq_file *m, unsigned long ip, | 498 | ftrace_traceon_print(struct seq_file *m, unsigned long ip, |
454 | struct ftrace_probe_ops *ops, void *data) | 499 | struct ftrace_probe_ops *ops, |
500 | void *data) | ||
455 | { | 501 | { |
456 | return ftrace_probe_print("traceon", m, ip, data); | 502 | return ftrace_probe_print("traceon", m, ip, ops, data); |
457 | } | 503 | } |
458 | 504 | ||
459 | static int | 505 | static int |
460 | ftrace_traceoff_print(struct seq_file *m, unsigned long ip, | 506 | ftrace_traceoff_print(struct seq_file *m, unsigned long ip, |
461 | struct ftrace_probe_ops *ops, void *data) | 507 | struct ftrace_probe_ops *ops, void *data) |
462 | { | 508 | { |
463 | return ftrace_probe_print("traceoff", m, ip, data); | 509 | return ftrace_probe_print("traceoff", m, ip, ops, data); |
464 | } | 510 | } |
465 | 511 | ||
466 | static int | 512 | static int |
467 | ftrace_stacktrace_print(struct seq_file *m, unsigned long ip, | 513 | ftrace_stacktrace_print(struct seq_file *m, unsigned long ip, |
468 | struct ftrace_probe_ops *ops, void *data) | 514 | struct ftrace_probe_ops *ops, void *data) |
469 | { | 515 | { |
470 | return ftrace_probe_print("stacktrace", m, ip, data); | 516 | return ftrace_probe_print("stacktrace", m, ip, ops, data); |
471 | } | 517 | } |
472 | 518 | ||
473 | static int | 519 | static int |
474 | ftrace_dump_print(struct seq_file *m, unsigned long ip, | 520 | ftrace_dump_print(struct seq_file *m, unsigned long ip, |
475 | struct ftrace_probe_ops *ops, void *data) | 521 | struct ftrace_probe_ops *ops, void *data) |
476 | { | 522 | { |
477 | return ftrace_probe_print("dump", m, ip, data); | 523 | return ftrace_probe_print("dump", m, ip, ops, data); |
478 | } | 524 | } |
479 | 525 | ||
480 | static int | 526 | static int |
481 | ftrace_cpudump_print(struct seq_file *m, unsigned long ip, | 527 | ftrace_cpudump_print(struct seq_file *m, unsigned long ip, |
482 | struct ftrace_probe_ops *ops, void *data) | 528 | struct ftrace_probe_ops *ops, void *data) |
483 | { | 529 | { |
484 | return ftrace_probe_print("cpudump", m, ip, data); | 530 | return ftrace_probe_print("cpudump", m, ip, ops, data); |
531 | } | ||
532 | |||
533 | |||
534 | static int | ||
535 | ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr, | ||
536 | unsigned long ip, void *init_data, void **data) | ||
537 | { | ||
538 | struct ftrace_func_mapper *mapper = *data; | ||
539 | |||
540 | if (!mapper) { | ||
541 | mapper = allocate_ftrace_func_mapper(); | ||
542 | if (!mapper) | ||
543 | return -ENOMEM; | ||
544 | *data = mapper; | ||
545 | } | ||
546 | |||
547 | return ftrace_func_mapper_add_ip(mapper, ip, init_data); | ||
548 | } | ||
549 | |||
550 | static void | ||
551 | ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr, | ||
552 | unsigned long ip, void *data) | ||
553 | { | ||
554 | struct ftrace_func_mapper *mapper = data; | ||
555 | |||
556 | if (!ip) { | ||
557 | free_ftrace_func_mapper(mapper, NULL); | ||
558 | return; | ||
559 | } | ||
560 | |||
561 | ftrace_func_mapper_remove_ip(mapper, ip); | ||
485 | } | 562 | } |
486 | 563 | ||
487 | static struct ftrace_probe_ops traceon_count_probe_ops = { | 564 | static struct ftrace_probe_ops traceon_count_probe_ops = { |
488 | .func = ftrace_traceon_count, | 565 | .func = ftrace_traceon_count, |
489 | .print = ftrace_traceon_print, | 566 | .print = ftrace_traceon_print, |
567 | .init = ftrace_count_init, | ||
568 | .free = ftrace_count_free, | ||
490 | }; | 569 | }; |
491 | 570 | ||
492 | static struct ftrace_probe_ops traceoff_count_probe_ops = { | 571 | static struct ftrace_probe_ops traceoff_count_probe_ops = { |
493 | .func = ftrace_traceoff_count, | 572 | .func = ftrace_traceoff_count, |
494 | .print = ftrace_traceoff_print, | 573 | .print = ftrace_traceoff_print, |
574 | .init = ftrace_count_init, | ||
575 | .free = ftrace_count_free, | ||
495 | }; | 576 | }; |
496 | 577 | ||
497 | static struct ftrace_probe_ops stacktrace_count_probe_ops = { | 578 | static struct ftrace_probe_ops stacktrace_count_probe_ops = { |
498 | .func = ftrace_stacktrace_count, | 579 | .func = ftrace_stacktrace_count, |
499 | .print = ftrace_stacktrace_print, | 580 | .print = ftrace_stacktrace_print, |
581 | .init = ftrace_count_init, | ||
582 | .free = ftrace_count_free, | ||
500 | }; | 583 | }; |
501 | 584 | ||
502 | static struct ftrace_probe_ops dump_probe_ops = { | 585 | static struct ftrace_probe_ops dump_probe_ops = { |
503 | .func = ftrace_dump_probe, | 586 | .func = ftrace_dump_probe, |
504 | .print = ftrace_dump_print, | 587 | .print = ftrace_dump_print, |
588 | .init = ftrace_count_init, | ||
589 | .free = ftrace_count_free, | ||
505 | }; | 590 | }; |
506 | 591 | ||
507 | static struct ftrace_probe_ops cpudump_probe_ops = { | 592 | static struct ftrace_probe_ops cpudump_probe_ops = { |
@@ -525,7 +610,8 @@ static struct ftrace_probe_ops stacktrace_probe_ops = { | |||
525 | }; | 610 | }; |
526 | 611 | ||
527 | static int | 612 | static int |
528 | ftrace_trace_probe_callback(struct ftrace_probe_ops *ops, | 613 | ftrace_trace_probe_callback(struct trace_array *tr, |
614 | struct ftrace_probe_ops *ops, | ||
529 | struct ftrace_hash *hash, char *glob, | 615 | struct ftrace_hash *hash, char *glob, |
530 | char *cmd, char *param, int enable) | 616 | char *cmd, char *param, int enable) |
531 | { | 617 | { |
@@ -537,10 +623,8 @@ ftrace_trace_probe_callback(struct ftrace_probe_ops *ops, | |||
537 | if (!enable) | 623 | if (!enable) |
538 | return -EINVAL; | 624 | return -EINVAL; |
539 | 625 | ||
540 | if (glob[0] == '!') { | 626 | if (glob[0] == '!') |
541 | unregister_ftrace_function_probe_func(glob+1, ops); | 627 | return unregister_ftrace_function_probe_func(glob+1, tr, ops); |
542 | return 0; | ||
543 | } | ||
544 | 628 | ||
545 | if (!param) | 629 | if (!param) |
546 | goto out_reg; | 630 | goto out_reg; |
@@ -559,13 +643,13 @@ ftrace_trace_probe_callback(struct ftrace_probe_ops *ops, | |||
559 | return ret; | 643 | return ret; |
560 | 644 | ||
561 | out_reg: | 645 | out_reg: |
562 | ret = register_ftrace_function_probe(glob, ops, count); | 646 | ret = register_ftrace_function_probe(glob, tr, ops, count); |
563 | 647 | ||
564 | return ret < 0 ? ret : 0; | 648 | return ret < 0 ? ret : 0; |
565 | } | 649 | } |
566 | 650 | ||
567 | static int | 651 | static int |
568 | ftrace_trace_onoff_callback(struct ftrace_hash *hash, | 652 | ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash, |
569 | char *glob, char *cmd, char *param, int enable) | 653 | char *glob, char *cmd, char *param, int enable) |
570 | { | 654 | { |
571 | struct ftrace_probe_ops *ops; | 655 | struct ftrace_probe_ops *ops; |
@@ -576,24 +660,24 @@ ftrace_trace_onoff_callback(struct ftrace_hash *hash, | |||
576 | else | 660 | else |
577 | ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops; | 661 | ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops; |
578 | 662 | ||
579 | return ftrace_trace_probe_callback(ops, hash, glob, cmd, | 663 | return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd, |
580 | param, enable); | 664 | param, enable); |
581 | } | 665 | } |
582 | 666 | ||
583 | static int | 667 | static int |
584 | ftrace_stacktrace_callback(struct ftrace_hash *hash, | 668 | ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash, |
585 | char *glob, char *cmd, char *param, int enable) | 669 | char *glob, char *cmd, char *param, int enable) |
586 | { | 670 | { |
587 | struct ftrace_probe_ops *ops; | 671 | struct ftrace_probe_ops *ops; |
588 | 672 | ||
589 | ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops; | 673 | ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops; |
590 | 674 | ||
591 | return ftrace_trace_probe_callback(ops, hash, glob, cmd, | 675 | return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd, |
592 | param, enable); | 676 | param, enable); |
593 | } | 677 | } |
594 | 678 | ||
595 | static int | 679 | static int |
596 | ftrace_dump_callback(struct ftrace_hash *hash, | 680 | ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash, |
597 | char *glob, char *cmd, char *param, int enable) | 681 | char *glob, char *cmd, char *param, int enable) |
598 | { | 682 | { |
599 | struct ftrace_probe_ops *ops; | 683 | struct ftrace_probe_ops *ops; |
@@ -601,12 +685,12 @@ ftrace_dump_callback(struct ftrace_hash *hash, | |||
601 | ops = &dump_probe_ops; | 685 | ops = &dump_probe_ops; |
602 | 686 | ||
603 | /* Only dump once. */ | 687 | /* Only dump once. */ |
604 | return ftrace_trace_probe_callback(ops, hash, glob, cmd, | 688 | return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd, |
605 | "1", enable); | 689 | "1", enable); |
606 | } | 690 | } |
607 | 691 | ||
608 | static int | 692 | static int |
609 | ftrace_cpudump_callback(struct ftrace_hash *hash, | 693 | ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash, |
610 | char *glob, char *cmd, char *param, int enable) | 694 | char *glob, char *cmd, char *param, int enable) |
611 | { | 695 | { |
612 | struct ftrace_probe_ops *ops; | 696 | struct ftrace_probe_ops *ops; |
@@ -614,7 +698,7 @@ ftrace_cpudump_callback(struct ftrace_hash *hash, | |||
614 | ops = &cpudump_probe_ops; | 698 | ops = &cpudump_probe_ops; |
615 | 699 | ||
616 | /* Only dump once. */ | 700 | /* Only dump once. */ |
617 | return ftrace_trace_probe_callback(ops, hash, glob, cmd, | 701 | return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd, |
618 | "1", enable); | 702 | "1", enable); |
619 | } | 703 | } |
620 | 704 | ||
@@ -687,9 +771,8 @@ static inline int init_func_cmd_traceon(void) | |||
687 | } | 771 | } |
688 | #endif /* CONFIG_DYNAMIC_FTRACE */ | 772 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
689 | 773 | ||
690 | static __init int init_function_trace(void) | 774 | __init int init_function_trace(void) |
691 | { | 775 | { |
692 | init_func_cmd_traceon(); | 776 | init_func_cmd_traceon(); |
693 | return register_tracer(&function_trace); | 777 | return register_tracer(&function_trace); |
694 | } | 778 | } |
695 | core_initcall(init_function_trace); | ||
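Because update_traceon_count() and the stacktrace probe above now receive the trace_array and use tracer_tracing_on()/tracer_tracing_off() plus a per-probe ftrace_func_mapper for counts, the traceon/traceoff/stacktrace triggers act on whichever instance's set_ftrace_filter was written. A rough sketch, with the instance path and function names as illustrative assumptions:

    cd /sys/kernel/tracing/instances/foo
    echo 'schedule:traceoff:3' > set_ftrace_filter    # stop only this instance's buffer, at most 3 times
    echo 'do_exit:stacktrace' > set_ftrace_filter     # unlimited stack traces into this instance
    cat trace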
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 013f4e7146d4..8485f6738a87 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include "trace_probe.h" | 25 | #include "trace_probe.h" |
26 | 26 | ||
27 | #define KPROBE_EVENT_SYSTEM "kprobes" | 27 | #define KPROBE_EVENT_SYSTEM "kprobes" |
28 | #define KRETPROBE_MAXACTIVE_MAX 4096 | ||
28 | 29 | ||
29 | /** | 30 | /** |
30 | * Kprobe event core functions | 31 | * Kprobe event core functions |
@@ -282,6 +283,7 @@ static struct trace_kprobe *alloc_trace_kprobe(const char *group, | |||
282 | void *addr, | 283 | void *addr, |
283 | const char *symbol, | 284 | const char *symbol, |
284 | unsigned long offs, | 285 | unsigned long offs, |
286 | int maxactive, | ||
285 | int nargs, bool is_return) | 287 | int nargs, bool is_return) |
286 | { | 288 | { |
287 | struct trace_kprobe *tk; | 289 | struct trace_kprobe *tk; |
@@ -309,6 +311,8 @@ static struct trace_kprobe *alloc_trace_kprobe(const char *group, | |||
309 | else | 311 | else |
310 | tk->rp.kp.pre_handler = kprobe_dispatcher; | 312 | tk->rp.kp.pre_handler = kprobe_dispatcher; |
311 | 313 | ||
314 | tk->rp.maxactive = maxactive; | ||
315 | |||
312 | if (!event || !is_good_name(event)) { | 316 | if (!event || !is_good_name(event)) { |
313 | ret = -EINVAL; | 317 | ret = -EINVAL; |
314 | goto error; | 318 | goto error; |
@@ -598,8 +602,10 @@ static int create_trace_kprobe(int argc, char **argv) | |||
598 | { | 602 | { |
599 | /* | 603 | /* |
600 | * Argument syntax: | 604 | * Argument syntax: |
601 | * - Add kprobe: p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS] | 605 | * - Add kprobe: |
602 | * - Add kretprobe: r[:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS] | 606 | * p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS] |
607 | * - Add kretprobe: | ||
608 | * r[MAXACTIVE][:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS] | ||
603 | * Fetch args: | 609 | * Fetch args: |
604 | * $retval : fetch return value | 610 | * $retval : fetch return value |
605 | * $stack : fetch stack address | 611 | * $stack : fetch stack address |
@@ -619,6 +625,7 @@ static int create_trace_kprobe(int argc, char **argv) | |||
619 | int i, ret = 0; | 625 | int i, ret = 0; |
620 | bool is_return = false, is_delete = false; | 626 | bool is_return = false, is_delete = false; |
621 | char *symbol = NULL, *event = NULL, *group = NULL; | 627 | char *symbol = NULL, *event = NULL, *group = NULL; |
628 | int maxactive = 0; | ||
622 | char *arg; | 629 | char *arg; |
623 | unsigned long offset = 0; | 630 | unsigned long offset = 0; |
624 | void *addr = NULL; | 631 | void *addr = NULL; |
@@ -637,8 +644,28 @@ static int create_trace_kprobe(int argc, char **argv) | |||
637 | return -EINVAL; | 644 | return -EINVAL; |
638 | } | 645 | } |
639 | 646 | ||
640 | if (argv[0][1] == ':') { | 647 | event = strchr(&argv[0][1], ':'); |
641 | event = &argv[0][2]; | 648 | if (event) { |
649 | event[0] = '\0'; | ||
650 | event++; | ||
651 | } | ||
652 | if (is_return && isdigit(argv[0][1])) { | ||
653 | ret = kstrtouint(&argv[0][1], 0, &maxactive); | ||
654 | if (ret) { | ||
655 | pr_info("Failed to parse maxactive.\n"); | ||
656 | return ret; | ||
657 | } | ||
658 | /* kretprobes instances are iterated over via a list. The | ||
659 | * maximum should stay reasonable. | ||
660 | */ | ||
661 | if (maxactive > KRETPROBE_MAXACTIVE_MAX) { | ||
662 | pr_info("Maxactive is too big (%d > %d).\n", | ||
663 | maxactive, KRETPROBE_MAXACTIVE_MAX); | ||
664 | return -E2BIG; | ||
665 | } | ||
666 | } | ||
667 | |||
668 | if (event) { | ||
642 | if (strchr(event, '/')) { | 669 | if (strchr(event, '/')) { |
643 | group = event; | 670 | group = event; |
644 | event = strchr(group, '/') + 1; | 671 | event = strchr(group, '/') + 1; |
@@ -715,8 +742,8 @@ static int create_trace_kprobe(int argc, char **argv) | |||
715 | is_return ? 'r' : 'p', addr); | 742 | is_return ? 'r' : 'p', addr); |
716 | event = buf; | 743 | event = buf; |
717 | } | 744 | } |
718 | tk = alloc_trace_kprobe(group, event, addr, symbol, offset, argc, | 745 | tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive, |
719 | is_return); | 746 | argc, is_return); |
720 | if (IS_ERR(tk)) { | 747 | if (IS_ERR(tk)) { |
721 | pr_info("Failed to allocate trace_probe.(%d)\n", | 748 | pr_info("Failed to allocate trace_probe.(%d)\n", |
722 | (int)PTR_ERR(tk)); | 749 | (int)PTR_ERR(tk)); |
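create_trace_kprobe() above now parses an optional MAXACTIVE count between 'r' and ':', rejects values above KRETPROBE_MAXACTIVE_MAX (4096), and threads it through alloc_trace_kprobe() into rp.maxactive. Following the synopsis in the updated comment, a usage sketch (the event name and probed symbol are illustrative):

    # return probe on do_sys_open with up to 100 concurrently tracked instances
    echo 'r100:myretprobe do_sys_open $retval' > /sys/kernel/tracing/kprobe_events
    echo 1 > /sys/kernel/tracing/events/kprobes/myretprobe/enable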
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index 5fb1f2c87e6b..76aa04d4c925 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c | |||
@@ -35,7 +35,7 @@ unsigned long stack_trace_max_size; | |||
35 | arch_spinlock_t stack_trace_max_lock = | 35 | arch_spinlock_t stack_trace_max_lock = |
36 | (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; | 36 | (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; |
37 | 37 | ||
38 | static DEFINE_PER_CPU(int, trace_active); | 38 | DEFINE_PER_CPU(int, disable_stack_tracer); |
39 | static DEFINE_MUTEX(stack_sysctl_mutex); | 39 | static DEFINE_MUTEX(stack_sysctl_mutex); |
40 | 40 | ||
41 | int stack_tracer_enabled; | 41 | int stack_tracer_enabled; |
@@ -96,6 +96,14 @@ check_stack(unsigned long ip, unsigned long *stack) | |||
96 | if (in_nmi()) | 96 | if (in_nmi()) |
97 | return; | 97 | return; |
98 | 98 | ||
99 | /* | ||
100 | * There's a slight chance that we are tracing inside the | ||
101 | * RCU infrastructure, and rcu_irq_enter() will not work | ||
102 | * as expected. | ||
103 | */ | ||
104 | if (unlikely(rcu_irq_enter_disabled())) | ||
105 | return; | ||
106 | |||
99 | local_irq_save(flags); | 107 | local_irq_save(flags); |
100 | arch_spin_lock(&stack_trace_max_lock); | 108 | arch_spin_lock(&stack_trace_max_lock); |
101 | 109 | ||
@@ -207,13 +215,12 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip, | |||
207 | struct ftrace_ops *op, struct pt_regs *pt_regs) | 215 | struct ftrace_ops *op, struct pt_regs *pt_regs) |
208 | { | 216 | { |
209 | unsigned long stack; | 217 | unsigned long stack; |
210 | int cpu; | ||
211 | 218 | ||
212 | preempt_disable_notrace(); | 219 | preempt_disable_notrace(); |
213 | 220 | ||
214 | cpu = raw_smp_processor_id(); | ||
215 | /* no atomic needed, we only modify this variable by this cpu */ | 221 | /* no atomic needed, we only modify this variable by this cpu */ |
216 | if (per_cpu(trace_active, cpu)++ != 0) | 222 | __this_cpu_inc(disable_stack_tracer); |
223 | if (__this_cpu_read(disable_stack_tracer) != 1) | ||
217 | goto out; | 224 | goto out; |
218 | 225 | ||
219 | ip += MCOUNT_INSN_SIZE; | 226 | ip += MCOUNT_INSN_SIZE; |
@@ -221,7 +228,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip, | |||
221 | check_stack(ip, &stack); | 228 | check_stack(ip, &stack); |
222 | 229 | ||
223 | out: | 230 | out: |
224 | per_cpu(trace_active, cpu)--; | 231 | __this_cpu_dec(disable_stack_tracer); |
225 | /* prevent recursion in schedule */ | 232 | /* prevent recursion in schedule */ |
226 | preempt_enable_notrace(); | 233 | preempt_enable_notrace(); |
227 | } | 234 | } |
@@ -253,7 +260,6 @@ stack_max_size_write(struct file *filp, const char __user *ubuf, | |||
253 | long *ptr = filp->private_data; | 260 | long *ptr = filp->private_data; |
254 | unsigned long val, flags; | 261 | unsigned long val, flags; |
255 | int ret; | 262 | int ret; |
256 | int cpu; | ||
257 | 263 | ||
258 | ret = kstrtoul_from_user(ubuf, count, 10, &val); | 264 | ret = kstrtoul_from_user(ubuf, count, 10, &val); |
259 | if (ret) | 265 | if (ret) |
@@ -264,16 +270,15 @@ stack_max_size_write(struct file *filp, const char __user *ubuf, | |||
264 | /* | 270 | /* |
265 | * In case we trace inside arch_spin_lock() or after (NMI), | 271 | * In case we trace inside arch_spin_lock() or after (NMI), |
266 | * we will cause circular lock, so we also need to increase | 272 | * we will cause circular lock, so we also need to increase |
267 | * the percpu trace_active here. | 273 | * the percpu disable_stack_tracer here. |
268 | */ | 274 | */ |
269 | cpu = smp_processor_id(); | 275 | __this_cpu_inc(disable_stack_tracer); |
270 | per_cpu(trace_active, cpu)++; | ||
271 | 276 | ||
272 | arch_spin_lock(&stack_trace_max_lock); | 277 | arch_spin_lock(&stack_trace_max_lock); |
273 | *ptr = val; | 278 | *ptr = val; |
274 | arch_spin_unlock(&stack_trace_max_lock); | 279 | arch_spin_unlock(&stack_trace_max_lock); |
275 | 280 | ||
276 | per_cpu(trace_active, cpu)--; | 281 | __this_cpu_dec(disable_stack_tracer); |
277 | local_irq_restore(flags); | 282 | local_irq_restore(flags); |
278 | 283 | ||
279 | return count; | 284 | return count; |
@@ -307,12 +312,9 @@ t_next(struct seq_file *m, void *v, loff_t *pos) | |||
307 | 312 | ||
308 | static void *t_start(struct seq_file *m, loff_t *pos) | 313 | static void *t_start(struct seq_file *m, loff_t *pos) |
309 | { | 314 | { |
310 | int cpu; | ||
311 | |||
312 | local_irq_disable(); | 315 | local_irq_disable(); |
313 | 316 | ||
314 | cpu = smp_processor_id(); | 317 | __this_cpu_inc(disable_stack_tracer); |
315 | per_cpu(trace_active, cpu)++; | ||
316 | 318 | ||
317 | arch_spin_lock(&stack_trace_max_lock); | 319 | arch_spin_lock(&stack_trace_max_lock); |
318 | 320 | ||
@@ -324,12 +326,9 @@ static void *t_start(struct seq_file *m, loff_t *pos) | |||
324 | 326 | ||
325 | static void t_stop(struct seq_file *m, void *p) | 327 | static void t_stop(struct seq_file *m, void *p) |
326 | { | 328 | { |
327 | int cpu; | ||
328 | |||
329 | arch_spin_unlock(&stack_trace_max_lock); | 329 | arch_spin_unlock(&stack_trace_max_lock); |
330 | 330 | ||
331 | cpu = smp_processor_id(); | 331 | __this_cpu_dec(disable_stack_tracer); |
332 | per_cpu(trace_active, cpu)--; | ||
333 | 332 | ||
334 | local_irq_enable(); | 333 | local_irq_enable(); |
335 | } | 334 | } |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 1e2af704938d..2c25de46c58f 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -65,6 +65,7 @@ | |||
65 | #include <linux/page_owner.h> | 65 | #include <linux/page_owner.h> |
66 | #include <linux/kthread.h> | 66 | #include <linux/kthread.h> |
67 | #include <linux/memcontrol.h> | 67 | #include <linux/memcontrol.h> |
68 | #include <linux/ftrace.h> | ||
68 | 69 | ||
69 | #include <asm/sections.h> | 70 | #include <asm/sections.h> |
70 | #include <asm/tlbflush.h> | 71 | #include <asm/tlbflush.h> |
diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c index aeb34223167c..16e086dcc567 100644 --- a/scripts/recordmcount.c +++ b/scripts/recordmcount.c | |||
@@ -412,6 +412,7 @@ static int | |||
412 | is_mcounted_section_name(char const *const txtname) | 412 | is_mcounted_section_name(char const *const txtname) |
413 | { | 413 | { |
414 | return strcmp(".text", txtname) == 0 || | 414 | return strcmp(".text", txtname) == 0 || |
415 | strcmp(".init.text", txtname) == 0 || | ||
415 | strcmp(".ref.text", txtname) == 0 || | 416 | strcmp(".ref.text", txtname) == 0 || |
416 | strcmp(".sched.text", txtname) == 0 || | 417 | strcmp(".sched.text", txtname) == 0 || |
417 | strcmp(".spinlock.text", txtname) == 0 || | 418 | strcmp(".spinlock.text", txtname) == 0 || |
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl index 0b6002b36f20..1633c3e6c0b9 100755 --- a/scripts/recordmcount.pl +++ b/scripts/recordmcount.pl | |||
@@ -130,6 +130,7 @@ if ($inputfile =~ m,kernel/trace/ftrace\.o$,) { | |||
130 | # Acceptable sections to record. | 130 | # Acceptable sections to record. |
131 | my %text_sections = ( | 131 | my %text_sections = ( |
132 | ".text" => 1, | 132 | ".text" => 1, |
133 | ".init.text" => 1, | ||
133 | ".ref.text" => 1, | 134 | ".ref.text" => 1, |
134 | ".sched.text" => 1, | 135 | ".sched.text" => 1, |
135 | ".spinlock.text" => 1, | 136 | ".spinlock.text" => 1, |
diff --git a/tools/testing/selftests/ftrace/ftracetest b/tools/testing/selftests/ftrace/ftracetest index 52e3c4df28d6..32e6211e1c6e 100755 --- a/tools/testing/selftests/ftrace/ftracetest +++ b/tools/testing/selftests/ftrace/ftracetest | |||
@@ -16,6 +16,7 @@ echo " -k|--keep Keep passed test logs" | |||
16 | echo " -v|--verbose Increase verbosity of test messages" | 16 | echo " -v|--verbose Increase verbosity of test messages" |
17 | echo " -vv Alias of -v -v (Show all results in stdout)" | 17 | echo " -vv Alias of -v -v (Show all results in stdout)" |
18 | echo " -d|--debug Debug mode (trace all shell commands)" | 18 | echo " -d|--debug Debug mode (trace all shell commands)" |
19 | echo " -l|--logdir <dir> Save logs on the <dir>" | ||
19 | exit $1 | 20 | exit $1 |
20 | } | 21 | } |
21 | 22 | ||
@@ -64,6 +65,10 @@ parse_opts() { # opts | |||
64 | DEBUG=1 | 65 | DEBUG=1 |
65 | shift 1 | 66 | shift 1 |
66 | ;; | 67 | ;; |
68 | --logdir|-l) | ||
69 | LOG_DIR=$2 | ||
70 | shift 2 | ||
71 | ;; | ||
67 | *.tc) | 72 | *.tc) |
68 | if [ -f "$1" ]; then | 73 | if [ -f "$1" ]; then |
69 | OPT_TEST_CASES="$OPT_TEST_CASES `abspath $1`" | 74 | OPT_TEST_CASES="$OPT_TEST_CASES `abspath $1`" |
@@ -145,11 +150,16 @@ XFAILED_CASES= | |||
145 | UNDEFINED_CASES= | 150 | UNDEFINED_CASES= |
146 | TOTAL_RESULT=0 | 151 | TOTAL_RESULT=0 |
147 | 152 | ||
153 | INSTANCE= | ||
148 | CASENO=0 | 154 | CASENO=0 |
149 | testcase() { # testfile | 155 | testcase() { # testfile |
150 | CASENO=$((CASENO+1)) | 156 | CASENO=$((CASENO+1)) |
151 | desc=`grep "^#[ \t]*description:" $1 | cut -f2 -d:` | 157 | desc=`grep "^#[ \t]*description:" $1 | cut -f2 -d:` |
152 | prlog -n "[$CASENO]$desc" | 158 | prlog -n "[$CASENO]$INSTANCE$desc" |
159 | } | ||
160 | |||
161 | test_on_instance() { # testfile | ||
162 | grep -q "^#[ \t]*flags:.*instance" $1 | ||
153 | } | 163 | } |
154 | 164 | ||
155 | eval_result() { # sigval | 165 | eval_result() { # sigval |
@@ -266,6 +276,17 @@ for t in $TEST_CASES; do | |||
266 | run_test $t | 276 | run_test $t |
267 | done | 277 | done |
268 | 278 | ||
279 | # Test on instance loop | ||
280 | INSTANCE=" (instance) " | ||
281 | for t in $TEST_CASES; do | ||
282 | test_on_instance $t || continue | ||
283 | SAVED_TRACING_DIR=$TRACING_DIR | ||
284 | export TRACING_DIR=`mktemp -d $TRACING_DIR/instances/ftracetest.XXXXXX` | ||
285 | run_test $t | ||
286 | rmdir $TRACING_DIR | ||
287 | TRACING_DIR=$SAVED_TRACING_DIR | ||
288 | done | ||
289 | |||
269 | prlog "" | 290 | prlog "" |
270 | prlog "# of passed: " `echo $PASSED_CASES | wc -w` | 291 | prlog "# of passed: " `echo $PASSED_CASES | wc -w` |
271 | prlog "# of failed: " `echo $FAILED_CASES | wc -w` | 292 | prlog "# of failed: " `echo $FAILED_CASES | wc -w` |
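Two ftracetest changes land here: a new -l/--logdir option to redirect test logs, and a second pass that re-runs every test whose header carries "# flags: instance" inside a freshly created instances/ftracetest.XXXXXX directory, pointing TRACING_DIR at it for the duration of the run. A sketch of driving this from the command line (tree layout and log path are assumptions):

    cd tools/testing/selftests/ftrace
    # Keep logs in a custom directory; tests flagged "instance" are run twice,
    # once on the top-level tracing directory and once in a temporary instance.
    sudo ./ftracetest -l /tmp/ftracetest-logs test.d/00basic/basic2.tc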
diff --git a/tools/testing/selftests/ftrace/test.d/00basic/basic2.tc b/tools/testing/selftests/ftrace/test.d/00basic/basic2.tc index bf9a7b037924..ebfce83f35b4 100644 --- a/tools/testing/selftests/ftrace/test.d/00basic/basic2.tc +++ b/tools/testing/selftests/ftrace/test.d/00basic/basic2.tc | |||
@@ -1,5 +1,6 @@ | |||
1 | #!/bin/sh | 1 | #!/bin/sh |
2 | # description: Basic test for tracers | 2 | # description: Basic test for tracers |
3 | # flags: instance | ||
3 | test -f available_tracers | 4 | test -f available_tracers |
4 | for t in `cat available_tracers`; do | 5 | for t in `cat available_tracers`; do |
5 | echo $t > current_tracer | 6 | echo $t > current_tracer |
diff --git a/tools/testing/selftests/ftrace/test.d/00basic/basic3.tc b/tools/testing/selftests/ftrace/test.d/00basic/basic3.tc index bde6625d9785..9e33f841812f 100644 --- a/tools/testing/selftests/ftrace/test.d/00basic/basic3.tc +++ b/tools/testing/selftests/ftrace/test.d/00basic/basic3.tc | |||
@@ -1,5 +1,6 @@ | |||
1 | #!/bin/sh | 1 | #!/bin/sh |
2 | # description: Basic trace clock test | 2 | # description: Basic trace clock test |
3 | # flags: instance | ||
3 | test -f trace_clock | 4 | test -f trace_clock |
4 | for c in `cat trace_clock | tr -d \[\]`; do | 5 | for c in `cat trace_clock | tr -d \[\]`; do |
5 | echo $c > trace_clock | 6 | echo $c > trace_clock |
diff --git a/tools/testing/selftests/ftrace/test.d/event/event-enable.tc b/tools/testing/selftests/ftrace/test.d/event/event-enable.tc index 87eb9d6dd4ca..283b45ecb199 100644 --- a/tools/testing/selftests/ftrace/test.d/event/event-enable.tc +++ b/tools/testing/selftests/ftrace/test.d/event/event-enable.tc | |||
@@ -1,5 +1,6 @@ | |||
1 | #!/bin/sh | 1 | #!/bin/sh |
2 | # description: event tracing - enable/disable with event level files | 2 | # description: event tracing - enable/disable with event level files |
3 | # flags: instance | ||
3 | 4 | ||
4 | do_reset() { | 5 | do_reset() { |
5 | echo > set_event | 6 | echo > set_event |
diff --git a/tools/testing/selftests/ftrace/test.d/event/event-pid.tc b/tools/testing/selftests/ftrace/test.d/event/event-pid.tc index d4ab27b522f8..96c1a95be4f7 100644 --- a/tools/testing/selftests/ftrace/test.d/event/event-pid.tc +++ b/tools/testing/selftests/ftrace/test.d/event/event-pid.tc | |||
@@ -1,5 +1,6 @@ | |||
1 | #!/bin/sh | 1 | #!/bin/sh |
2 | # description: event tracing - restricts events based on pid | 2 | # description: event tracing - restricts events based on pid |
3 | # flags: instance | ||
3 | 4 | ||
4 | do_reset() { | 5 | do_reset() { |
5 | echo > set_event | 6 | echo > set_event |
diff --git a/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc b/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc index ced27ef0638f..b8fe2e5b9e67 100644 --- a/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc +++ b/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc | |||
@@ -1,5 +1,6 @@ | |||
1 | #!/bin/sh | 1 | #!/bin/sh |
2 | # description: event tracing - enable/disable with subsystem level files | 2 | # description: event tracing - enable/disable with subsystem level files |
3 | # flags: instance | ||
3 | 4 | ||
4 | do_reset() { | 5 | do_reset() { |
5 | echo > set_event | 6 | echo > set_event |
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc new file mode 100644 index 000000000000..07bb3e5930b4 --- /dev/null +++ b/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc | |||
@@ -0,0 +1,114 @@ | |||
1 | #!/bin/sh | ||
2 | # description: ftrace - test for function event triggers | ||
3 | # flags: instance | ||
4 | # | ||
5 | # Ftrace allows adding triggers to functions, such as enabling or disabling | ||
6 | # tracing, enabling or disabling trace events, or recording a stack trace | ||
7 | # within the ring buffer. | ||
8 | # | ||
9 | # This test is designed to test event triggers | ||
10 | # | ||
11 | |||
12 | # The triggers are set within the set_ftrace_filter file | ||
13 | if [ ! -f set_ftrace_filter ]; then | ||
14 | echo "set_ftrace_filter not found? Is dynamic ftrace not set?" | ||
15 | exit_unsupported | ||
16 | fi | ||
17 | |||
18 | do_reset() { | ||
19 | reset_ftrace_filter | ||
20 | reset_tracer | ||
21 | disable_events | ||
22 | clear_trace | ||
23 | enable_tracing | ||
24 | } | ||
25 | |||
26 | fail() { # mesg | ||
27 | do_reset | ||
28 | echo $1 | ||
29 | exit $FAIL | ||
30 | } | ||
31 | |||
32 | SLEEP_TIME=".1" | ||
33 | |||
34 | do_reset | ||
35 | |||
36 | echo "Testing function probes with events:" | ||
37 | |||
38 | EVENT="sched:sched_switch" | ||
39 | EVENT_ENABLE="events/sched/sched_switch/enable" | ||
40 | |||
41 | cnt_trace() { | ||
42 | grep -v '^#' trace | wc -l | ||
43 | } | ||
44 | |||
45 | test_event_enabled() { | ||
46 | val=$1 | ||
47 | |||
48 | e=`cat $EVENT_ENABLE` | ||
49 | if [ "$e" != $val ]; then | ||
50 | echo "Expected $val but found $e" | ||
51 | exit -1 | ||
52 | fi | ||
53 | } | ||
54 | |||
55 | run_enable_disable() { | ||
56 | enable=$1 # enable | ||
57 | Enable=$2 # Enable | ||
58 | check_disable=$3 # 0 | ||
59 | check_enable_star=$4 # 1* | ||
60 | check_disable_star=$5 # 0* | ||
61 | |||
62 | cnt=`cnt_trace` | ||
63 | if [ $cnt -ne 0 ]; then | ||
64 | fail "Found junk in trace file" | ||
65 | fi | ||
66 | |||
67 | echo "$Enable event all the time" | ||
68 | |||
69 | echo $check_disable > $EVENT_ENABLE | ||
70 | sleep $SLEEP_TIME | ||
71 | |||
72 | test_event_enabled $check_disable | ||
73 | |||
74 | echo "schedule:${enable}_event:$EVENT" > set_ftrace_filter | ||
75 | |||
76 | echo " make sure it works 5 times" | ||
77 | |||
78 | for i in `seq 5`; do | ||
79 | sleep $SLEEP_TIME | ||
80 | echo " test $i" | ||
81 | test_event_enabled $check_enable_star | ||
82 | |||
83 | echo $check_disable > $EVENT_ENABLE | ||
84 | done | ||
85 | sleep $SLEEP_TIME | ||
86 | echo " make sure it's still works" | ||
87 | test_event_enabled $check_enable_star | ||
88 | |||
89 | reset_ftrace_filter | ||
90 | |||
91 | echo " make sure it only works 3 times" | ||
92 | |||
93 | echo $check_disable > $EVENT_ENABLE | ||
94 | sleep $SLEEP_TIME | ||
95 | |||
96 | echo "schedule:${enable}_event:$EVENT:3" > set_ftrace_filter | ||
97 | |||
98 | for i in `seq 3`; do | ||
99 | sleep $SLEEP_TIME | ||
100 | echo " test $i" | ||
101 | test_event_enabled $check_enable_star | ||
102 | |||
103 | echo $check_disable > $EVENT_ENABLE | ||
104 | done | ||
105 | |||
106 | sleep $SLEEP_TIME | ||
107 | echo " make sure it stop working" | ||
108 | test_event_enabled $check_disable_star | ||
109 | |||
110 | do_reset | ||
111 | } | ||
112 | |||
113 | run_enable_disable enable Enable 0 "1*" "0*" | ||
114 | run_enable_disable disable Disable 1 "0*" "1*" | ||
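The func_event_triggers test drives the enable_event/disable_event function probes: a probe attached to schedule() flips the sched_switch enable file each time the function is hit, first unlimited and then capped at three hits. A minimal by-hand version of the same steps (tracefs mount point is an assumption):

    cd /sys/kernel/debug/tracing
    echo 0 > events/sched/sched_switch/enable
    # Re-enable the event every time schedule() is called:
    echo 'schedule:enable_event:sched:sched_switch' > set_ftrace_filter
    cat events/sched/sched_switch/enable     # shows "1*" once the probe has fired
    # Remove the probe with the '!' prefix:
    echo '!schedule:enable_event:sched:sched_switch' >> set_ftrace_filter
    # A trailing count stops the probe from rearming after 3 hits:
    echo 'schedule:enable_event:sched:sched_switch:3' > set_ftrace_filter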
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc new file mode 100644 index 000000000000..113b4d9bc733 --- /dev/null +++ b/tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc | |||
@@ -0,0 +1,132 @@ | |||
1 | #!/bin/sh | ||
2 | # description: ftrace - test reading of set_ftrace_filter | ||
3 | # | ||
4 | # The set_ftrace_filter file of ftrace is used to list functions as well as | ||
5 | # triggers (probes) attached to functions. The code to read this file is not | ||
6 | # straightforward and has had various bugs in the past. This test is designed | ||
7 | # to add functions and triggers to that file in various ways and read that | ||
8 | # file in various ways (cat vs dd). | ||
9 | # | ||
10 | |||
11 | # The triggers are set within the set_ftrace_filter file | ||
12 | if [ ! -f set_ftrace_filter ]; then | ||
13 | echo "set_ftrace_filter not found? Is dynamic ftrace not set?" | ||
14 | exit_unsupported | ||
15 | fi | ||
16 | |||
17 | do_reset() { | ||
18 | reset_tracer | ||
19 | reset_ftrace_filter | ||
20 | disable_events | ||
21 | clear_trace | ||
22 | enable_tracing | ||
23 | } | ||
24 | |||
25 | fail() { # mesg | ||
26 | do_reset | ||
27 | echo $1 | ||
28 | exit $FAIL | ||
29 | } | ||
30 | |||
31 | do_reset | ||
32 | |||
33 | FILTER=set_ftrace_filter | ||
34 | FUNC1="schedule" | ||
35 | FUNC2="do_IRQ" | ||
36 | |||
37 | ALL_FUNCS="#### all functions enabled ####" | ||
38 | |||
39 | test_func() { | ||
40 | if ! echo "$1" | grep -q "^$2\$"; then | ||
41 | return 0 | ||
42 | fi | ||
43 | echo "$1" | grep -v "^$2\$" | ||
44 | return 1 | ||
45 | } | ||
46 | |||
47 | check_set_ftrace_filter() { | ||
48 | cat=`cat $FILTER` | ||
49 | dd1=`dd if=$FILTER bs=1 | grep -v -e 'records in' -e 'records out' -e 'bytes copied'` | ||
50 | dd100=`dd if=$FILTER bs=100 | grep -v -e 'records in' -e 'records out' -e 'bytes copied'` | ||
51 | |||
52 | echo "Testing '$@'" | ||
53 | |||
54 | while [ $# -gt 0 ]; do | ||
55 | echo "test $1" | ||
56 | if cat=`test_func "$cat" "$1"`; then | ||
57 | return 0 | ||
58 | fi | ||
59 | if dd1=`test_func "$dd1" "$1"`; then | ||
60 | return 0 | ||
61 | fi | ||
62 | if dd100=`test_func "$dd100" "$1"`; then | ||
63 | return 0 | ||
64 | fi | ||
65 | shift | ||
66 | done | ||
67 | |||
68 | if [ -n "$cat" ]; then | ||
69 | return 0 | ||
70 | fi | ||
71 | if [ -n "$dd1" ]; then | ||
72 | return 0 | ||
73 | fi | ||
74 | if [ -n "$dd100" ]; then | ||
75 | return 0 | ||
76 | fi | ||
77 | return 1; | ||
78 | } | ||
79 | |||
80 | if check_set_ftrace_filter "$ALL_FUNCS"; then | ||
81 | fail "Expected only $ALL_FUNCS" | ||
82 | fi | ||
83 | |||
84 | echo "$FUNC1:traceoff" > set_ftrace_filter | ||
85 | if check_set_ftrace_filter "$ALL_FUNCS" "$FUNC1:traceoff:unlimited"; then | ||
86 | fail "Expected $ALL_FUNCS and $FUNC1:traceoff:unlimited" | ||
87 | fi | ||
88 | |||
89 | echo "$FUNC1" > set_ftrace_filter | ||
90 | if check_set_ftrace_filter "$FUNC1" "$FUNC1:traceoff:unlimited"; then | ||
91 | fail "Expected $FUNC1 and $FUNC1:traceoff:unlimited" | ||
92 | fi | ||
93 | |||
94 | echo "$FUNC2" >> set_ftrace_filter | ||
95 | if check_set_ftrace_filter "$FUNC1" "$FUNC2" "$FUNC1:traceoff:unlimited"; then | ||
96 | fail "Expected $FUNC1 $FUNC2 and $FUNC1:traceoff:unlimited" | ||
97 | fi | ||
98 | |||
99 | echo "$FUNC2:traceoff" >> set_ftrace_filter | ||
100 | if check_set_ftrace_filter "$FUNC1" "$FUNC2" "$FUNC1:traceoff:unlimited" "$FUNC2:traceoff:unlimited"; then | ||
101 | fail "Expected $FUNC1 $FUNC2 $FUNC1:traceoff:unlimited and $FUNC2:traceoff:unlimited" | ||
102 | fi | ||
103 | |||
104 | echo "$FUNC1" > set_ftrace_filter | ||
105 | if check_set_ftrace_filter "$FUNC1" "$FUNC1:traceoff:unlimited" "$FUNC2:traceoff:unlimited"; then | ||
106 | fail "Expected $FUNC1 $FUNC1:traceoff:unlimited and $FUNC2:traceoff:unlimited" | ||
107 | fi | ||
108 | |||
109 | echo > set_ftrace_filter | ||
110 | if check_set_ftrace_filter "$ALL_FUNCS" "$FUNC1:traceoff:unlimited" "$FUNC2:traceoff:unlimited"; then | ||
111 | fail "Expected $ALL_FUNCS $FUNC1:traceoff:unlimited and $FUNC2:traceoff:unlimited" | ||
112 | fi | ||
113 | |||
114 | reset_ftrace_filter | ||
115 | |||
116 | if check_set_ftrace_filter "$ALL_FUNCS"; then | ||
117 | fail "Expected $ALL_FUNCS" | ||
118 | fi | ||
119 | |||
120 | echo "$FUNC1" > set_ftrace_filter | ||
121 | if check_set_ftrace_filter "$FUNC1" ; then | ||
122 | fail "Expected $FUNC1" | ||
123 | fi | ||
124 | |||
125 | echo "$FUNC2" >> set_ftrace_filter | ||
126 | if check_set_ftrace_filter "$FUNC1" "$FUNC2" ; then | ||
127 | fail "Expected $FUNC1 and $FUNC2" | ||
128 | fi | ||
129 | |||
130 | do_reset | ||
131 | |||
132 | exit 0 | ||
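Reading set_ftrace_filter with cat as well as dd at bs=1 and bs=100 exercises the seq_file read path at different buffer sizes, which is where the earlier bugs hid. A quick manual equivalent (tracefs path assumed; schedule and do_IRQ are the same symbols the test uses):

    cd /sys/kernel/debug/tracing
    echo 'schedule:traceoff' > set_ftrace_filter
    echo do_IRQ >> set_ftrace_filter
    cat set_ftrace_filter
    dd if=set_ftrace_filter bs=1   2>/dev/null
    dd if=set_ftrace_filter bs=100 2>/dev/null
    # All three reads should agree, listing do_IRQ plus
    # schedule:traceoff:unlimited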
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func_traceonoff_triggers.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func_traceonoff_triggers.tc new file mode 100644 index 000000000000..c8e02ec01eaf --- /dev/null +++ b/tools/testing/selftests/ftrace/test.d/ftrace/func_traceonoff_triggers.tc | |||
@@ -0,0 +1,172 @@ | |||
1 | #!/bin/sh | ||
2 | # description: ftrace - test for function traceon/off triggers | ||
3 | # flags: instance | ||
4 | # | ||
5 | # Ftrace allows adding triggers to functions, such as enabling or disabling | ||
6 | # tracing, enabling or disabling trace events, or recording a stack trace | ||
7 | # within the ring buffer. | ||
8 | # | ||
9 | # This test is designed to test enabling and disabling tracing triggers | ||
10 | # | ||
11 | |||
12 | # The triggers are set within the set_ftrace_filter file | ||
13 | if [ ! -f set_ftrace_filter ]; then | ||
14 | echo "set_ftrace_filter not found? Is dynamic ftrace not set?" | ||
15 | exit_unsupported | ||
16 | fi | ||
17 | |||
18 | do_reset() { | ||
19 | reset_ftrace_filter | ||
20 | reset_tracer | ||
21 | disable_events | ||
22 | clear_trace | ||
23 | enable_tracing | ||
24 | } | ||
25 | |||
26 | fail() { # mesg | ||
27 | do_reset | ||
28 | echo $1 | ||
29 | exit $FAIL | ||
30 | } | ||
31 | |||
32 | SLEEP_TIME=".1" | ||
33 | |||
34 | do_reset | ||
35 | |||
36 | echo "Testing function probes with enabling disabling tracing:" | ||
37 | |||
38 | cnt_trace() { | ||
39 | grep -v '^#' trace | wc -l | ||
40 | } | ||
41 | |||
42 | echo '** DISABLE TRACING' | ||
43 | disable_tracing | ||
44 | clear_trace | ||
45 | |||
46 | cnt=`cnt_trace` | ||
47 | if [ $cnt -ne 0 ]; then | ||
48 | fail "Found junk in trace" | ||
49 | fi | ||
50 | |||
51 | |||
52 | echo '** ENABLE EVENTS' | ||
53 | |||
54 | echo 1 > events/enable | ||
55 | |||
56 | echo '** ENABLE TRACING' | ||
57 | enable_tracing | ||
58 | |||
59 | cnt=`cnt_trace` | ||
60 | if [ $cnt -eq 0 ]; then | ||
61 | fail "Nothing found in trace" | ||
62 | fi | ||
63 | |||
64 | # powerpc uses .schedule | ||
65 | func="schedule" | ||
66 | x=`grep '^\.schedule$' available_filter_functions | wc -l` | ||
67 | if [ "$x" -eq 1 ]; then | ||
68 | func=".schedule" | ||
69 | fi | ||
70 | |||
71 | echo '** SET TRACEOFF' | ||
72 | |||
73 | echo "$func:traceoff" > set_ftrace_filter | ||
74 | |||
75 | cnt=`grep schedule set_ftrace_filter | wc -l` | ||
76 | if [ $cnt -ne 1 ]; then | ||
77 | fail "Did not find traceoff trigger" | ||
78 | fi | ||
79 | |||
80 | cnt=`cnt_trace` | ||
81 | sleep $SLEEP_TIME | ||
82 | cnt2=`cnt_trace` | ||
83 | |||
84 | if [ $cnt -ne $cnt2 ]; then | ||
85 | fail "Tracing is not stopped" | ||
86 | fi | ||
87 | |||
88 | on=`cat tracing_on` | ||
89 | if [ $on != "0" ]; then | ||
90 | fail "Tracing is not off" | ||
91 | fi | ||
92 | |||
93 | line1=`cat trace | tail -1` | ||
94 | sleep $SLEEP_TIME | ||
95 | line2=`cat trace | tail -1` | ||
96 | |||
97 | if [ "$line1" != "$line2" ]; then | ||
98 | fail "Tracing file is still changing" | ||
99 | fi | ||
100 | |||
101 | clear_trace | ||
102 | |||
103 | cnt=`cnt_trace` | ||
104 | if [ $cnt -ne 0 ]; then | ||
105 | fail "Tracing is still happeing" | ||
106 | fi | ||
107 | |||
108 | echo "!$func:traceoff" >> set_ftrace_filter | ||
109 | |||
110 | cnt=`grep schedule set_ftrace_filter | wc -l` | ||
111 | if [ $cnt -ne 0 ]; then | ||
112 | fail "traceoff trigger still exists" | ||
113 | fi | ||
114 | |||
115 | on=`cat tracing_on` | ||
116 | if [ $on != "0" ]; then | ||
117 | fail "Tracing is started again" | ||
118 | fi | ||
119 | |||
120 | echo "$func:traceon" > set_ftrace_filter | ||
121 | |||
122 | cnt=`grep schedule set_ftrace_filter | wc -l` | ||
123 | if [ $cnt -ne 1 ]; then | ||
124 | fail "traceon trigger not found" | ||
125 | fi | ||
126 | |||
127 | cnt=`cnt_trace` | ||
128 | if [ $cnt -eq 0 ]; then | ||
129 | fail "Tracing did not start" | ||
130 | fi | ||
131 | |||
132 | on=`cat tracing_on` | ||
133 | if [ $on != "1" ]; then | ||
134 | fail "Tracing was not enabled" | ||
135 | fi | ||
136 | |||
137 | |||
138 | echo "!$func:traceon" >> set_ftrace_filter | ||
139 | |||
140 | cnt=`grep schedule set_ftrace_filter | wc -l` | ||
141 | if [ $cnt -ne 0 ]; then | ||
142 | fail "traceon trigger still exists" | ||
143 | fi | ||
144 | |||
145 | check_sleep() { | ||
146 | val=$1 | ||
147 | sleep $SLEEP_TIME | ||
148 | cat set_ftrace_filter | ||
149 | on=`cat tracing_on` | ||
150 | if [ $on != "$val" ]; then | ||
151 | fail "Expected tracing_on to be $val, but it was $on" | ||
152 | fi | ||
153 | } | ||
154 | |||
155 | |||
156 | echo "$func:traceoff:3" > set_ftrace_filter | ||
157 | check_sleep "0" | ||
158 | echo 1 > tracing_on | ||
159 | check_sleep "0" | ||
160 | echo 1 > tracing_on | ||
161 | check_sleep "0" | ||
162 | echo 1 > tracing_on | ||
163 | check_sleep "1" | ||
164 | echo "!$func:traceoff:0" > set_ftrace_filter | ||
165 | |||
166 | if grep -e traceon -e traceoff set_ftrace_filter; then | ||
167 | fail "Tracing on and off triggers still exist" | ||
168 | fi | ||
169 | |||
170 | disable_events | ||
171 | |||
172 | exit 0 | ||
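The traceon/traceoff probes toggle the global tracing_on switch from inside the traced kernel, and a trailing count limits how many times the probe may fire; the tail of the test shows a traceoff probe that trips exactly three times. The same sequence by hand (tracefs path assumed):

    cd /sys/kernel/debug/tracing
    echo 'schedule:traceoff:3' > set_ftrace_filter
    cat tracing_on            # drops to 0 the first time schedule() is hit
    echo 1 > tracing_on       # re-enable; the probe still has uses left
    # After the third trip the probe is spent and tracing_on stays at 1.
    echo '!schedule:traceoff:0' > set_ftrace_filter   # remove the spent probe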
diff --git a/tools/testing/selftests/ftrace/test.d/functions b/tools/testing/selftests/ftrace/test.d/functions index 91de1a8e4f19..9aec6fcb7729 100644 --- a/tools/testing/selftests/ftrace/test.d/functions +++ b/tools/testing/selftests/ftrace/test.d/functions | |||
@@ -30,6 +30,27 @@ reset_events_filter() { # reset all current setting filters | |||
30 | done | 30 | done |
31 | } | 31 | } |
32 | 32 | ||
33 | reset_ftrace_filter() { # reset all triggers in set_ftrace_filter | ||
34 | echo > set_ftrace_filter | ||
35 | grep -v '^#' set_ftrace_filter | while read t; do | ||
36 | tr=`echo $t | cut -d: -f2` | ||
37 | if [ "$tr" == "" ]; then | ||
38 | continue | ||
39 | fi | ||
40 | if [ $tr == "enable_event" -o $tr == "disable_event" ]; then | ||
41 | tr=`echo $t | cut -d: -f1-4` | ||
42 | limit=`echo $t | cut -d: -f5` | ||
43 | else | ||
44 | tr=`echo $t | cut -d: -f1-2` | ||
45 | limit=`echo $t | cut -d: -f3` | ||
46 | fi | ||
47 | if [ "$limit" != "unlimited" ]; then | ||
48 | tr="$tr:$limit" | ||
49 | fi | ||
50 | echo "!$tr" > set_ftrace_filter | ||
51 | done | ||
52 | } | ||
53 | |||
33 | disable_events() { | 54 | disable_events() { |
34 | echo 0 > events/enable | 55 | echo 0 > events/enable |
35 | } | 56 | } |
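The new reset_ftrace_filter helper first clears the plain function filter and then walks the remaining lines, turning each probe back into a removal command by prefixing it with '!' (keeping the count field unless it reads "unlimited"). The syntax it relies on, in isolation (tracefs path assumed):

    cd /sys/kernel/debug/tracing
    echo 'schedule:stacktrace' > set_ftrace_filter
    grep schedule set_ftrace_filter      # schedule:stacktrace:unlimited
    echo '!schedule:stacktrace' > set_ftrace_filter
    grep schedule set_ftrace_filter      # the probe is gone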
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_maxactive.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_maxactive.tc new file mode 100644 index 000000000000..57abdf1caabf --- /dev/null +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_maxactive.tc | |||
@@ -0,0 +1,39 @@ | |||
1 | #!/bin/sh | ||
2 | # description: Kretprobe dynamic event with maxactive | ||
3 | |||
4 | [ -f kprobe_events ] || exit_unsupported # this is configurable | ||
5 | |||
6 | echo > kprobe_events | ||
7 | |||
8 | # Test if we successfully reject unknown messages | ||
9 | if echo 'a:myprobeaccept inet_csk_accept' > kprobe_events; then false; else true; fi | ||
10 | |||
11 | # Test if we successfully reject too big maxactive | ||
12 | if echo 'r1000000:myprobeaccept inet_csk_accept' > kprobe_events; then false; else true; fi | ||
13 | |||
14 | # Test if we successfully reject unparsable numbers for maxactive | ||
15 | if echo 'r10fuzz:myprobeaccept inet_csk_accept' > kprobe_events; then false; else true; fi | ||
16 | |||
17 | # Test for kretprobe with event name without maxactive | ||
18 | echo 'r:myprobeaccept inet_csk_accept' > kprobe_events | ||
19 | grep myprobeaccept kprobe_events | ||
20 | test -d events/kprobes/myprobeaccept | ||
21 | echo '-:myprobeaccept' >> kprobe_events | ||
22 | |||
23 | # Test for kretprobe with event name with a small maxactive | ||
24 | echo 'r10:myprobeaccept inet_csk_accept' > kprobe_events | ||
25 | grep myprobeaccept kprobe_events | ||
26 | test -d events/kprobes/myprobeaccept | ||
27 | echo '-:myprobeaccept' >> kprobe_events | ||
28 | |||
29 | # Test for kretprobe without event name without maxactive | ||
30 | echo 'r inet_csk_accept' > kprobe_events | ||
31 | grep inet_csk_accept kprobe_events | ||
32 | echo > kprobe_events | ||
33 | |||
34 | # Test for kretprobe without event name with a small maxactive | ||
35 | echo 'r10 inet_csk_accept' > kprobe_events | ||
36 | grep inet_csk_accept kprobe_events | ||
37 | echo > kprobe_events | ||
38 | |||
39 | clear_trace | ||
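This test covers the r[MAXACTIVE] form that the series exposes through kprobe_events, bounding how many instances of a kretprobe may be active at once. The same probes typed by hand (tracefs path assumed; inet_csk_accept is simply the symbol the selftest uses):

    cd /sys/kernel/debug/tracing
    echo 'r10:myretprobe inet_csk_accept' > kprobe_events   # track at most 10 concurrent returns
    grep myretprobe kprobe_events
    test -d events/kprobes/myretprobe && echo "event created"
    echo '-:myretprobe' >> kprobe_events                    # remove it again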
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-eventonoff.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-eventonoff.tc index 1a9445021bf1..c5435adfdd93 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/trigger-eventonoff.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-eventonoff.tc | |||
@@ -1,5 +1,6 @@ | |||
1 | #!/bin/sh | 1 | #!/bin/sh |
2 | # description: event trigger - test event enable/disable trigger | 2 | # description: event trigger - test event enable/disable trigger |
3 | # flags: instance | ||
3 | 4 | ||
4 | do_reset() { | 5 | do_reset() { |
5 | reset_trigger | 6 | reset_trigger |
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-filter.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-filter.tc index 514e466e198b..48849a8d577f 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/trigger-filter.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-filter.tc | |||
@@ -1,5 +1,6 @@ | |||
1 | #!/bin/sh | 1 | #!/bin/sh |
2 | # description: event trigger - test trigger filter | 2 | # description: event trigger - test trigger filter |
3 | # flags: instance | ||
3 | 4 | ||
4 | do_reset() { | 5 | do_reset() { |
5 | reset_trigger | 6 | reset_trigger |
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-mod.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-mod.tc index 400e98b64948..b7f86d10b549 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-mod.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-mod.tc | |||
@@ -1,5 +1,6 @@ | |||
1 | #!/bin/sh | 1 | #!/bin/sh |
2 | # description: event trigger - test histogram modifiers | 2 | # description: event trigger - test histogram modifiers |
3 | # flags: instance | ||
3 | 4 | ||
4 | do_reset() { | 5 | do_reset() { |
5 | reset_trigger | 6 | reset_trigger |
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist.tc index a00184cd9c95..fb66f7d9339d 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist.tc | |||
@@ -1,5 +1,6 @@ | |||
1 | #!/bin/sh | 1 | #!/bin/sh |
2 | # description: event trigger - test histogram trigger | 2 | # description: event trigger - test histogram trigger |
3 | # flags: instance | ||
3 | 4 | ||
4 | do_reset() { | 5 | do_reset() { |
5 | reset_trigger | 6 | reset_trigger |
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-multihist.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-multihist.tc index 3478b00ead57..f9153087dd7c 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/trigger-multihist.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-multihist.tc | |||
@@ -1,5 +1,6 @@ | |||
1 | #!/bin/sh | 1 | #!/bin/sh |
2 | # description: event trigger - test multiple histogram triggers | 2 | # description: event trigger - test multiple histogram triggers |
3 | # flags: instance | ||
3 | 4 | ||
4 | do_reset() { | 5 | do_reset() { |
5 | reset_trigger | 6 | reset_trigger |