-rw-r--r--   Documentation/trace/ftrace-design.txt | 233
-rw-r--r--   Documentation/trace/ftrace.txt        |   6
-rw-r--r--   include/linux/kprobes.h               |   4
-rw-r--r--   include/trace/ftrace.h                |   8
-rw-r--r--   kernel/trace/Kconfig                  |  28
-rw-r--r--   kernel/trace/ftrace.c                 |   4
-rw-r--r--   kernel/trace/trace_clock.c            |  24
-rw-r--r--   kernel/trace/trace_entries.h          |  27
-rw-r--r--   kernel/trace/trace_events.c           |  14
-rw-r--r--   kernel/trace/trace_export.c           |  71
10 files changed, 349 insertions, 70 deletions
diff --git a/Documentation/trace/ftrace-design.txt b/Documentation/trace/ftrace-design.txt
new file mode 100644
index 00000000000..7003e10f10f
--- /dev/null
+++ b/Documentation/trace/ftrace-design.txt
@@ -0,0 +1,233 @@
function tracer guts
====================

Introduction
------------

Here we will cover the architecture pieces that the common function tracing
code relies on for proper functioning.  Things are broken down into increasing
complexity so that you can start simple and at least get basic functionality.

Note that this focuses on architecture implementation details only.  If you
want more explanation of a feature in terms of common code, review the common
ftrace.txt file.


Prerequisites
-------------

Ftrace relies on these features being implemented:
  STACKTRACE_SUPPORT - implement save_stack_trace()
  TRACE_IRQFLAGS_SUPPORT - implement include/asm/irqflags.h

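For the first of those, the hook's shape is roughly as follows.  This is only
a sketch that assumes a simple frame-pointer layout (saved fp at fp[0], return
address at fp[1]); a real port must walk its architecture's actual frame
format:

#include <linux/kernel.h>
#include <linux/stacktrace.h>

void save_stack_trace(struct stack_trace *trace)
{
	unsigned long *fp = __builtin_frame_address(0);

	while (fp && trace->nr_entries < trace->max_entries) {
		unsigned long pc = fp[1];	/* assumed frame layout */

		if (trace->skip > 0)
			trace->skip--;
		else
			trace->entries[trace->nr_entries++] = pc;

		/* walk up to the caller's frame */
		fp = (unsigned long *)fp[0];
	}

	/* conventional end-of-trace marker */
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}

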
HAVE_FUNCTION_TRACER
--------------------

You will need to implement the mcount and the ftrace_stub functions.

The exact mcount symbol name will depend on your toolchain.  Some call it
"mcount", "_mcount", or even "__mcount".  You can probably figure it out by
running something like:
	$ echo 'main(){}' | gcc -x c -S -o - - -pg | grep mcount
		call	mcount
We'll make the assumption below that the symbol is "mcount" just to keep
things nice and simple in the examples.

Keep in mind that the ABI that is in effect inside of the mcount function is
*highly* architecture/toolchain specific.  We cannot help you in this regard,
sorry.  Dig up some old documentation and/or find someone more familiar than
you to bang ideas off of.  Typically, register usage (argument/scratch/etc...)
is a major issue at this point, especially in relation to the location of the
mcount call (before/after function prologue).  You might also want to look at
how glibc has implemented the mcount function for your architecture.  It
might be (semi-)relevant.

The mcount function should check the function pointer ftrace_trace_function
to see if it is set to ftrace_stub.  If it is, there is nothing for you to do,
so return immediately.  If it isn't, then call that function in the same way
the mcount function normally calls __mcount_internal -- the first argument is
the "frompc" while the second argument is the "selfpc" (adjusted to remove
the size of the mcount call that is embedded in the function).

For example, if the function foo() calls bar(), when the bar() function calls
mcount(), the arguments mcount() will pass to the tracer are:
	"frompc" - the address bar() will use to return to foo()
	"selfpc" - the address of bar() (adjusted for the size of the mcount call)

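To make that calling convention concrete from the C side, here is a toy
tracer.  The names my_trace_func/my_ops are made up, and the two-argument
callback form assumes the classic ftrace_ops API:

#include <linux/ftrace.h>

/* ip is the "selfpc", parent_ip is the "frompc" described above */
static void notrace my_trace_func(unsigned long ip, unsigned long parent_ip)
{
	trace_printk("%pS called from %pS\n",
		     (void *)ip, (void *)parent_ip);
}

static struct ftrace_ops my_ops = {
	.func	= my_trace_func,
};

/* calling register_ftrace_function(&my_ops) would start the tracing */
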
Also keep in mind that this mcount function will be called *a lot*, so
optimizing for the default case of no tracer will help the smooth running of
your system when tracing is disabled.  So the start of the mcount function is
typically the bare minimum of checks before returning.  That also means the
code flow should usually be kept linear (i.e. no branching in the nop case).
This is of course an optimization and not a hard requirement.

Here is some pseudo code that should help (these functions should actually be
implemented in assembly):

void ftrace_stub(void)
{
	return;
}

void mcount(void)
{
	/* save any bare state needed in order to do initial checking */

	extern void (*ftrace_trace_function)(unsigned long, unsigned long);
	if (ftrace_trace_function != ftrace_stub)
		goto do_trace;

	/* restore any bare state */

	return;

do_trace:

	/* save all state needed by the ABI (see paragraph above) */

	unsigned long frompc = ...;
	unsigned long selfpc = <return address> - MCOUNT_INSN_SIZE;
	ftrace_trace_function(frompc, selfpc);

	/* restore all state needed by the ABI */
}

Don't forget to export mcount for modules!
	extern void mcount(void);
	EXPORT_SYMBOL(mcount);


HAVE_FUNCTION_TRACE_MCOUNT_TEST
-------------------------------

This is an optional optimization for the normal case when tracing is turned
off in the system.  If you do not enable this Kconfig option, the common
ftrace code will take care of doing the checking for you.

To support this feature, you only need to check the function_trace_stop
variable in the mcount function.  If it is non-zero, there is no tracing to be
done at all, so you can return.

This additional pseudo code would simply be:
void mcount(void)
{
	/* save any bare state needed in order to do initial checking */

+	if (function_trace_stop)
+		return;

	extern void (*ftrace_trace_function)(unsigned long, unsigned long);
	if (ftrace_trace_function != ftrace_stub)
...


HAVE_FUNCTION_GRAPH_TRACER
--------------------------

Deep breath ... time to do some real work.  Here you will need to update the
mcount function to check ftrace graph function pointers, as well as implement
some functions to save (hijack) and restore the return address.

The mcount function should check the function pointers ftrace_graph_return
(compare to ftrace_stub) and ftrace_graph_entry (compare to
ftrace_graph_entry_stub).  If either of those is not set to the relevant stub
function, call the arch-specific function ftrace_graph_caller which in turn
calls the arch-specific function prepare_ftrace_return.  Neither of these
function names is strictly required, but you should use them anyway to stay
consistent across the architecture ports -- easier to compare & contrast
things.

The arguments to prepare_ftrace_return are slightly different from what is
passed to ftrace_trace_function.  The second argument "selfpc" is the same,
but the first argument should be a pointer to the "frompc".  Typically this
is located on the stack.  This allows the function to hijack the return
address temporarily to have it point to the arch-specific function
return_to_handler.  That function will simply call the common
ftrace_return_to_handler function, and that will return the original return
address with which you can return to the original call site.

Here is the updated mcount pseudo code:
void mcount(void)
{
...
	if (ftrace_trace_function != ftrace_stub)
		goto do_trace;

+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	extern void (*ftrace_graph_return)(...);
+	extern void (*ftrace_graph_entry)(...);
+	if (ftrace_graph_return != ftrace_stub ||
+	    ftrace_graph_entry != ftrace_graph_entry_stub)
+		ftrace_graph_caller();
+#endif

	/* restore any bare state */
...

Here is the pseudo code for the new ftrace_graph_caller assembly function:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
void ftrace_graph_caller(void)
{
	/* save all state needed by the ABI */

	unsigned long *frompc = &...;
	unsigned long selfpc = <return address> - MCOUNT_INSN_SIZE;
	prepare_ftrace_return(frompc, selfpc);

	/* restore all state needed by the ABI */
}
#endif

For information on how to implement prepare_ftrace_return(), simply look at
the x86 version.  The only architecture-specific piece in it is the setup of
the fault recovery table (the asm(...) code).  The rest should be the same
across architectures.
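
As a rough guide, its guts look something like the following C sketch.  Treat
it as pseudo code: the fault-recovery asm and error paths are omitted, and the
exact helper signatures have varied between kernel versions, so check the
common code (kernel/trace/trace_functions_graph.c) before copying anything:

void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long old;

	/* save the real return address, then point it at our handler */
	old = *parent;
	*parent = return_hooker;

	/*
	 * Push the real return address onto the per-task return stack.
	 * (Some kernels also take a frame pointer argument here to
	 * support HAVE_FUNCTION_GRAPH_FP_TEST.)
	 */
	if (ftrace_push_return_trace(old, self_addr, &trace.depth) == -EBUSY) {
		*parent = old;	/* return stack full: undo the hijack */
		return;
	}

	trace.func = self_addr;

	/* the tracer may veto tracing this function */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		*parent = old;
	}
}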

Here is the pseudo code for the new return_to_handler assembly function.
Note that the ABI that applies here is different from what applies to the
mcount code.  Since you are returning from a function (after the epilogue),
you might be able to skimp on things saved/restored (usually just registers
used to pass return values).

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
void return_to_handler(void)
{
	/* save all state needed by the ABI (see paragraph above) */

	void (*original_return_point)(void) = ftrace_return_to_handler();

	/* restore all state needed by the ABI */

	/* this is usually either a return or a jump */
	original_return_point();
}
#endif


HAVE_FTRACE_NMI_ENTER
---------------------

If you can't trace NMI functions, then skip this option.

<details to be filled>


HAVE_FTRACE_SYSCALLS
--------------------

<details to be filled>


HAVE_FTRACE_MCOUNT_RECORD
-------------------------

See scripts/recordmcount.pl for more info.

<details to be filled>


HAVE_DYNAMIC_FTRACE
-------------------

<details to be filled>
diff --git a/Documentation/trace/ftrace.txt b/Documentation/trace/ftrace.txt
index 355d0f1f8c5..1b6292bbdd6 100644
--- a/Documentation/trace/ftrace.txt
+++ b/Documentation/trace/ftrace.txt
@@ -26,6 +26,12 @@ disabled, and more (ftrace allows for tracer plugins, which
 means that the list of tracers can always grow).
 
 
+Implementation Details
+----------------------
+
+See ftrace-design.txt for details aimed at arch porters.
+
+
 The File System
 ---------------
 
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index bcd9c07848b..3a46b7b7abb 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -48,13 +48,13 @@
 #define KPROBE_HIT_SSDONE	0x00000008
 
 /* Attach to insert probes on any functions which should be ignored*/
-#define __kprobes	__attribute__((__section__(".kprobes.text"))) notrace
+#define __kprobes	__attribute__((__section__(".kprobes.text")))
 #else /* CONFIG_KPROBES */
 typedef int kprobe_opcode_t;
 struct arch_specific_insn {
 	int dummy;
 };
-#define __kprobes notrace
+#define __kprobes
 #endif /* CONFIG_KPROBES */
 
 struct kprobe;
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 308bafd9332..72a3b437b82 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -239,9 +239,9 @@ ftrace_format_##call(struct ftrace_event_call *unused,	\
@@ -239,9 +239,9 @@ ftrace_format_##call(struct ftrace_event_call *unused, \ | |||
239 | #undef __print_flags | 239 | #undef __print_flags |
240 | #define __print_flags(flag, delim, flag_array...) \ | 240 | #define __print_flags(flag, delim, flag_array...) \ |
241 | ({ \ | 241 | ({ \ |
242 | static const struct trace_print_flags flags[] = \ | 242 | static const struct trace_print_flags __flags[] = \ |
243 | { flag_array, { -1, NULL }}; \ | 243 | { flag_array, { -1, NULL }}; \ |
244 | ftrace_print_flags_seq(p, delim, flag, flags); \ | 244 | ftrace_print_flags_seq(p, delim, flag, __flags); \ |
245 | }) | 245 | }) |
246 | 246 | ||
247 | #undef __print_symbolic | 247 | #undef __print_symbolic |
@@ -254,7 +254,7 @@ ftrace_format_##call(struct ftrace_event_call *unused, \ | |||
254 | 254 | ||
255 | #undef TRACE_EVENT | 255 | #undef TRACE_EVENT |
256 | #define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ | 256 | #define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ |
257 | enum print_line_t \ | 257 | static enum print_line_t \ |
258 | ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \ | 258 | ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \ |
259 | { \ | 259 | { \ |
260 | struct trace_seq *s = &iter->seq; \ | 260 | struct trace_seq *s = &iter->seq; \ |
@@ -317,7 +317,7 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \ | |||
317 | 317 | ||
318 | #undef TRACE_EVENT | 318 | #undef TRACE_EVENT |
319 | #define TRACE_EVENT(call, proto, args, tstruct, func, print) \ | 319 | #define TRACE_EVENT(call, proto, args, tstruct, func, print) \ |
320 | int \ | 320 | static int \ |
321 | ftrace_define_fields_##call(struct ftrace_event_call *event_call) \ | 321 | ftrace_define_fields_##call(struct ftrace_event_call *event_call) \ |
322 | { \ | 322 | { \ |
323 | struct ftrace_raw_##call field; \ | 323 | struct ftrace_raw_##call field; \ |
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 1ea0d1234f4..e7163460440 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -11,12 +11,18 @@ config NOP_TRACER
 
 config HAVE_FTRACE_NMI_ENTER
 	bool
+	help
+	  See Documentation/trace/ftrace-design.txt
 
 config HAVE_FUNCTION_TRACER
 	bool
+	help
+	  See Documentation/trace/ftrace-design.txt
 
 config HAVE_FUNCTION_GRAPH_TRACER
 	bool
+	help
+	  See Documentation/trace/ftrace-design.txt
 
 config HAVE_FUNCTION_GRAPH_FP_TEST
 	bool
@@ -28,21 +34,25 @@ config HAVE_FUNCTION_GRAPH_FP_TEST
 config HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	bool
 	help
-	  This gets selected when the arch tests the function_trace_stop
-	  variable at the mcount call site. Otherwise, this variable
-	  is tested by the called function.
+	  See Documentation/trace/ftrace-design.txt
 
 config HAVE_DYNAMIC_FTRACE
 	bool
+	help
+	  See Documentation/trace/ftrace-design.txt
 
 config HAVE_FTRACE_MCOUNT_RECORD
 	bool
+	help
+	  See Documentation/trace/ftrace-design.txt
 
 config HAVE_HW_BRANCH_TRACER
 	bool
 
 config HAVE_SYSCALL_TRACEPOINTS
 	bool
+	help
+	  See Documentation/trace/ftrace-design.txt
 
 config TRACER_MAX_TRACE
 	bool
@@ -469,6 +479,18 @@ config FTRACE_STARTUP_TEST
 	  functioning properly. It will do tests on all the configured
 	  tracers of ftrace.
 
+config EVENT_TRACE_TEST_SYSCALLS
+	bool "Run selftest on syscall events"
+	depends on FTRACE_STARTUP_TEST
+	help
+	  This option also enables testing of every syscall event.
+	  Each event is enabled and disabled in turn, with various loads
+	  run while the event is enabled. This adds a bit more time to
+	  kernel boot since it does this for every syscall event defined.
+
+	  TBD - enable a way to actually call the syscalls as we test their
+	        events
+
 config MMIOTRACE
 	bool "Memory mapped IO tracing"
 	depends on HAVE_MMIOTRACE_SUPPORT && PCI
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 8b23d567008..f7ab7fc162c 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2062,9 +2062,9 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 	int i, len = 0;
 	char *search;
 
-	if (glob && (strcmp(glob, "*") || !strlen(glob)))
+	if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
 		glob = NULL;
-	else {
+	else if (glob) {
 		int not;
 
 		type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index b588fd81f7f..20c5f92e28a 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -66,10 +66,14 @@ u64 notrace trace_clock(void)
  * Used by plugins that need globally coherent timestamps.
  */
 
-static u64 prev_trace_clock_time;
-
-static raw_spinlock_t trace_clock_lock ____cacheline_aligned_in_smp =
-	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+/* keep prev_time and lock in the same cacheline. */
+static struct {
+	u64 prev_time;
+	raw_spinlock_t lock;
+} trace_clock_struct ____cacheline_aligned_in_smp =
+	{
+		.lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED,
+	};
 
 u64 notrace trace_clock_global(void)
 {
@@ -88,19 +92,19 @@ u64 notrace trace_clock_global(void)
 	if (unlikely(in_nmi()))
 		goto out;
 
-	__raw_spin_lock(&trace_clock_lock);
+	__raw_spin_lock(&trace_clock_struct.lock);
 
 	/*
 	 * TODO: if this happens often then maybe we should reset
-	 * my_scd->clock to prev_trace_clock_time+1, to make sure
+	 * my_scd->clock to prev_time+1, to make sure
 	 * we start ticking with the local clock from now on?
 	 */
-	if ((s64)(now - prev_trace_clock_time) < 0)
-		now = prev_trace_clock_time + 1;
+	if ((s64)(now - trace_clock_struct.prev_time) < 0)
+		now = trace_clock_struct.prev_time + 1;
 
-	prev_trace_clock_time = now;
+	trace_clock_struct.prev_time = now;
 
-	__raw_spin_unlock(&trace_clock_lock);
+	__raw_spin_unlock(&trace_clock_struct.lock);
 
 out:
 	raw_local_irq_restore(flags);
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index c866d34e014..a431748ddd6 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -78,7 +78,7 @@ FTRACE_ENTRY(funcgraph_entry, ftrace_graph_ent_entry,
 		__field_desc(	int,		graph_ent,	depth	)
 	),
 
-	F_printk("--> %lx (%d)", __entry->graph_ent.func, __entry->depth)
+	F_printk("--> %lx (%d)", __entry->func, __entry->depth)
 );
 
 /* Function return entry */
@@ -97,8 +97,8 @@ FTRACE_ENTRY(funcgraph_exit, ftrace_graph_ret_entry,
 
 	F_printk("<-- %lx (%d) (start: %llx end: %llx) over: %d",
 		 __entry->func, __entry->depth,
-		 __entry->calltime, __entry->rettim,
-		 __entrty->depth)
+		 __entry->calltime, __entry->rettime,
+		 __entry->depth)
 );
 
 /*
@@ -116,15 +116,6 @@ FTRACE_ENTRY(funcgraph_exit, ftrace_graph_ret_entry,
 		__field(	unsigned char,	next_state	)	\
 		__field(	unsigned int,	next_cpu	)
 
-#if 0
-FTRACE_ENTRY_STRUCT_ONLY(ctx_switch_entry,
-
-	F_STRUCT(
-		FTRACE_CTX_FIELDS
-	)
-);
-#endif
-
 FTRACE_ENTRY(context_switch, ctx_switch_entry,
 
 	TRACE_CTX,
@@ -133,7 +124,7 @@ FTRACE_ENTRY(context_switch, ctx_switch_entry,
 		FTRACE_CTX_FIELDS
 	),
 
-	F_printk(b"%u:%u:%u ==> %u:%u:%u [%03u]",
+	F_printk("%u:%u:%u ==> %u:%u:%u [%03u]",
 		 __entry->prev_pid, __entry->prev_prio, __entry->prev_state,
 		 __entry->next_pid, __entry->next_prio, __entry->next_state,
 		 __entry->next_cpu
@@ -257,8 +248,8 @@ FTRACE_ENTRY(mmiotrace_rw, trace_mmiotrace_rw,
 		__field_desc(	unsigned char,	rw,	width	)
 	),
 
-	F_printk("%lx %lx %lx %d %lx %lx",
-		 __entry->phs, __entry->value, __entry->pc,
+	F_printk("%lx %lx %lx %d %x %x",
+		 (unsigned long)__entry->phys, __entry->value, __entry->pc,
 		 __entry->map_id, __entry->opcode, __entry->width)
 );
 
@@ -275,8 +266,8 @@ FTRACE_ENTRY(mmiotrace_map, trace_mmiotrace_map,
 		__field_desc(	unsigned char,	map,	opcode	)
 	),
 
-	F_printk("%lx %lx %lx %d %lx",
-		 __entry->phs, __entry->virt, __entry->len,
+	F_printk("%lx %lx %lx %d %x",
+		 (unsigned long)__entry->phys, __entry->virt, __entry->len,
 		 __entry->map_id, __entry->opcode)
 );
 
@@ -370,7 +361,7 @@ FTRACE_ENTRY(kmem_alloc, kmemtrace_alloc_entry,
 		__field(	int,		node	)
 	),
 
-	F_printk("type:%u call_site:%lx ptr:%p req:%lu alloc:%lu"
+	F_printk("type:%u call_site:%lx ptr:%p req:%zi alloc:%zi"
 		 " flags:%x node:%d",
 		 __entry->type_id, __entry->call_site, __entry->ptr,
 		 __entry->bytes_req, __entry->bytes_alloc,
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index adbed124c3e..787f0fb0994 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1154,7 +1154,7 @@ static int trace_module_notify(struct notifier_block *self,
 }
 #endif /* CONFIG_MODULES */
 
-struct notifier_block trace_module_nb = {
+static struct notifier_block trace_module_nb = {
 	.notifier_call = trace_module_notify,
 	.priority = 0,
 };
@@ -1326,6 +1326,18 @@ static __init void event_trace_self_tests(void)
 		if (!call->regfunc)
 			continue;
 
+		/*
+		 * Testing syscall events here is pretty useless, but
+		 * we still do it if configured. It is time consuming,
+		 * though. What we really need is a user thread to
+		 * perform the syscalls as we test.
+		 */
+#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
+		if (call->system &&
+		    strcmp(call->system, "syscalls") == 0)
+			continue;
+#endif
+
 		pr_info("Testing event %s: ", call->name);
 
 		/*
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index 4cb29d84d73..9753fcc61bc 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -23,6 +23,47 @@
 #define __field_struct(type, item)
 
 #undef __field
+#define __field(type, item)				type item;
+
+#undef __field_desc
+#define __field_desc(type, container, item)		type item;
+
+#undef __array
+#define __array(type, item, size)			type item[size];
+
+#undef __array_desc
+#define __array_desc(type, container, item, size)	type item[size];
+
+#undef __dynamic_array
+#define __dynamic_array(type, item)			type item[];
+
+#undef F_STRUCT
+#define F_STRUCT(args...)				args
+
+#undef F_printk
+#define F_printk(fmt, args...) fmt, args
+
+#undef FTRACE_ENTRY
+#define FTRACE_ENTRY(name, struct_name, id, tstruct, print)	\
+struct ____ftrace_##name {					\
+	tstruct							\
+};								\
+static void __used ____ftrace_check_##name(void)		\
+{								\
+	struct ____ftrace_##name *__entry = NULL;		\
+								\
+	/* force compile-time check on F_printk() */		\
+	printk(print);						\
+}
+
+#undef FTRACE_ENTRY_DUP
+#define FTRACE_ENTRY_DUP(name, struct_name, id, tstruct, print)	\
+	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print))
+
+#include "trace_entries.h"
+
+
+#undef __field
 #define __field(type, item)					\
 	ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \
 			       "offset:%zu;\tsize:%zu;\n",	\
@@ -88,10 +129,6 @@ ftrace_format_##name(struct ftrace_event_call *unused,	\
 	return ret;						\
 }
 
-#undef FTRACE_ENTRY_DUP
-#define FTRACE_ENTRY_DUP(name, struct_name, id, tstruct, print)	\
-	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print))
-
 #include "trace_entries.h"
 
 
@@ -172,32 +209,6 @@ ftrace_define_fields_##name(struct ftrace_event_call *event_call)	\
 #undef __dynamic_array
 #define __dynamic_array(type, item)
 
-
-#undef TRACE_ZERO_CHAR
-#define TRACE_ZERO_CHAR(arg)
-
-#undef TRACE_FIELD
-#define TRACE_FIELD(type, item, assign)				\
-	entry->item = assign;
-
-#undef TRACE_FIELD
-#define TRACE_FIELD(type, item, assign)				\
-	entry->item = assign;
-
-#undef TRACE_FIELD_SIGN
-#define TRACE_FIELD_SIGN(type, item, assign, is_signed)		\
-	TRACE_FIELD(type, item, assign)
-
-#undef TP_CMD
-#define TP_CMD(cmd...)	cmd
-
-#undef TRACE_ENTRY
-#define TRACE_ENTRY	entry
-
-#undef TRACE_FIELD_SPECIAL
-#define TRACE_FIELD_SPECIAL(type_item, item, len, cmd)		\
-	cmd;
-
 #undef FTRACE_ENTRY
 #define FTRACE_ENTRY(call, struct_name, type, tstruct, print)	\
 static int ftrace_raw_init_event_##call(void);			\