Diffstat (limited to 'include')

-rw-r--r--  include/trace/define_trace.h |   4
-rw-r--r--  include/trace/ftrace.h       | 492
-rw-r--r--  include/trace/trace_events.h |   7
3 files changed, 496 insertions, 7 deletions
diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h
index de9dc7d8508b..980eb66a6e38 100644
--- a/include/trace/define_trace.h
+++ b/include/trace/define_trace.h
@@ -56,6 +56,10 @@
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
+#ifdef CONFIG_EVENT_TRACER
+#include <trace/ftrace.h>
+#endif
+
 #undef TRACE_HEADER_MULTI_READ
 
 /* Only undef what we defined in this file */
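For context, a tracepoint header consumes define_trace.h roughly as sketched below, so the new conditional include means such a header now also passes through <trace/ftrace.h> whenever CONFIG_EVENT_TRACER is set. This is a minimal illustration, not part of the patch: the "foo"/"foo_bar" names are invented, the TP_PROTO()/TP_ARGS() wrappers are assumed to come from <linux/tracepoint.h> (trees of this vintage may spell them TPPROTO()/TPARGS()), and depending on the tree TRACE_INCLUDE_FILE/TRACE_INCLUDE_PATH may also need to be defined.

/* Hypothetical trace header for an invented "foo" subsystem. */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM foo

#if !defined(_TRACE_FOO_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_FOO_H

#include <linux/tracepoint.h>

TRACE_EVENT(foo_bar,

        TP_PROTO(int value),

        TP_ARGS(value),

        TP_STRUCT__entry(
                __field(int, value)
        ),

        TP_fast_assign(
                __entry->value = value;
        ),

        TP_printk("value=%d", __entry->value)
);

#endif /* _TRACE_FOO_H */

/* This part must stay outside the multi-read guard. */
#include <trace/define_trace.h>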
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
new file mode 100644
index 000000000000..955b967acd74
--- /dev/null
+++ b/include/trace/ftrace.h
@@ -0,0 +1,492 @@
+/*
+ * Stage 1 of the trace events.
+ *
+ * Override the macros in <trace/trace_events.h> to include the following:
+ *
+ * struct ftrace_raw_<call> {
+ *        struct trace_entry ent;
+ *        <type> <item>;
+ *        <type2> <item2>[<len>];
+ *        [...]
+ * };
+ *
+ * The <type> <item> is created by the __field(type, item) macro or
+ * the __array(type2, item2, len) macro.
+ * We simply do "type item;", and that will create the fields
+ * in the structure.
+ */
+
+#include <linux/ftrace_event.h>
+
+#undef TRACE_FORMAT
+#define TRACE_FORMAT(call, proto, args, fmt)
+
+#undef __array
+#define __array(type, item, len) type item[len];
+
+#undef __field
+#define __field(type, item) type item;
+
+#undef TP_STRUCT__entry
+#define TP_STRUCT__entry(args...) args
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
+        struct ftrace_raw_##name { \
+                struct trace_entry ent; \
+                tstruct \
+        }; \
+        static struct ftrace_event_call event_##name
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+/*
+ * Stage 2 of the trace events.
+ *
+ * Override the macros in <trace/trace_events.h> to include the following:
+ *
+ * enum print_line_t
+ * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
+ * {
+ *        struct trace_seq *s = &iter->seq;
+ *        struct ftrace_raw_<call> *field; <-- defined in stage 1
+ *        struct trace_entry *entry;
+ *        int ret;
+ *
+ *        entry = iter->ent;
+ *
+ *        if (entry->type != event_<call>.id) {
+ *                WARN_ON_ONCE(1);
+ *                return TRACE_TYPE_UNHANDLED;
+ *        }
+ *
+ *        field = (typeof(field))entry;
+ *
+ *        ret = trace_seq_printf(s, <TP_printk> "\n");
+ *        if (!ret)
+ *                return TRACE_TYPE_PARTIAL_LINE;
+ *
+ *        return TRACE_TYPE_HANDLED;
+ * }
+ *
+ * This is the method used to print the raw event to the trace
+ * output format. Note, this is not needed if the data is read
+ * in binary.
+ */
+
+#undef __entry
+#define __entry field
+
+#undef TP_printk
+#define TP_printk(fmt, args...) fmt "\n", args
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
+enum print_line_t \
+ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
+{ \
+        struct trace_seq *s = &iter->seq; \
+        struct ftrace_raw_##call *field; \
+        struct trace_entry *entry; \
+        int ret; \
+        \
+        entry = iter->ent; \
+        \
+        if (entry->type != event_##call.id) { \
+                WARN_ON_ONCE(1); \
+                return TRACE_TYPE_UNHANDLED; \
+        } \
+        \
+        field = (typeof(field))entry; \
+        \
+        ret = trace_seq_printf(s, #call ": " print); \
+        if (!ret) \
+                return TRACE_TYPE_PARTIAL_LINE; \
+        \
+        return TRACE_TYPE_HANDLED; \
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+/*
+ * Set up the display format of the trace point.
+ *
+ * int
+ * ftrace_format_##call(struct trace_seq *s)
+ * {
+ *        struct ftrace_raw_##call field;
+ *        int ret;
+ *
+ *        ret = trace_seq_printf(s, #type " " #item ";"
+ *                               " offset:%u; size:%u;\n",
+ *                               offsetof(struct ftrace_raw_##call, item),
+ *                               sizeof(field.type));
+ *
+ * }
+ */
+
+#undef TP_STRUCT__entry
+#define TP_STRUCT__entry(args...) args
+
+#undef __field
+#define __field(type, item) \
+        ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \
+                               "offset:%u;\tsize:%u;\n", \
+                               (unsigned int)offsetof(typeof(field), item), \
+                               (unsigned int)sizeof(field.item)); \
+        if (!ret) \
+                return 0;
+
+#undef __array
+#define __array(type, item, len) \
+        ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
+                               "offset:%u;\tsize:%u;\n", \
+                               (unsigned int)offsetof(typeof(field), item), \
+                               (unsigned int)sizeof(field.item)); \
+        if (!ret) \
+                return 0;
+
+#undef __entry
+#define __entry REC
+
+#undef TP_printk
+#define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args)
+
+#undef TP_fast_assign
+#define TP_fast_assign(args...) args
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, func, print) \
+static int \
+ftrace_format_##call(struct trace_seq *s) \
+{ \
+        struct ftrace_raw_##call field; \
+        int ret; \
+        \
+        tstruct; \
+        \
+        trace_seq_printf(s, "\nprint fmt: " print); \
+        \
+        return ret; \
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+#undef __field
+#define __field(type, item) \
+        ret = trace_define_field(event_call, #type, #item, \
+                                 offsetof(typeof(field), item), \
+                                 sizeof(field.item)); \
+        if (ret) \
+                return ret;
+
+#undef __array
+#define __array(type, item, len) \
+        BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
+        ret = trace_define_field(event_call, #type "[" #len "]", #item, \
+                                 offsetof(typeof(field), item), \
+                                 sizeof(field.item)); \
+        if (ret) \
+                return ret;
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, func, print) \
+int \
+ftrace_define_fields_##call(void) \
+{ \
+        struct ftrace_raw_##call field; \
+        struct ftrace_event_call *event_call = &event_##call; \
+        int ret; \
+        \
+        __common_field(unsigned char, type); \
+        __common_field(unsigned char, flags); \
+        __common_field(unsigned char, preempt_count); \
+        __common_field(int, pid); \
+        __common_field(int, tgid); \
+        \
+        tstruct; \
+        \
+        return ret; \
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+/*
+ * Stage 3 of the trace events.
+ *
+ * Override the macros in <trace/trace_events.h> to include the following:
+ *
+ * static void ftrace_event_<call>(proto)
+ * {
+ *        event_trace_printk(_RET_IP_, "<call>: " <fmt>);
+ * }
+ *
+ * static int ftrace_reg_event_<call>(void)
+ * {
+ *        int ret;
+ *
+ *        ret = register_trace_<call>(ftrace_event_<call>);
+ *        if (ret)
+ *                pr_info("event trace: Could not activate trace point "
+ *                        "probe to <call>");
+ *        return ret;
+ * }
+ *
+ * static void ftrace_unreg_event_<call>(void)
+ * {
+ *        unregister_trace_<call>(ftrace_event_<call>);
+ * }
+ *
+ * For those macros defined with TRACE_FORMAT:
+ *
+ * static struct ftrace_event_call __used
+ * __attribute__((__aligned__(4)))
+ * __attribute__((section("_ftrace_events"))) event_<call> = {
+ *        .name = "<call>",
+ *        .regfunc = ftrace_reg_event_<call>,
+ *        .unregfunc = ftrace_unreg_event_<call>,
+ * }
+ *
+ *
+ * For those macros defined with TRACE_EVENT:
+ *
+ * static struct ftrace_event_call event_<call>;
+ *
+ * static void ftrace_raw_event_<call>(proto)
+ * {
+ *        struct ring_buffer_event *event;
+ *        struct ftrace_raw_<call> *entry; <-- defined in stage 1
+ *        unsigned long irq_flags;
+ *        int pc;
+ *
+ *        local_save_flags(irq_flags);
+ *        pc = preempt_count();
+ *
+ *        event = trace_current_buffer_lock_reserve(event_<call>.id,
+ *                                  sizeof(struct ftrace_raw_<call>),
+ *                                  irq_flags, pc);
+ *        if (!event)
+ *                return;
+ *        entry = ring_buffer_event_data(event);
+ *
+ *        <assign>; <-- Here we assign the entries by the __field and
+ *                      __array macros.
+ *
+ *        trace_current_buffer_unlock_commit(event, irq_flags, pc);
+ * }
+ *
+ * static int ftrace_raw_reg_event_<call>(void)
+ * {
+ *        int ret;
+ *
+ *        ret = register_trace_<call>(ftrace_raw_event_<call>);
+ *        if (ret)
+ *                pr_info("event trace: Could not activate trace point "
+ *                        "probe to <call>");
+ *        return ret;
+ * }
+ *
+ * static void ftrace_raw_unreg_event_<call>(void)
+ * {
+ *        unregister_trace_<call>(ftrace_raw_event_<call>);
+ * }
+ *
+ * static struct trace_event ftrace_event_type_<call> = {
+ *        .trace = ftrace_raw_output_<call>, <-- stage 2
+ * };
+ *
+ * static int ftrace_raw_init_event_<call>(void)
+ * {
+ *        int id;
+ *
+ *        id = register_ftrace_event(&ftrace_event_type_<call>);
+ *        if (!id)
+ *                return -ENODEV;
+ *        event_<call>.id = id;
+ *        return 0;
+ * }
+ *
+ * static struct ftrace_event_call __used
+ * __attribute__((__aligned__(4)))
+ * __attribute__((section("_ftrace_events"))) event_<call> = {
+ *        .name = "<call>",
+ *        .system = "<system>",
+ *        .raw_init = ftrace_raw_init_event_<call>,
+ *        .regfunc = ftrace_raw_reg_event_<call>,
+ *        .unregfunc = ftrace_raw_unreg_event_<call>,
+ *        .show_format = ftrace_format_<call>,
+ * }
+ *
+ */
+
+#undef TP_FMT
+#define TP_FMT(fmt, args...) fmt "\n", ##args
+
+#ifdef CONFIG_EVENT_PROFILE
+#define _TRACE_PROFILE(call, proto, args) \
+static void ftrace_profile_##call(proto) \
+{ \
+        extern void perf_tpcounter_event(int); \
+        perf_tpcounter_event(event_##call.id); \
+} \
+ \
+static int ftrace_profile_enable_##call(struct ftrace_event_call *call) \
+{ \
+        int ret = 0; \
+        \
+        if (!atomic_inc_return(&call->profile_count)) \
+                ret = register_trace_##call(ftrace_profile_##call); \
+        \
+        return ret; \
+} \
+ \
+static void ftrace_profile_disable_##call(struct ftrace_event_call *call) \
+{ \
+        if (atomic_add_negative(-1, &call->profile_count)) \
+                unregister_trace_##call(ftrace_profile_##call); \
+}
+
+#define _TRACE_PROFILE_INIT(call) \
+        .profile_count = ATOMIC_INIT(-1), \
+        .profile_enable = ftrace_profile_enable_##call, \
+        .profile_disable = ftrace_profile_disable_##call,
+
+#else
+#define _TRACE_PROFILE(call, proto, args)
+#define _TRACE_PROFILE_INIT(call)
+#endif
+
+#define _TRACE_FORMAT(call, proto, args, fmt) \
+static void ftrace_event_##call(proto) \
+{ \
+        event_trace_printk(_RET_IP_, #call ": " fmt); \
+} \
+ \
+static int ftrace_reg_event_##call(void) \
+{ \
+        int ret; \
+        \
+        ret = register_trace_##call(ftrace_event_##call); \
+        if (ret) \
+                pr_info("event trace: Could not activate trace point " \
+                        "probe to " #call "\n"); \
+        return ret; \
+} \
+ \
+static void ftrace_unreg_event_##call(void) \
+{ \
+        unregister_trace_##call(ftrace_event_##call); \
+} \
+ \
+static struct ftrace_event_call event_##call; \
+ \
+static int ftrace_init_event_##call(void) \
+{ \
+        int id; \
+        \
+        id = register_ftrace_event(NULL); \
+        if (!id) \
+                return -ENODEV; \
+        event_##call.id = id; \
+        return 0; \
+}
+
+#undef TRACE_FORMAT
+#define TRACE_FORMAT(call, proto, args, fmt) \
+_TRACE_FORMAT(call, PARAMS(proto), PARAMS(args), PARAMS(fmt)) \
+_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args)) \
+static struct ftrace_event_call __used \
+__attribute__((__aligned__(4))) \
+__attribute__((section("_ftrace_events"))) event_##call = { \
+        .name = #call, \
+        .system = __stringify(TRACE_SYSTEM), \
+        .raw_init = ftrace_init_event_##call, \
+        .regfunc = ftrace_reg_event_##call, \
+        .unregfunc = ftrace_unreg_event_##call, \
+        _TRACE_PROFILE_INIT(call) \
+}
+
+#undef __entry
+#define __entry entry
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
+_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args)) \
+ \
+static struct ftrace_event_call event_##call; \
+ \
+static void ftrace_raw_event_##call(proto) \
+{ \
+        struct ftrace_event_call *call = &event_##call; \
+        struct ring_buffer_event *event; \
+        struct ftrace_raw_##call *entry; \
+        unsigned long irq_flags; \
+        int pc; \
+        \
+        local_save_flags(irq_flags); \
+        pc = preempt_count(); \
+        \
+        event = trace_current_buffer_lock_reserve(event_##call.id, \
+                                  sizeof(struct ftrace_raw_##call), \
+                                  irq_flags, pc); \
+        if (!event) \
+                return; \
+        entry = ring_buffer_event_data(event); \
+        \
+        assign; \
+        \
+        if (!filter_current_check_discard(call, entry, event)) \
+                trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \
+} \
+ \
+static int ftrace_raw_reg_event_##call(void) \
+{ \
+        int ret; \
+        \
+        ret = register_trace_##call(ftrace_raw_event_##call); \
+        if (ret) \
+                pr_info("event trace: Could not activate trace point " \
+                        "probe to " #call "\n"); \
+        return ret; \
+} \
+ \
+static void ftrace_raw_unreg_event_##call(void) \
+{ \
+        unregister_trace_##call(ftrace_raw_event_##call); \
+} \
+ \
+static struct trace_event ftrace_event_type_##call = { \
+        .trace = ftrace_raw_output_##call, \
+}; \
+ \
+static int ftrace_raw_init_event_##call(void) \
+{ \
+        int id; \
+        \
+        id = register_ftrace_event(&ftrace_event_type_##call); \
+        if (!id) \
+                return -ENODEV; \
+        event_##call.id = id; \
+        INIT_LIST_HEAD(&event_##call.fields); \
+        init_preds(&event_##call); \
+        return 0; \
+} \
+ \
+static struct ftrace_event_call __used \
+__attribute__((__aligned__(4))) \
+__attribute__((section("_ftrace_events"))) event_##call = { \
+        .name = #call, \
+        .system = __stringify(TRACE_SYSTEM), \
+        .raw_init = ftrace_raw_init_event_##call, \
+        .regfunc = ftrace_raw_reg_event_##call, \
+        .unregfunc = ftrace_raw_unreg_event_##call, \
+        .show_format = ftrace_format_##call, \
+        .define_fields = ftrace_define_fields_##call, \
+        _TRACE_PROFILE_INIT(call) \
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+#undef _TRACE_PROFILE
+#undef _TRACE_PROFILE_INIT
+
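To make the three stages concrete, here is approximately what the macros above would generate for the hypothetical foo_bar event sketched earlier. This is an illustration of the expansion, not code contained in the patch; it omits the TRACE_FORMAT and profiling variants, and the probe body is simplified in that the real macro also routes the commit through filter_current_check_discard().

/* Stage 1: the raw entry layout plus the event descriptor declaration. */
struct ftrace_raw_foo_bar {
        struct trace_entry ent;
        int value;
};
static struct ftrace_event_call event_foo_bar;

/* Stage 3: the probe that fires when the tracepoint is hit (simplified). */
static void ftrace_raw_event_foo_bar(int value)
{
        struct ring_buffer_event *event;
        struct ftrace_raw_foo_bar *entry;
        unsigned long irq_flags;
        int pc;

        local_save_flags(irq_flags);
        pc = preempt_count();

        event = trace_current_buffer_lock_reserve(event_foo_bar.id,
                                  sizeof(struct ftrace_raw_foo_bar),
                                  irq_flags, pc);
        if (!event)
                return;
        entry = ring_buffer_event_data(event);

        entry->value = value;   /* body of TP_fast_assign() */

        trace_nowake_buffer_unlock_commit(event, irq_flags, pc);
}

The generated register/unregister helpers and the section-placed event_foo_bar descriptor then follow the same pattern shown in the TRACE_EVENT() definition above.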
diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h
deleted file mode 100644
index 13d6b85668cf..000000000000
--- a/include/trace/trace_events.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* trace/<type>.h here */
-
-#include <trace/sched.h>
-#include <trace/irq.h>
-#include <trace/lockdep.h>
-#include <trace/skb.h>
-#include <trace/kmem.h>
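As a closing illustration of the ftrace_format_<call> callback generated above: for the hypothetical foo_bar event it would emit text along the following lines (the __field() stringification plus the TP_printk/__stringify rewrite, which is why __entry is redefined to REC at that stage). The offset reflects the preceding struct trace_entry and depends on architecture padding, so the numbers are indicative only.

        field:int value;        offset:12;      size:4;

print fmt: "value=%d", REC->value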