diff options
Diffstat (limited to 'include/trace/ftrace.h')
-rw-r--r-- | include/trace/ftrace.h | 591 |
1 files changed, 591 insertions, 0 deletions
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h new file mode 100644 index 000000000000..1867553c61e5 --- /dev/null +++ b/include/trace/ftrace.h | |||
@@ -0,0 +1,591 @@ | |||
/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct ftrace_raw_<call> {
 *	struct trace_entry	ent;
 *	<type>	<item>;
 *	<type2>	<item2>[<len>];
 *	[...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */

#include <linux/ftrace_event.h>

/* A plain field becomes an ordinary struct member. */
#undef __field
#define __field(type, item)		type	item;

/* A fixed-size array becomes an in-line array member. */
#undef __array
#define __array(type, item, len)	type	item[len];

/*
 * A dynamic array lives in the variable-length __data[] area at the end
 * of the record; only a 16-bit offset into that area is stored in the
 * fixed part of the structure.
 */
#undef __dynamic_array
#define __dynamic_array(type, item, len) unsigned short __data_loc_##item;

/* A string is just a dynamic array of char (real length found at runtime). */
#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

/*
 * Emit the raw record layout for each event, plus a forward declaration
 * of its ftrace_event_call (defined for real in stage 4 below).
 */
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print)	\
	struct ftrace_raw_##name {				\
		struct trace_entry	ent;			\
		tstruct						\
		char			__data[0];		\
	};							\
	static struct ftrace_event_call event_##name

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
46 | |||
47 | |||
48 | /* | ||
49 | * Stage 2 of the trace events. | ||
50 | * | ||
51 | * Include the following: | ||
52 | * | ||
53 | * struct ftrace_data_offsets_<call> { | ||
54 | * int <item1>; | ||
55 | * int <item2>; | ||
56 | * [...] | ||
57 | * }; | ||
58 | * | ||
59 | * The __dynamic_array() macro will create each int <item>, this is | ||
60 | * to keep the offset of each array from the beginning of the event. | ||
61 | */ | ||
62 | |||
63 | #undef __field | ||
64 | #define __field(type, item); | ||
65 | |||
66 | #undef __array | ||
67 | #define __array(type, item, len) | ||
68 | |||
69 | #undef __dynamic_array | ||
70 | #define __dynamic_array(type, item, len) int item; | ||
71 | |||
72 | #undef __string | ||
73 | #define __string(item, src) __dynamic_array(char, item, -1) | ||
74 | |||
75 | #undef TRACE_EVENT | ||
76 | #define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ | ||
77 | struct ftrace_data_offsets_##call { \ | ||
78 | tstruct; \ | ||
79 | }; | ||
80 | |||
81 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | ||
82 | |||
/*
 * Setup the showing format of trace point.
 *
 * int
 * ftrace_format_##call(struct trace_seq *s)
 * {
 *	struct ftrace_raw_##call field;
 *	int ret;
 *
 *	ret = trace_seq_printf(s, #type " " #item ";"
 *			       " offset:%u; size:%u;\n",
 *			       offsetof(struct ftrace_raw_##call, item),
 *			       sizeof(field.item));
 *
 * }
 */

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

/* Print one "field:" description line; bail out if the seq buffer fills. */
#undef __field
#define __field(type, item)					\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t"	\
			       "offset:%u;\tsize:%u;\n",		\
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item));	\
	if (!ret)							\
		return 0;

#undef __array
#define __array(type, item, len)					\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
			       "offset:%u;\tsize:%u;\n",		\
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item));	\
	if (!ret)							\
		return 0;

/* Dynamic arrays describe the offset/size of their __data_loc_<item> slot. */
#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	ret = trace_seq_printf(s, "\tfield:__data_loc " #item ";\t"	\
			       "offset:%u;\tsize:%u;\n",		\
			       (unsigned int)offsetof(typeof(field),	\
					__data_loc_##item),		\
			       (unsigned int)sizeof(field.__data_loc_##item)); \
	if (!ret)							\
		return 0;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

/* In the printed format string the record is referred to as "REC". */
#undef __entry
#define __entry REC

/* Undefine these so they are not expanded while emitting the format. */
#undef __print_symbolic
#undef __get_dynamic_array
#undef __get_str

#undef TP_printk
#define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args)

#undef TP_fast_assign
#define TP_fast_assign(args...) args

/*
 * Define ftrace_format_<call>(), which dumps the record layout and the
 * print format into a trace_seq (this backs the event's "format" file).
 * NOTE(review): the trace_seq_printf() of the print fmt is unchecked;
 * only the field lines affect the returned value.
 */
#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print)	\
static int							\
ftrace_format_##call(struct trace_seq *s)			\
{								\
	struct ftrace_raw_##call field __attribute__((unused));	\
	int ret = 0;						\
								\
	tstruct;						\
								\
	trace_seq_printf(s, "\nprint fmt: " print);		\
								\
	return ret;						\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
163 | |||
/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *	struct trace_seq *s = &iter->seq;
 *	struct ftrace_raw_<call> *field; <-- defined in stage 1
 *	struct trace_entry *entry;
 *	struct trace_seq *p;
 *	int ret;
 *
 *	entry = iter->ent;
 *
 *	if (entry->type != event_<call>.id) {
 *		WARN_ON_ONCE(1);
 *		return TRACE_TYPE_UNHANDLED;
 *	}
 *
 *	field = (typeof(field))entry;
 *
 *	p = &get_cpu_var(ftrace_event_seq);
 *	trace_seq_init(p);
 *	ret = trace_seq_printf(s, <TP_printk> "\n");
 *	put_cpu();
 *	if (!ret)
 *		return TRACE_TYPE_PARTIAL_LINE;
 *
 *	return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in binary.
 */

/* At output time, __entry refers to the decoded raw record. */
#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

/* Resolve a dynamic array to its real address inside the record. */
#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + __entry->__data_loc_##field)

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

/*
 * Render a bit-flags value as e.g. "FLAG_A|FLAG_B" via the per-cpu
 * scratch trace_seq 'p' set up by ftrace_raw_output_<call>() below.
 */
#undef __print_flags
#define __print_flags(flag, delim, flag_array...)			\
	({								\
		static const struct trace_print_flags flags[] =		\
			{ flag_array, { -1, NULL }};			\
		ftrace_print_flags_seq(p, delim, flag, flags);		\
	})

/* Map a scalar value to its symbolic name from symbol_array. */
#undef __print_symbolic
#define __print_symbolic(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags symbols[] =	\
			{ symbol_array, { -1, NULL }};			\
		ftrace_print_symbols_seq(p, value, symbols);		\
	})

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
enum print_line_t							\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags)	\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##call *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p;						\
	int ret;							\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event_##call.id) {				\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	p = &get_cpu_var(ftrace_event_seq);				\
	trace_seq_init(p);						\
	ret = trace_seq_printf(s, #call ": " print);			\
	put_cpu();							\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
262 | |||
263 | #undef __field | ||
264 | #define __field(type, item) \ | ||
265 | ret = trace_define_field(event_call, #type, #item, \ | ||
266 | offsetof(typeof(field), item), \ | ||
267 | sizeof(field.item), is_signed_type(type)); \ | ||
268 | if (ret) \ | ||
269 | return ret; | ||
270 | |||
271 | #undef __array | ||
272 | #define __array(type, item, len) \ | ||
273 | BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \ | ||
274 | ret = trace_define_field(event_call, #type "[" #len "]", #item, \ | ||
275 | offsetof(typeof(field), item), \ | ||
276 | sizeof(field.item), 0); \ | ||
277 | if (ret) \ | ||
278 | return ret; | ||
279 | |||
280 | #undef __dynamic_array | ||
281 | #define __dynamic_array(type, item, len) \ | ||
282 | ret = trace_define_field(event_call, "__data_loc" "[" #type "]", #item,\ | ||
283 | offsetof(typeof(field), __data_loc_##item), \ | ||
284 | sizeof(field.__data_loc_##item), 0); | ||
285 | |||
286 | #undef __string | ||
287 | #define __string(item, src) __dynamic_array(char, item, -1) | ||
288 | |||
289 | #undef TRACE_EVENT | ||
290 | #define TRACE_EVENT(call, proto, args, tstruct, func, print) \ | ||
291 | int \ | ||
292 | ftrace_define_fields_##call(void) \ | ||
293 | { \ | ||
294 | struct ftrace_raw_##call field; \ | ||
295 | struct ftrace_event_call *event_call = &event_##call; \ | ||
296 | int ret; \ | ||
297 | \ | ||
298 | __common_field(int, type, 1); \ | ||
299 | __common_field(unsigned char, flags, 0); \ | ||
300 | __common_field(unsigned char, preempt_count, 0); \ | ||
301 | __common_field(int, pid, 1); \ | ||
302 | __common_field(int, tgid, 1); \ | ||
303 | \ | ||
304 | tstruct; \ | ||
305 | \ | ||
306 | return ret; \ | ||
307 | } | ||
308 | |||
309 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | ||
310 | |||
/*
 * remember the offset of each array from the beginning of the event.
 */

/* Inside ftrace_get_offsets_<call>, __entry is the dummy 'entry' pointer,
 * used only for typeof()/offsetof() -- it is never dereferenced. */
#undef __entry
#define __entry entry

/* Fixed-size members consume no dynamic space. */
#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

/*
 * Record where this array will live -- the current dynamic size plus the
 * offset of __data[] from the start of the record -- then reserve its
 * space by growing __data_size.
 */
#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__data_offsets->item = __data_size +				\
			       offsetof(typeof(*entry), __data);	\
	__data_size += (len) * sizeof(type);

/* A string reserves strlen(src) + 1 bytes (room for the terminating NUL). */
#undef __string
#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)

/*
 * Define ftrace_get_offsets_<call>(): fills in the per-event offsets
 * structure and returns the total number of dynamic bytes needed.
 */
#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print) 	\
static inline int ftrace_get_offsets_##call(			\
	struct ftrace_data_offsets_##call *__data_offsets, proto)	\
{								\
	int __data_size = 0;					\
	struct ftrace_raw_##call __maybe_unused *entry;		\
								\
	tstruct;						\
								\
	return __data_size;					\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
347 | |||
/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * static void ftrace_event_<call>(proto)
 * {
 *	event_trace_printk(_RET_IP_, "<call>: " <fmt>);
 * }
 *
 * static int ftrace_reg_event_<call>(void)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_event_<call>);
 *	if (ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to <call>");
 *	return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(void)
 * {
 *	unregister_trace_<call>(ftrace_event_<call>);
 * }
 *
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(proto)
 * {
 *	struct ring_buffer_event *event;
 *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
 *	unsigned long irq_flags;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	event = trace_current_buffer_lock_reserve(event_<call>.id,
 *				  sizeof(struct ftrace_raw_<call>),
 *				  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry	= ring_buffer_event_data(event);
 *
 *	<assign>;  <-- Here we assign the entries by the __field and
 *		       __array macros.
 *
 *	trace_current_buffer_unlock_commit(event, irq_flags, pc);
 * }
 *
 * static int ftrace_raw_reg_event_<call>(void)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_raw_event_<call>);
 *	if (ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to <call>");
 *	return ret;
 * }
 *
 * static void ftrace_raw_unreg_event_<call>(void)
 * {
 *	unregister_trace_<call>(ftrace_raw_event_<call>);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *	.trace			= ftrace_raw_output_<call>, <-- stage 3
 * };
 *
 * static int ftrace_raw_init_event_<call>(void)
 * {
 *	int id;
 *
 *	id = register_ftrace_event(&ftrace_event_type_<call>);
 *	if (!id)
 *		return -ENODEV;
 *	event_<call>.id = id;
 *	return 0;
 * }
 *
 * static struct ftrace_event_call __used
 * __attribute__((__aligned__(4)))
 * __attribute__((section("_ftrace_events"))) event_<call> = {
 *	.name			= "<call>",
 *	.system			= "<system>",
 *	.raw_init		= ftrace_raw_init_event_<call>,
 *	.regfunc		= ftrace_raw_reg_event_<call>,
 *	.unregfunc		= ftrace_raw_unreg_event_<call>,
 *	.show_format		= ftrace_format_<call>,
 * }
 *
 */

#undef TP_FMT
#define TP_FMT(fmt, args...)	fmt "\n", ##args

#ifdef CONFIG_EVENT_PROFILE
/*
 * Profiling hooks: count the event for perf.  profile_count starts at -1,
 * so the first enable (atomic_inc_return() hits 0) registers the probe and
 * the matching last disable (atomic_add_negative() goes back to -1)
 * unregisters it; intermediate enables/disables only adjust the count.
 */
#define _TRACE_PROFILE(call, proto, args)				\
static void ftrace_profile_##call(proto)				\
{									\
	extern void perf_tpcounter_event(int);				\
	perf_tpcounter_event(event_##call.id);				\
}									\
									\
static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
{									\
	int ret = 0;							\
									\
	if (!atomic_inc_return(&event_call->profile_count))		\
		ret = register_trace_##call(ftrace_profile_##call);	\
									\
	return ret;							\
}									\
									\
static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
{									\
	if (atomic_add_negative(-1, &event_call->profile_count))	\
		unregister_trace_##call(ftrace_profile_##call);		\
}

/* Initializers spliced into the event_<call> definition in stage 4. */
#define _TRACE_PROFILE_INIT(call)					\
	.profile_count = ATOMIC_INIT(-1),				\
	.profile_enable = ftrace_profile_enable_##call,			\
	.profile_disable = ftrace_profile_disable_##call,

#else
/* Profiling disabled: both helpers expand to nothing. */
#define _TRACE_PROFILE(call, proto, args)
#define _TRACE_PROFILE_INIT(call)
#endif
482 | |||
/* During TP_fast_assign, __entry points at the reserved ring-buffer slot. */
#undef __entry
#define __entry entry

/* Fixed fields are filled in directly by TP_fast_assign; nothing extra. */
#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

/* Copy the offset precomputed by ftrace_get_offsets_<call>() into the
 * record's __data_loc_<item> slot. */
#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

/* Copy a source string into the record's dynamic data area. */
#undef __assign_str
#define __assign_str(dst, src)						\
	strcpy(__get_str(dst), src);

/*
 * Stage 4 proper: for each event emit the probe function, the register/
 * unregister helpers, the trace_event output hook, the init routine and
 * the ftrace_event_call placed in the "_ftrace_events" section.
 */
#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args))			\
									\
static struct ftrace_event_call event_##call;				\
									\
static void ftrace_raw_event_##call(proto)				\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_event_call *event_call = &event_##call;		\
	struct ring_buffer_event *event;				\
	struct ftrace_raw_##call *entry;				\
	unsigned long irq_flags;					\
	int __data_size;						\
	int pc;								\
									\
	local_save_flags(irq_flags);					\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
									\
	event = trace_current_buffer_lock_reserve(event_##call.id,	\
				 sizeof(*entry) + __data_size,		\
				 irq_flags, pc);			\
	if (!event)							\
		return;							\
	entry = ring_buffer_event_data(event);				\
									\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	if (!filter_current_check_discard(event_call, entry, event))	\
		trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \
}									\
									\
static int ftrace_raw_reg_event_##call(void)				\
{									\
	int ret;							\
									\
	ret = register_trace_##call(ftrace_raw_event_##call);		\
	if (ret)							\
		pr_info("event trace: Could not activate trace point "	\
			"probe to " #call "\n");			\
	return ret;							\
}									\
									\
static void ftrace_raw_unreg_event_##call(void)				\
{									\
	unregister_trace_##call(ftrace_raw_event_##call);		\
}									\
									\
static struct trace_event ftrace_event_type_##call = {			\
	.trace			= ftrace_raw_output_##call,		\
};									\
									\
static int ftrace_raw_init_event_##call(void)				\
{									\
	int id;								\
									\
	id = register_ftrace_event(&ftrace_event_type_##call);		\
	if (!id)							\
		return -ENODEV;						\
	event_##call.id = id;						\
	INIT_LIST_HEAD(&event_##call.fields);				\
	init_preds(&event_##call);					\
	return 0;							\
}									\
									\
static struct ftrace_event_call __used					\
__attribute__((__aligned__(4)))						\
__attribute__((section("_ftrace_events"))) event_##call = {		\
	.name			= #call,				\
	.system			= __stringify(TRACE_SYSTEM),		\
	.event			= &ftrace_event_type_##call,		\
	.raw_init		= ftrace_raw_init_event_##call,		\
	.regfunc		= ftrace_raw_reg_event_##call,		\
	.unregfunc		= ftrace_raw_unreg_event_##call,	\
	.show_format		= ftrace_format_##call,		\
	.define_fields		= ftrace_define_fields_##call,		\
	_TRACE_PROFILE_INIT(call)					\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/* Keep the profile helpers local to this expansion pass. */
#undef _TRACE_PROFILE
#undef _TRACE_PROFILE_INIT
591 | |||