Diffstat (limited to 'include/trace/ftrace.h')
 -rw-r--r--  include/trace/ftrace.h | 514 ++++++++++++++++++++++++++++++++++++-----------------------------
 1 file changed, 278 insertions(+), 236 deletions(-)
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index cc0d9667e182..ea6f9d4a20e9 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -18,6 +18,26 @@
 
 #include <linux/ftrace_event.h>
 
+/*
+ * DECLARE_EVENT_CLASS can be used to add generic function
+ * handlers for events. That is, if all events have the same
+ * parameters and just have distinct trace points.
+ * Each tracepoint can be defined with DEFINE_EVENT and that
+ * will map the DECLARE_EVENT_CLASS to the tracepoint.
+ *
+ * TRACE_EVENT is a one to one mapping between tracepoint and template.
+ */
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, args, tstruct, assign, print)	\
+	DECLARE_EVENT_CLASS(name,			\
+			    PARAMS(proto),		\
+			    PARAMS(args),		\
+			    PARAMS(tstruct),		\
+			    PARAMS(assign),		\
+			    PARAMS(print));		\
+	DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));
+
+
 #undef __field
 #define __field(type, item)		type	item;
 
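For illustration, here is the shape a trace header takes with the new macros: one
DECLARE_EVENT_CLASS() carries the template, and each DEFINE_EVENT() stamps a
tracepoint out of it. The "sample" class and event names below are hypothetical,
chosen only to show the API; they are not part of this patch.

	DECLARE_EVENT_CLASS(sample_class,

		TP_PROTO(int cpu, unsigned long timeout),

		TP_ARGS(cpu, timeout),

		TP_STRUCT__entry(
			__field(int,		cpu)
			__field(unsigned long,	timeout)
		),

		TP_fast_assign(
			__entry->cpu	 = cpu;
			__entry->timeout = timeout;
		),

		TP_printk("cpu=%d timeout=%lu", __entry->cpu, __entry->timeout)
	);

	/* Two tracepoints sharing the class above; nothing is duplicated. */
	DEFINE_EVENT(sample_class, sample_timer_start,
		TP_PROTO(int cpu, unsigned long timeout),
		TP_ARGS(cpu, timeout));

	DEFINE_EVENT(sample_class, sample_timer_expire,
		TP_PROTO(int cpu, unsigned long timeout),
		TP_ARGS(cpu, timeout));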
@@ -36,14 +56,21 @@
 #undef TP_STRUCT__entry
 #define TP_STRUCT__entry(args...) args
 
-#undef TRACE_EVENT
-#define TRACE_EVENT(name, proto, args, tstruct, assign, print)	\
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print)	\
 	struct ftrace_raw_##name {			\
 		struct trace_entry	ent;		\
 		tstruct					\
 		char			__data[0];	\
-	};						\
-	static struct ftrace_event_call event_##name
+	};
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, name, proto, args)	\
+	static struct ftrace_event_call		\
+	__attribute__((__aligned__(4))) event_##name
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
+	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
 
 #undef __cpparg
 #define __cpparg(arg...) arg
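For the hypothetical sample_class above, this stage would expand to roughly the
following (a sketch; alignment and the event for only one DEFINE_EVENT shown):

	struct ftrace_raw_sample_class {
		struct trace_entry	ent;
		int			cpu;
		unsigned long		timeout;
		char			__data[0];
	};

	/* and, per DEFINE_EVENT, a declaration of the event call: */
	static struct ftrace_event_call
	__attribute__((__aligned__(4))) event_sample_timer_start;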
@@ -89,99 +116,18 @@
 #undef __string
 #define __string(item, src) __dynamic_array(char, item, -1)
 
-#undef TRACE_EVENT
-#define TRACE_EVENT(call, proto, args, tstruct, assign, print)	\
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
 	struct ftrace_data_offsets_##call {		\
 		tstruct;				\
 	};
 
-#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-
-/*
- * Setup the showing format of trace point.
- *
- * int
- * ftrace_format_##call(struct trace_seq *s)
- * {
- *	struct ftrace_raw_##call field;
- *	int ret;
- *
- *	ret = trace_seq_printf(s, #type " " #item ";"
- *			       " offset:%u; size:%u;\n",
- *			       offsetof(struct ftrace_raw_##call, item),
- *			       sizeof(field.type));
- *
- * }
- */
-
-#undef TP_STRUCT__entry
-#define TP_STRUCT__entry(args...) args
-
-#undef __field
-#define __field(type, item)					\
-	ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t"	\
-			       "offset:%u;\tsize:%u;\n",		\
-			       (unsigned int)offsetof(typeof(field), item), \
-			       (unsigned int)sizeof(field.item));	\
-	if (!ret)							\
-		return 0;
-
-#undef __field_ext
-#define __field_ext(type, item, filter_type)	__field(type, item)
-
-#undef __array
-#define __array(type, item, len)					\
-	ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
-			       "offset:%u;\tsize:%u;\n",		\
-			       (unsigned int)offsetof(typeof(field), item), \
-			       (unsigned int)sizeof(field.item));	\
-	if (!ret)							\
-		return 0;
-
-#undef __dynamic_array
-#define __dynamic_array(type, item, len)				\
-	ret = trace_seq_printf(s, "\tfield:__data_loc " #type "[] " #item ";\t"\
-			       "offset:%u;\tsize:%u;\n",		\
-			       (unsigned int)offsetof(typeof(field),	\
-					__data_loc_##item),		\
-			       (unsigned int)sizeof(field.__data_loc_##item)); \
-	if (!ret)							\
-		return 0;
-
-#undef __string
-#define __string(item, src) __dynamic_array(char, item, -1)
-
-#undef __entry
-#define __entry REC
-
-#undef __print_symbolic
-#undef __get_dynamic_array
-#undef __get_str
-
-#undef TP_printk
-#define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args)
-
-#undef TP_fast_assign
-#define TP_fast_assign(args...) args
-
-#undef TP_perf_assign
-#define TP_perf_assign(args...)
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, name, proto, args)
 
-#undef TRACE_EVENT
-#define TRACE_EVENT(call, proto, args, tstruct, func, print)	\
-static int							\
-ftrace_format_##call(struct ftrace_event_call *unused,		\
-		     struct trace_seq *s)			\
-{								\
-	struct ftrace_raw_##call field __attribute__((unused));	\
-	int ret = 0;						\
-								\
-	tstruct;						\
-								\
-	trace_seq_printf(s, "\nprint fmt: " print);		\
-								\
-	return ret;						\
-}
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
+	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
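After this hunk, a class contributes only its offsets struct here; the old
ftrace_format_<call>() text generator is gone, and the human-readable format is
derived from print_fmt in stage 4 instead. As a sketch, for a hypothetical class
with one __string() field, __dynamic_array() resolves at this stage to a u32 slot:

	struct ftrace_data_offsets_sample_class {
		u32 msg;	/* packed offset/length of the string data */
	};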
@@ -252,10 +198,11 @@ ftrace_format_##call(struct ftrace_event_call *unused, \
 		ftrace_print_symbols_seq(p, value, symbols);	\
 	})
 
-#undef TRACE_EVENT
-#define TRACE_EVENT(call, proto, args, tstruct, assign, print)	\
-static enum print_line_t					\
-ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
+static notrace enum print_line_t				\
+ftrace_raw_output_id_##call(int event_id, const char *name,	\
+			    struct trace_iterator *iter, int flags) \
 {								\
 	struct trace_seq *s = &iter->seq;			\
 	struct ftrace_raw_##call *field;			\
@@ -265,6 +212,47 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
 								\
 	entry = iter->ent;					\
 								\
+	if (entry->type != event_id) {				\
+		WARN_ON_ONCE(1);				\
+		return TRACE_TYPE_UNHANDLED;			\
+	}							\
+								\
+	field = (typeof(field))entry;				\
+								\
+	p = &get_cpu_var(ftrace_event_seq);			\
+	trace_seq_init(p);					\
+	ret = trace_seq_printf(s, "%s: ", name);		\
+	if (ret)						\
+		ret = trace_seq_printf(s, print);		\
+	put_cpu();						\
+	if (!ret)						\
+		return TRACE_TYPE_PARTIAL_LINE;			\
+								\
+	return TRACE_TYPE_HANDLED;				\
+}
+
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, name, proto, args)		\
+static notrace enum print_line_t				\
+ftrace_raw_output_##name(struct trace_iterator *iter, int flags) \
+{								\
+	return ftrace_raw_output_id_##template(event_##name.id, \
+					       #name, iter, flags); \
+}
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, call, proto, args, print)	\
+static notrace enum print_line_t				\
+ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
+{								\
+	struct trace_seq *s = &iter->seq;			\
+	struct ftrace_raw_##template *field;			\
+	struct trace_entry *entry;				\
+	struct trace_seq *p;					\
+	int ret;						\
+								\
+	entry = iter->ent;					\
+								\
 	if (entry->type != event_##call.id) {			\
 		WARN_ON_ONCE(1);				\
 		return TRACE_TYPE_UNHANDLED;			\
@@ -274,14 +262,16 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
 								\
 	p = &get_cpu_var(ftrace_event_seq);			\
 	trace_seq_init(p);					\
-	ret = trace_seq_printf(s, #call ": " print);		\
+	ret = trace_seq_printf(s, "%s: ", #call);		\
+	if (ret)						\
+		ret = trace_seq_printf(s, print);		\
 	put_cpu();						\
 	if (!ret)						\
 		return TRACE_TYPE_PARTIAL_LINE;			\
 								\
 	return TRACE_TYPE_HANDLED;				\
 }
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
 #undef __field_ext
@@ -301,7 +291,8 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
 	BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);			\
 	ret = trace_define_field(event_call, #type "[" #len "]", #item, \
 				 offsetof(typeof(field), item),	\
-				 sizeof(field.item), 0, FILTER_OTHER);	\
+				 sizeof(field.item),		\
+				 is_signed_type(type), FILTER_OTHER); \
 	if (ret)						\
 		return ret;
 
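The field definitions now report signedness through is_signed_type() instead of a
hardcoded 0, so filters and format files can tell signed fields apart. The macro is
defined elsewhere in the tracing headers; the usual kernel definition is a
compile-time expression along these lines (the exact spelling varies by version):

	/* true for signed types: (type)(-1) compares below (type)1 */
	#define is_signed_type(type)	(((type)(-1)) < (type)1)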
@@ -309,29 +300,32 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
 #define __dynamic_array(type, item, len)			\
 	ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
 				 offsetof(typeof(field), __data_loc_##item), \
-				 sizeof(field.__data_loc_##item), 0,	\
-				 FILTER_OTHER);
+				 sizeof(field.__data_loc_##item),	\
+				 is_signed_type(type), FILTER_OTHER);
 
 #undef __string
 #define __string(item, src) __dynamic_array(char, item, -1)
 
-#undef TRACE_EVENT
-#define TRACE_EVENT(call, proto, args, tstruct, func, print)	\
-static int							\
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print)	\
+static int notrace						\
 ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
 {								\
 	struct ftrace_raw_##call field;				\
 	int ret;						\
 								\
-	ret = trace_define_common_fields(event_call);		\
-	if (ret)						\
-		return ret;					\
-								\
 	tstruct;						\
 								\
 	return ret;						\
 }
 
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, name, proto, args)
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
+	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
+
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
 /*
@@ -358,11 +352,11 @@ ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
 		__data_size += (len) * sizeof(type);
 
 #undef __string
-#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1) \
+#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)
 
-#undef TRACE_EVENT
-#define TRACE_EVENT(call, proto, args, tstruct, assign, print)	\
-static inline int ftrace_get_offsets_##call(			\
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
+static inline notrace int ftrace_get_offsets_##call(		\
 	struct ftrace_data_offsets_##call *__data_offsets, proto) \
 {								\
 	int __data_size = 0;					\
@@ -373,9 +367,16 @@ static inline int ftrace_get_offsets_##call( \
 	return __data_size;					\
 }
 
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, name, proto, args)
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
+	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
+
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
 
 /*
  * Generate the functions needed for tracepoint perf_event support.
@@ -394,24 +395,33 @@ static inline int ftrace_get_offsets_##call( \
  *
  */
 
-#undef TRACE_EVENT
-#define TRACE_EVENT(call, proto, args, tstruct, assign, print)	\
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)
+
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, name, proto, args)		\
 								\
-static void ftrace_profile_##call(proto);			\
+static void perf_trace_##name(proto);				\
 								\
-static int ftrace_profile_enable_##call(void)			\
+static notrace int						\
+perf_trace_enable_##name(struct ftrace_event_call *unused)	\
 {								\
-	return register_trace_##call(ftrace_profile_##call);	\
+	return register_trace_##name(perf_trace_##name);	\
 }								\
 								\
-static void ftrace_profile_disable_##call(void)			\
+static notrace void						\
+perf_trace_disable_##name(struct ftrace_event_call *unused)	\
 {								\
-	unregister_trace_##call(ftrace_profile_##call);		\
+	unregister_trace_##name(perf_trace_##name);		\
 }
 
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
+	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
+
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-#endif
+#endif /* CONFIG_PERF_EVENTS */
 
 /*
  * Stage 4 of the trace events.
@@ -423,18 +433,12 @@ static void ftrace_profile_disable_##call(void) \
  *	event_trace_printk(_RET_IP_, "<call>: " <fmt>);
  * }
  *
- * static int ftrace_reg_event_<call>(void)
+ * static int ftrace_reg_event_<call>(struct ftrace_event_call *unused)
  * {
- *	int ret;
- *
- *	ret = register_trace_<call>(ftrace_event_<call>);
- *	if (!ret)
- *		pr_info("event trace: Could not activate trace point "
- *			"probe to <call>");
- *	return ret;
+ *	return register_trace_<call>(ftrace_event_<call>);
  * }
  *
- * static void ftrace_unreg_event_<call>(void)
+ * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
  * {
  *	unregister_trace_<call>(ftrace_event_<call>);
  * }
@@ -469,7 +473,7 @@ static void ftrace_profile_disable_##call(void) \
  *	trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);
  * }
  *
- * static int ftrace_raw_reg_event_<call>(void)
+ * static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused)
  * {
  *	int ret;
  *
@@ -480,7 +484,7 @@ static void ftrace_profile_disable_##call(void) \
  *	return ret;
  * }
  *
- * static void ftrace_unreg_event_<call>(void)
+ * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
  * {
  *	unregister_trace_<call>(ftrace_raw_event_<call>);
  * }
@@ -489,43 +493,27 @@ static void ftrace_profile_disable_##call(void) \
  *	.trace			= ftrace_raw_output_<call>, <-- stage 2
  * };
  *
- * static int ftrace_raw_init_event_<call>(void)
- * {
- *	int id;
- *
- *	id = register_ftrace_event(&ftrace_event_type_<call>);
- *	if (!id)
- *		return -ENODEV;
- *	event_<call>.id = id;
- *	return 0;
- * }
- *
  * static struct ftrace_event_call __used
  * __attribute__((__aligned__(4)))
  * __attribute__((section("_ftrace_events"))) event_<call> = {
  *	.name			= "<call>",
  *	.system			= "<system>",
- *	.raw_init		= ftrace_raw_init_event_<call>,
+ *	.raw_init		= trace_event_raw_init,
  *	.regfunc		= ftrace_reg_event_<call>,
  *	.unregfunc		= ftrace_unreg_event_<call>,
- *	.show_format		= ftrace_format_<call>,
  * }
  *
  */
 
-#undef TP_FMT
-#define TP_FMT(fmt, args...)	fmt "\n", ##args
-
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
 
-#define _TRACE_PROFILE_INIT(call)				\
-	.profile_count = ATOMIC_INIT(-1),			\
-	.profile_enable = ftrace_profile_enable_##call,		\
-	.profile_disable = ftrace_profile_disable_##call,
+#define _TRACE_PERF_INIT(call)					\
+	.perf_event_enable = perf_trace_enable_##call,		\
+	.perf_event_disable = perf_trace_disable_##call,
 
 #else
-#define _TRACE_PROFILE_INIT(call)
-#endif
+#define _TRACE_PERF_INIT(call)
+#endif /* CONFIG_PERF_EVENTS */
 
 #undef __entry
 #define __entry entry
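Under CONFIG_PERF_EVENTS the renamed helper wires the perf enable/disable
callbacks into each event's ftrace_event_call. As a sketch, for a hypothetical
event "foo", _TRACE_PERF_INIT(foo) expands inside the stage 4 event_foo
initializer to:

	.perf_event_enable	= perf_trace_enable_foo,
	.perf_event_disable	= perf_trace_disable_foo,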
@@ -547,15 +535,20 @@ static void ftrace_profile_disable_##call(void) \
 #define __assign_str(dst, src)					\
 	strcpy(__get_str(dst), src);
 
-#undef TRACE_EVENT
-#define TRACE_EVENT(call, proto, args, tstruct, assign, print)	\
-								\
-static struct ftrace_event_call event_##call;			\
+#undef TP_fast_assign
+#define TP_fast_assign(args...) args
+
+#undef TP_perf_assign
+#define TP_perf_assign(args...)
+
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
 								\
-static void ftrace_raw_event_##call(proto)			\
+static notrace void						\
+ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
+			   proto)				\
 {								\
 	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
-	struct ftrace_event_call *event_call = &event_##call;	\
 	struct ring_buffer_event *event;			\
 	struct ftrace_raw_##call *entry;			\
 	struct ring_buffer *buffer;				\
@@ -569,7 +562,7 @@ static void ftrace_raw_event_##call(proto) \
 	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
 								\
 	event = trace_current_buffer_lock_reserve(&buffer,	\
-				 event_##call.id,		\
+				 event_call->id,		\
 				 sizeof(*entry) + __data_size,	\
 				 irq_flags, pc);		\
 	if (!event)						\
@@ -584,39 +577,74 @@ static void ftrace_raw_event_##call(proto) \
 	if (!filter_current_check_discard(buffer, event_call, entry, event)) \
 		trace_nowake_buffer_unlock_commit(buffer,	\
 						  event, irq_flags, pc); \
-}								\
+}
+
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, call, proto, args)		\
 								\
-static int ftrace_raw_reg_event_##call(void *ptr)		\
+static notrace void ftrace_raw_event_##call(proto)		\
 {								\
-	int ret;						\
+	ftrace_raw_event_id_##template(&event_##call, args);	\
+}								\
 								\
-	ret = register_trace_##call(ftrace_raw_event_##call);	\
-	if (ret)						\
-		pr_info("event trace: Could not activate trace point " \
-			"probe to " #call "\n");		\
-	return ret;						\
+static notrace int						\
+ftrace_raw_reg_event_##call(struct ftrace_event_call *unused)	\
+{								\
+	return register_trace_##call(ftrace_raw_event_##call);	\
 }								\
 								\
-static void ftrace_raw_unreg_event_##call(void *ptr)		\
+static notrace void						\
+ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused) \
 {								\
 	unregister_trace_##call(ftrace_raw_event_##call);	\
 }								\
 								\
 static struct trace_event ftrace_event_type_##call = {		\
 	.trace			= ftrace_raw_output_##call,	\
-};								\
+};
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
+	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+#undef __entry
+#define __entry REC
+
+#undef __print_flags
+#undef __print_symbolic
+#undef __get_dynamic_array
+#undef __get_str
+
+#undef TP_printk
+#define TP_printk(fmt, args...) "\"" fmt "\", "  __stringify(args)
+
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
+static const char print_fmt_##call[] = print;
+
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, call, proto, args)		\
 								\
-static int ftrace_raw_init_event_##call(void)			\
-{								\
-	int id;							\
-								\
-	id = register_ftrace_event(&ftrace_event_type_##call);	\
-	if (!id)						\
-		return -ENODEV;					\
-	event_##call.id = id;					\
-	INIT_LIST_HEAD(&event_##call.fields);			\
-	return 0;						\
-}								\
+static struct ftrace_event_call __used				\
+__attribute__((__aligned__(4)))					\
+__attribute__((section("_ftrace_events"))) event_##call = {	\
+	.name			= #call,			\
+	.system			= __stringify(TRACE_SYSTEM),	\
+	.event			= &ftrace_event_type_##call,	\
+	.raw_init		= trace_event_raw_init,		\
+	.regfunc		= ftrace_raw_reg_event_##call,	\
+	.unregfunc		= ftrace_raw_unreg_event_##call, \
+	.print_fmt		= print_fmt_##template,		\
+	.define_fields		= ftrace_define_fields_##template, \
+	_TRACE_PERF_INIT(call)					\
+}
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, call, proto, args, print)	\
+								\
+static const char print_fmt_##call[] = print;			\
 								\
 static struct ftrace_event_call __used				\
 __attribute__((__aligned__(4)))					\
@@ -624,28 +652,29 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 	.name			= #call,			\
 	.system			= __stringify(TRACE_SYSTEM),	\
 	.event			= &ftrace_event_type_##call,	\
-	.raw_init		= ftrace_raw_init_event_##call,	\
+	.raw_init		= trace_event_raw_init,		\
 	.regfunc		= ftrace_raw_reg_event_##call,	\
 	.unregfunc		= ftrace_raw_unreg_event_##call, \
-	.show_format		= ftrace_format_##call,		\
-	.define_fields		= ftrace_define_fields_##call,	\
-	_TRACE_PROFILE_INIT(call)				\
+	.print_fmt		= print_fmt_##call,		\
+	.define_fields		= ftrace_define_fields_##template, \
+	_TRACE_PERF_INIT(call)					\
 }
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
 /*
- * Define the insertion callback to profile events
+ * Define the insertion callback to perf events
  *
  * The job is very similar to ftrace_raw_event_<call> except that we don't
  * insert in the ring buffer but in a perf counter.
  *
- * static void ftrace_profile_<call>(proto)
+ * static void ftrace_perf_<call>(proto)
  * {
  *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
  *	struct ftrace_event_call *event_call = &event_<call>;
 *	extern void perf_tp_event(int, u64, u64, void *, int);
 *	struct ftrace_raw_##call *entry;
+ *	struct perf_trace_buf *trace_buf;
  *	u64 __addr = 0, __count = 1;
  *	unsigned long irq_flags;
  *	struct trace_entry *ent;
@@ -670,14 +699,25 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
  *	__cpu = smp_processor_id();
  *
  *	if (in_nmi())
- *		raw_data = rcu_dereference(trace_profile_buf_nmi);
+ *		trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
  *	else
- *		raw_data = rcu_dereference(trace_profile_buf);
+ *		trace_buf = rcu_dereference_sched(perf_trace_buf);
  *
- *	if (!raw_data)
+ *	if (!trace_buf)
  *		goto end;
  *
- *	raw_data = per_cpu_ptr(raw_data, __cpu);
+ *	trace_buf = per_cpu_ptr(trace_buf, __cpu);
+ *
+ *	// Avoid recursion from perf that could mess up the buffer
+ *	if (trace_buf->recursion++)
+ *		goto end_recursion;
+ *
+ *	raw_data = trace_buf->buf;
+ *
+ *	// Make recursion update visible before entering perf_tp_event
+ *	// so that we protect from perf recursions.
+ *
+ *	barrier();
  *
  *	//zero dead bytes from alignment to avoid stack leak to userspace:
  *	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
@@ -696,7 +736,17 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
  * }
  */
 
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
+
+#undef __entry
+#define __entry entry
+
+#undef __get_dynamic_array
+#define __get_dynamic_array(field)	\
+		((void *)__entry + (__entry->__data_loc_##field & 0xffff))
+
+#undef __get_str
+#define __get_str(field) (char *)__get_dynamic_array(field)
 
 #undef __perf_addr
 #define __perf_addr(a) __addr = (a)
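The perf side now gets its own __get_dynamic_array()/__get_str() helpers. They
rely on the __data_loc convention, where one u32 packs the field's offset from
the start of the entry (low 16 bits) and its length (high 16 bits). A standalone
sketch of the decode, with a hypothetical helper name:

	static inline void *sample_get_dynamic(void *entry, u32 data_loc)
	{
		unsigned int offset = data_loc & 0xffff;	/* low 16 bits  */
		unsigned int length = data_loc >> 16;		/* high 16 bits */

		(void)length;	/* the macros above need only the offset */
		return (char *)entry + offset;
	}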
@@ -704,67 +754,59 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 #undef __perf_count
 #define __perf_count(c) __count = (c)
 
-#undef TRACE_EVENT
-#define TRACE_EVENT(call, proto, args, tstruct, assign, print)	\
-static void ftrace_profile_##call(proto)			\
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
+static notrace void						\
+perf_trace_templ_##call(struct ftrace_event_call *event_call,	\
+			proto)					\
 {								\
 	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
-	struct ftrace_event_call *event_call = &event_##call;	\
-	extern void perf_tp_event(int, u64, u64, void *, int);	\
 	struct ftrace_raw_##call *entry;			\
 	u64 __addr = 0, __count = 1;				\
 	unsigned long irq_flags;				\
-	struct trace_entry *ent;				\
+	struct pt_regs *__regs;					\
 	int __entry_size;					\
 	int __data_size;					\
-	char *raw_data;						\
-	int __cpu;						\
-	int pc;							\
-								\
-	pc = preempt_count();					\
+	int rctx;						\
 								\
 	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
 	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
 			     sizeof(u64));			\
 	__entry_size -= sizeof(u32);				\
 								\
-	if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE,	\
+	if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE,	\
 		      "profile buffer not large enough"))	\
 		return;						\
-								\
-	local_irq_save(irq_flags);				\
-	__cpu = smp_processor_id();				\
-								\
-	if (in_nmi())						\
-		raw_data = rcu_dereference(trace_profile_buf_nmi); \
-	else							\
-		raw_data = rcu_dereference(trace_profile_buf);	\
-								\
-	if (!raw_data)						\
-		goto end;					\
-								\
-	raw_data = per_cpu_ptr(raw_data, __cpu);		\
-								\
-	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; \
-	entry = (struct ftrace_raw_##call *)raw_data;		\
-	ent = &entry->ent;					\
-	tracing_generic_entry_update(ent, irq_flags, pc);	\
-	ent->type = event_call->id;				\
-								\
+	entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare( \
+		__entry_size, event_call->id, &rctx, &irq_flags); \
+	if (!entry)						\
+		return;						\
 	tstruct							\
 								\
 	{ assign; }						\
 								\
-	perf_tp_event(event_call->id, __addr, __count, entry,	\
-			__entry_size);				\
-								\
-end:								\
-	local_irq_restore(irq_flags);				\
+	__regs = &__get_cpu_var(perf_trace_regs);		\
+	perf_fetch_caller_regs(__regs, 2);			\
 								\
+	perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
+		__count, irq_flags, __regs);			\
 }
 
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, call, proto, args)		\
+static notrace void perf_trace_##call(proto)			\
+{								\
+	struct ftrace_event_call *event_call = &event_##call;	\
+								\
+	perf_trace_templ_##template(event_call, args);		\
+}
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
+	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
+
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-#endif /* CONFIG_EVENT_PROFILE */
+#endif /* CONFIG_PERF_EVENTS */
 
 #undef _TRACE_PROFILE_INIT
 
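Taken together, the perf stage now expands each DEFINE_EVENT into a thin wrapper
that funnels into the shared per-class template. For the hypothetical
sample_timer_start event from earlier, the generated glue amounts to this sketch:

	static notrace void perf_trace_sample_timer_start(int cpu,
							  unsigned long timeout)
	{
		struct ftrace_event_call *event_call = &event_sample_timer_start;

		/* all buffer handling lives once, in the class template */
		perf_trace_templ_sample_class(event_call, cpu, timeout);
	}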