Diffstat (limited to 'include/trace/ftrace.h')
 -rw-r--r--  include/trace/ftrace.h | 526
 1 file changed, 170 insertions(+), 356 deletions(-)
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index c6fe03e902ca..a9377c0083ad 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -62,24 +62,25 @@
 	struct trace_entry	ent;				\
 	tstruct							\
 	char			__data[0];			\
-};
+};								\
+								\
+static struct ftrace_event_class event_class_##name;
+
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, name, proto, args)		\
-	static struct ftrace_event_call event_##name
+	static struct ftrace_event_call	__used		\
+	__attribute__((__aligned__(4))) event_##name
 
 #undef DEFINE_EVENT_PRINT
 #define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
 	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
 
-#undef __cpparg
-#define __cpparg(arg...) arg
-
 /* Callbacks are meaningless to ftrace. */
 #undef TRACE_EVENT_FN
 #define TRACE_EVENT_FN(name, proto, args, tstruct,		\
 		assign, print, reg, unreg)			\
-	TRACE_EVENT(name, __cpparg(proto), __cpparg(args),	\
-		__cpparg(tstruct), __cpparg(assign), __cpparg(print)) \
+	TRACE_EVENT(name, PARAMS(proto), PARAMS(args),		\
+		PARAMS(tstruct), PARAMS(assign), PARAMS(print)) \
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
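Side note on the hunk above: __cpparg() was a local copy of PARAMS() from <linux/tracepoint.h>; both exist only to smuggle a comma-containing argument list through a single macro parameter. A standalone sketch of the mechanism (the DEFINE_HANDLER and show_wakeup names are made up for illustration, not part of the patch):

#include <stdio.h>

/* PARAMS() simply re-expands its contents; in the kernel it comes
 * from <linux/tracepoint.h>. */
#define PARAMS(args...) args

/* Hypothetical wrapper macro: forwards a prototype and an argument
 * list into the function it generates. */
#define DEFINE_HANDLER(name, proto, args)		\
	static void name(proto)				\
	{						\
		printf("cpu=%d prio=%d\n", args);	\
	}

/* Without PARAMS(), "int cpu, int prio" would be parsed as two
 * separate macro arguments and the expansion would not compile. */
DEFINE_HANDLER(show_wakeup, PARAMS(int cpu, int prio), PARAMS(cpu, prio))

int main(void)
{
	show_wakeup(1, 120);
	return 0;
}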
@@ -131,130 +132,6 @@
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
 /*
- * Setup the showing format of trace point.
- *
- * int
- * ftrace_format_##call(struct trace_seq *s)
- * {
- *	struct ftrace_raw_##call field;
- *	int ret;
- *
- *	ret = trace_seq_printf(s, #type " " #item ";"
- *			       " offset:%u; size:%u;\n",
- *			       offsetof(struct ftrace_raw_##call, item),
- *			       sizeof(field.type));
- *
- * }
- */
-
-#undef TP_STRUCT__entry
-#define TP_STRUCT__entry(args...) args
-
-#undef __field
-#define __field(type, item)						\
-	ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t"	\
-			       "offset:%u;\tsize:%u;\tsigned:%u;\n",	\
-			       (unsigned int)offsetof(typeof(field), item), \
-			       (unsigned int)sizeof(field.item),	\
-			       (unsigned int)is_signed_type(type));	\
-	if (!ret)							\
-		return 0;
-
-#undef __field_ext
-#define __field_ext(type, item, filter_type)	__field(type, item)
-
-#undef __array
-#define __array(type, item, len)					\
-	ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
-			       "offset:%u;\tsize:%u;\tsigned:%u;\n",	\
-			       (unsigned int)offsetof(typeof(field), item), \
-			       (unsigned int)sizeof(field.item),	\
-			       (unsigned int)is_signed_type(type));	\
-	if (!ret)							\
-		return 0;
-
-#undef __dynamic_array
-#define __dynamic_array(type, item, len)				\
-	ret = trace_seq_printf(s, "\tfield:__data_loc " #type "[] " #item ";\t"\
-			       "offset:%u;\tsize:%u;\tsigned:%u;\n",	\
-			       (unsigned int)offsetof(typeof(field),	\
-					__data_loc_##item),		\
-			       (unsigned int)sizeof(field.__data_loc_##item), \
-			       (unsigned int)is_signed_type(type));	\
-	if (!ret)							\
-		return 0;
-
-#undef __string
-#define __string(item, src) __dynamic_array(char, item, -1)
-
-#undef __entry
-#define __entry REC
-
-#undef __print_symbolic
-#undef __get_dynamic_array
-#undef __get_str
-
-#undef TP_printk
-#define TP_printk(fmt, args...) "\"%s\", %s\n", fmt, __stringify(args)
-
-#undef TP_fast_assign
-#define TP_fast_assign(args...) args
-
-#undef TP_perf_assign
-#define TP_perf_assign(args...)
-
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print)	\
-static int								\
-ftrace_format_setup_##call(struct ftrace_event_call *unused,		\
-			   struct trace_seq *s)				\
-{									\
-	struct ftrace_raw_##call field __attribute__((unused));	\
-	int ret = 0;							\
-									\
-	tstruct;							\
-									\
-	return ret;							\
-}									\
-									\
-static int								\
-ftrace_format_##call(struct ftrace_event_call *unused,			\
-		     struct trace_seq *s)				\
-{									\
-	int ret = 0;							\
-									\
-	ret = ftrace_format_setup_##call(unused, s);			\
-	if (!ret)							\
-		return ret;						\
-									\
-	ret = trace_seq_printf(s, "\nprint fmt: " print);		\
-									\
-	return ret;							\
-}
-
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, name, proto, args)
-
-#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print)		\
-static int								\
-ftrace_format_##name(struct ftrace_event_call *unused,			\
-		     struct trace_seq *s)				\
-{									\
-	int ret = 0;							\
-									\
-	ret = ftrace_format_setup_##template(unused, s);		\
-	if (!ret)							\
-		return ret;						\
-									\
-	trace_seq_printf(s, "\nprint fmt: " print);			\
-									\
-	return ret;							\
-}
-
-#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-
-/*
  * Stage 3 of the trace events.
  *
  * Override the macros in <trace/trace_events.h> to include the following:
@@ -265,22 +142,22 @@ ftrace_format_##name(struct ftrace_event_call *unused,		\
  *	struct trace_seq *s = &iter->seq;
  *	struct ftrace_raw_<call> *field;	<-- defined in stage 1
  *	struct trace_entry *entry;
- *	struct trace_seq *p;
+ *	struct trace_seq *p = &iter->tmp_seq;
  *	int ret;
  *
  *	entry = iter->ent;
  *
- *	if (entry->type != event_<call>.id) {
+ *	if (entry->type != event_<call>->event.type) {
  *		WARN_ON_ONCE(1);
  *		return TRACE_TYPE_UNHANDLED;
  *	}
  *
  *	field = (typeof(field))entry;
  *
- *	p = get_cpu_var(ftrace_event_seq);
  *	trace_seq_init(p);
- *	ret = trace_seq_printf(s, <TP_printk> "\n");
- *	put_cpu();
+ *	ret = trace_seq_printf(s, "%s: ", <call>);
+ *	if (ret)
+ *		ret = trace_seq_printf(s, <TP_printk> "\n");
  *	if (!ret)
  *		return TRACE_TYPE_PARTIAL_LINE;
  *
@@ -321,79 +198,80 @@ ftrace_format_##name(struct ftrace_event_call *unused,		\
 		ftrace_print_symbols_seq(p, value, symbols);		\
 	})
 
+#undef __print_hex
+#define __print_hex(buf, buf_len) ftrace_print_hex_seq(p, buf, buf_len)
+
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
-static enum print_line_t						\
-ftrace_raw_output_id_##call(int event_id, const char *name,		\
-			    struct trace_iterator *iter, int flags)	\
+static notrace enum print_line_t					\
+ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
+			 struct trace_event *trace_event)		\
 {									\
+	struct ftrace_event_call *event;				\
 	struct trace_seq *s = &iter->seq;				\
 	struct ftrace_raw_##call *field;				\
 	struct trace_entry *entry;					\
-	struct trace_seq *p;						\
+	struct trace_seq *p = &iter->tmp_seq;				\
 	int ret;							\
 									\
+	event = container_of(trace_event, struct ftrace_event_call,	\
+			     event);					\
+									\
 	entry = iter->ent;						\
 									\
-	if (entry->type != event_id) {					\
+	if (entry->type != event->event.type) {				\
 		WARN_ON_ONCE(1);					\
 		return TRACE_TYPE_UNHANDLED;				\
 	}								\
 									\
 	field = (typeof(field))entry;					\
 									\
-	p = &get_cpu_var(ftrace_event_seq);				\
 	trace_seq_init(p);						\
-	ret = trace_seq_printf(s, "%s: ", name);			\
+	ret = trace_seq_printf(s, "%s: ", event->name);			\
 	if (ret)							\
 		ret = trace_seq_printf(s, print);			\
-	put_cpu();							\
 	if (!ret)							\
 		return TRACE_TYPE_PARTIAL_LINE;				\
 									\
 	return TRACE_TYPE_HANDLED;					\
-}
-
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, name, proto, args)			\
-static enum print_line_t						\
-ftrace_raw_output_##name(struct trace_iterator *iter, int flags)	\
-{									\
-	return ftrace_raw_output_id_##template(event_##name.id,	\
-					       #name, iter, flags);	\
-}
+}									\
+static struct trace_event_functions ftrace_event_type_funcs_##call = {	\
+	.trace			= ftrace_raw_output_##call,		\
+};
 
 #undef DEFINE_EVENT_PRINT
 #define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
-static enum print_line_t						\
-ftrace_raw_output_##call(struct trace_iterator *iter, int flags)	\
+static notrace enum print_line_t					\
+ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
+			 struct trace_event *event)			\
 {									\
 	struct trace_seq *s = &iter->seq;				\
 	struct ftrace_raw_##template *field;				\
 	struct trace_entry *entry;					\
-	struct trace_seq *p;						\
+	struct trace_seq *p = &iter->tmp_seq;				\
 	int ret;							\
 									\
 	entry = iter->ent;						\
 									\
-	if (entry->type != event_##call.id) {				\
+	if (entry->type != event_##call.event.type) {			\
 		WARN_ON_ONCE(1);					\
 		return TRACE_TYPE_UNHANDLED;				\
 	}								\
 									\
 	field = (typeof(field))entry;					\
 									\
-	p = &get_cpu_var(ftrace_event_seq);				\
 	trace_seq_init(p);						\
 	ret = trace_seq_printf(s, "%s: ", #call);			\
 	if (ret)							\
 		ret = trace_seq_printf(s, print);			\
-	put_cpu();							\
 	if (!ret)							\
 		return TRACE_TYPE_PARTIAL_LINE;				\
 									\
 	return TRACE_TYPE_HANDLED;					\
-}
+}									\
+static struct trace_event_functions ftrace_event_type_funcs_##call = {	\
+	.trace			= ftrace_raw_output_##call,		\
+};
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
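The rewritten stage-3 callback above is handed only the struct trace_event embedded in the call, and recovers the enclosing struct ftrace_event_call with container_of(). A self-contained sketch of that pattern, using reduced stand-ins for the kernel structures (the real ones live in <linux/ftrace_event.h>):

#include <stddef.h>
#include <stdio.h>

struct trace_event {
	int type;
};

struct event_call {
	const char *name;
	struct trace_event event;	/* embedded, as in ftrace_event_call */
};

/* container_of(): step back from a member to the structure embedding it. */
#define container_of(ptr, type, member)	\
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Like the stage-3 callback: given only &call->event, it can still
 * reach the name of the call that embeds it. */
static void print_event(struct trace_event *ev)
{
	struct event_call *call = container_of(ev, struct event_call, event);

	printf("%s: type %d\n", call->name, ev->type);
}

int main(void)
{
	struct event_call call = { .name = "sched_wakeup", .event = { .type = 42 } };

	print_event(&call.event);
	return 0;
}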
@@ -431,7 +309,7 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags)	\
 
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print)	\
-static int								\
+static int notrace							\
 ftrace_define_fields_##call(struct ftrace_event_call *event_call)	\
 {									\
 	struct ftrace_raw_##call field;					\
@@ -479,7 +357,7 @@ ftrace_define_fields_##call(struct ftrace_event_call *event_call)	\
 
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
-static inline int ftrace_get_offsets_##call(				\
+static inline notrace int ftrace_get_offsets_##call(			\
 	struct ftrace_data_offsets_##call *__data_offsets, proto)	\
 {									\
 	int __data_size = 0;						\
@@ -499,143 +377,86 @@ static inline int ftrace_get_offsets_##call(			\
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-#ifdef CONFIG_EVENT_PROFILE
-
-/*
- * Generate the functions needed for tracepoint perf_event support.
- *
- * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
- *
- * static int ftrace_profile_enable_<call>(void)
- * {
- *	return register_trace_<call>(ftrace_profile_<call>);
- * }
- *
- * static void ftrace_profile_disable_<call>(void)
- * {
- *	unregister_trace_<call>(ftrace_profile_<call>);
- * }
- *
- */
-
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)
-
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, name, proto, args)			\
-									\
-static void ftrace_profile_##name(proto);				\
-									\
-static int ftrace_profile_enable_##name(struct ftrace_event_call *unused)\
-{									\
-	return register_trace_##name(ftrace_profile_##name);		\
-}									\
-									\
-static void ftrace_profile_disable_##name(struct ftrace_event_call *unused)\
-{									\
-	unregister_trace_##name(ftrace_profile_##name);			\
-}
-
-#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
-	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
-
-#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-
-#endif
-
 /*
  * Stage 4 of the trace events.
  *
  * Override the macros in <trace/trace_events.h> to include the following:
  *
- * static void ftrace_event_<call>(proto)
- * {
- *	event_trace_printk(_RET_IP_, "<call>: " <fmt>);
- * }
- *
- * static int ftrace_reg_event_<call>(struct ftrace_event_call *unused)
- * {
- *	return register_trace_<call>(ftrace_event_<call>);
- * }
- *
- * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
- * {
- *	unregister_trace_<call>(ftrace_event_<call>);
- * }
- *
- *
  * For those macros defined with TRACE_EVENT:
  *
  * static struct ftrace_event_call event_<call>;
  *
- * static void ftrace_raw_event_<call>(proto)
+ * static void ftrace_raw_event_<call>(void *__data, proto)
  * {
+ *	struct ftrace_event_call *event_call = __data;
+ *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
  *	struct ring_buffer_event *event;
  *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
  *	struct ring_buffer *buffer;
  *	unsigned long irq_flags;
+ *	int __data_size;
  *	int pc;
  *
  *	local_save_flags(irq_flags);
  *	pc = preempt_count();
  *
+ *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
+ *
  *	event = trace_current_buffer_lock_reserve(&buffer,
- *				  event_<call>.id,
- *				  sizeof(struct ftrace_raw_<call>),
+ *				  event_<call>->event.type,
+ *				  sizeof(*entry) + __data_size,
  *				  irq_flags, pc);
  *	if (!event)
  *		return;
  *	entry = ring_buffer_event_data(event);
  *
- *	<assign>;  <-- Here we assign the entries by the __field and
+ *	{ <assign>; }  <-- Here we assign the entries by the __field and
  *		   __array macros.
- *
- *	trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);
- * }
- *
- * static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused)
- * {
- *	int ret;
- *
- *	ret = register_trace_<call>(ftrace_raw_event_<call>);
- *	if (!ret)
- *		pr_info("event trace: Could not activate trace point "
- *			"probe to <call>");
- *	return ret;
- * }
  *
- * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
- * {
- *	unregister_trace_<call>(ftrace_raw_event_<call>);
+ *	if (!filter_current_check_discard(buffer, event_call, entry, event))
+ *		trace_current_buffer_unlock_commit(buffer,
+ *						   event, irq_flags, pc);
  * }
  *
  * static struct trace_event ftrace_event_type_<call> = {
  *	.trace			= ftrace_raw_output_<call>, <-- stage 2
  * };
  *
+ * static const char print_fmt_<call>[] = <TP_printk>;
+ *
+ * static struct ftrace_event_class __used event_class_<template> = {
+ *	.system			= "<system>",
+ *	.define_fields		= ftrace_define_fields_<call>,
+ *	.fields			= LIST_HEAD_INIT(event_class_##call.fields),
+ *	.raw_init		= trace_event_raw_init,
+ *	.probe			= ftrace_raw_event_##call,
+ *	.reg			= ftrace_event_reg,
+ * };
+ *
  * static struct ftrace_event_call __used
  * __attribute__((__aligned__(4)))
  * __attribute__((section("_ftrace_events"))) event_<call> = {
  *	.name			= "<call>",
- *	.system			= "<system>",
- *	.raw_init		= trace_event_raw_init,
- *	.regfunc		= ftrace_reg_event_<call>,
- *	.unregfunc		= ftrace_unreg_event_<call>,
- *	.show_format		= ftrace_format_<call>,
- * }
+ *	.class			= event_class_<template>,
+ *	.event			= &ftrace_event_type_<call>,
+ *	.print_fmt		= print_fmt_<call>,
+ * };
  *
  */
 
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
+
+#define _TRACE_PERF_PROTO(call, proto)					\
+	static notrace void						\
+	perf_trace_##call(void *__data, proto);
 
-#define _TRACE_PROFILE_INIT(call)					\
-	.profile_enable = ftrace_profile_enable_##call,			\
-	.profile_disable = ftrace_profile_disable_##call,
+#define _TRACE_PERF_INIT(call)						\
+	.perf_probe		= perf_trace_##call,
 
 #else
-#define _TRACE_PROFILE_INIT(call)
-#endif
+#define _TRACE_PERF_PROTO(call, proto)
+#define _TRACE_PERF_INIT(call)
+#endif /* CONFIG_PERF_EVENTS */
 
 #undef __entry
 #define __entry entry
@@ -657,12 +478,19 @@ static void ftrace_profile_disable_##name(struct ftrace_event_call *unused)\
 #define __assign_str(dst, src)						\
 	strcpy(__get_str(dst), src);
 
+#undef TP_fast_assign
+#define TP_fast_assign(args...) args
+
+#undef TP_perf_assign
+#define TP_perf_assign(args...)
+
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
 									\
-static void ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
-				       proto)				\
+static notrace void							\
+ftrace_raw_event_##call(void *__data, proto)				\
 {									\
+	struct ftrace_event_call *event_call = __data;			\
 	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
 	struct ring_buffer_event *event;				\
 	struct ftrace_raw_##call *entry;				\
@@ -677,14 +505,13 @@ static void ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
 	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
 									\
 	event = trace_current_buffer_lock_reserve(&buffer,		\
-				 event_call->id,			\
+				 event_call->event.type,		\
 				 sizeof(*entry) + __data_size,		\
 				 irq_flags, pc);			\
 	if (!event)							\
 		return;							\
 	entry = ring_buffer_event_data(event);				\
 									\
-									\
 	tstruct								\
 									\
 	{ assign; }							\
@@ -693,37 +520,48 @@ static void ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
 	trace_nowake_buffer_unlock_commit(buffer,			\
 					  event, irq_flags, pc);	\
 }
+/*
+ * The ftrace_test_probe is compiled out, it is only here as a build time check
+ * to make sure that if the tracepoint handling changes, the ftrace probe will
+ * fail to compile unless it too is updated.
+ */
 
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, call, proto, args)			\
-									\
-static void ftrace_raw_event_##call(proto)				\
+static inline void ftrace_test_probe_##call(void)			\
 {									\
-	ftrace_raw_event_id_##template(&event_##call, args);		\
-}									\
-									\
-static int ftrace_raw_reg_event_##call(struct ftrace_event_call *unused)\
-{									\
-	return register_trace_##call(ftrace_raw_event_##call);		\
-}									\
-									\
-static void ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused)\
-{									\
-	unregister_trace_##call(ftrace_raw_event_##call);		\
-}									\
-									\
-static struct trace_event ftrace_event_type_##call = {			\
-	.trace			= ftrace_raw_output_##call,		\
-};
+	check_trace_callback_type_##call(ftrace_raw_event_##template);	\
+}
 
 #undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
-	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print)
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
+#undef __entry
+#define __entry REC
+
+#undef __print_flags
+#undef __print_symbolic
+#undef __get_dynamic_array
+#undef __get_str
+
+#undef TP_printk
+#define TP_printk(fmt, args...) "\"" fmt "\", "  __stringify(args)
+
 #undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
+_TRACE_PERF_PROTO(call, PARAMS(proto));					\
+static const char print_fmt_##call[] = print;				\
+static struct ftrace_event_class __used event_class_##call = {		\
+	.system			= __stringify(TRACE_SYSTEM),		\
+	.define_fields		= ftrace_define_fields_##call,		\
+	.fields			= LIST_HEAD_INIT(event_class_##call.fields),\
+	.raw_init		= trace_event_raw_init,			\
+	.probe			= ftrace_raw_event_##call,		\
+	.reg			= ftrace_event_reg,			\
+	_TRACE_PERF_INIT(call)						\
+};
 
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, call, proto, args)			\
@@ -732,42 +570,34 @@ static struct ftrace_event_call __used				\
 __attribute__((__aligned__(4)))						\
 __attribute__((section("_ftrace_events"))) event_##call = {		\
 	.name			= #call,				\
-	.system			= __stringify(TRACE_SYSTEM),		\
-	.event			= &ftrace_event_type_##call,		\
-	.raw_init		= trace_event_raw_init,			\
-	.regfunc		= ftrace_raw_reg_event_##call,		\
-	.unregfunc		= ftrace_raw_unreg_event_##call,	\
-	.show_format		= ftrace_format_##template,		\
-	.define_fields		= ftrace_define_fields_##template,	\
-	_TRACE_PROFILE_INIT(call)					\
-}
+	.class			= &event_class_##template,		\
+	.event.funcs		= &ftrace_event_type_funcs_##template,	\
+	.print_fmt		= print_fmt_##template,			\
+};
 
 #undef DEFINE_EVENT_PRINT
 #define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
 									\
+static const char print_fmt_##call[] = print;				\
+									\
 static struct ftrace_event_call __used					\
 __attribute__((__aligned__(4)))						\
 __attribute__((section("_ftrace_events"))) event_##call = {		\
 	.name			= #call,				\
-	.system			= __stringify(TRACE_SYSTEM),		\
-	.event			= &ftrace_event_type_##call,		\
-	.raw_init		= trace_event_raw_init,			\
-	.regfunc		= ftrace_raw_reg_event_##call,		\
-	.unregfunc		= ftrace_raw_unreg_event_##call,	\
-	.show_format		= ftrace_format_##call,			\
-	.define_fields		= ftrace_define_fields_##template,	\
-	_TRACE_PROFILE_INIT(call)					\
+	.class			= &event_class_##template,		\
+	.event.funcs		= &ftrace_event_type_funcs_##call,	\
+	.print_fmt		= print_fmt_##call,			\
 }
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
 /*
- * Define the insertion callback to profile events
+ * Define the insertion callback to perf events
  *
  * The job is very similar to ftrace_raw_event_<call> except that we don't
  * insert in the ring buffer but in a perf counter.
  *
- * static void ftrace_profile_<call>(proto)
+ * static void ftrace_perf_<call>(proto)
  * {
  *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
  *	struct ftrace_event_call *event_call = &event_<call>;
@@ -798,9 +628,9 @@ __attribute__((section("_ftrace_events"))) event_##call = {		\
  *	__cpu = smp_processor_id();
  *
  *	if (in_nmi())
- *		trace_buf = rcu_dereference(perf_trace_buf_nmi);
+ *		trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
  *	else
- *		trace_buf = rcu_dereference(perf_trace_buf);
+ *		trace_buf = rcu_dereference_sched(perf_trace_buf);
  *
  *	if (!trace_buf)
  *		goto end;
@@ -835,7 +665,17 @@ __attribute__((section("_ftrace_events"))) event_##call = {		\
  * }
  */
 
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
+
+#undef __entry
+#define __entry entry
+
+#undef __get_dynamic_array
+#define __get_dynamic_array(field)	\
+		((void *)__entry + (__entry->__data_loc_##field & 0xffff))
+
+#undef __get_str
+#define __get_str(field) (char *)__get_dynamic_array(field)
 
 #undef __perf_addr
 #define __perf_addr(a) __addr = (a)
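The __get_dynamic_array() definition added above relies on the trace events data_loc convention: a dynamic field is recorded as one 32-bit word whose low 16 bits hold the payload's offset from the start of the entry and whose high 16 bits hold its length. A sketch of the arithmetic with a hypothetical sample_entry standing in for a generated ftrace_raw_<call>:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct sample_entry {
	uint32_t __data_loc_msg;	/* stand-in for "__data_loc char[] msg" */
	char __data[32];		/* variable-length payload area */
};

/* Same arithmetic as __get_dynamic_array()/__get_str(): entry base
 * plus the offset kept in the low 16 bits of the data_loc word. */
static char *get_str(struct sample_entry *entry)
{
	return (char *)entry + (entry->__data_loc_msg & 0xffff);
}

int main(void)
{
	struct sample_entry e;
	uint32_t off = offsetof(struct sample_entry, __data);
	uint32_t len = sizeof("hello");

	e.__data_loc_msg = (len << 16) | off;	/* length:16 | offset:16 */
	memcpy(e.__data, "hello", len);

	printf("%s (len %u)\n", get_str(&e), e.__data_loc_msg >> 16);
	return 0;
}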
@@ -845,89 +685,63 @@ __attribute__((section("_ftrace_events"))) event_##call = {		\
 
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
-static void								\
-ftrace_profile_templ_##call(struct ftrace_event_call *event_call,	\
-			    proto)					\
+static notrace void							\
+perf_trace_##call(void *__data, proto)					\
 {									\
+	struct ftrace_event_call *event_call = __data;			\
 	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
-	extern int perf_swevent_get_recursion_context(void);		\
-	extern void perf_swevent_put_recursion_context(int rctx);	\
-	extern void perf_tp_event(int, u64, u64, void *, int);		\
 	struct ftrace_raw_##call *entry;				\
+	struct pt_regs __regs;						\
 	u64 __addr = 0, __count = 1;					\
-	unsigned long irq_flags;					\
-	struct trace_entry *ent;					\
+	struct hlist_head *head;					\
 	int __entry_size;						\
 	int __data_size;						\
-	char *trace_buf;						\
-	char *raw_data;							\
-	int __cpu;							\
 	int rctx;							\
-	int pc;								\
 									\
-	pc = preempt_count();						\
+	perf_fetch_caller_regs(&__regs);				\
 									\
 	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
 	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
 			     sizeof(u64));				\
 	__entry_size -= sizeof(u32);					\
 									\
-	if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE,		\
+	if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE,		\
 		      "profile buffer not large enough"))		\
 		return;							\
 									\
-	local_irq_save(irq_flags);					\
-									\
-	rctx = perf_swevent_get_recursion_context();			\
-	if (rctx < 0)							\
-		goto end_recursion;					\
-									\
-	__cpu = smp_processor_id();					\
-									\
-	if (in_nmi())							\
-		trace_buf = rcu_dereference(perf_trace_buf_nmi);	\
-	else								\
-		trace_buf = rcu_dereference(perf_trace_buf);		\
-									\
-	if (!trace_buf)							\
-		goto end;						\
-									\
-	raw_data = per_cpu_ptr(trace_buf, __cpu);			\
-									\
-	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;		\
-	entry = (struct ftrace_raw_##call *)raw_data;			\
-	ent = &entry->ent;						\
-	tracing_generic_entry_update(ent, irq_flags, pc);		\
-	ent->type = event_call->id;					\
+	entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare(	\
+		__entry_size, event_call->event.type, &__regs, &rctx);	\
+	if (!entry)							\
+		return;							\
 									\
 	tstruct								\
 									\
 	{ assign; }							\
 									\
-	perf_tp_event(event_call->id, __addr, __count, entry,		\
-		      __entry_size);					\
-									\
-end:									\
-	perf_swevent_put_recursion_context(rctx);			\
-end_recursion:								\
-	local_irq_restore(irq_flags);					\
+	head = this_cpu_ptr(event_call->perf_events);			\
+	perf_trace_buf_submit(entry, __entry_size, rctx, __addr,	\
+		__count, &__regs, head);				\
 }
 
+/*
+ * This part is compiled out, it is only here as a build time check
+ * to make sure that if the tracepoint handling changes, the
+ * perf probe will fail to compile unless it too is updated.
+ */
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, call, proto, args)			\
-static void ftrace_profile_##call(proto)				\
+static inline void perf_test_probe_##call(void)				\
 {									\
-	struct ftrace_event_call *event_call = &event_##call;		\
-									\
-	ftrace_profile_templ_##template(event_call, args);		\
+	check_trace_callback_type_##call(perf_trace_##template);	\
 }
 
+
 #undef DEFINE_EVENT_PRINT
 #define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
 	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-#endif /* CONFIG_EVENT_PROFILE */
+#endif /* CONFIG_PERF_EVENTS */
 
-#undef _TRACE_PROFILE_INIT
+
 
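Taken together, the patch splits what used to be one self-contained ftrace_event_call per event into a shared ftrace_event_class per template plus a slim per-event call. A schematic sketch of the objects the rewritten macros emit, with reduced stand-in types and made-up foo/foo_bar names (the real types live in <linux/ftrace_event.h>):

/* Reduced stand-ins; only the fields relevant to the split are shown. */
struct ftrace_event_class {
	const char *system;
	void (*probe)(void);		/* ftrace_raw_event_<class> */
};

struct ftrace_event_call {
	const char *name;
	struct ftrace_event_class *class;
	const char *print_fmt;
};

static void ftrace_raw_event_foo(void)
{
	/* would reserve ring buffer space and fill in the entry */
}

/* DECLARE_EVENT_CLASS(foo, ...) boils down to roughly one of these,
 * shared by every event defined from the template: */
static struct ftrace_event_class event_class_foo = {
	.system	= "foo_subsys",
	.probe	= ftrace_raw_event_foo,
};

/* Each DEFINE_EVENT(foo, foo_bar, ...) then only emits a slim call
 * that points back at the class: */
static struct ftrace_event_call event_foo_bar = {
	.name		= "foo_bar",
	.class		= &event_class_foo,
	.print_fmt	= "\"bar=%d\", REC->bar",
};

This is where the 356-deletions/170-insertions diffstat comes from: the per-event registration functions, format callbacks, and struct fields deleted above are replaced by one class shared across all events of a template.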