author     Russell King <rmk+kernel@arm.linux.org.uk>   2010-08-06 13:13:54 -0400
committer  Russell King <rmk+kernel@arm.linux.org.uk>   2010-08-06 13:13:54 -0400
commit     11e4afb49b7fa1fc8e1ffd850c1806dd86a08204 (patch)
tree       9e57efcb106ae912f7bec718feb3f8ec607559bb /include/trace/ftrace.h
parent     162500b3a3ff39d941d29db49b41a16667ae44f0 (diff)
parent     9b2a606d3898fcb2eedb6faded3bb37549590ac4 (diff)
Merge branches 'gemini' and 'misc' into devel
Diffstat (limited to 'include/trace/ftrace.h')
-rw-r--r--  include/trace/ftrace.h | 284
1 file changed, 113 insertions(+), 171 deletions(-)
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index ea6f9d4a20e9..5a64905d7278 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -62,10 +62,13 @@
 	struct trace_entry ent;					\
 	tstruct							\
 	char __data[0];						\
-};
+};								\
+								\
+static struct ftrace_event_class event_class_##name;
+
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, name, proto, args)		\
-static struct ftrace_event_call				\
+static struct ftrace_event_call __used			\
 __attribute__((__aligned__(4))) event_##name
 
 #undef DEFINE_EVENT_PRINT
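
For orientation, a hand-expanded sketch of what stage 1 now emits for a hypothetical DECLARE_EVENT_CLASS(foo, ...). The types here are simplified user-space stand-ins, not the kernel's real structures:

    /* Model of the stage 1 expansion for a hypothetical class "foo";
     * the kernel's real types are simplified to keep this compilable. */
    struct trace_entry { unsigned short type; };
    struct ftrace_event_class { const char *system; };

    struct ftrace_raw_foo {
            struct trace_entry ent;
            int value;        /* whatever the event's tstruct declares */
            char __data[0];   /* dynamic arrays/strings land back here */
    };

    /* New in this change: the class object is forward-declared up front
     * so later stages and each DEFINE_EVENT instance can refer to it. */
    static struct ftrace_event_class event_class_foo;
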
@@ -147,16 +150,18 @@
  *
  *	entry = iter->ent;
  *
- *	if (entry->type != event_<call>.id) {
+ *	if (entry->type != event_<call>->event.type) {
  *		WARN_ON_ONCE(1);
  *		return TRACE_TYPE_UNHANDLED;
  *	}
  *
  *	field = (typeof(field))entry;
  *
- *	p = get_cpu_var(ftrace_event_seq);
+ *	p = &get_cpu_var(ftrace_event_seq);
  *	trace_seq_init(p);
- *	ret = trace_seq_printf(s, <TP_printk> "\n");
+ *	ret = trace_seq_printf(s, "%s: ", <call>);
+ *	if (ret)
+ *		ret = trace_seq_printf(s, <TP_printk> "\n");
  *	put_cpu();
  *	if (!ret)
  *		return TRACE_TYPE_PARTIAL_LINE;
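
The new two-step print documented above (event name first, then the TP_printk body, bailing out when the sequence buffer fills) can be modeled in self-contained user-space C; trace_seq here is a simplified stand-in for the kernel structure:

    #include <stdarg.h>
    #include <stdio.h>

    struct trace_seq { char buf[64]; int len; };

    static void trace_seq_init(struct trace_seq *s) { s->len = 0; }

    /* 1 on success, 0 when the line no longer fits
     * (the kernel then returns TRACE_TYPE_PARTIAL_LINE). */
    static int trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
    {
            va_list ap;
            int n;

            va_start(ap, fmt);
            n = vsnprintf(s->buf + s->len, sizeof(s->buf) - s->len, fmt, ap);
            va_end(ap);
            if (n < 0 || s->len + n >= (int)sizeof(s->buf))
                    return 0;
            s->len += n;
            return 1;
    }

    int main(void)
    {
            struct trace_seq seq;
            int ret;

            trace_seq_init(&seq);
            ret = trace_seq_printf(&seq, "%s: ", "sample_event"); /* "<call>: " */
            if (ret)
                    ret = trace_seq_printf(&seq, "value=%d\n", 42); /* <TP_printk> */
            fputs(ret ? seq.buf : "TRACE_TYPE_PARTIAL_LINE\n", stdout);
            return 0;
    }
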
@@ -198,21 +203,28 @@
 		ftrace_print_symbols_seq(p, value, symbols);		\
 	})
 
+#undef __print_hex
+#define __print_hex(buf, buf_len) ftrace_print_hex_seq(p, buf, buf_len)
+
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
 static notrace enum print_line_t					\
-ftrace_raw_output_id_##call(int event_id, const char *name,		\
-			    struct trace_iterator *iter, int flags)	\
+ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
+			 struct trace_event *trace_event)		\
 {									\
+	struct ftrace_event_call *event;				\
 	struct trace_seq *s = &iter->seq;				\
 	struct ftrace_raw_##call *field;				\
 	struct trace_entry *entry;					\
 	struct trace_seq *p;						\
 	int ret;							\
 									\
+	event = container_of(trace_event, struct ftrace_event_call,	\
+			     event);					\
+									\
 	entry = iter->ent;						\
 									\
-	if (entry->type != event_id) {					\
+	if (entry->type != event->event.type) {				\
 		WARN_ON_ONCE(1);					\
 		return TRACE_TYPE_UNHANDLED;				\
 	}								\
@@ -221,7 +233,7 @@ ftrace_raw_output_id_##call(int event_id, const char *name, \
 									\
 	p = &get_cpu_var(ftrace_event_seq);				\
 	trace_seq_init(p);						\
-	ret = trace_seq_printf(s, "%s: ", name);			\
+	ret = trace_seq_printf(s, "%s: ", event->name);			\
 	if (ret)							\
 		ret = trace_seq_printf(s, print);			\
 	put_cpu();							\
@@ -229,21 +241,16 @@ ftrace_raw_output_id_##call(int event_id, const char *name, \
 		return TRACE_TYPE_PARTIAL_LINE;				\
 									\
 	return TRACE_TYPE_HANDLED;					\
-}
-
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, name, proto, args)			\
-static notrace enum print_line_t					\
-ftrace_raw_output_##name(struct trace_iterator *iter, int flags)	\
-{									\
-	return ftrace_raw_output_id_##template(event_##name.id,	\
-					       #name, iter, flags);	\
-}
+}									\
+static struct trace_event_functions ftrace_event_type_funcs_##call = {	\
+	.trace			= ftrace_raw_output_##call,		\
+};
 
 #undef DEFINE_EVENT_PRINT
 #define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
 static notrace enum print_line_t					\
-ftrace_raw_output_##call(struct trace_iterator *iter, int flags)	\
+ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
+			 struct trace_event *event)			\
 {									\
 	struct trace_seq *s = &iter->seq;				\
 	struct ftrace_raw_##template *field;				\
@@ -253,7 +260,7 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
 									\
 	entry = iter->ent;						\
 									\
-	if (entry->type != event_##call.id) {				\
+	if (entry->type != event_##call.event.type) {			\
 		WARN_ON_ONCE(1);					\
 		return TRACE_TYPE_UNHANDLED;				\
 	}								\
@@ -270,7 +277,10 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
 		return TRACE_TYPE_PARTIAL_LINE;				\
 									\
 	return TRACE_TYPE_HANDLED;					\
-}
+}									\
+static struct trace_event_functions ftrace_event_type_funcs_##call = {	\
+	.trace			= ftrace_raw_output_##call,		\
+};
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
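
The container_of() step in the rewritten ftrace_raw_output_##call is the heart of the new output path: the trace core hands the callback the struct trace_event embedded in an ftrace_event_call, and the callback recovers the owning event from it. A self-contained model with simplified types:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct trace_event { int type; };

    struct ftrace_event_call {
            const char *name;
            struct trace_event event;   /* embedded, as in this change */
    };

    int main(void)
    {
            struct ftrace_event_call call = {
                    .name = "sample_event", .event = { .type = 42 },
            };
            /* The core passes only the embedded member... */
            struct trace_event *ev = &call.event;
            /* ...and the callback walks back to the owning event. */
            struct ftrace_event_call *owner =
                    container_of(ev, struct ftrace_event_call, event);

            printf("%s (type %d)\n", owner->name, owner->event.type);
            return 0;
    }
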
@@ -376,142 +386,83 @@ static inline notrace int ftrace_get_offsets_##call( \
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-#ifdef CONFIG_PERF_EVENTS
-
-/*
- * Generate the functions needed for tracepoint perf_event support.
- *
- * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
- *
- * static int ftrace_profile_enable_<call>(void)
- * {
- *	return register_trace_<call>(ftrace_profile_<call>);
- * }
- *
- * static void ftrace_profile_disable_<call>(void)
- * {
- *	unregister_trace_<call>(ftrace_profile_<call>);
- * }
- *
- */
-
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)
-
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, name, proto, args)			\
-									\
-static void perf_trace_##name(proto);					\
-									\
-static notrace int							\
-perf_trace_enable_##name(struct ftrace_event_call *unused)		\
-{									\
-	return register_trace_##name(perf_trace_##name);		\
-}									\
-									\
-static notrace void							\
-perf_trace_disable_##name(struct ftrace_event_call *unused)		\
-{									\
-	unregister_trace_##name(perf_trace_##name);			\
-}
-
-#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print)		\
-	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
-
-#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-
-#endif /* CONFIG_PERF_EVENTS */
-
 /*
  * Stage 4 of the trace events.
  *
  * Override the macros in <trace/trace_events.h> to include the following:
  *
- * static void ftrace_event_<call>(proto)
- * {
- *	event_trace_printk(_RET_IP_, "<call>: " <fmt>);
- * }
- *
- * static int ftrace_reg_event_<call>(struct ftrace_event_call *unused)
- * {
- *	return register_trace_<call>(ftrace_event_<call>);
- * }
- *
- * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
- * {
- *	unregister_trace_<call>(ftrace_event_<call>);
- * }
- *
- *
 * For those macros defined with TRACE_EVENT:
  *
  * static struct ftrace_event_call event_<call>;
  *
- * static void ftrace_raw_event_<call>(proto)
+ * static void ftrace_raw_event_<call>(void *__data, proto)
  * {
+ *	struct ftrace_event_call *event_call = __data;
+ *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
  *	struct ring_buffer_event *event;
  *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
  *	struct ring_buffer *buffer;
  *	unsigned long irq_flags;
+ *	int __data_size;
  *	int pc;
  *
  *	local_save_flags(irq_flags);
  *	pc = preempt_count();
  *
+ *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
+ *
  *	event = trace_current_buffer_lock_reserve(&buffer,
- *				  event_<call>.id,
- *				  sizeof(struct ftrace_raw_<call>),
+ *				  event_<call>->event.type,
+ *				  sizeof(*entry) + __data_size,
  *				  irq_flags, pc);
  *	if (!event)
 *		return;
  *	entry = ring_buffer_event_data(event);
  *
- *	<assign>;  <-- Here we assign the entries by the __field and
+ *	{ <assign>; } <-- Here we assign the entries by the __field and
  *	           __array macros.
- *
- *	trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);
- * }
- *
- * static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused)
- * {
- *	int ret;
- *
- *	ret = register_trace_<call>(ftrace_raw_event_<call>);
- *	if (!ret)
- *		pr_info("event trace: Could not activate trace point "
- *			"probe to <call>");
- *	return ret;
- * }
  *
- * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
- * {
- *	unregister_trace_<call>(ftrace_raw_event_<call>);
+ *	if (!filter_current_check_discard(buffer, event_call, entry, event))
+ *		trace_current_buffer_unlock_commit(buffer,
+ *						   event, irq_flags, pc);
  * }
  *
  * static struct trace_event ftrace_event_type_<call> = {
  *	.trace			= ftrace_raw_output_<call>, <-- stage 2
  * };
  *
+ * static const char print_fmt_<call>[] = <TP_printk>;
+ *
+ * static struct ftrace_event_class __used event_class_<template> = {
+ *	.system			= "<system>",
+ *	.define_fields		= ftrace_define_fields_<call>,
+ *	.fields			= LIST_HEAD_INIT(event_class_##call.fields),
+ *	.raw_init		= trace_event_raw_init,
+ *	.probe			= ftrace_raw_event_##call,
+ * };
+ *
  * static struct ftrace_event_call __used
  * __attribute__((__aligned__(4)))
  * __attribute__((section("_ftrace_events"))) event_<call> = {
  *	.name			= "<call>",
- *	.system			= "<system>",
- *	.raw_init		= trace_event_raw_init,
- *	.regfunc		= ftrace_reg_event_<call>,
- *	.unregfunc		= ftrace_unreg_event_<call>,
- * }
+ *	.class			= event_class_<template>,
+ *	.event			= &ftrace_event_type_<call>,
+ *	.print_fmt		= print_fmt_<call>,
+ * };
  *
  */
 
 #ifdef CONFIG_PERF_EVENTS
 
+#define _TRACE_PERF_PROTO(call, proto)					\
+static notrace void							\
+perf_trace_##call(void *__data, proto);
+
 #define _TRACE_PERF_INIT(call)						\
-	.perf_event_enable = perf_trace_enable_##call,			\
-	.perf_event_disable = perf_trace_disable_##call,
+	.perf_probe		= perf_trace_##call,
 
 #else
+#define _TRACE_PERF_PROTO(call, proto)
 #define _TRACE_PERF_INIT(call)
 #endif /* CONFIG_PERF_EVENTS */
 
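
The updated stage 4 comment above documents the central change of this series: probes now receive a void *__data first argument (the ftrace_event_call they were registered with), so a single probe generated for the class can serve every event defined from it. A user-space model of that calling convention, with illustrative names only:

    #include <stdio.h>

    struct ftrace_event_call { const char *name; };

    /* One shared class probe; the first argument is whatever pointer the
     * probe was registered with (here, the per-event ftrace_event_call). */
    static void ftrace_raw_event_sample(void *__data, int value)
    {
            struct ftrace_event_call *event_call = __data;
            printf("%s: value=%d\n", event_call->name, value);
    }

    int main(void)
    {
            struct ftrace_event_call foo = { .name = "foo" };
            struct ftrace_event_call bar = { .name = "bar" };

            /* The kernel registers (probe, data) pairs on the tracepoint;
             * here we simply invoke the probe the way the core would. */
            ftrace_raw_event_sample(&foo, 1);
            ftrace_raw_event_sample(&bar, 2);
            return 0;
    }
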
@@ -545,9 +496,9 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
 									\
 static notrace void							\
-ftrace_raw_event_id_##call(struct ftrace_event_call *event_call,	\
-			   proto)					\
+ftrace_raw_event_##call(void *__data, proto)				\
 {									\
+	struct ftrace_event_call *event_call = __data;			\
 	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
 	struct ring_buffer_event *event;				\
 	struct ftrace_raw_##call *entry;				\
@@ -562,14 +513,13 @@ ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
 	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
 									\
 	event = trace_current_buffer_lock_reserve(&buffer,		\
-				 event_call->id,			\
+				 event_call->event.type,		\
 				 sizeof(*entry) + __data_size,		\
 				 irq_flags, pc);			\
 	if (!event)							\
 		return;							\
 	entry = ring_buffer_event_data(event);				\
 									\
-									\
 	tstruct								\
 									\
 	{ assign; }							\
@@ -578,34 +528,21 @@ ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
 	trace_nowake_buffer_unlock_commit(buffer,			\
 					  event, irq_flags, pc);	\
 }
+/*
+ * The ftrace_test_probe is compiled out, it is only here as a build time check
+ * to make sure that if the tracepoint handling changes, the ftrace probe will
+ * fail to compile unless it too is updated.
+ */
 
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, call, proto, args)			\
-									\
-static notrace void ftrace_raw_event_##call(proto)			\
+static inline void ftrace_test_probe_##call(void)			\
 {									\
-	ftrace_raw_event_id_##template(&event_##call, args);		\
-}									\
-									\
-static notrace int							\
-ftrace_raw_reg_event_##call(struct ftrace_event_call *unused)		\
-{									\
-	return register_trace_##call(ftrace_raw_event_##call);		\
-}									\
-									\
-static notrace void							\
-ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused)	\
-{									\
-	unregister_trace_##call(ftrace_raw_event_##call);		\
-}									\
-									\
-static struct trace_event ftrace_event_type_##call = {			\
-	.trace			= ftrace_raw_output_##call,		\
-};
+	check_trace_callback_type_##call(ftrace_raw_event_##template);	\
+}
 
 #undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print)		\
-	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print)
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
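
A note on the ftrace_test_probe_##call idiom introduced above: the inline is never called, but passing the class probe to a checker that takes the exact expected function-pointer type makes any future signature drift a compile error. A standalone model of the trick (sample names are invented):

    /* The expected probe signature for a hypothetical "sample" tracepoint. */
    typedef void (*sample_probe_t)(void *__data, int value);

    static void ftrace_raw_event_sample(void *__data, int value)
    {
            (void)__data; (void)value;
    }

    /* Accepts only the exact probe type; exists purely for the check. */
    static inline void check_trace_callback_type_sample(sample_probe_t cb)
    {
            (void)cb;
    }

    /* Compiled out in practice; breaks the build if the signatures drift. */
    static inline void ftrace_test_probe_sample(void)
    {
            check_trace_callback_type_sample(ftrace_raw_event_sample);
    }

    int main(void)
    {
            ftrace_test_probe_sample();
            return 0;
    }
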
@@ -622,7 +559,16 @@ static struct trace_event ftrace_event_type_##call = { \
 
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
-static const char print_fmt_##call[] = print;
+_TRACE_PERF_PROTO(call, PARAMS(proto));					\
+static const char print_fmt_##call[] = print;				\
+static struct ftrace_event_class __used event_class_##call = {		\
+	.system			= __stringify(TRACE_SYSTEM),		\
+	.define_fields		= ftrace_define_fields_##call,		\
+	.fields			= LIST_HEAD_INIT(event_class_##call.fields),\
+	.raw_init		= trace_event_raw_init,			\
+	.probe			= ftrace_raw_event_##call,		\
+	_TRACE_PERF_INIT(call)						\
+};
 
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, call, proto, args)			\
@@ -631,15 +577,10 @@ static struct ftrace_event_call __used \
 __attribute__((__aligned__(4)))						\
 __attribute__((section("_ftrace_events"))) event_##call = {		\
 	.name			= #call,				\
-	.system			= __stringify(TRACE_SYSTEM),		\
-	.event			= &ftrace_event_type_##call,		\
-	.raw_init		= trace_event_raw_init,			\
-	.regfunc		= ftrace_raw_reg_event_##call,		\
-	.unregfunc		= ftrace_raw_unreg_event_##call,	\
+	.class			= &event_class_##template,		\
+	.event.funcs		= &ftrace_event_type_funcs_##template,	\
 	.print_fmt		= print_fmt_##template,			\
-	.define_fields		= ftrace_define_fields_##template,	\
-	_TRACE_PERF_INIT(call)						\
-}
+};
 
 #undef DEFINE_EVENT_PRINT
 #define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
@@ -650,14 +591,9 @@ static struct ftrace_event_call __used \
 __attribute__((__aligned__(4)))						\
 __attribute__((section("_ftrace_events"))) event_##call = {		\
 	.name			= #call,				\
-	.system			= __stringify(TRACE_SYSTEM),		\
-	.event			= &ftrace_event_type_##call,		\
-	.raw_init		= trace_event_raw_init,			\
-	.regfunc		= ftrace_raw_reg_event_##call,		\
-	.unregfunc		= ftrace_raw_unreg_event_##call,	\
+	.class			= &event_class_##template,		\
+	.event.funcs		= &ftrace_event_type_funcs_##call,	\
 	.print_fmt		= print_fmt_##call,			\
-	.define_fields		= ftrace_define_fields_##template,	\
-	_TRACE_PERF_INIT(call)						\
 }
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
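
For context, the machinery patched here is driven by ordinary event definitions written against TRACE_EVENT(). A minimal, hypothetical trace header in the usual shape (kernel build context assumed; the sample event and its fields are invented):

    #undef TRACE_SYSTEM
    #define TRACE_SYSTEM sample

    #if !defined(_TRACE_SAMPLE_H) || defined(TRACE_HEADER_MULTI_READ)
    #define _TRACE_SAMPLE_H

    #include <linux/tracepoint.h>

    TRACE_EVENT(sample_event,

            TP_PROTO(int value),

            TP_ARGS(value),

            TP_STRUCT__entry(
                    __field(int, value)
            ),

            TP_fast_assign(
                    __entry->value = value;
            ),

            TP_printk("value=%d", __entry->value)
    );

    #endif /* _TRACE_SAMPLE_H */

    /* This part must be outside the protection */
    #include <trace/define_trace.h>
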
@@ -757,18 +693,20 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
 static notrace void							\
-perf_trace_templ_##call(struct ftrace_event_call *event_call,		\
-			proto)						\
+perf_trace_##call(void *__data, proto)					\
 {									\
+	struct ftrace_event_call *event_call = __data;			\
 	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
 	struct ftrace_raw_##call *entry;				\
+	struct pt_regs __regs;						\
 	u64 __addr = 0, __count = 1;					\
-	unsigned long irq_flags;					\
-	struct pt_regs *__regs;						\
+	struct hlist_head *head;					\
 	int __entry_size;						\
 	int __data_size;						\
 	int rctx;							\
 									\
+	perf_fetch_caller_regs(&__regs, 1);				\
+									\
 	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
 	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
 			     sizeof(u64));				\
@@ -777,30 +715,34 @@ perf_trace_templ_##call(struct ftrace_event_call *event_call, \
 	if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE,		\
 		      "profile buffer not large enough"))		\
 		return;							\
+									\
 	entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare(	\
-		__entry_size, event_call->id, &rctx, &irq_flags);	\
+		__entry_size, event_call->event.type, &__regs, &rctx);	\
 	if (!entry)							\
 		return;							\
+									\
 	tstruct								\
 									\
 	{ assign; }							\
 									\
-	__regs = &__get_cpu_var(perf_trace_regs);			\
-	perf_fetch_caller_regs(__regs, 2);				\
-									\
+	head = this_cpu_ptr(event_call->perf_events);			\
 	perf_trace_buf_submit(entry, __entry_size, rctx, __addr,	\
-		__count, irq_flags, __regs);				\
+		__count, &__regs, head);				\
 }
 
+/*
+ * This part is compiled out, it is only here as a build time check
+ * to make sure that if the tracepoint handling changes, the
+ * perf probe will fail to compile unless it too is updated.
+ */
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, call, proto, args)			\
-static notrace void perf_trace_##call(proto)				\
+static inline void perf_test_probe_##call(void)			\
 {									\
-	struct ftrace_event_call *event_call = &event_##call;		\
-									\
-	perf_trace_templ_##template(event_call, args);			\
+	check_trace_callback_type_##call(perf_trace_##template);	\
 }
 
+
 #undef DEFINE_EVENT_PRINT
 #define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
 	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
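
Finally, a note on the perf hunks above: the register snapshot moves from the shared per-CPU perf_trace_regs buffer to a struct pt_regs on the probe's own stack, captured before any other work so nested events cannot clobber it. A simplified user-space model of that ordering (stand-in types, not the kernel API):

    /* Stand-ins; the real pt_regs and fetch live in the kernel. */
    struct pt_regs { unsigned long ip, sp; };

    static void perf_fetch_caller_regs(struct pt_regs *regs, int skip)
    {
            (void)skip;             /* a real version skips 'skip' frames */
            regs->ip = regs->sp = 0;
    }

    static void perf_trace_sample(void *__data, int value)
    {
            struct pt_regs __regs;              /* on stack, per invocation */

            perf_fetch_caller_regs(&__regs, 1); /* snapshot before anything else */

            /* ...size the record, reserve the perf buffer, run the assign
             * block, then submit together with &__regs... */
            (void)__data; (void)value;
    }

    int main(void)
    {
            perf_trace_sample((void *)0, 42);
            return 0;
    }
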