author     Steven Rostedt <srostedt@redhat.com>    2010-04-21 12:27:06 -0400
committer  Steven Rostedt <rostedt@goodmis.org>    2010-05-14 14:19:14 -0400
commit     2239291aeb0379fe47980b0e560e0eb9fd7e82ec (patch)
tree       e75bb60ec24e6fd7137f01db48e0ea7e5b1eddd4 /include/trace/ftrace.h
parent     38516ab59fbc5b3bb278cf5e1fe2867c70cff32e (diff)
tracing: Remove per event trace registering
This patch removes the register functions that TRACE_EVENT() creates to enable and disable tracepoints. The registering of an event is now done directly in the trace_events.c file, and tracepoint_probe_register() is called directly. The prototypes are no longer type checked, but this should not be an issue since the tracepoints are created automatically by the macros. If a prototype is incorrect in the TRACE_EVENT() macro, other macros will catch it.

The ftrace_event_class structure now holds the probes to be called by the callbacks. This removes the need for each event to carry a separate pointer for its probe.

To handle kprobes and syscalls, which register probes in a different manner, a "reg" field is added to the ftrace_event_class structure. If the "reg" field is assigned, it is called to enable and disable the probe for either ftrace or perf. To let the reg function know what is happening, a new enum (trace_reg) is created that describes the type of control that is needed.

With this rework, the 82 kernel events and 618 syscall events have their footprint dramatically lowered:

   text    data     bss     dec     hex filename
4913961 1088356  861512 6863829  68bbd5 vmlinux.orig
4914025 1088868  861512 6864405  68be15 vmlinux.class
4918492 1084612  861512 6864616  68bee8 vmlinux.tracepoint
4900252 1057412  861512 6819176  680d68 vmlinux.regs

The size went from 6863829 to 6819176, a total savings of 44K. With tracepoints being added continuously, it is critical that their footprint stays minimal.

v5: Added #ifdef CONFIG_PERF_EVENTS around a reference to a perf-specific structure in trace_events.c.

v4: Fixed the trace self tests to check the probe, because regfunc no longer exists.

v3: Updated to handle void *data at the beginning of the probe parameters. Also added the tracepoint check_trace_callback_type_##call().

v2: Changed the callback probes to pass void * and typecast the value within the function.

Acked-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Acked-by: Masami Hiramatsu <mhiramat@redhat.com>
Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
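For reference, the class-based registration described above takes roughly the shape sketched below. This is an illustrative sketch only, not the exact upstream definition; member names beyond system, probe, perf_probe and reg are assumptions, and the real declarations live in include/linux/ftrace_event.h in this series.

    /* Illustrative sketch of the class-based registration. */
    struct ftrace_event_call;

    enum trace_reg {
            TRACE_REG_REGISTER,             /* ftrace enable  */
            TRACE_REG_UNREGISTER,           /* ftrace disable */
            TRACE_REG_PERF_REGISTER,        /* perf enable    */
            TRACE_REG_PERF_UNREGISTER,      /* perf disable   */
    };

    struct ftrace_event_class {
            const char      *system;
            void            *probe;         /* ftrace_raw_event_<class> */
    #ifdef CONFIG_PERF_EVENTS
            void            *perf_probe;    /* perf_trace_<class> */
    #endif
            /* kprobes and syscalls supply their own (un)register hook */
            int             (*reg)(struct ftrace_event_call *event,
                                   enum trace_reg type);
    };

When an event is enabled, the core code in trace_events.c then roughly either calls class->reg(call, TRACE_REG_REGISTER) when a reg callback is present, or registers class->probe through tracepoint_probe_register() directly, with the event call passed as the probe's void *data argument.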
Diffstat (limited to 'include/trace/ftrace.h')
-rw-r--r--    include/trace/ftrace.h    133
1 file changed, 38 insertions(+), 95 deletions(-)
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index ba28b644f41..26d132418f9 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -381,53 +381,6 @@ static inline notrace int ftrace_get_offsets_##call( \
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-#ifdef CONFIG_PERF_EVENTS
-
-/*
- * Generate the functions needed for tracepoint perf_event support.
- *
- * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
- *
- * static int ftrace_profile_enable_<call>(void)
- * {
- *         return register_trace_<call>(ftrace_profile_<call>);
- * }
- *
- * static void ftrace_profile_disable_<call>(void)
- * {
- *         unregister_trace_<call>(ftrace_profile_<call>);
- * }
- *
- */
-
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)
-
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, name, proto, args) \
- \
-static void perf_trace_##name(void *, proto); \
- \
-static notrace int \
-perf_trace_enable_##name(struct ftrace_event_call *unused) \
-{ \
-        return register_trace_##name(perf_trace_##name, NULL); \
-} \
- \
-static notrace void \
-perf_trace_disable_##name(struct ftrace_event_call *unused) \
-{ \
-        unregister_trace_##name(perf_trace_##name, NULL); \
-}
-
-#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
-        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
-
-#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-
-#endif /* CONFIG_PERF_EVENTS */
-
 /*
  * Stage 4 of the trace events.
  *
@@ -437,8 +390,9 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \
  *
  * static struct ftrace_event_call event_<call>;
  *
- * static void ftrace_raw_event_<call>(proto)
+ * static void ftrace_raw_event_<call>(void *__data, proto)
  * {
+ *         struct ftrace_event_call *event_call = __data;
  *         struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
  *         struct ring_buffer_event *event;
  *         struct ftrace_raw_<call> *entry; <-- defined in stage 1
@@ -468,16 +422,6 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \
  *                 event, irq_flags, pc);
  * }
  *
- * static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused)
- * {
- *         return register_trace_<call>(ftrace_raw_event_<call>);
- * }
- *
- * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
- * {
- *         unregister_trace_<call>(ftrace_raw_event_<call>);
- * }
- *
  * static struct trace_event ftrace_event_type_<call> = {
  *         .trace = ftrace_raw_output_<call>, <-- stage 2
  * };
@@ -504,11 +448,15 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \
 
 #ifdef CONFIG_PERF_EVENTS
 
+#define _TRACE_PERF_PROTO(call, proto) \
+        static notrace void \
+        perf_trace_##call(void *__data, proto);
+
 #define _TRACE_PERF_INIT(call) \
-        .perf_event_enable = perf_trace_enable_##call, \
-        .perf_event_disable = perf_trace_disable_##call,
+        .perf_probe = perf_trace_##call,
 
 #else
+#define _TRACE_PERF_PROTO(call, proto)
 #define _TRACE_PERF_INIT(call)
 #endif /* CONFIG_PERF_EVENTS */
 
@@ -542,9 +490,9 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
  \
 static notrace void \
-ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
-                           proto) \
+ftrace_raw_event_##call(void *__data, proto) \
 { \
+        struct ftrace_event_call *event_call = __data; \
         struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
         struct ring_buffer_event *event; \
         struct ftrace_raw_##call *entry; \
@@ -574,30 +522,23 @@ ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
         trace_nowake_buffer_unlock_commit(buffer, \
                                           event, irq_flags, pc); \
 }
+/*
+ * The ftrace_test_probe is compiled out, it is only here as a build time check
+ * to make sure that if the tracepoint handling changes, the ftrace probe will
+ * fail to compile unless it too is updated.
+ */
 
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, call, proto, args) \
  \
-static notrace void ftrace_raw_event_##call(void *__ignore, proto) \
-{ \
-        ftrace_raw_event_id_##template(&event_##call, args); \
-} \
- \
-static notrace int \
-ftrace_raw_reg_event_##call(struct ftrace_event_call *unused) \
-{ \
-        return register_trace_##call(ftrace_raw_event_##call, NULL); \
-} \
- \
-static notrace void \
-ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused) \
-{ \
-        unregister_trace_##call(ftrace_raw_event_##call, NULL); \
-} \
- \
 static struct trace_event ftrace_event_type_##call = { \
         .trace = ftrace_raw_output_##call, \
-};
+}; \
+ \
+static inline void ftrace_test_probe_##call(void) \
+{ \
+        check_trace_callback_type_##call(ftrace_raw_event_##template); \
+}
 
 #undef DEFINE_EVENT_PRINT
 #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
@@ -618,9 +559,12 @@ static struct trace_event ftrace_event_type_##call = { \
 
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+_TRACE_PERF_PROTO(call, PARAMS(proto)); \
 static const char print_fmt_##call[] = print; \
 static struct ftrace_event_class __used event_class_##call = { \
-        .system = __stringify(TRACE_SYSTEM) \
+        .system = __stringify(TRACE_SYSTEM), \
+        .probe = ftrace_raw_event_##call, \
+        _TRACE_PERF_INIT(call) \
 };
 
 #undef DEFINE_EVENT
@@ -633,11 +577,8 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
         .class = &event_class_##template, \
         .event = &ftrace_event_type_##call, \
         .raw_init = trace_event_raw_init, \
-        .regfunc = ftrace_raw_reg_event_##call, \
-        .unregfunc = ftrace_raw_unreg_event_##call, \
         .print_fmt = print_fmt_##template, \
         .define_fields = ftrace_define_fields_##template, \
-        _TRACE_PERF_INIT(call) \
 };
 
 #undef DEFINE_EVENT_PRINT
@@ -652,11 +593,8 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
         .class = &event_class_##template, \
         .event = &ftrace_event_type_##call, \
         .raw_init = trace_event_raw_init, \
-        .regfunc = ftrace_raw_reg_event_##call, \
-        .unregfunc = ftrace_raw_unreg_event_##call, \
         .print_fmt = print_fmt_##call, \
         .define_fields = ftrace_define_fields_##template, \
-        _TRACE_PERF_INIT(call) \
 }
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
@@ -756,9 +694,9 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
 static notrace void \
-perf_trace_templ_##call(struct ftrace_event_call *event_call, \
-                        proto) \
+perf_trace_##call(void *__data, proto) \
 { \
+        struct ftrace_event_call *event_call = __data; \
         struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
         struct ftrace_raw_##call *entry; \
         u64 __addr = 0, __count = 1; \
@@ -791,15 +729,20 @@ perf_trace_templ_##call(struct ftrace_event_call *event_call, \
                               __count, irq_flags, __regs); \
 }
 
+/*
+ * This part is compiled out, it is only here as a build time check
+ * to make sure that if the tracepoint handling changes, the
+ * perf probe will fail to compile unless it too is updated.
+ */
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, call, proto, args) \
-static notrace void perf_trace_##call(void *__ignore, proto) \
+static inline void perf_test_probe_##call(void) \
 { \
-        struct ftrace_event_call *event_call = &event_##call; \
+        check_trace_callback_type_##call(perf_trace_##template); \
  \
-        perf_trace_templ_##template(event_call, args); \
 }
 
+
 #undef DEFINE_EVENT_PRINT
 #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
         DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
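
The ftrace_test_probe_<call>() and perf_test_probe_<call>() helpers added above are never called at run time; their only job is to hand the probe to check_trace_callback_type_<call>(), an empty inline generated by the tracepoint macros whose parameter is a function pointer of the exact callback type. A stripped-down, stand-alone illustration of that idiom follows; the names here are generic placeholders, not the kernel's generated identifiers.

    #include <stdio.h>

    /* Stand-in for the macro-generated checker: an empty inline whose
     * parameter type encodes the expected probe signature. */
    typedef void (*sample_probe_t)(void *data, int prev_pid, int next_pid);
    static inline void check_sample_callback_type(sample_probe_t cb) { }

    /* The probe we want to keep in sync with the "tracepoint". */
    static void probe_sample(void *data, int prev_pid, int next_pid)
    {
            printf("switch %d -> %d\n", prev_pid, next_pid);
    }

    /* Compiled but never called: if probe_sample's prototype drifts from
     * the expected signature, the compiler reports an incompatible
     * pointer type right here. */
    static inline void test_probe_sample(void)
    {
            check_sample_callback_type(probe_sample);
    }

    int main(void)
    {
            probe_sample(NULL, 1, 2);       /* the probe is ordinary code */
            return 0;
    }

The check costs nothing in the built object: the test function is an uncalled static inline, so it is discarded, yet any mismatch between the probe and the tracepoint prototype still breaks the build.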