author		Ingo Molnar <mingo@elte.hu>	2009-08-11 08:19:09 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-08-11 08:19:09 -0400
commit		89034bc2c7b839702c00a704e79d112737f98be0 (patch)
tree		e65b1f3d4c751baa840efc81bc4734f089379eb3 /include/trace/ftrace.h
parent		fb82ad719831db58e9baa4c67015aae3fe27e7e3 (diff)
parent		85dfd81dc57e8183a277ddd7a56aa65c96f3f487 (diff)
Merge branch 'linus' into tracing/core

Conflicts:
	kernel/trace/trace_events_filter.c

We use the tracing/core version.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/trace/ftrace.h')

-rw-r--r--	include/trace/ftrace.h	183
1 file changed, 158 insertions, 25 deletions
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 3cbb96ef34f4..25d3b02a06f8 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -145,6 +145,9 @@
 #undef TP_fast_assign
 #define TP_fast_assign(args...) args
 
+#undef TP_perf_assign
+#define TP_perf_assign(args...)
+
 #undef TRACE_EVENT
 #define TRACE_EVENT(call, proto, args, tstruct, func, print)	\
 static int							\
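[Editor's note: the TP_perf_assign() stub added above makes perf-only assignments expand to nothing in this stage. For context, here is a hypothetical event definition showing where the macro sits; the event name and fields are invented, but the pattern follows the scheduler events' use of __perf_count(), which this patch wires up further down.]

/* Hypothetical TRACE_EVENT using TP_perf_assign(); only the profile
 * pass added later in this patch consumes __perf_count(), all other
 * stages expand TP_perf_assign() to nothing. */
TRACE_EVENT(foo_runtime,

	TP_PROTO(struct task_struct *tsk, u64 runtime),

	TP_ARGS(tsk, runtime),

	TP_STRUCT__entry(
		__field(pid_t,	pid)
		__field(u64,	runtime)
	),

	TP_fast_assign(
		__entry->pid	 = tsk->pid;
		__entry->runtime = runtime;
	)
	TP_perf_assign(
		__perf_count(runtime);
	),

	TP_printk("pid=%d runtime=%llu",
		  __entry->pid, (unsigned long long)__entry->runtime)
);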
@@ -347,6 +350,56 @@ static inline int ftrace_get_offsets_##call( \
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
+#ifdef CONFIG_EVENT_PROFILE
+
+/*
+ * Generate the functions needed for tracepoint perf_counter support.
+ *
+ * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
+ *
+ * static int ftrace_profile_enable_<call>(struct ftrace_event_call *event_call)
+ * {
+ *	int ret = 0;
+ *
+ *	if (!atomic_inc_return(&event_call->profile_count))
+ *		ret = register_trace_<call>(ftrace_profile_<call>);
+ *
+ *	return ret;
+ * }
+ *
+ * static void ftrace_profile_disable_<call>(struct ftrace_event_call *event_call)
+ * {
+ *	if (atomic_add_negative(-1, &event_call->profile_count))
+ *		unregister_trace_<call>(ftrace_profile_<call>);
+ * }
+ *
+ */
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
+									\
+static void ftrace_profile_##call(proto);				\
+									\
+static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
+{									\
+	int ret = 0;							\
+									\
+	if (!atomic_inc_return(&event_call->profile_count))		\
+		ret = register_trace_##call(ftrace_profile_##call);	\
+									\
+	return ret;							\
+}									\
+									\
+static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
+{									\
+	if (atomic_add_negative(-1, &event_call->profile_count))	\
+		unregister_trace_##call(ftrace_profile_##call);		\
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+#endif
+
 /*
  * Stage 4 of the trace events.
  *
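[Editor's note: the enable/disable pair generated above is a reference count with a -1 bias: profile_count is initialized to ATOMIC_INIT(-1) (see the _TRACE_PROFILE_INIT context in the next hunk), so the first enabler's atomic_inc_return() yields 0 and registers the tracepoint, and the last disabler's atomic_add_negative() fires and unregisters it. Below is a minimal standalone sketch of the same idiom, using C11 <stdatomic.h> as a userspace stand-in for the kernel atomics.]

#include <stdatomic.h>
#include <stdio.h>

static atomic_int profile_count = -1;	/* mirrors ATOMIC_INIT(-1) */

static void profile_enable(void)
{
	/* kernel: !atomic_inc_return(...), true only for the first enabler */
	if (atomic_fetch_add(&profile_count, 1) + 1 == 0)
		printf("register tracepoint\n");
}

static void profile_disable(void)
{
	/* kernel: atomic_add_negative(-1, ...), true only for the last disabler */
	if (atomic_fetch_add(&profile_count, -1) - 1 < 0)
		printf("unregister tracepoint\n");
}

int main(void)
{
	profile_enable();	/* registers   (count: -1 -> 0) */
	profile_enable();	/* no-op       (count:  0 -> 1) */
	profile_disable();	/* no-op       (count:  1 -> 0) */
	profile_disable();	/* unregisters (count:  0 -> -1) */
	return 0;
}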
@@ -449,28 +502,6 @@ static inline int ftrace_get_offsets_##call( \
 #define TP_FMT(fmt, args...)	fmt "\n", ##args
 
 #ifdef CONFIG_EVENT_PROFILE
-#define _TRACE_PROFILE(call, proto, args)				\
-static void ftrace_profile_##call(proto)				\
-{									\
-	extern void perf_tpcounter_event(int);				\
-	perf_tpcounter_event(event_##call.id);				\
-}									\
-									\
-static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
-{									\
-	int ret = 0;							\
-									\
-	if (!atomic_inc_return(&event_call->profile_count))		\
-		ret = register_trace_##call(ftrace_profile_##call);	\
-									\
-	return ret;							\
-}									\
-									\
-static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
-{									\
-	if (atomic_add_negative(-1, &event_call->profile_count))	\
-		unregister_trace_##call(ftrace_profile_##call);		\
-}
 
 #define _TRACE_PROFILE_INIT(call)					\
 	.profile_count = ATOMIC_INIT(-1),				\
@@ -478,7 +509,6 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
 	.profile_disable = ftrace_profile_disable_##call,
 
 #else
-#define _TRACE_PROFILE(call, proto, args)
 #define _TRACE_PROFILE_INIT(call)
 #endif
 
@@ -504,7 +534,6 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
 
 #undef TRACE_EVENT
 #define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
-_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args))			\
 									\
 static struct ftrace_event_call event_##call;				\
 									\
@@ -588,6 +617,110 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-#undef _TRACE_PROFILE
+/*
+ * Define the insertion callback to profile events
+ *
+ * The job is very similar to ftrace_raw_event_<call> except that we don't
+ * insert in the ring buffer but in a perf counter.
+ *
+ * static void ftrace_profile_<call>(proto)
+ * {
+ *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
+ *	struct ftrace_event_call *event_call = &event_<call>;
+ *	extern void perf_tpcounter_event(int, u64, u64, void *, int);
+ *	struct ftrace_raw_##call *entry;
+ *	u64 __addr = 0, __count = 1;
+ *	unsigned long irq_flags;
+ *	int __entry_size;
+ *	int __data_size;
+ *	int pc;
+ *
+ *	local_save_flags(irq_flags);
+ *	pc = preempt_count();
+ *
+ *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
+ *
+ *	// Below we want to get the aligned size by taking into account
+ *	// the u32 field that will later store the buffer size
+ *	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
+ *			     sizeof(u64));
+ *	__entry_size -= sizeof(u32);
+ *
+ *	do {
+ *		char raw_data[__entry_size]; <- allocate our sample in the stack
+ *		struct trace_entry *ent;
+ *
+ *		zero dead bytes from alignment to avoid stack leak to userspace:
+ *
+ *		*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
+ *		entry = (struct ftrace_raw_<call> *)raw_data;
+ *		ent = &entry->ent;
+ *		tracing_generic_entry_update(ent, irq_flags, pc);
+ *		ent->type = event_call->id;
+ *
+ *		<tstruct> <- do some jobs with dynamic arrays
+ *
+ *		<assign>  <- affect our values
+ *
+ *		perf_tpcounter_event(event_call->id, __addr, __count, entry,
+ *				     __entry_size); <- submit them to perf counter
+ *	} while (0);
+ *
+ * }
+ */
+
+#ifdef CONFIG_EVENT_PROFILE
+
+#undef __perf_addr
+#define __perf_addr(a) __addr = (a)
+
+#undef __perf_count
+#define __perf_count(c) __count = (c)
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
+static void ftrace_profile_##call(proto)				\
+{									\
+	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
+	struct ftrace_event_call *event_call = &event_##call;		\
+	extern void perf_tpcounter_event(int, u64, u64, void *, int);	\
+	struct ftrace_raw_##call *entry;				\
+	u64 __addr = 0, __count = 1;					\
+	unsigned long irq_flags;					\
+	int __entry_size;						\
+	int __data_size;						\
+	int pc;								\
+									\
+	local_save_flags(irq_flags);					\
+	pc = preempt_count();						\
+									\
+	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
+	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
+			     sizeof(u64));				\
+	__entry_size -= sizeof(u32);					\
+									\
+	do {								\
+		char raw_data[__entry_size];				\
+		struct trace_entry *ent;				\
+									\
+		*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;	\
+		entry = (struct ftrace_raw_##call *)raw_data;		\
+		ent = &entry->ent;					\
+		tracing_generic_entry_update(ent, irq_flags, pc);	\
+		ent->type = event_call->id;				\
+									\
+		tstruct							\
+									\
+		{ assign; }						\
+									\
+		perf_tpcounter_event(event_call->id, __addr, __count, entry,\
+				     __entry_size);			\
+	} while (0);							\
+									\
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+#endif /* CONFIG_EVENT_PROFILE */
+
 #undef _TRACE_PROFILE_INIT
 
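[Editor's note: the __entry_size arithmetic in the new profile callback reserves room for the u32 size field that perf prepends to each sample, rounds the whole record up to u64 alignment, then subtracts the u32 again; the trailing u64 of raw_data is zeroed before the fields are filled in so the alignment padding never leaks stack bytes to userspace. Below is a standalone sketch of that computation with made-up sizes; ALIGN mirrors the kernel macro.]

#include <stdio.h>
#include <stdint.h>

/* same rounding as the kernel's ALIGN() for power-of-two 'a' */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	size_t data_size  = 13;	/* illustrative stand-in for __data_size */
	size_t entry_size = 16;	/* illustrative stand-in for sizeof(*entry) */
	size_t sz;

	/* Round (entry + dynamic data + u32 size field) up to u64
	 * alignment, then give the u32 back: perf re-adds it later. */
	sz = ALIGN(data_size + entry_size + sizeof(uint32_t),
		   sizeof(uint64_t));
	sz -= sizeof(uint32_t);

	/* prints: raw_data = 36 bytes (40 once perf prepends its u32) */
	printf("raw_data = %zu bytes (%zu once perf prepends its u32)\n",
	       sz, sz + sizeof(uint32_t));
	return 0;
}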