Diffstat (limited to 'include/trace/ftrace.h')
-rw-r--r--  include/trace/ftrace.h | 73
1 file changed, 0 insertions(+), 73 deletions(-)
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 698f2a890322..40dc5e8fe340 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -619,79 +619,6 @@ __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-/*
- * Define the insertion callback to perf events
- *
- * The job is very similar to ftrace_raw_event_<call> except that we don't
- * insert in the ring buffer but in a perf counter.
- *
- * static void ftrace_perf_<call>(proto)
- * {
- *      struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
- *      struct ftrace_event_call *event_call = &event_<call>;
- *      extern void perf_tp_event(int, u64, u64, void *, int);
- *      struct ftrace_raw_##call *entry;
- *      struct perf_trace_buf *trace_buf;
- *      u64 __addr = 0, __count = 1;
- *      unsigned long irq_flags;
- *      struct trace_entry *ent;
- *      int __entry_size;
- *      int __data_size;
- *      int __cpu;
- *      int pc;
- *
- *      pc = preempt_count();
- *
- *      __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
- *
- *      // Below we want to get the aligned size by taking into account
- *      // the u32 field that will later store the buffer size
- *      __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
- *                           sizeof(u64));
- *      __entry_size -= sizeof(u32);
- *
- *      // Protect the non-NMI buffer
- *      // This also protects the RCU read side
- *      local_irq_save(irq_flags);
- *      __cpu = smp_processor_id();
- *
- *      if (in_nmi())
- *              trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
- *      else
- *              trace_buf = rcu_dereference_sched(perf_trace_buf);
- *
- *      if (!trace_buf)
- *              goto end;
- *
- *      trace_buf = per_cpu_ptr(trace_buf, __cpu);
- *
- *      // Avoid recursion from perf that could mess up the buffer
- *      if (trace_buf->recursion++)
- *              goto end_recursion;
- *
- *      raw_data = trace_buf->buf;
- *
- *      // Make the recursion update visible before entering perf_tp_event
- *      // so that we protect against perf recursions.
- *
- *      barrier();
- *
- *      // Zero dead bytes from alignment to avoid stack leak to userspace:
- *      *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
- *      entry = (struct ftrace_raw_<call> *)raw_data;
- *      ent = &entry->ent;
- *      tracing_generic_entry_update(ent, irq_flags, pc);
- *      ent->type = event_call->id;
- *
- *      <tstruct>       <- do some jobs with dynamic arrays
- *
- *      <assign>        <- assign our values
- *
- *      perf_tp_event(event_call->id, __addr, __count, entry,
- *                    __entry_size);   <- submit them to the perf counter
- *
- * }
- */
 
 #ifdef CONFIG_PERF_EVENTS
 
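
The subtle part of the removed comment is the size calculation: the event payload is rounded up to a u64 boundary while leaving room for the u32 field that will later hold the entry size, and the last u64 of the buffer is cleared so the alignment padding cannot leak stack bytes to userspace. Below is a minimal, userspace-only sketch of that arithmetic; ALIGN(), struct ftrace_raw_sample, and the payload size are illustrative stand-ins, not the kernel definitions.

/*
 * Hedged sketch of the entry-size alignment and dead-byte zeroing described
 * in the comment above.  All names and sizes here are assumptions made for
 * the example, not kernel code.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ALIGN(x, a)     (((x) + (a) - 1) & ~((size_t)(a) - 1))

struct trace_entry { uint16_t type; uint8_t flags; uint8_t preempt_count; int32_t pid; };
struct ftrace_raw_sample { struct trace_entry ent; uint64_t value; };  /* fake event layout */

int main(void)
{
        char raw_data[256];             /* stand-in for trace_buf->buf */
        size_t data_size = 13;          /* pretend dynamic-array payload size */
        size_t entry_size;

        /* Reserve the u32 size word, round up to u64, then drop the u32 again. */
        entry_size = ALIGN(data_size + sizeof(struct ftrace_raw_sample) + sizeof(uint32_t),
                           sizeof(uint64_t));
        entry_size -= sizeof(uint32_t);

        /* Zero the dead bytes created by the alignment so no stale data leaks. */
        memset(raw_data, 0x5a, sizeof(raw_data));       /* simulate stale buffer content */
        memset(&raw_data[entry_size - sizeof(uint64_t)], 0, sizeof(uint64_t));

        printf("payload=%zu entry=%zu total=%zu (u64-aligned minus the u32 header)\n",
               data_size, sizeof(struct ftrace_raw_sample), entry_size);
        return 0;
}

The pseudo-code in the comment performs the final zeroing with a single u64 store; memset() is used here only so the sketch carries no alignment assumptions.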
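
The other point the comment stresses is the recursion guard: the per-CPU buffer carries a recursion counter that is bumped before the buffer is touched, with a compiler barrier so the update is visible before any code that might re-enter the event path runs. The following is a single-threaded sketch of that pattern under assumed names; trace_buf_stub, emit_event() and this barrier() definition are illustrative, not kernel APIs.

/*
 * Hedged sketch of the recursion guard + barrier pattern from the comment
 * above.  A nested call sees a non-zero counter and bails out instead of
 * scribbling over the buffer the outer call is still using.
 */
#include <stdio.h>

#define barrier()       __asm__ __volatile__("" ::: "memory")

struct trace_buf_stub {
        int recursion;
        char buf[64];
};

static struct trace_buf_stub trace_buf_stub;

static void emit_event(int depth)
{
        struct trace_buf_stub *tb = &trace_buf_stub;

        /* A re-entrant caller finds the counter already raised and gives up. */
        if (tb->recursion++)
                goto out;

        /* Make the recursion update visible before work that might re-enter. */
        barrier();

        printf("depth %d: using buffer\n", depth);
        if (depth == 0)
                emit_event(1);          /* simulate a nested event */

out:
        tb->recursion--;
}

int main(void)
{
        emit_event(0);                  /* prints only the depth-0 line */
        return 0;
}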