path: root/include/trace/ftrace.h
author     Jonathan Herman <hermanjl@cs.unc.edu>  2013-01-17 16:15:55 -0500
committer  Jonathan Herman <hermanjl@cs.unc.edu>  2013-01-17 16:15:55 -0500
commit     8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
tree       a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /include/trace/ftrace.h
parent     406089d01562f1e2bf9f089fd7637009ebaad589 (diff)
Patched in Tegra support.
Diffstat (limited to 'include/trace/ftrace.h')
-rw-r--r--  include/trace/ftrace.h  86
1 files changed, 76 insertions, 10 deletions
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 40dc5e8fe34..533c49f4804 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -545,7 +545,8 @@ ftrace_raw_event_##call(void *__data, proto) \
 	{ assign; } \
 	\
 	if (!filter_current_check_discard(buffer, event_call, entry, event)) \
-		trace_buffer_unlock_commit(buffer, event, irq_flags, pc); \
+		trace_nowake_buffer_unlock_commit(buffer, \
+						  event, irq_flags, pc); \
 }
 /*
  * The ftrace_test_probe is compiled out, it is only here as a build time check
@@ -570,7 +571,6 @@ static inline void ftrace_test_probe_##call(void) \
 
 #undef __print_flags
 #undef __print_symbolic
-#undef __print_hex
 #undef __get_dynamic_array
 #undef __get_str
 
@@ -619,6 +619,79 @@ __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
+/*
+ * Define the insertion callback to perf events
+ *
+ * The job is very similar to ftrace_raw_event_<call> except that we don't
+ * insert in the ring buffer but in a perf counter.
+ *
+ * static void ftrace_perf_<call>(proto)
+ * {
+ *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
+ *	struct ftrace_event_call *event_call = &event_<call>;
+ *	extern void perf_tp_event(int, u64, u64, void *, int);
+ *	struct ftrace_raw_##call *entry;
+ *	struct perf_trace_buf *trace_buf;
+ *	u64 __addr = 0, __count = 1;
+ *	unsigned long irq_flags;
+ *	struct trace_entry *ent;
+ *	int __entry_size;
+ *	int __data_size;
+ *	int __cpu
+ *	int pc;
+ *
+ *	pc = preempt_count();
+ *
+ *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
+ *
+ *	// Below we want to get the aligned size by taking into account
+ *	// the u32 field that will later store the buffer size
+ *	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
+ *			     sizeof(u64));
+ *	__entry_size -= sizeof(u32);
+ *
+ *	// Protect the non nmi buffer
+ *	// This also protects the rcu read side
+ *	local_irq_save(irq_flags);
+ *	__cpu = smp_processor_id();
+ *
+ *	if (in_nmi())
+ *		trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
+ *	else
+ *		trace_buf = rcu_dereference_sched(perf_trace_buf);
+ *
+ *	if (!trace_buf)
+ *		goto end;
+ *
+ *	trace_buf = per_cpu_ptr(trace_buf, __cpu);
+ *
+ *	// Avoid recursion from perf that could mess up the buffer
+ *	if (trace_buf->recursion++)
+ *		goto end_recursion;
+ *
+ *	raw_data = trace_buf->buf;
+ *
+ *	// Make recursion update visible before entering perf_tp_event
+ *	// so that we protect from perf recursions.
+ *
+ *	barrier();
+ *
+ *	// zero dead bytes from alignment to avoid stack leak to userspace:
+ *	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
+ *	entry = (struct ftrace_raw_<call> *)raw_data;
+ *	ent = &entry->ent;
+ *	tracing_generic_entry_update(ent, irq_flags, pc);
+ *	ent->type = event_call->id;
+ *
+ *	<tstruct> <- do some jobs with dynamic arrays
+ *
+ *	<assign>  <- affect our values
+ *
+ *	perf_tp_event(event_call->id, __addr, __count, entry,
+ *		      __entry_size); <- submit them to perf counter
+ *
+ * }
+ */
 
 #ifdef CONFIG_PERF_EVENTS
 
@@ -638,12 +711,6 @@ __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
 #undef __perf_count
 #define __perf_count(c) __count = (c)
 
-#undef __perf_task
-#define __perf_task(t) __task = (t)
-
-#undef TP_perf_assign
-#define TP_perf_assign(args...) args
-
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
 static notrace void \
@@ -654,7 +721,6 @@ perf_trace_##call(void *__data, proto) \
 	struct ftrace_raw_##call *entry; \
 	struct pt_regs __regs; \
 	u64 __addr = 0, __count = 1; \
-	struct task_struct *__task = NULL; \
 	struct hlist_head *head; \
 	int __entry_size; \
 	int __data_size; \
@@ -682,7 +748,7 @@ perf_trace_##call(void *__data, proto) \
 	\
 	head = this_cpu_ptr(event_call->perf_events); \
 	perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
-		__count, &__regs, head, __task); \
+		__count, &__regs, head); \
 }
 
 /*
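
For context on what the macros touched by this diff expand to: every TRACE_EVENT() definition pulled in through this header becomes a ftrace_raw_event_<call>() handler for the ring-buffer path and, under CONFIG_PERF_EVENTS, a perf_trace_<call>() handler for the perf path changed above. The sketch below is illustrative only and is not part of the commit; the trace system, event name, and fields are hypothetical.

/* Hypothetical trace header (e.g. include/trace/events/sample.h);
 * the event name and fields are invented for illustration only. */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sample

#if !defined(_TRACE_SAMPLE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SAMPLE_H

#include <linux/tracepoint.h>

TRACE_EVENT(sample_update,

	TP_PROTO(int cpu, unsigned long value),

	TP_ARGS(cpu, value),

	/* tstruct: field layout of struct ftrace_raw_sample_update */
	TP_STRUCT__entry(
		__field(int, cpu)
		__field(unsigned long, value)
	),

	/* assign: runs inside ftrace_raw_event_<call>() / perf_trace_<call>() */
	TP_fast_assign(
		__entry->cpu = cpu;
		__entry->value = value;
	),

	TP_printk("cpu=%d value=%lu", __entry->cpu, __entry->value)
);

#endif /* _TRACE_SAMPLE_H */

/* This part must be outside protection */
#include <trace/define_trace.h>

Kernel code would then call trace_sample_update(cpu, value) at the instrumentation point; the handlers generated by include/trace/ftrace.h perform the buffer reservation, field assignment, and commit steps visible in the hunks above.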