path: root/include
author     Ingo Molnar <mingo@kernel.org>    2012-12-08 09:54:35 -0500
committer  Ingo Molnar <mingo@kernel.org>    2012-12-08 09:54:35 -0500
commit     cc1b39dbf9f55a438e8a21a694394c20e6a17129 (patch)
tree       cf48e5c871250c2bfd1d0590acd2f0569e95b950 /include
parent     7e0dd574cd6b1bcc818ed4251e5ceda7d8bee08f (diff)
parent     1c7d66732458dc187008e3f5b2f71e019e320fc2 (diff)
Merge branch 'tip/perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace into perf/core
Pull ftrace updates from Steve Rostedt.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/trace_clock.h | 16
-rw-r--r--  include/linux/ftrace_event.h      |  6
-rw-r--r--  include/linux/trace_clock.h       |  2
-rw-r--r--  include/trace/ftrace.h            | 73
4 files changed, 24 insertions(+), 73 deletions(-)
diff --git a/include/asm-generic/trace_clock.h b/include/asm-generic/trace_clock.h
new file mode 100644
index 000000000000..6726f1bafb5e
--- /dev/null
+++ b/include/asm-generic/trace_clock.h
@@ -0,0 +1,16 @@
+#ifndef _ASM_GENERIC_TRACE_CLOCK_H
+#define _ASM_GENERIC_TRACE_CLOCK_H
+/*
+ * Arch-specific trace clocks.
+ */
+
+/*
+ * Additional trace clocks added to the trace_clocks
+ * array in kernel/trace/trace.c
+ * None if the architecture has not defined it.
+ */
+#ifndef ARCH_TRACE_CLOCKS
+# define ARCH_TRACE_CLOCKS
+#endif
+
+#endif  /* _ASM_GENERIC_TRACE_CLOCK_H */
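
The ARCH_TRACE_CLOCKS hook above is how an architecture appends its own clocks to the trace_clocks array in kernel/trace/trace.c. As a rough sketch only (hypothetical architecture, file and function names, not part of this diff), an arch override of this generic header could look like:

/*
 * Hypothetical arch/<arch>/include/asm/trace_clock.h, shown only to
 * illustrate the override mechanism; it is not part of this patch.
 */
#ifndef _ASM_MYARCH_TRACE_CLOCK_H
#define _ASM_MYARCH_TRACE_CLOCK_H

#include <linux/compiler.h>
#include <linux/types.h>

/* Hypothetical raw cycle-counter clock implemented by the architecture. */
extern u64 notrace trace_clock_myarch_cycles(void);

/* Entry (or entries) spliced into the trace_clocks[] initializer. */
#define ARCH_TRACE_CLOCKS \
	{ trace_clock_myarch_cycles, "myarch-cycles", 0 },

#endif /* _ASM_MYARCH_TRACE_CLOCK_H */

The trailing comma matters: ARCH_TRACE_CLOCKS is pasted directly into an initializer list, so it must expand to zero or more complete entries.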
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index b80c8ddfbbdc..a3d489531d83 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -86,6 +86,12 @@ struct trace_iterator {
 	cpumask_var_t		started;
 };
 
+enum trace_iter_flags {
+	TRACE_FILE_LAT_FMT	= 1,
+	TRACE_FILE_ANNOTATE	= 2,
+	TRACE_FILE_TIME_IN_NS	= 4,
+};
+
 
 struct trace_event;
 
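
The new TRACE_FILE_TIME_IN_NS flag is carried in the iter_flags member of struct trace_iterator and tells output code whether the currently selected trace clock counts in nanoseconds. A minimal sketch of how a consumer could test it (illustrative only; the function is made up for this example):

#include <linux/ftrace_event.h>
#include <linux/printk.h>

/* Illustrative only: branch on whether the active trace clock is in ns. */
static void example_show_timestamp(struct trace_iterator *iter, u64 ts)
{
	if (iter->iter_flags & TRACE_FILE_TIME_IN_NS)
		pr_info("timestamp: %llu ns\n", (unsigned long long)ts);
	else
		pr_info("timestamp: %llu raw ticks\n", (unsigned long long)ts);
}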
diff --git a/include/linux/trace_clock.h b/include/linux/trace_clock.h
index 4eb490237d4c..d563f37e1a1d 100644
--- a/include/linux/trace_clock.h
+++ b/include/linux/trace_clock.h
@@ -12,6 +12,8 @@
 #include <linux/compiler.h>
 #include <linux/types.h>
 
+#include <asm/trace_clock.h>
+
 extern u64 notrace trace_clock_local(void);
 extern u64 notrace trace_clock(void);
 extern u64 notrace trace_clock_global(void);
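
Pulling <asm/trace_clock.h> into the generic header is what lets kernel/trace/trace.c expand ARCH_TRACE_CLOCKS at the tail of its clock table. Roughly, as a simplified sketch (the exact field layout in the tree at this commit may differ):

/* Simplified sketch of the trace_clocks table in kernel/trace/trace.c. */
static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;	/* does this clock count in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,	"local",	1 },
	{ trace_clock_global,	"global",	1 },
	{ trace_clock_counter,	"counter",	0 },
	ARCH_TRACE_CLOCKS	/* expands to nothing unless the arch provides entries */
};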
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 698f2a890322..40dc5e8fe340 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -619,79 +619,6 @@ __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-/*
- * Define the insertion callback to perf events
- *
- * The job is very similar to ftrace_raw_event_<call> except that we don't
- * insert in the ring buffer but in a perf counter.
- *
- * static void ftrace_perf_<call>(proto)
- * {
- *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
- *	struct ftrace_event_call *event_call = &event_<call>;
- *	extern void perf_tp_event(int, u64, u64, void *, int);
- *	struct ftrace_raw_##call *entry;
- *	struct perf_trace_buf *trace_buf;
- *	u64 __addr = 0, __count = 1;
- *	unsigned long irq_flags;
- *	struct trace_entry *ent;
- *	int __entry_size;
- *	int __data_size;
- *	int __cpu
- *	int pc;
- *
- *	pc = preempt_count();
- *
- *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
- *
- *	// Below we want to get the aligned size by taking into account
- *	// the u32 field that will later store the buffer size
- *	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
- *			     sizeof(u64));
- *	__entry_size -= sizeof(u32);
- *
- *	// Protect the non nmi buffer
- *	// This also protects the rcu read side
- *	local_irq_save(irq_flags);
- *	__cpu = smp_processor_id();
- *
- *	if (in_nmi())
- *		trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
- *	else
- *		trace_buf = rcu_dereference_sched(perf_trace_buf);
- *
- *	if (!trace_buf)
- *		goto end;
- *
- *	trace_buf = per_cpu_ptr(trace_buf, __cpu);
- *
- *	// Avoid recursion from perf that could mess up the buffer
- *	if (trace_buf->recursion++)
- *		goto end_recursion;
- *
- *	raw_data = trace_buf->buf;
- *
- *	// Make recursion update visible before entering perf_tp_event
- *	// so that we protect from perf recursions.
- *
- *	barrier();
- *
- *	//zero dead bytes from alignment to avoid stack leak to userspace:
- *	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
- *	entry = (struct ftrace_raw_<call> *)raw_data;
- *	ent = &entry->ent;
- *	tracing_generic_entry_update(ent, irq_flags, pc);
- *	ent->type = event_call->id;
- *
- *	<tstruct> <- do some jobs with dynamic arrays
- *
- *	<assign>  <- affect our values
- *
- *	perf_tp_event(event_call->id, __addr, __count, entry,
- *		      __entry_size); <- submit them to perf counter
- *
- * }
- */
 
 #ifdef CONFIG_PERF_EVENTS
 