Diffstat (limited to 'include')
 include/asm-generic/trace_clock.h | 16
 include/linux/ftrace_event.h      | 20
 include/linux/kernel.h            |  7
 include/linux/ring_buffer.h       |  3
 include/linux/trace_clock.h       |  2
 include/linux/uprobes.h           | 10
 include/trace/ftrace.h            | 76
 include/trace/syscall.h           | 23
 8 files changed, 44 insertions(+), 113 deletions(-)
diff --git a/include/asm-generic/trace_clock.h b/include/asm-generic/trace_clock.h
new file mode 100644
index 000000000000..6726f1bafb5e
--- /dev/null
+++ b/include/asm-generic/trace_clock.h
@@ -0,0 +1,16 @@
+#ifndef _ASM_GENERIC_TRACE_CLOCK_H
+#define _ASM_GENERIC_TRACE_CLOCK_H
+/*
+ * Arch-specific trace clocks.
+ */
+
+/*
+ * Additional trace clocks added to the trace_clocks
+ * array in kernel/trace/trace.c
+ * None if the architecture has not defined it.
+ */
+#ifndef ARCH_TRACE_CLOCKS
+# define ARCH_TRACE_CLOCKS
+#endif
+
+#endif  /* _ASM_GENERIC_TRACE_CLOCK_H */
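ARCH_TRACE_CLOCKS expands inside the trace_clocks[] initializer in kernel/trace/trace.c, so an architecture opts in by shipping its own asm/trace_clock.h that defines the macro to one or more array entries. Below is a minimal sketch of such an override, assuming a hypothetical clock function trace_clock_myarch_cycles() and a simple { function, name } entry; the real array may carry additional fields, so treat the shape as illustrative only.

	/* arch/myarch/include/asm/trace_clock.h -- illustrative sketch only */
	#ifndef _ASM_MYARCH_TRACE_CLOCK_H
	#define _ASM_MYARCH_TRACE_CLOCK_H

	#include <linux/types.h>

	/* hypothetical arch-provided cycle-counter read, implemented elsewhere */
	extern u64 notrace trace_clock_myarch_cycles(void);

	/*
	 * Appended verbatim to the trace_clocks[] initializer in
	 * kernel/trace/trace.c; the trailing comma keeps the list well formed.
	 */
	#define ARCH_TRACE_CLOCKS \
		{ trace_clock_myarch_cycles, "myarch-cycles" },

	#endif /* _ASM_MYARCH_TRACE_CLOCK_H */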
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 642928cf57b4..a3d489531d83 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -86,6 +86,12 @@ struct trace_iterator {
 	cpumask_var_t		started;
 };
 
+enum trace_iter_flags {
+	TRACE_FILE_LAT_FMT	= 1,
+	TRACE_FILE_ANNOTATE	= 2,
+	TRACE_FILE_TIME_IN_NS	= 4,
+};
+
 
 struct trace_event;
 
@@ -127,13 +133,13 @@ trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer,
 void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
 					struct ring_buffer_event *event,
 					unsigned long flags, int pc);
-void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
-				       struct ring_buffer_event *event,
-				       unsigned long flags, int pc);
-void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer,
-					    struct ring_buffer_event *event,
-					    unsigned long flags, int pc,
-					    struct pt_regs *regs);
+void trace_buffer_unlock_commit(struct ring_buffer *buffer,
+				struct ring_buffer_event *event,
+				unsigned long flags, int pc);
+void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
+				     struct ring_buffer_event *event,
+				     unsigned long flags, int pc,
+				     struct pt_regs *regs);
 void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
 					 struct ring_buffer_event *event);
 
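The *_nowake_* variants are gone; the renamed helpers become the ordinary commit path, with reader wake-ups handled separately in the tracing core. A hedged sketch of how a tracer's write path fits together with the renamed helper, using the trace_current_buffer_lock_reserve() declaration visible in the hunk above; MY_TRACE_TYPE and struct my_trace_entry are placeholders, not real kernel identifiers.

	struct ring_buffer *buffer;
	struct ring_buffer_event *event;
	struct my_trace_entry *entry;		/* hypothetical event layout */
	unsigned long irq_flags;
	int pc = preempt_count();

	local_save_flags(irq_flags);
	event = trace_current_buffer_lock_reserve(&buffer, MY_TRACE_TYPE,
						  sizeof(*entry), irq_flags, pc);
	if (!event)
		return;				/* buffer full or tracing disabled */

	entry = ring_buffer_event_data(event);
	entry->value = 42;			/* fill in event fields */

	/* was trace_nowake_buffer_unlock_commit() before this series */
	trace_buffer_unlock_commit(buffer, event, irq_flags, pc);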
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index dd9900cabf89..d97ed5897447 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -527,9 +527,6 @@ __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
 
 extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
 #else
-static inline __printf(1, 2)
-int trace_printk(const char *fmt, ...);
-
 static inline void tracing_start(void) { }
 static inline void tracing_stop(void) { }
 static inline void ftrace_off_permanent(void) { }
@@ -539,8 +536,8 @@ static inline void tracing_on(void) { }
 static inline void tracing_off(void) { }
 static inline int tracing_is_on(void) { return 0; }
 
-static inline int
-trace_printk(const char *fmt, ...)
+static inline __printf(1, 2)
+int trace_printk(const char *fmt, ...)
 {
 	return 0;
 }
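Moving the __printf(1, 2) annotation onto the !CONFIG_TRACING stub drops the earlier bodyless static-inline declaration and keeps printf-format checking in place even when trace_printk() compiles down to "return 0". A small illustration of the kind of mismatch the attribute still catches with tracing disabled, assuming format warnings are enabled as in a normal kernel build:

	int resid = 42;

	trace_printk("resid=%d\n", resid);		/* fine */
	trace_printk("resid=%d\n", "forty-two");	/* -Wformat warning, even though
							   the call is a no-op stub when
							   CONFIG_TRACING=n */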
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 6c8835f74f79..519777e3fa01 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -159,13 +159,14 @@ int ring_buffer_record_is_on(struct ring_buffer *buffer);
 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
 
-unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu);
+u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu);
 unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu);
 unsigned long ring_buffer_entries(struct ring_buffer *buffer);
 unsigned long ring_buffer_overruns(struct ring_buffer *buffer);
 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
 unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu);
+unsigned long ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu);
 
 u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu);
 void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
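ring_buffer_oldest_event_ts() now returns u64, matching the other timestamp interfaces so the value is not truncated on 32-bit builds, and ring_buffer_dropped_events_cpu() exposes a new per-CPU counter of events that could not be written (as I read it, events discarded when the buffer is full and overwrite is disabled). A hedged sketch of how a consumer might total the new counter; total_dropped_events() is a made-up helper, not part of the ring-buffer API:

	#include <linux/cpumask.h>
	#include <linux/ring_buffer.h>

	/* illustrative helper, not a kernel API */
	static unsigned long total_dropped_events(struct ring_buffer *buffer)
	{
		unsigned long dropped = 0;
		int cpu;

		for_each_online_cpu(cpu)
			dropped += ring_buffer_dropped_events_cpu(buffer, cpu);

		return dropped;
	}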
diff --git a/include/linux/trace_clock.h b/include/linux/trace_clock.h
index 4eb490237d4c..d563f37e1a1d 100644
--- a/include/linux/trace_clock.h
+++ b/include/linux/trace_clock.h
@@ -12,6 +12,8 @@
 #include <linux/compiler.h>
 #include <linux/types.h>
 
+#include <asm/trace_clock.h>
+
 extern u64 notrace trace_clock_local(void);
 extern u64 notrace trace_clock(void);
 extern u64 notrace trace_clock_global(void);
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index 24594571c5a3..4f628a6fc5b4 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -97,12 +97,12 @@ extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_con
 extern void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
 extern int uprobe_mmap(struct vm_area_struct *vma);
 extern void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+extern void uprobe_start_dup_mmap(void);
+extern void uprobe_end_dup_mmap(void);
 extern void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm);
 extern void uprobe_free_utask(struct task_struct *t);
 extern void uprobe_copy_process(struct task_struct *t);
 extern unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs);
-extern void __weak arch_uprobe_enable_step(struct arch_uprobe *arch);
-extern void __weak arch_uprobe_disable_step(struct arch_uprobe *arch);
 extern int uprobe_post_sstep_notifier(struct pt_regs *regs);
 extern int uprobe_pre_sstep_notifier(struct pt_regs *regs);
 extern void uprobe_notify_resume(struct pt_regs *regs);
@@ -129,6 +129,12 @@ static inline void
 uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
 }
+static inline void uprobe_start_dup_mmap(void)
+{
+}
+static inline void uprobe_end_dup_mmap(void)
+{
+}
 static inline void
 uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
 {
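uprobe_start_dup_mmap()/uprobe_end_dup_mmap() give the fork path a way to bracket the address-space copy so uprobe insertion and removal cannot race with it, and the CONFIG_UPROBES=n stubs keep callers unconditional. A sketch of the assumed call pattern around a dup_mmap()-style copy (simplified; the real locking in kernel/fork.c has more to it):

	uprobe_start_dup_mmap();
	down_write(&oldmm->mmap_sem);

	/* ... copy VMAs and page tables from oldmm into newmm ... */

	uprobe_dup_mmap(oldmm, newmm);
	up_write(&oldmm->mmap_sem);
	uprobe_end_dup_mmap();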
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index a763888a36f9..40dc5e8fe340 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -545,8 +545,7 @@ ftrace_raw_event_##call(void *__data, proto) \
 	{ assign; }							\
 									\
 	if (!filter_current_check_discard(buffer, event_call, entry, event)) \
-		trace_nowake_buffer_unlock_commit(buffer,		\
-						  event, irq_flags, pc); \
+		trace_buffer_unlock_commit(buffer, event, irq_flags, pc); \
 }
 /*
  * The ftrace_test_probe is compiled out, it is only here as a build time check
@@ -620,79 +619,6 @@ __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-/*
- * Define the insertion callback to perf events
- *
- * The job is very similar to ftrace_raw_event_<call> except that we don't
- * insert in the ring buffer but in a perf counter.
- *
- * static void ftrace_perf_<call>(proto)
- * {
- *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
- *	struct ftrace_event_call *event_call = &event_<call>;
- *	extern void perf_tp_event(int, u64, u64, void *, int);
- *	struct ftrace_raw_##call *entry;
- *	struct perf_trace_buf *trace_buf;
- *	u64 __addr = 0, __count = 1;
- *	unsigned long irq_flags;
- *	struct trace_entry *ent;
- *	int __entry_size;
- *	int __data_size;
- *	int __cpu
- *	int pc;
- *
- *	pc = preempt_count();
- *
- *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
- *
- *	// Below we want to get the aligned size by taking into account
- *	// the u32 field that will later store the buffer size
- *	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
- *			     sizeof(u64));
- *	__entry_size -= sizeof(u32);
- *
- *	// Protect the non nmi buffer
- *	// This also protects the rcu read side
- *	local_irq_save(irq_flags);
- *	__cpu = smp_processor_id();
- *
- *	if (in_nmi())
- *		trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
- *	else
- *		trace_buf = rcu_dereference_sched(perf_trace_buf);
- *
- *	if (!trace_buf)
- *		goto end;
- *
- *	trace_buf = per_cpu_ptr(trace_buf, __cpu);
- *
- *	// Avoid recursion from perf that could mess up the buffer
- *	if (trace_buf->recursion++)
- *		goto end_recursion;
- *
- *	raw_data = trace_buf->buf;
- *
- *	// Make recursion update visible before entering perf_tp_event
- *	// so that we protect from perf recursions.
- *
- *	barrier();
- *
- *	//zero dead bytes from alignment to avoid stack leak to userspace:
- *	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
- *	entry = (struct ftrace_raw_<call> *)raw_data;
- *	ent = &entry->ent;
- *	tracing_generic_entry_update(ent, irq_flags, pc);
- *	ent->type = event_call->id;
- *
- *	<tstruct> <- do some jobs with dynamic arrays
- *
- *	<assign>  <- affect our values
- *
- *	perf_tp_event(event_call->id, __addr, __count, entry,
- *		      __entry_size);  <- submit them to perf counter
- *
- * }
- */
 
 #ifdef CONFIG_PERF_EVENTS
 
diff --git a/include/trace/syscall.h b/include/trace/syscall.h
index 31966a4fb8cc..84bc4197e736 100644
--- a/include/trace/syscall.h
+++ b/include/trace/syscall.h
@@ -31,27 +31,4 @@ struct syscall_metadata {
 	struct ftrace_event_call *exit_event;
 };
 
-#ifdef CONFIG_FTRACE_SYSCALLS
-extern unsigned long arch_syscall_addr(int nr);
-extern int init_syscall_trace(struct ftrace_event_call *call);
-
-extern int reg_event_syscall_enter(struct ftrace_event_call *call);
-extern void unreg_event_syscall_enter(struct ftrace_event_call *call);
-extern int reg_event_syscall_exit(struct ftrace_event_call *call);
-extern void unreg_event_syscall_exit(struct ftrace_event_call *call);
-extern int
-ftrace_format_syscall(struct ftrace_event_call *call, struct trace_seq *s);
-enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags,
-				      struct trace_event *event);
-enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags,
-				     struct trace_event *event);
-#endif
-
-#ifdef CONFIG_PERF_EVENTS
-int perf_sysenter_enable(struct ftrace_event_call *call);
-void perf_sysenter_disable(struct ftrace_event_call *call);
-int perf_sysexit_enable(struct ftrace_event_call *call);
-void perf_sysexit_disable(struct ftrace_event_call *call);
-#endif
-
 #endif /* _TRACE_SYSCALL_H */