author	Linus Torvalds <torvalds@linux-foundation.org>	2010-05-27 18:23:47 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-05-27 18:23:47 -0400
commit	c5617b200ac52e35f7e8cf05a17b0a2d50f6b3e9 (patch)
tree	40d5e99660c77c5791392d349a93113c044dbf14 /include
parent	cad719d86e9dbd06634eaba6401e022c8101d6b2 (diff)
parent	49c177461bfbedeccbab22bf3905db2f9da7f1c3 (diff)
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (61 commits)
  tracing: Add __used annotation to event variable
  perf, trace: Fix !x86 build bug
  perf report: Support multiple events on the TUI
  perf annotate: Fix up usage of the build id cache
  x86/mmiotrace: Remove redundant instruction prefix checks
  perf annotate: Add TUI interface
  perf tui: Remove annotate from popup menu after failure
  perf report: Don't start the TUI if -D is used
  perf: Fix getline undeclared
  perf: Optimize perf_tp_event_match()
  perf: Remove more code from the fastpath
  perf: Optimize the !vmalloc backed buffer
  perf: Optimize perf_output_copy()
  perf: Fix wakeup storm for RO mmap()s
  perf-record: Share per-cpu buffers
  perf-record: Remove -M
  perf: Ensure that IOC_OUTPUT isn't used to create multi-writer buffers
  perf, trace: Optimize tracepoints by using per-tracepoint-per-cpu hlist to track events
  perf, trace: Optimize tracepoints by removing IRQ-disable from perf/tracepoint interaction
  perf tui: Allow disabling the TUI on a per command basis in ~/.perfconfig
  ...
Diffstat (limited to 'include')
-rw-r--r--	include/linux/ftrace_event.h	103
-rw-r--r--	include/linux/perf_event.h	28
-rw-r--r--	include/linux/syscalls.h	57
-rw-r--r--	include/linux/tracepoint.h	98
-rw-r--r--	include/trace/ftrace.h	249
-rw-r--r--	include/trace/syscall.h	10
6 files changed, 280 insertions, 265 deletions
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index c082f223e2fe..3167f2df4126 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -73,18 +73,25 @@ struct trace_iterator {
 };


+struct trace_event;
+
 typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
-					      int flags);
-struct trace_event {
-	struct hlist_node	node;
-	struct list_head	list;
-	int			type;
+					      int flags, struct trace_event *event);
+
+struct trace_event_functions {
 	trace_print_func	trace;
 	trace_print_func	raw;
 	trace_print_func	hex;
 	trace_print_func	binary;
 };

+struct trace_event {
+	struct hlist_node		node;
+	struct list_head		list;
+	int				type;
+	struct trace_event_functions	*funcs;
+};
+
 extern int register_ftrace_event(struct trace_event *event);
 extern int unregister_ftrace_event(struct trace_event *event);

@@ -116,28 +123,70 @@ void tracing_record_cmdline(struct task_struct *tsk);

 struct event_filter;

+enum trace_reg {
+	TRACE_REG_REGISTER,
+	TRACE_REG_UNREGISTER,
+	TRACE_REG_PERF_REGISTER,
+	TRACE_REG_PERF_UNREGISTER,
+};
+
+struct ftrace_event_call;
+
+struct ftrace_event_class {
+	char			*system;
+	void			*probe;
+#ifdef CONFIG_PERF_EVENTS
+	void			*perf_probe;
+#endif
+	int			(*reg)(struct ftrace_event_call *event,
+				       enum trace_reg type);
+	int			(*define_fields)(struct ftrace_event_call *);
+	struct list_head	*(*get_fields)(struct ftrace_event_call *);
+	struct list_head	fields;
+	int			(*raw_init)(struct ftrace_event_call *);
+};
+
+enum {
+	TRACE_EVENT_FL_ENABLED_BIT,
+	TRACE_EVENT_FL_FILTERED_BIT,
+};
+
+enum {
+	TRACE_EVENT_FL_ENABLED	= (1 << TRACE_EVENT_FL_ENABLED_BIT),
+	TRACE_EVENT_FL_FILTERED	= (1 << TRACE_EVENT_FL_FILTERED_BIT),
+};
+
 struct ftrace_event_call {
 	struct list_head	list;
+	struct ftrace_event_class *class;
 	char			*name;
-	char			*system;
 	struct dentry		*dir;
-	struct trace_event	*event;
-	int			enabled;
-	int			(*regfunc)(struct ftrace_event_call *);
-	void			(*unregfunc)(struct ftrace_event_call *);
-	int			id;
+	struct trace_event	event;
 	const char		*print_fmt;
-	int			(*raw_init)(struct ftrace_event_call *);
-	int			(*define_fields)(struct ftrace_event_call *);
-	struct list_head	fields;
-	int			filter_active;
 	struct event_filter	*filter;
 	void			*mod;
 	void			*data;

+	/*
+	 * 32 bit flags:
+	 *   bit 1:		enabled
+	 *   bit 2:		filter_active
+	 *
+	 * Changes to flags must hold the event_mutex.
+	 *
+	 * Note: Reads of flags do not hold the event_mutex since
+	 *	they occur in critical sections. But the way flags
+	 *	is currently used, these changes do no affect the code
+	 *	except that when a change is made, it may have a slight
+	 *	delay in propagating the changes to other CPUs due to
+	 *	caching and such.
+	 */
+	unsigned int		flags;
+
+#ifdef CONFIG_PERF_EVENTS
 	int			perf_refcount;
-	int			(*perf_event_enable)(struct ftrace_event_call *);
-	void			(*perf_event_disable)(struct ftrace_event_call *);
+	struct hlist_head	*perf_events;
+#endif
 };

 #define PERF_MAX_TRACE_SIZE	2048
@@ -194,24 +243,22 @@ struct perf_event;

 DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);

-extern int perf_trace_enable(int event_id);
-extern void perf_trace_disable(int event_id);
-extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
+extern int  perf_trace_init(struct perf_event *event);
+extern void perf_trace_destroy(struct perf_event *event);
+extern int  perf_trace_enable(struct perf_event *event);
+extern void perf_trace_disable(struct perf_event *event);
+extern int  ftrace_profile_set_filter(struct perf_event *event, int event_id,
				     char *filter_str);
 extern void ftrace_profile_free_filter(struct perf_event *event);
-extern void *
-perf_trace_buf_prepare(int size, unsigned short type, int *rctxp,
-		       unsigned long *irq_flags);
+extern void *perf_trace_buf_prepare(int size, unsigned short type,
+				    struct pt_regs *regs, int *rctxp);

 static inline void
 perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
-		      u64 count, unsigned long irq_flags, struct pt_regs *regs)
+		      u64 count, struct pt_regs *regs, void *head)
 {
-	struct trace_entry *entry = raw_data;
-
-	perf_tp_event(entry->type, addr, count, raw_data, size, regs);
+	perf_tp_event(addr, count, raw_data, size, regs, head);
 	perf_swevent_put_recursion_context(rctx);
-	local_irq_restore(irq_flags);
 }
 #endif

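The ftrace_event.h changes above split the old per-event struct trace_event into a shared callback table (struct trace_event_functions) plus a small per-event struct trace_event that is now embedded by value in struct ftrace_event_call. The following is a minimal standalone C sketch of that ownership pattern, not kernel code: the callback signature is simplified to one argument, container_of() is redefined locally, and the "sched_switch" name and type value are made up so the example compiles and runs on its own.

/* Standalone mock (not kernel code): the print callback only receives the
 * embedded trace_event and recovers its owning ftrace_event_call with
 * container_of(), which is why the member is embedded rather than pointed to.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct trace_event;			/* forward declaration, as in ftrace_event.h */

typedef int (*trace_print_func)(struct trace_event *event);	/* simplified */

struct trace_event_functions {		/* shared per event class */
	trace_print_func trace;
};

struct trace_event {			/* per event: type id + callback table */
	int type;
	struct trace_event_functions *funcs;
};

struct ftrace_event_call {		/* per event: embeds trace_event by value */
	const char *name;
	struct trace_event event;
};

static int mock_output(struct trace_event *ev)
{
	/* Recover the owning event from the embedded member. */
	struct ftrace_event_call *call =
		container_of(ev, struct ftrace_event_call, event);
	return printf("%s: type=%d\n", call->name, ev->type);
}

static struct trace_event_functions mock_funcs = { .trace = mock_output };

int main(void)
{
	struct ftrace_event_call call = {
		.name  = "sched_switch",
		.event = { .type = 42, .funcs = &mock_funcs },
	};

	call.event.funcs->trace(&call.event);	/* prints "sched_switch: type=42" */
	return 0;
}

Because the callback table is shared, events generated from the same class no longer need their own struct trace_event and the per-event footprint shrinks, which is part of what the diffstat's net deletion in ftrace.h reflects.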
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 3fd5c82e0e18..fb6c91eac7e3 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -485,6 +485,7 @@ struct perf_guest_info_callbacks {
 #include <linux/ftrace.h>
 #include <linux/cpu.h>
 #include <asm/atomic.h>
+#include <asm/local.h>

 #define PERF_MAX_STACK_DEPTH		255

@@ -587,21 +588,19 @@ struct perf_mmap_data {
 	struct rcu_head			rcu_head;
 #ifdef CONFIG_PERF_USE_VMALLOC
 	struct work_struct		work;
+	int				page_order;	/* allocation order  */
 #endif
-	int				data_order;
 	int				nr_pages;	/* nr of data pages  */
 	int				writable;	/* are we writable   */
 	int				nr_locked;	/* nr pages mlocked  */

 	atomic_t			poll;		/* POLL_ for wakeups */
-	atomic_t			events;		/* event_id limit    */

-	atomic_long_t			head;		/* write position    */
-	atomic_long_t			done_head;	/* completed head    */
-
-	atomic_t			lock;		/* concurrent writes */
-	atomic_t			wakeup;		/* needs a wakeup    */
-	atomic_t			lost;		/* nr records lost   */
+	local_t				head;		/* write position    */
+	local_t				nest;		/* nested writers    */
+	local_t				events;		/* event limit       */
+	local_t				wakeup;		/* wakeup stamp      */
+	local_t				lost;		/* nr records lost   */

 	long				watermark;	/* wakeup watermark  */

@@ -728,6 +727,7 @@ struct perf_event {
 	perf_overflow_handler_t		overflow_handler;

 #ifdef CONFIG_EVENT_TRACING
+	struct ftrace_event_call	*tp_event;
 	struct event_filter		*filter;
 #endif

@@ -803,11 +803,12 @@ struct perf_cpu_context {
 struct perf_output_handle {
 	struct perf_event		*event;
 	struct perf_mmap_data		*data;
-	unsigned long			head;
-	unsigned long			offset;
+	unsigned long			wakeup;
+	unsigned long			size;
+	void				*addr;
+	int				page;
 	int				nmi;
 	int				sample;
-	int				locked;
 };

 #ifdef CONFIG_PERF_EVENTS
@@ -993,8 +994,9 @@ static inline bool perf_paranoid_kernel(void)
 }

 extern void perf_event_init(void);
-extern void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
-			  int entry_size, struct pt_regs *regs);
+extern void perf_tp_event(u64 addr, u64 count, void *record,
+			  int entry_size, struct pt_regs *regs,
+			  struct hlist_head *head);
 extern void perf_bp_event(struct perf_event *event, void *data);

 #ifndef perf_misc_flags
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 057929b0a651..a1a86a53bc73 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -103,22 +103,6 @@ struct perf_event_attr;
 #define __SC_TEST5(t5, a5, ...)	__SC_TEST(t5); __SC_TEST4(__VA_ARGS__)
 #define __SC_TEST6(t6, a6, ...)	__SC_TEST(t6); __SC_TEST5(__VA_ARGS__)

-#ifdef CONFIG_PERF_EVENTS
-
-#define TRACE_SYS_ENTER_PERF_INIT(sname)				\
-	.perf_event_enable = perf_sysenter_enable,			\
-	.perf_event_disable = perf_sysenter_disable,
-
-#define TRACE_SYS_EXIT_PERF_INIT(sname)					\
-	.perf_event_enable = perf_sysexit_enable,			\
-	.perf_event_disable = perf_sysexit_disable,
-#else
-#define TRACE_SYS_ENTER_PERF(sname)
-#define TRACE_SYS_ENTER_PERF_INIT(sname)
-#define TRACE_SYS_EXIT_PERF(sname)
-#define TRACE_SYS_EXIT_PERF_INIT(sname)
-#endif /* CONFIG_PERF_EVENTS */
-
 #ifdef CONFIG_FTRACE_SYSCALLS
 #define __SC_STR_ADECL1(t, a)		#a
 #define __SC_STR_ADECL2(t, a, ...)	#a, __SC_STR_ADECL1(__VA_ARGS__)
@@ -134,54 +118,43 @@ struct perf_event_attr;
 #define __SC_STR_TDECL5(t, a, ...)	#t, __SC_STR_TDECL4(__VA_ARGS__)
 #define __SC_STR_TDECL6(t, a, ...)	#t, __SC_STR_TDECL5(__VA_ARGS__)

+extern struct ftrace_event_class event_class_syscall_enter;
+extern struct ftrace_event_class event_class_syscall_exit;
+extern struct trace_event_functions enter_syscall_print_funcs;
+extern struct trace_event_functions exit_syscall_print_funcs;
+
 #define SYSCALL_TRACE_ENTER_EVENT(sname)				\
-	static const struct syscall_metadata __syscall_meta_##sname;	\
+	static struct syscall_metadata __syscall_meta_##sname;		\
 	static struct ftrace_event_call					\
 	__attribute__((__aligned__(4))) event_enter_##sname;		\
-	static struct trace_event enter_syscall_print_##sname = {	\
-		.trace		= print_syscall_enter,			\
-	};								\
 	static struct ftrace_event_call __used				\
 	  __attribute__((__aligned__(4)))				\
 	  __attribute__((section("_ftrace_events")))			\
 	  event_enter_##sname = {					\
 		.name		= "sys_enter"#sname,			\
-		.system		= "syscalls",				\
-		.event		= &enter_syscall_print_##sname,		\
-		.raw_init	= init_syscall_trace,			\
-		.define_fields	= syscall_enter_define_fields,		\
-		.regfunc	= reg_event_syscall_enter,		\
-		.unregfunc	= unreg_event_syscall_enter,		\
+		.class		= &event_class_syscall_enter,		\
+		.event.funcs	= &enter_syscall_print_funcs,		\
 		.data		= (void *)&__syscall_meta_##sname,	\
-		TRACE_SYS_ENTER_PERF_INIT(sname)			\
 	}

 #define SYSCALL_TRACE_EXIT_EVENT(sname)					\
-	static const struct syscall_metadata __syscall_meta_##sname;	\
+	static struct syscall_metadata __syscall_meta_##sname;		\
 	static struct ftrace_event_call					\
 	__attribute__((__aligned__(4))) event_exit_##sname;		\
-	static struct trace_event exit_syscall_print_##sname = {	\
-		.trace		= print_syscall_exit,			\
-	};								\
 	static struct ftrace_event_call __used				\
 	  __attribute__((__aligned__(4)))				\
 	  __attribute__((section("_ftrace_events")))			\
 	  event_exit_##sname = {					\
 		.name		= "sys_exit"#sname,			\
-		.system		= "syscalls",				\
-		.event		= &exit_syscall_print_##sname,		\
-		.raw_init	= init_syscall_trace,			\
-		.define_fields	= syscall_exit_define_fields,		\
-		.regfunc	= reg_event_syscall_exit,		\
-		.unregfunc	= unreg_event_syscall_exit,		\
+		.class		= &event_class_syscall_exit,		\
+		.event.funcs	= &exit_syscall_print_funcs,		\
 		.data		= (void *)&__syscall_meta_##sname,	\
-		TRACE_SYS_EXIT_PERF_INIT(sname)				\
 	}

 #define SYSCALL_METADATA(sname, nb)				\
 	SYSCALL_TRACE_ENTER_EVENT(sname);			\
 	SYSCALL_TRACE_EXIT_EVENT(sname);			\
-	static const struct syscall_metadata __used		\
+	static struct syscall_metadata __used			\
 	  __attribute__((__aligned__(4)))			\
 	  __attribute__((section("__syscalls_metadata")))	\
 	  __syscall_meta_##sname = {				\
@@ -191,12 +164,14 @@ struct perf_event_attr;
 		.args		= args_##sname,			\
 		.enter_event	= &event_enter_##sname,		\
 		.exit_event	= &event_exit_##sname,		\
+		.enter_fields	= LIST_HEAD_INIT(__syscall_meta_##sname.enter_fields), \
+		.exit_fields	= LIST_HEAD_INIT(__syscall_meta_##sname.exit_fields), \
 	};

 #define SYSCALL_DEFINE0(sname)					\
 	SYSCALL_TRACE_ENTER_EVENT(_##sname);			\
 	SYSCALL_TRACE_EXIT_EVENT(_##sname);			\
-	static const struct syscall_metadata __used		\
+	static struct syscall_metadata __used			\
 	  __attribute__((__aligned__(4)))			\
 	  __attribute__((section("__syscalls_metadata")))	\
 	  __syscall_meta__##sname = {				\
@@ -204,6 +179,8 @@ struct perf_event_attr;
 		.nb_args	= 0,				\
 		.enter_event	= &event_enter__##sname,	\
 		.exit_event	= &event_exit__##sname,		\
+		.enter_fields	= LIST_HEAD_INIT(__syscall_meta__##sname.enter_fields), \
+		.exit_fields	= LIST_HEAD_INIT(__syscall_meta__##sname.exit_fields), \
 	};							\
 	asmlinkage long sys_##sname(void)
 #else
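The syscalls.h hunks drop the per-event register/unregister and print plumbing in favour of the shared syscall event classes, and statically initialize the new enter_fields/exit_fields list heads inside the syscall metadata, which is no longer const (consistent with it now carrying live list heads). Below is a small standalone sketch of that LIST_HEAD_INIT pattern; struct list_head, the macro, and the "sys_getpid" metadata instance are trimmed-down local stand-ins written for this example, not the kernel definitions, though they mirror them.

/* Standalone sketch (not kernel code): an embedded list head statically
 * initialized to point back at itself, i.e. an empty list, exactly what
 * LIST_HEAD_INIT(__syscall_meta_##sname.enter_fields) expands to.
 */
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name)	{ &(name), &(name) }

struct syscall_metadata {		/* trimmed-down stand-in */
	const char *name;
	struct list_head enter_fields;
	struct list_head exit_fields;
};

static struct syscall_metadata __syscall_meta_getpid = {
	.name	      = "sys_getpid",
	.enter_fields = LIST_HEAD_INIT(__syscall_meta_getpid.enter_fields),
	.exit_fields  = LIST_HEAD_INIT(__syscall_meta_getpid.exit_fields),
};

int main(void)
{
	struct list_head *h = &__syscall_meta_getpid.enter_fields;

	/* An empty list: both links point at the head itself. */
	printf("enter_fields empty: %d\n", h->next == h && h->prev == h);
	return 0;
}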
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 1d85f9a6a199..9a59d1f98cd4 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -20,12 +20,17 @@
 struct module;
 struct tracepoint;

+struct tracepoint_func {
+	void *func;
+	void *data;
+};
+
 struct tracepoint {
 	const char *name;		/* Tracepoint name */
 	int state;			/* State. */
 	void (*regfunc)(void);
 	void (*unregfunc)(void);
-	void **funcs;
+	struct tracepoint_func *funcs;
 } __attribute__((aligned(32)));		/*
 					 * Aligned on 32 bytes because it is
 					 * globally visible and gcc happily
@@ -37,16 +42,19 @@ struct tracepoint {
  * Connect a probe to a tracepoint.
  * Internal API, should not be used directly.
  */
-extern int tracepoint_probe_register(const char *name, void *probe);
+extern int tracepoint_probe_register(const char *name, void *probe, void *data);

 /*
  * Disconnect a probe from a tracepoint.
  * Internal API, should not be used directly.
  */
-extern int tracepoint_probe_unregister(const char *name, void *probe);
+extern int
+tracepoint_probe_unregister(const char *name, void *probe, void *data);

-extern int tracepoint_probe_register_noupdate(const char *name, void *probe);
-extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe);
+extern int tracepoint_probe_register_noupdate(const char *name, void *probe,
+					      void *data);
+extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe,
+						void *data);
 extern void tracepoint_probe_update_all(void);

 struct tracepoint_iter {
@@ -102,17 +110,27 @@ static inline void tracepoint_update_probe_range(struct tracepoint *begin,
 /*
  * it_func[0] is never NULL because there is at least one element in the array
  * when the array itself is non NULL.
+ *
+ * Note, the proto and args passed in includes "__data" as the first parameter.
+ * The reason for this is to handle the "void" prototype. If a tracepoint
+ * has a "void" prototype, then it is invalid to declare a function
+ * as "(void *, void)". The DECLARE_TRACE_NOARGS() will pass in just
+ * "void *data", where as the DECLARE_TRACE() will pass in "void *data, proto".
  */
 #define __DO_TRACE(tp, proto, args)					\
 	do {								\
-		void **it_func;						\
+		struct tracepoint_func *it_func_ptr;			\
+		void *it_func;						\
+		void *__data;						\
 									\
 		rcu_read_lock_sched_notrace();				\
-		it_func = rcu_dereference_sched((tp)->funcs);		\
-		if (it_func) {						\
+		it_func_ptr = rcu_dereference_sched((tp)->funcs);	\
+		if (it_func_ptr) {					\
 			do {						\
-				((void(*)(proto))(*it_func))(args);	\
-			} while (*(++it_func));				\
+				it_func = (it_func_ptr)->func;		\
+				__data = (it_func_ptr)->data;		\
+				((void(*)(proto))(it_func))(args);	\
+			} while ((++it_func_ptr)->func);		\
 		}							\
 		rcu_read_unlock_sched_notrace();			\
 	} while (0)
@@ -122,24 +140,32 @@ static inline void tracepoint_update_probe_range(struct tracepoint *begin,
  * not add unwanted padding between the beginning of the section and the
  * structure. Force alignment to the same alignment as the section start.
  */
-#define DECLARE_TRACE(name, proto, args)				\
+#define __DECLARE_TRACE(name, proto, args, data_proto, data_args)	\
 	extern struct tracepoint __tracepoint_##name;			\
 	static inline void trace_##name(proto)				\
 	{								\
 		if (unlikely(__tracepoint_##name.state))		\
 			__DO_TRACE(&__tracepoint_##name,		\
-				TP_PROTO(proto), TP_ARGS(args));	\
+				TP_PROTO(data_proto),			\
+				TP_ARGS(data_args));			\
+	}								\
+	static inline int						\
+	register_trace_##name(void (*probe)(data_proto), void *data)	\
+	{								\
+		return tracepoint_probe_register(#name, (void *)probe,	\
+						 data);			\
 	}								\
-	static inline int register_trace_##name(void (*probe)(proto))	\
+	static inline int						\
+	unregister_trace_##name(void (*probe)(data_proto), void *data)	\
 	{								\
-		return tracepoint_probe_register(#name, (void *)probe);	\
+		return tracepoint_probe_unregister(#name, (void *)probe,\
+						   data);		\
 	}								\
-	static inline int unregister_trace_##name(void (*probe)(proto))	\
+	static inline void						\
+	check_trace_callback_type_##name(void (*cb)(data_proto))	\
 	{								\
-		return tracepoint_probe_unregister(#name, (void *)probe);\
 	}

-
 #define DEFINE_TRACE_FN(name, reg, unreg)				\
 	static const char __tpstrtab_##name[]				\
 	__attribute__((section("__tracepoints_strings"))) = #name;	\
@@ -156,18 +182,23 @@ static inline void tracepoint_update_probe_range(struct tracepoint *begin,
 	EXPORT_SYMBOL(__tracepoint_##name)

 #else /* !CONFIG_TRACEPOINTS */
-#define DECLARE_TRACE(name, proto, args)				\
-	static inline void _do_trace_##name(struct tracepoint *tp, proto) \
-	{ }								\
+#define __DECLARE_TRACE(name, proto, args, data_proto, data_args)	\
 	static inline void trace_##name(proto)				\
 	{ }								\
-	static inline int register_trace_##name(void (*probe)(proto))	\
+	static inline int						\
+	register_trace_##name(void (*probe)(data_proto),		\
+			      void *data)				\
 	{								\
 		return -ENOSYS;						\
 	}								\
-	static inline int unregister_trace_##name(void (*probe)(proto))	\
+	static inline int						\
+	unregister_trace_##name(void (*probe)(data_proto),		\
+				void *data)				\
 	{								\
 		return -ENOSYS;						\
+	}								\
+	static inline void check_trace_callback_type_##name(void (*cb)(data_proto)) \
+	{								\
 	}

 #define DEFINE_TRACE_FN(name, reg, unreg)
@@ -176,6 +207,29 @@ static inline void tracepoint_update_probe_range(struct tracepoint *begin,
 #define EXPORT_TRACEPOINT_SYMBOL(name)

 #endif /* CONFIG_TRACEPOINTS */
+
+/*
+ * The need for the DECLARE_TRACE_NOARGS() is to handle the prototype
+ * (void). "void" is a special value in a function prototype and can
+ * not be combined with other arguments. Since the DECLARE_TRACE()
+ * macro adds a data element at the beginning of the prototype,
+ * we need a way to differentiate "(void *data, proto)" from
+ * "(void *data, void)". The second prototype is invalid.
+ *
+ * DECLARE_TRACE_NOARGS() passes "void" as the tracepoint prototype
+ * and "void *__data" as the callback prototype.
+ *
+ * DECLARE_TRACE() passes "proto" as the tracepoint protoype and
+ * "void *__data, proto" as the callback prototype.
+ */
+#define DECLARE_TRACE_NOARGS(name)					\
+		__DECLARE_TRACE(name, void, , void *__data, __data)
+
+#define DECLARE_TRACE(name, proto, args)				\
+		__DECLARE_TRACE(name, PARAMS(proto), PARAMS(args),	\
+				PARAMS(void *__data, proto),		\
+				PARAMS(__data, args))
+
 #endif /* DECLARE_TRACE */

 #ifndef TRACE_EVENT
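The tracepoint.h changes replace the bare probe array with struct tracepoint_func pairs so that every registered probe carries private data, passed back as the callback's first argument; __DO_TRACE now walks that array until it reaches an entry with a NULL func. The following standalone C sketch mirrors that loop outside the kernel; the probe name, the int payload, and the do_trace() wrapper are invented for the example and are not the real macro expansion.

/* Standalone sketch (not kernel code) of the probe loop that __DO_TRACE
 * expands to after this merge: each probe is a (func, data) pair and the
 * data pointer is handed back as the first callback argument.
 */
#include <stdio.h>

struct tracepoint_func {
	void *func;
	void *data;
};

/* Example probe with the "void *__data, proto" shape used by __DECLARE_TRACE. */
static void my_probe(void *data, int value)
{
	printf("%s: value=%d\n", (const char *)data, value);
}

static struct tracepoint_func funcs[] = {
	{ .func = (void *)my_probe, .data = "probe-A" },
	{ .func = (void *)my_probe, .data = "probe-B" },
	{ .func = NULL },			/* NULL func terminates the array */
};

static void do_trace(struct tracepoint_func *it_func_ptr, int value)
{
	void *it_func, *__data;

	if (it_func_ptr) {
		do {
			it_func = it_func_ptr->func;
			__data  = it_func_ptr->data;
			((void (*)(void *, int))it_func)(__data, value);
		} while ((++it_func_ptr)->func);
	}
}

int main(void)
{
	do_trace(funcs, 7);	/* fires both probes with their own data */
	return 0;
}

Carrying the data pointer in the tracepoint itself is what lets ftrace and perf share one generated probe per event class instead of one per event, since the per-event ftrace_event_call is simply passed in as the probe's data.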
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 88c59c13ea7b..3d685d1f2a03 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -62,10 +62,13 @@
 	struct trace_entry	ent;				\
 	tstruct							\
 	char			__data[0];			\
-};
+};								\
+								\
+static struct ftrace_event_class event_class_##name;
+
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, name, proto, args)	\
-	static struct ftrace_event_call			\
+	static struct ftrace_event_call	__used		\
 	__attribute__((__aligned__(4))) event_##name

 #undef DEFINE_EVENT_PRINT
@@ -147,7 +150,7 @@
 *
 *	entry = iter->ent;
 *
- *	if (entry->type != event_<call>.id) {
+ *	if (entry->type != event_<call>->event.type) {
 *		WARN_ON_ONCE(1);
 *		return TRACE_TYPE_UNHANDLED;
 *	}
@@ -206,18 +209,22 @@
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
 static notrace enum print_line_t					\
-ftrace_raw_output_id_##call(int event_id, const char *name,		\
-			    struct trace_iterator *iter, int flags)	\
+ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
+			 struct trace_event *trace_event)		\
 {									\
+	struct ftrace_event_call *event;				\
 	struct trace_seq *s = &iter->seq;				\
 	struct ftrace_raw_##call *field;				\
 	struct trace_entry *entry;					\
 	struct trace_seq *p;						\
 	int ret;							\
 									\
+	event = container_of(trace_event, struct ftrace_event_call,	\
+			     event);					\
+									\
 	entry = iter->ent;						\
 									\
-	if (entry->type != event_id) {					\
+	if (entry->type != event->event.type) {				\
 		WARN_ON_ONCE(1);					\
 		return TRACE_TYPE_UNHANDLED;				\
 	}								\
@@ -226,7 +233,7 @@ ftrace_raw_output_id_##call(int event_id, const char *name, \
 									\
 	p = &get_cpu_var(ftrace_event_seq);				\
 	trace_seq_init(p);						\
-	ret = trace_seq_printf(s, "%s: ", name);			\
+	ret = trace_seq_printf(s, "%s: ", event->name);			\
 	if (ret)							\
 		ret = trace_seq_printf(s, print);			\
 	put_cpu();							\
@@ -234,21 +241,16 @@ ftrace_raw_output_id_##call(int event_id, const char *name, \
 		return TRACE_TYPE_PARTIAL_LINE;				\
 									\
 	return TRACE_TYPE_HANDLED;					\
-}
-
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, name, proto, args)			\
-static notrace enum print_line_t					\
-ftrace_raw_output_##name(struct trace_iterator *iter, int flags)	\
-{									\
-	return ftrace_raw_output_id_##template(event_##name.id,	\
-					       #name, iter, flags);	\
-}
+}									\
+static struct trace_event_functions ftrace_event_type_funcs_##call = {	\
+	.trace			= ftrace_raw_output_##call,		\
+};

 #undef DEFINE_EVENT_PRINT
 #define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
 static notrace enum print_line_t					\
-ftrace_raw_output_##call(struct trace_iterator *iter, int flags)	\
+ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
+			 struct trace_event *event)			\
 {									\
 	struct trace_seq *s = &iter->seq;				\
 	struct ftrace_raw_##template *field;				\
@@ -258,7 +260,7 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
 									\
 	entry = iter->ent;						\
 									\
-	if (entry->type != event_##call.id) {				\
+	if (entry->type != event_##call.event.type) {			\
 		WARN_ON_ONCE(1);					\
 		return TRACE_TYPE_UNHANDLED;				\
 	}								\
@@ -275,7 +277,10 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
 		return TRACE_TYPE_PARTIAL_LINE;				\
 									\
 	return TRACE_TYPE_HANDLED;					\
-}
+}									\
+static struct trace_event_functions ftrace_event_type_funcs_##call = {	\
+	.trace			= ftrace_raw_output_##call,		\
+};

 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

@@ -381,80 +386,18 @@ static inline notrace int ftrace_get_offsets_##call( \

 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

-#ifdef CONFIG_PERF_EVENTS
-
-/*
- * Generate the functions needed for tracepoint perf_event support.
- *
- * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
- *
- * static int ftrace_profile_enable_<call>(void)
- * {
- * 	return register_trace_<call>(ftrace_profile_<call>);
- * }
- *
- * static void ftrace_profile_disable_<call>(void)
- * {
- * 	unregister_trace_<call>(ftrace_profile_<call>);
- * }
- *
- */
-
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)
-
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, name, proto, args)			\
-									\
-static void perf_trace_##name(proto);					\
-									\
-static notrace int							\
-perf_trace_enable_##name(struct ftrace_event_call *unused)		\
-{									\
-	return register_trace_##name(perf_trace_##name);		\
-}									\
-									\
-static notrace void							\
-perf_trace_disable_##name(struct ftrace_event_call *unused)		\
-{									\
-	unregister_trace_##name(perf_trace_##name);			\
-}
-
-#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print)		\
-	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
-
-#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-
-#endif /* CONFIG_PERF_EVENTS */
-
 /*
  * Stage 4 of the trace events.
  *
  * Override the macros in <trace/trace_events.h> to include the following:
  *
- * static void ftrace_event_<call>(proto)
- * {
- *	event_trace_printk(_RET_IP_, "<call>: " <fmt>);
- * }
- *
- * static int ftrace_reg_event_<call>(struct ftrace_event_call *unused)
- * {
- *	return register_trace_<call>(ftrace_event_<call>);
- * }
- *
- * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
- * {
- *	unregister_trace_<call>(ftrace_event_<call>);
- * }
- *
- *
  * For those macros defined with TRACE_EVENT:
  *
  * static struct ftrace_event_call event_<call>;
  *
- * static void ftrace_raw_event_<call>(proto)
+ * static void ftrace_raw_event_<call>(void *__data, proto)
  * {
+ *	struct ftrace_event_call *event_call = __data;
  *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
  *	struct ring_buffer_event *event;
  *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
@@ -469,7 +412,7 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *	event = trace_current_buffer_lock_reserve(&buffer,
- *				  event_<call>.id,
+ *				  event_<call>->event.type,
 *				  sizeof(*entry) + __data_size,
 *				  irq_flags, pc);
 *	if (!event)
@@ -484,43 +427,42 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \
 *					event, irq_flags, pc);
 * }
 *
- * static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused)
- * {
- *	return register_trace_<call>(ftrace_raw_event_<call>);
- * }
- *
- * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
- * {
- *	unregister_trace_<call>(ftrace_raw_event_<call>);
- * }
- *
 * static struct trace_event ftrace_event_type_<call> = {
 *	.trace			= ftrace_raw_output_<call>, <-- stage 2
 * };
 *
 * static const char print_fmt_<call>[] = <TP_printk>;
 *
+ * static struct ftrace_event_class __used event_class_<template> = {
+ *	.system			= "<system>",
+ *	.define_fields		= ftrace_define_fields_<call>,
+ *	.fields			= LIST_HEAD_INIT(event_class_##call.fields),
+ *	.raw_init		= trace_event_raw_init,
+ *	.probe			= ftrace_raw_event_##call,
+ * };
+ *
 * static struct ftrace_event_call __used
 * __attribute__((__aligned__(4)))
 * __attribute__((section("_ftrace_events"))) event_<call> = {
 *	.name			= "<call>",
- *	.system			= "<system>",
- *	.raw_init		= trace_event_raw_init,
- *	.regfunc		= ftrace_reg_event_<call>,
- *	.unregfunc		= ftrace_unreg_event_<call>,
+ *	.class			= event_class_<template>,
+ *	.event			= &ftrace_event_type_<call>,
 *	.print_fmt		= print_fmt_<call>,
- *	.define_fields		= ftrace_define_fields_<call>,
- * }
+ * };
 *
 */

 #ifdef CONFIG_PERF_EVENTS

+#define _TRACE_PERF_PROTO(call, proto)					\
+	static notrace void						\
+	perf_trace_##call(void *__data, proto);
+
 #define _TRACE_PERF_INIT(call)						\
-	.perf_event_enable = perf_trace_enable_##call,			\
-	.perf_event_disable = perf_trace_disable_##call,
+	.perf_probe		= perf_trace_##call,

 #else
+#define _TRACE_PERF_PROTO(call, proto)
 #define _TRACE_PERF_INIT(call)
 #endif /* CONFIG_PERF_EVENTS */

@@ -554,9 +496,9 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
 									\
 static notrace void							\
-ftrace_raw_event_id_##call(struct ftrace_event_call *event_call,	\
-			   proto)					\
+ftrace_raw_event_##call(void *__data, proto)				\
 {									\
+	struct ftrace_event_call *event_call = __data;			\
 	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
 	struct ring_buffer_event *event;				\
 	struct ftrace_raw_##call *entry;				\
@@ -571,7 +513,7 @@ ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
 	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
 									\
 	event = trace_current_buffer_lock_reserve(&buffer,		\
-				 event_call->id,			\
+				 event_call->event.type,		\
 				 sizeof(*entry) + __data_size,		\
 				 irq_flags, pc);			\
 	if (!event)							\
@@ -586,34 +528,21 @@ ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
 	trace_nowake_buffer_unlock_commit(buffer,			\
 					  event, irq_flags, pc);	\
 }
+/*
+ * The ftrace_test_probe is compiled out, it is only here as a build time check
+ * to make sure that if the tracepoint handling changes, the ftrace probe will
+ * fail to compile unless it too is updated.
+ */

 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, call, proto, args)			\
-									\
-static notrace void ftrace_raw_event_##call(proto)			\
-{									\
-	ftrace_raw_event_id_##template(&event_##call, args);		\
-}									\
-									\
-static notrace int							\
-ftrace_raw_reg_event_##call(struct ftrace_event_call *unused)		\
+static inline void ftrace_test_probe_##call(void)			\
 {									\
-	return register_trace_##call(ftrace_raw_event_##call);		\
-}									\
-									\
-static notrace void							\
-ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused)		\
-{									\
-	unregister_trace_##call(ftrace_raw_event_##call);		\
-}									\
-									\
-static struct trace_event ftrace_event_type_##call = {			\
-	.trace			= ftrace_raw_output_##call,		\
-};
+	check_trace_callback_type_##call(ftrace_raw_event_##template);	\
+}

 #undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print)		\
-	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print)

 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

@@ -630,7 +559,16 @@ static struct trace_event ftrace_event_type_##call = { \

 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
-static const char print_fmt_##call[] = print;
+_TRACE_PERF_PROTO(call, PARAMS(proto));					\
+static const char print_fmt_##call[] = print;				\
+static struct ftrace_event_class __used event_class_##call = {		\
+	.system			= __stringify(TRACE_SYSTEM),		\
+	.define_fields		= ftrace_define_fields_##call,		\
+	.fields			= LIST_HEAD_INIT(event_class_##call.fields),\
+	.raw_init		= trace_event_raw_init,			\
+	.probe			= ftrace_raw_event_##call,		\
+	_TRACE_PERF_INIT(call)						\
+};

 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, call, proto, args)			\
@@ -639,15 +577,10 @@ static struct ftrace_event_call __used \
 __attribute__((__aligned__(4)))						\
 __attribute__((section("_ftrace_events"))) event_##call = {		\
 	.name			= #call,				\
-	.system			= __stringify(TRACE_SYSTEM),		\
-	.event			= &ftrace_event_type_##call,		\
-	.raw_init		= trace_event_raw_init,			\
-	.regfunc		= ftrace_raw_reg_event_##call,		\
-	.unregfunc		= ftrace_raw_unreg_event_##call,	\
+	.class			= &event_class_##template,		\
+	.event.funcs		= &ftrace_event_type_funcs_##template,	\
 	.print_fmt		= print_fmt_##template,			\
-	.define_fields		= ftrace_define_fields_##template,	\
-	_TRACE_PERF_INIT(call)						\
-}
+};

 #undef DEFINE_EVENT_PRINT
 #define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
@@ -658,14 +591,9 @@ static struct ftrace_event_call __used \
 __attribute__((__aligned__(4)))						\
 __attribute__((section("_ftrace_events"))) event_##call = {		\
 	.name			= #call,				\
-	.system			= __stringify(TRACE_SYSTEM),		\
-	.event			= &ftrace_event_type_##call,		\
-	.raw_init		= trace_event_raw_init,			\
-	.regfunc		= ftrace_raw_reg_event_##call,		\
-	.unregfunc		= ftrace_raw_unreg_event_##call,	\
+	.class			= &event_class_##template,		\
+	.event.funcs		= &ftrace_event_type_funcs_##call,	\
 	.print_fmt		= print_fmt_##call,			\
-	.define_fields		= ftrace_define_fields_##template,	\
-	_TRACE_PERF_INIT(call)						\
 }

 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
@@ -765,17 +693,20 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
 static notrace void							\
-perf_trace_templ_##call(struct ftrace_event_call *event_call,		\
-			struct pt_regs *__regs, proto)			\
+perf_trace_##call(void *__data, proto)					\
 {									\
+	struct ftrace_event_call *event_call = __data;			\
 	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
 	struct ftrace_raw_##call *entry;				\
+	struct pt_regs __regs;						\
 	u64 __addr = 0, __count = 1;					\
-	unsigned long irq_flags;					\
+	struct hlist_head *head;					\
 	int __entry_size;						\
 	int __data_size;						\
 	int rctx;							\
 									\
+	perf_fetch_caller_regs(&__regs, 1);				\
+									\
 	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
 	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
 			     sizeof(u64));				\
@@ -784,32 +715,34 @@ perf_trace_templ_##call(struct ftrace_event_call *event_call, \
 	if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE,		\
 		      "profile buffer not large enough"))		\
 		return;							\
+									\
 	entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare(	\
-		__entry_size, event_call->id, &rctx, &irq_flags);	\
+		__entry_size, event_call->event.type, &__regs, &rctx);	\
 	if (!entry)							\
 		return;							\
+									\
 	tstruct								\
 									\
 	{ assign; }							\
 									\
+	head = per_cpu_ptr(event_call->perf_events, smp_processor_id());\
 	perf_trace_buf_submit(entry, __entry_size, rctx, __addr,	\
-			      __count, irq_flags, __regs);		\
+			      __count, &__regs, head);			\
 }

+/*
+ * This part is compiled out, it is only here as a build time check
+ * to make sure that if the tracepoint handling changes, the
+ * perf probe will fail to compile unless it too is updated.
+ */
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, call, proto, args)			\
-static notrace void perf_trace_##call(proto)				\
+static inline void perf_test_probe_##call(void)			\
 {									\
-	struct ftrace_event_call *event_call = &event_##call;		\
-	struct pt_regs *__regs = &get_cpu_var(perf_trace_regs);	\
-									\
-	perf_fetch_caller_regs(__regs, 1);				\
-									\
-	perf_trace_templ_##template(event_call, __regs, args);		\
-									\
-	put_cpu_var(perf_trace_regs);					\
+	check_trace_callback_type_##call(perf_trace_##template);	\
 }

+
 #undef DEFINE_EVENT_PRINT
 #define DEFINE_EVENT_PRINT(template, name, proto, args, print)		\
 	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
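In trace/ftrace.h the generated per-event register/unregister functions are gone; DEFINE_EVENT now only emits ftrace_test_probe_<call>() and perf_test_probe_<call>(), inline functions that are never executed and exist purely so the compiler checks the probe against the tracepoint prototype. Below is a standalone sketch of that build-time check idiom with an invented "sample" event and an int argument; it is an illustration of the pattern, not the real macro output.

/* Standalone illustration (not kernel code) of the compile-time callback
 * type check: the checker takes a function pointer of the expected shape,
 * so a probe with the wrong signature fails to compile.
 */
static void check_trace_callback_type_sample(void (*cb)(void *__data, int val))
{
	(void)cb;		/* compiled out; only the type check matters */
}

/* The probe generated for the event class. */
static void ftrace_raw_event_sample(void *__data, int val)
{
	(void)__data;
	(void)val;
}

/* Compiles only while the probe matches the tracepoint's callback prototype. */
static inline void ftrace_test_probe_sample(void)
{
	check_trace_callback_type_sample(ftrace_raw_event_sample);
}

int main(void)
{
	ftrace_test_probe_sample();	/* no-op; the kernel never calls its version */
	return 0;
}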
diff --git a/include/trace/syscall.h b/include/trace/syscall.h
index e5e5f48dbfb3..257e08960d7b 100644
--- a/include/trace/syscall.h
+++ b/include/trace/syscall.h
@@ -25,6 +25,8 @@ struct syscall_metadata {
 	int		nb_args;
 	const char	**types;
 	const char	**args;
+	struct list_head enter_fields;
+	struct list_head exit_fields;

 	struct ftrace_event_call *enter_event;
 	struct ftrace_event_call *exit_event;
@@ -34,16 +36,16 @@ struct syscall_metadata {
 extern unsigned long arch_syscall_addr(int nr);
 extern int init_syscall_trace(struct ftrace_event_call *call);

-extern int syscall_enter_define_fields(struct ftrace_event_call *call);
-extern int syscall_exit_define_fields(struct ftrace_event_call *call);
 extern int reg_event_syscall_enter(struct ftrace_event_call *call);
 extern void unreg_event_syscall_enter(struct ftrace_event_call *call);
 extern int reg_event_syscall_exit(struct ftrace_event_call *call);
 extern void unreg_event_syscall_exit(struct ftrace_event_call *call);
 extern int
 ftrace_format_syscall(struct ftrace_event_call *call, struct trace_seq *s);
-enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags);
-enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags);
+enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags,
+				      struct trace_event *event);
+enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags,
+				     struct trace_event *event);
 #endif

 #ifdef CONFIG_PERF_EVENTS