Diffstat (limited to 'include')
-rw-r--r--  include/linux/ftrace_event.h |  82
-rw-r--r--  include/linux/syscalls.h     |  57
-rw-r--r--  include/linux/tracepoint.h   |  98
-rw-r--r--  include/trace/ftrace.h       | 239
-rw-r--r--  include/trace/syscall.h      |  10
5 files changed, 250 insertions, 236 deletions
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 7024b7d1126f..ee8a8411b055 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -70,18 +70,25 @@ struct trace_iterator {
 };
 
 
+struct trace_event;
+
 typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
-				int flags);
-struct trace_event {
-	struct hlist_node	node;
-	struct list_head	list;
-	int			type;
+				int flags, struct trace_event *event);
+
+struct trace_event_functions {
 	trace_print_func	trace;
 	trace_print_func	raw;
 	trace_print_func	hex;
 	trace_print_func	binary;
 };
 
+struct trace_event {
+	struct hlist_node	node;
+	struct list_head	list;
+	int			type;
+	struct trace_event_functions	*funcs;
+};
+
 extern int register_ftrace_event(struct trace_event *event);
 extern int unregister_ftrace_event(struct trace_event *event);
 
@@ -113,29 +120,70 @@ void tracing_record_cmdline(struct task_struct *tsk);
 
 struct event_filter;
 
+enum trace_reg {
+	TRACE_REG_REGISTER,
+	TRACE_REG_UNREGISTER,
+	TRACE_REG_PERF_REGISTER,
+	TRACE_REG_PERF_UNREGISTER,
+};
+
+struct ftrace_event_call;
+
+struct ftrace_event_class {
+	char			*system;
+	void			*probe;
+#ifdef CONFIG_PERF_EVENTS
+	void			*perf_probe;
+#endif
+	int			(*reg)(struct ftrace_event_call *event,
+				       enum trace_reg type);
+	int			(*define_fields)(struct ftrace_event_call *);
+	struct list_head	*(*get_fields)(struct ftrace_event_call *);
+	struct list_head	fields;
+	int			(*raw_init)(struct ftrace_event_call *);
+};
+
+enum {
+	TRACE_EVENT_FL_ENABLED_BIT,
+	TRACE_EVENT_FL_FILTERED_BIT,
+};
+
+enum {
+	TRACE_EVENT_FL_ENABLED		= (1 << TRACE_EVENT_FL_ENABLED_BIT),
+	TRACE_EVENT_FL_FILTERED		= (1 << TRACE_EVENT_FL_FILTERED_BIT),
+};
+
 struct ftrace_event_call {
 	struct list_head	list;
+	struct ftrace_event_class *class;
 	char			*name;
-	char			*system;
 	struct dentry		*dir;
-	struct trace_event	*event;
-	int			enabled;
-	int			(*regfunc)(struct ftrace_event_call *);
-	void			(*unregfunc)(struct ftrace_event_call *);
-	int			id;
+	struct trace_event	event;
 	const char		*print_fmt;
-	int			(*raw_init)(struct ftrace_event_call *);
-	int			(*define_fields)(struct ftrace_event_call *);
-	struct list_head	fields;
-	int			filter_active;
 	struct event_filter	*filter;
 	void			*mod;
 	void			*data;
 
+	/*
+	 * 32 bit flags:
+	 *   bit 1:		enabled
+	 *   bit 2:		filter_active
+	 *
+	 * Changes to flags must hold the event_mutex.
+	 *
+	 * Note: Reads of flags do not hold the event_mutex since
+	 *  they occur in critical sections. But the way flags
+	 *  is currently used, these changes do no affect the code
+	 *  except that when a change is made, it may have a slight
+	 *  delay in propagating the changes to other CPUs due to
+	 *  caching and such.
+	 */
+	unsigned int		flags;
+
+#ifdef CONFIG_PERF_EVENTS
 	int			perf_refcount;
 	struct hlist_head	*perf_events;
-	int			(*perf_event_enable)(struct ftrace_event_call *);
-	void			(*perf_event_disable)(struct ftrace_event_call *);
+#endif
 };
 
 #define PERF_MAX_TRACE_SIZE	2048
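
Note on the ftrace_event.h change above: struct trace_event is now embedded in struct ftrace_event_call rather than pointed to, and the print callbacks move into a shared struct trace_event_functions. An output callback therefore receives the struct trace_event and can recover the enclosing call with container_of(). A minimal sketch of that pattern, mirroring what the trace/ftrace.h macros further down generate (the "foo" names are illustrative, not part of the patch):

    /* Sketch: recovering the ftrace_event_call from the embedded trace_event. */
    static enum print_line_t
    foo_trace_output(struct trace_iterator *iter, int flags,
                     struct trace_event *trace_event)
    {
            struct ftrace_event_call *call;

            /* "event" is the trace_event member embedded in ftrace_event_call */
            call = container_of(trace_event, struct ftrace_event_call, event);

            if (iter->ent->type != call->event.type)
                    return TRACE_TYPE_UNHANDLED;

            return TRACE_TYPE_HANDLED;
    }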
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 057929b0a651..a1a86a53bc73 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -103,22 +103,6 @@ struct perf_event_attr;
 #define __SC_TEST5(t5, a5, ...)	__SC_TEST(t5); __SC_TEST4(__VA_ARGS__)
 #define __SC_TEST6(t6, a6, ...)	__SC_TEST(t6); __SC_TEST5(__VA_ARGS__)
 
-#ifdef CONFIG_PERF_EVENTS
-
-#define TRACE_SYS_ENTER_PERF_INIT(sname) \
-	.perf_event_enable = perf_sysenter_enable, \
-	.perf_event_disable = perf_sysenter_disable,
-
-#define TRACE_SYS_EXIT_PERF_INIT(sname) \
-	.perf_event_enable = perf_sysexit_enable, \
-	.perf_event_disable = perf_sysexit_disable,
-#else
-#define TRACE_SYS_ENTER_PERF(sname)
-#define TRACE_SYS_ENTER_PERF_INIT(sname)
-#define TRACE_SYS_EXIT_PERF(sname)
-#define TRACE_SYS_EXIT_PERF_INIT(sname)
-#endif /* CONFIG_PERF_EVENTS */
-
 #ifdef CONFIG_FTRACE_SYSCALLS
 #define __SC_STR_ADECL1(t, a)		#a
 #define __SC_STR_ADECL2(t, a, ...)	#a, __SC_STR_ADECL1(__VA_ARGS__)
@@ -134,54 +118,43 @@ struct perf_event_attr;
 #define __SC_STR_TDECL5(t, a, ...)	#t, __SC_STR_TDECL4(__VA_ARGS__)
 #define __SC_STR_TDECL6(t, a, ...)	#t, __SC_STR_TDECL5(__VA_ARGS__)
 
+extern struct ftrace_event_class event_class_syscall_enter;
+extern struct ftrace_event_class event_class_syscall_exit;
+extern struct trace_event_functions enter_syscall_print_funcs;
+extern struct trace_event_functions exit_syscall_print_funcs;
+
 #define SYSCALL_TRACE_ENTER_EVENT(sname) \
-	static const struct syscall_metadata __syscall_meta_##sname; \
+	static struct syscall_metadata __syscall_meta_##sname; \
 	static struct ftrace_event_call \
 	__attribute__((__aligned__(4))) event_enter_##sname; \
-	static struct trace_event enter_syscall_print_##sname = { \
-		.trace = print_syscall_enter, \
-	}; \
 	static struct ftrace_event_call __used \
 	  __attribute__((__aligned__(4))) \
 	  __attribute__((section("_ftrace_events"))) \
 	  event_enter_##sname = { \
 		.name = "sys_enter"#sname, \
-		.system = "syscalls", \
-		.event = &enter_syscall_print_##sname, \
-		.raw_init = init_syscall_trace, \
-		.define_fields = syscall_enter_define_fields, \
-		.regfunc = reg_event_syscall_enter, \
-		.unregfunc = unreg_event_syscall_enter, \
+		.class = &event_class_syscall_enter, \
+		.event.funcs = &enter_syscall_print_funcs, \
 		.data = (void *)&__syscall_meta_##sname,\
-		TRACE_SYS_ENTER_PERF_INIT(sname) \
 	}
 
 #define SYSCALL_TRACE_EXIT_EVENT(sname) \
-	static const struct syscall_metadata __syscall_meta_##sname; \
+	static struct syscall_metadata __syscall_meta_##sname; \
 	static struct ftrace_event_call \
 	__attribute__((__aligned__(4))) event_exit_##sname; \
-	static struct trace_event exit_syscall_print_##sname = { \
-		.trace = print_syscall_exit, \
-	}; \
 	static struct ftrace_event_call __used \
 	  __attribute__((__aligned__(4))) \
 	  __attribute__((section("_ftrace_events"))) \
 	  event_exit_##sname = { \
		.name = "sys_exit"#sname, \
-		.system = "syscalls", \
-		.event = &exit_syscall_print_##sname, \
-		.raw_init = init_syscall_trace, \
-		.define_fields = syscall_exit_define_fields, \
-		.regfunc = reg_event_syscall_exit, \
-		.unregfunc = unreg_event_syscall_exit, \
+		.class = &event_class_syscall_exit, \
+		.event.funcs = &exit_syscall_print_funcs, \
 		.data = (void *)&__syscall_meta_##sname,\
-		TRACE_SYS_EXIT_PERF_INIT(sname) \
 	}
 
 #define SYSCALL_METADATA(sname, nb) \
 	SYSCALL_TRACE_ENTER_EVENT(sname); \
 	SYSCALL_TRACE_EXIT_EVENT(sname); \
-	static const struct syscall_metadata __used \
+	static struct syscall_metadata __used \
 	  __attribute__((__aligned__(4))) \
 	  __attribute__((section("__syscalls_metadata"))) \
 	  __syscall_meta_##sname = { \
@@ -191,12 +164,14 @@ struct perf_event_attr;
 	.args		= args_##sname, \
 	.enter_event	= &event_enter_##sname, \
 	.exit_event	= &event_exit_##sname, \
+	.enter_fields	= LIST_HEAD_INIT(__syscall_meta_##sname.enter_fields), \
+	.exit_fields	= LIST_HEAD_INIT(__syscall_meta_##sname.exit_fields), \
 };
 
 #define SYSCALL_DEFINE0(sname) \
 	SYSCALL_TRACE_ENTER_EVENT(_##sname); \
 	SYSCALL_TRACE_EXIT_EVENT(_##sname); \
-	static const struct syscall_metadata __used \
+	static struct syscall_metadata __used \
 	  __attribute__((__aligned__(4))) \
 	  __attribute__((section("__syscalls_metadata"))) \
 	  __syscall_meta__##sname = { \
@@ -204,6 +179,8 @@ struct perf_event_attr;
 	.nb_args	= 0, \
 	.enter_event	= &event_enter__##sname, \
 	.exit_event	= &event_exit__##sname, \
+	.enter_fields	= LIST_HEAD_INIT(__syscall_meta__##sname.enter_fields), \
+	.exit_fields	= LIST_HEAD_INIT(__syscall_meta__##sname.exit_fields), \
 }; \
 	asmlinkage long sys_##sname(void)
 #else
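
With the syscalls.h change above, a syscall event no longer carries its own regfunc/unregfunc, raw_init or define_fields; those now come from the shared event_class_syscall_enter/exit classes and the shared print-function tables. Roughly, for a hypothetical SYSCALL_DEFINE0(foo), the enter event now expands to something like the following (illustrative expansion only, not part of the patch):

    static struct syscall_metadata __syscall_meta__foo;
    static struct ftrace_event_call __used
      __attribute__((__aligned__(4)))
      __attribute__((section("_ftrace_events")))
      event_enter__foo = {
            .name        = "sys_enter_foo",
            .class       = &event_class_syscall_enter,   /* shared class */
            .event.funcs = &enter_syscall_print_funcs,   /* shared print callbacks */
            .data        = (void *)&__syscall_meta__foo,
    };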
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 1d85f9a6a199..9a59d1f98cd4 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -20,12 +20,17 @@
 struct module;
 struct tracepoint;
 
+struct tracepoint_func {
+	void *func;
+	void *data;
+};
+
 struct tracepoint {
 	const char *name;		/* Tracepoint name */
 	int state;			/* State. */
 	void (*regfunc)(void);
 	void (*unregfunc)(void);
-	void **funcs;
+	struct tracepoint_func *funcs;
 } __attribute__((aligned(32)));		/*
 					 * Aligned on 32 bytes because it is
 					 * globally visible and gcc happily
@@ -37,16 +42,19 @@ struct tracepoint {
  * Connect a probe to a tracepoint.
  * Internal API, should not be used directly.
  */
-extern int tracepoint_probe_register(const char *name, void *probe);
+extern int tracepoint_probe_register(const char *name, void *probe, void *data);
 
 /*
  * Disconnect a probe from a tracepoint.
  * Internal API, should not be used directly.
  */
-extern int tracepoint_probe_unregister(const char *name, void *probe);
+extern int
+tracepoint_probe_unregister(const char *name, void *probe, void *data);
 
-extern int tracepoint_probe_register_noupdate(const char *name, void *probe);
-extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe);
+extern int tracepoint_probe_register_noupdate(const char *name, void *probe,
+					      void *data);
+extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe,
+						void *data);
 extern void tracepoint_probe_update_all(void);
 
 struct tracepoint_iter {
@@ -102,17 +110,27 @@ static inline void tracepoint_update_probe_range(struct tracepoint *begin,
 /*
  * it_func[0] is never NULL because there is at least one element in the array
  * when the array itself is non NULL.
+ *
+ * Note, the proto and args passed in includes "__data" as the first parameter.
+ * The reason for this is to handle the "void" prototype. If a tracepoint
+ * has a "void" prototype, then it is invalid to declare a function
+ * as "(void *, void)". The DECLARE_TRACE_NOARGS() will pass in just
+ * "void *data", where as the DECLARE_TRACE() will pass in "void *data, proto".
  */
 #define __DO_TRACE(tp, proto, args)				\
 	do {							\
-		void **it_func;					\
+		struct tracepoint_func *it_func_ptr;		\
+		void *it_func;					\
+		void *__data;					\
 								\
 		rcu_read_lock_sched_notrace();			\
-		it_func = rcu_dereference_sched((tp)->funcs);	\
-		if (it_func) {					\
+		it_func_ptr = rcu_dereference_sched((tp)->funcs); \
+		if (it_func_ptr) {				\
 			do {					\
-				((void(*)(proto))(*it_func))(args); \
-			} while (*(++it_func));			\
+				it_func = (it_func_ptr)->func;	\
+				__data = (it_func_ptr)->data;	\
+				((void(*)(proto))(it_func))(args); \
+			} while ((++it_func_ptr)->func);	\
 		}						\
 		rcu_read_unlock_sched_notrace();		\
 	} while (0)
@@ -122,24 +140,32 @@ static inline void tracepoint_update_probe_range(struct tracepoint *begin,
  * not add unwanted padding between the beginning of the section and the
  * structure. Force alignment to the same alignment as the section start.
  */
-#define DECLARE_TRACE(name, proto, args)				\
+#define __DECLARE_TRACE(name, proto, args, data_proto, data_args)	\
 	extern struct tracepoint __tracepoint_##name;			\
 	static inline void trace_##name(proto)				\
 	{								\
 		if (unlikely(__tracepoint_##name.state))		\
 			__DO_TRACE(&__tracepoint_##name,		\
-				TP_PROTO(proto), TP_ARGS(args));	\
+				TP_PROTO(data_proto),			\
+				TP_ARGS(data_args));			\
+	}								\
+	static inline int						\
+	register_trace_##name(void (*probe)(data_proto), void *data)	\
+	{								\
+		return tracepoint_probe_register(#name, (void *)probe,	\
+						 data);			\
 	}								\
-	static inline int register_trace_##name(void (*probe)(proto))	\
+	static inline int						\
+	unregister_trace_##name(void (*probe)(data_proto), void *data)	\
 	{								\
-		return tracepoint_probe_register(#name, (void *)probe);	\
+		return tracepoint_probe_unregister(#name, (void *)probe, \
+						   data);		\
 	}								\
-	static inline int unregister_trace_##name(void (*probe)(proto))	\
+	static inline void						\
+	check_trace_callback_type_##name(void (*cb)(data_proto))	\
 	{								\
-		return tracepoint_probe_unregister(#name, (void *)probe);\
 	}
 
-
 #define DEFINE_TRACE_FN(name, reg, unreg)				\
 	static const char __tpstrtab_##name[]				\
 	__attribute__((section("__tracepoints_strings"))) = #name;	\
@@ -156,18 +182,23 @@ static inline void tracepoint_update_probe_range(struct tracepoint *begin,
 	EXPORT_SYMBOL(__tracepoint_##name)
 
 #else /* !CONFIG_TRACEPOINTS */
-#define DECLARE_TRACE(name, proto, args)				\
-	static inline void _do_trace_##name(struct tracepoint *tp, proto) \
-	{ }								\
+#define __DECLARE_TRACE(name, proto, args, data_proto, data_args)	\
 	static inline void trace_##name(proto)				\
 	{ }								\
-	static inline int register_trace_##name(void (*probe)(proto))	\
+	static inline int						\
+	register_trace_##name(void (*probe)(data_proto),		\
+			      void *data)				\
 	{								\
 		return -ENOSYS;						\
 	}								\
-	static inline int unregister_trace_##name(void (*probe)(proto))	\
+	static inline int						\
+	unregister_trace_##name(void (*probe)(data_proto),		\
+				void *data)				\
 	{								\
 		return -ENOSYS;						\
+	}								\
+	static inline void check_trace_callback_type_##name(void (*cb)(data_proto)) \
+	{								\
 	}
 
 #define DEFINE_TRACE_FN(name, reg, unreg)
@@ -176,6 +207,29 @@ static inline void tracepoint_update_probe_range(struct tracepoint *begin,
 #define EXPORT_TRACEPOINT_SYMBOL(name)
 
 #endif /* CONFIG_TRACEPOINTS */
+
+/*
+ * The need for the DECLARE_TRACE_NOARGS() is to handle the prototype
+ * (void). "void" is a special value in a function prototype and can
+ * not be combined with other arguments. Since the DECLARE_TRACE()
+ * macro adds a data element at the beginning of the prototype,
+ * we need a way to differentiate "(void *data, proto)" from
+ * "(void *data, void)". The second prototype is invalid.
+ *
+ * DECLARE_TRACE_NOARGS() passes "void" as the tracepoint prototype
+ * and "void *__data" as the callback prototype.
+ *
+ * DECLARE_TRACE() passes "proto" as the tracepoint protoype and
+ * "void *__data, proto" as the callback prototype.
+ */
+#define DECLARE_TRACE_NOARGS(name)					\
+		__DECLARE_TRACE(name, void, , void *__data, __data)
+
+#define DECLARE_TRACE(name, proto, args)				\
+		__DECLARE_TRACE(name, PARAMS(proto), PARAMS(args),	\
+				PARAMS(void *__data, proto),		\
+				PARAMS(__data, args))
+
 #endif /* DECLARE_TRACE */
 
 #ifndef TRACE_EVENT
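
The practical effect of the tracepoint.h change above is that a probe now receives the data pointer supplied at registration time as its first argument, ahead of the tracepoint arguments. A small usage sketch, assuming a made-up tracepoint declared elsewhere as DECLARE_TRACE(foo, TP_PROTO(int val), TP_ARGS(val)); the "foo" tracepoint, probe and counter are illustrative and not part of the patch:

    static int my_counter;

    /* First parameter is whatever was passed to register_trace_foo(). */
    static void my_foo_probe(void *data, int val)
    {
            int *counter = data;

            *counter += val;
    }

    static int __init my_module_init(void)
    {
            /* &my_counter is handed back to my_foo_probe on every hit */
            return register_trace_foo(my_foo_probe, &my_counter);
    }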
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 4eb2148f1321..0152b8673bd7 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -62,7 +62,10 @@
 		struct trace_entry	ent;	\
 		tstruct				\
 		char		__data[0];	\
-	};
+	};					\
+						\
+	static struct ftrace_event_class event_class_##name;
+
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, name, proto, args)	\
 	static struct ftrace_event_call		\
@@ -147,7 +150,7 @@
  *
  *	entry = iter->ent;
  *
- *	if (entry->type != event_<call>.id) {
+ *	if (entry->type != event_<call>->event.type) {
  *		WARN_ON_ONCE(1);
 *		return TRACE_TYPE_UNHANDLED;
 *	}
@@ -203,18 +206,22 @@
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
 static notrace enum print_line_t					\
-ftrace_raw_output_id_##call(int event_id, const char *name,		\
-			    struct trace_iterator *iter, int flags)	\
+ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
+			 struct trace_event *trace_event)		\
 {									\
+	struct ftrace_event_call *event;				\
 	struct trace_seq *s = &iter->seq;				\
 	struct ftrace_raw_##call *field;				\
 	struct trace_entry *entry;					\
 	struct trace_seq *p;						\
 	int ret;							\
 									\
+	event = container_of(trace_event, struct ftrace_event_call,	\
+			     event);					\
+									\
 	entry = iter->ent;						\
 									\
-	if (entry->type != event_id) {					\
+	if (entry->type != event->event.type) {				\
 		WARN_ON_ONCE(1);					\
 		return TRACE_TYPE_UNHANDLED;				\
 	}								\
@@ -223,7 +230,7 @@ ftrace_raw_output_id_##call(int event_id, const char *name, \
 									\
 	p = &get_cpu_var(ftrace_event_seq);				\
 	trace_seq_init(p);						\
-	ret = trace_seq_printf(s, "%s: ", name);			\
+	ret = trace_seq_printf(s, "%s: ", event->name);			\
 	if (ret)							\
 		ret = trace_seq_printf(s, print);			\
 	put_cpu();							\
@@ -231,21 +238,16 @@ ftrace_raw_output_id_##call(int event_id, const char *name, \
 		return TRACE_TYPE_PARTIAL_LINE;				\
 									\
 	return TRACE_TYPE_HANDLED;					\
-}
-
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, name, proto, args)			\
-static notrace enum print_line_t					\
-ftrace_raw_output_##name(struct trace_iterator *iter, int flags)	\
-{									\
-	return ftrace_raw_output_id_##template(event_##name.id,	\
-						#name, iter, flags);	\
-}
+}									\
+static struct trace_event_functions ftrace_event_type_funcs_##call = {	\
+	.trace			= ftrace_raw_output_##call,		\
+};
 
 #undef DEFINE_EVENT_PRINT
 #define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
 static notrace enum print_line_t					\
-ftrace_raw_output_##call(struct trace_iterator *iter, int flags)	\
+ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
+			 struct trace_event *event)			\
 {									\
 	struct trace_seq *s = &iter->seq;				\
 	struct ftrace_raw_##template *field;				\
@@ -255,7 +257,7 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
 									\
 	entry = iter->ent;						\
 									\
-	if (entry->type != event_##call.id) {				\
+	if (entry->type != event_##call.event.type) {			\
 		WARN_ON_ONCE(1);					\
 		return TRACE_TYPE_UNHANDLED;				\
 	}								\
@@ -272,7 +274,10 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
 		return TRACE_TYPE_PARTIAL_LINE;				\
 									\
 	return TRACE_TYPE_HANDLED;					\
-}
+}									\
+static struct trace_event_functions ftrace_event_type_funcs_##call = {	\
+	.trace			= ftrace_raw_output_##call,		\
+};
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
@@ -378,80 +383,18 @@ static inline notrace int ftrace_get_offsets_##call( \
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-#ifdef CONFIG_PERF_EVENTS
-
-/*
- * Generate the functions needed for tracepoint perf_event support.
- *
- * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
- *
- * static int ftrace_profile_enable_<call>(void)
- * {
- *	return register_trace_<call>(ftrace_profile_<call>);
- * }
- *
- * static void ftrace_profile_disable_<call>(void)
- * {
- *	unregister_trace_<call>(ftrace_profile_<call>);
- * }
- *
- */
-
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)
-
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, name, proto, args)			\
-									\
-static void perf_trace_##name(proto);					\
-									\
-static notrace int							\
-perf_trace_enable_##name(struct ftrace_event_call *unused)		\
-{									\
-	return register_trace_##name(perf_trace_##name);		\
-}									\
-									\
-static notrace void							\
-perf_trace_disable_##name(struct ftrace_event_call *unused)		\
-{									\
-	unregister_trace_##name(perf_trace_##name);			\
-}
-
-#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print)		\
-	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
-
-#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-
-#endif /* CONFIG_PERF_EVENTS */
-
 /*
  * Stage 4 of the trace events.
  *
  * Override the macros in <trace/trace_events.h> to include the following:
  *
- * static void ftrace_event_<call>(proto)
- * {
- *	event_trace_printk(_RET_IP_, "<call>: " <fmt>);
- * }
- *
- * static int ftrace_reg_event_<call>(struct ftrace_event_call *unused)
- * {
- *	return register_trace_<call>(ftrace_event_<call>);
- * }
- *
- * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
- * {
- *	unregister_trace_<call>(ftrace_event_<call>);
- * }
- *
- *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
- * static void ftrace_raw_event_<call>(proto)
+ * static void ftrace_raw_event_<call>(void *__data, proto)
 * {
+ *	struct ftrace_event_call *event_call = __data;
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	struct ring_buffer_event *event;
 *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
@@ -466,7 +409,7 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *	event = trace_current_buffer_lock_reserve(&buffer,
- *				  event_<call>.id,
+ *				  event_<call>->event.type,
 *				  sizeof(*entry) + __data_size,
 *				  irq_flags, pc);
 *	if (!event)
@@ -481,43 +424,42 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \
 *			       event, irq_flags, pc);
 * }
 *
- * static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused)
- * {
- *	return register_trace_<call>(ftrace_raw_event_<call>);
- * }
- *
- * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
- * {
- *	unregister_trace_<call>(ftrace_raw_event_<call>);
- * }
- *
 * static struct trace_event ftrace_event_type_<call> = {
 *	.trace			= ftrace_raw_output_<call>, <-- stage 2
 * };
 *
 * static const char print_fmt_<call>[] = <TP_printk>;
 *
+ * static struct ftrace_event_class __used event_class_<template> = {
+ *	.system			= "<system>",
+ *	.define_fields		= ftrace_define_fields_<call>,
+ *	.fields			= LIST_HEAD_INIT(event_class_##call.fields),
+ *	.raw_init		= trace_event_raw_init,
+ *	.probe			= ftrace_raw_event_##call,
+ * };
+ *
 * static struct ftrace_event_call __used
 * __attribute__((__aligned__(4)))
 * __attribute__((section("_ftrace_events"))) event_<call> = {
 *	.name			= "<call>",
- *	.system			= "<system>",
- *	.raw_init		= trace_event_raw_init,
- *	.regfunc		= ftrace_reg_event_<call>,
- *	.unregfunc		= ftrace_unreg_event_<call>,
+ *	.class			= event_class_<template>,
+ *	.event			= &ftrace_event_type_<call>,
 *	.print_fmt		= print_fmt_<call>,
- *	.define_fields		= ftrace_define_fields_<call>,
- * }
+ * };
 *
 */
 
 #ifdef CONFIG_PERF_EVENTS
 
+#define _TRACE_PERF_PROTO(call, proto)					\
+	static notrace void						\
+	perf_trace_##call(void *__data, proto);
+
 #define _TRACE_PERF_INIT(call)						\
-	.perf_event_enable = perf_trace_enable_##call,			\
-	.perf_event_disable = perf_trace_disable_##call,
+	.perf_probe		= perf_trace_##call,
 
 #else
+#define _TRACE_PERF_PROTO(call, proto)
 #define _TRACE_PERF_INIT(call)
 #endif /* CONFIG_PERF_EVENTS */
 
@@ -551,9 +493,9 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
 									\
 static notrace void							\
-ftrace_raw_event_id_##call(struct ftrace_event_call *event_call,	\
-			   proto)					\
+ftrace_raw_event_##call(void *__data, proto)				\
 {									\
+	struct ftrace_event_call *event_call = __data;			\
 	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
 	struct ring_buffer_event *event;				\
 	struct ftrace_raw_##call *entry;				\
@@ -568,7 +510,7 @@ ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
 	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
 									\
 	event = trace_current_buffer_lock_reserve(&buffer,		\
-						  event_call->id,	\
+						  event_call->event.type, \
 						  sizeof(*entry) + __data_size, \
 						  irq_flags, pc);	\
 	if (!event)							\
@@ -583,34 +525,21 @@ ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
 	trace_nowake_buffer_unlock_commit(buffer,			\
 					  event, irq_flags, pc);	\
 }
+/*
+ * The ftrace_test_probe is compiled out, it is only here as a build time check
+ * to make sure that if the tracepoint handling changes, the ftrace probe will
+ * fail to compile unless it too is updated.
+ */
 
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, call, proto, args)			\
-									\
-static notrace void ftrace_raw_event_##call(proto)			\
-{									\
-	ftrace_raw_event_id_##template(&event_##call, args);		\
-}									\
-									\
-static notrace int							\
-ftrace_raw_reg_event_##call(struct ftrace_event_call *unused)		\
-{									\
-	return register_trace_##call(ftrace_raw_event_##call);		\
-}									\
-									\
-static notrace void							\
-ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused)	\
+static inline void ftrace_test_probe_##call(void)			\
 {									\
-	unregister_trace_##call(ftrace_raw_event_##call);		\
-}									\
-									\
-static struct trace_event ftrace_event_type_##call = {			\
-	.trace			= ftrace_raw_output_##call,		\
-};
+	check_trace_callback_type_##call(ftrace_raw_event_##template);	\
+}
 
 #undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
-	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print)
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
@@ -627,7 +556,16 @@
 
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
-static const char print_fmt_##call[] = print;
+_TRACE_PERF_PROTO(call, PARAMS(proto));					\
+static const char print_fmt_##call[] = print;				\
+static struct ftrace_event_class __used event_class_##call = {		\
+	.system			= __stringify(TRACE_SYSTEM),		\
+	.define_fields		= ftrace_define_fields_##call,		\
+	.fields			= LIST_HEAD_INIT(event_class_##call.fields),\
+	.raw_init		= trace_event_raw_init,			\
+	.probe			= ftrace_raw_event_##call,		\
+	_TRACE_PERF_INIT(call)						\
+};
 
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, call, proto, args)			\
@@ -636,15 +574,10 @@ static struct ftrace_event_call __used \
 __attribute__((__aligned__(4)))					\
 __attribute__((section("_ftrace_events"))) event_##call = {		\
 	.name			= #call,				\
-	.system			= __stringify(TRACE_SYSTEM),		\
-	.event			= &ftrace_event_type_##call,		\
-	.raw_init		= trace_event_raw_init,		\
-	.regfunc		= ftrace_raw_reg_event_##call,		\
-	.unregfunc		= ftrace_raw_unreg_event_##call,	\
+	.class			= &event_class_##template,		\
+	.event.funcs		= &ftrace_event_type_funcs_##template,	\
 	.print_fmt		= print_fmt_##template,			\
-	.define_fields		= ftrace_define_fields_##template,	\
-	_TRACE_PERF_INIT(call)						\
-}
+};
 
 #undef DEFINE_EVENT_PRINT
 #define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
@@ -655,14 +588,9 @@ static struct ftrace_event_call __used \
 __attribute__((__aligned__(4)))					\
 __attribute__((section("_ftrace_events"))) event_##call = {		\
 	.name			= #call,				\
-	.system			= __stringify(TRACE_SYSTEM),		\
-	.event			= &ftrace_event_type_##call,		\
-	.raw_init		= trace_event_raw_init,		\
-	.regfunc		= ftrace_raw_reg_event_##call,		\
-	.unregfunc		= ftrace_raw_unreg_event_##call,	\
+	.class			= &event_class_##template,		\
+	.event.funcs		= &ftrace_event_type_funcs_##call,	\
 	.print_fmt		= print_fmt_##call,			\
-	.define_fields		= ftrace_define_fields_##template,	\
-	_TRACE_PERF_INIT(call)						\
 }
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
@@ -762,17 +690,20 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
 static notrace void							\
-perf_trace_templ_##call(struct ftrace_event_call *event_call,		\
-			struct pt_regs *__regs, proto)			\
+perf_trace_##call(void *__data, proto)					\
 {									\
+	struct ftrace_event_call *event_call = __data;			\
 	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
 	struct ftrace_raw_##call *entry;				\
+	struct pt_regs __regs;						\
 	u64 __addr = 0, __count = 1;					\
 	struct hlist_head *head;					\
 	int __entry_size;						\
 	int __data_size;						\
 	int rctx;							\
 									\
+	perf_fetch_caller_regs(&__regs, 1);				\
+									\
 	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
 	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
 			     sizeof(u64));				\
@@ -783,7 +714,7 @@ perf_trace_templ_##call(struct ftrace_event_call *event_call, \
 		return;							\
 									\
 	entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare(	\
-		__entry_size, event_call->id, __regs, &rctx);		\
+		__entry_size, event_call->event.type, &__regs, &rctx);	\
 	if (!entry)							\
 		return;							\
 									\
@@ -793,20 +724,22 @@ perf_trace_templ_##call(struct ftrace_event_call *event_call, \
 									\
 	head = per_cpu_ptr(event_call->perf_events, smp_processor_id());\
 	perf_trace_buf_submit(entry, __entry_size, rctx, __addr,	\
-		__count, __regs, head);					\
+		__count, &__regs, head);				\
 }
 
+/*
+ * This part is compiled out, it is only here as a build time check
+ * to make sure that if the tracepoint handling changes, the
+ * perf probe will fail to compile unless it too is updated.
+ */
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, call, proto, args)			\
-static notrace void perf_trace_##call(proto)				\
+static inline void perf_test_probe_##call(void)			\
 {									\
-	struct ftrace_event_call *event_call = &event_##call;		\
-	struct pt_regs __regs;						\
-									\
-	perf_fetch_caller_regs(&__regs, 1);				\
-	perf_trace_templ_##template(event_call, &__regs, args);	\
+	check_trace_callback_type_##call(perf_trace_##template);	\
 }
 
+
 #undef DEFINE_EVENT_PRINT
 #define DEFINE_EVENT_PRINT(template, name, proto, args, print)		\
 	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
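
One detail worth calling out in the trace/ftrace.h change above: DEFINE_EVENT() no longer emits per-event register/unregister functions. Instead it emits ftrace_test_probe_<call>() and perf_test_probe_<call>(), inline functions that are never called but force the probe to type-check against check_trace_callback_type_<call>() generated by __DECLARE_TRACE(). Stripped of the macros, the idea looks roughly like this (the "foo" names and the int argument are illustrative only):

    /* Generated by __DECLARE_TRACE(foo, ...): empty body, exists only for
     * its prototype. */
    static inline void
    check_trace_callback_type_foo(void (*cb)(void *__data, int val))
    {
    }

    /* The real probe, generated by DECLARE_EVENT_CLASS for "foo". */
    static void ftrace_raw_event_foo(void *__data, int val);

    /* Never called; compiling it is the check.  If the tracepoint's
     * prototype and the probe's prototype drift apart, this fails to build. */
    static inline void ftrace_test_probe_foo(void)
    {
            check_trace_callback_type_foo(ftrace_raw_event_foo);
    }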
diff --git a/include/trace/syscall.h b/include/trace/syscall.h
index e5e5f48dbfb3..257e08960d7b 100644
--- a/include/trace/syscall.h
+++ b/include/trace/syscall.h
@@ -25,6 +25,8 @@ struct syscall_metadata {
 	int		nb_args;
 	const char	**types;
 	const char	**args;
+	struct list_head enter_fields;
+	struct list_head exit_fields;
 
 	struct ftrace_event_call *enter_event;
 	struct ftrace_event_call *exit_event;
@@ -34,16 +36,16 @@ struct syscall_metadata {
 extern unsigned long arch_syscall_addr(int nr);
 extern int init_syscall_trace(struct ftrace_event_call *call);
 
-extern int syscall_enter_define_fields(struct ftrace_event_call *call);
-extern int syscall_exit_define_fields(struct ftrace_event_call *call);
 extern int reg_event_syscall_enter(struct ftrace_event_call *call);
 extern void unreg_event_syscall_enter(struct ftrace_event_call *call);
 extern int reg_event_syscall_exit(struct ftrace_event_call *call);
 extern void unreg_event_syscall_exit(struct ftrace_event_call *call);
 extern int
 ftrace_format_syscall(struct ftrace_event_call *call, struct trace_seq *s);
-enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags);
-enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags);
+enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags,
+				      struct trace_event *event);
+enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags,
+				     struct trace_event *event);
 #endif
 
 #ifdef CONFIG_PERF_EVENTS