Diffstat (limited to 'include/trace/perf.h')
-rw-r--r--  include/trace/perf.h  258
1 file changed, 0 insertions, 258 deletions
diff --git a/include/trace/perf.h b/include/trace/perf.h
index 1b5443cebedc..26486fcd74ce 100644
--- a/include/trace/perf.h
+++ b/include/trace/perf.h
@@ -1,261 +1,3 @@
-/*
- * Stage 4 of the trace events.
- *
- * Override the macros in <trace/trace_events.h> to include the following:
- *
- * For those macros defined with TRACE_EVENT:
- *
- * static struct trace_event_call event_<call>;
- *
- * static void trace_event_raw_event_<call>(void *__data, proto)
- * {
- *      struct trace_event_file *trace_file = __data;
- *      struct trace_event_call *event_call = trace_file->event_call;
- *      struct trace_event_data_offsets_<call> __maybe_unused __data_offsets;
- *      unsigned long eflags = trace_file->flags;
- *      enum event_trigger_type __tt = ETT_NONE;
- *      struct ring_buffer_event *event;
- *      struct trace_event_raw_<call> *entry; <-- defined in stage 1
- *      struct ring_buffer *buffer;
- *      unsigned long irq_flags;
- *      int __data_size;
- *      int pc;
- *
- *      if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
- *              if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
- *                      event_triggers_call(trace_file, NULL);
- *              if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
- *                      return;
- *      }
- *
- *      local_save_flags(irq_flags);
- *      pc = preempt_count();
- *
- *      __data_size = trace_event_get_offsets_<call>(&__data_offsets, args);
- *
- *      event = trace_event_buffer_lock_reserve(&buffer, trace_file,
- *                                event_<call>->event.type,
- *                                sizeof(*entry) + __data_size,
- *                                irq_flags, pc);
- *      if (!event)
- *              return;
- *      entry = ring_buffer_event_data(event);
- *
- *      { <assign>; }  <-- Here we assign the entries by the __field and
- *                         __array macros.
- *
- *      if (eflags & EVENT_FILE_FL_TRIGGER_COND)
- *              __tt = event_triggers_call(trace_file, entry);
- *
- *      if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT,
- *                   &trace_file->flags))
- *              ring_buffer_discard_commit(buffer, event);
- *      else if (!filter_check_discard(trace_file, entry, buffer, event))
- *              trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
- *
- *      if (__tt)
- *              event_triggers_post_call(trace_file, __tt);
- * }
- *
- * static struct trace_event ftrace_event_type_<call> = {
- *      .trace = trace_raw_output_<call>, <-- stage 2
- * };
- *
- * static char print_fmt_<call>[] = <TP_printk>;
- *
- * static struct trace_event_class __used event_class_<template> = {
- *      .system = "<system>",
- *      .define_fields = trace_event_define_fields_<call>,
- *      .fields = LIST_HEAD_INIT(event_class_##call.fields),
- *      .raw_init = trace_event_raw_init,
- *      .probe = trace_event_raw_event_##call,
- *      .reg = trace_event_reg,
- * };
- *
- * static struct trace_event_call event_<call> = {
- *      .class = event_class_<template>,
- *      {
- *              .tp = &__tracepoint_<call>,
- *      },
- *      .event = &ftrace_event_type_<call>,
- *      .print_fmt = print_fmt_<call>,
- *      .flags = TRACE_EVENT_FL_TRACEPOINT,
- * };
- * // its only safe to use pointers when doing linker tricks to
- * // create an array.
- * static struct trace_event_call __used
- * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
- *
- */
-
-#ifdef CONFIG_PERF_EVENTS
-
-#define _TRACE_PERF_PROTO(call, proto) \
-        static notrace void \
-        perf_trace_##call(void *__data, proto);
-
-#define _TRACE_PERF_INIT(call) \
-        .perf_probe = perf_trace_##call,
-
-#else
-#define _TRACE_PERF_PROTO(call, proto)
-#define _TRACE_PERF_INIT(call)
-#endif /* CONFIG_PERF_EVENTS */
-
-#undef __entry
-#define __entry entry
-
-#undef __field
-#define __field(type, item)
-
-#undef __field_struct
-#define __field_struct(type, item)
-
-#undef __array
-#define __array(type, item, len)
-
-#undef __dynamic_array
-#define __dynamic_array(type, item, len) \
-        __entry->__data_loc_##item = __data_offsets.item;
-
-#undef __string
-#define __string(item, src) __dynamic_array(char, item, -1)
-
-#undef __assign_str
-#define __assign_str(dst, src) \
-        strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");
-
-#undef __bitmask
-#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
-
-#undef __get_bitmask
-#define __get_bitmask(field) (char *)__get_dynamic_array(field)
-
-#undef __assign_bitmask
-#define __assign_bitmask(dst, src, nr_bits) \
-        memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))
-
-#undef TP_fast_assign
-#define TP_fast_assign(args...) args
-
-#undef __perf_addr
-#define __perf_addr(a) (a)
-
-#undef __perf_count
-#define __perf_count(c) (c)
-
-#undef __perf_task
-#define __perf_task(t) (t)
-
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
- \
-static notrace void \
-trace_event_raw_event_##call(void *__data, proto) \
-{ \
-        struct trace_event_file *trace_file = __data; \
-        struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
-        struct trace_event_buffer fbuffer; \
-        struct trace_event_raw_##call *entry; \
-        int __data_size; \
- \
-        if (trace_trigger_soft_disabled(trace_file)) \
-                return; \
- \
-        __data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
- \
-        entry = trace_event_buffer_reserve(&fbuffer, trace_file, \
-                        sizeof(*entry) + __data_size); \
- \
-        if (!entry) \
-                return; \
- \
-        tstruct \
- \
-        { assign; } \
- \
-        trace_event_buffer_commit(&fbuffer); \
-}
-/*
- * The ftrace_test_probe is compiled out, it is only here as a build time check
- * to make sure that if the tracepoint handling changes, the ftrace probe will
- * fail to compile unless it too is updated.
- */
-
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, call, proto, args) \
-static inline void ftrace_test_probe_##call(void) \
-{ \
-        check_trace_callback_type_##call(trace_event_raw_event_##template); \
-}
-
-#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print)
-
-#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-
-#undef __entry
-#define __entry REC
-
-#undef __print_flags
-#undef __print_symbolic
-#undef __print_hex
-#undef __get_dynamic_array
-#undef __get_dynamic_array_len
-#undef __get_str
-#undef __get_bitmask
-#undef __print_array
-
-#undef TP_printk
-#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)
-
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
-_TRACE_PERF_PROTO(call, PARAMS(proto)); \
-static char print_fmt_##call[] = print; \
-static struct trace_event_class __used __refdata event_class_##call = { \
-        .system = TRACE_SYSTEM_STRING, \
-        .define_fields = trace_event_define_fields_##call, \
-        .fields = LIST_HEAD_INIT(event_class_##call.fields),\
-        .raw_init = trace_event_raw_init, \
-        .probe = trace_event_raw_event_##call, \
-        .reg = trace_event_reg, \
-        _TRACE_PERF_INIT(call) \
-};
-
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, call, proto, args) \
- \
-static struct trace_event_call __used event_##call = { \
-        .class = &event_class_##template, \
-        { \
-                .tp = &__tracepoint_##call, \
-        }, \
-        .event.funcs = &trace_event_type_funcs_##template, \
-        .print_fmt = print_fmt_##template, \
-        .flags = TRACE_EVENT_FL_TRACEPOINT, \
-}; \
-static struct trace_event_call __used \
-__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
-
-#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
- \
-static char print_fmt_##call[] = print; \
- \
-static struct trace_event_call __used event_##call = { \
-        .class = &event_class_##template, \
-        { \
-                .tp = &__tracepoint_##call, \
-        }, \
-        .event.funcs = &trace_event_type_funcs_##call, \
-        .print_fmt = print_fmt_##call, \
-        .flags = TRACE_EVENT_FL_TRACEPOINT, \
-}; \
-static struct trace_event_call __used \
-__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
-
-#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
 #undef TRACE_SYSTEM_VAR
 
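
As context for what the removed stage-4 header operated on, here is a minimal sketch of the kind of trace header that feeds these macros. The "foo" subsystem, the foo_bar event, and its fields are hypothetical examples, not taken from this diff; the macro names (TRACE_EVENT, TP_STRUCT__entry, __field, __string, __assign_str, TP_printk) are the real trace-event API that the removed code redefines on each include pass.

/* Sketch of a trace header, e.g. include/trace/events/foo.h;
 * "foo" and foo_bar are hypothetical. */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM foo

#if !defined(_TRACE_FOO_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_FOO_H

#include <linux/tracepoint.h>

TRACE_EVENT(foo_bar,

        TP_PROTO(int value, const char *msg),

        TP_ARGS(value, msg),

        /* Stage 1 turns this into struct trace_event_raw_foo_bar. */
        TP_STRUCT__entry(
                __field(int, value)
                __string(msg, msg)
        ),

        /* The generated trace_event_raw_event_foo_bar() runs this block
         * with __entry pointing into the reserved ring buffer event. */
        TP_fast_assign(
                __entry->value = value;
                __assign_str(msg, msg);
        ),

        TP_printk("value=%d msg=%s", __entry->value, __get_str(msg))
);

#endif /* _TRACE_FOO_H */

/* Must stay outside the guard: define_trace.h re-includes this header
 * once per stage, including the header being removed above. */
#include <trace/define_trace.h>

Exactly one .c file in the subsystem defines CREATE_TRACE_POINTS before including the header, so the tracepoints and the generated event structures are instantiated once; callers then fire the event with trace_foo_bar(value, msg).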
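
As a worked expansion of the TP_printk() redefinition being removed (which rewrites __entry to REC and stringifies the argument list), the print_fmt array for the hypothetical event above would come out roughly as:

static char print_fmt_foo_bar[] =
        "\"value=%d msg=%s\", REC->value, __get_str(msg)";

This is essentially the string later exposed as the "print fmt:" line in the event's format file.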
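
The removed comment's note that "its only safe to use pointers when doing linker tricks to create an array" refers to the _ftrace_events section trick: every event emits a pointer into a named section, and the linker concatenates those pointers into a contiguous array the trace core can walk. Below is a generic, stand-alone sketch of that technique; the my_events section, the __start_/__stop_ symbol names, and DEFINE_MY_EVENT() are illustrative, not the kernel's own names.

#include <stdio.h>

struct my_event {
        const char *name;
};

/* Each definition drops a pointer into the "my_events" section. */
#define DEFINE_MY_EVENT(ev)                                             \
        static struct my_event event_##ev = { .name = #ev };           \
        static struct my_event *__event_##ev                           \
        __attribute__((used, section("my_events"))) = &event_##ev

DEFINE_MY_EVENT(alpha);
DEFINE_MY_EVENT(beta);

/* GNU ld provides __start_<sec>/__stop_<sec> symbols for any section
 * whose name is a valid C identifier. */
extern struct my_event *__start_my_events[];
extern struct my_event *__stop_my_events[];

int main(void)
{
        for (struct my_event **p = __start_my_events; p < __stop_my_events; p++)
                printf("registered event: %s\n", (*p)->name);
        return 0;
}

Pointers are placed in the section rather than the structs themselves because alignment padding between struct objects could break fixed-stride iteration, whereas an array of pointers is densely packed.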