author     Steven Rostedt <srostedt@redhat.com>     2009-04-13 12:25:37 -0400
committer  Steven Rostedt <rostedt@goodmis.org>     2009-04-14 12:57:59 -0400
commit     f42c85e74faa422cf0bc747ed808681145448f88 (patch)
tree       3775dc0a402f7da5247aa8ceb92ae89590038199 /include/trace/ftrace.h
parent     97f2025153499faa17267a0d4e18c7afaf73f39d (diff)
tracing/events: move the ftrace event tracing code to core
This patch moves the ftrace event creation into include/trace/ftrace.h and
simplifies the work of developers adding new tracepoints: simply creating the
trace points in include/trace and including define_trace.h will create the
events in the debugfs/tracing/events directory. This removes the need for
include/trace/trace_events.h.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
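To make the new workflow concrete, below is a minimal sketch of a tracepoint
header as a developer would now write one. The subsystem foo, the event
foo_bar, and its single field are hypothetical, and the multi-read header
guard is assumed from the define_trace.h mechanism; only the
TRACE_EVENT(name, proto, args, tstruct, assign, print) shape is taken from
this patch:

/* include/trace/foo.h -- hypothetical example, not part of this patch */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM foo

#if !defined(_TRACE_FOO_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_FOO_H

#include <linux/tracepoint.h>

TRACE_EVENT(foo_bar,

        TP_PROTO(int value),

        TP_ARGS(value),

        TP_STRUCT__entry(
                __field(int, value)
        ),

        TP_fast_assign(
                __entry->value = value;
        ),

        TP_printk("value=%d", __entry->value)
);

#endif /* _TRACE_FOO_H */

/* This include is what triggers the stages in ftrace.h below and
 * creates the event under debugfs/tracing/events. */
#include <trace/define_trace.h>

The final include is the whole trick: define_trace.h re-reads the header with
the stage macros defined below, so no per-event boilerplate is needed
anywhere else.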
Diffstat (limited to 'include/trace/ftrace.h')
-rw-r--r--  include/trace/ftrace.h | 492
1 file changed, 492 insertions, 0 deletions
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
new file mode 100644
index 000000000000..955b967acd74
--- /dev/null
+++ b/include/trace/ftrace.h
@@ -0,0 +1,492 @@
/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct ftrace_raw_<call> {
 *        struct trace_entry ent;
 *        <type> <item>;
 *        <type2> <item2>[<len>];
 *        [...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */

#include <linux/ftrace_event.h>

#undef TRACE_FORMAT
#define TRACE_FORMAT(call, proto, args, fmt)

#undef __array
#define __array(type, item, len) type item[len];

#undef __field
#define __field(type, item) type item;

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
        struct ftrace_raw_##name { \
                struct trace_entry ent; \
                tstruct \
        }; \
        static struct ftrace_event_call event_##name

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 2 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *        struct trace_seq *s = &iter->seq;
 *        struct ftrace_raw_<call> *field; <-- defined in stage 1
 *        struct trace_entry *entry;
 *        int ret;
 *
 *        entry = iter->ent;
 *
 *        if (entry->type != event_<call>.id) {
 *                WARN_ON_ONCE(1);
 *                return TRACE_TYPE_UNHANDLED;
 *        }
 *
 *        field = (typeof(field))entry;
 *
 *        ret = trace_seq_printf(s, <TP_printk> "\n");
 *        if (!ret)
 *                return TRACE_TYPE_PARTIAL_LINE;
 *
 *        return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in binary.
 */

#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
enum print_line_t \
ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
{ \
        struct trace_seq *s = &iter->seq; \
        struct ftrace_raw_##call *field; \
        struct trace_entry *entry; \
        int ret; \
\
        entry = iter->ent; \
\
        if (entry->type != event_##call.id) { \
                WARN_ON_ONCE(1); \
                return TRACE_TYPE_UNHANDLED; \
        } \
\
        field = (typeof(field))entry; \
\
        ret = trace_seq_printf(s, #call ": " print); \
        if (!ret) \
                return TRACE_TYPE_PARTIAL_LINE; \
\
        return TRACE_TYPE_HANDLED; \
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Set up the display format of the trace point.
 *
 * int
 * ftrace_format_##call(struct trace_seq *s)
 * {
 *        struct ftrace_raw_##call field;
 *        int ret;
 *
 *        ret = trace_seq_printf(s, #type " " #item ";"
 *                               " offset:%u; size:%u;\n",
 *                               offsetof(struct ftrace_raw_##call, item),
 *                               sizeof(field.item));
 *
 *        return ret;
 * }
 */

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef __field
#define __field(type, item) \
        ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \
                               "offset:%u;\tsize:%u;\n", \
                               (unsigned int)offsetof(typeof(field), item), \
                               (unsigned int)sizeof(field.item)); \
        if (!ret) \
                return 0;

#undef __array
#define __array(type, item, len) \
        ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
                               "offset:%u;\tsize:%u;\n", \
                               (unsigned int)offsetof(typeof(field), item), \
                               (unsigned int)sizeof(field.item)); \
        if (!ret) \
                return 0;

#undef __entry
#define __entry REC

#undef TP_printk
#define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args)

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print) \
static int \
ftrace_format_##call(struct trace_seq *s) \
{ \
        struct ftrace_raw_##call field; \
        int ret; \
\
        tstruct; \
\
        trace_seq_printf(s, "\nprint fmt: " print); \
\
        return ret; \
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __field
#define __field(type, item) \
        ret = trace_define_field(event_call, #type, #item, \
                                 offsetof(typeof(field), item), \
                                 sizeof(field.item)); \
        if (ret) \
                return ret;

#undef __array
#define __array(type, item, len) \
        BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
        ret = trace_define_field(event_call, #type "[" #len "]", #item, \
                                 offsetof(typeof(field), item), \
                                 sizeof(field.item)); \
        if (ret) \
                return ret;

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print) \
int \
ftrace_define_fields_##call(void) \
{ \
        struct ftrace_raw_##call field; \
        struct ftrace_event_call *event_call = &event_##call; \
        int ret; \
\
        __common_field(unsigned char, type); \
        __common_field(unsigned char, flags); \
        __common_field(unsigned char, preempt_count); \
        __common_field(int, pid); \
        __common_field(int, tgid); \
\
        tstruct; \
\
        return ret; \
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * static void ftrace_event_<call>(proto)
 * {
 *        event_trace_printk(_RET_IP_, "<call>: " <fmt>);
 * }
 *
 * static int ftrace_reg_event_<call>(void)
 * {
 *        int ret;
 *
 *        ret = register_trace_<call>(ftrace_event_<call>);
 *        if (ret)
 *                pr_info("event trace: Could not activate trace point "
 *                        "probe to <call>");
 *        return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(void)
 * {
 *        unregister_trace_<call>(ftrace_event_<call>);
 * }
 *
 * For those macros defined with TRACE_FORMAT:
 *
 * static struct ftrace_event_call __used
 * __attribute__((__aligned__(4)))
 * __attribute__((section("_ftrace_events"))) event_<call> = {
 *        .name = "<call>",
 *        .regfunc = ftrace_reg_event_<call>,
 *        .unregfunc = ftrace_unreg_event_<call>,
 * }
 *
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(proto)
 * {
 *        struct ring_buffer_event *event;
 *        struct ftrace_raw_<call> *entry; <-- defined in stage 1
 *        unsigned long irq_flags;
 *        int pc;
 *
 *        local_save_flags(irq_flags);
 *        pc = preempt_count();
 *
 *        event = trace_current_buffer_lock_reserve(event_<call>.id,
 *                                  sizeof(struct ftrace_raw_<call>),
 *                                  irq_flags, pc);
 *        if (!event)
 *                return;
 *        entry = ring_buffer_event_data(event);
 *
 *        <assign>;  <-- Here we assign the entries by the __field and
 *                       __array macros.
 *
 *        trace_current_buffer_unlock_commit(event, irq_flags, pc);
 * }
 *
 * static int ftrace_raw_reg_event_<call>(void)
 * {
 *        int ret;
 *
 *        ret = register_trace_<call>(ftrace_raw_event_<call>);
 *        if (ret)
 *                pr_info("event trace: Could not activate trace point "
 *                        "probe to <call>");
 *        return ret;
 * }
 *
 * static void ftrace_raw_unreg_event_<call>(void)
 * {
 *        unregister_trace_<call>(ftrace_raw_event_<call>);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *        .trace = ftrace_raw_output_<call>, <-- stage 2
 * };
 *
 * static int ftrace_raw_init_event_<call>(void)
 * {
 *        int id;
 *
 *        id = register_ftrace_event(&ftrace_event_type_<call>);
 *        if (!id)
 *                return -ENODEV;
 *        event_<call>.id = id;
 *        return 0;
 * }
 *
 * static struct ftrace_event_call __used
 * __attribute__((__aligned__(4)))
 * __attribute__((section("_ftrace_events"))) event_<call> = {
 *        .name = "<call>",
 *        .system = "<system>",
 *        .raw_init = ftrace_raw_init_event_<call>,
 *        .regfunc = ftrace_raw_reg_event_<call>,
 *        .unregfunc = ftrace_raw_unreg_event_<call>,
 *        .show_format = ftrace_format_<call>,
 * }
 *
 */

#undef TP_FMT
#define TP_FMT(fmt, args...) fmt "\n", ##args

#ifdef CONFIG_EVENT_PROFILE
#define _TRACE_PROFILE(call, proto, args) \
static void ftrace_profile_##call(proto) \
{ \
        extern void perf_tpcounter_event(int); \
        perf_tpcounter_event(event_##call.id); \
} \
\
static int ftrace_profile_enable_##call(struct ftrace_event_call *call) \
{ \
        int ret = 0; \
\
        if (!atomic_inc_return(&call->profile_count)) \
                ret = register_trace_##call(ftrace_profile_##call); \
\
        return ret; \
} \
\
static void ftrace_profile_disable_##call(struct ftrace_event_call *call) \
{ \
        if (atomic_add_negative(-1, &call->profile_count)) \
                unregister_trace_##call(ftrace_profile_##call); \
}

#define _TRACE_PROFILE_INIT(call) \
        .profile_count = ATOMIC_INIT(-1), \
        .profile_enable = ftrace_profile_enable_##call, \
        .profile_disable = ftrace_profile_disable_##call,

#else
#define _TRACE_PROFILE(call, proto, args)
#define _TRACE_PROFILE_INIT(call)
#endif

#define _TRACE_FORMAT(call, proto, args, fmt) \
static void ftrace_event_##call(proto) \
{ \
        event_trace_printk(_RET_IP_, #call ": " fmt); \
} \
\
static int ftrace_reg_event_##call(void) \
{ \
        int ret; \
\
        ret = register_trace_##call(ftrace_event_##call); \
        if (ret) \
                pr_info("event trace: Could not activate trace point " \
                        "probe to " #call "\n"); \
        return ret; \
} \
\
static void ftrace_unreg_event_##call(void) \
{ \
        unregister_trace_##call(ftrace_event_##call); \
} \
\
static struct ftrace_event_call event_##call; \
\
static int ftrace_init_event_##call(void) \
{ \
        int id; \
\
        id = register_ftrace_event(NULL); \
        if (!id) \
                return -ENODEV; \
        event_##call.id = id; \
        return 0; \
}

#undef TRACE_FORMAT
#define TRACE_FORMAT(call, proto, args, fmt) \
_TRACE_FORMAT(call, PARAMS(proto), PARAMS(args), PARAMS(fmt)) \
_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args)) \
static struct ftrace_event_call __used \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_events"))) event_##call = { \
        .name = #call, \
        .system = __stringify(TRACE_SYSTEM), \
        .raw_init = ftrace_init_event_##call, \
        .regfunc = ftrace_reg_event_##call, \
        .unregfunc = ftrace_unreg_event_##call, \
        _TRACE_PROFILE_INIT(call) \
}

#undef __entry
#define __entry entry

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args)) \
\
static struct ftrace_event_call event_##call; \
\
static void ftrace_raw_event_##call(proto) \
{ \
        struct ftrace_event_call *call = &event_##call; \
        struct ring_buffer_event *event; \
        struct ftrace_raw_##call *entry; \
        unsigned long irq_flags; \
        int pc; \
\
        local_save_flags(irq_flags); \
        pc = preempt_count(); \
\
        event = trace_current_buffer_lock_reserve(event_##call.id, \
                                  sizeof(struct ftrace_raw_##call), \
                                  irq_flags, pc); \
        if (!event) \
                return; \
        entry = ring_buffer_event_data(event); \
\
        assign; \
\
        if (!filter_current_check_discard(call, entry, event)) \
                trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \
} \
\
static int ftrace_raw_reg_event_##call(void) \
{ \
        int ret; \
\
        ret = register_trace_##call(ftrace_raw_event_##call); \
        if (ret) \
                pr_info("event trace: Could not activate trace point " \
                        "probe to " #call "\n"); \
        return ret; \
} \
\
static void ftrace_raw_unreg_event_##call(void) \
{ \
        unregister_trace_##call(ftrace_raw_event_##call); \
} \
\
static struct trace_event ftrace_event_type_##call = { \
        .trace = ftrace_raw_output_##call, \
}; \
\
static int ftrace_raw_init_event_##call(void) \
{ \
        int id; \
\
        id = register_ftrace_event(&ftrace_event_type_##call); \
        if (!id) \
                return -ENODEV; \
        event_##call.id = id; \
        INIT_LIST_HEAD(&event_##call.fields); \
        init_preds(&event_##call); \
        return 0; \
} \
\
static struct ftrace_event_call __used \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_events"))) event_##call = { \
        .name = #call, \
        .system = __stringify(TRACE_SYSTEM), \
        .raw_init = ftrace_raw_init_event_##call, \
        .regfunc = ftrace_raw_reg_event_##call, \
        .unregfunc = ftrace_raw_unreg_event_##call, \
        .show_format = ftrace_format_##call, \
        .define_fields = ftrace_define_fields_##call, \
        _TRACE_PROFILE_INIT(call) \
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef _TRACE_PROFILE
#undef _TRACE_PROFILE_INIT
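
For a hypothetical event like the foo_bar example above, the stages in this
header would expand to roughly the following. This is a sketch assembled from
the file's own comment blocks, not compiler output; the CONFIG_EVENT_PROFILE,
filtering, and format/define_fields pieces are omitted:

/* Stage 1: the binary record layout stored in the ring buffer. */
struct ftrace_raw_foo_bar {
        struct trace_entry ent;
        int value;
};

/* Stage 3: the probe attached to the tracepoint. It reserves space in
 * the current trace buffer, runs the TP_fast_assign() body to fill the
 * record, and commits the event. */
static void ftrace_raw_event_foo_bar(int value)
{
        struct ring_buffer_event *event;
        struct ftrace_raw_foo_bar *entry;
        unsigned long irq_flags;
        int pc;

        local_save_flags(irq_flags);
        pc = preempt_count();

        event = trace_current_buffer_lock_reserve(event_foo_bar.id,
                                  sizeof(struct ftrace_raw_foo_bar),
                                  irq_flags, pc);
        if (!event)
                return;
        entry = ring_buffer_event_data(event);

        entry->value = value;   /* the TP_fast_assign() body */

        trace_current_buffer_unlock_commit(event, irq_flags, pc);
}

Stage 2's ftrace_raw_output_foo_bar() would then render such a record through
the TP_printk() format when the trace file is read as text.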