author		Ingo Molnar <mingo@elte.hu>	2009-11-25 03:03:15 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-11-25 03:03:15 -0500
commit		9533ac6291d78cd16c4b11a15bfbb055affd76c3 (patch)
tree		da1ebe92d1a9f77a30248625e7b5208fef515168
parent		fe6126722718e51fba4879517c11ac12d9775bcc (diff)
parent		75ec29ab848a7e92a41aaafaeb33d1afbc839be4 (diff)
Merge branch 'tip/perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into perf/core
-rw-r--r--	include/linux/tracepoint.h	6
-rw-r--r--	include/trace/define_trace.h	11
-rw-r--r--	include/trace/events/sched.h	170
-rw-r--r--	include/trace/ftrace.h	266
4 files changed, 292 insertions, 161 deletions
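
The sched.h conversions below all follow one pattern: a single TRACE_EVENT_TEMPLATE carries the TP_STRUCT__entry/TP_fast_assign/TP_printk body once, and each tracepoint that shares that shape is then declared with DEFINE_EVENT, or with DEFINE_EVENT_PRINT when it only needs its own print format. As a minimal sketch of that pattern (the foo_template/foo_start/foo_done names are hypothetical, used only for illustration; they are not part of this commit):

TRACE_EVENT_TEMPLATE(foo_template,

	TP_PROTO(struct task_struct *p, int value),

	TP_ARGS(p, value),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( int,	value			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->value	= value;
	),

	/* default print format, shared by every event defined from this template */
	TP_printk("comm=%s value=%d", __entry->comm, __entry->value)
);

/* reuses the template's entry layout, assignment and print format */
DEFINE_EVENT(foo_template, foo_start,
	     TP_PROTO(struct task_struct *p, int value),
	     TP_ARGS(p, value));

/* same layout and assignment, but overrides only the print format */
DEFINE_EVENT_PRINT(foo_template, foo_done,
	     TP_PROTO(struct task_struct *p, int value),
	     TP_ARGS(p, value),
	     TP_printk("done: %s value=%d", __entry->comm, __entry->value));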
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 2aac8a83e89b..7063383cca13 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -280,6 +280,12 @@ static inline void tracepoint_synchronize_unregister(void)
  * TRACE_EVENT_FN to perform any (un)registration work.
  */
 
+#define TRACE_EVENT_TEMPLATE(name, proto, args, tstruct, assign, print)
+#define DEFINE_EVENT(template, name, proto, args) \
+	DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
+	DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
+
 #define TRACE_EVENT(name, proto, args, struct, assign, print) \
 	DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
 #define TRACE_EVENT_FN(name, proto, args, struct, \
diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h
index 2a4b3bf74033..5d7d855ae21e 100644
--- a/include/trace/define_trace.h
+++ b/include/trace/define_trace.h
@@ -31,6 +31,14 @@
 		assign, print, reg, unreg) \
 	DEFINE_TRACE_FN(name, reg, unreg)
 
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, name, proto, args) \
+	DEFINE_TRACE(name)
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
+	DEFINE_TRACE(name)
+
 #undef DECLARE_TRACE
 #define DECLARE_TRACE(name, proto, args) \
 	DEFINE_TRACE(name)
@@ -63,6 +71,9 @@
 
 #undef TRACE_EVENT
 #undef TRACE_EVENT_FN
+#undef TRACE_EVENT_TEMPLATE
+#undef DEFINE_EVENT
+#undef DEFINE_EVENT_PRINT
 #undef TRACE_HEADER_MULTI_READ
 
 /* Only undef what we defined in this file */
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index b50b9856c59f..238f74b58486 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -83,7 +83,7 @@ TRACE_EVENT(sched_wait_task,
  * (NOTE: the 'rq' argument is not used by generic trace events,
  * but used by the latency tracer plugin. )
  */
-TRACE_EVENT(sched_wakeup,
+TRACE_EVENT_TEMPLATE(sched_wakeup_template,
 
 	TP_PROTO(struct rq *rq, struct task_struct *p, int success),
 
@@ -110,38 +110,19 @@ TRACE_EVENT(sched_wakeup,
 		  __entry->success, __entry->target_cpu)
 );
 
+DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
+	     TP_PROTO(struct rq *rq, struct task_struct *p, int success),
+	     TP_ARGS(rq, p, success));
+
 /*
  * Tracepoint for waking up a new task:
  *
  * (NOTE: the 'rq' argument is not used by generic trace events,
  * but used by the latency tracer plugin. )
  */
-TRACE_EVENT(sched_wakeup_new,
-
-	TP_PROTO(struct rq *rq, struct task_struct *p, int success),
-
-	TP_ARGS(rq, p, success),
-
-	TP_STRUCT__entry(
-		__array( char,	comm,	TASK_COMM_LEN	)
-		__field( pid_t,	pid			)
-		__field( int,	prio			)
-		__field( int,	success			)
-		__field( int,	target_cpu		)
-	),
-
-	TP_fast_assign(
-		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
-		__entry->pid		= p->pid;
-		__entry->prio		= p->prio;
-		__entry->success	= success;
-		__entry->target_cpu	= task_cpu(p);
-	),
-
-	TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d",
-		  __entry->comm, __entry->pid, __entry->prio,
-		  __entry->success, __entry->target_cpu)
-);
+DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
+	     TP_PROTO(struct rq *rq, struct task_struct *p, int success),
+	     TP_ARGS(rq, p, success));
 
 /*
  * Tracepoint for task switches, performed by the scheduler:
@@ -216,10 +197,7 @@ TRACE_EVENT(sched_migrate_task,
 		  __entry->orig_cpu, __entry->dest_cpu)
 );
 
-/*
- * Tracepoint for freeing a task:
- */
-TRACE_EVENT(sched_process_free,
+TRACE_EVENT_TEMPLATE(sched_process_template,
 
 	TP_PROTO(struct task_struct *p),
 
@@ -242,29 +220,19 @@ TRACE_EVENT(sched_process_free,
 );
 
 /*
- * Tracepoint for a task exiting:
+ * Tracepoint for freeing a task:
  */
-TRACE_EVENT(sched_process_exit,
-
-	TP_PROTO(struct task_struct *p),
-
-	TP_ARGS(p),
-
-	TP_STRUCT__entry(
-		__array( char,	comm,	TASK_COMM_LEN	)
-		__field( pid_t,	pid			)
-		__field( int,	prio			)
-	),
-
-	TP_fast_assign(
-		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
-		__entry->pid		= p->pid;
-		__entry->prio		= p->prio;
-	),
-
-	TP_printk("comm=%s pid=%d prio=%d",
-		  __entry->comm, __entry->pid, __entry->prio)
-);
+DEFINE_EVENT(sched_process_template, sched_process_free,
+	     TP_PROTO(struct task_struct *p),
+	     TP_ARGS(p));
+
+
+/*
+ * Tracepoint for a task exiting:
+ */
+DEFINE_EVENT(sched_process_template, sched_process_exit,
+	     TP_PROTO(struct task_struct *p),
+	     TP_ARGS(p));
 
 /*
  * Tracepoint for a waiting task:
@@ -348,12 +316,7 @@ TRACE_EVENT(sched_signal_send,
  * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
  * adding sched_stat support to SCHED_FIFO/RR would be welcome.
  */
-
-/*
- * Tracepoint for accounting wait time (time the task is runnable
- * but not actually running due to scheduler contention).
- */
-TRACE_EVENT(sched_stat_wait,
+TRACE_EVENT_TEMPLATE(sched_stat_template,
 
 	TP_PROTO(struct task_struct *tsk, u64 delay),
 
@@ -379,6 +342,37 @@ TRACE_EVENT(sched_stat_wait,
 		  (unsigned long long)__entry->delay)
 );
 
+
+/*
+ * Tracepoint for accounting wait time (time the task is runnable
+ * but not actually running due to scheduler contention).
+ */
+DEFINE_EVENT(sched_stat_template, sched_stat_wait,
+	     TP_PROTO(struct task_struct *tsk, u64 delay),
+	     TP_ARGS(tsk, delay));
+
+/*
+ * Tracepoint for accounting sleep time (time the task is not runnable,
+ * including iowait, see below).
+ */
+DEFINE_EVENT_PRINT(sched_stat_template, sched_stat_sleep,
+	     TP_PROTO(struct task_struct *tsk, u64 delay),
+	     TP_ARGS(tsk, delay),
+	     TP_printk("task: %s:%d sleep: %Lu [ns]",
+		       __entry->comm, __entry->pid,
+		       (unsigned long long)__entry->delay));
+
+/*
+ * Tracepoint for accounting iowait time (time the task is not runnable
+ * due to waiting on IO to complete).
+ */
+DEFINE_EVENT_PRINT(sched_stat_template, sched_stat_iowait,
+	     TP_PROTO(struct task_struct *tsk, u64 delay),
+	     TP_ARGS(tsk, delay),
+	     TP_printk("task: %s:%d iowait: %Lu [ns]",
+		       __entry->comm, __entry->pid,
+		       (unsigned long long)__entry->delay));
+
 /*
  * Tracepoint for accounting runtime (time the task is executing
  * on a CPU).
@@ -412,66 +406,6 @@ TRACE_EVENT(sched_stat_runtime,
 		  (unsigned long long)__entry->vruntime)
 );
 
-/*
- * Tracepoint for accounting sleep time (time the task is not runnable,
- * including iowait, see below).
- */
-TRACE_EVENT(sched_stat_sleep,
-
-	TP_PROTO(struct task_struct *tsk, u64 delay),
-
-	TP_ARGS(tsk, delay),
-
-	TP_STRUCT__entry(
-		__array( char,	comm,	TASK_COMM_LEN	)
-		__field( pid_t,	pid			)
-		__field( u64,	delay			)
-	),
-
-	TP_fast_assign(
-		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
-		__entry->pid	= tsk->pid;
-		__entry->delay	= delay;
-	)
-	TP_perf_assign(
-		__perf_count(delay);
-	),
-
-	TP_printk("comm=%s pid=%d delay=%Lu [ns]",
-		  __entry->comm, __entry->pid,
-		  (unsigned long long)__entry->delay)
-);
-
-/*
- * Tracepoint for accounting iowait time (time the task is not runnable
- * due to waiting on IO to complete).
- */
-TRACE_EVENT(sched_stat_iowait,
-
-	TP_PROTO(struct task_struct *tsk, u64 delay),
-
-	TP_ARGS(tsk, delay),
-
-	TP_STRUCT__entry(
-		__array( char,	comm,	TASK_COMM_LEN	)
-		__field( pid_t,	pid			)
-		__field( u64,	delay			)
-	),
-
-	TP_fast_assign(
-		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
-		__entry->pid	= tsk->pid;
-		__entry->delay	= delay;
-	)
-	TP_perf_assign(
-		__perf_count(delay);
-	),
-
-	TP_printk("comm=%s pid=%d delay=%Lu [ns]",
-		  __entry->comm, __entry->pid,
-		  (unsigned long long)__entry->delay)
-);
-
 #endif /* _TRACE_SCHED_H */
 
 /* This part must be outside protection */
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index c3417c13e3ed..b0461772bc8d 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -18,6 +18,26 @@
 
 #include <linux/ftrace_event.h>
 
+/*
+ * TRACE_EVENT_TEMPLATE can be used to add a generic function
+ * handlers for events. That is, if all events have the same
+ * parameters and just have distinct trace points.
+ * Each tracepoint can be defined with DEFINE_EVENT and that
+ * will map the TRACE_EVENT_TEMPLATE to the tracepoint.
+ *
+ * TRACE_EVENT is a one to one mapping between tracepoint and template.
+ */
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, args, tstruct, assign, print)	\
+	TRACE_EVENT_TEMPLATE(name,				\
+			     PARAMS(proto),			\
+			     PARAMS(args),			\
+			     PARAMS(tstruct),			\
+			     PARAMS(assign),			\
+			     PARAMS(print));			\
+	DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));
+
+
 #undef __field
 #define __field(type, item)		type	item;
 
@@ -36,15 +56,21 @@
 #undef TP_STRUCT__entry
 #define TP_STRUCT__entry(args...) args
 
-#undef TRACE_EVENT
-#define TRACE_EVENT(name, proto, args, tstruct, assign, print)	\
+#undef TRACE_EVENT_TEMPLATE
+#define TRACE_EVENT_TEMPLATE(name, proto, args, tstruct, assign, print) \
 	struct ftrace_raw_##name {				\
 		struct trace_entry	ent;			\
 		tstruct						\
 		char			__data[0];		\
-	};							\
+	};
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, name, proto, args)		\
 	static struct ftrace_event_call event_##name
 
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
+	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
+
 #undef __cpparg
 #define __cpparg(arg...) arg
 
@@ -89,12 +115,19 @@
 #undef __string
 #define __string(item, src) __dynamic_array(char, item, -1)
 
-#undef TRACE_EVENT
-#define TRACE_EVENT(call, proto, args, tstruct, assign, print)	\
+#undef TRACE_EVENT_TEMPLATE
+#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, assign, print) \
 	struct ftrace_data_offsets_##call {			\
 		tstruct;					\
 	};
 
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, name, proto, args)
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
+	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
+
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
 /*
@@ -170,17 +203,50 @@
 #undef TP_perf_assign
 #define TP_perf_assign(args...)
 
-#undef TRACE_EVENT
-#define TRACE_EVENT(call, proto, args, tstruct, func, print)	\
+#undef TRACE_EVENT_TEMPLATE
+#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, func, print) \
 static int							\
-ftrace_format_##call(struct ftrace_event_call *unused,		\
+ftrace_format_setup_##call(struct ftrace_event_call *unused,	\
 		     struct trace_seq *s)			\
 {								\
 	struct ftrace_raw_##call field __attribute__((unused));\
 	int ret = 0;						\
 								\
 	tstruct;						\
 								\
+	return ret;						\
+}								\
+								\
+static int							\
+ftrace_format_##call(struct ftrace_event_call *unused,		\
+		     struct trace_seq *s)			\
+{								\
+	int ret = 0;						\
+								\
+	ret = ftrace_format_setup_##call(unused, s);		\
+	if (!ret)						\
+		return ret;					\
+								\
+	ret = trace_seq_printf(s, "\nprint fmt: " print);	\
+								\
+	return ret;						\
+}
+
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, name, proto, args)
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
+static int							\
+ftrace_format_##name(struct ftrace_event_call *unused,		\
+		     struct trace_seq *s)			\
+{								\
+	int ret = 0;						\
+								\
+	ret = ftrace_format_setup_##template(unused, s);	\
+	if (!ret)						\
+		return ret;					\
+								\
 	trace_seq_printf(s, "\nprint fmt: " print);		\
 								\
 	return ret;						\
@@ -255,10 +321,11 @@ ftrace_format_##call(struct ftrace_event_call *unused, \
 		ftrace_print_symbols_seq(p, value, symbols);	\
 	})
 
-#undef TRACE_EVENT
-#define TRACE_EVENT(call, proto, args, tstruct, assign, print)	\
+#undef TRACE_EVENT_TEMPLATE
+#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, assign, print) \
 static enum print_line_t					\
-ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
+ftrace_raw_output_id_##call(int event_id, const char *name,	\
+			    struct trace_iterator *iter, int flags) \
 {								\
 	struct trace_seq *s = &iter->seq;			\
 	struct ftrace_raw_##call *field;			\
@@ -268,6 +335,47 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
 								\
 	entry = iter->ent;					\
 								\
+	if (entry->type != event_id) {				\
+		WARN_ON_ONCE(1);				\
+		return TRACE_TYPE_UNHANDLED;			\
+	}							\
+								\
+	field = (typeof(field))entry;				\
+								\
+	p = &get_cpu_var(ftrace_event_seq);			\
+	trace_seq_init(p);					\
+	ret = trace_seq_printf(s, "%s: ", name);		\
+	if (ret)						\
+		ret = trace_seq_printf(s, print);		\
+	put_cpu();						\
+	if (!ret)						\
+		return TRACE_TYPE_PARTIAL_LINE;			\
+								\
+	return TRACE_TYPE_HANDLED;				\
+}
+
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, name, proto, args)		\
+static enum print_line_t					\
+ftrace_raw_output_##name(struct trace_iterator *iter, int flags) \
+{								\
+	return ftrace_raw_output_id_##template(event_##name.id, \
+					       #name, iter, flags); \
+}
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, call, proto, args, print)	\
+static enum print_line_t					\
+ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
+{								\
+	struct trace_seq *s = &iter->seq;			\
+	struct ftrace_raw_##template *field;			\
+	struct trace_entry *entry;				\
+	struct trace_seq *p;					\
+	int ret;						\
+								\
+	entry = iter->ent;					\
+								\
 	if (entry->type != event_##call.id) {			\
 		WARN_ON_ONCE(1);				\
 		return TRACE_TYPE_UNHANDLED;			\
@@ -277,14 +385,16 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
 								\
 	p = &get_cpu_var(ftrace_event_seq);			\
 	trace_seq_init(p);					\
-	ret = trace_seq_printf(s, #call ": " print);		\
+	ret = trace_seq_printf(s, "%s: ", #call);		\
+	if (ret)						\
+		ret = trace_seq_printf(s, print);		\
 	put_cpu();						\
 	if (!ret)						\
 		return TRACE_TYPE_PARTIAL_LINE;			\
 								\
 	return TRACE_TYPE_HANDLED;				\
 }
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
 #undef __field_ext
@@ -318,8 +428,8 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
 #undef __string
 #define __string(item, src) __dynamic_array(char, item, -1)
 
-#undef TRACE_EVENT
-#define TRACE_EVENT(call, proto, args, tstruct, func, print)	\
+#undef TRACE_EVENT_TEMPLATE
+#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, func, print) \
 static int							\
 ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
 {								\
@@ -335,6 +445,13 @@ ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
 	return ret;						\
 }
 
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, name, proto, args)
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
+	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
+
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
 /*
@@ -361,10 +478,10 @@ ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
 	__data_size += (len) * sizeof(type);
 
 #undef __string
-#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1) \
+#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)
 
-#undef TRACE_EVENT
-#define TRACE_EVENT(call, proto, args, tstruct, assign, print)	\
+#undef TRACE_EVENT_TEMPLATE
+#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, assign, print) \
 static inline int ftrace_get_offsets_##call(			\
 	struct ftrace_data_offsets_##call *__data_offsets, proto) \
 {								\
@@ -376,6 +493,13 @@ static inline int ftrace_get_offsets_##call( \
 	return __data_size;					\
 }
 
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, name, proto, args)
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
+	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
+
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
 #ifdef CONFIG_EVENT_PROFILE
@@ -397,21 +521,28 @@ static inline int ftrace_get_offsets_##call( \
  *
  */
 
-#undef TRACE_EVENT
-#define TRACE_EVENT(call, proto, args, tstruct, assign, print)	\
+#undef TRACE_EVENT_TEMPLATE
+#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, assign, print)
+
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, name, proto, args)		\
 								\
-static void ftrace_profile_##call(proto);			\
+static void ftrace_profile_##name(proto);			\
 								\
-static int ftrace_profile_enable_##call(struct ftrace_event_call *unused)\
+static int ftrace_profile_enable_##name(struct ftrace_event_call *unused)\
 {								\
-	return register_trace_##call(ftrace_profile_##call);	\
+	return register_trace_##name(ftrace_profile_##name);	\
 }								\
 								\
-static void ftrace_profile_disable_##call(struct ftrace_event_call *unused)\
+static void ftrace_profile_disable_##name(struct ftrace_event_call *unused)\
 {								\
-	unregister_trace_##call(ftrace_profile_##call);		\
+	unregister_trace_##name(ftrace_profile_##name);		\
 }
 
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
+	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
+
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
 #endif
@@ -550,15 +681,13 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *unused)\
 #define __assign_str(dst, src)					\
 	strcpy(__get_str(dst), src);
 
-#undef TRACE_EVENT
-#define TRACE_EVENT(call, proto, args, tstruct, assign, print)	\
+#undef TRACE_EVENT_TEMPLATE
+#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, assign, print) \
 								\
-static struct ftrace_event_call event_##call;			\
-								\
-static void ftrace_raw_event_##call(proto)			\
+static void ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
+				       proto)			\
 {								\
 	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
-	struct ftrace_event_call *event_call = &event_##call;	\
 	struct ring_buffer_event *event;			\
 	struct ftrace_raw_##call *entry;			\
 	struct ring_buffer *buffer;				\
@@ -572,7 +701,7 @@ static void ftrace_raw_event_##call(proto) \
 	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
 								\
 	event = trace_current_buffer_lock_reserve(&buffer,	\
-				 event_##call.id,		\
+				 event_call->id,		\
 				 sizeof(*entry) + __data_size,	\
 				 irq_flags, pc);		\
 	if (!event)						\
@@ -587,6 +716,14 @@ static void ftrace_raw_event_##call(proto) \
 	if (!filter_current_check_discard(buffer, event_call, entry, event)) \
 		trace_nowake_buffer_unlock_commit(buffer,	\
 						  event, irq_flags, pc); \
+}
+
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, call, proto, args)		\
+								\
+static void ftrace_raw_event_##call(proto)			\
+{								\
+	ftrace_raw_event_id_##template(&event_##call, args);	\
 }								\
 								\
 static int ftrace_raw_reg_event_##call(struct ftrace_event_call *unused)\
@@ -619,7 +756,36 @@ static int ftrace_raw_init_event_##call(struct ftrace_event_call *unused)\
 	event_##call.id = id;					\
 	INIT_LIST_HEAD(&event_##call.fields);			\
 	return 0;						\
-}								\
+}
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
+	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+#undef TRACE_EVENT_TEMPLATE
+#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, assign, print)
+
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, call, proto, args)		\
+								\
+static struct ftrace_event_call __used				\
+__attribute__((__aligned__(4)))					\
+__attribute__((section("_ftrace_events"))) event_##call = {	\
+	.name			= #call,			\
+	.system			= __stringify(TRACE_SYSTEM),	\
+	.event			= &ftrace_event_type_##call,	\
+	.raw_init		= ftrace_raw_init_event_##call,	\
+	.regfunc		= ftrace_raw_reg_event_##call,	\
+	.unregfunc		= ftrace_raw_unreg_event_##call, \
+	.show_format		= ftrace_format_##template,	\
+	.define_fields		= ftrace_define_fields_##template, \
+	_TRACE_PROFILE_INIT(call)				\
+}
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, call, proto, args, print)	\
 								\
 static struct ftrace_event_call __used				\
 __attribute__((__aligned__(4)))					\
@@ -631,7 +797,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 	.regfunc		= ftrace_raw_reg_event_##call,	\
 	.unregfunc		= ftrace_raw_unreg_event_##call, \
 	.show_format		= ftrace_format_##call,		\
-	.define_fields		= ftrace_define_fields_##call,	\
+	.define_fields		= ftrace_define_fields_##template, \
 	_TRACE_PROFILE_INIT(call)				\
 }
 
@@ -719,14 +885,15 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 #undef __perf_count
 #define __perf_count(c) __count = (c)
 
-#undef TRACE_EVENT
-#define TRACE_EVENT(call, proto, args, tstruct, assign, print)	\
-static void ftrace_profile_##call(proto)			\
+#undef TRACE_EVENT_TEMPLATE
+#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, assign, print) \
+static void							\
+ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \
+			    proto)				\
 {								\
 	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
 	extern int perf_swevent_get_recursion_context(void);	\
 	extern void perf_swevent_put_recursion_context(int rctx); \
-	struct ftrace_event_call *event_call = &event_##call;	\
 	extern void perf_tp_event(int, u64, u64, void *, int);	\
 	struct ftrace_raw_##call *entry;			\
 	u64 __addr = 0, __count = 1;				\
@@ -789,6 +956,19 @@ end_recursion: \
 								\
 }
 
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, call, proto, args)		\
+static void ftrace_profile_##call(proto)			\
+{								\
+	struct ftrace_event_call *event_call = &event_##call;	\
+								\
+	ftrace_profile_templ_##template(event_call, args);	\
+}
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
+	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
+
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 #endif /* CONFIG_EVENT_PROFILE */
 
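
Taken together, the ftrace.h stages above turn each DEFINE_EVENT into thin per-event wrappers that forward to the shared per-template workers (ftrace_raw_event_id_##template, ftrace_raw_output_id_##template, ftrace_profile_templ_##template), so the bulk of the generated code exists once per template rather than once per event. Roughly, hand-expanding a hypothetical DEFINE_EVENT(foo_template, foo_start, ...) gives wrappers of this shape (a sketch for illustration, not the literal preprocessor output):

/* recording path: forward to the template's worker with this event's call */
static void ftrace_raw_event_foo_start(struct task_struct *p, int value)
{
	ftrace_raw_event_id_foo_template(&event_foo_start, p, value);
}

/* output path: the template's worker is told which event id and name to print */
static enum print_line_t
ftrace_raw_output_foo_start(struct trace_iterator *iter, int flags)
{
	return ftrace_raw_output_id_foo_template(event_foo_start.id,
						 "foo_start", iter, flags);
}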