author     Steven Rostedt <srostedt@redhat.com>      2009-11-18 20:48:08 -0500
committer  Steven Rostedt <rostedt@goodmis.org>      2009-11-24 18:24:00 -0500
commit     75ec29ab848a7e92a41aaafaeb33d1afbc839be4 (patch)
tree       d9bf6d12a60ddfe7cab9fb6ca370b0436ffe4649 /include/trace
parent     e5bc9721684e9412f3e0465222f317c362a8ab47 (diff)
tracing: Convert some sched trace events to DEFINE_EVENT and _PRINT
Converting some of the scheduler trace events to use the
TRACE_EVENT_TEMPLATE, DEFINE_EVENT and DEFINE_EVENT_PRINT helped to
save some space:

$ size kernel/sched.o-*
   text    data     bss     dec     hex filename
  79299    6776    2520   88595   15a13 kernel/sched.o-notrace
 101941   11896    2584  116421   1c6c5 kernel/sched.o-templ
 104779   11896    2584  119259   1d1db kernel/sched.o-trace

sched.o-notrace is without any tracepoints compiled
sched.o-templ   is with this patch
sched.o-trace   is the tracepoints before this patch

The trace events converted to DEFINE_EVENT:

  sched_wakeup, sched_wakeup_new, sched_process_free, sched_process_exit,
  and sched_stat_wait.

The trace events converted to DEFINE_EVENT_PRINT:

  sched_stat_sleep and sched_stat_iowait.

Note, since the TRACE_EVENT_TEMPLATE always uses a print, the
sched_stat_wait print format is defined in the template, and this
template is used by sched_stat_sleep and sched_stat_iowait. But the
latter two override the print format.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
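For reference, the general shape of the conversion is sketched below. This is an
illustrative sketch only (not part of the patch), mirroring the sched_stat events
from the diff that follows: TRACE_EVENT_TEMPLATE declares the entry layout, the
fast-assign and a default print format once; DEFINE_EVENT then stamps out an event
that reuses all of it, while DEFINE_EVENT_PRINT reuses the template but supplies
its own print format.

/* Sketch only -- mirrors the sched_stat conversion in the diff below. */
TRACE_EVENT_TEMPLATE(sched_stat_template,

	TP_PROTO(struct task_struct *tsk, u64 delay),

	TP_ARGS(tsk, delay),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( u64, delay )
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
		__entry->delay = delay;
	),

	/* Default print format; DEFINE_EVENT users inherit it unchanged. */
	TP_printk("comm=%s pid=%d delay=%Lu [ns]",
		  __entry->comm, __entry->pid,
		  (unsigned long long)__entry->delay)
);

/* Reuses the template's proto, args, entry layout, assignment and print. */
DEFINE_EVENT(sched_stat_template, sched_stat_wait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/* Reuses the same template, but overrides the print format. */
DEFINE_EVENT_PRINT(sched_stat_template, sched_stat_sleep,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay),
	     TP_printk("task: %s:%d sleep: %Lu [ns]",
		       __entry->comm, __entry->pid,
		       (unsigned long long)__entry->delay));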
Diffstat (limited to 'include/trace')
-rw-r--r--  include/trace/events/sched.h | 170
 1 file changed, 52 insertions, 118 deletions
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index b50b9856c59f..238f74b58486 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -83,7 +83,7 @@ TRACE_EVENT(sched_wait_task,
  * (NOTE: the 'rq' argument is not used by generic trace events,
  *        but used by the latency tracer plugin. )
  */
-TRACE_EVENT(sched_wakeup,
+TRACE_EVENT_TEMPLATE(sched_wakeup_template,
 
 	TP_PROTO(struct rq *rq, struct task_struct *p, int success),
 
@@ -110,38 +110,19 @@ TRACE_EVENT(sched_wakeup,
 		  __entry->success, __entry->target_cpu)
 );
 
+DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
+	     TP_PROTO(struct rq *rq, struct task_struct *p, int success),
+	     TP_ARGS(rq, p, success));
+
 /*
  * Tracepoint for waking up a new task:
  *
  * (NOTE: the 'rq' argument is not used by generic trace events,
  *        but used by the latency tracer plugin. )
  */
-TRACE_EVENT(sched_wakeup_new,
-
-	TP_PROTO(struct rq *rq, struct task_struct *p, int success),
-
-	TP_ARGS(rq, p, success),
-
-	TP_STRUCT__entry(
-		__array( char, comm, TASK_COMM_LEN )
-		__field( pid_t, pid )
-		__field( int, prio )
-		__field( int, success )
-		__field( int, target_cpu )
-	),
-
-	TP_fast_assign(
-		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
-		__entry->pid = p->pid;
-		__entry->prio = p->prio;
-		__entry->success = success;
-		__entry->target_cpu = task_cpu(p);
-	),
-
-	TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d",
-		  __entry->comm, __entry->pid, __entry->prio,
-		  __entry->success, __entry->target_cpu)
-);
+DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
+	     TP_PROTO(struct rq *rq, struct task_struct *p, int success),
+	     TP_ARGS(rq, p, success));
 
 /*
  * Tracepoint for task switches, performed by the scheduler:
@@ -216,10 +197,7 @@ TRACE_EVENT(sched_migrate_task,
 		  __entry->orig_cpu, __entry->dest_cpu)
 );
 
-/*
- * Tracepoint for freeing a task:
- */
-TRACE_EVENT(sched_process_free,
+TRACE_EVENT_TEMPLATE(sched_process_template,
 
 	TP_PROTO(struct task_struct *p),
 
@@ -242,29 +220,19 @@ TRACE_EVENT(sched_process_free,
 );
 
 /*
- * Tracepoint for a task exiting:
+ * Tracepoint for freeing a task:
  */
-TRACE_EVENT(sched_process_exit,
-
-	TP_PROTO(struct task_struct *p),
+DEFINE_EVENT(sched_process_template, sched_process_free,
+	     TP_PROTO(struct task_struct *p),
+	     TP_ARGS(p));
+
 
-	TP_ARGS(p),
-
-	TP_STRUCT__entry(
-		__array( char, comm, TASK_COMM_LEN )
-		__field( pid_t, pid )
-		__field( int, prio )
-	),
-
-	TP_fast_assign(
-		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
-		__entry->pid = p->pid;
-		__entry->prio = p->prio;
-	),
-
-	TP_printk("comm=%s pid=%d prio=%d",
-		  __entry->comm, __entry->pid, __entry->prio)
-);
+/*
+ * Tracepoint for a task exiting:
+ */
+DEFINE_EVENT(sched_process_template, sched_process_exit,
+	     TP_PROTO(struct task_struct *p),
+	     TP_ARGS(p));
 
 /*
  * Tracepoint for a waiting task:
@@ -348,12 +316,7 @@ TRACE_EVENT(sched_signal_send,
  * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
  *     adding sched_stat support to SCHED_FIFO/RR would be welcome.
  */
-
-/*
- * Tracepoint for accounting wait time (time the task is runnable
- * but not actually running due to scheduler contention).
- */
-TRACE_EVENT(sched_stat_wait,
+TRACE_EVENT_TEMPLATE(sched_stat_template,
 
 	TP_PROTO(struct task_struct *tsk, u64 delay),
 
@@ -379,6 +342,37 @@ TRACE_EVENT(sched_stat_wait,
 		  (unsigned long long)__entry->delay)
 );
 
+
+/*
+ * Tracepoint for accounting wait time (time the task is runnable
+ * but not actually running due to scheduler contention).
+ */
+DEFINE_EVENT(sched_stat_template, sched_stat_wait,
+	     TP_PROTO(struct task_struct *tsk, u64 delay),
+	     TP_ARGS(tsk, delay));
+
+/*
+ * Tracepoint for accounting sleep time (time the task is not runnable,
+ * including iowait, see below).
+ */
+DEFINE_EVENT_PRINT(sched_stat_template, sched_stat_sleep,
+	     TP_PROTO(struct task_struct *tsk, u64 delay),
+	     TP_ARGS(tsk, delay),
+	     TP_printk("task: %s:%d sleep: %Lu [ns]",
+		       __entry->comm, __entry->pid,
+		       (unsigned long long)__entry->delay));
+
+/*
+ * Tracepoint for accounting iowait time (time the task is not runnable
+ * due to waiting on IO to complete).
+ */
+DEFINE_EVENT_PRINT(sched_stat_template, sched_stat_iowait,
+	     TP_PROTO(struct task_struct *tsk, u64 delay),
+	     TP_ARGS(tsk, delay),
+	     TP_printk("task: %s:%d iowait: %Lu [ns]",
+		       __entry->comm, __entry->pid,
+		       (unsigned long long)__entry->delay));
+
 /*
  * Tracepoint for accounting runtime (time the task is executing
  * on a CPU).
@@ -412,66 +406,6 @@ TRACE_EVENT(sched_stat_runtime,
 		  (unsigned long long)__entry->vruntime)
 );
 
-/*
- * Tracepoint for accounting sleep time (time the task is not runnable,
- * including iowait, see below).
- */
-TRACE_EVENT(sched_stat_sleep,
-
-	TP_PROTO(struct task_struct *tsk, u64 delay),
-
-	TP_ARGS(tsk, delay),
-
-	TP_STRUCT__entry(
-		__array( char, comm, TASK_COMM_LEN )
-		__field( pid_t, pid )
-		__field( u64, delay )
-	),
-
-	TP_fast_assign(
-		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
-		__entry->pid = tsk->pid;
-		__entry->delay = delay;
-	)
-	TP_perf_assign(
-		__perf_count(delay);
-	),
-
-	TP_printk("comm=%s pid=%d delay=%Lu [ns]",
-		  __entry->comm, __entry->pid,
-		  (unsigned long long)__entry->delay)
-);
-
-/*
- * Tracepoint for accounting iowait time (time the task is not runnable
- * due to waiting on IO to complete).
- */
-TRACE_EVENT(sched_stat_iowait,
-
-	TP_PROTO(struct task_struct *tsk, u64 delay),
-
-	TP_ARGS(tsk, delay),
-
-	TP_STRUCT__entry(
-		__array( char, comm, TASK_COMM_LEN )
-		__field( pid_t, pid )
-		__field( u64, delay )
-	),
-
-	TP_fast_assign(
-		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
-		__entry->pid = tsk->pid;
-		__entry->delay = delay;
-	)
-	TP_perf_assign(
-		__perf_count(delay);
-	),
-
-	TP_printk("comm=%s pid=%d delay=%Lu [ns]",
-		  __entry->comm, __entry->pid,
-		  (unsigned long long)__entry->delay)
-);
-
 #endif /* _TRACE_SCHED_H */
 
 /* This part must be outside protection */