author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2009-07-23 14:13:26 -0400
committer  Ingo Molnar <mingo@elte.hu>                2009-09-02 03:12:18 -0400
commit     768d0c27226e6587cad2fcf543f9711da3f3774e (patch)
tree       0e299fb34584ff3c5c59f3ed95da70aab372dbc7
parent     8f0dfc34e9b323a028c2ec41abb7e9de477b7a94 (diff)
sched: Add wait, sleep and iowait accounting tracepoints
Add 3 schedstat tracepoints to help account for wait-time, sleep-time and
iowait-time.

They can also be used as a perf-counter source to profile tasks on these
clocks.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Arjan van de Ven <arjan@linux.intel.com>
LKML-Reference: <new-submission>
[ build fix for the !CONFIG_SCHEDSTATS case ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--   include/trace/events/sched.h   95
-rw-r--r--   kernel/sched_fair.c            12
2 files changed, 106 insertions(+), 1 deletion(-)
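
The changelog notes that these tracepoints can also drive perf counters; because TP_perf_assign() uses __perf_count(delay), such a counter accumulates time in nanoseconds rather than counting events. The sketch below is purely illustrative and not part of the patch: it opens sched:sched_stat_wait as a per-task tracepoint counter through perf_event_open(), and assumes a kernel that provides that syscall and these events, debugfs mounted at /sys/kernel/debug, CONFIG_SCHEDSTATS=y, and sufficient privileges.

/*
 * Hypothetical userspace sketch (not part of this patch): open the new
 * sched:sched_stat_wait tracepoint as a per-task perf counter.  Since
 * TP_perf_assign() calls __perf_count(delay), the value read back is the
 * accumulated wait time in nanoseconds, not an event count.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static long sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
				int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	const char *id_path =
		"/sys/kernel/debug/tracing/events/sched/sched_stat_wait/id";
	struct perf_event_attr attr;
	unsigned long long id = 0;
	uint64_t count = 0;
	FILE *f;
	int fd;

	/* ftrace exports a numeric id per tracepoint; perf uses it as config */
	f = fopen(id_path, "r");
	if (!f || fscanf(f, "%llu", &id) != 1)
		return 1;
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_TRACEPOINT;	/* tracepoint as counter source */
	attr.size = sizeof(attr);
	attr.config = id;			/* event id read from debugfs */

	fd = sys_perf_event_open(&attr, 0 /* this task */, -1, -1, 0);
	if (fd < 0)
		return 1;

	sleep(1);	/* stand-in for the workload being profiled */

	/* read back the summed scheduler wait time, in ns */
	if (read(fd, &count, sizeof(count)) != sizeof(count))
		return 1;
	printf("wait: %llu [ns]\n", (unsigned long long)count);
	return 0;
}

The same idea extends to sched_stat_sleep and sched_stat_iowait by reading the respective event id; the only point made here is that __perf_count() turns the delay argument into the counter increment.
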
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 8949bb7eb082..a4c369ec328f 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -340,6 +340,101 @@ TRACE_EVENT(sched_signal_send,
 		__entry->sig, __entry->comm, __entry->pid)
 );
 
+/*
+ * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
+ *     adding sched_stat support to SCHED_FIFO/RR would be welcome.
+ */
+
+/*
+ * Tracepoint for accounting wait time (time the task is runnable
+ * but not actually running due to scheduler contention).
+ */
+TRACE_EVENT(sched_stat_wait,
+
+	TP_PROTO(struct task_struct *tsk, u64 delay),
+
+	TP_ARGS(tsk, delay),
+
+	TP_STRUCT__entry(
+		__array( char, comm, TASK_COMM_LEN )
+		__field( pid_t, pid )
+		__field( u64, delay )
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+		__entry->pid = tsk->pid;
+		__entry->delay = delay;
+	)
+	TP_perf_assign(
+		__perf_count(delay);
+	),
+
+	TP_printk("task: %s:%d wait: %Lu [ns]",
+			__entry->comm, __entry->pid,
+			(unsigned long long)__entry->delay)
+);
+
+/*
+ * Tracepoint for accounting sleep time (time the task is not runnable,
+ * including iowait, see below).
+ */
+TRACE_EVENT(sched_stat_sleep,
+
+	TP_PROTO(struct task_struct *tsk, u64 delay),
+
+	TP_ARGS(tsk, delay),
+
+	TP_STRUCT__entry(
+		__array( char, comm, TASK_COMM_LEN )
+		__field( pid_t, pid )
+		__field( u64, delay )
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+		__entry->pid = tsk->pid;
+		__entry->delay = delay;
+	)
+	TP_perf_assign(
+		__perf_count(delay);
+	),
+
+	TP_printk("task: %s:%d sleep: %Lu [ns]",
+			__entry->comm, __entry->pid,
+			(unsigned long long)__entry->delay)
+);
+
+/*
+ * Tracepoint for accounting iowait time (time the task is not runnable
+ * due to waiting on IO to complete).
+ */
+TRACE_EVENT(sched_stat_iowait,
+
+	TP_PROTO(struct task_struct *tsk, u64 delay),
+
+	TP_ARGS(tsk, delay),
+
+	TP_STRUCT__entry(
+		__array( char, comm, TASK_COMM_LEN )
+		__field( pid_t, pid )
+		__field( u64, delay )
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+		__entry->pid = tsk->pid;
+		__entry->delay = delay;
+	)
+	TP_perf_assign(
+		__perf_count(delay);
+	),
+
+	TP_printk("task: %s:%d iowait: %Lu [ns]",
+			__entry->comm, __entry->pid,
+			(unsigned long long)__entry->delay)
+);
+
 #endif /* _TRACE_SCHED_H */
 
 /* This part must be outside protection */
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 471fa281f5e0..2ff850f90d1e 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -546,6 +546,13 @@ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	schedstat_set(se->wait_sum, se->wait_sum +
 			rq_of(cfs_rq)->clock - se->wait_start);
 	schedstat_set(se->wait_start, 0);
+
+#ifdef CONFIG_SCHEDSTATS
+	if (entity_is_task(se)) {
+		trace_sched_stat_wait(task_of(se),
+			rq_of(cfs_rq)->clock - se->wait_start);
+	}
+#endif
 }
 
 static inline void
@@ -636,8 +643,10 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		se->sleep_start = 0;
 		se->sum_sleep_runtime += delta;
 
-		if (tsk)
+		if (tsk) {
 			account_scheduler_latency(tsk, delta >> 10, 1);
+			trace_sched_stat_sleep(tsk, delta);
+		}
 	}
 	if (se->block_start) {
 		u64 delta = rq_of(cfs_rq)->clock - se->block_start;
@@ -655,6 +664,7 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 			if (tsk->in_iowait) {
 				se->iowait_sum += delta;
 				se->iowait_count++;
+				trace_sched_stat_iowait(tsk, delta);
 			}
 
 			/*
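
An in-kernel consumer could likewise attach a probe using the register_trace_*() helpers that TRACE_EVENT() generates. The module below is a hypothetical sketch, not part of this patch: it assumes the probe-registration signature of this kernel generation (no private data argument) and that the tracepoint is reachable from the caller, i.e. built-in code or an EXPORT_TRACEPOINT_SYMBOL_GPL() export, which this patch does not add.

/*
 * Hypothetical in-kernel consumer (not part of this patch): attach a
 * probe to sched_stat_iowait and tally the iowait nanoseconds it reports.
 * Assumes register_trace_<name>() takes only the probe function and that
 * the tracepoint is visible to this code.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <trace/events/sched.h>

/* plain accumulator; not SMP-safe, kept simple for illustration */
static u64 total_iowait_ns;

static void probe_iowait(struct task_struct *tsk, u64 delay)
{
	/* delay is rq clock minus se->block_start, in nanoseconds */
	total_iowait_ns += delay;
	pr_debug("iowait %s:%d +%llu ns\n", tsk->comm, tsk->pid,
		 (unsigned long long)delay);
}

static int __init iowait_probe_init(void)
{
	return register_trace_sched_stat_iowait(probe_iowait);
}

static void __exit iowait_probe_exit(void)
{
	unregister_trace_sched_stat_iowait(probe_iowait);
	tracepoint_synchronize_unregister();
}

module_init(iowait_probe_init);
module_exit(iowait_probe_exit);
MODULE_LICENSE("GPL");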