author      Ingo Molnar <mingo@elte.hu>    2009-09-12 04:08:34 -0400
committer   Ingo Molnar <mingo@elte.hu>    2009-09-13 04:22:45 -0400
commit      ea92ed5a8f4e6c638efe7de2efe8a875d580ad3f (patch)
tree        dcdad8cc0b26a3aec5fb98a261da147a7fa662df /tools/perf/builtin-sched.c
parent      d9340c1db3f52460a8335eeb127a2728c5bba6ce (diff)
perf sched: Add runtime stats
Extend the latency tracking structure with scheduling atom
runtime info, and sum it up in the per-task display.

(Also clean up a few details.)
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
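
The accounting scheme behind the patch is simple: sched_switch events arrive per CPU, so builtin-sched.c remembers, for each CPU, the timestamp of the previous switch; the difference to the current switch is the runtime of the scheduling atom that just ended, and that delta is attached to the latency snapshot of the task being switched out. Below is a minimal standalone sketch of that bookkeeping, for orientation only: the MAX_CPUS value and the switch_delta() helper are illustrative stand-ins, while cpu_last_switched[] mirrors the array actually used in the patch.

```c
/*
 * Illustrative sketch of the per-CPU runtime accounting added by this
 * patch.  MAX_CPUS' value and the helper name are simplified stand-ins;
 * only cpu_last_switched[] mirrors the real builtin-sched.c code.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_CPUS 4096

static uint64_t cpu_last_switched[MAX_CPUS];

/* Called for every sched_switch event seen on 'cpu' at 'timestamp' (ns). */
static int64_t switch_delta(int cpu, uint64_t timestamp)
{
	uint64_t prev;

	if (cpu < 0 || cpu >= MAX_CPUS)
		return 0;

	prev = cpu_last_switched[cpu];
	cpu_last_switched[cpu] = timestamp;

	/* First switch seen on this CPU: nothing to measure against yet. */
	return prev ? (int64_t)(timestamp - prev) : 0;
}

int main(void)
{
	/*
	 * Two switches on CPU 0, 1.5 ms apart: the second one reports the
	 * runtime of the scheduling atom that just ended.
	 */
	switch_delta(0, 1000000000ULL);
	printf("runtime: %lld ns\n",
	       (long long)switch_delta(0, 1001500000ULL));
	return 0;
}
```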
Diffstat (limited to 'tools/perf/builtin-sched.c')
-rw-r--r--   tools/perf/builtin-sched.c | 58
1 file changed, 38 insertions(+), 20 deletions(-)
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index a084c284e198..c382f530d4c6 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -243,8 +243,8 @@ add_sched_event_run(struct task_desc *task, nsec_t timestamp, u64 duration)
 	nr_run_events++;
 }

 static unsigned long targetless_wakeups;
 static unsigned long multitarget_wakeups;

 static void
 add_sched_event_wakeup(struct task_desc *task, nsec_t timestamp,
@@ -485,10 +485,10 @@ static void create_tasks(void)
 	}
 }

 static nsec_t cpu_usage;
 static nsec_t runavg_cpu_usage;
 static nsec_t parent_cpu_usage;
 static nsec_t runavg_parent_cpu_usage;

 static void wait_for_tasks(void)
 {
@@ -858,9 +858,9 @@ replay_fork_event(struct trace_fork_event *fork_event,
 }

 static struct trace_sched_handler replay_ops = {
 	.wakeup_event = replay_wakeup_event,
 	.switch_event = replay_switch_event,
 	.fork_event = replay_fork_event,
 };

 #define TASK_STATE_TO_CHAR_STR "RSDTtZX"
@@ -877,6 +877,7 @@ struct lat_snapshot {
 	enum thread_state state;
 	u64 wake_up_time;
 	u64 sched_in_time;
+	u64 runtime;
 };

 struct thread_latency {
@@ -951,6 +952,7 @@ latency_fork_event(struct trace_fork_event *fork_event __used,
 	/* should insert the newcomer */
 }

+__used
 static char sched_out_state(struct trace_switch_event *switch_event)
 {
 	const char *str = TASK_STATE_TO_CHAR_STR;
@@ -960,17 +962,15 @@ static char sched_out_state(struct trace_switch_event *switch_event)

 static void
 lat_sched_out(struct thread_latency *lat,
-	      struct trace_switch_event *switch_event)
+	      struct trace_switch_event *switch_event __used, u64 delta)
 {
 	struct lat_snapshot *snapshot;

-	if (sched_out_state(switch_event) == 'R')
-		return;
-
 	snapshot = calloc(sizeof(*snapshot), 1);
 	if (!snapshot)
 		die("Non memory");

+	snapshot->runtime = delta;
 	list_add_tail(&snapshot->list, &lat->snapshot_list);
 }

@@ -997,16 +997,31 @@ lat_sched_in(struct thread_latency *lat, u64 timestamp)
 	snapshot->sched_in_time = timestamp;
 }

-
 static void
 latency_switch_event(struct trace_switch_event *switch_event,
 		     struct event *event __used,
-		     int cpu __used,
+		     int cpu,
 		     u64 timestamp,
 		     struct thread *thread __used)
 {
 	struct thread_latency *out_lat, *in_lat;
 	struct thread *sched_out, *sched_in;
+	u64 timestamp0;
+	s64 delta;
+
+	if (cpu >= MAX_CPUS || cpu < 0)
+		return;
+
+	timestamp0 = cpu_last_switched[cpu];
+	cpu_last_switched[cpu] = timestamp;
+	if (timestamp0)
+		delta = timestamp - timestamp0;
+	else
+		delta = 0;
+
+	if (delta < 0)
+		die("hm, delta: %Ld < 0 ?\n", delta);
+

 	sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match);
 	sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match);
@@ -1028,7 +1043,7 @@ latency_switch_event(struct trace_switch_event *switch_event,
 	}

 	lat_sched_in(in_lat, timestamp);
-	lat_sched_out(out_lat, switch_event);
+	lat_sched_out(out_lat, switch_event, delta);
 }

 static void
@@ -1067,9 +1082,9 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
 }

 static struct trace_sched_handler lat_ops = {
 	.wakeup_event = latency_wakeup_event,
 	.switch_event = latency_switch_event,
 	.fork_event = latency_fork_event,
 };

 static void output_lat_thread(struct thread_latency *lat)
@@ -1080,8 +1095,11 @@ static void output_lat_thread(struct thread_latency *lat)
 	int ret;
 	u64 max = 0, avg;
 	u64 total = 0, delta;
+	u64 total_runtime = 0;

 	list_for_each_entry(shot, &lat->snapshot_list, list) {
+		total_runtime += shot->runtime;
+
 		if (shot->state != THREAD_SCHED_IN)
 			continue;

@@ -1104,7 +1122,7 @@ static void output_lat_thread(struct thread_latency *lat)
 	avg = total / count;

 	printf("|%9.3f ms |%9d | avg:%9.3f ms | max:%9.3f ms |\n",
-		0.0, count, (double)avg/1e9, (double)max/1e9);
+		(double)total_runtime/1e9, count, (double)avg/1e9, (double)max/1e9);
 }

 static void __cmd_lat(void)
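
On the output side, output_lat_thread() now walks the snapshot list once, accumulates shot->runtime into total_runtime, and prints the sum in the first column using the same scale factor as the avg and max values. A compact sketch of that summary step over a plain array, with hypothetical nanosecond values standing in for real snapshots:

```c
/*
 * Sketch of the summary step in output_lat_thread(): sum the per-snapshot
 * runtimes (ns) and print the total with the same scale factor the patch
 * uses for its latency columns.  The array values are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t runtimes[] = { 1200000, 800000, 2500000 };	/* ns */
	uint64_t total_runtime = 0;
	size_t i;

	for (i = 0; i < sizeof(runtimes) / sizeof(runtimes[0]); i++)
		total_runtime += runtimes[i];

	/* Same formatting as the printf in the patch. */
	printf("|%9.3f ms |\n", (double)total_runtime / 1e9);
	return 0;
}
```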