author     Frederic Weisbecker <fweisbec@gmail.com>  2009-09-12 18:46:19 -0400
committer  Ingo Molnar <mingo@elte.hu>               2009-09-13 04:22:47 -0400
commit     c6ced61112f1e6139914149fab65695801a74f0f
tree       7190da3cfee68b9a0fdcd39af84e78e31e054cb1 /tools
parent     175622053069afbd366ba3c6030b5af82f378d40
perf sched: Add involuntarily sleeping task in work atoms
Currently in perf sched, we are measuring the scheduler wakeup
latencies.

Now we also want to measure the time a task waits to be scheduled
after it gets preempted.
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'tools')

 tools/perf/builtin-sched.c | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 7e57a986c056..61a80e8c9d0d 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -866,8 +866,8 @@ static struct trace_sched_handler replay_ops = {
 #define TASK_STATE_TO_CHAR_STR "RSDTtZX"
 
 enum thread_state {
-	THREAD_SLEEPING,
-	THREAD_WAKED_UP,
+	THREAD_SLEEPING = 0,
+	THREAD_WAIT_CPU,
 	THREAD_SCHED_IN,
 	THREAD_IGNORE
 };
@@ -962,7 +962,9 @@ static char sched_out_state(struct trace_switch_event *switch_event)
 
 static void
 lat_sched_out(struct task_atoms *atoms,
-	      struct trace_switch_event *switch_event __used, u64 delta)
+	      struct trace_switch_event *switch_event __used,
+	      u64 delta,
+	      u64 timestamp)
 {
 	struct work_atom *snapshot;
 
@@ -970,6 +972,11 @@ lat_sched_out(struct task_atoms *atoms,
 	if (!snapshot)
 		die("Non memory");
 
+	if (sched_out_state(switch_event) == 'R') {
+		snapshot->state = THREAD_WAIT_CPU;
+		snapshot->wake_up_time = timestamp;
+	}
+
 	snapshot->runtime = delta;
 	list_add_tail(&snapshot->list, &atoms->snapshot_list);
 }
@@ -985,7 +992,7 @@ lat_sched_in(struct task_atoms *atoms, u64 timestamp)
 	snapshot = list_entry(atoms->snapshot_list.prev, struct work_atom,
 			      list);
 
-	if (snapshot->state != THREAD_WAKED_UP)
+	if (snapshot->state != THREAD_WAIT_CPU)
 		return;
 
 	if (timestamp < snapshot->wake_up_time) {
@@ -1043,7 +1050,7 @@ latency_switch_event(struct trace_switch_event *switch_event,
 	}
 
 	lat_sched_in(in_atoms, timestamp);
-	lat_sched_out(out_atoms, switch_event, delta);
+	lat_sched_out(out_atoms, switch_event, delta, timestamp);
 }
 
 static void
@@ -1077,7 +1084,7 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
 	if (snapshot->state != THREAD_SLEEPING)
 		return;
 
-	snapshot->state = THREAD_WAIT_CPU;
+	snapshot->state = THREAD_WAIT_CPU;
 	snapshot->wake_up_time = timestamp;
 }
 
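To make the effect of the change concrete, here is a minimal, self-contained
C sketch of the accounting the patch enables. It is not the perf code itself:
the struct is reduced to the fields used here, and mark_wait_cpu() is a
hypothetical helper standing in for the two call sites (latency_wakeup_event()
and the new 'R' branch in lat_sched_out()) that now funnel into the same
THREAD_WAIT_CPU state.

#include <stdio.h>

typedef unsigned long long u64;

enum thread_state {
	THREAD_SLEEPING = 0,
	THREAD_WAIT_CPU,	/* runnable, waiting for a CPU */
	THREAD_SCHED_IN,
	THREAD_IGNORE
};

struct work_atom {		/* reduced to the fields used below */
	enum thread_state state;
	u64 wake_up_time;
	u64 sched_in_time;
};

/*
 * Hypothetical helper: a wakeup and a preemption (sched-out in state
 * 'R') both leave the task runnable and waiting for a CPU, so both
 * record the same state and the timestamp the wait began.
 */
static void mark_wait_cpu(struct work_atom *atom, u64 timestamp)
{
	atom->state = THREAD_WAIT_CPU;
	atom->wake_up_time = timestamp;
}

int main(void)
{
	struct work_atom atom = { .state = THREAD_SLEEPING };

	/* Task preempted at t=100 while still runnable ('R'). */
	mark_wait_cpu(&atom, 100);

	/* Scheduled back in at t=130: it waited 30 time units. */
	atom.sched_in_time = 130;
	if (atom.state == THREAD_WAIT_CPU)
		printf("wait: %llu\n",
		       atom.sched_in_time - atom.wake_up_time);

	return 0;
}

The rename from THREAD_WAKED_UP to THREAD_WAIT_CPU matches this merge of the
two paths: the state no longer implies how the task became runnable, only
that it is waiting for a CPU.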