author	Frederic Weisbecker <fweisbec@gmail.com>	2009-10-08 15:04:17 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-10-08 15:10:21 -0400
commit	97ea1a7fa62af0d8d49a0fc12796b0073537c9d8 (patch)
tree	3be620dc044bd978e71b1bdd997d8e38a3c9b2a8 /tools/perf/builtin-sched.c
parent	2e538c4a1847291cf01218d4fe7bb4dc60fef7cf (diff)
perf tools: Fix thread comm resolution in perf sched
This reverts commit 9a92b479b2f088ee2d3194243f4c8e59b1b8c9c2 ("perf
tools: Improve thread comm resolution in perf sched") and fixes the
real bug.
The bug was elsewhere:
We are failing to resolve thread names in perf sched because the
table of threads we are building, on top of comm events, has
per-process granularity. But perf sched, unlike the other perf
tools, needs per-thread granularity because we are profiling every
task individually.
So fix it by building our threads table using the tid instead of
the pid as the thread identifier.
v2: Revert the previous fix - it is not really needed
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
LKML-Reference: <1255028657-11158-1-git-send-email-fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
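To illustrate the keying change described above, here is a minimal standalone sketch, not the perf code itself: struct sketch_thread, sketch_findnew() and sketch_process_comm() are simplified stand-ins for struct thread, threads__findnew() and process_comm_event(). The point is that the table is keyed by tid, so every task keeps its own comm entry:

/*
 * Minimal illustration only -- not the actual perf code.  A comm table
 * keyed by tid: each task gets its own entry, so every thread's name
 * can be resolved.  Keying by pid would collapse all threads of a
 * process into a single entry and lose the per-thread comms.
 */
#include <stdlib.h>
#include <string.h>

struct sketch_thread {			/* stand-in for struct thread */
	unsigned int tid;
	char comm[16];
	struct sketch_thread *next;
};

static struct sketch_thread *sketch_threads;

/* stand-in for threads__findnew(): look up by tid, create on first use */
static struct sketch_thread *sketch_findnew(unsigned int tid)
{
	struct sketch_thread *th;

	for (th = sketch_threads; th; th = th->next)
		if (th->tid == tid)
			return th;

	th = calloc(1, sizeof(*th));
	if (!th)
		return NULL;
	th->tid = tid;
	th->next = sketch_threads;
	sketch_threads = th;
	return th;
}

/* stand-in for process_comm_event(): record the name against the tid */
static void sketch_process_comm(unsigned int tid, const char *comm)
{
	struct sketch_thread *th = sketch_findnew(tid);

	if (th)
		strncpy(th->comm, comm, sizeof(th->comm) - 1);
}

If the lookup were keyed by pid instead, two threads of the same process would share one entry and the later comm event would overwrite the earlier name, which is exactly the resolution failure the commit message describes.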
Diffstat (limited to 'tools/perf/builtin-sched.c')
-rw-r--r--	tools/perf/builtin-sched.c	46
1 file changed, 6 insertions, 40 deletions
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 25b91e784332..6b00529ce348 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -638,7 +638,7 @@ process_comm_event(event_t *event, unsigned long offset, unsigned long head)
 {
 	struct thread *thread;
 
-	thread = threads__findnew(event->comm.pid, &threads, &last_match);
+	thread = threads__findnew(event->comm.tid, &threads, &last_match);
 
 	dump_printf("%p [%p]: perf_event_comm: %s:%d\n",
 		    (void *)(offset + head),
@@ -1034,36 +1034,6 @@ add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
 	atoms->nb_atoms++;
 }
 
-static struct thread *
-threads__findnew_from_ctx(u32 pid, struct trace_switch_event *switch_event)
-{
-	struct thread *th;
-
-	th = threads__findnew_nocomm(pid, &threads, &last_match);
-	if (th->comm)
-		return th;
-
-	if (pid == switch_event->prev_pid)
-		thread__set_comm(th, switch_event->prev_comm);
-	else
-		thread__set_comm(th, switch_event->next_comm);
-	return th;
-}
-
-static struct thread *
-threads__findnew_from_wakeup(struct trace_wakeup_event *wakeup_event)
-{
-	struct thread *th;
-
-	th = threads__findnew_nocomm(wakeup_event->pid, &threads, &last_match);
-	if (th->comm)
-		return th;
-
-	thread__set_comm(th, wakeup_event->comm);
-
-	return th;
-}
-
 static void
 latency_switch_event(struct trace_switch_event *switch_event,
 		     struct event *event __used,
@@ -1089,10 +1059,8 @@ latency_switch_event(struct trace_switch_event *switch_event,
 		die("hm, delta: %Ld < 0 ?\n", delta);
 
 
-	sched_out = threads__findnew_from_ctx(switch_event->prev_pid,
-					      switch_event);
-	sched_in = threads__findnew_from_ctx(switch_event->next_pid,
-					     switch_event);
+	sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match);
+	sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match);
 
 	out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
 	if (!out_events) {
@@ -1158,7 +1126,7 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
 	if (!wakeup_event->success)
 		return;
 
-	wakee = threads__findnew_from_wakeup(wakeup_event);
+	wakee = threads__findnew(wakeup_event->pid, &threads, &last_match);
 	atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
 	if (!atoms) {
 		thread_atoms_insert(wakee);
@@ -1418,10 +1386,8 @@ map_switch_event(struct trace_switch_event *switch_event,
 		die("hm, delta: %Ld < 0 ?\n", delta);
 
 
-	sched_out = threads__findnew_from_ctx(switch_event->prev_pid,
-					      switch_event);
-	sched_in = threads__findnew_from_ctx(switch_event->next_pid,
-					     switch_event);
+	sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match);
+	sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match);
 
 	curr_thread[this_cpu] = sched_in;
 