Diffstat (limited to 'tools/perf/builtin-sched.c')
 tools/perf/builtin-sched.c | 30 +++++++++++-------------------
 1 file changed, 11 insertions(+), 19 deletions(-)
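
The change below removes the file-local threads rb_root and the last_match cache from builtin-sched.c and lets the shared thread helpers own that state, so every call site shrinks to just the pid being looked up. A minimal sketch of the simplified prototypes as they can be inferred from the call sites in this diff; the header location and the return types are assumptions, since neither is visible here:

        /* Assumed to live in the shared thread code, e.g. tools/perf/util/thread.h;
         * return types are inferred/assumed, not shown by this diff. */
        struct thread *threads__findnew(pid_t pid);
        struct thread *register_idle_thread(void);
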
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 387a44234368..73bdad029730 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -24,9 +24,6 @@ static char const *input_name = "perf.data";
 
 static unsigned long total_comm = 0;
 
-static struct rb_root threads;
-static struct thread *last_match;
-
 static struct perf_header *header;
 static u64 sample_type;
 
@@ -641,9 +638,7 @@ static void test_calibrations(void)
 static int
 process_comm_event(event_t *event, unsigned long offset, unsigned long head)
 {
-        struct thread *thread;
-
-        thread = threads__findnew(event->comm.tid, &threads, &last_match);
+        struct thread *thread = threads__findnew(event->comm.tid);
 
         dump_printf("%p [%p]: perf_event_comm: %s:%d\n",
                     (void *)(offset + head),
@@ -1086,8 +1081,8 @@ latency_switch_event(struct trace_switch_event *switch_event,
                 die("hm, delta: %Ld < 0 ?\n", delta);
 
 
-        sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match);
-        sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match);
+        sched_out = threads__findnew(switch_event->prev_pid);
+        sched_in = threads__findnew(switch_event->next_pid);
 
         out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
         if (!out_events) {
@@ -1120,13 +1115,10 @@ latency_runtime_event(struct trace_runtime_event *runtime_event,
                       u64 timestamp,
                       struct thread *this_thread __used)
 {
-        struct work_atoms *atoms;
-        struct thread *thread;
+        struct thread *thread = threads__findnew(runtime_event->pid);
+        struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
 
         BUG_ON(cpu >= MAX_CPUS || cpu < 0);
-
-        thread = threads__findnew(runtime_event->pid, &threads, &last_match);
-        atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
         if (!atoms) {
                 thread_atoms_insert(thread);
                 atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
@@ -1153,7 +1145,7 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
         if (!wakeup_event->success)
                 return;
 
-        wakee = threads__findnew(wakeup_event->pid, &threads, &last_match);
+        wakee = threads__findnew(wakeup_event->pid);
         atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
         if (!atoms) {
                 thread_atoms_insert(wakee);
@@ -1202,7 +1194,7 @@ latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
         if (profile_cpu == -1)
                 return;
 
-        migrant = threads__findnew(migrate_task_event->pid, &threads, &last_match);
+        migrant = threads__findnew(migrate_task_event->pid);
         atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
         if (!atoms) {
                 thread_atoms_insert(migrant);
@@ -1458,8 +1450,8 @@ map_switch_event(struct trace_switch_event *switch_event,
                 die("hm, delta: %Ld < 0 ?\n", delta);
 
 
-        sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match);
-        sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match);
+        sched_out = threads__findnew(switch_event->prev_pid);
+        sched_in = threads__findnew(switch_event->next_pid);
 
         curr_thread[this_cpu] = sched_in;
 
@@ -1649,7 +1641,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
         if (!(sample_type & PERF_SAMPLE_RAW))
                 return 0;
 
-        thread = threads__findnew(event->ip.pid, &threads, &last_match);
+        thread = threads__findnew(event->ip.pid);
 
         if (sample_type & PERF_SAMPLE_TIME) {
                 timestamp = *(u64 *)more_data;
@@ -1725,7 +1717,7 @@ static struct perf_file_handler file_handler = {
 
 static int read_events(void)
 {
-        register_idle_thread(&threads, &last_match);
+        register_idle_thread();
         register_perf_file_handler(&file_handler);
 
         return mmap_dispatch_perf_file(&header, input_name, 0, 0, &cwdlen, &cwd);
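
For context, here is a minimal, self-contained sketch of how the shared side of threads__findnew() can look once the rb_tree and the last_match cache live next to the function instead of inside each tool. The struct layout, the calloc()-based allocation and the include lines are illustrative assumptions, not code taken from this patch:

#include <stdlib.h>
#include <sys/types.h>
#include <linux/rbtree.h>       /* kernel-style rbtree, as carried in the perf tools tree */

/* Minimal stand-in for the real struct thread in the perf thread code. */
struct thread {
        struct rb_node  rb_node;
        pid_t           pid;
};

/* The state this patch removes from builtin-sched.c, assumed to live
 * next to the function definition in the shared thread code. */
static struct rb_root threads = RB_ROOT;
static struct thread *last_match;

struct thread *threads__findnew(pid_t pid)
{
        struct rb_node **p = &threads.rb_node;
        struct rb_node *parent = NULL;
        struct thread *th;

        /* One-entry cache: consecutive events usually refer to the same pid. */
        if (last_match && last_match->pid == pid)
                return last_match;

        while (*p != NULL) {
                parent = *p;
                th = rb_entry(parent, struct thread, rb_node);

                if (th->pid == pid) {
                        last_match = th;
                        return th;
                }

                if (pid < th->pid)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        /* Not found: create the thread and insert it into the tree. */
        th = calloc(1, sizeof(*th));
        if (th != NULL) {
                th->pid = pid;
                rb_link_node(&th->rb_node, parent, p);
                rb_insert_color(&th->rb_node, &threads);
                last_match = th;
        }
        return th;
}

With the tree and the cache owned by the thread code, callers such as process_comm_event() above no longer need the extra arguments or the file-local declarations that this patch deletes.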