author    Frederic Weisbecker <fweisbec@gmail.com>  2009-08-14 06:21:53 -0400
committer Ingo Molnar <mingo@elte.hu>               2009-08-15 10:10:19 -0400
commit    6baa0a5ae0954fb2486c480a20556a9f1aee0965 (patch)
tree      b85e485562b34b222153d41414dec655724e26b8 /tools
parent    be750231ce1599b86fbba213e3da8344ece262e2 (diff)
perf tools: Factorize the thread code in a dedicated file
Factorize the thread management code used by perf-annotate and
perf-report in dedicated source and header files.

v2: pass last_match by address so that it can actually be modified.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
LKML-Reference: <1250245313-6995-1-git-send-email-fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
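The shared helpers keep the "findnew" semantics but now take the thread rbtree
and the lookup cache explicitly, since last_match must be passed by address to
be updated. A minimal sketch of the new calling convention follows (not part of
the patch; handle_comm() is a hypothetical caller, assumed to be built inside
tools/perf so the util headers resolve):

#include "util/thread.h"

static struct rb_root threads;		/* per-tool thread rbtree */
static struct thread *last_match;	/* per-tool PID lookup cache */

static int handle_comm(pid_t pid, const char *comm)
{
	struct thread *thread;

	/* was: threads__findnew(pid), relying on file-local globals */
	thread = threads__findnew(pid, &threads, &last_match);
	if (thread == NULL)
		return -1;

	return thread__set_comm(thread, comm);
}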
Diffstat (limited to 'tools')
-rw-r--r--  tools/perf/Makefile           |   1
-rw-r--r--  tools/perf/builtin-annotate.c | 173
-rw-r--r--  tools/perf/builtin-report.c   | 205
-rw-r--r--  tools/perf/util/thread.c      | 143
-rw-r--r--  tools/perf/util/thread.h      |  19
5 files changed, 202 insertions(+), 339 deletions(-)
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 68218cfd38b3..0056405e4c93 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -341,6 +341,7 @@ LIB_OBJS += util/callchain.o
 LIB_OBJS += util/values.o
 LIB_OBJS += util/debug.o
 LIB_OBJS += util/map.o
+LIB_OBJS += util/thread.o
 
 BUILTIN_OBJS += builtin-annotate.o
 BUILTIN_OBJS += builtin-help.o
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 543c4524f8c2..3bedaa5d21d2 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -20,6 +20,7 @@
 
 #include "util/parse-options.h"
 #include "util/parse-events.h"
+#include "util/thread.h"
 
 #define SHOW_KERNEL 1
 #define SHOW_USER 2
@@ -44,6 +45,9 @@ static int print_line;
 static unsigned long page_size;
 static unsigned long mmap_window = 32;
 
+static struct rb_root threads;
+static struct thread *last_match;
+
 
 struct sym_ext {
 	struct rb_node node;
@@ -51,154 +55,6 @@ struct sym_ext {
 	char *path;
 };
 
-
-struct thread {
-	struct rb_node rb_node;
-	struct list_head maps;
-	pid_t pid;
-	char *comm;
-};
-
-static struct thread *thread__new(pid_t pid)
-{
-	struct thread *self = malloc(sizeof(*self));
-
-	if (self != NULL) {
-		self->pid = pid;
-		self->comm = malloc(32);
-		if (self->comm)
-			snprintf(self->comm, 32, ":%d", self->pid);
-		INIT_LIST_HEAD(&self->maps);
-	}
-
-	return self;
-}
-
-static int thread__set_comm(struct thread *self, const char *comm)
-{
-	if (self->comm)
-		free(self->comm);
-	self->comm = strdup(comm);
-	return self->comm ? 0 : -ENOMEM;
-}
-
-static size_t thread__fprintf(struct thread *self, FILE *fp)
-{
-	struct map *pos;
-	size_t ret = fprintf(fp, "Thread %d %s\n", self->pid, self->comm);
-
-	list_for_each_entry(pos, &self->maps, node)
-		ret += map__fprintf(pos, fp);
-
-	return ret;
-}
-
-
-static struct rb_root threads;
-static struct thread *last_match;
-
-static struct thread *threads__findnew(pid_t pid)
-{
-	struct rb_node **p = &threads.rb_node;
-	struct rb_node *parent = NULL;
-	struct thread *th;
-
-	/*
-	 * Font-end cache - PID lookups come in blocks,
-	 * so most of the time we dont have to look up
-	 * the full rbtree:
-	 */
-	if (last_match && last_match->pid == pid)
-		return last_match;
-
-	while (*p != NULL) {
-		parent = *p;
-		th = rb_entry(parent, struct thread, rb_node);
-
-		if (th->pid == pid) {
-			last_match = th;
-			return th;
-		}
-
-		if (pid < th->pid)
-			p = &(*p)->rb_left;
-		else
-			p = &(*p)->rb_right;
-	}
-
-	th = thread__new(pid);
-	if (th != NULL) {
-		rb_link_node(&th->rb_node, parent, p);
-		rb_insert_color(&th->rb_node, &threads);
-		last_match = th;
-	}
-
-	return th;
-}
-
-static void thread__insert_map(struct thread *self, struct map *map)
-{
-	struct map *pos, *tmp;
-
-	list_for_each_entry_safe(pos, tmp, &self->maps, node) {
-		if (map__overlap(pos, map)) {
-			list_del_init(&pos->node);
-			/* XXX leaks dsos */
-			free(pos);
-		}
-	}
-
-	list_add_tail(&map->node, &self->maps);
-}
-
-static int thread__fork(struct thread *self, struct thread *parent)
-{
-	struct map *map;
-
-	if (self->comm)
-		free(self->comm);
-	self->comm = strdup(parent->comm);
-	if (!self->comm)
-		return -ENOMEM;
-
-	list_for_each_entry(map, &parent->maps, node) {
-		struct map *new = map__clone(map);
-		if (!new)
-			return -ENOMEM;
-		thread__insert_map(self, new);
-	}
-
-	return 0;
-}
-
-static struct map *thread__find_map(struct thread *self, u64 ip)
-{
-	struct map *pos;
-
-	if (self == NULL)
-		return NULL;
-
-	list_for_each_entry(pos, &self->maps, node)
-		if (ip >= pos->start && ip <= pos->end)
-			return pos;
-
-	return NULL;
-}
-
-static size_t threads__fprintf(FILE *fp)
-{
-	size_t ret = 0;
-	struct rb_node *nd;
-
-	for (nd = rb_first(&threads); nd; nd = rb_next(nd)) {
-		struct thread *pos = rb_entry(nd, struct thread, rb_node);
-
-		ret += thread__fprintf(pos, fp);
-	}
-
-	return ret;
-}
-
 /*
  * histogram, sorted on item, collects counts
  */
@@ -624,7 +480,7 @@ static void output__resort(void)
 
 static void register_idle_thread(void)
 {
-	struct thread *thread = threads__findnew(0);
+	struct thread *thread = threads__findnew(0, &threads, &last_match);
 
 	if (thread == NULL ||
 	    thread__set_comm(thread, "[idle]")) {
@@ -645,10 +501,12 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
 	char level;
 	int show = 0;
 	struct dso *dso = NULL;
-	struct thread *thread = threads__findnew(event->ip.pid);
+	struct thread *thread;
 	u64 ip = event->ip.ip;
 	struct map *map = NULL;
 
+	thread = threads__findnew(event->ip.pid, &threads, &last_match);
+
 	dprintf("%p [%p]: PERF_EVENT (IP, %d): %d: %p\n",
 		(void *)(offset + head),
 		(void *)(long)(event->header.size),
@@ -719,9 +577,11 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
 static int
 process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
 {
-	struct thread *thread = threads__findnew(event->mmap.pid);
+	struct thread *thread;
 	struct map *map = map__new(&event->mmap, NULL, 0);
 
+	thread = threads__findnew(event->mmap.pid, &threads, &last_match);
+
 	dprintf("%p [%p]: PERF_EVENT_MMAP %d: [%p(%p) @ %p]: %s\n",
 		(void *)(offset + head),
 		(void *)(long)(event->header.size),
@@ -745,8 +605,9 @@ process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
 static int
 process_comm_event(event_t *event, unsigned long offset, unsigned long head)
 {
-	struct thread *thread = threads__findnew(event->comm.pid);
+	struct thread *thread;
 
+	thread = threads__findnew(event->comm.pid, &threads, &last_match);
 	dprintf("%p [%p]: PERF_EVENT_COMM: %s:%d\n",
 		(void *)(offset + head),
 		(void *)(long)(event->header.size),
@@ -765,9 +626,11 @@ process_comm_event(event_t *event, unsigned long offset, unsigned long head)
 static int
 process_fork_event(event_t *event, unsigned long offset, unsigned long head)
 {
-	struct thread *thread = threads__findnew(event->fork.pid);
-	struct thread *parent = threads__findnew(event->fork.ppid);
+	struct thread *thread;
+	struct thread *parent;
 
+	thread = threads__findnew(event->fork.pid, &threads, &last_match);
+	parent = threads__findnew(event->fork.ppid, &threads, &last_match);
 	dprintf("%p [%p]: PERF_EVENT_FORK: %d:%d\n",
 		(void *)(offset + head),
 		(void *)(long)(event->header.size),
@@ -1202,7 +1065,7 @@ more:
 		return 0;
 
 	if (verbose >= 3)
-		threads__fprintf(stdout);
+		threads__fprintf(stdout, &threads);
 
 	if (verbose >= 2)
 		dsos__fprintf(stdout);
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 6321951fe1bf..298f26b8ac78 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -25,6 +25,8 @@
 #include "util/parse-options.h"
 #include "util/parse-events.h"
 
+#include "util/thread.h"
+
 #define SHOW_KERNEL 1
 #define SHOW_USER 2
 #define SHOW_HV 4
@@ -71,6 +73,9 @@ static char __cwd[PATH_MAX];
 static char *cwd = __cwd;
 static int cwdlen;
 
+static struct rb_root threads;
+static struct thread *last_match;
+
 static
 struct callchain_param callchain_param = {
 	.mode = CHAIN_GRAPH_REL,
@@ -106,187 +111,10 @@ static int repsep_fprintf(FILE *fp, const char *fmt, ...)
 	return n;
 }
 
-struct thread {
-	struct rb_node rb_node;
-	struct list_head maps;
-	pid_t pid;
-	char *comm;
-};
-
-static struct thread *thread__new(pid_t pid)
-{
-	struct thread *self = malloc(sizeof(*self));
-
-	if (self != NULL) {
-		self->pid = pid;
-		self->comm = malloc(32);
-		if (self->comm)
-			snprintf(self->comm, 32, ":%d", self->pid);
-		INIT_LIST_HEAD(&self->maps);
-	}
-
-	return self;
-}
-
 static unsigned int dsos__col_width,
 		     comms__col_width,
 		     threads__col_width;
 
-static int thread__set_comm(struct thread *self, const char *comm)
-{
-	if (self->comm)
-		free(self->comm);
-	self->comm = strdup(comm);
-	if (!self->comm)
-		return -ENOMEM;
-
-	if (!col_width_list_str && !field_sep &&
-	    (!comm_list || strlist__has_entry(comm_list, comm))) {
-		unsigned int slen = strlen(comm);
-		if (slen > comms__col_width) {
-			comms__col_width = slen;
-			threads__col_width = slen + 6;
-		}
-	}
-
-	return 0;
-}
-
-static size_t thread__fprintf(struct thread *self, FILE *fp)
-{
-	struct map *pos;
-	size_t ret = fprintf(fp, "Thread %d %s\n", self->pid, self->comm);
-
-	list_for_each_entry(pos, &self->maps, node)
-		ret += map__fprintf(pos, fp);
-
-	return ret;
-}
-
-
-static struct rb_root threads;
-static struct thread *last_match;
-
-static struct thread *threads__findnew(pid_t pid)
-{
-	struct rb_node **p = &threads.rb_node;
-	struct rb_node *parent = NULL;
-	struct thread *th;
-
-	/*
-	 * Font-end cache - PID lookups come in blocks,
-	 * so most of the time we dont have to look up
-	 * the full rbtree:
-	 */
-	if (last_match && last_match->pid == pid)
-		return last_match;
-
-	while (*p != NULL) {
-		parent = *p;
-		th = rb_entry(parent, struct thread, rb_node);
-
-		if (th->pid == pid) {
-			last_match = th;
-			return th;
-		}
-
-		if (pid < th->pid)
-			p = &(*p)->rb_left;
-		else
-			p = &(*p)->rb_right;
-	}
-
-	th = thread__new(pid);
-	if (th != NULL) {
-		rb_link_node(&th->rb_node, parent, p);
-		rb_insert_color(&th->rb_node, &threads);
-		last_match = th;
-	}
-
-	return th;
-}
-
-static void thread__insert_map(struct thread *self, struct map *map)
-{
-	struct map *pos, *tmp;
-
-	list_for_each_entry_safe(pos, tmp, &self->maps, node) {
-		if (map__overlap(pos, map)) {
-			if (verbose >= 2) {
-				printf("overlapping maps:\n");
-				map__fprintf(map, stdout);
-				map__fprintf(pos, stdout);
-			}
-
-			if (map->start <= pos->start && map->end > pos->start)
-				pos->start = map->end;
-
-			if (map->end >= pos->end && map->start < pos->end)
-				pos->end = map->start;
-
-			if (verbose >= 2) {
-				printf("after collision:\n");
-				map__fprintf(pos, stdout);
-			}
-
-			if (pos->start >= pos->end) {
-				list_del_init(&pos->node);
-				free(pos);
-			}
-		}
-	}
-
-	list_add_tail(&map->node, &self->maps);
-}
-
-static int thread__fork(struct thread *self, struct thread *parent)
-{
-	struct map *map;
-
-	if (self->comm)
-		free(self->comm);
-	self->comm = strdup(parent->comm);
-	if (!self->comm)
-		return -ENOMEM;
-
-	list_for_each_entry(map, &parent->maps, node) {
-		struct map *new = map__clone(map);
-		if (!new)
-			return -ENOMEM;
-		thread__insert_map(self, new);
-	}
-
-	return 0;
-}
-
-static struct map *thread__find_map(struct thread *self, u64 ip)
-{
-	struct map *pos;
-
-	if (self == NULL)
-		return NULL;
-
-	list_for_each_entry(pos, &self->maps, node)
-		if (ip >= pos->start && ip <= pos->end)
-			return pos;
-
-	return NULL;
-}
-
-static size_t threads__fprintf(FILE *fp)
-{
-	size_t ret = 0;
-	struct rb_node *nd;
-
-	for (nd = rb_first(&threads); nd; nd = rb_next(nd)) {
-		struct thread *pos = rb_entry(nd, struct thread, rb_node);
-
-		ret += thread__fprintf(pos, fp);
-	}
-
-	return ret;
-}
-
 /*
  * histogram, sorted on item, collects counts
  */
@@ -1228,7 +1056,7 @@ print_entries:
 
 static void register_idle_thread(void)
 {
-	struct thread *thread = threads__findnew(0);
+	struct thread *thread = threads__findnew(0, &threads, &last_match);
 
 	if (thread == NULL ||
 	    thread__set_comm(thread, "[idle]")) {
@@ -1263,7 +1091,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
 	char level;
 	int show = 0;
 	struct dso *dso = NULL;
-	struct thread *thread = threads__findnew(event->ip.pid);
+	struct thread *thread;
 	u64 ip = event->ip.ip;
 	u64 period = 1;
 	struct map *map = NULL;
@@ -1271,6 +1099,8 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
 	struct ip_callchain *chain = NULL;
 	int cpumode;
 
+	thread = threads__findnew(event->ip.pid, &threads, &last_match);
+
 	if (sample_type & PERF_SAMPLE_PERIOD) {
 		period = *(u64 *)more_data;
 		more_data += sizeof(u64);
@@ -1360,9 +1190,11 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
 static int
 process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
 {
-	struct thread *thread = threads__findnew(event->mmap.pid);
+	struct thread *thread;
 	struct map *map = map__new(&event->mmap, cwd, cwdlen);
 
+	thread = threads__findnew(event->mmap.pid, &threads, &last_match);
+
 	dprintf("%p [%p]: PERF_EVENT_MMAP %d/%d: [%p(%p) @ %p]: %s\n",
 		(void *)(offset + head),
 		(void *)(long)(event->header.size),
@@ -1387,7 +1219,9 @@ process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
 static int
 process_comm_event(event_t *event, unsigned long offset, unsigned long head)
 {
-	struct thread *thread = threads__findnew(event->comm.pid);
+	struct thread *thread;
+
+	thread = threads__findnew(event->comm.pid, &threads, &last_match);
 
 	dprintf("%p [%p]: PERF_EVENT_COMM: %s:%d\n",
 		(void *)(offset + head),
@@ -1407,8 +1241,11 @@ process_comm_event(event_t *event, unsigned long offset, unsigned long head)
 static int
 process_task_event(event_t *event, unsigned long offset, unsigned long head)
 {
-	struct thread *thread = threads__findnew(event->fork.pid);
-	struct thread *parent = threads__findnew(event->fork.ppid);
+	struct thread *thread;
+	struct thread *parent;
+
+	thread = threads__findnew(event->fork.pid, &threads, &last_match);
+	parent = threads__findnew(event->fork.ppid, &threads, &last_match);
 
 	dprintf("%p [%p]: PERF_EVENT_%s: (%d:%d):(%d:%d)\n",
 		(void *)(offset + head),
@@ -1749,7 +1586,7 @@ done:
 		return 0;
 
 	if (verbose >= 3)
-		threads__fprintf(stdout, &threads);
+		threads__fprintf(stdout, &threads);
 
 	if (verbose >= 2)
 		dsos__fprintf(stdout);
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
new file mode 100644
index 000000000000..00c14b98d651
--- /dev/null
+++ b/tools/perf/util/thread.c
@@ -0,0 +1,143 @@
+#include "../perf.h"
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "thread.h"
+#include "util.h"
+
+static struct thread *thread__new(pid_t pid)
+{
+	struct thread *self = malloc(sizeof(*self));
+
+	if (self != NULL) {
+		self->pid = pid;
+		self->comm = malloc(32);
+		if (self->comm)
+			snprintf(self->comm, 32, ":%d", self->pid);
+		INIT_LIST_HEAD(&self->maps);
+	}
+
+	return self;
+}
+
+int thread__set_comm(struct thread *self, const char *comm)
+{
+	if (self->comm)
+		free(self->comm);
+	self->comm = strdup(comm);
+	return self->comm ? 0 : -ENOMEM;
+}
+
+static size_t thread__fprintf(struct thread *self, FILE *fp)
+{
+	struct map *pos;
+	size_t ret = fprintf(fp, "Thread %d %s\n", self->pid, self->comm);
+
+	list_for_each_entry(pos, &self->maps, node)
+		ret += map__fprintf(pos, fp);
+
+	return ret;
+}
+
+struct thread *
+threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match)
+{
+	struct rb_node **p = &threads->rb_node;
+	struct rb_node *parent = NULL;
+	struct thread *th;
+
+	/*
+	 * Font-end cache - PID lookups come in blocks,
+	 * so most of the time we dont have to look up
+	 * the full rbtree:
+	 */
+	if (*last_match && (*last_match)->pid == pid)
+		return *last_match;
+
+	while (*p != NULL) {
+		parent = *p;
+		th = rb_entry(parent, struct thread, rb_node);
+
+		if (th->pid == pid) {
+			*last_match = th;
+			return th;
+		}
+
+		if (pid < th->pid)
+			p = &(*p)->rb_left;
+		else
+			p = &(*p)->rb_right;
+	}
+
+	th = thread__new(pid);
+	if (th != NULL) {
+		rb_link_node(&th->rb_node, parent, p);
+		rb_insert_color(&th->rb_node, threads);
+		*last_match = th;
+	}
+
+	return th;
+}
+
+void thread__insert_map(struct thread *self, struct map *map)
+{
+	struct map *pos, *tmp;
+
+	list_for_each_entry_safe(pos, tmp, &self->maps, node) {
+		if (map__overlap(pos, map)) {
+			list_del_init(&pos->node);
+			/* XXX leaks dsos */
+			free(pos);
+		}
+	}
+
+	list_add_tail(&map->node, &self->maps);
+}
+
+int thread__fork(struct thread *self, struct thread *parent)
+{
+	struct map *map;
+
+	if (self->comm)
+		free(self->comm);
+	self->comm = strdup(parent->comm);
+	if (!self->comm)
+		return -ENOMEM;
+
+	list_for_each_entry(map, &parent->maps, node) {
+		struct map *new = map__clone(map);
+		if (!new)
+			return -ENOMEM;
+		thread__insert_map(self, new);
+	}
+
+	return 0;
+}
+
+struct map *thread__find_map(struct thread *self, u64 ip)
+{
+	struct map *pos;
+
+	if (self == NULL)
+		return NULL;
+
+	list_for_each_entry(pos, &self->maps, node)
+		if (ip >= pos->start && ip <= pos->end)
+			return pos;
+
+	return NULL;
+}
+
+size_t threads__fprintf(FILE *fp, struct rb_root *threads)
+{
+	size_t ret = 0;
+	struct rb_node *nd;
+
+	for (nd = rb_first(threads); nd; nd = rb_next(nd)) {
+		struct thread *pos = rb_entry(nd, struct thread, rb_node);
+
+		ret += thread__fprintf(pos, fp);
+	}
+
+	return ret;
+}
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
new file mode 100644
index 000000000000..b1c66719379b
--- /dev/null
+++ b/tools/perf/util/thread.h
@@ -0,0 +1,19 @@
+#include <linux/rbtree.h>
+#include <linux/list.h>
+#include <unistd.h>
+#include "symbol.h"
+
+struct thread {
+	struct rb_node rb_node;
+	struct list_head maps;
+	pid_t pid;
+	char *comm;
+};
+
+int thread__set_comm(struct thread *self, const char *comm);
+struct thread *
+threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match);
+void thread__insert_map(struct thread *self, struct map *map);
+int thread__fork(struct thread *self, struct thread *parent);
+struct map *thread__find_map(struct thread *self, u64 ip);
+size_t threads__fprintf(FILE *fp, struct rb_root *threads);