author     Andrea Bastoni <bastoni@cs.unc.edu>    2010-05-30 19:16:45 -0400
committer  Andrea Bastoni <bastoni@cs.unc.edu>    2010-05-30 19:16:45 -0400
commit     ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree       644b88f8a71896307d71438e9b3af49126ffb22b /tools/perf/util/thread.c
parent     43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent     3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)
Merge branch 'wip-2.6.34' into old-private-master (archived-private-master)
Diffstat (limited to 'tools/perf/util/thread.c')
-rw-r--r--  tools/perf/util/thread.c  315
1 file changed, 245 insertions(+), 70 deletions(-)
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 45efb5db0d19..fa968312ee7d 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -2,48 +2,151 @@
 #include <stdlib.h>
 #include <stdio.h>
 #include <string.h>
+#include "session.h"
 #include "thread.h"
 #include "util.h"
 #include "debug.h"
 
+void map_groups__init(struct map_groups *self)
+{
+	int i;
+	for (i = 0; i < MAP__NR_TYPES; ++i) {
+		self->maps[i] = RB_ROOT;
+		INIT_LIST_HEAD(&self->removed_maps[i]);
+	}
+}
+
 static struct thread *thread__new(pid_t pid)
 {
-	struct thread *self = calloc(1, sizeof(*self));
+	struct thread *self = zalloc(sizeof(*self));
 
 	if (self != NULL) {
+		map_groups__init(&self->mg);
 		self->pid = pid;
 		self->comm = malloc(32);
 		if (self->comm)
 			snprintf(self->comm, 32, ":%d", self->pid);
-		INIT_LIST_HEAD(&self->maps);
 	}
 
 	return self;
 }
 
+static void map_groups__flush(struct map_groups *self)
+{
+	int type;
+
+	for (type = 0; type < MAP__NR_TYPES; type++) {
+		struct rb_root *root = &self->maps[type];
+		struct rb_node *next = rb_first(root);
+
+		while (next) {
+			struct map *pos = rb_entry(next, struct map, rb_node);
+			next = rb_next(&pos->rb_node);
+			rb_erase(&pos->rb_node, root);
+			/*
+			 * We may have references to this map, for
+			 * instance in some hist_entry instances, so
+			 * just move them to a separate list.
+			 */
+			list_add_tail(&pos->node, &self->removed_maps[pos->type]);
+		}
+	}
+}
+
 int thread__set_comm(struct thread *self, const char *comm)
 {
+	int err;
+
 	if (self->comm)
 		free(self->comm);
 	self->comm = strdup(comm);
-	return self->comm ? 0 : -ENOMEM;
+	err = self->comm == NULL ? -ENOMEM : 0;
+	if (!err) {
+		self->comm_set = true;
+		map_groups__flush(&self->mg);
+	}
+	return err;
 }
 
-static size_t thread__fprintf(struct thread *self, FILE *fp)
+int thread__comm_len(struct thread *self)
+{
+	if (!self->comm_len) {
+		if (!self->comm)
+			return 0;
+		self->comm_len = strlen(self->comm);
+	}
+
+	return self->comm_len;
+}
+
+size_t __map_groups__fprintf_maps(struct map_groups *self,
+				  enum map_type type, FILE *fp)
+{
+	size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
+	struct rb_node *nd;
+
+	for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) {
+		struct map *pos = rb_entry(nd, struct map, rb_node);
+		printed += fprintf(fp, "Map:");
+		printed += map__fprintf(pos, fp);
+		if (verbose > 2) {
+			printed += dso__fprintf(pos->dso, type, fp);
+			printed += fprintf(fp, "--\n");
+		}
+	}
+
+	return printed;
+}
+
+size_t map_groups__fprintf_maps(struct map_groups *self, FILE *fp)
+{
+	size_t printed = 0, i;
+	for (i = 0; i < MAP__NR_TYPES; ++i)
+		printed += __map_groups__fprintf_maps(self, i, fp);
+	return printed;
+}
+
+static size_t __map_groups__fprintf_removed_maps(struct map_groups *self,
+						 enum map_type type, FILE *fp)
 {
 	struct map *pos;
-	size_t ret = fprintf(fp, "Thread %d %s\n", self->pid, self->comm);
+	size_t printed = 0;
+
+	list_for_each_entry(pos, &self->removed_maps[type], node) {
+		printed += fprintf(fp, "Map:");
+		printed += map__fprintf(pos, fp);
+		if (verbose > 1) {
+			printed += dso__fprintf(pos->dso, type, fp);
+			printed += fprintf(fp, "--\n");
+		}
+	}
+	return printed;
+}
 
-	list_for_each_entry(pos, &self->maps, node)
-		ret += map__fprintf(pos, fp);
+static size_t map_groups__fprintf_removed_maps(struct map_groups *self, FILE *fp)
+{
+	size_t printed = 0, i;
+	for (i = 0; i < MAP__NR_TYPES; ++i)
+		printed += __map_groups__fprintf_removed_maps(self, i, fp);
+	return printed;
+}
 
-	return ret;
+static size_t map_groups__fprintf(struct map_groups *self, FILE *fp)
+{
+	size_t printed = map_groups__fprintf_maps(self, fp);
+	printed += fprintf(fp, "Removed maps:\n");
+	return printed + map_groups__fprintf_removed_maps(self, fp);
 }
 
-struct thread *
-threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match)
+static size_t thread__fprintf(struct thread *self, FILE *fp)
+{
+	return fprintf(fp, "Thread %d %s\n", self->pid, self->comm) +
+	       map_groups__fprintf(&self->mg, fp);
+}
+
+struct thread *perf_session__findnew(struct perf_session *self, pid_t pid)
 {
-	struct rb_node **p = &threads->rb_node;
+	struct rb_node **p = &self->threads.rb_node;
 	struct rb_node *parent = NULL;
 	struct thread *th;
 
@@ -52,15 +155,15 @@ threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match)
 	 * so most of the time we dont have to look up
 	 * the full rbtree:
 	 */
-	if (*last_match && (*last_match)->pid == pid)
-		return *last_match;
+	if (self->last_match && self->last_match->pid == pid)
+		return self->last_match;
 
 	while (*p != NULL) {
 		parent = *p;
 		th = rb_entry(parent, struct thread, rb_node);
 
 		if (th->pid == pid) {
-			*last_match = th;
+			self->last_match = th;
 			return th;
 		}
 
@@ -73,99 +176,159 @@ threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match)
 	th = thread__new(pid);
 	if (th != NULL) {
 		rb_link_node(&th->rb_node, parent, p);
-		rb_insert_color(&th->rb_node, threads);
-		*last_match = th;
+		rb_insert_color(&th->rb_node, &self->threads);
+		self->last_match = th;
 	}
 
 	return th;
 }
 
-struct thread *
-register_idle_thread(struct rb_root *threads, struct thread **last_match)
+static int map_groups__fixup_overlappings(struct map_groups *self,
+					  struct map *map)
 {
-	struct thread *thread = threads__findnew(0, threads, last_match);
+	struct rb_root *root = &self->maps[map->type];
+	struct rb_node *next = rb_first(root);
+
+	while (next) {
+		struct map *pos = rb_entry(next, struct map, rb_node);
+		next = rb_next(&pos->rb_node);
+
+		if (!map__overlap(pos, map))
+			continue;
+
+		if (verbose >= 2) {
+			fputs("overlapping maps:\n", stderr);
+			map__fprintf(map, stderr);
+			map__fprintf(pos, stderr);
+		}
+
+		rb_erase(&pos->rb_node, root);
+		/*
+		 * We may have references to this map, for instance in some
+		 * hist_entry instances, so just move them to a separate
+		 * list.
+		 */
+		list_add_tail(&pos->node, &self->removed_maps[map->type]);
+		/*
+		 * Now check if we need to create new maps for areas not
+		 * overlapped by the new map:
+		 */
+		if (map->start > pos->start) {
+			struct map *before = map__clone(pos);
+
+			if (before == NULL)
+				return -ENOMEM;
+
+			before->end = map->start - 1;
+			map_groups__insert(self, before);
+			if (verbose >= 2)
+				map__fprintf(before, stderr);
+		}
+
+		if (map->end < pos->end) {
+			struct map *after = map__clone(pos);
 
-	if (!thread || thread__set_comm(thread, "swapper")) {
-		fprintf(stderr, "problem inserting idle task.\n");
-		exit(-1);
+			if (after == NULL)
+				return -ENOMEM;
+
+			after->start = map->end + 1;
+			map_groups__insert(self, after);
+			if (verbose >= 2)
+				map__fprintf(after, stderr);
+		}
 	}
 
-	return thread;
+	return 0;
 }
 
-void thread__insert_map(struct thread *self, struct map *map)
+void maps__insert(struct rb_root *maps, struct map *map)
 {
-	struct map *pos, *tmp;
-
-	list_for_each_entry_safe(pos, tmp, &self->maps, node) {
-		if (map__overlap(pos, map)) {
-			if (verbose >= 2) {
-				printf("overlapping maps:\n");
-				map__fprintf(map, stdout);
-				map__fprintf(pos, stdout);
-			}
+	struct rb_node **p = &maps->rb_node;
+	struct rb_node *parent = NULL;
+	const u64 ip = map->start;
+	struct map *m;
 
-			if (map->start <= pos->start && map->end > pos->start)
-				pos->start = map->end;
+	while (*p != NULL) {
+		parent = *p;
+		m = rb_entry(parent, struct map, rb_node);
+		if (ip < m->start)
+			p = &(*p)->rb_left;
+		else
+			p = &(*p)->rb_right;
+	}
 
-			if (map->end >= pos->end && map->start < pos->end)
-				pos->end = map->start;
+	rb_link_node(&map->rb_node, parent, p);
+	rb_insert_color(&map->rb_node, maps);
+}
 
-			if (verbose >= 2) {
-				printf("after collision:\n");
-				map__fprintf(pos, stdout);
-			}
+struct map *maps__find(struct rb_root *maps, u64 ip)
+{
+	struct rb_node **p = &maps->rb_node;
+	struct rb_node *parent = NULL;
+	struct map *m;
 
-			if (pos->start >= pos->end) {
-				list_del_init(&pos->node);
-				free(pos);
-			}
-		}
+	while (*p != NULL) {
+		parent = *p;
+		m = rb_entry(parent, struct map, rb_node);
+		if (ip < m->start)
+			p = &(*p)->rb_left;
+		else if (ip > m->end)
+			p = &(*p)->rb_right;
+		else
+			return m;
 	}
 
-	list_add_tail(&map->node, &self->maps);
+	return NULL;
 }
 
-int thread__fork(struct thread *self, struct thread *parent)
+void thread__insert_map(struct thread *self, struct map *map)
 {
-	struct map *map;
-
-	if (self->comm)
-		free(self->comm);
-	self->comm = strdup(parent->comm);
-	if (!self->comm)
-		return -ENOMEM;
+	map_groups__fixup_overlappings(&self->mg, map);
+	map_groups__insert(&self->mg, map);
+}
 
-	list_for_each_entry(map, &parent->maps, node) {
+/*
+ * XXX This should not really _copy_ te maps, but refcount them.
+ */
+static int map_groups__clone(struct map_groups *self,
+			     struct map_groups *parent, enum map_type type)
+{
+	struct rb_node *nd;
+	for (nd = rb_first(&parent->maps[type]); nd; nd = rb_next(nd)) {
+		struct map *map = rb_entry(nd, struct map, rb_node);
 		struct map *new = map__clone(map);
-		if (!new)
+		if (new == NULL)
 			return -ENOMEM;
-		thread__insert_map(self, new);
+		map_groups__insert(self, new);
 	}
-
 	return 0;
 }
 
-struct map *thread__find_map(struct thread *self, u64 ip)
+int thread__fork(struct thread *self, struct thread *parent)
 {
-	struct map *pos;
-
-	if (self == NULL)
-		return NULL;
+	int i;
 
-	list_for_each_entry(pos, &self->maps, node)
-		if (ip >= pos->start && ip <= pos->end)
-			return pos;
+	if (parent->comm_set) {
+		if (self->comm)
+			free(self->comm);
+		self->comm = strdup(parent->comm);
+		if (!self->comm)
+			return -ENOMEM;
+		self->comm_set = true;
+	}
 
-	return NULL;
+	for (i = 0; i < MAP__NR_TYPES; ++i)
+		if (map_groups__clone(&self->mg, &parent->mg, i) < 0)
+			return -ENOMEM;
+	return 0;
 }
 
-size_t threads__fprintf(FILE *fp, struct rb_root *threads)
+size_t perf_session__fprintf(struct perf_session *self, FILE *fp)
 {
 	size_t ret = 0;
 	struct rb_node *nd;
 
-	for (nd = rb_first(threads); nd; nd = rb_next(nd)) {
+	for (nd = rb_first(&self->threads); nd; nd = rb_next(nd)) {
 		struct thread *pos = rb_entry(nd, struct thread, rb_node);
 
 		ret += thread__fprintf(pos, fp);
@@ -173,3 +336,15 @@ size_t threads__fprintf(FILE *fp, struct rb_root *threads)
 
 	return ret;
 }
+
+struct symbol *map_groups__find_symbol(struct map_groups *self,
+				       enum map_type type, u64 addr,
+				       symbol_filter_t filter)
+{
+	struct map *map = map_groups__find(self, type, addr);
+
+	if (map != NULL)
+		return map__find_symbol(map, map->map_ip(map, addr), filter);
+
+	return NULL;
+}