author	Ingo Molnar <mingo@elte.hu>	2009-10-23 02:23:20 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-10-23 02:23:20 -0400
commit	43315956509ca6913764861ac7dec128b91eb1ec (patch)
tree	60fd5647f150a46e63093a41417c2eef3e776b3d /tools/perf/util/thread.c
parent	9bf4e7fba8006d19846fec877b6da0616b2772de (diff)
parent	6beba7adbe092e63dfe8d09fbd1e3ec140474a13 (diff)
Merge branch 'perf/core' into perf/probes
Conflicts:
	tools/perf/Makefile

Merge reason:
 - fix the conflict
 - pick up the pr_*() infrastructure to queue up dependent patch

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'tools/perf/util/thread.c')
-rw-r--r--	tools/perf/util/thread.c	167
1 file changed, 108 insertions(+), 59 deletions(-)
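In short, this merge pulls in the perf/core changes that turn the per-thread map list into an rb_tree keyed by map->start (overlapping maps are parked on a removed_maps list instead of being freed), and makes the threads rbtree plus the last_match cache static to thread.c. The sketch below is an illustrative, self-contained user-space approximation of the containment lookup the new maps__find() performs (ip below the range goes left, ip above goes right, otherwise it is a hit); it uses a plain unbalanced BST rather than the kernel rb_tree API, so names such as map_node, map_insert and map_find are hypothetical and not part of the patch:

/* Illustrative sketch only -- not kernel code. Mirrors the maps__find()
 * containment test over a plain, unbalanced binary search tree. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct map_node {
	uint64_t start, end;			/* inclusive address range */
	struct map_node *left, *right;
};

/* Insert keyed by start address, as maps__insert() keys on map->start. */
static struct map_node *map_insert(struct map_node *root, uint64_t start, uint64_t end)
{
	if (root == NULL) {
		struct map_node *n = calloc(1, sizeof(*n));
		n->start = start;
		n->end = end;
		return n;
	}
	if (start < root->start)
		root->left = map_insert(root->left, start, end);
	else
		root->right = map_insert(root->right, start, end);
	return root;
}

/* Find the map containing ip: go left if below the range, right if above. */
static struct map_node *map_find(struct map_node *root, uint64_t ip)
{
	while (root) {
		if (ip < root->start)
			root = root->left;
		else if (ip > root->end)
			root = root->right;
		else
			return root;	/* start <= ip <= end */
	}
	return NULL;
}

int main(void)
{
	struct map_node *maps = NULL;

	maps = map_insert(maps, 0x1000, 0x1fff);
	maps = map_insert(maps, 0x4000, 0x5fff);

	printf("0x4100 %s\n", map_find(maps, 0x4100) ? "found" : "not found");
	printf("0x3000 %s\n", map_find(maps, 0x3000) ? "found" : "not found");
	return 0;
}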
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 45efb5db0d1..0f6d78c9863 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -6,6 +6,9 @@
 #include "util.h"
 #include "debug.h"
 
+static struct rb_root threads;
+static struct thread *last_match;
+
 static struct thread *thread__new(pid_t pid)
 {
 	struct thread *self = calloc(1, sizeof(*self));
@@ -15,7 +18,8 @@ static struct thread *thread__new(pid_t pid)
 		self->comm = malloc(32);
 		if (self->comm)
 			snprintf(self->comm, 32, ":%d", self->pid);
-		INIT_LIST_HEAD(&self->maps);
+		self->maps = RB_ROOT;
+		INIT_LIST_HEAD(&self->removed_maps);
 	}
 
 	return self;
@@ -29,21 +33,40 @@ int thread__set_comm(struct thread *self, const char *comm)
 	return self->comm ? 0 : -ENOMEM;
 }
 
+int thread__comm_len(struct thread *self)
+{
+	if (!self->comm_len) {
+		if (!self->comm)
+			return 0;
+		self->comm_len = strlen(self->comm);
+	}
+
+	return self->comm_len;
+}
+
 static size_t thread__fprintf(struct thread *self, FILE *fp)
 {
+	struct rb_node *nd;
 	struct map *pos;
-	size_t ret = fprintf(fp, "Thread %d %s\n", self->pid, self->comm);
+	size_t ret = fprintf(fp, "Thread %d %s\nCurrent maps:\n",
+			     self->pid, self->comm);
 
-	list_for_each_entry(pos, &self->maps, node)
+	for (nd = rb_first(&self->maps); nd; nd = rb_next(nd)) {
+		pos = rb_entry(nd, struct map, rb_node);
+		ret += map__fprintf(pos, fp);
+	}
+
+	ret = fprintf(fp, "Removed maps:\n");
+
+	list_for_each_entry(pos, &self->removed_maps, node)
 		ret += map__fprintf(pos, fp);
 
 	return ret;
 }
 
-struct thread *
-threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match)
+struct thread *threads__findnew(pid_t pid)
 {
-	struct rb_node **p = &threads->rb_node;
+	struct rb_node **p = &threads.rb_node;
 	struct rb_node *parent = NULL;
 	struct thread *th;
 
@@ -52,15 +75,15 @@ threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match)
 	 * so most of the time we dont have to look up
 	 * the full rbtree:
 	 */
-	if (*last_match && (*last_match)->pid == pid)
-		return *last_match;
+	if (last_match && last_match->pid == pid)
+		return last_match;
 
 	while (*p != NULL) {
 		parent = *p;
 		th = rb_entry(parent, struct thread, rb_node);
 
 		if (th->pid == pid) {
-			*last_match = th;
+			last_match = th;
 			return th;
 		}
 
@@ -73,17 +96,16 @@ threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match)
 	th = thread__new(pid);
 	if (th != NULL) {
 		rb_link_node(&th->rb_node, parent, p);
-		rb_insert_color(&th->rb_node, threads);
-		*last_match = th;
+		rb_insert_color(&th->rb_node, &threads);
+		last_match = th;
 	}
 
 	return th;
 }
 
-struct thread *
-register_idle_thread(struct rb_root *threads, struct thread **last_match)
+struct thread *register_idle_thread(void)
 {
-	struct thread *thread = threads__findnew(0, threads, last_match);
+	struct thread *thread = threads__findnew(0);
 
 	if (!thread || thread__set_comm(thread, "swapper")) {
 		fprintf(stderr, "problem inserting idle task.\n");
@@ -93,42 +115,82 @@ register_idle_thread(struct rb_root *threads, struct thread **last_match)
 	return thread;
 }
 
-void thread__insert_map(struct thread *self, struct map *map)
+static void thread__remove_overlappings(struct thread *self, struct map *map)
 {
-	struct map *pos, *tmp;
-
-	list_for_each_entry_safe(pos, tmp, &self->maps, node) {
-		if (map__overlap(pos, map)) {
-			if (verbose >= 2) {
-				printf("overlapping maps:\n");
-				map__fprintf(map, stdout);
-				map__fprintf(pos, stdout);
-			}
-
-			if (map->start <= pos->start && map->end > pos->start)
-				pos->start = map->end;
-
-			if (map->end >= pos->end && map->start < pos->end)
-				pos->end = map->start;
-
-			if (verbose >= 2) {
-				printf("after collision:\n");
-				map__fprintf(pos, stdout);
-			}
-
-			if (pos->start >= pos->end) {
-				list_del_init(&pos->node);
-				free(pos);
-			}
+	struct rb_node *next = rb_first(&self->maps);
+
+	while (next) {
+		struct map *pos = rb_entry(next, struct map, rb_node);
+		next = rb_next(&pos->rb_node);
+
+		if (!map__overlap(pos, map))
+			continue;
+
+		if (verbose >= 2) {
+			fputs("overlapping maps:\n", stderr);
+			map__fprintf(map, stderr);
+			map__fprintf(pos, stderr);
 		}
+
+		rb_erase(&pos->rb_node, &self->maps);
+		/*
+		 * We may have references to this map, for instance in some
+		 * hist_entry instances, so just move them to a separate
+		 * list.
+		 */
+		list_add_tail(&pos->node, &self->removed_maps);
+	}
+}
+
+void maps__insert(struct rb_root *maps, struct map *map)
+{
+	struct rb_node **p = &maps->rb_node;
+	struct rb_node *parent = NULL;
+	const u64 ip = map->start;
+	struct map *m;
+
+	while (*p != NULL) {
+		parent = *p;
+		m = rb_entry(parent, struct map, rb_node);
+		if (ip < m->start)
+			p = &(*p)->rb_left;
+		else
+			p = &(*p)->rb_right;
 	}
 
-	list_add_tail(&map->node, &self->maps);
+	rb_link_node(&map->rb_node, parent, p);
+	rb_insert_color(&map->rb_node, maps);
+}
+
+struct map *maps__find(struct rb_root *maps, u64 ip)
+{
+	struct rb_node **p = &maps->rb_node;
+	struct rb_node *parent = NULL;
+	struct map *m;
+
+	while (*p != NULL) {
+		parent = *p;
+		m = rb_entry(parent, struct map, rb_node);
+		if (ip < m->start)
+			p = &(*p)->rb_left;
+		else if (ip > m->end)
+			p = &(*p)->rb_right;
+		else
+			return m;
+	}
+
+	return NULL;
+}
+
+void thread__insert_map(struct thread *self, struct map *map)
+{
+	thread__remove_overlappings(self, map);
+	maps__insert(&self->maps, map);
 }
 
 int thread__fork(struct thread *self, struct thread *parent)
 {
-	struct map *map;
+	struct rb_node *nd;
 
 	if (self->comm)
 		free(self->comm);
@@ -136,7 +198,8 @@ int thread__fork(struct thread *self, struct thread *parent)
 	if (!self->comm)
 		return -ENOMEM;
 
-	list_for_each_entry(map, &parent->maps, node) {
+	for (nd = rb_first(&parent->maps); nd; nd = rb_next(nd)) {
+		struct map *map = rb_entry(nd, struct map, rb_node);
 		struct map *new = map__clone(map);
 		if (!new)
 			return -ENOMEM;
@@ -146,26 +209,12 @@ int thread__fork(struct thread *self, struct thread *parent)
 	return 0;
 }
 
-struct map *thread__find_map(struct thread *self, u64 ip)
-{
-	struct map *pos;
-
-	if (self == NULL)
-		return NULL;
-
-	list_for_each_entry(pos, &self->maps, node)
-		if (ip >= pos->start && ip <= pos->end)
-			return pos;
-
-	return NULL;
-}
-
-size_t threads__fprintf(FILE *fp, struct rb_root *threads)
+size_t threads__fprintf(FILE *fp)
 {
 	size_t ret = 0;
 	struct rb_node *nd;
 
-	for (nd = rb_first(threads); nd; nd = rb_next(nd)) {
+	for (nd = rb_first(&threads); nd; nd = rb_next(nd)) {
 		struct thread *pos = rb_entry(nd, struct thread, rb_node);
 
 		ret += thread__fprintf(pos, fp);