Diffstat (limited to 'tools/perf/builtin-report.c')
-rw-r--r--	tools/perf/builtin-report.c	205
1 file changed, 21 insertions(+), 184 deletions(-)
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 6321951fe1bf..298f26b8ac78 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -25,6 +25,8 @@
 #include "util/parse-options.h"
 #include "util/parse-events.h"
 
+#include "util/thread.h"
+
 #define SHOW_KERNEL	1
 #define SHOW_USER	2
 #define SHOW_HV		4
@@ -71,6 +73,9 @@ static char __cwd[PATH_MAX];
 static char *cwd = __cwd;
 static int cwdlen;
 
+static struct rb_root threads;
+static struct thread *last_match;
+
 static
 struct callchain_param	callchain_param = {
 	.mode	= CHAIN_GRAPH_REL,
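
The thread helpers removed further down now sit behind the new "util/thread.h" include, and every caller updated in this patch passes the tree root and the lookup cache explicitly instead of relying on file-local statics. The header itself is not part of this diff, so the following is only a sketch of the presumed interface, inferred from the call sites below (threads__findnew(pid, &threads, &last_match), thread__set_comm(thread, "[idle]"), threads__fprintf(stdout, &threads)):

/*
 * Presumed shape of the relocated interface; util/thread.h is not shown in
 * this diff, so these prototypes are an assumption, not the actual header.
 */
struct thread *threads__findnew(pid_t pid, struct rb_root *threads,
				struct thread **last_match);
int thread__set_comm(struct thread *self, const char *comm);
size_t threads__fprintf(FILE *fp, struct rb_root *threads);
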
@@ -106,187 +111,10 @@ static int repsep_fprintf(FILE *fp, const char *fmt, ...)
 	return n;
 }
 
-struct thread {
-	struct rb_node	 rb_node;
-	struct list_head maps;
-	pid_t		 pid;
-	char		 *comm;
-};
-
-static struct thread *thread__new(pid_t pid)
-{
-	struct thread *self = malloc(sizeof(*self));
-
-	if (self != NULL) {
-		self->pid = pid;
-		self->comm = malloc(32);
-		if (self->comm)
-			snprintf(self->comm, 32, ":%d", self->pid);
-		INIT_LIST_HEAD(&self->maps);
-	}
-
-	return self;
-}
-
 static unsigned int dsos__col_width,
 		    comms__col_width,
 		    threads__col_width;
 
-static int thread__set_comm(struct thread *self, const char *comm)
-{
-	if (self->comm)
-		free(self->comm);
-	self->comm = strdup(comm);
-	if (!self->comm)
-		return -ENOMEM;
-
-	if (!col_width_list_str && !field_sep &&
-	    (!comm_list || strlist__has_entry(comm_list, comm))) {
-		unsigned int slen = strlen(comm);
-		if (slen > comms__col_width) {
-			comms__col_width = slen;
-			threads__col_width = slen + 6;
-		}
-	}
-
-	return 0;
-}
-
-static size_t thread__fprintf(struct thread *self, FILE *fp)
-{
-	struct map *pos;
-	size_t ret = fprintf(fp, "Thread %d %s\n", self->pid, self->comm);
-
-	list_for_each_entry(pos, &self->maps, node)
-		ret += map__fprintf(pos, fp);
-
-	return ret;
-}
-
-
-static struct rb_root threads;
-static struct thread *last_match;
-
-static struct thread *threads__findnew(pid_t pid)
-{
-	struct rb_node **p = &threads.rb_node;
-	struct rb_node *parent = NULL;
-	struct thread *th;
-
-	/*
-	 * Font-end cache - PID lookups come in blocks,
-	 * so most of the time we dont have to look up
-	 * the full rbtree:
-	 */
-	if (last_match && last_match->pid == pid)
-		return last_match;
-
-	while (*p != NULL) {
-		parent = *p;
-		th = rb_entry(parent, struct thread, rb_node);
-
-		if (th->pid == pid) {
-			last_match = th;
-			return th;
-		}
-
-		if (pid < th->pid)
-			p = &(*p)->rb_left;
-		else
-			p = &(*p)->rb_right;
-	}
-
-	th = thread__new(pid);
-	if (th != NULL) {
-		rb_link_node(&th->rb_node, parent, p);
-		rb_insert_color(&th->rb_node, &threads);
-		last_match = th;
-	}
-
-	return th;
-}
-
-static void thread__insert_map(struct thread *self, struct map *map)
-{
-	struct map *pos, *tmp;
-
-	list_for_each_entry_safe(pos, tmp, &self->maps, node) {
-		if (map__overlap(pos, map)) {
-			if (verbose >= 2) {
-				printf("overlapping maps:\n");
-				map__fprintf(map, stdout);
-				map__fprintf(pos, stdout);
-			}
-
-			if (map->start <= pos->start && map->end > pos->start)
-				pos->start = map->end;
-
-			if (map->end >= pos->end && map->start < pos->end)
-				pos->end = map->start;
-
-			if (verbose >= 2) {
-				printf("after collision:\n");
-				map__fprintf(pos, stdout);
-			}
-
-			if (pos->start >= pos->end) {
-				list_del_init(&pos->node);
-				free(pos);
-			}
-		}
-	}
-
-	list_add_tail(&map->node, &self->maps);
-}
-
-static int thread__fork(struct thread *self, struct thread *parent)
-{
-	struct map *map;
-
-	if (self->comm)
-		free(self->comm);
-	self->comm = strdup(parent->comm);
-	if (!self->comm)
-		return -ENOMEM;
-
-	list_for_each_entry(map, &parent->maps, node) {
-		struct map *new = map__clone(map);
-		if (!new)
-			return -ENOMEM;
-		thread__insert_map(self, new);
-	}
-
-	return 0;
-}
-
-static struct map *thread__find_map(struct thread *self, u64 ip)
-{
-	struct map *pos;
-
-	if (self == NULL)
-		return NULL;
-
-	list_for_each_entry(pos, &self->maps, node)
-		if (ip >= pos->start && ip <= pos->end)
-			return pos;
-
-	return NULL;
-}
-
-static size_t threads__fprintf(FILE *fp)
-{
-	size_t ret = 0;
-	struct rb_node *nd;
-
-	for (nd = rb_first(&threads); nd; nd = rb_next(nd)) {
-		struct thread *pos = rb_entry(nd, struct thread, rb_node);
-
-		ret += thread__fprintf(pos, fp);
-	}
-
-	return ret;
-}
-
 /*
  * histogram, sorted on item, collects counts
  */
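
The removed thread__insert_map() resolves collisions by trimming whichever end of an existing map the incoming map covers, and frees the old map when nothing is left of it. Below is a small self-contained sketch of just that trimming arithmetic, with made-up address ranges (the real code walks the thread's map list and operates on struct map):

/* Standalone illustration of the overlap-trimming rule used by the removed
 * thread__insert_map(): an existing map is shrunk so it no longer overlaps
 * the new one, and dropped entirely if nothing remains.  The ranges below
 * are invented for the example. */
#include <stdio.h>
#include <stdint.h>

struct range { uint64_t start, end; };

static void trim(struct range *pos, const struct range *map)
{
	/* new map covers the old one's start: push the old start up */
	if (map->start <= pos->start && map->end > pos->start)
		pos->start = map->end;

	/* new map covers the old one's end: pull the old end down */
	if (map->end >= pos->end && map->start < pos->end)
		pos->end = map->start;
}

int main(void)
{
	struct range pos = { 0x1000, 0x3000 };	/* existing map */
	struct range map = { 0x2000, 0x4000 };	/* newly inserted map */

	trim(&pos, &map);
	if (pos.start >= pos.end)
		printf("existing map fully covered, would be freed\n");
	else
		printf("existing map trimmed to [%#lx, %#lx)\n",
		       (unsigned long)pos.start, (unsigned long)pos.end);
	return 0;
}

With these values the existing map shrinks from [0x1000, 0x3000) to [0x1000, 0x2000); had the new map covered it entirely, start would meet end and the map would be dropped, exactly as the list_del_init()/free() branch in the removed code does.
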
@@ -1228,7 +1056,7 @@ print_entries:
 
 static void register_idle_thread(void)
 {
-	struct thread *thread = threads__findnew(0);
+	struct thread *thread = threads__findnew(0, &threads, &last_match);
 
 	if (thread == NULL ||
 	    thread__set_comm(thread, "[idle]")) {
@@ -1263,7 +1091,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
 	char level;
 	int show = 0;
 	struct dso *dso = NULL;
-	struct thread *thread = threads__findnew(event->ip.pid);
+	struct thread *thread;
 	u64 ip = event->ip.ip;
 	u64 period = 1;
 	struct map *map = NULL;
@@ -1271,6 +1099,8 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
 	struct ip_callchain *chain = NULL;
 	int cpumode;
 
+	thread = threads__findnew(event->ip.pid, &threads, &last_match);
+
 	if (sample_type & PERF_SAMPLE_PERIOD) {
 		period = *(u64 *)more_data;
 		more_data += sizeof(u64);
@@ -1360,9 +1190,11 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
 static int
 process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
 {
-	struct thread *thread = threads__findnew(event->mmap.pid);
+	struct thread *thread;
 	struct map *map = map__new(&event->mmap, cwd, cwdlen);
 
+	thread = threads__findnew(event->mmap.pid, &threads, &last_match);
+
 	dprintf("%p [%p]: PERF_EVENT_MMAP %d/%d: [%p(%p) @ %p]: %s\n",
 		(void *)(offset + head),
 		(void *)(long)(event->header.size),
@@ -1387,7 +1219,9 @@ process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
 static int
 process_comm_event(event_t *event, unsigned long offset, unsigned long head)
 {
-	struct thread *thread = threads__findnew(event->comm.pid);
+	struct thread *thread;
+
+	thread = threads__findnew(event->comm.pid, &threads, &last_match);
 
 	dprintf("%p [%p]: PERF_EVENT_COMM: %s:%d\n",
 		(void *)(offset + head),
@@ -1407,8 +1241,11 @@ process_comm_event(event_t *event, unsigned long offset, unsigned long head)
 static int
 process_task_event(event_t *event, unsigned long offset, unsigned long head)
 {
-	struct thread *thread = threads__findnew(event->fork.pid);
-	struct thread *parent = threads__findnew(event->fork.ppid);
+	struct thread *thread;
+	struct thread *parent;
+
+	thread = threads__findnew(event->fork.pid, &threads, &last_match);
+	parent = threads__findnew(event->fork.ppid, &threads, &last_match);
 
 	dprintf("%p [%p]: PERF_EVENT_%s: (%d:%d):(%d:%d)\n",
 		(void *)(offset + head),
@@ -1749,7 +1586,7 @@ done:
 		return 0;
 
 	if (verbose >= 3)
-		threads__fprintf(stdout);
+		threads__fprintf(stdout, &threads);
 
 	if (verbose >= 2)
 		dsos__fprintf(stdout);
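
The front-end cache kept by threads__findnew() (last_match) pays off because samples for one PID tend to arrive in bursts; after this patch the cache is no longer a hidden static, so each caller threads the same &last_match through every lookup. The following is a simplified, self-contained sketch of that caching pattern only; it uses a flat array in place of the rbtree and all names are illustrative, not the perf implementation:

/* Simplified sketch of the last_match front-end cache pattern from
 * threads__findnew(): consecutive lookups for the same pid skip the
 * search entirely.  A fixed-size array stands in for the rbtree. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>

struct thread { pid_t pid; };

static struct thread *table[1024];
static int nr_threads;

static struct thread *threads__findnew(pid_t pid, struct thread **last_match)
{
	struct thread *th;
	int i;

	/* front-end cache: PID lookups come in blocks */
	if (*last_match && (*last_match)->pid == pid)
		return *last_match;

	for (i = 0; i < nr_threads; i++) {
		if (table[i]->pid == pid) {
			*last_match = table[i];
			return table[i];
		}
	}

	if (nr_threads == 1024)
		return NULL;	/* sketch only: fixed-size table */

	th = malloc(sizeof(*th));
	if (th) {
		th->pid = pid;
		table[nr_threads++] = th;
		*last_match = th;
	}
	return th;
}

int main(void)
{
	struct thread *last_match = NULL;
	struct thread *a = threads__findnew(42, &last_match);
	struct thread *b = threads__findnew(42, &last_match); /* cache hit */

	printf("same object: %s\n", a == b ? "yes" : "no");
	return 0;
}

Calling threads__findnew(42, &last_match) twice returns the same object, and the second call never touches the table at all, which is the behaviour the one-entry cache is meant to exploit.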