Diffstat (limited to 'tools/perf/util')
82 files changed, 8852 insertions, 3173 deletions
diff --git a/tools/perf/util/PERF-VERSION-GEN b/tools/perf/util/PERF-VERSION-GEN
index 97d76562a1a0..ad73300f7bac 100755
--- a/tools/perf/util/PERF-VERSION-GEN
+++ b/tools/perf/util/PERF-VERSION-GEN
@@ -23,12 +23,7 @@ if test -d ../../.git -o -f ../../.git && | |||
23 | then | 23 | then |
24 | VN=$(echo "$VN" | sed -e 's/-/./g'); | 24 | VN=$(echo "$VN" | sed -e 's/-/./g'); |
25 | else | 25 | else |
26 | eval `grep '^VERSION\s*=' ../../Makefile|tr -d ' '` | 26 | VN=$(MAKEFLAGS= make -sC ../.. kernelversion) |
27 | eval `grep '^PATCHLEVEL\s*=' ../../Makefile|tr -d ' '` | ||
28 | eval `grep '^SUBLEVEL\s*=' ../../Makefile|tr -d ' '` | ||
29 | eval `grep '^EXTRAVERSION\s*=' ../../Makefile|tr -d ' '` | ||
30 | |||
31 | VN="${VERSION}.${PATCHLEVEL}.${SUBLEVEL}${EXTRAVERSION}" | ||
32 | fi | 27 | fi |
33 | 28 | ||
34 | VN=$(expr "$VN" : v*'\(.*\)') | 29 | VN=$(expr "$VN" : v*'\(.*\)') |
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
new file mode 100644
index 000000000000..e01af2b1a469
--- /dev/null
+++ b/tools/perf/util/annotate.c
@@ -0,0 +1,605 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com> | ||
3 | * | ||
4 | * Parts came from builtin-annotate.c, see those files for further | ||
5 | * copyright notes. | ||
6 | * | ||
7 | * Released under the GPL v2. (and only v2, not any later version) | ||
8 | */ | ||
9 | |||
10 | #include "util.h" | ||
11 | #include "build-id.h" | ||
12 | #include "color.h" | ||
13 | #include "cache.h" | ||
14 | #include "symbol.h" | ||
15 | #include "debug.h" | ||
16 | #include "annotate.h" | ||
17 | #include <pthread.h> | ||
18 | |||
19 | int symbol__annotate_init(struct map *map __used, struct symbol *sym) | ||
20 | { | ||
21 | struct annotation *notes = symbol__annotation(sym); | ||
22 | pthread_mutex_init(¬es->lock, NULL); | ||
23 | return 0; | ||
24 | } | ||
25 | |||
26 | int symbol__alloc_hist(struct symbol *sym, int nevents) | ||
27 | { | ||
28 | struct annotation *notes = symbol__annotation(sym); | ||
29 | size_t sizeof_sym_hist = (sizeof(struct sym_hist) + | ||
30 | (sym->end - sym->start) * sizeof(u64)); | ||
31 | |||
32 | notes->src = zalloc(sizeof(*notes->src) + nevents * sizeof_sym_hist); | ||
33 | if (notes->src == NULL) | ||
34 | return -1; | ||
35 | notes->src->sizeof_sym_hist = sizeof_sym_hist; | ||
36 | notes->src->nr_histograms = nevents; | ||
37 | INIT_LIST_HEAD(¬es->src->source); | ||
38 | return 0; | ||
39 | } | ||
40 | |||
41 | void symbol__annotate_zero_histograms(struct symbol *sym) | ||
42 | { | ||
43 | struct annotation *notes = symbol__annotation(sym); | ||
44 | |||
45 | pthread_mutex_lock(¬es->lock); | ||
46 | if (notes->src != NULL) | ||
47 | memset(notes->src->histograms, 0, | ||
48 | notes->src->nr_histograms * notes->src->sizeof_sym_hist); | ||
49 | pthread_mutex_unlock(¬es->lock); | ||
50 | } | ||
51 | |||
52 | int symbol__inc_addr_samples(struct symbol *sym, struct map *map, | ||
53 | int evidx, u64 addr) | ||
54 | { | ||
55 | unsigned offset; | ||
56 | struct annotation *notes; | ||
57 | struct sym_hist *h; | ||
58 | |||
59 | notes = symbol__annotation(sym); | ||
60 | if (notes->src == NULL) | ||
61 | return -ENOMEM; | ||
62 | |||
63 | pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map->unmap_ip(map, addr)); | ||
64 | |||
65 | if (addr >= sym->end) | ||
66 | return 0; | ||
67 | |||
68 | offset = addr - sym->start; | ||
69 | h = annotation__histogram(notes, evidx); | ||
70 | h->sum++; | ||
71 | h->addr[offset]++; | ||
72 | |||
73 | pr_debug3("%#" PRIx64 " %s: period++ [addr: %#" PRIx64 ", %#" PRIx64 | ||
74 | ", evidx=%d] => %" PRIu64 "\n", sym->start, sym->name, | ||
75 | addr, addr - sym->start, evidx, h->addr[offset]); | ||
76 | return 0; | ||
77 | } | ||
78 | |||
79 | static struct objdump_line *objdump_line__new(s64 offset, char *line, size_t privsize) | ||
80 | { | ||
81 | struct objdump_line *self = malloc(sizeof(*self) + privsize); | ||
82 | |||
83 | if (self != NULL) { | ||
84 | self->offset = offset; | ||
85 | self->line = line; | ||
86 | } | ||
87 | |||
88 | return self; | ||
89 | } | ||
90 | |||
91 | void objdump_line__free(struct objdump_line *self) | ||
92 | { | ||
93 | free(self->line); | ||
94 | free(self); | ||
95 | } | ||
96 | |||
97 | static void objdump__add_line(struct list_head *head, struct objdump_line *line) | ||
98 | { | ||
99 | list_add_tail(&line->node, head); | ||
100 | } | ||
101 | |||
102 | struct objdump_line *objdump__get_next_ip_line(struct list_head *head, | ||
103 | struct objdump_line *pos) | ||
104 | { | ||
105 | list_for_each_entry_continue(pos, head, node) | ||
106 | if (pos->offset >= 0) | ||
107 | return pos; | ||
108 | |||
109 | return NULL; | ||
110 | } | ||
111 | |||
112 | static int objdump_line__print(struct objdump_line *oline, struct symbol *sym, | ||
113 | int evidx, u64 len, int min_pcnt, | ||
114 | int printed, int max_lines, | ||
115 | struct objdump_line *queue) | ||
116 | { | ||
117 | static const char *prev_line; | ||
118 | static const char *prev_color; | ||
119 | |||
120 | if (oline->offset != -1) { | ||
121 | const char *path = NULL; | ||
122 | unsigned int hits = 0; | ||
123 | double percent = 0.0; | ||
124 | const char *color; | ||
125 | struct annotation *notes = symbol__annotation(sym); | ||
126 | struct source_line *src_line = notes->src->lines; | ||
127 | struct sym_hist *h = annotation__histogram(notes, evidx); | ||
128 | s64 offset = oline->offset; | ||
129 | struct objdump_line *next; | ||
130 | |||
131 | next = objdump__get_next_ip_line(¬es->src->source, oline); | ||
132 | |||
133 | while (offset < (s64)len && | ||
134 | (next == NULL || offset < next->offset)) { | ||
135 | if (src_line) { | ||
136 | if (path == NULL) | ||
137 | path = src_line[offset].path; | ||
138 | percent += src_line[offset].percent; | ||
139 | } else | ||
140 | hits += h->addr[offset]; | ||
141 | |||
142 | ++offset; | ||
143 | } | ||
144 | |||
145 | if (src_line == NULL && h->sum) | ||
146 | percent = 100.0 * hits / h->sum; | ||
147 | |||
148 | if (percent < min_pcnt) | ||
149 | return -1; | ||
150 | |||
151 | if (max_lines && printed >= max_lines) | ||
152 | return 1; | ||
153 | |||
154 | if (queue != NULL) { | ||
155 | list_for_each_entry_from(queue, ¬es->src->source, node) { | ||
156 | if (queue == oline) | ||
157 | break; | ||
158 | objdump_line__print(queue, sym, evidx, len, | ||
159 | 0, 0, 1, NULL); | ||
160 | } | ||
161 | } | ||
162 | |||
163 | color = get_percent_color(percent); | ||
164 | |||
165 | /* | ||
166 | * Also color the filename and line if needed, with | ||
167 | * the same color as the percentage. Don't print it | ||
168 | * twice for close colored addr with the same filename:line | ||
169 | */ | ||
170 | if (path) { | ||
171 | if (!prev_line || strcmp(prev_line, path) | ||
172 | || color != prev_color) { | ||
173 | color_fprintf(stdout, color, " %s", path); | ||
174 | prev_line = path; | ||
175 | prev_color = color; | ||
176 | } | ||
177 | } | ||
178 | |||
179 | color_fprintf(stdout, color, " %7.2f", percent); | ||
180 | printf(" : "); | ||
181 | color_fprintf(stdout, PERF_COLOR_BLUE, "%s\n", oline->line); | ||
182 | } else if (max_lines && printed >= max_lines) | ||
183 | return 1; | ||
184 | else { | ||
185 | if (queue) | ||
186 | return -1; | ||
187 | |||
188 | if (!*oline->line) | ||
189 | printf(" :\n"); | ||
190 | else | ||
191 | printf(" : %s\n", oline->line); | ||
192 | } | ||
193 | |||
194 | return 0; | ||
195 | } | ||
196 | |||
197 | static int symbol__parse_objdump_line(struct symbol *sym, struct map *map, | ||
198 | FILE *file, size_t privsize) | ||
199 | { | ||
200 | struct annotation *notes = symbol__annotation(sym); | ||
201 | struct objdump_line *objdump_line; | ||
202 | char *line = NULL, *tmp, *tmp2, *c; | ||
203 | size_t line_len; | ||
204 | s64 line_ip, offset = -1; | ||
205 | |||
206 | if (getline(&line, &line_len, file) < 0) | ||
207 | return -1; | ||
208 | |||
209 | if (!line) | ||
210 | return -1; | ||
211 | |||
212 | while (line_len != 0 && isspace(line[line_len - 1])) | ||
213 | line[--line_len] = '\0'; | ||
214 | |||
215 | c = strchr(line, '\n'); | ||
216 | if (c) | ||
217 | *c = 0; | ||
218 | |||
219 | line_ip = -1; | ||
220 | |||
221 | /* | ||
222 | * Strip leading spaces: | ||
223 | */ | ||
224 | tmp = line; | ||
225 | while (*tmp) { | ||
226 | if (*tmp != ' ') | ||
227 | break; | ||
228 | tmp++; | ||
229 | } | ||
230 | |||
231 | if (*tmp) { | ||
232 | /* | ||
233 | * Parse hexa addresses followed by ':' | ||
234 | */ | ||
235 | line_ip = strtoull(tmp, &tmp2, 16); | ||
236 | if (*tmp2 != ':' || tmp == tmp2 || tmp2[1] == '\0') | ||
237 | line_ip = -1; | ||
238 | } | ||
239 | |||
240 | if (line_ip != -1) { | ||
241 | u64 start = map__rip_2objdump(map, sym->start), | ||
242 | end = map__rip_2objdump(map, sym->end); | ||
243 | |||
244 | offset = line_ip - start; | ||
245 | if (offset < 0 || (u64)line_ip > end) | ||
246 | offset = -1; | ||
247 | } | ||
248 | |||
249 | objdump_line = objdump_line__new(offset, line, privsize); | ||
250 | if (objdump_line == NULL) { | ||
251 | free(line); | ||
252 | return -1; | ||
253 | } | ||
254 | objdump__add_line(¬es->src->source, objdump_line); | ||
255 | |||
256 | return 0; | ||
257 | } | ||
258 | |||
259 | int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize) | ||
260 | { | ||
261 | struct dso *dso = map->dso; | ||
262 | char *filename = dso__build_id_filename(dso, NULL, 0); | ||
263 | bool free_filename = true; | ||
264 | char command[PATH_MAX * 2]; | ||
265 | FILE *file; | ||
266 | int err = 0; | ||
267 | char symfs_filename[PATH_MAX]; | ||
268 | |||
269 | if (filename) { | ||
270 | snprintf(symfs_filename, sizeof(symfs_filename), "%s%s", | ||
271 | symbol_conf.symfs, filename); | ||
272 | } | ||
273 | |||
274 | if (filename == NULL) { | ||
275 | if (dso->has_build_id) { | ||
276 | pr_err("Can't annotate %s: not enough memory\n", | ||
277 | sym->name); | ||
278 | return -ENOMEM; | ||
279 | } | ||
280 | goto fallback; | ||
281 | } else if (readlink(symfs_filename, command, sizeof(command)) < 0 || | ||
282 | strstr(command, "[kernel.kallsyms]") || | ||
283 | access(symfs_filename, R_OK)) { | ||
284 | free(filename); | ||
285 | fallback: | ||
286 | /* | ||
287 | * If we don't have build-ids or the build-id file isn't in the | ||
288 | * cache, or is just a kallsyms file, well, lets hope that this | ||
289 | * DSO is the same as when 'perf record' ran. | ||
290 | */ | ||
291 | filename = dso->long_name; | ||
292 | snprintf(symfs_filename, sizeof(symfs_filename), "%s%s", | ||
293 | symbol_conf.symfs, filename); | ||
294 | free_filename = false; | ||
295 | } | ||
296 | |||
297 | if (dso->symtab_type == SYMTAB__KALLSYMS) { | ||
298 | char bf[BUILD_ID_SIZE * 2 + 16] = " with build id "; | ||
299 | char *build_id_msg = NULL; | ||
300 | |||
301 | if (dso->annotate_warned) | ||
302 | goto out_free_filename; | ||
303 | |||
304 | if (dso->has_build_id) { | ||
305 | build_id__sprintf(dso->build_id, | ||
306 | sizeof(dso->build_id), bf + 15); | ||
307 | build_id_msg = bf; | ||
308 | } | ||
309 | err = -ENOENT; | ||
310 | dso->annotate_warned = 1; | ||
311 | pr_err("Can't annotate %s: No vmlinux file%s was found in the " | ||
312 | "path.\nPlease use 'perf buildid-cache -av vmlinux' or " | ||
313 | "--vmlinux vmlinux.\n", | ||
314 | sym->name, build_id_msg ?: ""); | ||
315 | goto out_free_filename; | ||
316 | } | ||
317 | |||
318 | pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__, | ||
319 | filename, sym->name, map->unmap_ip(map, sym->start), | ||
320 | map->unmap_ip(map, sym->end)); | ||
321 | |||
322 | pr_debug("annotating [%p] %30s : [%p] %30s\n", | ||
323 | dso, dso->long_name, sym, sym->name); | ||
324 | |||
325 | snprintf(command, sizeof(command), | ||
326 | "objdump --start-address=0x%016" PRIx64 | ||
327 | " --stop-address=0x%016" PRIx64 " -dS -C %s|grep -v %s|expand", | ||
328 | map__rip_2objdump(map, sym->start), | ||
329 | map__rip_2objdump(map, sym->end), | ||
330 | symfs_filename, filename); | ||
331 | |||
332 | pr_debug("Executing: %s\n", command); | ||
333 | |||
334 | file = popen(command, "r"); | ||
335 | if (!file) | ||
336 | goto out_free_filename; | ||
337 | |||
338 | while (!feof(file)) | ||
339 | if (symbol__parse_objdump_line(sym, map, file, privsize) < 0) | ||
340 | break; | ||
341 | |||
342 | pclose(file); | ||
343 | out_free_filename: | ||
344 | if (free_filename) | ||
345 | free(filename); | ||
346 | return err; | ||
347 | } | ||
348 | |||
349 | static void insert_source_line(struct rb_root *root, struct source_line *src_line) | ||
350 | { | ||
351 | struct source_line *iter; | ||
352 | struct rb_node **p = &root->rb_node; | ||
353 | struct rb_node *parent = NULL; | ||
354 | |||
355 | while (*p != NULL) { | ||
356 | parent = *p; | ||
357 | iter = rb_entry(parent, struct source_line, node); | ||
358 | |||
359 | if (src_line->percent > iter->percent) | ||
360 | p = &(*p)->rb_left; | ||
361 | else | ||
362 | p = &(*p)->rb_right; | ||
363 | } | ||
364 | |||
365 | rb_link_node(&src_line->node, parent, p); | ||
366 | rb_insert_color(&src_line->node, root); | ||
367 | } | ||
368 | |||
369 | static void symbol__free_source_line(struct symbol *sym, int len) | ||
370 | { | ||
371 | struct annotation *notes = symbol__annotation(sym); | ||
372 | struct source_line *src_line = notes->src->lines; | ||
373 | int i; | ||
374 | |||
375 | for (i = 0; i < len; i++) | ||
376 | free(src_line[i].path); | ||
377 | |||
378 | free(src_line); | ||
379 | notes->src->lines = NULL; | ||
380 | } | ||
381 | |||
382 | /* Get the filename:line for the colored entries */ | ||
383 | static int symbol__get_source_line(struct symbol *sym, struct map *map, | ||
384 | int evidx, struct rb_root *root, int len, | ||
385 | const char *filename) | ||
386 | { | ||
387 | u64 start; | ||
388 | int i; | ||
389 | char cmd[PATH_MAX * 2]; | ||
390 | struct source_line *src_line; | ||
391 | struct annotation *notes = symbol__annotation(sym); | ||
392 | struct sym_hist *h = annotation__histogram(notes, evidx); | ||
393 | |||
394 | if (!h->sum) | ||
395 | return 0; | ||
396 | |||
397 | src_line = notes->src->lines = calloc(len, sizeof(struct source_line)); | ||
398 | if (!notes->src->lines) | ||
399 | return -1; | ||
400 | |||
401 | start = map->unmap_ip(map, sym->start); | ||
402 | |||
403 | for (i = 0; i < len; i++) { | ||
404 | char *path = NULL; | ||
405 | size_t line_len; | ||
406 | u64 offset; | ||
407 | FILE *fp; | ||
408 | |||
409 | src_line[i].percent = 100.0 * h->addr[i] / h->sum; | ||
410 | if (src_line[i].percent <= 0.5) | ||
411 | continue; | ||
412 | |||
413 | offset = start + i; | ||
414 | sprintf(cmd, "addr2line -e %s %016" PRIx64, filename, offset); | ||
415 | fp = popen(cmd, "r"); | ||
416 | if (!fp) | ||
417 | continue; | ||
418 | |||
419 | if (getline(&path, &line_len, fp) < 0 || !line_len) | ||
420 | goto next; | ||
421 | |||
422 | src_line[i].path = malloc(sizeof(char) * line_len + 1); | ||
423 | if (!src_line[i].path) | ||
424 | goto next; | ||
425 | |||
426 | strcpy(src_line[i].path, path); | ||
427 | insert_source_line(root, &src_line[i]); | ||
428 | |||
429 | next: | ||
430 | pclose(fp); | ||
431 | } | ||
432 | |||
433 | return 0; | ||
434 | } | ||
435 | |||
436 | static void print_summary(struct rb_root *root, const char *filename) | ||
437 | { | ||
438 | struct source_line *src_line; | ||
439 | struct rb_node *node; | ||
440 | |||
441 | printf("\nSorted summary for file %s\n", filename); | ||
442 | printf("----------------------------------------------\n\n"); | ||
443 | |||
444 | if (RB_EMPTY_ROOT(root)) { | ||
445 | printf(" Nothing higher than %1.1f%%\n", MIN_GREEN); | ||
446 | return; | ||
447 | } | ||
448 | |||
449 | node = rb_first(root); | ||
450 | while (node) { | ||
451 | double percent; | ||
452 | const char *color; | ||
453 | char *path; | ||
454 | |||
455 | src_line = rb_entry(node, struct source_line, node); | ||
456 | percent = src_line->percent; | ||
457 | color = get_percent_color(percent); | ||
458 | path = src_line->path; | ||
459 | |||
460 | color_fprintf(stdout, color, " %7.2f %s", percent, path); | ||
461 | node = rb_next(node); | ||
462 | } | ||
463 | } | ||
464 | |||
465 | static void symbol__annotate_hits(struct symbol *sym, int evidx) | ||
466 | { | ||
467 | struct annotation *notes = symbol__annotation(sym); | ||
468 | struct sym_hist *h = annotation__histogram(notes, evidx); | ||
469 | u64 len = sym->end - sym->start, offset; | ||
470 | |||
471 | for (offset = 0; offset < len; ++offset) | ||
472 | if (h->addr[offset] != 0) | ||
473 | printf("%*" PRIx64 ": %" PRIu64 "\n", BITS_PER_LONG / 2, | ||
474 | sym->start + offset, h->addr[offset]); | ||
475 | printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->sum", h->sum); | ||
476 | } | ||
477 | |||
478 | int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx, | ||
479 | bool full_paths, int min_pcnt, int max_lines, | ||
480 | int context) | ||
481 | { | ||
482 | struct dso *dso = map->dso; | ||
483 | const char *filename = dso->long_name, *d_filename; | ||
484 | struct annotation *notes = symbol__annotation(sym); | ||
485 | struct objdump_line *pos, *queue = NULL; | ||
486 | int printed = 2, queue_len = 0; | ||
487 | int more = 0; | ||
488 | u64 len; | ||
489 | |||
490 | if (full_paths) | ||
491 | d_filename = filename; | ||
492 | else | ||
493 | d_filename = basename(filename); | ||
494 | |||
495 | len = sym->end - sym->start; | ||
496 | |||
497 | printf(" Percent | Source code & Disassembly of %s\n", d_filename); | ||
498 | printf("------------------------------------------------\n"); | ||
499 | |||
500 | if (verbose) | ||
501 | symbol__annotate_hits(sym, evidx); | ||
502 | |||
503 | list_for_each_entry(pos, ¬es->src->source, node) { | ||
504 | if (context && queue == NULL) { | ||
505 | queue = pos; | ||
506 | queue_len = 0; | ||
507 | } | ||
508 | |||
509 | switch (objdump_line__print(pos, sym, evidx, len, min_pcnt, | ||
510 | printed, max_lines, queue)) { | ||
511 | case 0: | ||
512 | ++printed; | ||
513 | if (context) { | ||
514 | printed += queue_len; | ||
515 | queue = NULL; | ||
516 | queue_len = 0; | ||
517 | } | ||
518 | break; | ||
519 | case 1: | ||
520 | /* filtered by max_lines */ | ||
521 | ++more; | ||
522 | break; | ||
523 | case -1: | ||
524 | default: | ||
525 | /* | ||
526 | * Filtered by min_pcnt or non IP lines when | ||
527 | * context != 0 | ||
528 | */ | ||
529 | if (!context) | ||
530 | break; | ||
531 | if (queue_len == context) | ||
532 | queue = list_entry(queue->node.next, typeof(*queue), node); | ||
533 | else | ||
534 | ++queue_len; | ||
535 | break; | ||
536 | } | ||
537 | } | ||
538 | |||
539 | return more; | ||
540 | } | ||
541 | |||
542 | void symbol__annotate_zero_histogram(struct symbol *sym, int evidx) | ||
543 | { | ||
544 | struct annotation *notes = symbol__annotation(sym); | ||
545 | struct sym_hist *h = annotation__histogram(notes, evidx); | ||
546 | |||
547 | memset(h, 0, notes->src->sizeof_sym_hist); | ||
548 | } | ||
549 | |||
550 | void symbol__annotate_decay_histogram(struct symbol *sym, int evidx) | ||
551 | { | ||
552 | struct annotation *notes = symbol__annotation(sym); | ||
553 | struct sym_hist *h = annotation__histogram(notes, evidx); | ||
554 | struct objdump_line *pos; | ||
555 | int len = sym->end - sym->start; | ||
556 | |||
557 | h->sum = 0; | ||
558 | |||
559 | list_for_each_entry(pos, ¬es->src->source, node) { | ||
560 | if (pos->offset != -1 && pos->offset < len) { | ||
561 | h->addr[pos->offset] = h->addr[pos->offset] * 7 / 8; | ||
562 | h->sum += h->addr[pos->offset]; | ||
563 | } | ||
564 | } | ||
565 | } | ||
566 | |||
567 | void objdump_line_list__purge(struct list_head *head) | ||
568 | { | ||
569 | struct objdump_line *pos, *n; | ||
570 | |||
571 | list_for_each_entry_safe(pos, n, head, node) { | ||
572 | list_del(&pos->node); | ||
573 | objdump_line__free(pos); | ||
574 | } | ||
575 | } | ||
576 | |||
577 | int symbol__tty_annotate(struct symbol *sym, struct map *map, int evidx, | ||
578 | bool print_lines, bool full_paths, int min_pcnt, | ||
579 | int max_lines) | ||
580 | { | ||
581 | struct dso *dso = map->dso; | ||
582 | const char *filename = dso->long_name; | ||
583 | struct rb_root source_line = RB_ROOT; | ||
584 | u64 len; | ||
585 | |||
586 | if (symbol__annotate(sym, map, 0) < 0) | ||
587 | return -1; | ||
588 | |||
589 | len = sym->end - sym->start; | ||
590 | |||
591 | if (print_lines) { | ||
592 | symbol__get_source_line(sym, map, evidx, &source_line, | ||
593 | len, filename); | ||
594 | print_summary(&source_line, filename); | ||
595 | } | ||
596 | |||
597 | symbol__annotate_printf(sym, map, evidx, full_paths, | ||
598 | min_pcnt, max_lines, 0); | ||
599 | if (print_lines) | ||
600 | symbol__free_source_line(sym, len); | ||
601 | |||
602 | objdump_line_list__purge(&symbol__annotation(sym)->src->source); | ||
603 | |||
604 | return 0; | ||
605 | } | ||
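
An illustrative sketch, not part of the patch, of how a caller is expected to drive the histogram side of this file; the record_hit() wrapper, its nr_events parameter and the resolved sym/map/evidx/addr values are assumptions standing in for perf's event and symbol layers:

    /* Sketch only: sym, map, evidx and addr are assumed to come from perf's
     * sample resolution; nr_events is the number of monitored events. */
    static int record_hit(struct symbol *sym, struct map *map,
                          int nr_events, int evidx, u64 addr)
    {
            struct annotation *notes = symbol__annotation(sym);

            /* Allocate one histogram per event the first time the symbol is hit. */
            if (notes->src == NULL && symbol__alloc_hist(sym, nr_events) < 0)
                    return -ENOMEM;

            /* Bumps h->sum and h->addr[addr - sym->start] for event evidx. */
            return symbol__inc_addr_samples(sym, map, evidx, addr);
    }

Printing then goes through symbol__annotate(), which runs objdump and parses its output into notes->src->source, and symbol__annotate_printf(); symbol__tty_annotate() above wraps both and finishes with objdump_line_list__purge().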
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
new file mode 100644
index 000000000000..c2c286896801
--- /dev/null
+++ b/tools/perf/util/annotate.h
@@ -0,0 +1,103 @@ | |||
1 | #ifndef __PERF_ANNOTATE_H | ||
2 | #define __PERF_ANNOTATE_H | ||
3 | |||
4 | #include <stdbool.h> | ||
5 | #include "types.h" | ||
6 | #include "symbol.h" | ||
7 | #include <linux/list.h> | ||
8 | #include <linux/rbtree.h> | ||
9 | |||
10 | struct objdump_line { | ||
11 | struct list_head node; | ||
12 | s64 offset; | ||
13 | char *line; | ||
14 | }; | ||
15 | |||
16 | void objdump_line__free(struct objdump_line *self); | ||
17 | struct objdump_line *objdump__get_next_ip_line(struct list_head *head, | ||
18 | struct objdump_line *pos); | ||
19 | |||
20 | struct sym_hist { | ||
21 | u64 sum; | ||
22 | u64 addr[0]; | ||
23 | }; | ||
24 | |||
25 | struct source_line { | ||
26 | struct rb_node node; | ||
27 | double percent; | ||
28 | char *path; | ||
29 | }; | ||
30 | |||
31 | /** struct annotated_source - symbols with hits have this attached as in sannotation | ||
32 | * | ||
33 | * @histogram: Array of addr hit histograms per event being monitored | ||
34 | * @lines: If 'print_lines' is specified, per source code line percentages | ||
35 | * @source: source parsed from objdump -dS | ||
36 | * | ||
37 | * lines is allocated, percentages calculated and all sorted by percentage | ||
38 | * when the annotation is about to be presented, so the percentages are for | ||
39 | * one of the entries in the histogram array, i.e. for the event/counter being | ||
40 | * presented. It is deallocated right after symbol__{tui,tty,etc}_annotate | ||
41 | * returns. | ||
42 | */ | ||
43 | struct annotated_source { | ||
44 | struct list_head source; | ||
45 | struct source_line *lines; | ||
46 | int nr_histograms; | ||
47 | int sizeof_sym_hist; | ||
48 | struct sym_hist histograms[0]; | ||
49 | }; | ||
50 | |||
51 | struct annotation { | ||
52 | pthread_mutex_t lock; | ||
53 | struct annotated_source *src; | ||
54 | }; | ||
55 | |||
56 | struct sannotation { | ||
57 | struct annotation annotation; | ||
58 | struct symbol symbol; | ||
59 | }; | ||
60 | |||
61 | static inline struct sym_hist *annotation__histogram(struct annotation *notes, int idx) | ||
62 | { | ||
63 | return (((void *)¬es->src->histograms) + | ||
64 | (notes->src->sizeof_sym_hist * idx)); | ||
65 | } | ||
66 | |||
67 | static inline struct annotation *symbol__annotation(struct symbol *sym) | ||
68 | { | ||
69 | struct sannotation *a = container_of(sym, struct sannotation, symbol); | ||
70 | return &a->annotation; | ||
71 | } | ||
72 | |||
73 | int symbol__inc_addr_samples(struct symbol *sym, struct map *map, | ||
74 | int evidx, u64 addr); | ||
75 | int symbol__alloc_hist(struct symbol *sym, int nevents); | ||
76 | void symbol__annotate_zero_histograms(struct symbol *sym); | ||
77 | |||
78 | int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize); | ||
79 | int symbol__annotate_init(struct map *map __used, struct symbol *sym); | ||
80 | int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx, | ||
81 | bool full_paths, int min_pcnt, int max_lines, | ||
82 | int context); | ||
83 | void symbol__annotate_zero_histogram(struct symbol *sym, int evidx); | ||
84 | void symbol__annotate_decay_histogram(struct symbol *sym, int evidx); | ||
85 | void objdump_line_list__purge(struct list_head *head); | ||
86 | |||
87 | int symbol__tty_annotate(struct symbol *sym, struct map *map, int evidx, | ||
88 | bool print_lines, bool full_paths, int min_pcnt, | ||
89 | int max_lines); | ||
90 | |||
91 | #ifdef NO_NEWT_SUPPORT | ||
92 | static inline int symbol__tui_annotate(struct symbol *sym __used, | ||
93 | struct map *map __used, | ||
94 | int evidx __used, int refresh __used) | ||
95 | { | ||
96 | return 0; | ||
97 | } | ||
98 | #else | ||
99 | int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx, | ||
100 | int refresh); | ||
101 | #endif | ||
102 | |||
103 | #endif /* __PERF_ANNOTATE_H */ | ||
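
A sketch of the memory layout the inline helpers above rely on (illustrative, not part of the patch); the sym, evidx and addr names in the last three lines are placeholders. struct sannotation places the annotation immediately before the symbol so symbol__annotation() can recover it with container_of(), and histograms[] is a flexible array of variable-size records:

    /* For a symbol of len = sym->end - sym->start bytes and nevents events:
     *
     *   sizeof_sym_hist = sizeof(struct sym_hist) + len * sizeof(u64)
     *
     *   histograms: [ sym_hist for evidx 0 ][ sym_hist for evidx 1 ] ...
     *
     * so histogram idx starts at (void *)histograms + idx * sizeof_sym_hist,
     * which is exactly the pointer arithmetic annotation__histogram() does.
     */
    struct sym_hist *h = annotation__histogram(symbol__annotation(sym), evidx);
    h->addr[addr - sym->start]++;   /* one more hit on this instruction */
    h->sum++;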
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index e437edb72417..a91cd99f26ea 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -14,7 +14,10 @@ | |||
14 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
15 | #include "debug.h" | 15 | #include "debug.h" |
16 | 16 | ||
17 | static int build_id__mark_dso_hit(event_t *event, struct perf_session *session) | 17 | static int build_id__mark_dso_hit(union perf_event *event, |
18 | struct perf_sample *sample __used, | ||
19 | struct perf_evsel *evsel __used, | ||
20 | struct perf_session *session) | ||
18 | { | 21 | { |
19 | struct addr_location al; | 22 | struct addr_location al; |
20 | u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; | 23 | u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; |
@@ -35,12 +38,14 @@ static int build_id__mark_dso_hit(event_t *event, struct perf_session *session) | |||
35 | return 0; | 38 | return 0; |
36 | } | 39 | } |
37 | 40 | ||
38 | static int event__exit_del_thread(event_t *self, struct perf_session *session) | 41 | static int perf_event__exit_del_thread(union perf_event *event, |
42 | struct perf_sample *sample __used, | ||
43 | struct perf_session *session) | ||
39 | { | 44 | { |
40 | struct thread *thread = perf_session__findnew(session, self->fork.tid); | 45 | struct thread *thread = perf_session__findnew(session, event->fork.tid); |
41 | 46 | ||
42 | dump_printf("(%d:%d):(%d:%d)\n", self->fork.pid, self->fork.tid, | 47 | dump_printf("(%d:%d):(%d:%d)\n", event->fork.pid, event->fork.tid, |
43 | self->fork.ppid, self->fork.ptid); | 48 | event->fork.ppid, event->fork.ptid); |
44 | 49 | ||
45 | if (thread) { | 50 | if (thread) { |
46 | rb_erase(&thread->rb_node, &session->threads); | 51 | rb_erase(&thread->rb_node, &session->threads); |
@@ -53,9 +58,9 @@ static int event__exit_del_thread(event_t *self, struct perf_session *session) | |||
53 | 58 | ||
54 | struct perf_event_ops build_id__mark_dso_hit_ops = { | 59 | struct perf_event_ops build_id__mark_dso_hit_ops = { |
55 | .sample = build_id__mark_dso_hit, | 60 | .sample = build_id__mark_dso_hit, |
56 | .mmap = event__process_mmap, | 61 | .mmap = perf_event__process_mmap, |
57 | .fork = event__process_task, | 62 | .fork = perf_event__process_task, |
58 | .exit = event__exit_del_thread, | 63 | .exit = perf_event__exit_del_thread, |
59 | }; | 64 | }; |
60 | 65 | ||
61 | char *dso__build_id_filename(struct dso *self, char *bf, size_t size) | 66 | char *dso__build_id_filename(struct dso *self, char *bf, size_t size) |
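
The build-id hunk above adapts to a tree-wide change in the perf_event_ops handler signatures: a .sample handler now also receives the parsed perf_sample and the perf_evsel, and the helpers drop their event__ prefix in favour of perf_event__. A hypothetical handler written against the new signatures (names are placeholders, not part of the patch):

    static int my_sample(union perf_event *event,
                         struct perf_sample *sample __used,
                         struct perf_evsel *evsel __used,
                         struct perf_session *session)
    {
            /* event->header, sample->period etc. are available here */
            return 0;
    }

    static struct perf_event_ops my_ops = {
            .sample = my_sample,
            .mmap   = perf_event__process_mmap,   /* was event__process_mmap */
    };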
diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h
index 27e9ebe4076e..fc5e5a09d5b9 100644
--- a/tools/perf/util/cache.h
+++ b/tools/perf/util/cache.h
@@ -34,13 +34,14 @@ extern int pager_use_color; | |||
34 | extern int use_browser; | 34 | extern int use_browser; |
35 | 35 | ||
36 | #ifdef NO_NEWT_SUPPORT | 36 | #ifdef NO_NEWT_SUPPORT |
37 | static inline void setup_browser(void) | 37 | static inline void setup_browser(bool fallback_to_pager) |
38 | { | 38 | { |
39 | setup_pager(); | 39 | if (fallback_to_pager) |
40 | setup_pager(); | ||
40 | } | 41 | } |
41 | static inline void exit_browser(bool wait_for_ok __used) {} | 42 | static inline void exit_browser(bool wait_for_ok __used) {} |
42 | #else | 43 | #else |
43 | void setup_browser(void); | 44 | void setup_browser(bool fallback_to_pager); |
44 | void exit_browser(bool wait_for_ok); | 45 | void exit_browser(bool wait_for_ok); |
45 | #endif | 46 | #endif |
46 | 47 | ||
@@ -82,6 +83,8 @@ extern char *perf_path(const char *fmt, ...) __attribute__((format (printf, 1, 2 | |||
82 | extern char *perf_pathdup(const char *fmt, ...) | 83 | extern char *perf_pathdup(const char *fmt, ...) |
83 | __attribute__((format (printf, 1, 2))); | 84 | __attribute__((format (printf, 1, 2))); |
84 | 85 | ||
86 | #ifdef NO_STRLCPY | ||
85 | extern size_t strlcpy(char *dest, const char *src, size_t size); | 87 | extern size_t strlcpy(char *dest, const char *src, size_t size); |
88 | #endif | ||
86 | 89 | ||
87 | #endif /* __PERF_CACHE_H */ | 90 | #endif /* __PERF_CACHE_H */ |
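
Two small interface notes on this hunk: setup_browser() now takes a flag so each command decides whether the pager is an acceptable fallback when the newt TUI is not built in, and the new NO_STRLCPY guard stops perf from redeclaring strlcpy() on C libraries that already provide it. A minimal, hypothetical call site:

    setup_browser(true);    /* use the TUI if available, else fall back to the pager */
    /* ... produce the report ... */
    exit_browser(0);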
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index f231f43424d2..9f7106a8d9a4 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2009-2010, Frederic Weisbecker <fweisbec@gmail.com> | 2 | * Copyright (C) 2009-2011, Frederic Weisbecker <fweisbec@gmail.com> |
3 | * | 3 | * |
4 | * Handle the callchains from the stream in an ad-hoc radix tree and then | 4 | * Handle the callchains from the stream in an ad-hoc radix tree and then |
5 | * sort them in an rbtree. | 5 | * sort them in an rbtree. |
@@ -18,7 +18,8 @@ | |||
18 | #include "util.h" | 18 | #include "util.h" |
19 | #include "callchain.h" | 19 | #include "callchain.h" |
20 | 20 | ||
21 | bool ip_callchain__valid(struct ip_callchain *chain, const event_t *event) | 21 | bool ip_callchain__valid(struct ip_callchain *chain, |
22 | const union perf_event *event) | ||
22 | { | 23 | { |
23 | unsigned int chain_size = event->header.size; | 24 | unsigned int chain_size = event->header.size; |
24 | chain_size -= (unsigned long)&event->ip.__more_data - (unsigned long)event; | 25 | chain_size -= (unsigned long)&event->ip.__more_data - (unsigned long)event; |
@@ -26,7 +27,10 @@ bool ip_callchain__valid(struct ip_callchain *chain, const event_t *event) | |||
26 | } | 27 | } |
27 | 28 | ||
28 | #define chain_for_each_child(child, parent) \ | 29 | #define chain_for_each_child(child, parent) \ |
29 | list_for_each_entry(child, &parent->children, brothers) | 30 | list_for_each_entry(child, &parent->children, siblings) |
31 | |||
32 | #define chain_for_each_child_safe(child, next, parent) \ | ||
33 | list_for_each_entry_safe(child, next, &parent->children, siblings) | ||
30 | 34 | ||
31 | static void | 35 | static void |
32 | rb_insert_callchain(struct rb_root *root, struct callchain_node *chain, | 36 | rb_insert_callchain(struct rb_root *root, struct callchain_node *chain, |
@@ -35,14 +39,14 @@ rb_insert_callchain(struct rb_root *root, struct callchain_node *chain, | |||
35 | struct rb_node **p = &root->rb_node; | 39 | struct rb_node **p = &root->rb_node; |
36 | struct rb_node *parent = NULL; | 40 | struct rb_node *parent = NULL; |
37 | struct callchain_node *rnode; | 41 | struct callchain_node *rnode; |
38 | u64 chain_cumul = cumul_hits(chain); | 42 | u64 chain_cumul = callchain_cumul_hits(chain); |
39 | 43 | ||
40 | while (*p) { | 44 | while (*p) { |
41 | u64 rnode_cumul; | 45 | u64 rnode_cumul; |
42 | 46 | ||
43 | parent = *p; | 47 | parent = *p; |
44 | rnode = rb_entry(parent, struct callchain_node, rb_node); | 48 | rnode = rb_entry(parent, struct callchain_node, rb_node); |
45 | rnode_cumul = cumul_hits(rnode); | 49 | rnode_cumul = callchain_cumul_hits(rnode); |
46 | 50 | ||
47 | switch (mode) { | 51 | switch (mode) { |
48 | case CHAIN_FLAT: | 52 | case CHAIN_FLAT: |
@@ -86,10 +90,10 @@ __sort_chain_flat(struct rb_root *rb_root, struct callchain_node *node, | |||
86 | * sort them by hit | 90 | * sort them by hit |
87 | */ | 91 | */ |
88 | static void | 92 | static void |
89 | sort_chain_flat(struct rb_root *rb_root, struct callchain_node *node, | 93 | sort_chain_flat(struct rb_root *rb_root, struct callchain_root *root, |
90 | u64 min_hit, struct callchain_param *param __used) | 94 | u64 min_hit, struct callchain_param *param __used) |
91 | { | 95 | { |
92 | __sort_chain_flat(rb_root, node, min_hit); | 96 | __sort_chain_flat(rb_root, &root->node, min_hit); |
93 | } | 97 | } |
94 | 98 | ||
95 | static void __sort_chain_graph_abs(struct callchain_node *node, | 99 | static void __sort_chain_graph_abs(struct callchain_node *node, |
@@ -101,18 +105,18 @@ static void __sort_chain_graph_abs(struct callchain_node *node, | |||
101 | 105 | ||
102 | chain_for_each_child(child, node) { | 106 | chain_for_each_child(child, node) { |
103 | __sort_chain_graph_abs(child, min_hit); | 107 | __sort_chain_graph_abs(child, min_hit); |
104 | if (cumul_hits(child) >= min_hit) | 108 | if (callchain_cumul_hits(child) >= min_hit) |
105 | rb_insert_callchain(&node->rb_root, child, | 109 | rb_insert_callchain(&node->rb_root, child, |
106 | CHAIN_GRAPH_ABS); | 110 | CHAIN_GRAPH_ABS); |
107 | } | 111 | } |
108 | } | 112 | } |
109 | 113 | ||
110 | static void | 114 | static void |
111 | sort_chain_graph_abs(struct rb_root *rb_root, struct callchain_node *chain_root, | 115 | sort_chain_graph_abs(struct rb_root *rb_root, struct callchain_root *chain_root, |
112 | u64 min_hit, struct callchain_param *param __used) | 116 | u64 min_hit, struct callchain_param *param __used) |
113 | { | 117 | { |
114 | __sort_chain_graph_abs(chain_root, min_hit); | 118 | __sort_chain_graph_abs(&chain_root->node, min_hit); |
115 | rb_root->rb_node = chain_root->rb_root.rb_node; | 119 | rb_root->rb_node = chain_root->node.rb_root.rb_node; |
116 | } | 120 | } |
117 | 121 | ||
118 | static void __sort_chain_graph_rel(struct callchain_node *node, | 122 | static void __sort_chain_graph_rel(struct callchain_node *node, |
@@ -126,21 +130,21 @@ static void __sort_chain_graph_rel(struct callchain_node *node, | |||
126 | 130 | ||
127 | chain_for_each_child(child, node) { | 131 | chain_for_each_child(child, node) { |
128 | __sort_chain_graph_rel(child, min_percent); | 132 | __sort_chain_graph_rel(child, min_percent); |
129 | if (cumul_hits(child) >= min_hit) | 133 | if (callchain_cumul_hits(child) >= min_hit) |
130 | rb_insert_callchain(&node->rb_root, child, | 134 | rb_insert_callchain(&node->rb_root, child, |
131 | CHAIN_GRAPH_REL); | 135 | CHAIN_GRAPH_REL); |
132 | } | 136 | } |
133 | } | 137 | } |
134 | 138 | ||
135 | static void | 139 | static void |
136 | sort_chain_graph_rel(struct rb_root *rb_root, struct callchain_node *chain_root, | 140 | sort_chain_graph_rel(struct rb_root *rb_root, struct callchain_root *chain_root, |
137 | u64 min_hit __used, struct callchain_param *param) | 141 | u64 min_hit __used, struct callchain_param *param) |
138 | { | 142 | { |
139 | __sort_chain_graph_rel(chain_root, param->min_percent / 100.0); | 143 | __sort_chain_graph_rel(&chain_root->node, param->min_percent / 100.0); |
140 | rb_root->rb_node = chain_root->rb_root.rb_node; | 144 | rb_root->rb_node = chain_root->node.rb_root.rb_node; |
141 | } | 145 | } |
142 | 146 | ||
143 | int register_callchain_param(struct callchain_param *param) | 147 | int callchain_register_param(struct callchain_param *param) |
144 | { | 148 | { |
145 | switch (param->mode) { | 149 | switch (param->mode) { |
146 | case CHAIN_GRAPH_ABS: | 150 | case CHAIN_GRAPH_ABS: |
@@ -186,32 +190,27 @@ create_child(struct callchain_node *parent, bool inherit_children) | |||
186 | chain_for_each_child(next, new) | 190 | chain_for_each_child(next, new) |
187 | next->parent = new; | 191 | next->parent = new; |
188 | } | 192 | } |
189 | list_add_tail(&new->brothers, &parent->children); | 193 | list_add_tail(&new->siblings, &parent->children); |
190 | 194 | ||
191 | return new; | 195 | return new; |
192 | } | 196 | } |
193 | 197 | ||
194 | 198 | ||
195 | struct resolved_ip { | ||
196 | u64 ip; | ||
197 | struct map_symbol ms; | ||
198 | }; | ||
199 | |||
200 | struct resolved_chain { | ||
201 | u64 nr; | ||
202 | struct resolved_ip ips[0]; | ||
203 | }; | ||
204 | |||
205 | |||
206 | /* | 199 | /* |
207 | * Fill the node with callchain values | 200 | * Fill the node with callchain values |
208 | */ | 201 | */ |
209 | static void | 202 | static void |
210 | fill_node(struct callchain_node *node, struct resolved_chain *chain, int start) | 203 | fill_node(struct callchain_node *node, struct callchain_cursor *cursor) |
211 | { | 204 | { |
212 | unsigned int i; | 205 | struct callchain_cursor_node *cursor_node; |
213 | 206 | ||
214 | for (i = start; i < chain->nr; i++) { | 207 | node->val_nr = cursor->nr - cursor->pos; |
208 | if (!node->val_nr) | ||
209 | pr_warning("Warning: empty node in callchain tree\n"); | ||
210 | |||
211 | cursor_node = callchain_cursor_current(cursor); | ||
212 | |||
213 | while (cursor_node) { | ||
215 | struct callchain_list *call; | 214 | struct callchain_list *call; |
216 | 215 | ||
217 | call = zalloc(sizeof(*call)); | 216 | call = zalloc(sizeof(*call)); |
@@ -219,23 +218,25 @@ fill_node(struct callchain_node *node, struct resolved_chain *chain, int start) | |||
219 | perror("not enough memory for the code path tree"); | 218 | perror("not enough memory for the code path tree"); |
220 | return; | 219 | return; |
221 | } | 220 | } |
222 | call->ip = chain->ips[i].ip; | 221 | call->ip = cursor_node->ip; |
223 | call->ms = chain->ips[i].ms; | 222 | call->ms.sym = cursor_node->sym; |
223 | call->ms.map = cursor_node->map; | ||
224 | list_add_tail(&call->list, &node->val); | 224 | list_add_tail(&call->list, &node->val); |
225 | |||
226 | callchain_cursor_advance(cursor); | ||
227 | cursor_node = callchain_cursor_current(cursor); | ||
225 | } | 228 | } |
226 | node->val_nr = chain->nr - start; | ||
227 | if (!node->val_nr) | ||
228 | pr_warning("Warning: empty node in callchain tree\n"); | ||
229 | } | 229 | } |
230 | 230 | ||
231 | static void | 231 | static void |
232 | add_child(struct callchain_node *parent, struct resolved_chain *chain, | 232 | add_child(struct callchain_node *parent, |
233 | int start, u64 period) | 233 | struct callchain_cursor *cursor, |
234 | u64 period) | ||
234 | { | 235 | { |
235 | struct callchain_node *new; | 236 | struct callchain_node *new; |
236 | 237 | ||
237 | new = create_child(parent, false); | 238 | new = create_child(parent, false); |
238 | fill_node(new, chain, start); | 239 | fill_node(new, cursor); |
239 | 240 | ||
240 | new->children_hit = 0; | 241 | new->children_hit = 0; |
241 | new->hit = period; | 242 | new->hit = period; |
@@ -247,9 +248,10 @@ add_child(struct callchain_node *parent, struct resolved_chain *chain, | |||
247 | * Then create another child to host the given callchain of new branch | 248 | * Then create another child to host the given callchain of new branch |
248 | */ | 249 | */ |
249 | static void | 250 | static void |
250 | split_add_child(struct callchain_node *parent, struct resolved_chain *chain, | 251 | split_add_child(struct callchain_node *parent, |
251 | struct callchain_list *to_split, int idx_parents, int idx_local, | 252 | struct callchain_cursor *cursor, |
252 | u64 period) | 253 | struct callchain_list *to_split, |
254 | u64 idx_parents, u64 idx_local, u64 period) | ||
253 | { | 255 | { |
254 | struct callchain_node *new; | 256 | struct callchain_node *new; |
255 | struct list_head *old_tail; | 257 | struct list_head *old_tail; |
@@ -269,14 +271,14 @@ split_add_child(struct callchain_node *parent, struct resolved_chain *chain, | |||
269 | /* split the hits */ | 271 | /* split the hits */ |
270 | new->hit = parent->hit; | 272 | new->hit = parent->hit; |
271 | new->children_hit = parent->children_hit; | 273 | new->children_hit = parent->children_hit; |
272 | parent->children_hit = cumul_hits(new); | 274 | parent->children_hit = callchain_cumul_hits(new); |
273 | new->val_nr = parent->val_nr - idx_local; | 275 | new->val_nr = parent->val_nr - idx_local; |
274 | parent->val_nr = idx_local; | 276 | parent->val_nr = idx_local; |
275 | 277 | ||
276 | /* create a new child for the new branch if any */ | 278 | /* create a new child for the new branch if any */ |
277 | if (idx_total < chain->nr) { | 279 | if (idx_total < cursor->nr) { |
278 | parent->hit = 0; | 280 | parent->hit = 0; |
279 | add_child(parent, chain, idx_total, period); | 281 | add_child(parent, cursor, period); |
280 | parent->children_hit += period; | 282 | parent->children_hit += period; |
281 | } else { | 283 | } else { |
282 | parent->hit = period; | 284 | parent->hit = period; |
@@ -284,37 +286,41 @@ split_add_child(struct callchain_node *parent, struct resolved_chain *chain, | |||
284 | } | 286 | } |
285 | 287 | ||
286 | static int | 288 | static int |
287 | __append_chain(struct callchain_node *root, struct resolved_chain *chain, | 289 | append_chain(struct callchain_node *root, |
288 | unsigned int start, u64 period); | 290 | struct callchain_cursor *cursor, |
291 | u64 period); | ||
289 | 292 | ||
290 | static void | 293 | static void |
291 | __append_chain_children(struct callchain_node *root, | 294 | append_chain_children(struct callchain_node *root, |
292 | struct resolved_chain *chain, | 295 | struct callchain_cursor *cursor, |
293 | unsigned int start, u64 period) | 296 | u64 period) |
294 | { | 297 | { |
295 | struct callchain_node *rnode; | 298 | struct callchain_node *rnode; |
296 | 299 | ||
297 | /* lookup in childrens */ | 300 | /* lookup in childrens */ |
298 | chain_for_each_child(rnode, root) { | 301 | chain_for_each_child(rnode, root) { |
299 | unsigned int ret = __append_chain(rnode, chain, start, period); | 302 | unsigned int ret = append_chain(rnode, cursor, period); |
300 | 303 | ||
301 | if (!ret) | 304 | if (!ret) |
302 | goto inc_children_hit; | 305 | goto inc_children_hit; |
303 | } | 306 | } |
304 | /* nothing in children, add to the current node */ | 307 | /* nothing in children, add to the current node */ |
305 | add_child(root, chain, start, period); | 308 | add_child(root, cursor, period); |
306 | 309 | ||
307 | inc_children_hit: | 310 | inc_children_hit: |
308 | root->children_hit += period; | 311 | root->children_hit += period; |
309 | } | 312 | } |
310 | 313 | ||
311 | static int | 314 | static int |
312 | __append_chain(struct callchain_node *root, struct resolved_chain *chain, | 315 | append_chain(struct callchain_node *root, |
313 | unsigned int start, u64 period) | 316 | struct callchain_cursor *cursor, |
317 | u64 period) | ||
314 | { | 318 | { |
319 | struct callchain_cursor_node *curr_snap = cursor->curr; | ||
315 | struct callchain_list *cnode; | 320 | struct callchain_list *cnode; |
316 | unsigned int i = start; | 321 | u64 start = cursor->pos; |
317 | bool found = false; | 322 | bool found = false; |
323 | u64 matches; | ||
318 | 324 | ||
319 | /* | 325 | /* |
320 | * Lookup in the current node | 326 | * Lookup in the current node |
@@ -322,85 +328,134 @@ __append_chain(struct callchain_node *root, struct resolved_chain *chain, | |||
322 | * anywhere inside a function. | 328 | * anywhere inside a function. |
323 | */ | 329 | */ |
324 | list_for_each_entry(cnode, &root->val, list) { | 330 | list_for_each_entry(cnode, &root->val, list) { |
331 | struct callchain_cursor_node *node; | ||
325 | struct symbol *sym; | 332 | struct symbol *sym; |
326 | 333 | ||
327 | if (i == chain->nr) | 334 | node = callchain_cursor_current(cursor); |
335 | if (!node) | ||
328 | break; | 336 | break; |
329 | 337 | ||
330 | sym = chain->ips[i].ms.sym; | 338 | sym = node->sym; |
331 | 339 | ||
332 | if (cnode->ms.sym && sym) { | 340 | if (cnode->ms.sym && sym) { |
333 | if (cnode->ms.sym->start != sym->start) | 341 | if (cnode->ms.sym->start != sym->start) |
334 | break; | 342 | break; |
335 | } else if (cnode->ip != chain->ips[i].ip) | 343 | } else if (cnode->ip != node->ip) |
336 | break; | 344 | break; |
337 | 345 | ||
338 | if (!found) | 346 | if (!found) |
339 | found = true; | 347 | found = true; |
340 | i++; | 348 | |
349 | callchain_cursor_advance(cursor); | ||
341 | } | 350 | } |
342 | 351 | ||
343 | /* matches not, relay on the parent */ | 352 | /* matches not, relay on the parent */ |
344 | if (!found) | 353 | if (!found) { |
354 | cursor->curr = curr_snap; | ||
355 | cursor->pos = start; | ||
345 | return -1; | 356 | return -1; |
357 | } | ||
358 | |||
359 | matches = cursor->pos - start; | ||
346 | 360 | ||
347 | /* we match only a part of the node. Split it and add the new chain */ | 361 | /* we match only a part of the node. Split it and add the new chain */ |
348 | if (i - start < root->val_nr) { | 362 | if (matches < root->val_nr) { |
349 | split_add_child(root, chain, cnode, start, i - start, period); | 363 | split_add_child(root, cursor, cnode, start, matches, period); |
350 | return 0; | 364 | return 0; |
351 | } | 365 | } |
352 | 366 | ||
353 | /* we match 100% of the path, increment the hit */ | 367 | /* we match 100% of the path, increment the hit */ |
354 | if (i - start == root->val_nr && i == chain->nr) { | 368 | if (matches == root->val_nr && cursor->pos == cursor->nr) { |
355 | root->hit += period; | 369 | root->hit += period; |
356 | return 0; | 370 | return 0; |
357 | } | 371 | } |
358 | 372 | ||
359 | /* We match the node and still have a part remaining */ | 373 | /* We match the node and still have a part remaining */ |
360 | __append_chain_children(root, chain, i, period); | 374 | append_chain_children(root, cursor, period); |
361 | 375 | ||
362 | return 0; | 376 | return 0; |
363 | } | 377 | } |
364 | 378 | ||
365 | static void filter_context(struct ip_callchain *old, struct resolved_chain *new, | 379 | int callchain_append(struct callchain_root *root, |
366 | struct map_symbol *syms) | 380 | struct callchain_cursor *cursor, |
381 | u64 period) | ||
367 | { | 382 | { |
368 | int i, j = 0; | 383 | if (!cursor->nr) |
384 | return 0; | ||
385 | |||
386 | callchain_cursor_commit(cursor); | ||
387 | |||
388 | append_chain_children(&root->node, cursor, period); | ||
369 | 389 | ||
370 | for (i = 0; i < (int)old->nr; i++) { | 390 | if (cursor->nr > root->max_depth) |
371 | if (old->ips[i] >= PERF_CONTEXT_MAX) | 391 | root->max_depth = cursor->nr; |
372 | continue; | ||
373 | 392 | ||
374 | new->ips[j].ip = old->ips[i]; | 393 | return 0; |
375 | new->ips[j].ms = syms[i]; | 394 | } |
376 | j++; | 395 | |
396 | static int | ||
397 | merge_chain_branch(struct callchain_cursor *cursor, | ||
398 | struct callchain_node *dst, struct callchain_node *src) | ||
399 | { | ||
400 | struct callchain_cursor_node **old_last = cursor->last; | ||
401 | struct callchain_node *child, *next_child; | ||
402 | struct callchain_list *list, *next_list; | ||
403 | int old_pos = cursor->nr; | ||
404 | int err = 0; | ||
405 | |||
406 | list_for_each_entry_safe(list, next_list, &src->val, list) { | ||
407 | callchain_cursor_append(cursor, list->ip, | ||
408 | list->ms.map, list->ms.sym); | ||
409 | list_del(&list->list); | ||
410 | free(list); | ||
411 | } | ||
412 | |||
413 | if (src->hit) { | ||
414 | callchain_cursor_commit(cursor); | ||
415 | append_chain_children(dst, cursor, src->hit); | ||
416 | } | ||
417 | |||
418 | chain_for_each_child_safe(child, next_child, src) { | ||
419 | err = merge_chain_branch(cursor, dst, child); | ||
420 | if (err) | ||
421 | break; | ||
422 | |||
423 | list_del(&child->siblings); | ||
424 | free(child); | ||
377 | } | 425 | } |
378 | 426 | ||
379 | new->nr = j; | 427 | cursor->nr = old_pos; |
428 | cursor->last = old_last; | ||
429 | |||
430 | return err; | ||
380 | } | 431 | } |
381 | 432 | ||
433 | int callchain_merge(struct callchain_cursor *cursor, | ||
434 | struct callchain_root *dst, struct callchain_root *src) | ||
435 | { | ||
436 | return merge_chain_branch(cursor, &dst->node, &src->node); | ||
437 | } | ||
382 | 438 | ||
383 | int append_chain(struct callchain_node *root, struct ip_callchain *chain, | 439 | int callchain_cursor_append(struct callchain_cursor *cursor, |
384 | struct map_symbol *syms, u64 period) | 440 | u64 ip, struct map *map, struct symbol *sym) |
385 | { | 441 | { |
386 | struct resolved_chain *filtered; | 442 | struct callchain_cursor_node *node = *cursor->last; |
387 | 443 | ||
388 | if (!chain->nr) | 444 | if (!node) { |
389 | return 0; | 445 | node = calloc(sizeof(*node), 1); |
446 | if (!node) | ||
447 | return -ENOMEM; | ||
390 | 448 | ||
391 | filtered = zalloc(sizeof(*filtered) + | 449 | *cursor->last = node; |
392 | chain->nr * sizeof(struct resolved_ip)); | 450 | } |
393 | if (!filtered) | ||
394 | return -ENOMEM; | ||
395 | 451 | ||
396 | filter_context(chain, filtered, syms); | 452 | node->ip = ip; |
453 | node->map = map; | ||
454 | node->sym = sym; | ||
397 | 455 | ||
398 | if (!filtered->nr) | 456 | cursor->nr++; |
399 | goto end; | ||
400 | 457 | ||
401 | __append_chain_children(root, filtered, 0, period); | 458 | cursor->last = &node->next; |
402 | end: | ||
403 | free(filtered); | ||
404 | 459 | ||
405 | return 0; | 460 | return 0; |
406 | } | 461 | } |
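
The rewrite above replaces the one-shot resolved_chain (built from a whole ip_callchain plus a map_symbol array) with an incremental callchain_cursor that callers fill one resolved frame at a time. A sketch of the writer side under assumed names (ip0/map0/sym0 and friends stand for frames already resolved elsewhere; not part of the patch):

    struct callchain_root chain_root;
    static struct callchain_cursor cursor;  /* reused, keeps its nodes as a cache */

    callchain_init(&chain_root);
    callchain_cursor_reset(&cursor);
    callchain_cursor_append(&cursor, ip0, map0, sym0);
    callchain_cursor_append(&cursor, ip1, map1, sym1);
    /* callchain_append() commits the cursor and grafts the path into the tree */
    callchain_append(&chain_root, &cursor, period);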
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index 6de4313924fb..1a79df9f739f 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -16,7 +16,7 @@ enum chain_mode { | |||
16 | 16 | ||
17 | struct callchain_node { | 17 | struct callchain_node { |
18 | struct callchain_node *parent; | 18 | struct callchain_node *parent; |
19 | struct list_head brothers; | 19 | struct list_head siblings; |
20 | struct list_head children; | 20 | struct list_head children; |
21 | struct list_head val; | 21 | struct list_head val; |
22 | struct rb_node rb_node; /* to sort nodes in an rbtree */ | 22 | struct rb_node rb_node; /* to sort nodes in an rbtree */ |
@@ -26,9 +26,14 @@ struct callchain_node { | |||
26 | u64 children_hit; | 26 | u64 children_hit; |
27 | }; | 27 | }; |
28 | 28 | ||
29 | struct callchain_root { | ||
30 | u64 max_depth; | ||
31 | struct callchain_node node; | ||
32 | }; | ||
33 | |||
29 | struct callchain_param; | 34 | struct callchain_param; |
30 | 35 | ||
31 | typedef void (*sort_chain_func_t)(struct rb_root *, struct callchain_node *, | 36 | typedef void (*sort_chain_func_t)(struct rb_root *, struct callchain_root *, |
32 | u64, struct callchain_param *); | 37 | u64, struct callchain_param *); |
33 | 38 | ||
34 | struct callchain_param { | 39 | struct callchain_param { |
@@ -44,25 +49,87 @@ struct callchain_list { | |||
44 | struct list_head list; | 49 | struct list_head list; |
45 | }; | 50 | }; |
46 | 51 | ||
47 | static inline void callchain_init(struct callchain_node *node) | 52 | /* |
53 | * A callchain cursor is a single linked list that | ||
54 | * lets one feed a callchain progressively. | ||
55 | * It keeps persistent allocated entries to minimize | ||
56 | * allocations. | ||
57 | */ | ||
58 | struct callchain_cursor_node { | ||
59 | u64 ip; | ||
60 | struct map *map; | ||
61 | struct symbol *sym; | ||
62 | struct callchain_cursor_node *next; | ||
63 | }; | ||
64 | |||
65 | struct callchain_cursor { | ||
66 | u64 nr; | ||
67 | struct callchain_cursor_node *first; | ||
68 | struct callchain_cursor_node **last; | ||
69 | u64 pos; | ||
70 | struct callchain_cursor_node *curr; | ||
71 | }; | ||
72 | |||
73 | static inline void callchain_init(struct callchain_root *root) | ||
48 | { | 74 | { |
49 | INIT_LIST_HEAD(&node->brothers); | 75 | INIT_LIST_HEAD(&root->node.siblings); |
50 | INIT_LIST_HEAD(&node->children); | 76 | INIT_LIST_HEAD(&root->node.children); |
51 | INIT_LIST_HEAD(&node->val); | 77 | INIT_LIST_HEAD(&root->node.val); |
52 | 78 | ||
53 | node->children_hit = 0; | 79 | root->node.parent = NULL; |
54 | node->parent = NULL; | 80 | root->node.hit = 0; |
55 | node->hit = 0; | 81 | root->node.children_hit = 0; |
82 | root->max_depth = 0; | ||
56 | } | 83 | } |
57 | 84 | ||
58 | static inline u64 cumul_hits(struct callchain_node *node) | 85 | static inline u64 callchain_cumul_hits(struct callchain_node *node) |
59 | { | 86 | { |
60 | return node->hit + node->children_hit; | 87 | return node->hit + node->children_hit; |
61 | } | 88 | } |
62 | 89 | ||
63 | int register_callchain_param(struct callchain_param *param); | 90 | int callchain_register_param(struct callchain_param *param); |
64 | int append_chain(struct callchain_node *root, struct ip_callchain *chain, | 91 | int callchain_append(struct callchain_root *root, |
65 | struct map_symbol *syms, u64 period); | 92 | struct callchain_cursor *cursor, |
93 | u64 period); | ||
94 | |||
95 | int callchain_merge(struct callchain_cursor *cursor, | ||
96 | struct callchain_root *dst, struct callchain_root *src); | ||
97 | |||
98 | bool ip_callchain__valid(struct ip_callchain *chain, | ||
99 | const union perf_event *event); | ||
100 | /* | ||
101 | * Initialize a cursor before adding entries inside, but keep | ||
102 | * the previously allocated entries as a cache. | ||
103 | */ | ||
104 | static inline void callchain_cursor_reset(struct callchain_cursor *cursor) | ||
105 | { | ||
106 | cursor->nr = 0; | ||
107 | cursor->last = &cursor->first; | ||
108 | } | ||
109 | |||
110 | int callchain_cursor_append(struct callchain_cursor *cursor, u64 ip, | ||
111 | struct map *map, struct symbol *sym); | ||
112 | |||
113 | /* Close a cursor writing session. Initialize for the reader */ | ||
114 | static inline void callchain_cursor_commit(struct callchain_cursor *cursor) | ||
115 | { | ||
116 | cursor->curr = cursor->first; | ||
117 | cursor->pos = 0; | ||
118 | } | ||
119 | |||
120 | /* Cursor reading iteration helpers */ | ||
121 | static inline struct callchain_cursor_node * | ||
122 | callchain_cursor_current(struct callchain_cursor *cursor) | ||
123 | { | ||
124 | if (cursor->pos == cursor->nr) | ||
125 | return NULL; | ||
126 | |||
127 | return cursor->curr; | ||
128 | } | ||
66 | 129 | ||
67 | bool ip_callchain__valid(struct ip_callchain *chain, const event_t *event); | 130 | static inline void callchain_cursor_advance(struct callchain_cursor *cursor) |
131 | { | ||
132 | cursor->curr = cursor->curr->next; | ||
133 | cursor->pos++; | ||
134 | } | ||
68 | #endif /* __PERF_CALLCHAIN_H */ | 135 | #endif /* __PERF_CALLCHAIN_H */ |
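
The reader side of the cursor protocol declared above, as used by fill_node() in callchain.c (illustrative fragment, not part of the patch):

    struct callchain_cursor_node *node;

    callchain_cursor_commit(cursor);        /* rewind: curr = first, pos = 0 */
    while ((node = callchain_cursor_current(cursor)) != NULL) {
            /* node->ip, node->map and node->sym describe one frame */
            callchain_cursor_advance(cursor);
    }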
diff --git a/tools/perf/util/cgroup.c b/tools/perf/util/cgroup.c
new file mode 100644
index 000000000000..96bee5c46008
--- /dev/null
+++ b/tools/perf/util/cgroup.c
@@ -0,0 +1,178 @@ | |||
1 | #include "util.h" | ||
2 | #include "../perf.h" | ||
3 | #include "parse-options.h" | ||
4 | #include "evsel.h" | ||
5 | #include "cgroup.h" | ||
6 | #include "debugfs.h" /* MAX_PATH, STR() */ | ||
7 | #include "evlist.h" | ||
8 | |||
9 | int nr_cgroups; | ||
10 | |||
11 | static int | ||
12 | cgroupfs_find_mountpoint(char *buf, size_t maxlen) | ||
13 | { | ||
14 | FILE *fp; | ||
15 | char mountpoint[MAX_PATH+1], tokens[MAX_PATH+1], type[MAX_PATH+1]; | ||
16 | char *token, *saved_ptr = NULL; | ||
17 | int found = 0; | ||
18 | |||
19 | fp = fopen("/proc/mounts", "r"); | ||
20 | if (!fp) | ||
21 | return -1; | ||
22 | |||
23 | /* | ||
24 | * in order to handle split hierarchy, we need to scan /proc/mounts | ||
25 | * and inspect every cgroupfs mount point to find one that has | ||
26 | * perf_event subsystem | ||
27 | */ | ||
28 | while (fscanf(fp, "%*s %"STR(MAX_PATH)"s %"STR(MAX_PATH)"s %" | ||
29 | STR(MAX_PATH)"s %*d %*d\n", | ||
30 | mountpoint, type, tokens) == 3) { | ||
31 | |||
32 | if (!strcmp(type, "cgroup")) { | ||
33 | |||
34 | token = strtok_r(tokens, ",", &saved_ptr); | ||
35 | |||
36 | while (token != NULL) { | ||
37 | if (!strcmp(token, "perf_event")) { | ||
38 | found = 1; | ||
39 | break; | ||
40 | } | ||
41 | token = strtok_r(NULL, ",", &saved_ptr); | ||
42 | } | ||
43 | } | ||
44 | if (found) | ||
45 | break; | ||
46 | } | ||
47 | fclose(fp); | ||
48 | if (!found) | ||
49 | return -1; | ||
50 | |||
51 | if (strlen(mountpoint) < maxlen) { | ||
52 | strcpy(buf, mountpoint); | ||
53 | return 0; | ||
54 | } | ||
55 | return -1; | ||
56 | } | ||
57 | |||
58 | static int open_cgroup(char *name) | ||
59 | { | ||
60 | char path[MAX_PATH+1]; | ||
61 | char mnt[MAX_PATH+1]; | ||
62 | int fd; | ||
63 | |||
64 | |||
65 | if (cgroupfs_find_mountpoint(mnt, MAX_PATH+1)) | ||
66 | return -1; | ||
67 | |||
68 | snprintf(path, MAX_PATH, "%s/%s", mnt, name); | ||
69 | |||
70 | fd = open(path, O_RDONLY); | ||
71 | if (fd == -1) | ||
72 | fprintf(stderr, "no access to cgroup %s\n", path); | ||
73 | |||
74 | return fd; | ||
75 | } | ||
76 | |||
77 | static int add_cgroup(struct perf_evlist *evlist, char *str) | ||
78 | { | ||
79 | struct perf_evsel *counter; | ||
80 | struct cgroup_sel *cgrp = NULL; | ||
81 | int n; | ||
82 | /* | ||
83 | * check if cgrp is already defined, if so we reuse it | ||
84 | */ | ||
85 | list_for_each_entry(counter, &evlist->entries, node) { | ||
86 | cgrp = counter->cgrp; | ||
87 | if (!cgrp) | ||
88 | continue; | ||
89 | if (!strcmp(cgrp->name, str)) | ||
90 | break; | ||
91 | |||
92 | cgrp = NULL; | ||
93 | } | ||
94 | |||
95 | if (!cgrp) { | ||
96 | cgrp = zalloc(sizeof(*cgrp)); | ||
97 | if (!cgrp) | ||
98 | return -1; | ||
99 | |||
100 | cgrp->name = str; | ||
101 | |||
102 | cgrp->fd = open_cgroup(str); | ||
103 | if (cgrp->fd == -1) { | ||
104 | free(cgrp); | ||
105 | return -1; | ||
106 | } | ||
107 | } | ||
108 | |||
109 | /* | ||
110 | * find the corresponding event: | ||
111 | * when adding cgroup N, we need to find event N | ||
112 | */ | ||
113 | n = 0; | ||
114 | list_for_each_entry(counter, &evlist->entries, node) { | ||
115 | if (n == nr_cgroups) | ||
116 | goto found; | ||
117 | n++; | ||
118 | } | ||
119 | if (cgrp->refcnt == 0) | ||
120 | free(cgrp); | ||
121 | |||
122 | return -1; | ||
123 | found: | ||
124 | cgrp->refcnt++; | ||
125 | counter->cgrp = cgrp; | ||
126 | return 0; | ||
127 | } | ||
128 | |||
129 | void close_cgroup(struct cgroup_sel *cgrp) | ||
130 | { | ||
131 | if (!cgrp) | ||
132 | return; | ||
133 | |||
134 | /* XXX: not reentrant */ | ||
135 | if (--cgrp->refcnt == 0) { | ||
136 | close(cgrp->fd); | ||
137 | free(cgrp->name); | ||
138 | free(cgrp); | ||
139 | } | ||
140 | } | ||
141 | |||
142 | int parse_cgroups(const struct option *opt __used, const char *str, | ||
143 | int unset __used) | ||
144 | { | ||
145 | struct perf_evlist *evlist = *(struct perf_evlist **)opt->value; | ||
146 | const char *p, *e, *eos = str + strlen(str); | ||
147 | char *s; | ||
148 | int ret; | ||
149 | |||
150 | if (list_empty(&evlist->entries)) { | ||
151 | fprintf(stderr, "must define events before cgroups\n"); | ||
152 | return -1; | ||
153 | } | ||
154 | |||
155 | for (;;) { | ||
156 | p = strchr(str, ','); | ||
157 | e = p ? p : eos; | ||
158 | |||
159 | /* allow empty cgroups, i.e., skip */ | ||
160 | if (e - str) { | ||
161 | /* termination added */ | ||
162 | s = strndup(str, e - str); | ||
163 | if (!s) | ||
164 | return -1; | ||
165 | ret = add_cgroup(evlist, s); | ||
166 | if (ret) { | ||
167 | free(s); | ||
168 | return -1; | ||
169 | } | ||
170 | } | ||
171 | /* nr_cgroups is increased even for empty cgroups */ | ||
172 | nr_cgroups++; | ||
173 | if (!p) | ||
174 | break; | ||
175 | str = p+1; | ||
176 | } | ||
177 | return 0; | ||
178 | } | ||
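add_cgroup() reuses an existing struct cgroup_sel when several events name the same cgroup, so each attached counter holds one reference. A hypothetical teardown loop that mirrors this contract (evlist and the counter->cgrp field are the ones used above):

	struct perf_evsel *counter;

	/* drop one reference per counter; the last close frees name, fd and cgrp */
	list_for_each_entry(counter, &evlist->entries, node) {
		close_cgroup(counter->cgrp);
		counter->cgrp = NULL;
	}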
diff --git a/tools/perf/util/cgroup.h b/tools/perf/util/cgroup.h new file mode 100644 index 000000000000..89acd6debdc5 --- /dev/null +++ b/tools/perf/util/cgroup.h | |||
@@ -0,0 +1,17 @@ | |||
1 | #ifndef __CGROUP_H__ | ||
2 | #define __CGROUP_H__ | ||
3 | |||
4 | struct option; | ||
5 | |||
6 | struct cgroup_sel { | ||
7 | char *name; | ||
8 | int fd; | ||
9 | int refcnt; | ||
10 | }; | ||
11 | |||
12 | |||
13 | extern int nr_cgroups; /* number of explicit cgroups defined */ | ||
14 | extern void close_cgroup(struct cgroup_sel *cgrp); | ||
15 | extern int parse_cgroups(const struct option *opt, const char *str, int unset); | ||
16 | |||
17 | #endif /* __CGROUP_H__ */ | ||
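parse_cgroups() is meant to be wired into a builtin's option table. A sketch, assuming the OPT_CALLBACK() macro from parse-options.h and an evsel_list pointer as used by the builtins (flag name and help text are illustrative):

	#include "util/cgroup.h"
	#include "util/parse-options.h"

	static struct perf_evlist *evsel_list;

	static const struct option options[] = {
		/* the Nth cgroup name is attached to the Nth event;
		 * an empty name leaves that event unconfined */
		OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
			     "monitor event in cgroup name only", parse_cgroups),
		OPT_END()
	};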
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c index 0f9b8d7a7d7e..6893eec693ab 100644 --- a/tools/perf/util/cpumap.c +++ b/tools/perf/util/cpumap.c | |||
@@ -4,32 +4,53 @@ | |||
4 | #include <assert.h> | 4 | #include <assert.h> |
5 | #include <stdio.h> | 5 | #include <stdio.h> |
6 | 6 | ||
7 | int cpumap[MAX_NR_CPUS]; | 7 | static struct cpu_map *cpu_map__default_new(void) |
8 | |||
9 | static int default_cpu_map(void) | ||
10 | { | 8 | { |
11 | int nr_cpus, i; | 9 | struct cpu_map *cpus; |
10 | int nr_cpus; | ||
12 | 11 | ||
13 | nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); | 12 | nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); |
14 | assert(nr_cpus <= MAX_NR_CPUS); | 13 | if (nr_cpus < 0) |
15 | assert((int)nr_cpus >= 0); | 14 | return NULL; |
15 | |||
16 | cpus = malloc(sizeof(*cpus) + nr_cpus * sizeof(int)); | ||
17 | if (cpus != NULL) { | ||
18 | int i; | ||
19 | for (i = 0; i < nr_cpus; ++i) | ||
20 | cpus->map[i] = i; | ||
16 | 21 | ||
17 | for (i = 0; i < nr_cpus; ++i) | 22 | cpus->nr = nr_cpus; |
18 | cpumap[i] = i; | 23 | } |
19 | 24 | ||
20 | return nr_cpus; | 25 | return cpus; |
21 | } | 26 | } |
22 | 27 | ||
23 | static int read_all_cpu_map(void) | 28 | static struct cpu_map *cpu_map__trim_new(int nr_cpus, int *tmp_cpus) |
24 | { | 29 | { |
30 | size_t payload_size = nr_cpus * sizeof(int); | ||
31 | struct cpu_map *cpus = malloc(sizeof(*cpus) + payload_size); | ||
32 | |||
33 | if (cpus != NULL) { | ||
34 | cpus->nr = nr_cpus; | ||
35 | memcpy(cpus->map, tmp_cpus, payload_size); | ||
36 | } | ||
37 | |||
38 | return cpus; | ||
39 | } | ||
40 | |||
41 | static struct cpu_map *cpu_map__read_all_cpu_map(void) | ||
42 | { | ||
43 | struct cpu_map *cpus = NULL; | ||
25 | FILE *onlnf; | 44 | FILE *onlnf; |
26 | int nr_cpus = 0; | 45 | int nr_cpus = 0; |
46 | int *tmp_cpus = NULL, *tmp; | ||
47 | int max_entries = 0; | ||
27 | int n, cpu, prev; | 48 | int n, cpu, prev; |
28 | char sep; | 49 | char sep; |
29 | 50 | ||
30 | onlnf = fopen("/sys/devices/system/cpu/online", "r"); | 51 | onlnf = fopen("/sys/devices/system/cpu/online", "r"); |
31 | if (!onlnf) | 52 | if (!onlnf) |
32 | return default_cpu_map(); | 53 | return cpu_map__default_new(); |
33 | 54 | ||
34 | sep = 0; | 55 | sep = 0; |
35 | prev = -1; | 56 | prev = -1; |
@@ -38,12 +59,28 @@ static int read_all_cpu_map(void) | |||
38 | if (n <= 0) | 59 | if (n <= 0) |
39 | break; | 60 | break; |
40 | if (prev >= 0) { | 61 | if (prev >= 0) { |
41 | assert(nr_cpus + cpu - prev - 1 < MAX_NR_CPUS); | 62 | int new_max = nr_cpus + cpu - prev - 1; |
63 | |||
64 | if (new_max >= max_entries) { | ||
65 | max_entries = new_max + MAX_NR_CPUS / 2; | ||
66 | tmp = realloc(tmp_cpus, max_entries * sizeof(int)); | ||
67 | if (tmp == NULL) | ||
68 | goto out_free_tmp; | ||
69 | tmp_cpus = tmp; | ||
70 | } | ||
71 | |||
42 | while (++prev < cpu) | 72 | while (++prev < cpu) |
43 | cpumap[nr_cpus++] = prev; | 73 | tmp_cpus[nr_cpus++] = prev; |
74 | } | ||
75 | if (nr_cpus == max_entries) { | ||
76 | max_entries += MAX_NR_CPUS; | ||
77 | tmp = realloc(tmp_cpus, max_entries * sizeof(int)); | ||
78 | if (tmp == NULL) | ||
79 | goto out_free_tmp; | ||
80 | tmp_cpus = tmp; | ||
44 | } | 81 | } |
45 | assert (nr_cpus < MAX_NR_CPUS); | 82 | |
46 | cpumap[nr_cpus++] = cpu; | 83 | tmp_cpus[nr_cpus++] = cpu; |
47 | if (n == 2 && sep == '-') | 84 | if (n == 2 && sep == '-') |
48 | prev = cpu; | 85 | prev = cpu; |
49 | else | 86 | else |
@@ -51,24 +88,31 @@ static int read_all_cpu_map(void) | |||
51 | if (n == 1 || sep == '\n') | 88 | if (n == 1 || sep == '\n') |
52 | break; | 89 | break; |
53 | } | 90 | } |
54 | fclose(onlnf); | ||
55 | if (nr_cpus > 0) | ||
56 | return nr_cpus; | ||
57 | 91 | ||
58 | return default_cpu_map(); | 92 | if (nr_cpus > 0) |
93 | cpus = cpu_map__trim_new(nr_cpus, tmp_cpus); | ||
94 | else | ||
95 | cpus = cpu_map__default_new(); | ||
96 | out_free_tmp: | ||
97 | free(tmp_cpus); | ||
98 | fclose(onlnf); | ||
99 | return cpus; | ||
59 | } | 100 | } |
60 | 101 | ||
61 | int read_cpu_map(const char *cpu_list) | 102 | struct cpu_map *cpu_map__new(const char *cpu_list) |
62 | { | 103 | { |
104 | struct cpu_map *cpus = NULL; | ||
63 | unsigned long start_cpu, end_cpu = 0; | 105 | unsigned long start_cpu, end_cpu = 0; |
64 | char *p = NULL; | 106 | char *p = NULL; |
65 | int i, nr_cpus = 0; | 107 | int i, nr_cpus = 0; |
108 | int *tmp_cpus = NULL, *tmp; | ||
109 | int max_entries = 0; | ||
66 | 110 | ||
67 | if (!cpu_list) | 111 | if (!cpu_list) |
68 | return read_all_cpu_map(); | 112 | return cpu_map__read_all_cpu_map(); |
69 | 113 | ||
70 | if (!isdigit(*cpu_list)) | 114 | if (!isdigit(*cpu_list)) |
71 | goto invalid; | 115 | goto out; |
72 | 116 | ||
73 | while (isdigit(*cpu_list)) { | 117 | while (isdigit(*cpu_list)) { |
74 | p = NULL; | 118 | p = NULL; |
@@ -94,21 +138,47 @@ int read_cpu_map(const char *cpu_list) | |||
94 | for (; start_cpu <= end_cpu; start_cpu++) { | 138 | for (; start_cpu <= end_cpu; start_cpu++) { |
95 | /* check for duplicates */ | 139 | /* check for duplicates */ |
96 | for (i = 0; i < nr_cpus; i++) | 140 | for (i = 0; i < nr_cpus; i++) |
97 | if (cpumap[i] == (int)start_cpu) | 141 | if (tmp_cpus[i] == (int)start_cpu) |
98 | goto invalid; | 142 | goto invalid; |
99 | 143 | ||
100 | assert(nr_cpus < MAX_NR_CPUS); | 144 | if (nr_cpus == max_entries) { |
101 | cpumap[nr_cpus++] = (int)start_cpu; | 145 | max_entries += MAX_NR_CPUS; |
146 | tmp = realloc(tmp_cpus, max_entries * sizeof(int)); | ||
147 | if (tmp == NULL) | ||
148 | goto invalid; | ||
149 | tmp_cpus = tmp; | ||
150 | } | ||
151 | tmp_cpus[nr_cpus++] = (int)start_cpu; | ||
102 | } | 152 | } |
103 | if (*p) | 153 | if (*p) |
104 | ++p; | 154 | ++p; |
105 | 155 | ||
106 | cpu_list = p; | 156 | cpu_list = p; |
107 | } | 157 | } |
108 | if (nr_cpus > 0) | ||
109 | return nr_cpus; | ||
110 | 158 | ||
111 | return default_cpu_map(); | 159 | if (nr_cpus > 0) |
160 | cpus = cpu_map__trim_new(nr_cpus, tmp_cpus); | ||
161 | else | ||
162 | cpus = cpu_map__default_new(); | ||
112 | invalid: | 163 | invalid: |
113 | return -1; | 164 | free(tmp_cpus); |
165 | out: | ||
166 | return cpus; | ||
167 | } | ||
168 | |||
169 | struct cpu_map *cpu_map__dummy_new(void) | ||
170 | { | ||
171 | struct cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int)); | ||
172 | |||
173 | if (cpus != NULL) { | ||
174 | cpus->nr = 1; | ||
175 | cpus->map[0] = -1; | ||
176 | } | ||
177 | |||
178 | return cpus; | ||
179 | } | ||
180 | |||
181 | void cpu_map__delete(struct cpu_map *map) | ||
182 | { | ||
183 | free(map); | ||
114 | } | 184 | } |
diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h index 3e60f56e490e..072c0a374794 100644 --- a/tools/perf/util/cpumap.h +++ b/tools/perf/util/cpumap.h | |||
@@ -1,7 +1,13 @@ | |||
1 | #ifndef __PERF_CPUMAP_H | 1 | #ifndef __PERF_CPUMAP_H |
2 | #define __PERF_CPUMAP_H | 2 | #define __PERF_CPUMAP_H |
3 | 3 | ||
4 | extern int read_cpu_map(const char *cpu_list); | 4 | struct cpu_map { |
5 | extern int cpumap[]; | 5 | int nr; |
6 | int map[]; | ||
7 | }; | ||
8 | |||
9 | struct cpu_map *cpu_map__new(const char *cpu_list); | ||
10 | struct cpu_map *cpu_map__dummy_new(void); | ||
11 | void cpu_map__delete(struct cpu_map *map); | ||
6 | 12 | ||
7 | #endif /* __PERF_CPUMAP_H */ | 13 | #endif /* __PERF_CPUMAP_H */ |
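The struct cpu_map API replaces the old global cpumap[] array. A minimal sketch of the new calling convention (the "0-3,6" list is just an example, error handling trimmed):

	struct cpu_map *cpus = cpu_map__new("0-3,6");	/* NULL => all online CPUs */
	int i;

	if (cpus == NULL)
		return -1;
	for (i = 0; i < cpus->nr; i++)
		printf("cpu %d\n", cpus->map[i]);
	cpu_map__delete(cpus);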
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c index f9c7e3ad1aa7..155749d74350 100644 --- a/tools/perf/util/debug.c +++ b/tools/perf/util/debug.c | |||
@@ -12,8 +12,8 @@ | |||
12 | #include "debug.h" | 12 | #include "debug.h" |
13 | #include "util.h" | 13 | #include "util.h" |
14 | 14 | ||
15 | int verbose = 0; | 15 | int verbose; |
16 | bool dump_trace = false; | 16 | bool dump_trace = false, quiet = false; |
17 | 17 | ||
18 | int eprintf(int level, const char *fmt, ...) | 18 | int eprintf(int level, const char *fmt, ...) |
19 | { | 19 | { |
@@ -46,22 +46,28 @@ int dump_printf(const char *fmt, ...) | |||
46 | return ret; | 46 | return ret; |
47 | } | 47 | } |
48 | 48 | ||
49 | static int dump_printf_color(const char *fmt, const char *color, ...) | 49 | #ifdef NO_NEWT_SUPPORT |
50 | void ui__warning(const char *format, ...) | ||
50 | { | 51 | { |
51 | va_list args; | 52 | va_list args; |
52 | int ret = 0; | ||
53 | |||
54 | if (dump_trace) { | ||
55 | va_start(args, color); | ||
56 | ret = color_vfprintf(stdout, color, fmt, args); | ||
57 | va_end(args); | ||
58 | } | ||
59 | 53 | ||
60 | return ret; | 54 | va_start(args, format); |
55 | vfprintf(stderr, format, args); | ||
56 | va_end(args); | ||
61 | } | 57 | } |
58 | #endif | ||
62 | 59 | ||
60 | void ui__warning_paranoid(void) | ||
61 | { | ||
62 | ui__warning("Permission error - are you root?\n" | ||
63 | "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n" | ||
64 | " -1 - Not paranoid at all\n" | ||
65 | " 0 - Disallow raw tracepoint access for unpriv\n" | ||
66 | " 1 - Disallow cpu events for unpriv\n" | ||
67 | " 2 - Disallow kernel profiling for unpriv\n"); | ||
68 | } | ||
63 | 69 | ||
64 | void trace_event(event_t *event) | 70 | void trace_event(union perf_event *event) |
65 | { | 71 | { |
66 | unsigned char *raw_event = (void *)event; | 72 | unsigned char *raw_event = (void *)event; |
67 | const char *color = PERF_COLOR_BLUE; | 73 | const char *color = PERF_COLOR_BLUE; |
@@ -70,29 +76,29 @@ void trace_event(event_t *event) | |||
70 | if (!dump_trace) | 76 | if (!dump_trace) |
71 | return; | 77 | return; |
72 | 78 | ||
73 | dump_printf("."); | 79 | printf("."); |
74 | dump_printf_color("\n. ... raw event: size %d bytes\n", color, | 80 | color_fprintf(stdout, color, "\n. ... raw event: size %d bytes\n", |
75 | event->header.size); | 81 | event->header.size); |
76 | 82 | ||
77 | for (i = 0; i < event->header.size; i++) { | 83 | for (i = 0; i < event->header.size; i++) { |
78 | if ((i & 15) == 0) { | 84 | if ((i & 15) == 0) { |
79 | dump_printf("."); | 85 | printf("."); |
80 | dump_printf_color(" %04x: ", color, i); | 86 | color_fprintf(stdout, color, " %04x: ", i); |
81 | } | 87 | } |
82 | 88 | ||
83 | dump_printf_color(" %02x", color, raw_event[i]); | 89 | color_fprintf(stdout, color, " %02x", raw_event[i]); |
84 | 90 | ||
85 | if (((i & 15) == 15) || i == event->header.size-1) { | 91 | if (((i & 15) == 15) || i == event->header.size-1) { |
86 | dump_printf_color(" ", color); | 92 | color_fprintf(stdout, color, " "); |
87 | for (j = 0; j < 15-(i & 15); j++) | 93 | for (j = 0; j < 15-(i & 15); j++) |
88 | dump_printf_color(" ", color); | 94 | color_fprintf(stdout, color, " "); |
89 | for (j = i & ~15; j <= i; j++) { | 95 | for (j = i & ~15; j <= i; j++) { |
90 | dump_printf_color("%c", color, | 96 | color_fprintf(stdout, color, "%c", |
91 | isprint(raw_event[j]) ? | 97 | isprint(raw_event[j]) ? |
92 | raw_event[j] : '.'); | 98 | raw_event[j] : '.'); |
93 | } | 99 | } |
94 | dump_printf_color("\n", color); | 100 | color_fprintf(stdout, color, "\n"); |
95 | } | 101 | } |
96 | } | 102 | } |
97 | dump_printf(".\n"); | 103 | printf(".\n"); |
98 | } | 104 | } |
diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h index 7a17ee061bcb..fd53db47e3de 100644 --- a/tools/perf/util/debug.h +++ b/tools/perf/util/debug.h | |||
@@ -6,10 +6,10 @@ | |||
6 | #include "event.h" | 6 | #include "event.h" |
7 | 7 | ||
8 | extern int verbose; | 8 | extern int verbose; |
9 | extern bool dump_trace; | 9 | extern bool quiet, dump_trace; |
10 | 10 | ||
11 | int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2))); | 11 | int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2))); |
12 | void trace_event(event_t *event); | 12 | void trace_event(union perf_event *event); |
13 | 13 | ||
14 | struct ui_progress; | 14 | struct ui_progress; |
15 | 15 | ||
@@ -35,4 +35,7 @@ int ui_helpline__show_help(const char *format, va_list ap); | |||
35 | #include "ui/progress.h" | 35 | #include "ui/progress.h" |
36 | #endif | 36 | #endif |
37 | 37 | ||
38 | void ui__warning(const char *format, ...) __attribute__((format(printf, 1, 2))); | ||
39 | void ui__warning_paranoid(void); | ||
40 | |||
38 | #endif /* __PERF_DEBUG_H */ | 41 | #endif /* __PERF_DEBUG_H */ |
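Both declarations are printf-format-checked by the attribute above. A trivial, hypothetical call site (fd and filename are placeholders; <errno.h> and <string.h> assumed):

	if (fd < 0)
		ui__warning("failed to open %s: %s\n", filename, strerror(errno));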
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index dab9e754a281..3c1b8a632101 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c | |||
@@ -6,29 +6,48 @@ | |||
6 | #include "string.h" | 6 | #include "string.h" |
7 | #include "strlist.h" | 7 | #include "strlist.h" |
8 | #include "thread.h" | 8 | #include "thread.h" |
9 | #include "thread_map.h" | ||
10 | |||
11 | static const char *perf_event__names[] = { | ||
12 | [0] = "TOTAL", | ||
13 | [PERF_RECORD_MMAP] = "MMAP", | ||
14 | [PERF_RECORD_LOST] = "LOST", | ||
15 | [PERF_RECORD_COMM] = "COMM", | ||
16 | [PERF_RECORD_EXIT] = "EXIT", | ||
17 | [PERF_RECORD_THROTTLE] = "THROTTLE", | ||
18 | [PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE", | ||
19 | [PERF_RECORD_FORK] = "FORK", | ||
20 | [PERF_RECORD_READ] = "READ", | ||
21 | [PERF_RECORD_SAMPLE] = "SAMPLE", | ||
22 | [PERF_RECORD_HEADER_ATTR] = "ATTR", | ||
23 | [PERF_RECORD_HEADER_EVENT_TYPE] = "EVENT_TYPE", | ||
24 | [PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA", | ||
25 | [PERF_RECORD_HEADER_BUILD_ID] = "BUILD_ID", | ||
26 | [PERF_RECORD_FINISHED_ROUND] = "FINISHED_ROUND", | ||
27 | }; | ||
28 | |||
29 | const char *perf_event__name(unsigned int id) | ||
30 | { | ||
31 | if (id >= ARRAY_SIZE(perf_event__names)) | ||
32 | return "INVALID"; | ||
33 | if (!perf_event__names[id]) | ||
34 | return "UNKNOWN"; | ||
35 | return perf_event__names[id]; | ||
36 | } | ||
9 | 37 | ||
10 | const char *event__name[] = { | 38 | static struct perf_sample synth_sample = { |
11 | [0] = "TOTAL", | 39 | .pid = -1, |
12 | [PERF_RECORD_MMAP] = "MMAP", | 40 | .tid = -1, |
13 | [PERF_RECORD_LOST] = "LOST", | 41 | .time = -1, |
14 | [PERF_RECORD_COMM] = "COMM", | 42 | .stream_id = -1, |
15 | [PERF_RECORD_EXIT] = "EXIT", | 43 | .cpu = -1, |
16 | [PERF_RECORD_THROTTLE] = "THROTTLE", | 44 | .period = 1, |
17 | [PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE", | ||
18 | [PERF_RECORD_FORK] = "FORK", | ||
19 | [PERF_RECORD_READ] = "READ", | ||
20 | [PERF_RECORD_SAMPLE] = "SAMPLE", | ||
21 | [PERF_RECORD_HEADER_ATTR] = "ATTR", | ||
22 | [PERF_RECORD_HEADER_EVENT_TYPE] = "EVENT_TYPE", | ||
23 | [PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA", | ||
24 | [PERF_RECORD_HEADER_BUILD_ID] = "BUILD_ID", | ||
25 | }; | 45 | }; |
26 | 46 | ||
27 | static pid_t event__synthesize_comm(pid_t pid, int full, | 47 | static pid_t perf_event__synthesize_comm(union perf_event *event, pid_t pid, |
28 | event__handler_t process, | 48 | int full, perf_event__handler_t process, |
29 | struct perf_session *session) | 49 | struct perf_session *session) |
30 | { | 50 | { |
31 | event_t ev; | ||
32 | char filename[PATH_MAX]; | 51 | char filename[PATH_MAX]; |
33 | char bf[BUFSIZ]; | 52 | char bf[BUFSIZ]; |
34 | FILE *fp; | 53 | FILE *fp; |
@@ -49,34 +68,39 @@ out_race: | |||
49 | return 0; | 68 | return 0; |
50 | } | 69 | } |
51 | 70 | ||
52 | memset(&ev.comm, 0, sizeof(ev.comm)); | 71 | memset(&event->comm, 0, sizeof(event->comm)); |
53 | while (!ev.comm.comm[0] || !ev.comm.pid) { | 72 | |
54 | if (fgets(bf, sizeof(bf), fp) == NULL) | 73 | while (!event->comm.comm[0] || !event->comm.pid) { |
55 | goto out_failure; | 74 | if (fgets(bf, sizeof(bf), fp) == NULL) { |
75 | pr_warning("couldn't get COMM and pgid, malformed %s\n", filename); | ||
76 | goto out; | ||
77 | } | ||
56 | 78 | ||
57 | if (memcmp(bf, "Name:", 5) == 0) { | 79 | if (memcmp(bf, "Name:", 5) == 0) { |
58 | char *name = bf + 5; | 80 | char *name = bf + 5; |
59 | while (*name && isspace(*name)) | 81 | while (*name && isspace(*name)) |
60 | ++name; | 82 | ++name; |
61 | size = strlen(name) - 1; | 83 | size = strlen(name) - 1; |
62 | memcpy(ev.comm.comm, name, size++); | 84 | memcpy(event->comm.comm, name, size++); |
63 | } else if (memcmp(bf, "Tgid:", 5) == 0) { | 85 | } else if (memcmp(bf, "Tgid:", 5) == 0) { |
64 | char *tgids = bf + 5; | 86 | char *tgids = bf + 5; |
65 | while (*tgids && isspace(*tgids)) | 87 | while (*tgids && isspace(*tgids)) |
66 | ++tgids; | 88 | ++tgids; |
67 | tgid = ev.comm.pid = atoi(tgids); | 89 | tgid = event->comm.pid = atoi(tgids); |
68 | } | 90 | } |
69 | } | 91 | } |
70 | 92 | ||
71 | ev.comm.header.type = PERF_RECORD_COMM; | 93 | event->comm.header.type = PERF_RECORD_COMM; |
72 | size = ALIGN(size, sizeof(u64)); | 94 | size = ALIGN(size, sizeof(u64)); |
73 | ev.comm.header.size = sizeof(ev.comm) - (sizeof(ev.comm.comm) - size); | 95 | memset(event->comm.comm + size, 0, session->id_hdr_size); |
74 | 96 | event->comm.header.size = (sizeof(event->comm) - | |
97 | (sizeof(event->comm.comm) - size) + | ||
98 | session->id_hdr_size); | ||
75 | if (!full) { | 99 | if (!full) { |
76 | ev.comm.tid = pid; | 100 | event->comm.tid = pid; |
77 | 101 | ||
78 | process(&ev, session); | 102 | process(event, &synth_sample, session); |
79 | goto out_fclose; | 103 | goto out; |
80 | } | 104 | } |
81 | 105 | ||
82 | snprintf(filename, sizeof(filename), "/proc/%d/task", pid); | 106 | snprintf(filename, sizeof(filename), "/proc/%d/task", pid); |
@@ -91,24 +115,22 @@ out_race: | |||
91 | if (*end) | 115 | if (*end) |
92 | continue; | 116 | continue; |
93 | 117 | ||
94 | ev.comm.tid = pid; | 118 | event->comm.tid = pid; |
95 | 119 | ||
96 | process(&ev, session); | 120 | process(event, &synth_sample, session); |
97 | } | 121 | } |
98 | closedir(tasks); | ||
99 | 122 | ||
100 | out_fclose: | 123 | closedir(tasks); |
124 | out: | ||
101 | fclose(fp); | 125 | fclose(fp); |
102 | return tgid; | ||
103 | 126 | ||
104 | out_failure: | 127 | return tgid; |
105 | pr_warning("couldn't get COMM and pgid, malformed %s\n", filename); | ||
106 | return -1; | ||
107 | } | 128 | } |
108 | 129 | ||
109 | static int event__synthesize_mmap_events(pid_t pid, pid_t tgid, | 130 | static int perf_event__synthesize_mmap_events(union perf_event *event, |
110 | event__handler_t process, | 131 | pid_t pid, pid_t tgid, |
111 | struct perf_session *session) | 132 | perf_event__handler_t process, |
133 | struct perf_session *session) | ||
112 | { | 134 | { |
113 | char filename[PATH_MAX]; | 135 | char filename[PATH_MAX]; |
114 | FILE *fp; | 136 | FILE *fp; |
@@ -124,29 +146,25 @@ static int event__synthesize_mmap_events(pid_t pid, pid_t tgid, | |||
124 | return -1; | 146 | return -1; |
125 | } | 147 | } |
126 | 148 | ||
149 | event->header.type = PERF_RECORD_MMAP; | ||
150 | /* | ||
151 | * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c | ||
152 | */ | ||
153 | event->header.misc = PERF_RECORD_MISC_USER; | ||
154 | |||
127 | while (1) { | 155 | while (1) { |
128 | char bf[BUFSIZ], *pbf = bf; | 156 | char bf[BUFSIZ], *pbf = bf; |
129 | event_t ev = { | ||
130 | .header = { | ||
131 | .type = PERF_RECORD_MMAP, | ||
132 | /* | ||
133 | * Just like the kernel, see __perf_event_mmap | ||
134 | * in kernel/perf_event.c | ||
135 | */ | ||
136 | .misc = PERF_RECORD_MISC_USER, | ||
137 | }, | ||
138 | }; | ||
139 | int n; | 157 | int n; |
140 | size_t size; | 158 | size_t size; |
141 | if (fgets(bf, sizeof(bf), fp) == NULL) | 159 | if (fgets(bf, sizeof(bf), fp) == NULL) |
142 | break; | 160 | break; |
143 | 161 | ||
144 | /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */ | 162 | /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */ |
145 | n = hex2u64(pbf, &ev.mmap.start); | 163 | n = hex2u64(pbf, &event->mmap.start); |
146 | if (n < 0) | 164 | if (n < 0) |
147 | continue; | 165 | continue; |
148 | pbf += n + 1; | 166 | pbf += n + 1; |
149 | n = hex2u64(pbf, &ev.mmap.len); | 167 | n = hex2u64(pbf, &event->mmap.len); |
150 | if (n < 0) | 168 | if (n < 0) |
151 | continue; | 169 | continue; |
152 | pbf += n + 3; | 170 | pbf += n + 3; |
@@ -161,19 +179,21 @@ static int event__synthesize_mmap_events(pid_t pid, pid_t tgid, | |||
161 | continue; | 179 | continue; |
162 | 180 | ||
163 | pbf += 3; | 181 | pbf += 3; |
164 | n = hex2u64(pbf, &ev.mmap.pgoff); | 182 | n = hex2u64(pbf, &event->mmap.pgoff); |
165 | 183 | ||
166 | size = strlen(execname); | 184 | size = strlen(execname); |
167 | execname[size - 1] = '\0'; /* Remove \n */ | 185 | execname[size - 1] = '\0'; /* Remove \n */ |
168 | memcpy(ev.mmap.filename, execname, size); | 186 | memcpy(event->mmap.filename, execname, size); |
169 | size = ALIGN(size, sizeof(u64)); | 187 | size = ALIGN(size, sizeof(u64)); |
170 | ev.mmap.len -= ev.mmap.start; | 188 | event->mmap.len -= event->mmap.start; |
171 | ev.mmap.header.size = (sizeof(ev.mmap) - | 189 | event->mmap.header.size = (sizeof(event->mmap) - |
172 | (sizeof(ev.mmap.filename) - size)); | 190 | (sizeof(event->mmap.filename) - size)); |
173 | ev.mmap.pid = tgid; | 191 | memset(event->mmap.filename + size, 0, session->id_hdr_size); |
174 | ev.mmap.tid = pid; | 192 | event->mmap.header.size += session->id_hdr_size; |
175 | 193 | event->mmap.pid = tgid; | |
176 | process(&ev, session); | 194 | event->mmap.tid = pid; |
195 | |||
196 | process(event, &synth_sample, session); | ||
177 | } | 197 | } |
178 | } | 198 | } |
179 | 199 | ||
@@ -181,26 +201,33 @@ static int event__synthesize_mmap_events(pid_t pid, pid_t tgid, | |||
181 | return 0; | 201 | return 0; |
182 | } | 202 | } |
183 | 203 | ||
184 | int event__synthesize_modules(event__handler_t process, | 204 | int perf_event__synthesize_modules(perf_event__handler_t process, |
185 | struct perf_session *session, | 205 | struct perf_session *session, |
186 | struct machine *machine) | 206 | struct machine *machine) |
187 | { | 207 | { |
188 | struct rb_node *nd; | 208 | struct rb_node *nd; |
189 | struct map_groups *kmaps = &machine->kmaps; | 209 | struct map_groups *kmaps = &machine->kmaps; |
190 | u16 misc; | 210 | union perf_event *event = zalloc((sizeof(event->mmap) + |
211 | session->id_hdr_size)); | ||
212 | if (event == NULL) { | ||
213 | pr_debug("Not enough memory synthesizing mmap event " | ||
214 | "for kernel modules\n"); | ||
215 | return -1; | ||
216 | } | ||
217 | |||
218 | event->header.type = PERF_RECORD_MMAP; | ||
191 | 219 | ||
192 | /* | 220 | /* |
193 | * kernel uses 0 for user space maps, see kernel/perf_event.c | 221 | * kernel uses 0 for user space maps, see kernel/perf_event.c |
194 | * __perf_event_mmap | 222 | * __perf_event_mmap |
195 | */ | 223 | */ |
196 | if (machine__is_host(machine)) | 224 | if (machine__is_host(machine)) |
197 | misc = PERF_RECORD_MISC_KERNEL; | 225 | event->header.misc = PERF_RECORD_MISC_KERNEL; |
198 | else | 226 | else |
199 | misc = PERF_RECORD_MISC_GUEST_KERNEL; | 227 | event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL; |
200 | 228 | ||
201 | for (nd = rb_first(&kmaps->maps[MAP__FUNCTION]); | 229 | for (nd = rb_first(&kmaps->maps[MAP__FUNCTION]); |
202 | nd; nd = rb_next(nd)) { | 230 | nd; nd = rb_next(nd)) { |
203 | event_t ev; | ||
204 | size_t size; | 231 | size_t size; |
205 | struct map *pos = rb_entry(nd, struct map, rb_node); | 232 | struct map *pos = rb_entry(nd, struct map, rb_node); |
206 | 233 | ||
@@ -208,39 +235,87 @@ int event__synthesize_modules(event__handler_t process, | |||
208 | continue; | 235 | continue; |
209 | 236 | ||
210 | size = ALIGN(pos->dso->long_name_len + 1, sizeof(u64)); | 237 | size = ALIGN(pos->dso->long_name_len + 1, sizeof(u64)); |
211 | memset(&ev, 0, sizeof(ev)); | 238 | event->mmap.header.type = PERF_RECORD_MMAP; |
212 | ev.mmap.header.misc = misc; | 239 | event->mmap.header.size = (sizeof(event->mmap) - |
213 | ev.mmap.header.type = PERF_RECORD_MMAP; | 240 | (sizeof(event->mmap.filename) - size)); |
214 | ev.mmap.header.size = (sizeof(ev.mmap) - | 241 | memset(event->mmap.filename + size, 0, session->id_hdr_size); |
215 | (sizeof(ev.mmap.filename) - size)); | 242 | event->mmap.header.size += session->id_hdr_size; |
216 | ev.mmap.start = pos->start; | 243 | event->mmap.start = pos->start; |
217 | ev.mmap.len = pos->end - pos->start; | 244 | event->mmap.len = pos->end - pos->start; |
218 | ev.mmap.pid = machine->pid; | 245 | event->mmap.pid = machine->pid; |
219 | 246 | ||
220 | memcpy(ev.mmap.filename, pos->dso->long_name, | 247 | memcpy(event->mmap.filename, pos->dso->long_name, |
221 | pos->dso->long_name_len + 1); | 248 | pos->dso->long_name_len + 1); |
222 | process(&ev, session); | 249 | process(event, &synth_sample, session); |
223 | } | 250 | } |
224 | 251 | ||
252 | free(event); | ||
225 | return 0; | 253 | return 0; |
226 | } | 254 | } |
227 | 255 | ||
228 | int event__synthesize_thread(pid_t pid, event__handler_t process, | 256 | static int __event__synthesize_thread(union perf_event *comm_event, |
229 | struct perf_session *session) | 257 | union perf_event *mmap_event, |
258 | pid_t pid, perf_event__handler_t process, | ||
259 | struct perf_session *session) | ||
230 | { | 260 | { |
231 | pid_t tgid = event__synthesize_comm(pid, 1, process, session); | 261 | pid_t tgid = perf_event__synthesize_comm(comm_event, pid, 1, process, |
262 | session); | ||
232 | if (tgid == -1) | 263 | if (tgid == -1) |
233 | return -1; | 264 | return -1; |
234 | return event__synthesize_mmap_events(pid, tgid, process, session); | 265 | return perf_event__synthesize_mmap_events(mmap_event, pid, tgid, |
266 | process, session); | ||
267 | } | ||
268 | |||
269 | int perf_event__synthesize_thread_map(struct thread_map *threads, | ||
270 | perf_event__handler_t process, | ||
271 | struct perf_session *session) | ||
272 | { | ||
273 | union perf_event *comm_event, *mmap_event; | ||
274 | int err = -1, thread; | ||
275 | |||
276 | comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size); | ||
277 | if (comm_event == NULL) | ||
278 | goto out; | ||
279 | |||
280 | mmap_event = malloc(sizeof(mmap_event->mmap) + session->id_hdr_size); | ||
281 | if (mmap_event == NULL) | ||
282 | goto out_free_comm; | ||
283 | |||
284 | err = 0; | ||
285 | for (thread = 0; thread < threads->nr; ++thread) { | ||
286 | if (__event__synthesize_thread(comm_event, mmap_event, | ||
287 | threads->map[thread], | ||
288 | process, session)) { | ||
289 | err = -1; | ||
290 | break; | ||
291 | } | ||
292 | } | ||
293 | free(mmap_event); | ||
294 | out_free_comm: | ||
295 | free(comm_event); | ||
296 | out: | ||
297 | return err; | ||
235 | } | 298 | } |
236 | 299 | ||
237 | void event__synthesize_threads(event__handler_t process, | 300 | int perf_event__synthesize_threads(perf_event__handler_t process, |
238 | struct perf_session *session) | 301 | struct perf_session *session) |
239 | { | 302 | { |
240 | DIR *proc; | 303 | DIR *proc; |
241 | struct dirent dirent, *next; | 304 | struct dirent dirent, *next; |
305 | union perf_event *comm_event, *mmap_event; | ||
306 | int err = -1; | ||
307 | |||
308 | comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size); | ||
309 | if (comm_event == NULL) | ||
310 | goto out; | ||
311 | |||
312 | mmap_event = malloc(sizeof(mmap_event->mmap) + session->id_hdr_size); | ||
313 | if (mmap_event == NULL) | ||
314 | goto out_free_comm; | ||
242 | 315 | ||
243 | proc = opendir("/proc"); | 316 | proc = opendir("/proc"); |
317 | if (proc == NULL) | ||
318 | goto out_free_mmap; | ||
244 | 319 | ||
245 | while (!readdir_r(proc, &dirent, &next) && next) { | 320 | while (!readdir_r(proc, &dirent, &next) && next) { |
246 | char *end; | 321 | char *end; |
@@ -249,10 +324,18 @@ void event__synthesize_threads(event__handler_t process, | |||
249 | if (*end) /* only interested in proper numerical dirents */ | 324 | if (*end) /* only interested in proper numerical dirents */ |
250 | continue; | 325 | continue; |
251 | 326 | ||
252 | event__synthesize_thread(pid, process, session); | 327 | __event__synthesize_thread(comm_event, mmap_event, pid, |
328 | process, session); | ||
253 | } | 329 | } |
254 | 330 | ||
255 | closedir(proc); | 331 | closedir(proc); |
332 | err = 0; | ||
333 | out_free_mmap: | ||
334 | free(mmap_event); | ||
335 | out_free_comm: | ||
336 | free(comm_event); | ||
337 | out: | ||
338 | return err; | ||
256 | } | 339 | } |
257 | 340 | ||
258 | struct process_symbol_args { | 341 | struct process_symbol_args { |
@@ -260,7 +343,8 @@ struct process_symbol_args { | |||
260 | u64 start; | 343 | u64 start; |
261 | }; | 344 | }; |
262 | 345 | ||
263 | static int find_symbol_cb(void *arg, const char *name, char type, u64 start) | 346 | static int find_symbol_cb(void *arg, const char *name, char type, |
347 | u64 start, u64 end __used) | ||
264 | { | 348 | { |
265 | struct process_symbol_args *args = arg; | 349 | struct process_symbol_args *args = arg; |
266 | 350 | ||
@@ -276,28 +360,30 @@ static int find_symbol_cb(void *arg, const char *name, char type, u64 start) | |||
276 | return 1; | 360 | return 1; |
277 | } | 361 | } |
278 | 362 | ||
279 | int event__synthesize_kernel_mmap(event__handler_t process, | 363 | int perf_event__synthesize_kernel_mmap(perf_event__handler_t process, |
280 | struct perf_session *session, | 364 | struct perf_session *session, |
281 | struct machine *machine, | 365 | struct machine *machine, |
282 | const char *symbol_name) | 366 | const char *symbol_name) |
283 | { | 367 | { |
284 | size_t size; | 368 | size_t size; |
285 | const char *filename, *mmap_name; | 369 | const char *filename, *mmap_name; |
286 | char path[PATH_MAX]; | 370 | char path[PATH_MAX]; |
287 | char name_buff[PATH_MAX]; | 371 | char name_buff[PATH_MAX]; |
288 | struct map *map; | 372 | struct map *map; |
289 | 373 | int err; | |
290 | event_t ev = { | ||
291 | .header = { | ||
292 | .type = PERF_RECORD_MMAP, | ||
293 | }, | ||
294 | }; | ||
295 | /* | 374 | /* |
296 | * We should get this from /sys/kernel/sections/.text, but till that is | 375 | * We should get this from /sys/kernel/sections/.text, but till that is |
297 | * available use this, and after it is use this as a fallback for older | 376 | * available use this, and after it is use this as a fallback for older |
298 | * kernels. | 377 | * kernels. |
299 | */ | 378 | */ |
300 | struct process_symbol_args args = { .name = symbol_name, }; | 379 | struct process_symbol_args args = { .name = symbol_name, }; |
380 | union perf_event *event = zalloc((sizeof(event->mmap) + | ||
381 | session->id_hdr_size)); | ||
382 | if (event == NULL) { | ||
383 | pr_debug("Not enough memory synthesizing mmap event " | ||
384 | "for kernel modules\n"); | ||
385 | return -1; | ||
386 | } | ||
301 | 387 | ||
302 | mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff)); | 388 | mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff)); |
303 | if (machine__is_host(machine)) { | 389 | if (machine__is_host(machine)) { |
@@ -305,10 +391,10 @@ int event__synthesize_kernel_mmap(event__handler_t process, | |||
305 | * kernel uses PERF_RECORD_MISC_USER for user space maps, | 391 | * kernel uses PERF_RECORD_MISC_USER for user space maps, |
306 | * see kernel/perf_event.c __perf_event_mmap | 392 | * see kernel/perf_event.c __perf_event_mmap |
307 | */ | 393 | */ |
308 | ev.header.misc = PERF_RECORD_MISC_KERNEL; | 394 | event->header.misc = PERF_RECORD_MISC_KERNEL; |
309 | filename = "/proc/kallsyms"; | 395 | filename = "/proc/kallsyms"; |
310 | } else { | 396 | } else { |
311 | ev.header.misc = PERF_RECORD_MISC_GUEST_KERNEL; | 397 | event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL; |
312 | if (machine__is_default_guest(machine)) | 398 | if (machine__is_default_guest(machine)) |
313 | filename = (char *) symbol_conf.default_guest_kallsyms; | 399 | filename = (char *) symbol_conf.default_guest_kallsyms; |
314 | else { | 400 | else { |
@@ -321,54 +407,32 @@ int event__synthesize_kernel_mmap(event__handler_t process, | |||
321 | return -ENOENT; | 407 | return -ENOENT; |
322 | 408 | ||
323 | map = machine->vmlinux_maps[MAP__FUNCTION]; | 409 | map = machine->vmlinux_maps[MAP__FUNCTION]; |
324 | size = snprintf(ev.mmap.filename, sizeof(ev.mmap.filename), | 410 | size = snprintf(event->mmap.filename, sizeof(event->mmap.filename), |
325 | "%s%s", mmap_name, symbol_name) + 1; | 411 | "%s%s", mmap_name, symbol_name) + 1; |
326 | size = ALIGN(size, sizeof(u64)); | 412 | size = ALIGN(size, sizeof(u64)); |
327 | ev.mmap.header.size = (sizeof(ev.mmap) - | 413 | event->mmap.header.type = PERF_RECORD_MMAP; |
328 | (sizeof(ev.mmap.filename) - size)); | 414 | event->mmap.header.size = (sizeof(event->mmap) - |
329 | ev.mmap.pgoff = args.start; | 415 | (sizeof(event->mmap.filename) - size) + session->id_hdr_size); |
330 | ev.mmap.start = map->start; | 416 | event->mmap.pgoff = args.start; |
331 | ev.mmap.len = map->end - ev.mmap.start; | 417 | event->mmap.start = map->start; |
332 | ev.mmap.pid = machine->pid; | 418 | event->mmap.len = map->end - event->mmap.start; |
333 | 419 | event->mmap.pid = machine->pid; | |
334 | return process(&ev, session); | 420 | |
335 | } | 421 | err = process(event, &synth_sample, session); |
336 | 422 | free(event); | |
337 | static void thread__comm_adjust(struct thread *self, struct hists *hists) | 423 | |
338 | { | 424 | return err; |
339 | char *comm = self->comm; | ||
340 | |||
341 | if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep && | ||
342 | (!symbol_conf.comm_list || | ||
343 | strlist__has_entry(symbol_conf.comm_list, comm))) { | ||
344 | u16 slen = strlen(comm); | ||
345 | |||
346 | if (hists__new_col_len(hists, HISTC_COMM, slen)) | ||
347 | hists__set_col_len(hists, HISTC_THREAD, slen + 6); | ||
348 | } | ||
349 | } | 425 | } |
350 | 426 | ||
351 | static int thread__set_comm_adjust(struct thread *self, const char *comm, | 427 | int perf_event__process_comm(union perf_event *event, |
352 | struct hists *hists) | 428 | struct perf_sample *sample __used, |
353 | { | 429 | struct perf_session *session) |
354 | int ret = thread__set_comm(self, comm); | ||
355 | |||
356 | if (ret) | ||
357 | return ret; | ||
358 | |||
359 | thread__comm_adjust(self, hists); | ||
360 | |||
361 | return 0; | ||
362 | } | ||
363 | |||
364 | int event__process_comm(event_t *self, struct perf_session *session) | ||
365 | { | 430 | { |
366 | struct thread *thread = perf_session__findnew(session, self->comm.tid); | 431 | struct thread *thread = perf_session__findnew(session, event->comm.tid); |
367 | 432 | ||
368 | dump_printf(": %s:%d\n", self->comm.comm, self->comm.tid); | 433 | dump_printf(": %s:%d\n", event->comm.comm, event->comm.tid); |
369 | 434 | ||
370 | if (thread == NULL || thread__set_comm_adjust(thread, self->comm.comm, | 435 | if (thread == NULL || thread__set_comm(thread, event->comm.comm)) { |
371 | &session->hists)) { | ||
372 | dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n"); | 436 | dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n"); |
373 | return -1; | 437 | return -1; |
374 | } | 438 | } |
@@ -376,27 +440,31 @@ int event__process_comm(event_t *self, struct perf_session *session) | |||
376 | return 0; | 440 | return 0; |
377 | } | 441 | } |
378 | 442 | ||
379 | int event__process_lost(event_t *self, struct perf_session *session) | 443 | int perf_event__process_lost(union perf_event *event, |
444 | struct perf_sample *sample __used, | ||
445 | struct perf_session *session) | ||
380 | { | 446 | { |
381 | dump_printf(": id:%Ld: lost:%Ld\n", self->lost.id, self->lost.lost); | 447 | dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n", |
382 | session->hists.stats.total_lost += self->lost.lost; | 448 | event->lost.id, event->lost.lost); |
449 | session->hists.stats.total_lost += event->lost.lost; | ||
383 | return 0; | 450 | return 0; |
384 | } | 451 | } |
385 | 452 | ||
386 | static void event_set_kernel_mmap_len(struct map **maps, event_t *self) | 453 | static void perf_event__set_kernel_mmap_len(union perf_event *event, |
454 | struct map **maps) | ||
387 | { | 455 | { |
388 | maps[MAP__FUNCTION]->start = self->mmap.start; | 456 | maps[MAP__FUNCTION]->start = event->mmap.start; |
389 | maps[MAP__FUNCTION]->end = self->mmap.start + self->mmap.len; | 457 | maps[MAP__FUNCTION]->end = event->mmap.start + event->mmap.len; |
390 | /* | 458 | /* |
391 | * Be a bit paranoid here, some perf.data file came with | 459 | * Be a bit paranoid here, some perf.data file came with |
392 | * a zero sized synthesized MMAP event for the kernel. | 460 | * a zero sized synthesized MMAP event for the kernel. |
393 | */ | 461 | */ |
394 | if (maps[MAP__FUNCTION]->end == 0) | 462 | if (maps[MAP__FUNCTION]->end == 0) |
395 | maps[MAP__FUNCTION]->end = ~0UL; | 463 | maps[MAP__FUNCTION]->end = ~0ULL; |
396 | } | 464 | } |
397 | 465 | ||
398 | static int event__process_kernel_mmap(event_t *self, | 466 | static int perf_event__process_kernel_mmap(union perf_event *event, |
399 | struct perf_session *session) | 467 | struct perf_session *session) |
400 | { | 468 | { |
401 | struct map *map; | 469 | struct map *map; |
402 | char kmmap_prefix[PATH_MAX]; | 470 | char kmmap_prefix[PATH_MAX]; |
@@ -404,9 +472,9 @@ static int event__process_kernel_mmap(event_t *self, | |||
404 | enum dso_kernel_type kernel_type; | 472 | enum dso_kernel_type kernel_type; |
405 | bool is_kernel_mmap; | 473 | bool is_kernel_mmap; |
406 | 474 | ||
407 | machine = perf_session__findnew_machine(session, self->mmap.pid); | 475 | machine = perf_session__findnew_machine(session, event->mmap.pid); |
408 | if (!machine) { | 476 | if (!machine) { |
409 | pr_err("Can't find id %d's machine\n", self->mmap.pid); | 477 | pr_err("Can't find id %d's machine\n", event->mmap.pid); |
410 | goto out_problem; | 478 | goto out_problem; |
411 | } | 479 | } |
412 | 480 | ||
@@ -416,17 +484,17 @@ static int event__process_kernel_mmap(event_t *self, | |||
416 | else | 484 | else |
417 | kernel_type = DSO_TYPE_GUEST_KERNEL; | 485 | kernel_type = DSO_TYPE_GUEST_KERNEL; |
418 | 486 | ||
419 | is_kernel_mmap = memcmp(self->mmap.filename, | 487 | is_kernel_mmap = memcmp(event->mmap.filename, |
420 | kmmap_prefix, | 488 | kmmap_prefix, |
421 | strlen(kmmap_prefix)) == 0; | 489 | strlen(kmmap_prefix)) == 0; |
422 | if (self->mmap.filename[0] == '/' || | 490 | if (event->mmap.filename[0] == '/' || |
423 | (!is_kernel_mmap && self->mmap.filename[0] == '[')) { | 491 | (!is_kernel_mmap && event->mmap.filename[0] == '[')) { |
424 | 492 | ||
425 | char short_module_name[1024]; | 493 | char short_module_name[1024]; |
426 | char *name, *dot; | 494 | char *name, *dot; |
427 | 495 | ||
428 | if (self->mmap.filename[0] == '/') { | 496 | if (event->mmap.filename[0] == '/') { |
429 | name = strrchr(self->mmap.filename, '/'); | 497 | name = strrchr(event->mmap.filename, '/'); |
430 | if (name == NULL) | 498 | if (name == NULL) |
431 | goto out_problem; | 499 | goto out_problem; |
432 | 500 | ||
@@ -438,10 +506,10 @@ static int event__process_kernel_mmap(event_t *self, | |||
438 | "[%.*s]", (int)(dot - name), name); | 506 | "[%.*s]", (int)(dot - name), name); |
439 | strxfrchar(short_module_name, '-', '_'); | 507 | strxfrchar(short_module_name, '-', '_'); |
440 | } else | 508 | } else |
441 | strcpy(short_module_name, self->mmap.filename); | 509 | strcpy(short_module_name, event->mmap.filename); |
442 | 510 | ||
443 | map = machine__new_module(machine, self->mmap.start, | 511 | map = machine__new_module(machine, event->mmap.start, |
444 | self->mmap.filename); | 512 | event->mmap.filename); |
445 | if (map == NULL) | 513 | if (map == NULL) |
446 | goto out_problem; | 514 | goto out_problem; |
447 | 515 | ||
@@ -451,9 +519,9 @@ static int event__process_kernel_mmap(event_t *self, | |||
451 | 519 | ||
452 | map->dso->short_name = name; | 520 | map->dso->short_name = name; |
453 | map->dso->sname_alloc = 1; | 521 | map->dso->sname_alloc = 1; |
454 | map->end = map->start + self->mmap.len; | 522 | map->end = map->start + event->mmap.len; |
455 | } else if (is_kernel_mmap) { | 523 | } else if (is_kernel_mmap) { |
456 | const char *symbol_name = (self->mmap.filename + | 524 | const char *symbol_name = (event->mmap.filename + |
457 | strlen(kmmap_prefix)); | 525 | strlen(kmmap_prefix)); |
458 | /* | 526 | /* |
459 | * Should be there already, from the build-id table in | 527 | * Should be there already, from the build-id table in |
@@ -468,10 +536,19 @@ static int event__process_kernel_mmap(event_t *self, | |||
468 | if (__machine__create_kernel_maps(machine, kernel) < 0) | 536 | if (__machine__create_kernel_maps(machine, kernel) < 0) |
469 | goto out_problem; | 537 | goto out_problem; |
470 | 538 | ||
471 | event_set_kernel_mmap_len(machine->vmlinux_maps, self); | 539 | perf_event__set_kernel_mmap_len(event, machine->vmlinux_maps); |
472 | perf_session__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, | 540 | |
473 | symbol_name, | 541 | /* |
474 | self->mmap.pgoff); | 542 | * Avoid using a zero address (kptr_restrict) for the ref reloc |
543 | * symbol. Effectively having zero here means that at record | ||
544 | * time /proc/sys/kernel/kptr_restrict was non zero. | ||
545 | */ | ||
546 | if (event->mmap.pgoff != 0) { | ||
547 | perf_session__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, | ||
548 | symbol_name, | ||
549 | event->mmap.pgoff); | ||
550 | } | ||
551 | |||
475 | if (machine__is_default_guest(machine)) { | 552 | if (machine__is_default_guest(machine)) { |
476 | /* | 553 | /* |
477 | * preload dso of guest kernel and modules | 554 | * preload dso of guest kernel and modules |
@@ -485,21 +562,23 @@ out_problem: | |||
485 | return -1; | 562 | return -1; |
486 | } | 563 | } |
487 | 564 | ||
488 | int event__process_mmap(event_t *self, struct perf_session *session) | 565 | int perf_event__process_mmap(union perf_event *event, |
566 | struct perf_sample *sample __used, | ||
567 | struct perf_session *session) | ||
489 | { | 568 | { |
490 | struct machine *machine; | 569 | struct machine *machine; |
491 | struct thread *thread; | 570 | struct thread *thread; |
492 | struct map *map; | 571 | struct map *map; |
493 | u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; | 572 | u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; |
494 | int ret = 0; | 573 | int ret = 0; |
495 | 574 | ||
496 | dump_printf(" %d/%d: [%#Lx(%#Lx) @ %#Lx]: %s\n", | 575 | dump_printf(" %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %s\n", |
497 | self->mmap.pid, self->mmap.tid, self->mmap.start, | 576 | event->mmap.pid, event->mmap.tid, event->mmap.start, |
498 | self->mmap.len, self->mmap.pgoff, self->mmap.filename); | 577 | event->mmap.len, event->mmap.pgoff, event->mmap.filename); |
499 | 578 | ||
500 | if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL || | 579 | if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL || |
501 | cpumode == PERF_RECORD_MISC_KERNEL) { | 580 | cpumode == PERF_RECORD_MISC_KERNEL) { |
502 | ret = event__process_kernel_mmap(self, session); | 581 | ret = perf_event__process_kernel_mmap(event, session); |
503 | if (ret < 0) | 582 | if (ret < 0) |
504 | goto out_problem; | 583 | goto out_problem; |
505 | return 0; | 584 | return 0; |
@@ -508,12 +587,12 @@ int event__process_mmap(event_t *self, struct perf_session *session) | |||
508 | machine = perf_session__find_host_machine(session); | 587 | machine = perf_session__find_host_machine(session); |
509 | if (machine == NULL) | 588 | if (machine == NULL) |
510 | goto out_problem; | 589 | goto out_problem; |
511 | thread = perf_session__findnew(session, self->mmap.pid); | 590 | thread = perf_session__findnew(session, event->mmap.pid); |
512 | if (thread == NULL) | 591 | if (thread == NULL) |
513 | goto out_problem; | 592 | goto out_problem; |
514 | map = map__new(&machine->user_dsos, self->mmap.start, | 593 | map = map__new(&machine->user_dsos, event->mmap.start, |
515 | self->mmap.len, self->mmap.pgoff, | 594 | event->mmap.len, event->mmap.pgoff, |
516 | self->mmap.pid, self->mmap.filename, | 595 | event->mmap.pid, event->mmap.filename, |
517 | MAP__FUNCTION); | 596 | MAP__FUNCTION); |
518 | if (map == NULL) | 597 | if (map == NULL) |
519 | goto out_problem; | 598 | goto out_problem; |
@@ -526,15 +605,17 @@ out_problem: | |||
526 | return 0; | 605 | return 0; |
527 | } | 606 | } |
528 | 607 | ||
529 | int event__process_task(event_t *self, struct perf_session *session) | 608 | int perf_event__process_task(union perf_event *event, |
609 | struct perf_sample *sample __used, | ||
610 | struct perf_session *session) | ||
530 | { | 611 | { |
531 | struct thread *thread = perf_session__findnew(session, self->fork.tid); | 612 | struct thread *thread = perf_session__findnew(session, event->fork.tid); |
532 | struct thread *parent = perf_session__findnew(session, self->fork.ptid); | 613 | struct thread *parent = perf_session__findnew(session, event->fork.ptid); |
533 | 614 | ||
534 | dump_printf("(%d:%d):(%d:%d)\n", self->fork.pid, self->fork.tid, | 615 | dump_printf("(%d:%d):(%d:%d)\n", event->fork.pid, event->fork.tid, |
535 | self->fork.ppid, self->fork.ptid); | 616 | event->fork.ppid, event->fork.ptid); |
536 | 617 | ||
537 | if (self->header.type == PERF_RECORD_EXIT) { | 618 | if (event->header.type == PERF_RECORD_EXIT) { |
538 | perf_session__remove_thread(session, thread); | 619 | perf_session__remove_thread(session, thread); |
539 | return 0; | 620 | return 0; |
540 | } | 621 | } |
@@ -548,19 +629,22 @@ int event__process_task(event_t *self, struct perf_session *session) | |||
548 | return 0; | 629 | return 0; |
549 | } | 630 | } |
550 | 631 | ||
551 | int event__process(event_t *event, struct perf_session *session) | 632 | int perf_event__process(union perf_event *event, struct perf_sample *sample, |
633 | struct perf_session *session) | ||
552 | { | 634 | { |
553 | switch (event->header.type) { | 635 | switch (event->header.type) { |
554 | case PERF_RECORD_COMM: | 636 | case PERF_RECORD_COMM: |
555 | event__process_comm(event, session); | 637 | perf_event__process_comm(event, sample, session); |
556 | break; | 638 | break; |
557 | case PERF_RECORD_MMAP: | 639 | case PERF_RECORD_MMAP: |
558 | event__process_mmap(event, session); | 640 | perf_event__process_mmap(event, sample, session); |
559 | break; | 641 | break; |
560 | case PERF_RECORD_FORK: | 642 | case PERF_RECORD_FORK: |
561 | case PERF_RECORD_EXIT: | 643 | case PERF_RECORD_EXIT: |
562 | event__process_task(event, session); | 644 | perf_event__process_task(event, sample, session); |
563 | break; | 645 | break; |
646 | case PERF_RECORD_LOST: | ||
647 | perf_event__process_lost(event, sample, session); | ||
564 | default: | 648 | default: |
565 | break; | 649 | break; |
566 | } | 650 | } |
@@ -635,7 +719,7 @@ try_again: | |||
635 | * in the whole kernel symbol list. | 719 | * in the whole kernel symbol list. |
636 | */ | 720 | */ |
637 | if ((long long)al->addr < 0 && | 721 | if ((long long)al->addr < 0 && |
638 | cpumode == PERF_RECORD_MISC_KERNEL && | 722 | cpumode == PERF_RECORD_MISC_USER && |
639 | machine && mg != &machine->kmaps) { | 723 | machine && mg != &machine->kmaps) { |
640 | mg = &machine->kmaps; | 724 | mg = &machine->kmaps; |
641 | goto try_again; | 725 | goto try_again; |
@@ -657,49 +741,15 @@ void thread__find_addr_location(struct thread *self, | |||
657 | al->sym = NULL; | 741 | al->sym = NULL; |
658 | } | 742 | } |
659 | 743 | ||
660 | static void dso__calc_col_width(struct dso *self, struct hists *hists) | 744 | int perf_event__preprocess_sample(const union perf_event *event, |
661 | { | 745 | struct perf_session *session, |
662 | if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep && | 746 | struct addr_location *al, |
663 | (!symbol_conf.dso_list || | 747 | struct perf_sample *sample, |
664 | strlist__has_entry(symbol_conf.dso_list, self->name))) { | 748 | symbol_filter_t filter) |
665 | u16 slen = dso__name_len(self); | ||
666 | hists__new_col_len(hists, HISTC_DSO, slen); | ||
667 | } | ||
668 | |||
669 | self->slen_calculated = 1; | ||
670 | } | ||
671 | |||
672 | int event__preprocess_sample(const event_t *self, struct perf_session *session, | ||
673 | struct addr_location *al, struct sample_data *data, | ||
674 | symbol_filter_t filter) | ||
675 | { | 749 | { |
676 | u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; | 750 | u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; |
677 | struct thread *thread; | 751 | struct thread *thread = perf_session__findnew(session, event->ip.pid); |
678 | |||
679 | event__parse_sample(self, session->sample_type, data); | ||
680 | |||
681 | dump_printf("(IP, %d): %d/%d: %#Lx period: %Ld cpu:%d\n", | ||
682 | self->header.misc, data->pid, data->tid, data->ip, | ||
683 | data->period, data->cpu); | ||
684 | 752 | ||
685 | if (session->sample_type & PERF_SAMPLE_CALLCHAIN) { | ||
686 | unsigned int i; | ||
687 | |||
688 | dump_printf("... chain: nr:%Lu\n", data->callchain->nr); | ||
689 | |||
690 | if (!ip_callchain__valid(data->callchain, self)) { | ||
691 | pr_debug("call-chain problem with event, " | ||
692 | "skipping it.\n"); | ||
693 | goto out_filtered; | ||
694 | } | ||
695 | |||
696 | if (dump_trace) { | ||
697 | for (i = 0; i < data->callchain->nr; i++) | ||
698 | dump_printf("..... %2d: %016Lx\n", | ||
699 | i, data->callchain->ips[i]); | ||
700 | } | ||
701 | } | ||
702 | thread = perf_session__findnew(session, self->ip.pid); | ||
703 | if (thread == NULL) | 753 | if (thread == NULL) |
704 | return -1; | 754 | return -1; |
705 | 755 | ||
@@ -720,12 +770,12 @@ int event__preprocess_sample(const event_t *self, struct perf_session *session, | |||
720 | machine__create_kernel_maps(&session->host_machine); | 770 | machine__create_kernel_maps(&session->host_machine); |
721 | 771 | ||
722 | thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION, | 772 | thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION, |
723 | self->ip.pid, self->ip.ip, al); | 773 | event->ip.pid, event->ip.ip, al); |
724 | dump_printf(" ...... dso: %s\n", | 774 | dump_printf(" ...... dso: %s\n", |
725 | al->map ? al->map->dso->long_name : | 775 | al->map ? al->map->dso->long_name : |
726 | al->level == 'H' ? "[hypervisor]" : "<not found>"); | 776 | al->level == 'H' ? "[hypervisor]" : "<not found>"); |
727 | al->sym = NULL; | 777 | al->sym = NULL; |
728 | al->cpu = data->cpu; | 778 | al->cpu = sample->cpu; |
729 | 779 | ||
730 | if (al->map) { | 780 | if (al->map) { |
731 | if (symbol_conf.dso_list && | 781 | if (symbol_conf.dso_list && |
@@ -736,23 +786,8 @@ int event__preprocess_sample(const event_t *self, struct perf_session *session, | |||
736 | strlist__has_entry(symbol_conf.dso_list, | 786 | strlist__has_entry(symbol_conf.dso_list, |
737 | al->map->dso->long_name))))) | 787 | al->map->dso->long_name))))) |
738 | goto out_filtered; | 788 | goto out_filtered; |
739 | /* | ||
740 | * We have to do this here as we may have a dso with no symbol | ||
741 | * hit that has a name longer than the ones with symbols | ||
742 | * sampled. | ||
743 | */ | ||
744 | if (!sort_dso.elide && !al->map->dso->slen_calculated) | ||
745 | dso__calc_col_width(al->map->dso, &session->hists); | ||
746 | 789 | ||
747 | al->sym = map__find_symbol(al->map, al->addr, filter); | 790 | al->sym = map__find_symbol(al->map, al->addr, filter); |
748 | } else { | ||
749 | const unsigned int unresolved_col_width = BITS_PER_LONG / 4; | ||
750 | |||
751 | if (hists__col_len(&session->hists, HISTC_DSO) < unresolved_col_width && | ||
752 | !symbol_conf.col_width_list_str && !symbol_conf.field_sep && | ||
753 | !symbol_conf.dso_list) | ||
754 | hists__set_col_len(&session->hists, HISTC_DSO, | ||
755 | unresolved_col_width); | ||
756 | } | 791 | } |
757 | 792 | ||
758 | if (symbol_conf.sym_list && al->sym && | 793 | if (symbol_conf.sym_list && al->sym && |
@@ -765,72 +800,3 @@ out_filtered: | |||
765 | al->filtered = true; | 800 | al->filtered = true; |
766 | return 0; | 801 | return 0; |
767 | } | 802 | } |
768 | |||
769 | int event__parse_sample(const event_t *event, u64 type, struct sample_data *data) | ||
770 | { | ||
771 | const u64 *array = event->sample.array; | ||
772 | |||
773 | if (type & PERF_SAMPLE_IP) { | ||
774 | data->ip = event->ip.ip; | ||
775 | array++; | ||
776 | } | ||
777 | |||
778 | if (type & PERF_SAMPLE_TID) { | ||
779 | u32 *p = (u32 *)array; | ||
780 | data->pid = p[0]; | ||
781 | data->tid = p[1]; | ||
782 | array++; | ||
783 | } | ||
784 | |||
785 | if (type & PERF_SAMPLE_TIME) { | ||
786 | data->time = *array; | ||
787 | array++; | ||
788 | } | ||
789 | |||
790 | if (type & PERF_SAMPLE_ADDR) { | ||
791 | data->addr = *array; | ||
792 | array++; | ||
793 | } | ||
794 | |||
795 | data->id = -1ULL; | ||
796 | if (type & PERF_SAMPLE_ID) { | ||
797 | data->id = *array; | ||
798 | array++; | ||
799 | } | ||
800 | |||
801 | if (type & PERF_SAMPLE_STREAM_ID) { | ||
802 | data->stream_id = *array; | ||
803 | array++; | ||
804 | } | ||
805 | |||
806 | if (type & PERF_SAMPLE_CPU) { | ||
807 | u32 *p = (u32 *)array; | ||
808 | data->cpu = *p; | ||
809 | array++; | ||
810 | } else | ||
811 | data->cpu = -1; | ||
812 | |||
813 | if (type & PERF_SAMPLE_PERIOD) { | ||
814 | data->period = *array; | ||
815 | array++; | ||
816 | } | ||
817 | |||
818 | if (type & PERF_SAMPLE_READ) { | ||
819 | pr_debug("PERF_SAMPLE_READ is unsuported for now\n"); | ||
820 | return -1; | ||
821 | } | ||
822 | |||
823 | if (type & PERF_SAMPLE_CALLCHAIN) { | ||
824 | data->callchain = (struct ip_callchain *)array; | ||
825 | array += 1 + data->callchain->nr; | ||
826 | } | ||
827 | |||
828 | if (type & PERF_SAMPLE_RAW) { | ||
829 | u32 *p = (u32 *)array; | ||
830 | data->raw_size = *p; | ||
831 | p++; | ||
832 | data->raw_data = p; | ||
833 | } | ||
834 | |||
835 | return 0; | ||
836 | } | ||
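Every synthesis routine now feeds events through a perf_event__handler_t together with a struct perf_sample. A minimal, purely illustrative callback that only prints the event type (the __used annotation comes from util.h):

	static int print_event(union perf_event *event,
			       struct perf_sample *sample __used,
			       struct perf_session *session __used)
	{
		printf("%s\n", perf_event__name(event->header.type));
		return 0;
	}

	/* e.g. perf_event__synthesize_threads(print_event, session); */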
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index 8e790dae7026..1d7f66488a88 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h | |||
@@ -56,12 +56,19 @@ struct read_event { | |||
56 | u64 id; | 56 | u64 id; |
57 | }; | 57 | }; |
58 | 58 | ||
59 | |||
60 | #define PERF_SAMPLE_MASK \ | ||
61 | (PERF_SAMPLE_IP | PERF_SAMPLE_TID | \ | ||
62 | PERF_SAMPLE_TIME | PERF_SAMPLE_ADDR | \ | ||
63 | PERF_SAMPLE_ID | PERF_SAMPLE_STREAM_ID | \ | ||
64 | PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD) | ||
65 | |||
59 | struct sample_event { | 66 | struct sample_event { |
60 | struct perf_event_header header; | 67 | struct perf_event_header header; |
61 | u64 array[]; | 68 | u64 array[]; |
62 | }; | 69 | }; |
63 | 70 | ||
64 | struct sample_data { | 71 | struct perf_sample { |
65 | u64 ip; | 72 | u64 ip; |
66 | u32 pid, tid; | 73 | u32 pid, tid; |
67 | u64 time; | 74 | u64 time; |
@@ -85,6 +92,7 @@ struct build_id_event { | |||
85 | }; | 92 | }; |
86 | 93 | ||
87 | enum perf_user_event_type { /* above any possible kernel type */ | 94 | enum perf_user_event_type { /* above any possible kernel type */ |
95 | PERF_RECORD_USER_TYPE_START = 64, | ||
88 | PERF_RECORD_HEADER_ATTR = 64, | 96 | PERF_RECORD_HEADER_ATTR = 64, |
89 | PERF_RECORD_HEADER_EVENT_TYPE = 65, | 97 | PERF_RECORD_HEADER_EVENT_TYPE = 65, |
90 | PERF_RECORD_HEADER_TRACING_DATA = 66, | 98 | PERF_RECORD_HEADER_TRACING_DATA = 66, |
@@ -116,7 +124,7 @@ struct tracing_data_event { | |||
116 | u32 size; | 124 | u32 size; |
117 | }; | 125 | }; |
118 | 126 | ||
119 | typedef union event_union { | 127 | union perf_event { |
120 | struct perf_event_header header; | 128 | struct perf_event_header header; |
121 | struct ip_event ip; | 129 | struct ip_event ip; |
122 | struct mmap_event mmap; | 130 | struct mmap_event mmap; |
@@ -129,39 +137,55 @@ typedef union event_union { | |||
129 | struct event_type_event event_type; | 137 | struct event_type_event event_type; |
130 | struct tracing_data_event tracing_data; | 138 | struct tracing_data_event tracing_data; |
131 | struct build_id_event build_id; | 139 | struct build_id_event build_id; |
132 | } event_t; | 140 | }; |
133 | 141 | ||
134 | void event__print_totals(void); | 142 | void perf_event__print_totals(void); |
135 | 143 | ||
136 | struct perf_session; | 144 | struct perf_session; |
137 | 145 | struct thread_map; | |
138 | typedef int (*event__handler_t)(event_t *event, struct perf_session *session); | 146 | |
139 | 147 | typedef int (*perf_event__handler_synth_t)(union perf_event *event, | |
140 | int event__synthesize_thread(pid_t pid, event__handler_t process, | 148 | struct perf_session *session); |
149 | typedef int (*perf_event__handler_t)(union perf_event *event, | ||
150 | struct perf_sample *sample, | ||
151 | struct perf_session *session); | ||
152 | |||
153 | int perf_event__synthesize_thread_map(struct thread_map *threads, | ||
154 | perf_event__handler_t process, | ||
155 | struct perf_session *session); | ||
156 | int perf_event__synthesize_threads(perf_event__handler_t process, | ||
157 | struct perf_session *session); | ||
158 | int perf_event__synthesize_kernel_mmap(perf_event__handler_t process, | ||
159 | struct perf_session *session, | ||
160 | struct machine *machine, | ||
161 | const char *symbol_name); | ||
162 | |||
163 | int perf_event__synthesize_modules(perf_event__handler_t process, | ||
164 | struct perf_session *session, | ||
165 | struct machine *machine); | ||
166 | |||
167 | int perf_event__process_comm(union perf_event *event, struct perf_sample *sample, | ||
141 | struct perf_session *session); | 168 | struct perf_session *session); |
142 | void event__synthesize_threads(event__handler_t process, | 169 | int perf_event__process_lost(union perf_event *event, struct perf_sample *sample, |
143 | struct perf_session *session); | 170 | struct perf_session *session); |
144 | int event__synthesize_kernel_mmap(event__handler_t process, | 171 | int perf_event__process_mmap(union perf_event *event, struct perf_sample *sample, |
145 | struct perf_session *session, | 172 | struct perf_session *session); |
146 | struct machine *machine, | 173 | int perf_event__process_task(union perf_event *event, struct perf_sample *sample, |
147 | const char *symbol_name); | 174 | struct perf_session *session); |
148 | 175 | int perf_event__process(union perf_event *event, struct perf_sample *sample, | |
149 | int event__synthesize_modules(event__handler_t process, | 176 | struct perf_session *session); |
150 | struct perf_session *session, | ||
151 | struct machine *machine); | ||
152 | |||
153 | int event__process_comm(event_t *self, struct perf_session *session); | ||
154 | int event__process_lost(event_t *self, struct perf_session *session); | ||
155 | int event__process_mmap(event_t *self, struct perf_session *session); | ||
156 | int event__process_task(event_t *self, struct perf_session *session); | ||
157 | int event__process(event_t *event, struct perf_session *session); | ||
158 | 177 | ||
159 | struct addr_location; | 178 | struct addr_location; |
160 | int event__preprocess_sample(const event_t *self, struct perf_session *session, | 179 | int perf_event__preprocess_sample(const union perf_event *self, |
161 | struct addr_location *al, struct sample_data *data, | 180 | struct perf_session *session, |
162 | symbol_filter_t filter); | 181 | struct addr_location *al, |
163 | int event__parse_sample(const event_t *event, u64 type, struct sample_data *data); | 182 | struct perf_sample *sample, |
183 | symbol_filter_t filter); | ||
184 | |||
185 | const char *perf_event__name(unsigned int id); | ||
164 | 186 | ||
165 | extern const char *event__name[]; | 187 | int perf_event__parse_sample(const union perf_event *event, u64 type, |
188 | int sample_size, bool sample_id_all, | ||
189 | struct perf_sample *sample); | ||
166 | 190 | ||
167 | #endif /* __PERF_RECORD_H */ | 191 | #endif /* __PERF_RECORD_H */ |
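The new PERF_SAMPLE_MASK plus the sample_size and sample_id_all arguments to perf_event__parse_sample() let a caller pre-compute how many u64 slots the fixed part of a sample occupies before walking the ring buffer. A minimal, self-contained sketch of that computation, mirroring the popcount-times-eight logic of __perf_evsel__sample_size() in evsel.c below; the EX_SAMPLE_* constants are hypothetical stand-ins for the real PERF_SAMPLE_* bits:

    #include <stdint.h>
    #include <stdio.h>

    #define EX_SAMPLE_IP    (1ULL << 0)     /* stand-ins for PERF_SAMPLE_* */
    #define EX_SAMPLE_TID   (1ULL << 1)
    #define EX_SAMPLE_TIME  (1ULL << 2)

    static int ex_sample_size(uint64_t sample_type)
    {
            int size = 0, i;

            for (i = 0; i < 64; i++)
                    if (sample_type & (1ULL << i))
                            size++;

            return size * (int)sizeof(uint64_t);    /* one u64 slot per field */
    }

    int main(void)
    {
            /* IP + TID + TIME -> 3 slots -> 24 bytes */
            printf("%d bytes\n",
                   ex_sample_size(EX_SAMPLE_IP | EX_SAMPLE_TID | EX_SAMPLE_TIME));
            return 0;
    }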
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c new file mode 100644 index 000000000000..b021ea9265c3 --- /dev/null +++ b/tools/perf/util/evlist.c | |||
@@ -0,0 +1,500 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com> | ||
3 | * | ||
4 | * Parts came from builtin-{top,stat,record}.c, see those files for further | ||
5 | * copyright notes. | ||
6 | * | ||
7 | * Released under the GPL v2. (and only v2, not any later version) | ||
8 | */ | ||
9 | #include <poll.h> | ||
10 | #include "cpumap.h" | ||
11 | #include "thread_map.h" | ||
12 | #include "evlist.h" | ||
13 | #include "evsel.h" | ||
14 | #include "util.h" | ||
15 | |||
16 | #include <sys/mman.h> | ||
17 | |||
18 | #include <linux/bitops.h> | ||
19 | #include <linux/hash.h> | ||
20 | |||
21 | #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y)) | ||
22 | #define SID(e, x, y) xyarray__entry(e->sample_id, x, y) | ||
23 | |||
24 | void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus, | ||
25 | struct thread_map *threads) | ||
26 | { | ||
27 | int i; | ||
28 | |||
29 | for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i) | ||
30 | INIT_HLIST_HEAD(&evlist->heads[i]); | ||
31 | INIT_LIST_HEAD(&evlist->entries); | ||
32 | perf_evlist__set_maps(evlist, cpus, threads); | ||
33 | } | ||
34 | |||
35 | struct perf_evlist *perf_evlist__new(struct cpu_map *cpus, | ||
36 | struct thread_map *threads) | ||
37 | { | ||
38 | struct perf_evlist *evlist = zalloc(sizeof(*evlist)); | ||
39 | |||
40 | if (evlist != NULL) | ||
41 | perf_evlist__init(evlist, cpus, threads); | ||
42 | |||
43 | return evlist; | ||
44 | } | ||
45 | |||
46 | static void perf_evlist__purge(struct perf_evlist *evlist) | ||
47 | { | ||
48 | struct perf_evsel *pos, *n; | ||
49 | |||
50 | list_for_each_entry_safe(pos, n, &evlist->entries, node) { | ||
51 | list_del_init(&pos->node); | ||
52 | perf_evsel__delete(pos); | ||
53 | } | ||
54 | |||
55 | evlist->nr_entries = 0; | ||
56 | } | ||
57 | |||
58 | void perf_evlist__exit(struct perf_evlist *evlist) | ||
59 | { | ||
60 | free(evlist->mmap); | ||
61 | free(evlist->pollfd); | ||
62 | evlist->mmap = NULL; | ||
63 | evlist->pollfd = NULL; | ||
64 | } | ||
65 | |||
66 | void perf_evlist__delete(struct perf_evlist *evlist) | ||
67 | { | ||
68 | perf_evlist__purge(evlist); | ||
69 | perf_evlist__exit(evlist); | ||
70 | free(evlist); | ||
71 | } | ||
72 | |||
73 | void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry) | ||
74 | { | ||
75 | list_add_tail(&entry->node, &evlist->entries); | ||
76 | ++evlist->nr_entries; | ||
77 | } | ||
78 | |||
79 | int perf_evlist__add_default(struct perf_evlist *evlist) | ||
80 | { | ||
81 | struct perf_event_attr attr = { | ||
82 | .type = PERF_TYPE_HARDWARE, | ||
83 | .config = PERF_COUNT_HW_CPU_CYCLES, | ||
84 | }; | ||
85 | struct perf_evsel *evsel = perf_evsel__new(&attr, 0); | ||
86 | |||
87 | if (evsel == NULL) | ||
88 | return -ENOMEM; | ||
89 | |||
90 | perf_evlist__add(evlist, evsel); | ||
91 | return 0; | ||
92 | } | ||
93 | |||
94 | int perf_evlist__alloc_pollfd(struct perf_evlist *evlist) | ||
95 | { | ||
96 | int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries; | ||
97 | evlist->pollfd = malloc(sizeof(struct pollfd) * nfds); | ||
98 | return evlist->pollfd != NULL ? 0 : -ENOMEM; | ||
99 | } | ||
100 | |||
101 | void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd) | ||
102 | { | ||
103 | fcntl(fd, F_SETFL, O_NONBLOCK); | ||
104 | evlist->pollfd[evlist->nr_fds].fd = fd; | ||
105 | evlist->pollfd[evlist->nr_fds].events = POLLIN; | ||
106 | evlist->nr_fds++; | ||
107 | } | ||
108 | |||
109 | static void perf_evlist__id_hash(struct perf_evlist *evlist, | ||
110 | struct perf_evsel *evsel, | ||
111 | int cpu, int thread, u64 id) | ||
112 | { | ||
113 | int hash; | ||
114 | struct perf_sample_id *sid = SID(evsel, cpu, thread); | ||
115 | |||
116 | sid->id = id; | ||
117 | sid->evsel = evsel; | ||
118 | hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS); | ||
119 | hlist_add_head(&sid->node, &evlist->heads[hash]); | ||
120 | } | ||
121 | |||
122 | void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel, | ||
123 | int cpu, int thread, u64 id) | ||
124 | { | ||
125 | perf_evlist__id_hash(evlist, evsel, cpu, thread, id); | ||
126 | evsel->id[evsel->ids++] = id; | ||
127 | } | ||
128 | |||
129 | static int perf_evlist__id_add_fd(struct perf_evlist *evlist, | ||
130 | struct perf_evsel *evsel, | ||
131 | int cpu, int thread, int fd) | ||
132 | { | ||
133 | u64 read_data[4] = { 0, }; | ||
134 | int id_idx = 1; /* The first entry is the counter value */ | ||
135 | |||
136 | if (!(evsel->attr.read_format & PERF_FORMAT_ID) || | ||
137 | read(fd, &read_data, sizeof(read_data)) == -1) | ||
138 | return -1; | ||
139 | |||
140 | if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) | ||
141 | ++id_idx; | ||
142 | if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) | ||
143 | ++id_idx; | ||
144 | |||
145 | perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]); | ||
146 | return 0; | ||
147 | } | ||
148 | |||
149 | struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id) | ||
150 | { | ||
151 | struct hlist_head *head; | ||
152 | struct hlist_node *pos; | ||
153 | struct perf_sample_id *sid; | ||
154 | int hash; | ||
155 | |||
156 | if (evlist->nr_entries == 1) | ||
157 | return list_entry(evlist->entries.next, struct perf_evsel, node); | ||
158 | |||
159 | hash = hash_64(id, PERF_EVLIST__HLIST_BITS); | ||
160 | head = &evlist->heads[hash]; | ||
161 | |||
162 | hlist_for_each_entry(sid, pos, head, node) | ||
163 | if (sid->id == id) | ||
164 | return sid->evsel; | ||
165 | return NULL; | ||
166 | } | ||
167 | |||
168 | union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx) | ||
169 | { | ||
170 | /* XXX Move this to perf.c, making it generally available */ | ||
171 | unsigned int page_size = sysconf(_SC_PAGE_SIZE); | ||
172 | struct perf_mmap *md = &evlist->mmap[idx]; | ||
173 | unsigned int head = perf_mmap__read_head(md); | ||
174 | unsigned int old = md->prev; | ||
175 | unsigned char *data = md->base + page_size; | ||
176 | union perf_event *event = NULL; | ||
177 | |||
178 | if (evlist->overwrite) { | ||
179 | /* | ||
180 | * If we're further behind than half the buffer, there's a chance | ||
181 | * the writer will bite our tail and mess up the samples under us. | ||
182 | * | ||
183 | * If we somehow ended up ahead of the head, we got messed up. | ||
184 | * | ||
185 | * In either case, truncate and restart at head. | ||
186 | */ | ||
187 | int diff = head - old; | ||
188 | if (diff > md->mask / 2 || diff < 0) { | ||
189 | fprintf(stderr, "WARNING: failed to keep up with mmap data.\n"); | ||
190 | |||
191 | /* | ||
192 | * head points to a known good entry, start there. | ||
193 | */ | ||
194 | old = head; | ||
195 | } | ||
196 | } | ||
197 | |||
198 | if (old != head) { | ||
199 | size_t size; | ||
200 | |||
201 | event = (union perf_event *)&data[old & md->mask]; | ||
202 | size = event->header.size; | ||
203 | |||
204 | /* | ||
205 | * Event straddles the mmap boundary -- header should always | ||
206 | * be inside due to u64 alignment of output. | ||
207 | */ | ||
208 | if ((old & md->mask) + size != ((old + size) & md->mask)) { | ||
209 | unsigned int offset = old; | ||
210 | unsigned int len = min(sizeof(*event), size), cpy; | ||
211 | void *dst = &evlist->event_copy; | ||
212 | |||
213 | do { | ||
214 | cpy = min(md->mask + 1 - (offset & md->mask), len); | ||
215 | memcpy(dst, &data[offset & md->mask], cpy); | ||
216 | offset += cpy; | ||
217 | dst += cpy; | ||
218 | len -= cpy; | ||
219 | } while (len); | ||
220 | |||
221 | event = &evlist->event_copy; | ||
222 | } | ||
223 | |||
224 | old += size; | ||
225 | } | ||
226 | |||
227 | md->prev = old; | ||
228 | |||
229 | if (!evlist->overwrite) | ||
230 | perf_mmap__write_tail(md, old); | ||
231 | |||
232 | return event; | ||
233 | } | ||
234 | |||
235 | void perf_evlist__munmap(struct perf_evlist *evlist) | ||
236 | { | ||
237 | int i; | ||
238 | |||
239 | for (i = 0; i < evlist->nr_mmaps; i++) { | ||
240 | if (evlist->mmap[i].base != NULL) { | ||
241 | munmap(evlist->mmap[i].base, evlist->mmap_len); | ||
242 | evlist->mmap[i].base = NULL; | ||
243 | } | ||
244 | } | ||
245 | |||
246 | free(evlist->mmap); | ||
247 | evlist->mmap = NULL; | ||
248 | } | ||
249 | |||
250 | int perf_evlist__alloc_mmap(struct perf_evlist *evlist) | ||
251 | { | ||
252 | evlist->nr_mmaps = evlist->cpus->nr; | ||
253 | if (evlist->cpus->map[0] == -1) | ||
254 | evlist->nr_mmaps = evlist->threads->nr; | ||
255 | evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap)); | ||
256 | return evlist->mmap != NULL ? 0 : -ENOMEM; | ||
257 | } | ||
258 | |||
259 | static int __perf_evlist__mmap(struct perf_evlist *evlist, | ||
260 | int idx, int prot, int mask, int fd) | ||
261 | { | ||
262 | evlist->mmap[idx].prev = 0; | ||
263 | evlist->mmap[idx].mask = mask; | ||
264 | evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot, | ||
265 | MAP_SHARED, fd, 0); | ||
266 | if (evlist->mmap[idx].base == MAP_FAILED) | ||
267 | return -1; | ||
268 | |||
269 | perf_evlist__add_pollfd(evlist, fd); | ||
270 | return 0; | ||
271 | } | ||
272 | |||
273 | static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask) | ||
274 | { | ||
275 | struct perf_evsel *evsel; | ||
276 | int cpu, thread; | ||
277 | |||
278 | for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { | ||
279 | int output = -1; | ||
280 | |||
281 | for (thread = 0; thread < evlist->threads->nr; thread++) { | ||
282 | list_for_each_entry(evsel, &evlist->entries, node) { | ||
283 | int fd = FD(evsel, cpu, thread); | ||
284 | |||
285 | if (output == -1) { | ||
286 | output = fd; | ||
287 | if (__perf_evlist__mmap(evlist, cpu, | ||
288 | prot, mask, output) < 0) | ||
289 | goto out_unmap; | ||
290 | } else { | ||
291 | if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0) | ||
292 | goto out_unmap; | ||
293 | } | ||
294 | |||
295 | if ((evsel->attr.read_format & PERF_FORMAT_ID) && | ||
296 | perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0) | ||
297 | goto out_unmap; | ||
298 | } | ||
299 | } | ||
300 | } | ||
301 | |||
302 | return 0; | ||
303 | |||
304 | out_unmap: | ||
305 | for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { | ||
306 | if (evlist->mmap[cpu].base != NULL) { | ||
307 | munmap(evlist->mmap[cpu].base, evlist->mmap_len); | ||
308 | evlist->mmap[cpu].base = NULL; | ||
309 | } | ||
310 | } | ||
311 | return -1; | ||
312 | } | ||
313 | |||
314 | static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask) | ||
315 | { | ||
316 | struct perf_evsel *evsel; | ||
317 | int thread; | ||
318 | |||
319 | for (thread = 0; thread < evlist->threads->nr; thread++) { | ||
320 | int output = -1; | ||
321 | |||
322 | list_for_each_entry(evsel, &evlist->entries, node) { | ||
323 | int fd = FD(evsel, 0, thread); | ||
324 | |||
325 | if (output == -1) { | ||
326 | output = fd; | ||
327 | if (__perf_evlist__mmap(evlist, thread, | ||
328 | prot, mask, output) < 0) | ||
329 | goto out_unmap; | ||
330 | } else { | ||
331 | if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0) | ||
332 | goto out_unmap; | ||
333 | } | ||
334 | |||
335 | if ((evsel->attr.read_format & PERF_FORMAT_ID) && | ||
336 | perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0) | ||
337 | goto out_unmap; | ||
338 | } | ||
339 | } | ||
340 | |||
341 | return 0; | ||
342 | |||
343 | out_unmap: | ||
344 | for (thread = 0; thread < evlist->threads->nr; thread++) { | ||
345 | if (evlist->mmap[thread].base != NULL) { | ||
346 | munmap(evlist->mmap[thread].base, evlist->mmap_len); | ||
347 | evlist->mmap[thread].base = NULL; | ||
348 | } | ||
349 | } | ||
350 | return -1; | ||
351 | } | ||
352 | |||
353 | /** perf_evlist__mmap - Create per cpu maps to receive events | ||
354 | * | ||
355 | * @evlist - list of events | ||
356 | * @pages - map length in pages | ||
357 | * @overwrite - overwrite older events? | ||
358 | * | ||
359 | * If overwrite is false the user needs to signal event consumption using: | ||
360 | * | ||
361 | * struct perf_mmap *m = &evlist->mmap[cpu]; | ||
362 | * unsigned int head = perf_mmap__read_head(m); | ||
363 | * | ||
364 | * perf_mmap__write_tail(m, head) | ||
365 | * | ||
366 | * Using perf_evlist__mmap_read does this automatically. | ||
367 | */ | ||
368 | int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite) | ||
369 | { | ||
370 | unsigned int page_size = sysconf(_SC_PAGE_SIZE); | ||
371 | int mask = pages * page_size - 1; | ||
372 | struct perf_evsel *evsel; | ||
373 | const struct cpu_map *cpus = evlist->cpus; | ||
374 | const struct thread_map *threads = evlist->threads; | ||
375 | int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE); | ||
376 | |||
377 | if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0) | ||
378 | return -ENOMEM; | ||
379 | |||
380 | if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0) | ||
381 | return -ENOMEM; | ||
382 | |||
383 | evlist->overwrite = overwrite; | ||
384 | evlist->mmap_len = (pages + 1) * page_size; | ||
385 | |||
386 | list_for_each_entry(evsel, &evlist->entries, node) { | ||
387 | if ((evsel->attr.read_format & PERF_FORMAT_ID) && | ||
388 | evsel->sample_id == NULL && | ||
389 | perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0) | ||
390 | return -ENOMEM; | ||
391 | } | ||
392 | |||
393 | if (evlist->cpus->map[0] == -1) | ||
394 | return perf_evlist__mmap_per_thread(evlist, prot, mask); | ||
395 | |||
396 | return perf_evlist__mmap_per_cpu(evlist, prot, mask); | ||
397 | } | ||
398 | |||
399 | int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid, | ||
400 | pid_t target_tid, const char *cpu_list) | ||
401 | { | ||
402 | evlist->threads = thread_map__new(target_pid, target_tid); | ||
403 | |||
404 | if (evlist->threads == NULL) | ||
405 | return -1; | ||
406 | |||
407 | if (cpu_list == NULL && target_tid != -1) | ||
408 | evlist->cpus = cpu_map__dummy_new(); | ||
409 | else | ||
410 | evlist->cpus = cpu_map__new(cpu_list); | ||
411 | |||
412 | if (evlist->cpus == NULL) | ||
413 | goto out_delete_threads; | ||
414 | |||
415 | return 0; | ||
416 | |||
417 | out_delete_threads: | ||
418 | thread_map__delete(evlist->threads); | ||
419 | return -1; | ||
420 | } | ||
421 | |||
422 | void perf_evlist__delete_maps(struct perf_evlist *evlist) | ||
423 | { | ||
424 | cpu_map__delete(evlist->cpus); | ||
425 | thread_map__delete(evlist->threads); | ||
426 | evlist->cpus = NULL; | ||
427 | evlist->threads = NULL; | ||
428 | } | ||
429 | |||
430 | int perf_evlist__set_filters(struct perf_evlist *evlist) | ||
431 | { | ||
432 | const struct thread_map *threads = evlist->threads; | ||
433 | const struct cpu_map *cpus = evlist->cpus; | ||
434 | struct perf_evsel *evsel; | ||
435 | char *filter; | ||
436 | int thread; | ||
437 | int cpu; | ||
438 | int err; | ||
439 | int fd; | ||
440 | |||
441 | list_for_each_entry(evsel, &evlist->entries, node) { | ||
442 | filter = evsel->filter; | ||
443 | if (!filter) | ||
444 | continue; | ||
445 | for (cpu = 0; cpu < cpus->nr; cpu++) { | ||
446 | for (thread = 0; thread < threads->nr; thread++) { | ||
447 | fd = FD(evsel, cpu, thread); | ||
448 | err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter); | ||
449 | if (err) | ||
450 | return err; | ||
451 | } | ||
452 | } | ||
453 | } | ||
454 | |||
455 | return 0; | ||
456 | } | ||
457 | |||
458 | bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist) | ||
459 | { | ||
460 | struct perf_evsel *pos, *first; | ||
461 | |||
462 | pos = first = list_entry(evlist->entries.next, struct perf_evsel, node); | ||
463 | |||
464 | list_for_each_entry_continue(pos, &evlist->entries, node) { | ||
465 | if (first->attr.sample_type != pos->attr.sample_type) | ||
466 | return false; | ||
467 | } | ||
468 | |||
469 | return true; | ||
470 | } | ||
471 | |||
472 | u64 perf_evlist__sample_type(const struct perf_evlist *evlist) | ||
473 | { | ||
474 | struct perf_evsel *first; | ||
475 | |||
476 | first = list_entry(evlist->entries.next, struct perf_evsel, node); | ||
477 | return first->attr.sample_type; | ||
478 | } | ||
479 | |||
480 | bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist) | ||
481 | { | ||
482 | struct perf_evsel *pos, *first; | ||
483 | |||
484 | pos = first = list_entry(evlist->entries.next, struct perf_evsel, node); | ||
485 | |||
486 | list_for_each_entry_continue(pos, &evlist->entries, node) { | ||
487 | if (first->attr.sample_id_all != pos->attr.sample_id_all) | ||
488 | return false; | ||
489 | } | ||
490 | |||
491 | return true; | ||
492 | } | ||
493 | |||
494 | bool perf_evlist__sample_id_all(const struct perf_evlist *evlist) | ||
495 | { | ||
496 | struct perf_evsel *first; | ||
497 | |||
498 | first = list_entry(evlist->entries.next, struct perf_evsel, node); | ||
499 | return first->attr.sample_id_all; | ||
500 | } | ||
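Taken together, these helpers replace the open-coded mmap and poll loops that previously lived in builtin-record and builtin-top. A rough sketch of how a caller might string them together; error handling is omitted, and the include paths and open-before-mmap ordering are assumed from how the existing builtins use this code rather than mandated by the API itself:

    #include <unistd.h>
    #include "evlist.h"
    #include "evsel.h"
    #include "thread_map.h"
    #include "cpumap.h"

    static void ex_consume_events(void)
    {
            struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
            struct perf_evsel *evsel;
            union perf_event *event;
            int i;

            perf_evlist__create_maps(evlist, -1, getpid(), NULL); /* this thread, any cpu */
            perf_evlist__add_default(evlist);                     /* HW cycles counter */

            list_for_each_entry(evsel, &evlist->entries, node)
                    perf_evsel__open(evsel, evlist->cpus, evlist->threads, false);

            perf_evlist__mmap(evlist, 128, false);  /* 128 data pages, no overwrite */

            /* ... workload runs; poll(evlist->pollfd, evlist->nr_fds, -1) ... */

            for (i = 0; i < evlist->nr_mmaps; i++)
                    while ((event = perf_evlist__mmap_read(evlist, i)) != NULL)
                            ;       /* hand the event to perf_event__parse_sample() etc. */

            perf_evlist__munmap(evlist);
            perf_evlist__delete_maps(evlist);
            perf_evlist__delete(evlist);
    }

Because overwrite is false here, perf_evlist__mmap_read() advances the tail pointer after each read, so the explicit perf_mmap__write_tail() from the comment above is not needed.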
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h new file mode 100644 index 000000000000..b2b862374f37 --- /dev/null +++ b/tools/perf/util/evlist.h | |||
@@ -0,0 +1,74 @@ | |||
1 | #ifndef __PERF_EVLIST_H | ||
2 | #define __PERF_EVLIST_H 1 | ||
3 | |||
4 | #include <linux/list.h> | ||
5 | #include "../perf.h" | ||
6 | #include "event.h" | ||
7 | |||
8 | struct pollfd; | ||
9 | struct thread_map; | ||
10 | struct cpu_map; | ||
11 | |||
12 | #define PERF_EVLIST__HLIST_BITS 8 | ||
13 | #define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS) | ||
14 | |||
15 | struct perf_evlist { | ||
16 | struct list_head entries; | ||
17 | struct hlist_head heads[PERF_EVLIST__HLIST_SIZE]; | ||
18 | int nr_entries; | ||
19 | int nr_fds; | ||
20 | int nr_mmaps; | ||
21 | int mmap_len; | ||
22 | bool overwrite; | ||
23 | union perf_event event_copy; | ||
24 | struct perf_mmap *mmap; | ||
25 | struct pollfd *pollfd; | ||
26 | struct thread_map *threads; | ||
27 | struct cpu_map *cpus; | ||
28 | }; | ||
29 | |||
30 | struct perf_evsel; | ||
31 | |||
32 | struct perf_evlist *perf_evlist__new(struct cpu_map *cpus, | ||
33 | struct thread_map *threads); | ||
34 | void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus, | ||
35 | struct thread_map *threads); | ||
36 | void perf_evlist__exit(struct perf_evlist *evlist); | ||
37 | void perf_evlist__delete(struct perf_evlist *evlist); | ||
38 | |||
39 | void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry); | ||
40 | int perf_evlist__add_default(struct perf_evlist *evlist); | ||
41 | |||
42 | void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel, | ||
43 | int cpu, int thread, u64 id); | ||
44 | |||
45 | int perf_evlist__alloc_pollfd(struct perf_evlist *evlist); | ||
46 | void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd); | ||
47 | |||
48 | struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id); | ||
49 | |||
50 | union perf_event *perf_evlist__mmap_read(struct perf_evlist *self, int idx); | ||
51 | |||
52 | int perf_evlist__alloc_mmap(struct perf_evlist *evlist); | ||
53 | int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite); | ||
54 | void perf_evlist__munmap(struct perf_evlist *evlist); | ||
55 | |||
56 | static inline void perf_evlist__set_maps(struct perf_evlist *evlist, | ||
57 | struct cpu_map *cpus, | ||
58 | struct thread_map *threads) | ||
59 | { | ||
60 | evlist->cpus = cpus; | ||
61 | evlist->threads = threads; | ||
62 | } | ||
63 | |||
64 | int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid, | ||
65 | pid_t target_tid, const char *cpu_list); | ||
66 | void perf_evlist__delete_maps(struct perf_evlist *evlist); | ||
67 | int perf_evlist__set_filters(struct perf_evlist *evlist); | ||
68 | |||
69 | u64 perf_evlist__sample_type(const struct perf_evlist *evlist); | ||
70 | bool perf_evlist__sample_id_all(const struct perf_evlist *evlist); | ||
71 | |||
72 | bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist); | ||
73 | bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist); | ||
74 | #endif /* __PERF_EVLIST_H */ | ||
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c new file mode 100644 index 000000000000..0239eb87b232 --- /dev/null +++ b/tools/perf/util/evsel.c | |||
@@ -0,0 +1,440 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com> | ||
3 | * | ||
4 | * Parts came from builtin-{top,stat,record}.c, see those files for further | ||
5 | * copyright notes. | ||
6 | * | ||
7 | * Released under the GPL v2. (and only v2, not any later version) | ||
8 | */ | ||
9 | |||
10 | #include "evsel.h" | ||
11 | #include "evlist.h" | ||
12 | #include "util.h" | ||
13 | #include "cpumap.h" | ||
14 | #include "thread_map.h" | ||
15 | |||
16 | #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y)) | ||
17 | |||
18 | int __perf_evsel__sample_size(u64 sample_type) | ||
19 | { | ||
20 | u64 mask = sample_type & PERF_SAMPLE_MASK; | ||
21 | int size = 0; | ||
22 | int i; | ||
23 | |||
24 | for (i = 0; i < 64; i++) { | ||
25 | if (mask & (1ULL << i)) | ||
26 | size++; | ||
27 | } | ||
28 | |||
29 | size *= sizeof(u64); | ||
30 | |||
31 | return size; | ||
32 | } | ||
33 | |||
34 | void perf_evsel__init(struct perf_evsel *evsel, | ||
35 | struct perf_event_attr *attr, int idx) | ||
36 | { | ||
37 | evsel->idx = idx; | ||
38 | evsel->attr = *attr; | ||
39 | INIT_LIST_HEAD(&evsel->node); | ||
40 | } | ||
41 | |||
42 | struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx) | ||
43 | { | ||
44 | struct perf_evsel *evsel = zalloc(sizeof(*evsel)); | ||
45 | |||
46 | if (evsel != NULL) | ||
47 | perf_evsel__init(evsel, attr, idx); | ||
48 | |||
49 | return evsel; | ||
50 | } | ||
51 | |||
52 | int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads) | ||
53 | { | ||
54 | int cpu, thread; | ||
55 | evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int)); | ||
56 | |||
57 | if (evsel->fd) { | ||
58 | for (cpu = 0; cpu < ncpus; cpu++) { | ||
59 | for (thread = 0; thread < nthreads; thread++) { | ||
60 | FD(evsel, cpu, thread) = -1; | ||
61 | } | ||
62 | } | ||
63 | } | ||
64 | |||
65 | return evsel->fd != NULL ? 0 : -ENOMEM; | ||
66 | } | ||
67 | |||
68 | int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads) | ||
69 | { | ||
70 | evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id)); | ||
71 | if (evsel->sample_id == NULL) | ||
72 | return -ENOMEM; | ||
73 | |||
74 | evsel->id = zalloc(ncpus * nthreads * sizeof(u64)); | ||
75 | if (evsel->id == NULL) { | ||
76 | xyarray__delete(evsel->sample_id); | ||
77 | evsel->sample_id = NULL; | ||
78 | return -ENOMEM; | ||
79 | } | ||
80 | |||
81 | return 0; | ||
82 | } | ||
83 | |||
84 | int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus) | ||
85 | { | ||
86 | evsel->counts = zalloc((sizeof(*evsel->counts) + | ||
87 | (ncpus * sizeof(struct perf_counts_values)))); | ||
88 | return evsel->counts != NULL ? 0 : -ENOMEM; | ||
89 | } | ||
90 | |||
91 | void perf_evsel__free_fd(struct perf_evsel *evsel) | ||
92 | { | ||
93 | xyarray__delete(evsel->fd); | ||
94 | evsel->fd = NULL; | ||
95 | } | ||
96 | |||
97 | void perf_evsel__free_id(struct perf_evsel *evsel) | ||
98 | { | ||
99 | xyarray__delete(evsel->sample_id); | ||
100 | evsel->sample_id = NULL; | ||
101 | free(evsel->id); | ||
102 | evsel->id = NULL; | ||
103 | } | ||
104 | |||
105 | void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads) | ||
106 | { | ||
107 | int cpu, thread; | ||
108 | |||
109 | for (cpu = 0; cpu < ncpus; cpu++) | ||
110 | for (thread = 0; thread < nthreads; ++thread) { | ||
111 | close(FD(evsel, cpu, thread)); | ||
112 | FD(evsel, cpu, thread) = -1; | ||
113 | } | ||
114 | } | ||
115 | |||
116 | void perf_evsel__exit(struct perf_evsel *evsel) | ||
117 | { | ||
118 | assert(list_empty(&evsel->node)); | ||
119 | xyarray__delete(evsel->fd); | ||
120 | xyarray__delete(evsel->sample_id); | ||
121 | free(evsel->id); | ||
122 | } | ||
123 | |||
124 | void perf_evsel__delete(struct perf_evsel *evsel) | ||
125 | { | ||
126 | perf_evsel__exit(evsel); | ||
127 | close_cgroup(evsel->cgrp); | ||
128 | free(evsel->name); | ||
129 | free(evsel); | ||
130 | } | ||
131 | |||
132 | int __perf_evsel__read_on_cpu(struct perf_evsel *evsel, | ||
133 | int cpu, int thread, bool scale) | ||
134 | { | ||
135 | struct perf_counts_values count; | ||
136 | size_t nv = scale ? 3 : 1; | ||
137 | |||
138 | if (FD(evsel, cpu, thread) < 0) | ||
139 | return -EINVAL; | ||
140 | |||
141 | if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0) | ||
142 | return -ENOMEM; | ||
143 | |||
144 | if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0) | ||
145 | return -errno; | ||
146 | |||
147 | if (scale) { | ||
148 | if (count.run == 0) | ||
149 | count.val = 0; | ||
150 | else if (count.run < count.ena) | ||
151 | count.val = (u64)((double)count.val * count.ena / count.run + 0.5); | ||
152 | } else | ||
153 | count.ena = count.run = 0; | ||
154 | |||
155 | evsel->counts->cpu[cpu] = count; | ||
156 | return 0; | ||
157 | } | ||
158 | |||
159 | int __perf_evsel__read(struct perf_evsel *evsel, | ||
160 | int ncpus, int nthreads, bool scale) | ||
161 | { | ||
162 | size_t nv = scale ? 3 : 1; | ||
163 | int cpu, thread; | ||
164 | struct perf_counts_values *aggr = &evsel->counts->aggr, count; | ||
165 | |||
166 | aggr->val = aggr->ena = aggr->run = 0; | ||
167 | |||
168 | for (cpu = 0; cpu < ncpus; cpu++) { | ||
169 | for (thread = 0; thread < nthreads; thread++) { | ||
170 | if (FD(evsel, cpu, thread) < 0) | ||
171 | continue; | ||
172 | |||
173 | if (readn(FD(evsel, cpu, thread), | ||
174 | &count, nv * sizeof(u64)) < 0) | ||
175 | return -errno; | ||
176 | |||
177 | aggr->val += count.val; | ||
178 | if (scale) { | ||
179 | aggr->ena += count.ena; | ||
180 | aggr->run += count.run; | ||
181 | } | ||
182 | } | ||
183 | } | ||
184 | |||
185 | evsel->counts->scaled = 0; | ||
186 | if (scale) { | ||
187 | if (aggr->run == 0) { | ||
188 | evsel->counts->scaled = -1; | ||
189 | aggr->val = 0; | ||
190 | return 0; | ||
191 | } | ||
192 | |||
193 | if (aggr->run < aggr->ena) { | ||
194 | evsel->counts->scaled = 1; | ||
195 | aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5); | ||
196 | } | ||
197 | } else | ||
198 | aggr->ena = aggr->run = 0; | ||
199 | |||
200 | return 0; | ||
201 | } | ||
202 | |||
203 | static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, | ||
204 | struct thread_map *threads, bool group) | ||
205 | { | ||
206 | int cpu, thread; | ||
207 | unsigned long flags = 0; | ||
208 | int pid = -1; | ||
209 | |||
210 | if (evsel->fd == NULL && | ||
211 | perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0) | ||
212 | return -1; | ||
213 | |||
214 | if (evsel->cgrp) { | ||
215 | flags = PERF_FLAG_PID_CGROUP; | ||
216 | pid = evsel->cgrp->fd; | ||
217 | } | ||
218 | |||
219 | for (cpu = 0; cpu < cpus->nr; cpu++) { | ||
220 | int group_fd = -1; | ||
221 | |||
222 | for (thread = 0; thread < threads->nr; thread++) { | ||
223 | |||
224 | if (!evsel->cgrp) | ||
225 | pid = threads->map[thread]; | ||
226 | |||
227 | FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr, | ||
228 | pid, | ||
229 | cpus->map[cpu], | ||
230 | group_fd, flags); | ||
231 | if (FD(evsel, cpu, thread) < 0) | ||
232 | goto out_close; | ||
233 | |||
234 | if (group && group_fd == -1) | ||
235 | group_fd = FD(evsel, cpu, thread); | ||
236 | } | ||
237 | } | ||
238 | |||
239 | return 0; | ||
240 | |||
241 | out_close: | ||
242 | do { | ||
243 | while (--thread >= 0) { | ||
244 | close(FD(evsel, cpu, thread)); | ||
245 | FD(evsel, cpu, thread) = -1; | ||
246 | } | ||
247 | thread = threads->nr; | ||
248 | } while (--cpu >= 0); | ||
249 | return -1; | ||
250 | } | ||
251 | |||
252 | static struct { | ||
253 | struct cpu_map map; | ||
254 | int cpus[1]; | ||
255 | } empty_cpu_map = { | ||
256 | .map.nr = 1, | ||
257 | .cpus = { -1, }, | ||
258 | }; | ||
259 | |||
260 | static struct { | ||
261 | struct thread_map map; | ||
262 | int threads[1]; | ||
263 | } empty_thread_map = { | ||
264 | .map.nr = 1, | ||
265 | .threads = { -1, }, | ||
266 | }; | ||
267 | |||
268 | int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, | ||
269 | struct thread_map *threads, bool group) | ||
270 | { | ||
271 | if (cpus == NULL) { | ||
272 | /* Work around old compiler warnings about strict aliasing */ | ||
273 | cpus = &empty_cpu_map.map; | ||
274 | } | ||
275 | |||
276 | if (threads == NULL) | ||
277 | threads = &empty_thread_map.map; | ||
278 | |||
279 | return __perf_evsel__open(evsel, cpus, threads, group); | ||
280 | } | ||
281 | |||
282 | int perf_evsel__open_per_cpu(struct perf_evsel *evsel, | ||
283 | struct cpu_map *cpus, bool group) | ||
284 | { | ||
285 | return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group); | ||
286 | } | ||
287 | |||
288 | int perf_evsel__open_per_thread(struct perf_evsel *evsel, | ||
289 | struct thread_map *threads, bool group) | ||
290 | { | ||
291 | return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group); | ||
292 | } | ||
293 | |||
294 | static int perf_event__parse_id_sample(const union perf_event *event, u64 type, | ||
295 | struct perf_sample *sample) | ||
296 | { | ||
297 | const u64 *array = event->sample.array; | ||
298 | |||
299 | array += ((event->header.size - | ||
300 | sizeof(event->header)) / sizeof(u64)) - 1; | ||
301 | |||
302 | if (type & PERF_SAMPLE_CPU) { | ||
303 | u32 *p = (u32 *)array; | ||
304 | sample->cpu = *p; | ||
305 | array--; | ||
306 | } | ||
307 | |||
308 | if (type & PERF_SAMPLE_STREAM_ID) { | ||
309 | sample->stream_id = *array; | ||
310 | array--; | ||
311 | } | ||
312 | |||
313 | if (type & PERF_SAMPLE_ID) { | ||
314 | sample->id = *array; | ||
315 | array--; | ||
316 | } | ||
317 | |||
318 | if (type & PERF_SAMPLE_TIME) { | ||
319 | sample->time = *array; | ||
320 | array--; | ||
321 | } | ||
322 | |||
323 | if (type & PERF_SAMPLE_TID) { | ||
324 | u32 *p = (u32 *)array; | ||
325 | sample->pid = p[0]; | ||
326 | sample->tid = p[1]; | ||
327 | } | ||
328 | |||
329 | return 0; | ||
330 | } | ||
331 | |||
332 | static bool sample_overlap(const union perf_event *event, | ||
333 | const void *offset, u64 size) | ||
334 | { | ||
335 | const void *base = event; | ||
336 | |||
337 | if (offset + size > base + event->header.size) | ||
338 | return true; | ||
339 | |||
340 | return false; | ||
341 | } | ||
342 | |||
343 | int perf_event__parse_sample(const union perf_event *event, u64 type, | ||
344 | int sample_size, bool sample_id_all, | ||
345 | struct perf_sample *data) | ||
346 | { | ||
347 | const u64 *array; | ||
348 | |||
349 | data->cpu = data->pid = data->tid = -1; | ||
350 | data->stream_id = data->id = data->time = -1ULL; | ||
351 | |||
352 | if (event->header.type != PERF_RECORD_SAMPLE) { | ||
353 | if (!sample_id_all) | ||
354 | return 0; | ||
355 | return perf_event__parse_id_sample(event, type, data); | ||
356 | } | ||
357 | |||
358 | array = event->sample.array; | ||
359 | |||
360 | if (sample_size + sizeof(event->header) > event->header.size) | ||
361 | return -EFAULT; | ||
362 | |||
363 | if (type & PERF_SAMPLE_IP) { | ||
364 | data->ip = event->ip.ip; | ||
365 | array++; | ||
366 | } | ||
367 | |||
368 | if (type & PERF_SAMPLE_TID) { | ||
369 | u32 *p = (u32 *)array; | ||
370 | data->pid = p[0]; | ||
371 | data->tid = p[1]; | ||
372 | array++; | ||
373 | } | ||
374 | |||
375 | if (type & PERF_SAMPLE_TIME) { | ||
376 | data->time = *array; | ||
377 | array++; | ||
378 | } | ||
379 | |||
380 | if (type & PERF_SAMPLE_ADDR) { | ||
381 | data->addr = *array; | ||
382 | array++; | ||
383 | } | ||
384 | |||
385 | data->id = -1ULL; | ||
386 | if (type & PERF_SAMPLE_ID) { | ||
387 | data->id = *array; | ||
388 | array++; | ||
389 | } | ||
390 | |||
391 | if (type & PERF_SAMPLE_STREAM_ID) { | ||
392 | data->stream_id = *array; | ||
393 | array++; | ||
394 | } | ||
395 | |||
396 | if (type & PERF_SAMPLE_CPU) { | ||
397 | u32 *p = (u32 *)array; | ||
398 | data->cpu = *p; | ||
399 | array++; | ||
400 | } | ||
401 | |||
402 | if (type & PERF_SAMPLE_PERIOD) { | ||
403 | data->period = *array; | ||
404 | array++; | ||
405 | } | ||
406 | |||
407 | if (type & PERF_SAMPLE_READ) { | ||
408 | fprintf(stderr, "PERF_SAMPLE_READ is unsuported for now\n"); | ||
409 | return -1; | ||
410 | } | ||
411 | |||
412 | if (type & PERF_SAMPLE_CALLCHAIN) { | ||
413 | if (sample_overlap(event, array, sizeof(data->callchain->nr))) | ||
414 | return -EFAULT; | ||
415 | |||
416 | data->callchain = (struct ip_callchain *)array; | ||
417 | |||
418 | if (sample_overlap(event, array, data->callchain->nr)) | ||
419 | return -EFAULT; | ||
420 | |||
421 | array += 1 + data->callchain->nr; | ||
422 | } | ||
423 | |||
424 | if (type & PERF_SAMPLE_RAW) { | ||
425 | u32 *p = (u32 *)array; | ||
426 | |||
427 | if (sample_overlap(event, array, sizeof(u32))) | ||
428 | return -EFAULT; | ||
429 | |||
430 | data->raw_size = *p; | ||
431 | p++; | ||
432 | |||
433 | if (sample_overlap(event, p, data->raw_size)) | ||
434 | return -EFAULT; | ||
435 | |||
436 | data->raw_data = p; | ||
437 | } | ||
438 | |||
439 | return 0; | ||
440 | } | ||
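For counting-mode (perf stat style) use, an evsel can be driven on its own, without an evlist or mmap. A sketch of reading a single counter for the current thread; error handling and the measured workload are omitted, and thread_map__new()/thread_map__delete() are the thread_map.h helpers already used by evlist.c:

    #include <inttypes.h>
    #include <stdio.h>
    #include <unistd.h>
    #include "evsel.h"
    #include "thread_map.h"

    static void ex_count_cycles(void)
    {
            struct perf_event_attr attr = {
                    .type   = PERF_TYPE_HARDWARE,
                    .config = PERF_COUNT_HW_CPU_CYCLES,
            };
            struct thread_map *threads = thread_map__new(-1, getpid());
            struct perf_evsel *evsel = perf_evsel__new(&attr, 0);

            perf_evsel__open_per_thread(evsel, threads, false);

            /* ... run the code being measured ... */

            perf_evsel__read_on_cpu(evsel, 0, 0);   /* fills evsel->counts->cpu[0] */
            printf("cycles: %" PRIu64 "\n", evsel->counts->cpu[0].val);

            perf_evsel__close_fd(evsel, 1, threads->nr);
            perf_evsel__delete(evsel);
            thread_map__delete(threads);
    }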
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h new file mode 100644 index 000000000000..7e9366e4490b --- /dev/null +++ b/tools/perf/util/evsel.h | |||
@@ -0,0 +1,159 @@ | |||
1 | #ifndef __PERF_EVSEL_H | ||
2 | #define __PERF_EVSEL_H 1 | ||
3 | |||
4 | #include <linux/list.h> | ||
5 | #include <stdbool.h> | ||
6 | #include "../../../include/linux/perf_event.h" | ||
7 | #include "types.h" | ||
8 | #include "xyarray.h" | ||
9 | #include "cgroup.h" | ||
10 | #include "hist.h" | ||
11 | |||
12 | struct perf_counts_values { | ||
13 | union { | ||
14 | struct { | ||
15 | u64 val; | ||
16 | u64 ena; | ||
17 | u64 run; | ||
18 | }; | ||
19 | u64 values[3]; | ||
20 | }; | ||
21 | }; | ||
22 | |||
23 | struct perf_counts { | ||
24 | s8 scaled; | ||
25 | struct perf_counts_values aggr; | ||
26 | struct perf_counts_values cpu[]; | ||
27 | }; | ||
28 | |||
29 | struct perf_evsel; | ||
30 | |||
31 | /* | ||
32 | * Per fd, to map back from PERF_SAMPLE_ID to evsel, only used when there are | ||
33 | * more than one entry in the evlist. | ||
34 | */ | ||
35 | struct perf_sample_id { | ||
36 | struct hlist_node node; | ||
37 | u64 id; | ||
38 | struct perf_evsel *evsel; | ||
39 | }; | ||
40 | |||
41 | /** struct perf_evsel - event selector | ||
42 | * | ||
43 | * @name - Can be set to retain the original event name passed by the user, | ||
44 | * so that when showing results in tools such as 'perf stat', we | ||
45 | * show the name used, not some alias. | ||
46 | */ | ||
47 | struct perf_evsel { | ||
48 | struct list_head node; | ||
49 | struct perf_event_attr attr; | ||
50 | char *filter; | ||
51 | struct xyarray *fd; | ||
52 | struct xyarray *sample_id; | ||
53 | u64 *id; | ||
54 | struct perf_counts *counts; | ||
55 | int idx; | ||
56 | int ids; | ||
57 | struct hists hists; | ||
58 | char *name; | ||
59 | union { | ||
60 | void *priv; | ||
61 | off_t id_offset; | ||
62 | }; | ||
63 | struct cgroup_sel *cgrp; | ||
64 | }; | ||
65 | |||
66 | struct cpu_map; | ||
67 | struct thread_map; | ||
68 | struct perf_evlist; | ||
69 | |||
70 | struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx); | ||
71 | void perf_evsel__init(struct perf_evsel *evsel, | ||
72 | struct perf_event_attr *attr, int idx); | ||
73 | void perf_evsel__exit(struct perf_evsel *evsel); | ||
74 | void perf_evsel__delete(struct perf_evsel *evsel); | ||
75 | |||
76 | int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads); | ||
77 | int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads); | ||
78 | int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus); | ||
79 | void perf_evsel__free_fd(struct perf_evsel *evsel); | ||
80 | void perf_evsel__free_id(struct perf_evsel *evsel); | ||
81 | void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads); | ||
82 | |||
83 | int perf_evsel__open_per_cpu(struct perf_evsel *evsel, | ||
84 | struct cpu_map *cpus, bool group); | ||
85 | int perf_evsel__open_per_thread(struct perf_evsel *evsel, | ||
86 | struct thread_map *threads, bool group); | ||
87 | int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, | ||
88 | struct thread_map *threads, bool group); | ||
89 | |||
90 | #define perf_evsel__match(evsel, t, c) \ | ||
91 | (evsel->attr.type == PERF_TYPE_##t && \ | ||
92 | evsel->attr.config == PERF_COUNT_##c) | ||
93 | |||
94 | int __perf_evsel__read_on_cpu(struct perf_evsel *evsel, | ||
95 | int cpu, int thread, bool scale); | ||
96 | |||
97 | /** | ||
98 | * perf_evsel__read_on_cpu - Read out the results on a CPU and thread | ||
99 | * | ||
100 | * @evsel - event selector to read value | ||
101 | * @cpu - CPU of interest | ||
102 | * @thread - thread of interest | ||
103 | */ | ||
104 | static inline int perf_evsel__read_on_cpu(struct perf_evsel *evsel, | ||
105 | int cpu, int thread) | ||
106 | { | ||
107 | return __perf_evsel__read_on_cpu(evsel, cpu, thread, false); | ||
108 | } | ||
109 | |||
110 | /** | ||
111 | * perf_evsel__read_on_cpu_scaled - Read out the results on a CPU and thread, scaled | ||
112 | * | ||
113 | * @evsel - event selector to read value | ||
114 | * @cpu - CPU of interest | ||
115 | * @thread - thread of interest | ||
116 | */ | ||
117 | static inline int perf_evsel__read_on_cpu_scaled(struct perf_evsel *evsel, | ||
118 | int cpu, int thread) | ||
119 | { | ||
120 | return __perf_evsel__read_on_cpu(evsel, cpu, thread, true); | ||
121 | } | ||
122 | |||
123 | int __perf_evsel__read(struct perf_evsel *evsel, int ncpus, int nthreads, | ||
124 | bool scale); | ||
125 | |||
126 | /** | ||
127 | * perf_evsel__read - Read the aggregate results on all CPUs | ||
128 | * | ||
129 | * @evsel - event selector to read value | ||
130 | * @ncpus - Number of cpus affected, from zero | ||
131 | * @nthreads - Number of threads affected, from zero | ||
132 | */ | ||
133 | static inline int perf_evsel__read(struct perf_evsel *evsel, | ||
134 | int ncpus, int nthreads) | ||
135 | { | ||
136 | return __perf_evsel__read(evsel, ncpus, nthreads, false); | ||
137 | } | ||
138 | |||
139 | /** | ||
140 | * perf_evsel__read_scaled - Read the aggregate results on all CPUs, scaled | ||
141 | * | ||
142 | * @evsel - event selector to read value | ||
143 | * @ncpus - Number of cpus affected, from zero | ||
144 | * @nthreads - Number of threads affected, from zero | ||
145 | */ | ||
146 | static inline int perf_evsel__read_scaled(struct perf_evsel *evsel, | ||
147 | int ncpus, int nthreads) | ||
148 | { | ||
149 | return __perf_evsel__read(evsel, ncpus, nthreads, true); | ||
150 | } | ||
151 | |||
152 | int __perf_evsel__sample_size(u64 sample_type); | ||
153 | |||
154 | static inline int perf_evsel__sample_size(struct perf_evsel *evsel) | ||
155 | { | ||
156 | return __perf_evsel__sample_size(evsel->attr.sample_type); | ||
157 | } | ||
158 | |||
159 | #endif /* __PERF_EVSEL_H */ | ||
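The scaled read variants compensate for counter multiplexing: when the event was scheduled onto the PMU for only part of the time it was enabled, the raw value is extrapolated by ena/run, the same expression used in __perf_evsel__read_on_cpu() above. A worked example with made-up numbers, using perf's u64 typedef:

    /* The counter ran for a quarter of the time it was enabled. */
    u64 val = 1000000, ena = 400000, run = 100000;
    u64 scaled = (u64)((double)val * ena / run + 0.5);  /* 4000000 */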
diff --git a/tools/perf/util/exec_cmd.c b/tools/perf/util/exec_cmd.c index 67eeff571568..7adf4ad15d8f 100644 --- a/tools/perf/util/exec_cmd.c +++ b/tools/perf/util/exec_cmd.c | |||
@@ -11,31 +11,12 @@ static const char *argv0_path; | |||
11 | 11 | ||
12 | const char *system_path(const char *path) | 12 | const char *system_path(const char *path) |
13 | { | 13 | { |
14 | #ifdef RUNTIME_PREFIX | ||
15 | static const char *prefix; | ||
16 | #else | ||
17 | static const char *prefix = PREFIX; | 14 | static const char *prefix = PREFIX; |
18 | #endif | ||
19 | struct strbuf d = STRBUF_INIT; | 15 | struct strbuf d = STRBUF_INIT; |
20 | 16 | ||
21 | if (is_absolute_path(path)) | 17 | if (is_absolute_path(path)) |
22 | return path; | 18 | return path; |
23 | 19 | ||
24 | #ifdef RUNTIME_PREFIX | ||
25 | assert(argv0_path); | ||
26 | assert(is_absolute_path(argv0_path)); | ||
27 | |||
28 | if (!prefix && | ||
29 | !(prefix = strip_path_suffix(argv0_path, PERF_EXEC_PATH)) && | ||
30 | !(prefix = strip_path_suffix(argv0_path, BINDIR)) && | ||
31 | !(prefix = strip_path_suffix(argv0_path, "perf"))) { | ||
32 | prefix = PREFIX; | ||
33 | fprintf(stderr, "RUNTIME_PREFIX requested, " | ||
34 | "but prefix computation failed. " | ||
35 | "Using static fallback '%s'.\n", prefix); | ||
36 | } | ||
37 | #endif | ||
38 | |||
39 | strbuf_addf(&d, "%s/%s", prefix, path); | 20 | strbuf_addf(&d, "%s/%s", prefix, path); |
40 | path = strbuf_detach(&d, NULL); | 21 | path = strbuf_detach(&d, NULL); |
41 | return path; | 22 | return path; |
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index d7e67b167ea3..afb0849fe530 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c | |||
@@ -8,6 +8,8 @@ | |||
8 | #include <linux/list.h> | 8 | #include <linux/list.h> |
9 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
10 | 10 | ||
11 | #include "evlist.h" | ||
12 | #include "evsel.h" | ||
11 | #include "util.h" | 13 | #include "util.h" |
12 | #include "header.h" | 14 | #include "header.h" |
13 | #include "../perf.h" | 15 | #include "../perf.h" |
@@ -18,89 +20,6 @@ | |||
18 | 20 | ||
19 | static bool no_buildid_cache = false; | 21 | static bool no_buildid_cache = false; |
20 | 22 | ||
21 | /* | ||
22 | * Create new perf.data header attribute: | ||
23 | */ | ||
24 | struct perf_header_attr *perf_header_attr__new(struct perf_event_attr *attr) | ||
25 | { | ||
26 | struct perf_header_attr *self = malloc(sizeof(*self)); | ||
27 | |||
28 | if (self != NULL) { | ||
29 | self->attr = *attr; | ||
30 | self->ids = 0; | ||
31 | self->size = 1; | ||
32 | self->id = malloc(sizeof(u64)); | ||
33 | if (self->id == NULL) { | ||
34 | free(self); | ||
35 | self = NULL; | ||
36 | } | ||
37 | } | ||
38 | |||
39 | return self; | ||
40 | } | ||
41 | |||
42 | void perf_header_attr__delete(struct perf_header_attr *self) | ||
43 | { | ||
44 | free(self->id); | ||
45 | free(self); | ||
46 | } | ||
47 | |||
48 | int perf_header_attr__add_id(struct perf_header_attr *self, u64 id) | ||
49 | { | ||
50 | int pos = self->ids; | ||
51 | |||
52 | self->ids++; | ||
53 | if (self->ids > self->size) { | ||
54 | int nsize = self->size * 2; | ||
55 | u64 *nid = realloc(self->id, nsize * sizeof(u64)); | ||
56 | |||
57 | if (nid == NULL) | ||
58 | return -1; | ||
59 | |||
60 | self->size = nsize; | ||
61 | self->id = nid; | ||
62 | } | ||
63 | self->id[pos] = id; | ||
64 | return 0; | ||
65 | } | ||
66 | |||
67 | int perf_header__init(struct perf_header *self) | ||
68 | { | ||
69 | self->size = 1; | ||
70 | self->attr = malloc(sizeof(void *)); | ||
71 | return self->attr == NULL ? -ENOMEM : 0; | ||
72 | } | ||
73 | |||
74 | void perf_header__exit(struct perf_header *self) | ||
75 | { | ||
76 | int i; | ||
77 | for (i = 0; i < self->attrs; ++i) | ||
78 | perf_header_attr__delete(self->attr[i]); | ||
79 | free(self->attr); | ||
80 | } | ||
81 | |||
82 | int perf_header__add_attr(struct perf_header *self, | ||
83 | struct perf_header_attr *attr) | ||
84 | { | ||
85 | if (self->frozen) | ||
86 | return -1; | ||
87 | |||
88 | if (self->attrs == self->size) { | ||
89 | int nsize = self->size * 2; | ||
90 | struct perf_header_attr **nattr; | ||
91 | |||
92 | nattr = realloc(self->attr, nsize * sizeof(void *)); | ||
93 | if (nattr == NULL) | ||
94 | return -1; | ||
95 | |||
96 | self->size = nsize; | ||
97 | self->attr = nattr; | ||
98 | } | ||
99 | |||
100 | self->attr[self->attrs++] = attr; | ||
101 | return 0; | ||
102 | } | ||
103 | |||
104 | static int event_count; | 23 | static int event_count; |
105 | static struct perf_trace_event_type *events; | 24 | static struct perf_trace_event_type *events; |
106 | 25 | ||
@@ -147,14 +66,19 @@ struct perf_file_attr { | |||
147 | struct perf_file_section ids; | 66 | struct perf_file_section ids; |
148 | }; | 67 | }; |
149 | 68 | ||
150 | void perf_header__set_feat(struct perf_header *self, int feat) | 69 | void perf_header__set_feat(struct perf_header *header, int feat) |
70 | { | ||
71 | set_bit(feat, header->adds_features); | ||
72 | } | ||
73 | |||
74 | void perf_header__clear_feat(struct perf_header *header, int feat) | ||
151 | { | 75 | { |
152 | set_bit(feat, self->adds_features); | 76 | clear_bit(feat, header->adds_features); |
153 | } | 77 | } |
154 | 78 | ||
155 | bool perf_header__has_feat(const struct perf_header *self, int feat) | 79 | bool perf_header__has_feat(const struct perf_header *header, int feat) |
156 | { | 80 | { |
157 | return test_bit(feat, self->adds_features); | 81 | return test_bit(feat, header->adds_features); |
158 | } | 82 | } |
159 | 83 | ||
160 | static int do_write(int fd, const void *buf, size_t size) | 84 | static int do_write(int fd, const void *buf, size_t size) |
@@ -223,22 +147,22 @@ static int __dsos__write_buildid_table(struct list_head *head, pid_t pid, | |||
223 | return 0; | 147 | return 0; |
224 | } | 148 | } |
225 | 149 | ||
226 | static int machine__write_buildid_table(struct machine *self, int fd) | 150 | static int machine__write_buildid_table(struct machine *machine, int fd) |
227 | { | 151 | { |
228 | int err; | 152 | int err; |
229 | u16 kmisc = PERF_RECORD_MISC_KERNEL, | 153 | u16 kmisc = PERF_RECORD_MISC_KERNEL, |
230 | umisc = PERF_RECORD_MISC_USER; | 154 | umisc = PERF_RECORD_MISC_USER; |
231 | 155 | ||
232 | if (!machine__is_host(self)) { | 156 | if (!machine__is_host(machine)) { |
233 | kmisc = PERF_RECORD_MISC_GUEST_KERNEL; | 157 | kmisc = PERF_RECORD_MISC_GUEST_KERNEL; |
234 | umisc = PERF_RECORD_MISC_GUEST_USER; | 158 | umisc = PERF_RECORD_MISC_GUEST_USER; |
235 | } | 159 | } |
236 | 160 | ||
237 | err = __dsos__write_buildid_table(&self->kernel_dsos, self->pid, | 161 | err = __dsos__write_buildid_table(&machine->kernel_dsos, machine->pid, |
238 | kmisc, fd); | 162 | kmisc, fd); |
239 | if (err == 0) | 163 | if (err == 0) |
240 | err = __dsos__write_buildid_table(&self->user_dsos, | 164 | err = __dsos__write_buildid_table(&machine->user_dsos, |
241 | self->pid, umisc, fd); | 165 | machine->pid, umisc, fd); |
242 | return err; | 166 | return err; |
243 | } | 167 | } |
244 | 168 | ||
@@ -265,15 +189,24 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir, | |||
265 | const char *name, bool is_kallsyms) | 189 | const char *name, bool is_kallsyms) |
266 | { | 190 | { |
267 | const size_t size = PATH_MAX; | 191 | const size_t size = PATH_MAX; |
268 | char *filename = malloc(size), | 192 | char *realname, *filename = malloc(size), |
269 | *linkname = malloc(size), *targetname; | 193 | *linkname = malloc(size), *targetname; |
270 | int len, err = -1; | 194 | int len, err = -1; |
271 | 195 | ||
272 | if (filename == NULL || linkname == NULL) | 196 | if (is_kallsyms) { |
197 | if (symbol_conf.kptr_restrict) { | ||
198 | pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n"); | ||
199 | return 0; | ||
200 | } | ||
201 | realname = (char *)name; | ||
202 | } else | ||
203 | realname = realpath(name, NULL); | ||
204 | |||
205 | if (realname == NULL || filename == NULL || linkname == NULL) | ||
273 | goto out_free; | 206 | goto out_free; |
274 | 207 | ||
275 | len = snprintf(filename, size, "%s%s%s", | 208 | len = snprintf(filename, size, "%s%s%s", |
276 | debugdir, is_kallsyms ? "/" : "", name); | 209 | debugdir, is_kallsyms ? "/" : "", realname); |
277 | if (mkdir_p(filename, 0755)) | 210 | if (mkdir_p(filename, 0755)) |
278 | goto out_free; | 211 | goto out_free; |
279 | 212 | ||
@@ -283,7 +216,7 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir, | |||
283 | if (is_kallsyms) { | 216 | if (is_kallsyms) { |
284 | if (copyfile("/proc/kallsyms", filename)) | 217 | if (copyfile("/proc/kallsyms", filename)) |
285 | goto out_free; | 218 | goto out_free; |
286 | } else if (link(name, filename) && copyfile(name, filename)) | 219 | } else if (link(realname, filename) && copyfile(name, filename)) |
287 | goto out_free; | 220 | goto out_free; |
288 | } | 221 | } |
289 | 222 | ||
@@ -300,6 +233,8 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir, | |||
300 | if (symlink(targetname, linkname) == 0) | 233 | if (symlink(targetname, linkname) == 0) |
301 | err = 0; | 234 | err = 0; |
302 | out_free: | 235 | out_free: |
236 | if (!is_kallsyms) | ||
237 | free(realname); | ||
303 | free(filename); | 238 | free(filename); |
304 | free(linkname); | 239 | free(linkname); |
305 | return err; | 240 | return err; |
@@ -354,12 +289,12 @@ out_free: | |||
354 | return err; | 289 | return err; |
355 | } | 290 | } |
356 | 291 | ||
357 | static int dso__cache_build_id(struct dso *self, const char *debugdir) | 292 | static int dso__cache_build_id(struct dso *dso, const char *debugdir) |
358 | { | 293 | { |
359 | bool is_kallsyms = self->kernel && self->long_name[0] != '/'; | 294 | bool is_kallsyms = dso->kernel && dso->long_name[0] != '/'; |
360 | 295 | ||
361 | return build_id_cache__add_b(self->build_id, sizeof(self->build_id), | 296 | return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id), |
362 | self->long_name, debugdir, is_kallsyms); | 297 | dso->long_name, debugdir, is_kallsyms); |
363 | } | 298 | } |
364 | 299 | ||
365 | static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir) | 300 | static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir) |
@@ -374,14 +309,14 @@ static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir) | |||
374 | return err; | 309 | return err; |
375 | } | 310 | } |
376 | 311 | ||
377 | static int machine__cache_build_ids(struct machine *self, const char *debugdir) | 312 | static int machine__cache_build_ids(struct machine *machine, const char *debugdir) |
378 | { | 313 | { |
379 | int ret = __dsos__cache_build_ids(&self->kernel_dsos, debugdir); | 314 | int ret = __dsos__cache_build_ids(&machine->kernel_dsos, debugdir); |
380 | ret |= __dsos__cache_build_ids(&self->user_dsos, debugdir); | 315 | ret |= __dsos__cache_build_ids(&machine->user_dsos, debugdir); |
381 | return ret; | 316 | return ret; |
382 | } | 317 | } |
383 | 318 | ||
384 | static int perf_session__cache_build_ids(struct perf_session *self) | 319 | static int perf_session__cache_build_ids(struct perf_session *session) |
385 | { | 320 | { |
386 | struct rb_node *nd; | 321 | struct rb_node *nd; |
387 | int ret; | 322 | int ret; |
@@ -392,28 +327,28 @@ static int perf_session__cache_build_ids(struct perf_session *self) | |||
392 | if (mkdir(debugdir, 0755) != 0 && errno != EEXIST) | 327 | if (mkdir(debugdir, 0755) != 0 && errno != EEXIST) |
393 | return -1; | 328 | return -1; |
394 | 329 | ||
395 | ret = machine__cache_build_ids(&self->host_machine, debugdir); | 330 | ret = machine__cache_build_ids(&session->host_machine, debugdir); |
396 | 331 | ||
397 | for (nd = rb_first(&self->machines); nd; nd = rb_next(nd)) { | 332 | for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) { |
398 | struct machine *pos = rb_entry(nd, struct machine, rb_node); | 333 | struct machine *pos = rb_entry(nd, struct machine, rb_node); |
399 | ret |= machine__cache_build_ids(pos, debugdir); | 334 | ret |= machine__cache_build_ids(pos, debugdir); |
400 | } | 335 | } |
401 | return ret ? -1 : 0; | 336 | return ret ? -1 : 0; |
402 | } | 337 | } |
403 | 338 | ||
404 | static bool machine__read_build_ids(struct machine *self, bool with_hits) | 339 | static bool machine__read_build_ids(struct machine *machine, bool with_hits) |
405 | { | 340 | { |
406 | bool ret = __dsos__read_build_ids(&self->kernel_dsos, with_hits); | 341 | bool ret = __dsos__read_build_ids(&machine->kernel_dsos, with_hits); |
407 | ret |= __dsos__read_build_ids(&self->user_dsos, with_hits); | 342 | ret |= __dsos__read_build_ids(&machine->user_dsos, with_hits); |
408 | return ret; | 343 | return ret; |
409 | } | 344 | } |
410 | 345 | ||
411 | static bool perf_session__read_build_ids(struct perf_session *self, bool with_hits) | 346 | static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits) |
412 | { | 347 | { |
413 | struct rb_node *nd; | 348 | struct rb_node *nd; |
414 | bool ret = machine__read_build_ids(&self->host_machine, with_hits); | 349 | bool ret = machine__read_build_ids(&session->host_machine, with_hits); |
415 | 350 | ||
416 | for (nd = rb_first(&self->machines); nd; nd = rb_next(nd)) { | 351 | for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) { |
417 | struct machine *pos = rb_entry(nd, struct machine, rb_node); | 352 | struct machine *pos = rb_entry(nd, struct machine, rb_node); |
418 | ret |= machine__read_build_ids(pos, with_hits); | 353 | ret |= machine__read_build_ids(pos, with_hits); |
419 | } | 354 | } |
@@ -421,7 +356,8 @@ static bool perf_session__read_build_ids(struct perf_session *self, bool with_hi | |||
421 | return ret; | 356 | return ret; |
422 | } | 357 | } |
423 | 358 | ||
424 | static int perf_header__adds_write(struct perf_header *self, int fd) | 359 | static int perf_header__adds_write(struct perf_header *header, |
360 | struct perf_evlist *evlist, int fd) | ||
425 | { | 361 | { |
426 | int nr_sections; | 362 | int nr_sections; |
427 | struct perf_session *session; | 363 | struct perf_session *session; |
@@ -430,11 +366,13 @@ static int perf_header__adds_write(struct perf_header *self, int fd) | |||
430 | u64 sec_start; | 366 | u64 sec_start; |
431 | int idx = 0, err; | 367 | int idx = 0, err; |
432 | 368 | ||
433 | session = container_of(self, struct perf_session, header); | 369 | session = container_of(header, struct perf_session, header); |
434 | if (perf_session__read_build_ids(session, true)) | ||
435 | perf_header__set_feat(self, HEADER_BUILD_ID); | ||
436 | 370 | ||
437 | nr_sections = bitmap_weight(self->adds_features, HEADER_FEAT_BITS); | 371 | if (perf_header__has_feat(header, HEADER_BUILD_ID) && |
372 | !perf_session__read_build_ids(session, true)) | ||
373 | perf_header__clear_feat(header, HEADER_BUILD_ID); | ||
374 | |||
375 | nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS); | ||
438 | if (!nr_sections) | 376 | if (!nr_sections) |
439 | return 0; | 377 | return 0; |
440 | 378 | ||
@@ -444,28 +382,28 @@ static int perf_header__adds_write(struct perf_header *self, int fd) | |||
444 | 382 | ||
445 | sec_size = sizeof(*feat_sec) * nr_sections; | 383 | sec_size = sizeof(*feat_sec) * nr_sections; |
446 | 384 | ||
447 | sec_start = self->data_offset + self->data_size; | 385 | sec_start = header->data_offset + header->data_size; |
448 | lseek(fd, sec_start + sec_size, SEEK_SET); | 386 | lseek(fd, sec_start + sec_size, SEEK_SET); |
449 | 387 | ||
450 | if (perf_header__has_feat(self, HEADER_TRACE_INFO)) { | 388 | if (perf_header__has_feat(header, HEADER_TRACE_INFO)) { |
451 | struct perf_file_section *trace_sec; | 389 | struct perf_file_section *trace_sec; |
452 | 390 | ||
453 | trace_sec = &feat_sec[idx++]; | 391 | trace_sec = &feat_sec[idx++]; |
454 | 392 | ||
455 | /* Write trace info */ | 393 | /* Write trace info */ |
456 | trace_sec->offset = lseek(fd, 0, SEEK_CUR); | 394 | trace_sec->offset = lseek(fd, 0, SEEK_CUR); |
457 | read_tracing_data(fd, attrs, nr_counters); | 395 | read_tracing_data(fd, &evlist->entries); |
458 | trace_sec->size = lseek(fd, 0, SEEK_CUR) - trace_sec->offset; | 396 | trace_sec->size = lseek(fd, 0, SEEK_CUR) - trace_sec->offset; |
459 | } | 397 | } |
460 | 398 | ||
461 | if (perf_header__has_feat(self, HEADER_BUILD_ID)) { | 399 | if (perf_header__has_feat(header, HEADER_BUILD_ID)) { |
462 | struct perf_file_section *buildid_sec; | 400 | struct perf_file_section *buildid_sec; |
463 | 401 | ||
464 | buildid_sec = &feat_sec[idx++]; | 402 | buildid_sec = &feat_sec[idx++]; |
465 | 403 | ||
466 | /* Write build-ids */ | 404 | /* Write build-ids */ |
467 | buildid_sec->offset = lseek(fd, 0, SEEK_CUR); | 405 | buildid_sec->offset = lseek(fd, 0, SEEK_CUR); |
468 | err = dsos__write_buildid_table(self, fd); | 406 | err = dsos__write_buildid_table(header, fd); |
469 | if (err < 0) { | 407 | if (err < 0) { |
470 | pr_debug("failed to write buildid table\n"); | 408 | pr_debug("failed to write buildid table\n"); |
471 | goto out_free; | 409 | goto out_free; |
@@ -504,32 +442,41 @@ int perf_header__write_pipe(int fd) | |||
504 | return 0; | 442 | return 0; |
505 | } | 443 | } |
506 | 444 | ||
507 | int perf_header__write(struct perf_header *self, int fd, bool at_exit) | 445 | int perf_session__write_header(struct perf_session *session, |
446 | struct perf_evlist *evlist, | ||
447 | int fd, bool at_exit) | ||
508 | { | 448 | { |
509 | struct perf_file_header f_header; | 449 | struct perf_file_header f_header; |
510 | struct perf_file_attr f_attr; | 450 | struct perf_file_attr f_attr; |
511 | struct perf_header_attr *attr; | 451 | struct perf_header *header = &session->header; |
512 | int i, err; | 452 | struct perf_evsel *attr, *pair = NULL; |
453 | int err; | ||
513 | 454 | ||
514 | lseek(fd, sizeof(f_header), SEEK_SET); | 455 | lseek(fd, sizeof(f_header), SEEK_SET); |
515 | 456 | ||
516 | for (i = 0; i < self->attrs; i++) { | 457 | if (session->evlist != evlist) |
517 | attr = self->attr[i]; | 458 | pair = list_entry(session->evlist->entries.next, struct perf_evsel, node); |
518 | 459 | ||
460 | list_for_each_entry(attr, &evlist->entries, node) { | ||
519 | attr->id_offset = lseek(fd, 0, SEEK_CUR); | 461 | attr->id_offset = lseek(fd, 0, SEEK_CUR); |
520 | err = do_write(fd, attr->id, attr->ids * sizeof(u64)); | 462 | err = do_write(fd, attr->id, attr->ids * sizeof(u64)); |
521 | if (err < 0) { | 463 | if (err < 0) { |
464 | out_err_write: | ||
522 | pr_debug("failed to write perf header\n"); | 465 | pr_debug("failed to write perf header\n"); |
523 | return err; | 466 | return err; |
524 | } | 467 | } |
468 | if (session->evlist != evlist) { | ||
469 | err = do_write(fd, pair->id, pair->ids * sizeof(u64)); | ||
470 | if (err < 0) | ||
471 | goto out_err_write; | ||
472 | attr->ids += pair->ids; | ||
473 | pair = list_entry(pair->node.next, struct perf_evsel, node); | ||
474 | } | ||
525 | } | 475 | } |
526 | 476 | ||
477 | header->attr_offset = lseek(fd, 0, SEEK_CUR); | ||
527 | 478 | ||
528 | self->attr_offset = lseek(fd, 0, SEEK_CUR); | 479 | list_for_each_entry(attr, &evlist->entries, node) { |
529 | |||
530 | for (i = 0; i < self->attrs; i++) { | ||
531 | attr = self->attr[i]; | ||
532 | |||
533 | f_attr = (struct perf_file_attr){ | 480 | f_attr = (struct perf_file_attr){ |
534 | .attr = attr->attr, | 481 | .attr = attr->attr, |
535 | .ids = { | 482 | .ids = { |
@@ -544,20 +491,20 @@ int perf_header__write(struct perf_header *self, int fd, bool at_exit) | |||
544 | } | 491 | } |
545 | } | 492 | } |
546 | 493 | ||
547 | self->event_offset = lseek(fd, 0, SEEK_CUR); | 494 | header->event_offset = lseek(fd, 0, SEEK_CUR); |
548 | self->event_size = event_count * sizeof(struct perf_trace_event_type); | 495 | header->event_size = event_count * sizeof(struct perf_trace_event_type); |
549 | if (events) { | 496 | if (events) { |
550 | err = do_write(fd, events, self->event_size); | 497 | err = do_write(fd, events, header->event_size); |
551 | if (err < 0) { | 498 | if (err < 0) { |
552 | pr_debug("failed to write perf header events\n"); | 499 | pr_debug("failed to write perf header events\n"); |
553 | return err; | 500 | return err; |
554 | } | 501 | } |
555 | } | 502 | } |
556 | 503 | ||
557 | self->data_offset = lseek(fd, 0, SEEK_CUR); | 504 | header->data_offset = lseek(fd, 0, SEEK_CUR); |
558 | 505 | ||
559 | if (at_exit) { | 506 | if (at_exit) { |
560 | err = perf_header__adds_write(self, fd); | 507 | err = perf_header__adds_write(header, evlist, fd); |
561 | if (err < 0) | 508 | if (err < 0) |
562 | return err; | 509 | return err; |
563 | } | 510 | } |
@@ -567,20 +514,20 @@ int perf_header__write(struct perf_header *self, int fd, bool at_exit) | |||
567 | .size = sizeof(f_header), | 514 | .size = sizeof(f_header), |
568 | .attr_size = sizeof(f_attr), | 515 | .attr_size = sizeof(f_attr), |
569 | .attrs = { | 516 | .attrs = { |
570 | .offset = self->attr_offset, | 517 | .offset = header->attr_offset, |
571 | .size = self->attrs * sizeof(f_attr), | 518 | .size = evlist->nr_entries * sizeof(f_attr), |
572 | }, | 519 | }, |
573 | .data = { | 520 | .data = { |
574 | .offset = self->data_offset, | 521 | .offset = header->data_offset, |
575 | .size = self->data_size, | 522 | .size = header->data_size, |
576 | }, | 523 | }, |
577 | .event_types = { | 524 | .event_types = { |
578 | .offset = self->event_offset, | 525 | .offset = header->event_offset, |
579 | .size = self->event_size, | 526 | .size = header->event_size, |
580 | }, | 527 | }, |
581 | }; | 528 | }; |
582 | 529 | ||
583 | memcpy(&f_header.adds_features, &self->adds_features, sizeof(self->adds_features)); | 530 | memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features)); |
584 | 531 | ||
585 | lseek(fd, 0, SEEK_SET); | 532 | lseek(fd, 0, SEEK_SET); |
586 | err = do_write(fd, &f_header, sizeof(f_header)); | 533 | err = do_write(fd, &f_header, sizeof(f_header)); |
@@ -588,26 +535,26 @@ int perf_header__write(struct perf_header *self, int fd, bool at_exit) | |||
588 | pr_debug("failed to write perf header\n"); | 535 | pr_debug("failed to write perf header\n"); |
589 | return err; | 536 | return err; |
590 | } | 537 | } |
591 | lseek(fd, self->data_offset + self->data_size, SEEK_SET); | 538 | lseek(fd, header->data_offset + header->data_size, SEEK_SET); |
592 | 539 | ||
593 | self->frozen = 1; | 540 | header->frozen = 1; |
594 | return 0; | 541 | return 0; |
595 | } | 542 | } |
596 | 543 | ||
597 | static int perf_header__getbuffer64(struct perf_header *self, | 544 | static int perf_header__getbuffer64(struct perf_header *header, |
598 | int fd, void *buf, size_t size) | 545 | int fd, void *buf, size_t size) |
599 | { | 546 | { |
600 | if (do_read(fd, buf, size) <= 0) | 547 | if (readn(fd, buf, size) <= 0) |
601 | return -1; | 548 | return -1; |
602 | 549 | ||
603 | if (self->needs_swap) | 550 | if (header->needs_swap) |
604 | mem_bswap_64(buf, size); | 551 | mem_bswap_64(buf, size); |
605 | 552 | ||
606 | return 0; | 553 | return 0; |
607 | } | 554 | } |
608 | 555 | ||
609 | int perf_header__process_sections(struct perf_header *self, int fd, | 556 | int perf_header__process_sections(struct perf_header *header, int fd, |
610 | int (*process)(struct perf_file_section *self, | 557 | int (*process)(struct perf_file_section *section, |
611 | struct perf_header *ph, | 558 | struct perf_header *ph, |
612 | int feat, int fd)) | 559 | int feat, int fd)) |
613 | { | 560 | { |
@@ -617,7 +564,7 @@ int perf_header__process_sections(struct perf_header *self, int fd, | |||
617 | int idx = 0; | 564 | int idx = 0; |
618 | int err = -1, feat = 1; | 565 | int err = -1, feat = 1; |
619 | 566 | ||
620 | nr_sections = bitmap_weight(self->adds_features, HEADER_FEAT_BITS); | 567 | nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS); |
621 | if (!nr_sections) | 568 | if (!nr_sections) |
622 | return 0; | 569 | return 0; |
623 | 570 | ||
@@ -627,17 +574,17 @@ int perf_header__process_sections(struct perf_header *self, int fd, | |||
627 | 574 | ||
628 | sec_size = sizeof(*feat_sec) * nr_sections; | 575 | sec_size = sizeof(*feat_sec) * nr_sections; |
629 | 576 | ||
630 | lseek(fd, self->data_offset + self->data_size, SEEK_SET); | 577 | lseek(fd, header->data_offset + header->data_size, SEEK_SET); |
631 | 578 | ||
632 | if (perf_header__getbuffer64(self, fd, feat_sec, sec_size)) | 579 | if (perf_header__getbuffer64(header, fd, feat_sec, sec_size)) |
633 | goto out_free; | 580 | goto out_free; |
634 | 581 | ||
635 | err = 0; | 582 | err = 0; |
636 | while (idx < nr_sections && feat < HEADER_LAST_FEATURE) { | 583 | while (idx < nr_sections && feat < HEADER_LAST_FEATURE) { |
637 | if (perf_header__has_feat(self, feat)) { | 584 | if (perf_header__has_feat(header, feat)) { |
638 | struct perf_file_section *sec = &feat_sec[idx++]; | 585 | struct perf_file_section *sec = &feat_sec[idx++]; |
639 | 586 | ||
640 | err = process(sec, self, feat, fd); | 587 | err = process(sec, header, feat, fd); |
641 | if (err < 0) | 588 | if (err < 0) |
642 | break; | 589 | break; |
643 | } | 590 | } |
@@ -648,35 +595,35 @@ out_free: | |||
648 | return err; | 595 | return err; |
649 | } | 596 | } |
650 | 597 | ||
651 | int perf_file_header__read(struct perf_file_header *self, | 598 | int perf_file_header__read(struct perf_file_header *header, |
652 | struct perf_header *ph, int fd) | 599 | struct perf_header *ph, int fd) |
653 | { | 600 | { |
654 | lseek(fd, 0, SEEK_SET); | 601 | lseek(fd, 0, SEEK_SET); |
655 | 602 | ||
656 | if (do_read(fd, self, sizeof(*self)) <= 0 || | 603 | if (readn(fd, header, sizeof(*header)) <= 0 || |
657 | memcmp(&self->magic, __perf_magic, sizeof(self->magic))) | 604 | memcmp(&header->magic, __perf_magic, sizeof(header->magic))) |
658 | return -1; | 605 | return -1; |
659 | 606 | ||
660 | if (self->attr_size != sizeof(struct perf_file_attr)) { | 607 | if (header->attr_size != sizeof(struct perf_file_attr)) { |
661 | u64 attr_size = bswap_64(self->attr_size); | 608 | u64 attr_size = bswap_64(header->attr_size); |
662 | 609 | ||
663 | if (attr_size != sizeof(struct perf_file_attr)) | 610 | if (attr_size != sizeof(struct perf_file_attr)) |
664 | return -1; | 611 | return -1; |
665 | 612 | ||
666 | mem_bswap_64(self, offsetof(struct perf_file_header, | 613 | mem_bswap_64(header, offsetof(struct perf_file_header, |
667 | adds_features)); | 614 | adds_features)); |
668 | ph->needs_swap = true; | 615 | ph->needs_swap = true; |
669 | } | 616 | } |
670 | 617 | ||
671 | if (self->size != sizeof(*self)) { | 618 | if (header->size != sizeof(*header)) { |
672 | /* Support the previous format */ | 619 | /* Support the previous format */ |
673 | if (self->size == offsetof(typeof(*self), adds_features)) | 620 | if (header->size == offsetof(typeof(*header), adds_features)) |
674 | bitmap_zero(self->adds_features, HEADER_FEAT_BITS); | 621 | bitmap_zero(header->adds_features, HEADER_FEAT_BITS); |
675 | else | 622 | else |
676 | return -1; | 623 | return -1; |
677 | } | 624 | } |
678 | 625 | ||
679 | memcpy(&ph->adds_features, &self->adds_features, | 626 | memcpy(&ph->adds_features, &header->adds_features, |
680 | sizeof(ph->adds_features)); | 627 | sizeof(ph->adds_features)); |
681 | /* | 628 | /* |
682 | * FIXME: hack that assumes that if we need swap the perf.data file | 629 | * FIXME: hack that assumes that if we need swap the perf.data file |
@@ -690,10 +637,10 @@ int perf_file_header__read(struct perf_file_header *self, | |||
690 | perf_header__set_feat(ph, HEADER_BUILD_ID); | 637 | perf_header__set_feat(ph, HEADER_BUILD_ID); |
691 | } | 638 | } |
692 | 639 | ||
693 | ph->event_offset = self->event_types.offset; | 640 | ph->event_offset = header->event_types.offset; |
694 | ph->event_size = self->event_types.size; | 641 | ph->event_size = header->event_types.size; |
695 | ph->data_offset = self->data.offset; | 642 | ph->data_offset = header->data.offset; |
696 | ph->data_size = self->data.size; | 643 | ph->data_size = header->data.size; |
697 | return 0; | 644 | return 0; |
698 | } | 645 | } |
699 | 646 | ||
@@ -752,14 +699,50 @@ out: | |||
752 | return err; | 699 | return err; |
753 | } | 700 | } |
754 | 701 | ||
755 | static int perf_header__read_build_ids(struct perf_header *self, | 702 | static int perf_header__read_build_ids_abi_quirk(struct perf_header *header, |
756 | int input, u64 offset, u64 size) | 703 | int input, u64 offset, u64 size) |
757 | { | 704 | { |
758 | struct perf_session *session = container_of(self, | 705 | struct perf_session *session = container_of(header, struct perf_session, header); |
759 | struct perf_session, header); | 706 | struct { |
707 | struct perf_event_header header; | ||
708 | u8 build_id[ALIGN(BUILD_ID_SIZE, sizeof(u64))]; | ||
709 | char filename[0]; | ||
710 | } old_bev; | ||
760 | struct build_id_event bev; | 711 | struct build_id_event bev; |
761 | char filename[PATH_MAX]; | 712 | char filename[PATH_MAX]; |
762 | u64 limit = offset + size; | 713 | u64 limit = offset + size; |
714 | |||
715 | while (offset < limit) { | ||
716 | ssize_t len; | ||
717 | |||
718 | if (read(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev)) | ||
719 | return -1; | ||
720 | |||
721 | if (header->needs_swap) | ||
722 | perf_event_header__bswap(&old_bev.header); | ||
723 | |||
724 | len = old_bev.header.size - sizeof(old_bev); | ||
725 | if (read(input, filename, len) != len) | ||
726 | return -1; | ||
727 | |||
728 | bev.header = old_bev.header; | ||
729 | bev.pid = 0; | ||
730 | memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id)); | ||
731 | __event_process_build_id(&bev, filename, session); | ||
732 | |||
733 | offset += bev.header.size; | ||
734 | } | ||
735 | |||
736 | return 0; | ||
737 | } | ||
738 | |||
739 | static int perf_header__read_build_ids(struct perf_header *header, | ||
740 | int input, u64 offset, u64 size) | ||
741 | { | ||
742 | struct perf_session *session = container_of(header, struct perf_session, header); | ||
743 | struct build_id_event bev; | ||
744 | char filename[PATH_MAX]; | ||
745 | u64 limit = offset + size, orig_offset = offset; | ||
763 | int err = -1; | 746 | int err = -1; |
764 | 747 | ||
765 | while (offset < limit) { | 748 | while (offset < limit) { |
@@ -768,12 +751,30 @@ static int perf_header__read_build_ids(struct perf_header *self, | |||
768 | if (read(input, &bev, sizeof(bev)) != sizeof(bev)) | 751 | if (read(input, &bev, sizeof(bev)) != sizeof(bev)) |
769 | goto out; | 752 | goto out; |
770 | 753 | ||
771 | if (self->needs_swap) | 754 | if (header->needs_swap) |
772 | perf_event_header__bswap(&bev.header); | 755 | perf_event_header__bswap(&bev.header); |
773 | 756 | ||
774 | len = bev.header.size - sizeof(bev); | 757 | len = bev.header.size - sizeof(bev); |
775 | if (read(input, filename, len) != len) | 758 | if (read(input, filename, len) != len) |
776 | goto out; | 759 | goto out; |
760 | /* | ||
761 | * The a1645ce1 changeset: | ||
762 | * | ||
763 | * "perf: 'perf kvm' tool for monitoring guest performance from host" | ||
764 | * | ||
765 | * Added a field to struct build_id_event that broke the file | ||
766 | * format. | ||
767 | * | ||
768 | * Since the kernel build-id is the first entry, process the | ||
769 | * table using the old format if the well known | ||
770 | * '[kernel.kallsyms]' string for the kernel build-id has the | ||
771 | * first 4 characters chopped off (where the pid_t sits). | ||
772 | */ | ||
773 | if (memcmp(filename, "nel.kallsyms]", 13) == 0) { | ||
774 | if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1) | ||
775 | return -1; | ||
776 | return perf_header__read_build_ids_abi_quirk(header, input, offset, size); | ||
777 | } | ||
777 | 778 | ||
778 | __event_process_build_id(&bev, filename, session); | 779 | __event_process_build_id(&bev, filename, session); |
779 | 780 | ||
@@ -784,13 +785,13 @@ out: | |||
784 | return err; | 785 | return err; |
785 | } | 786 | } |
786 | 787 | ||
787 | static int perf_file_section__process(struct perf_file_section *self, | 788 | static int perf_file_section__process(struct perf_file_section *section, |
788 | struct perf_header *ph, | 789 | struct perf_header *ph, |
789 | int feat, int fd) | 790 | int feat, int fd) |
790 | { | 791 | { |
791 | if (lseek(fd, self->offset, SEEK_SET) == (off_t)-1) { | 792 | if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) { |
792 | pr_debug("Failed to lseek to %Ld offset for feature %d, " | 793 | pr_debug("Failed to lseek to %" PRIu64 " offset for feature " |
793 | "continuing...\n", self->offset, feat); | 794 | "%d, continuing...\n", section->offset, feat); |
794 | return 0; | 795 | return 0; |
795 | } | 796 | } |
796 | 797 | ||
@@ -800,7 +801,7 @@ static int perf_file_section__process(struct perf_file_section *self, | |||
800 | break; | 801 | break; |
801 | 802 | ||
802 | case HEADER_BUILD_ID: | 803 | case HEADER_BUILD_ID: |
803 | if (perf_header__read_build_ids(ph, fd, self->offset, self->size)) | 804 | if (perf_header__read_build_ids(ph, fd, section->offset, section->size)) |
804 | pr_debug("Failed to read buildids, continuing...\n"); | 805 | pr_debug("Failed to read buildids, continuing...\n"); |
805 | break; | 806 | break; |
806 | default: | 807 | default: |
@@ -810,21 +811,21 @@ static int perf_file_section__process(struct perf_file_section *self, | |||
810 | return 0; | 811 | return 0; |
811 | } | 812 | } |
812 | 813 | ||
813 | static int perf_file_header__read_pipe(struct perf_pipe_file_header *self, | 814 | static int perf_file_header__read_pipe(struct perf_pipe_file_header *header, |
814 | struct perf_header *ph, int fd, | 815 | struct perf_header *ph, int fd, |
815 | bool repipe) | 816 | bool repipe) |
816 | { | 817 | { |
817 | if (do_read(fd, self, sizeof(*self)) <= 0 || | 818 | if (readn(fd, header, sizeof(*header)) <= 0 || |
818 | memcmp(&self->magic, __perf_magic, sizeof(self->magic))) | 819 | memcmp(&header->magic, __perf_magic, sizeof(header->magic))) |
819 | return -1; | 820 | return -1; |
820 | 821 | ||
821 | if (repipe && do_write(STDOUT_FILENO, self, sizeof(*self)) < 0) | 822 | if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0) |
822 | return -1; | 823 | return -1; |
823 | 824 | ||
824 | if (self->size != sizeof(*self)) { | 825 | if (header->size != sizeof(*header)) { |
825 | u64 size = bswap_64(self->size); | 826 | u64 size = bswap_64(header->size); |
826 | 827 | ||
827 | if (size != sizeof(*self)) | 828 | if (size != sizeof(*header)) |
828 | return -1; | 829 | return -1; |
829 | 830 | ||
830 | ph->needs_swap = true; | 831 | ph->needs_swap = true; |
@@ -835,10 +836,10 @@ static int perf_file_header__read_pipe(struct perf_pipe_file_header *self, | |||
835 | 836 | ||
836 | static int perf_header__read_pipe(struct perf_session *session, int fd) | 837 | static int perf_header__read_pipe(struct perf_session *session, int fd) |
837 | { | 838 | { |
838 | struct perf_header *self = &session->header; | 839 | struct perf_header *header = &session->header; |
839 | struct perf_pipe_file_header f_header; | 840 | struct perf_pipe_file_header f_header; |
840 | 841 | ||
841 | if (perf_file_header__read_pipe(&f_header, self, fd, | 842 | if (perf_file_header__read_pipe(&f_header, header, fd, |
842 | session->repipe) < 0) { | 843 | session->repipe) < 0) { |
843 | pr_debug("incompatible file format\n"); | 844 | pr_debug("incompatible file format\n"); |
844 | return -EINVAL; | 845 | return -EINVAL; |
@@ -849,18 +850,22 @@ static int perf_header__read_pipe(struct perf_session *session, int fd) | |||
849 | return 0; | 850 | return 0; |
850 | } | 851 | } |
851 | 852 | ||
852 | int perf_header__read(struct perf_session *session, int fd) | 853 | int perf_session__read_header(struct perf_session *session, int fd) |
853 | { | 854 | { |
854 | struct perf_header *self = &session->header; | 855 | struct perf_header *header = &session->header; |
855 | struct perf_file_header f_header; | 856 | struct perf_file_header f_header; |
856 | struct perf_file_attr f_attr; | 857 | struct perf_file_attr f_attr; |
857 | u64 f_id; | 858 | u64 f_id; |
858 | int nr_attrs, nr_ids, i, j; | 859 | int nr_attrs, nr_ids, i, j; |
859 | 860 | ||
861 | session->evlist = perf_evlist__new(NULL, NULL); | ||
862 | if (session->evlist == NULL) | ||
863 | return -ENOMEM; | ||
864 | |||
860 | if (session->fd_pipe) | 865 | if (session->fd_pipe) |
861 | return perf_header__read_pipe(session, fd); | 866 | return perf_header__read_pipe(session, fd); |
862 | 867 | ||
863 | if (perf_file_header__read(&f_header, self, fd) < 0) { | 868 | if (perf_file_header__read(&f_header, header, fd) < 0) { |
864 | pr_debug("incompatible file format\n"); | 869 | pr_debug("incompatible file format\n"); |
865 | return -EINVAL; | 870 | return -EINVAL; |
866 | } | 871 | } |
@@ -869,33 +874,39 @@ int perf_header__read(struct perf_session *session, int fd) | |||
869 | lseek(fd, f_header.attrs.offset, SEEK_SET); | 874 | lseek(fd, f_header.attrs.offset, SEEK_SET); |
870 | 875 | ||
871 | for (i = 0; i < nr_attrs; i++) { | 876 | for (i = 0; i < nr_attrs; i++) { |
872 | struct perf_header_attr *attr; | 877 | struct perf_evsel *evsel; |
873 | off_t tmp; | 878 | off_t tmp; |
874 | 879 | ||
875 | if (perf_header__getbuffer64(self, fd, &f_attr, sizeof(f_attr))) | 880 | if (perf_header__getbuffer64(header, fd, &f_attr, sizeof(f_attr))) |
876 | goto out_errno; | 881 | goto out_errno; |
877 | 882 | ||
878 | tmp = lseek(fd, 0, SEEK_CUR); | 883 | tmp = lseek(fd, 0, SEEK_CUR); |
884 | evsel = perf_evsel__new(&f_attr.attr, i); | ||
879 | 885 | ||
880 | attr = perf_header_attr__new(&f_attr.attr); | 886 | if (evsel == NULL) |
881 | if (attr == NULL) | 887 | goto out_delete_evlist; |
882 | return -ENOMEM; | 888 | /* |
889 | * Do it before so that if perf_evsel__alloc_id fails, this | ||
890 | * entry gets purged too at perf_evlist__delete(). | ||
891 | */ | ||
892 | perf_evlist__add(session->evlist, evsel); | ||
883 | 893 | ||
884 | nr_ids = f_attr.ids.size / sizeof(u64); | 894 | nr_ids = f_attr.ids.size / sizeof(u64); |
895 | /* | ||
896 | * We don't have the cpu and thread maps in the header, so | ||
897 | * for allocating the perf_sample_id table we fake 1 cpu and | ||
898 | * nr_ids threads. | ||
899 | */ | ||
900 | if (perf_evsel__alloc_id(evsel, 1, nr_ids)) | ||
901 | goto out_delete_evlist; | ||
902 | |||
885 | lseek(fd, f_attr.ids.offset, SEEK_SET); | 903 | lseek(fd, f_attr.ids.offset, SEEK_SET); |
886 | 904 | ||
887 | for (j = 0; j < nr_ids; j++) { | 905 | for (j = 0; j < nr_ids; j++) { |
888 | if (perf_header__getbuffer64(self, fd, &f_id, sizeof(f_id))) | 906 | if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id))) |
889 | goto out_errno; | 907 | goto out_errno; |
890 | 908 | ||
891 | if (perf_header_attr__add_id(attr, f_id) < 0) { | 909 | perf_evlist__id_add(session->evlist, evsel, 0, j, f_id); |
892 | perf_header_attr__delete(attr); | ||
893 | return -ENOMEM; | ||
894 | } | ||
895 | } | ||
896 | if (perf_header__add_attr(self, attr) < 0) { | ||
897 | perf_header_attr__delete(attr); | ||
898 | return -ENOMEM; | ||
899 | } | 910 | } |
900 | 911 | ||
901 | lseek(fd, tmp, SEEK_SET); | 912 | lseek(fd, tmp, SEEK_SET); |
@@ -906,70 +917,32 @@ int perf_header__read(struct perf_session *session, int fd) | |||
906 | events = malloc(f_header.event_types.size); | 917 | events = malloc(f_header.event_types.size); |
907 | if (events == NULL) | 918 | if (events == NULL) |
908 | return -ENOMEM; | 919 | return -ENOMEM; |
909 | if (perf_header__getbuffer64(self, fd, events, | 920 | if (perf_header__getbuffer64(header, fd, events, |
910 | f_header.event_types.size)) | 921 | f_header.event_types.size)) |
911 | goto out_errno; | 922 | goto out_errno; |
912 | event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type); | 923 | event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type); |
913 | } | 924 | } |
914 | 925 | ||
915 | perf_header__process_sections(self, fd, perf_file_section__process); | 926 | perf_header__process_sections(header, fd, perf_file_section__process); |
916 | 927 | ||
917 | lseek(fd, self->data_offset, SEEK_SET); | 928 | lseek(fd, header->data_offset, SEEK_SET); |
918 | 929 | ||
919 | self->frozen = 1; | 930 | header->frozen = 1; |
920 | return 0; | 931 | return 0; |
921 | out_errno: | 932 | out_errno: |
922 | return -errno; | 933 | return -errno; |
923 | } | ||
924 | |||
925 | u64 perf_header__sample_type(struct perf_header *header) | ||
926 | { | ||
927 | u64 type = 0; | ||
928 | int i; | ||
929 | |||
930 | for (i = 0; i < header->attrs; i++) { | ||
931 | struct perf_header_attr *attr = header->attr[i]; | ||
932 | |||
933 | if (!type) | ||
934 | type = attr->attr.sample_type; | ||
935 | else if (type != attr->attr.sample_type) | ||
936 | die("non matching sample_type"); | ||
937 | } | ||
938 | |||
939 | return type; | ||
940 | } | ||
941 | |||
942 | struct perf_event_attr * | ||
943 | perf_header__find_attr(u64 id, struct perf_header *header) | ||
944 | { | ||
945 | int i; | ||
946 | |||
947 | /* | ||
948 | * We set id to -1 if the data file doesn't contain sample | ||
949 | * ids. Check for this and avoid walking through the entire | ||
950 | * list of ids which may be large. | ||
951 | */ | ||
952 | if (id == -1ULL) | ||
953 | return NULL; | ||
954 | |||
955 | for (i = 0; i < header->attrs; i++) { | ||
956 | struct perf_header_attr *attr = header->attr[i]; | ||
957 | int j; | ||
958 | |||
959 | for (j = 0; j < attr->ids; j++) { | ||
960 | if (attr->id[j] == id) | ||
961 | return &attr->attr; | ||
962 | } | ||
963 | } | ||
964 | 934 | ||
965 | return NULL; | 935 | out_delete_evlist: |
936 | perf_evlist__delete(session->evlist); | ||
937 | session->evlist = NULL; | ||
938 | return -ENOMEM; | ||
966 | } | 939 | } |
967 | 940 | ||
968 | int event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id, | 941 | int perf_event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id, |
969 | event__handler_t process, | 942 | perf_event__handler_t process, |
970 | struct perf_session *session) | 943 | struct perf_session *session) |
971 | { | 944 | { |
972 | event_t *ev; | 945 | union perf_event *ev; |
973 | size_t size; | 946 | size_t size; |
974 | int err; | 947 | int err; |
975 | 948 | ||
@@ -980,31 +953,31 @@ int event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id, | |||
980 | 953 | ||
981 | ev = malloc(size); | 954 | ev = malloc(size); |
982 | 955 | ||
956 | if (ev == NULL) | ||
957 | return -ENOMEM; | ||
958 | |||
983 | ev->attr.attr = *attr; | 959 | ev->attr.attr = *attr; |
984 | memcpy(ev->attr.id, id, ids * sizeof(u64)); | 960 | memcpy(ev->attr.id, id, ids * sizeof(u64)); |
985 | 961 | ||
986 | ev->attr.header.type = PERF_RECORD_HEADER_ATTR; | 962 | ev->attr.header.type = PERF_RECORD_HEADER_ATTR; |
987 | ev->attr.header.size = size; | 963 | ev->attr.header.size = size; |
988 | 964 | ||
989 | err = process(ev, session); | 965 | err = process(ev, NULL, session); |
990 | 966 | ||
991 | free(ev); | 967 | free(ev); |
992 | 968 | ||
993 | return err; | 969 | return err; |
994 | } | 970 | } |
995 | 971 | ||
996 | int event__synthesize_attrs(struct perf_header *self, | 972 | int perf_session__synthesize_attrs(struct perf_session *session, |
997 | event__handler_t process, | 973 | perf_event__handler_t process) |
998 | struct perf_session *session) | ||
999 | { | 974 | { |
1000 | struct perf_header_attr *attr; | 975 | struct perf_evsel *attr; |
1001 | int i, err = 0; | 976 | int err = 0; |
1002 | |||
1003 | for (i = 0; i < self->attrs; i++) { | ||
1004 | attr = self->attr[i]; | ||
1005 | 977 | ||
1006 | err = event__synthesize_attr(&attr->attr, attr->ids, attr->id, | 978 | list_for_each_entry(attr, &session->evlist->entries, node) { |
1007 | process, session); | 979 | err = perf_event__synthesize_attr(&attr->attr, attr->ids, |
980 | attr->id, process, session); | ||
1008 | if (err) { | 981 | if (err) { |
1009 | pr_debug("failed to create perf header attribute\n"); | 982 | pr_debug("failed to create perf header attribute\n"); |
1010 | return err; | 983 | return err; |
@@ -1014,29 +987,39 @@ int event__synthesize_attrs(struct perf_header *self, | |||
1014 | return err; | 987 | return err; |
1015 | } | 988 | } |
1016 | 989 | ||
1017 | int event__process_attr(event_t *self, struct perf_session *session) | 990 | int perf_event__process_attr(union perf_event *event, |
991 | struct perf_session *session) | ||
1018 | { | 992 | { |
1019 | struct perf_header_attr *attr; | ||
1020 | unsigned int i, ids, n_ids; | 993 | unsigned int i, ids, n_ids; |
994 | struct perf_evsel *evsel; | ||
995 | |||
996 | if (session->evlist == NULL) { | ||
997 | session->evlist = perf_evlist__new(NULL, NULL); | ||
998 | if (session->evlist == NULL) | ||
999 | return -ENOMEM; | ||
1000 | } | ||
1021 | 1001 | ||
1022 | attr = perf_header_attr__new(&self->attr.attr); | 1002 | evsel = perf_evsel__new(&event->attr.attr, |
1023 | if (attr == NULL) | 1003 | session->evlist->nr_entries); |
1004 | if (evsel == NULL) | ||
1024 | return -ENOMEM; | 1005 | return -ENOMEM; |
1025 | 1006 | ||
1026 | ids = self->header.size; | 1007 | perf_evlist__add(session->evlist, evsel); |
1027 | ids -= (void *)&self->attr.id - (void *)self; | 1008 | |
1009 | ids = event->header.size; | ||
1010 | ids -= (void *)&event->attr.id - (void *)event; | ||
1028 | n_ids = ids / sizeof(u64); | 1011 | n_ids = ids / sizeof(u64); |
1012 | /* | ||
1013 | * We don't have the cpu and thread maps in the header, so | ||
1014 | * for allocating the perf_sample_id table we fake 1 cpu and | ||
1015 | * n_ids threads. | ||
1016 | */ | ||
1017 | if (perf_evsel__alloc_id(evsel, 1, n_ids)) | ||
1018 | return -ENOMEM; | ||
1029 | 1019 | ||
1030 | for (i = 0; i < n_ids; i++) { | 1020 | for (i = 0; i < n_ids; i++) { |
1031 | if (perf_header_attr__add_id(attr, self->attr.id[i]) < 0) { | 1021 | perf_evlist__id_add(session->evlist, evsel, 0, i, |
1032 | perf_header_attr__delete(attr); | 1022 | event->attr.id[i]); |
1033 | return -ENOMEM; | ||
1034 | } | ||
1035 | } | ||
1036 | |||
1037 | if (perf_header__add_attr(&session->header, attr) < 0) { | ||
1038 | perf_header_attr__delete(attr); | ||
1039 | return -ENOMEM; | ||
1040 | } | 1023 | } |
1041 | 1024 | ||
1042 | perf_session__update_sample_type(session); | 1025 | perf_session__update_sample_type(session); |
@@ -1044,11 +1027,11 @@ int event__process_attr(event_t *self, struct perf_session *session) | |||
1044 | return 0; | 1027 | return 0; |
1045 | } | 1028 | } |
1046 | 1029 | ||
1047 | int event__synthesize_event_type(u64 event_id, char *name, | 1030 | int perf_event__synthesize_event_type(u64 event_id, char *name, |
1048 | event__handler_t process, | 1031 | perf_event__handler_t process, |
1049 | struct perf_session *session) | 1032 | struct perf_session *session) |
1050 | { | 1033 | { |
1051 | event_t ev; | 1034 | union perf_event ev; |
1052 | size_t size = 0; | 1035 | size_t size = 0; |
1053 | int err = 0; | 1036 | int err = 0; |
1054 | 1037 | ||
@@ -1064,13 +1047,13 @@ int event__synthesize_event_type(u64 event_id, char *name, | |||
1064 | ev.event_type.header.size = sizeof(ev.event_type) - | 1047 | ev.event_type.header.size = sizeof(ev.event_type) - |
1065 | (sizeof(ev.event_type.event_type.name) - size); | 1048 | (sizeof(ev.event_type.event_type.name) - size); |
1066 | 1049 | ||
1067 | err = process(&ev, session); | 1050 | err = process(&ev, NULL, session); |
1068 | 1051 | ||
1069 | return err; | 1052 | return err; |
1070 | } | 1053 | } |
1071 | 1054 | ||
1072 | int event__synthesize_event_types(event__handler_t process, | 1055 | int perf_event__synthesize_event_types(perf_event__handler_t process, |
1073 | struct perf_session *session) | 1056 | struct perf_session *session) |
1074 | { | 1057 | { |
1075 | struct perf_trace_event_type *type; | 1058 | struct perf_trace_event_type *type; |
1076 | int i, err = 0; | 1059 | int i, err = 0; |
@@ -1078,8 +1061,9 @@ int event__synthesize_event_types(event__handler_t process, | |||
1078 | for (i = 0; i < event_count; i++) { | 1061 | for (i = 0; i < event_count; i++) { |
1079 | type = &events[i]; | 1062 | type = &events[i]; |
1080 | 1063 | ||
1081 | err = event__synthesize_event_type(type->event_id, type->name, | 1064 | err = perf_event__synthesize_event_type(type->event_id, |
1082 | process, session); | 1065 | type->name, process, |
1066 | session); | ||
1083 | if (err) { | 1067 | if (err) { |
1084 | pr_debug("failed to create perf header event type\n"); | 1068 | pr_debug("failed to create perf header event type\n"); |
1085 | return err; | 1069 | return err; |
@@ -1089,29 +1073,28 @@ int event__synthesize_event_types(event__handler_t process, | |||
1089 | return err; | 1073 | return err; |
1090 | } | 1074 | } |
1091 | 1075 | ||
1092 | int event__process_event_type(event_t *self, | 1076 | int perf_event__process_event_type(union perf_event *event, |
1093 | struct perf_session *session __unused) | 1077 | struct perf_session *session __unused) |
1094 | { | 1078 | { |
1095 | if (perf_header__push_event(self->event_type.event_type.event_id, | 1079 | if (perf_header__push_event(event->event_type.event_type.event_id, |
1096 | self->event_type.event_type.name) < 0) | 1080 | event->event_type.event_type.name) < 0) |
1097 | return -ENOMEM; | 1081 | return -ENOMEM; |
1098 | 1082 | ||
1099 | return 0; | 1083 | return 0; |
1100 | } | 1084 | } |
1101 | 1085 | ||
1102 | int event__synthesize_tracing_data(int fd, struct perf_event_attr *pattrs, | 1086 | int perf_event__synthesize_tracing_data(int fd, struct perf_evlist *evlist, |
1103 | int nb_events, | 1087 | perf_event__handler_t process, |
1104 | event__handler_t process, | ||
1105 | struct perf_session *session __unused) | 1088 | struct perf_session *session __unused) |
1106 | { | 1089 | { |
1107 | event_t ev; | 1090 | union perf_event ev; |
1108 | ssize_t size = 0, aligned_size = 0, padding; | 1091 | ssize_t size = 0, aligned_size = 0, padding; |
1109 | int err = 0; | 1092 | int err __used = 0; |
1110 | 1093 | ||
1111 | memset(&ev, 0, sizeof(ev)); | 1094 | memset(&ev, 0, sizeof(ev)); |
1112 | 1095 | ||
1113 | ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA; | 1096 | ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA; |
1114 | size = read_tracing_data_size(fd, pattrs, nb_events); | 1097 | size = read_tracing_data_size(fd, &evlist->entries); |
1115 | if (size <= 0) | 1098 | if (size <= 0) |
1116 | return size; | 1099 | return size; |
1117 | aligned_size = ALIGN(size, sizeof(u64)); | 1100 | aligned_size = ALIGN(size, sizeof(u64)); |
@@ -1119,18 +1102,18 @@ int event__synthesize_tracing_data(int fd, struct perf_event_attr *pattrs, | |||
1119 | ev.tracing_data.header.size = sizeof(ev.tracing_data); | 1102 | ev.tracing_data.header.size = sizeof(ev.tracing_data); |
1120 | ev.tracing_data.size = aligned_size; | 1103 | ev.tracing_data.size = aligned_size; |
1121 | 1104 | ||
1122 | process(&ev, session); | 1105 | process(&ev, NULL, session); |
1123 | 1106 | ||
1124 | err = read_tracing_data(fd, pattrs, nb_events); | 1107 | err = read_tracing_data(fd, &evlist->entries); |
1125 | write_padded(fd, NULL, 0, padding); | 1108 | write_padded(fd, NULL, 0, padding); |
1126 | 1109 | ||
1127 | return aligned_size; | 1110 | return aligned_size; |
1128 | } | 1111 | } |
1129 | 1112 | ||
1130 | int event__process_tracing_data(event_t *self, | 1113 | int perf_event__process_tracing_data(union perf_event *event, |
1131 | struct perf_session *session) | 1114 | struct perf_session *session) |
1132 | { | 1115 | { |
1133 | ssize_t size_read, padding, size = self->tracing_data.size; | 1116 | ssize_t size_read, padding, size = event->tracing_data.size; |
1134 | off_t offset = lseek(session->fd, 0, SEEK_CUR); | 1117 | off_t offset = lseek(session->fd, 0, SEEK_CUR); |
1135 | char buf[BUFSIZ]; | 1118 | char buf[BUFSIZ]; |
1136 | 1119 | ||
@@ -1156,12 +1139,12 @@ int event__process_tracing_data(event_t *self, | |||
1156 | return size_read + padding; | 1139 | return size_read + padding; |
1157 | } | 1140 | } |
1158 | 1141 | ||
1159 | int event__synthesize_build_id(struct dso *pos, u16 misc, | 1142 | int perf_event__synthesize_build_id(struct dso *pos, u16 misc, |
1160 | event__handler_t process, | 1143 | perf_event__handler_t process, |
1161 | struct machine *machine, | 1144 | struct machine *machine, |
1162 | struct perf_session *session) | 1145 | struct perf_session *session) |
1163 | { | 1146 | { |
1164 | event_t ev; | 1147 | union perf_event ev; |
1165 | size_t len; | 1148 | size_t len; |
1166 | int err = 0; | 1149 | int err = 0; |
1167 | 1150 | ||
@@ -1179,16 +1162,16 @@ int event__synthesize_build_id(struct dso *pos, u16 misc, | |||
1179 | ev.build_id.header.size = sizeof(ev.build_id) + len; | 1162 | ev.build_id.header.size = sizeof(ev.build_id) + len; |
1180 | memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len); | 1163 | memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len); |
1181 | 1164 | ||
1182 | err = process(&ev, session); | 1165 | err = process(&ev, NULL, session); |
1183 | 1166 | ||
1184 | return err; | 1167 | return err; |
1185 | } | 1168 | } |
1186 | 1169 | ||
1187 | int event__process_build_id(event_t *self, | 1170 | int perf_event__process_build_id(union perf_event *event, |
1188 | struct perf_session *session) | 1171 | struct perf_session *session) |
1189 | { | 1172 | { |
1190 | __event_process_build_id(&self->build_id, | 1173 | __event_process_build_id(&event->build_id, |
1191 | self->build_id.filename, | 1174 | event->build_id.filename, |
1192 | session); | 1175 | session); |
1193 | return 0; | 1176 | return 0; |
1194 | } | 1177 | } |
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h index 402ac2454cf8..1886256768a1 100644 --- a/tools/perf/util/header.h +++ b/tools/perf/util/header.h | |||
@@ -9,13 +9,6 @@ | |||
9 | 9 | ||
10 | #include <linux/bitmap.h> | 10 | #include <linux/bitmap.h> |
11 | 11 | ||
12 | struct perf_header_attr { | ||
13 | struct perf_event_attr attr; | ||
14 | int ids, size; | ||
15 | u64 *id; | ||
16 | off_t id_offset; | ||
17 | }; | ||
18 | |||
19 | enum { | 12 | enum { |
20 | HEADER_TRACE_INFO = 1, | 13 | HEADER_TRACE_INFO = 1, |
21 | HEADER_BUILD_ID, | 14 | HEADER_BUILD_ID, |
@@ -46,14 +39,12 @@ struct perf_pipe_file_header { | |||
46 | 39 | ||
47 | struct perf_header; | 40 | struct perf_header; |
48 | 41 | ||
49 | int perf_file_header__read(struct perf_file_header *self, | 42 | int perf_file_header__read(struct perf_file_header *header, |
50 | struct perf_header *ph, int fd); | 43 | struct perf_header *ph, int fd); |
51 | 44 | ||
52 | struct perf_header { | 45 | struct perf_header { |
53 | int frozen; | 46 | int frozen; |
54 | int attrs, size; | ||
55 | bool needs_swap; | 47 | bool needs_swap; |
56 | struct perf_header_attr **attr; | ||
57 | s64 attr_offset; | 48 | s64 attr_offset; |
58 | u64 data_offset; | 49 | u64 data_offset; |
59 | u64 data_size; | 50 | u64 data_size; |
@@ -62,32 +53,23 @@ struct perf_header { | |||
62 | DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS); | 53 | DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS); |
63 | }; | 54 | }; |
64 | 55 | ||
65 | int perf_header__init(struct perf_header *self); | 56 | struct perf_evlist; |
66 | void perf_header__exit(struct perf_header *self); | ||
67 | 57 | ||
68 | int perf_header__read(struct perf_session *session, int fd); | 58 | int perf_session__read_header(struct perf_session *session, int fd); |
69 | int perf_header__write(struct perf_header *self, int fd, bool at_exit); | 59 | int perf_session__write_header(struct perf_session *session, |
60 | struct perf_evlist *evlist, | ||
61 | int fd, bool at_exit); | ||
70 | int perf_header__write_pipe(int fd); | 62 | int perf_header__write_pipe(int fd); |
71 | 63 | ||
72 | int perf_header__add_attr(struct perf_header *self, | ||
73 | struct perf_header_attr *attr); | ||
74 | |||
75 | int perf_header__push_event(u64 id, const char *name); | 64 | int perf_header__push_event(u64 id, const char *name); |
76 | char *perf_header__find_event(u64 id); | 65 | char *perf_header__find_event(u64 id); |
77 | 66 | ||
78 | struct perf_header_attr *perf_header_attr__new(struct perf_event_attr *attr); | 67 | void perf_header__set_feat(struct perf_header *header, int feat); |
79 | void perf_header_attr__delete(struct perf_header_attr *self); | 68 | void perf_header__clear_feat(struct perf_header *header, int feat); |
69 | bool perf_header__has_feat(const struct perf_header *header, int feat); | ||
80 | 70 | ||
81 | int perf_header_attr__add_id(struct perf_header_attr *self, u64 id); | 71 | int perf_header__process_sections(struct perf_header *header, int fd, |
82 | 72 | int (*process)(struct perf_file_section *section, | |
83 | u64 perf_header__sample_type(struct perf_header *header); | ||
84 | struct perf_event_attr * | ||
85 | perf_header__find_attr(u64 id, struct perf_header *header); | ||
86 | void perf_header__set_feat(struct perf_header *self, int feat); | ||
87 | bool perf_header__has_feat(const struct perf_header *self, int feat); | ||
88 | |||
89 | int perf_header__process_sections(struct perf_header *self, int fd, | ||
90 | int (*process)(struct perf_file_section *self, | ||
91 | struct perf_header *ph, | 73 | struct perf_header *ph, |
92 | int feat, int fd)); | 74 | int feat, int fd)); |
93 | 75 | ||
@@ -95,33 +77,31 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir, | |||
95 | const char *name, bool is_kallsyms); | 77 | const char *name, bool is_kallsyms); |
96 | int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir); | 78 | int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir); |
97 | 79 | ||
98 | int event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id, | 80 | int perf_event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id, |
99 | event__handler_t process, | 81 | perf_event__handler_t process, |
100 | struct perf_session *session); | ||
101 | int event__synthesize_attrs(struct perf_header *self, | ||
102 | event__handler_t process, | ||
103 | struct perf_session *session); | ||
104 | int event__process_attr(event_t *self, struct perf_session *session); | ||
105 | |||
106 | int event__synthesize_event_type(u64 event_id, char *name, | ||
107 | event__handler_t process, | ||
108 | struct perf_session *session); | ||
109 | int event__synthesize_event_types(event__handler_t process, | ||
110 | struct perf_session *session); | ||
111 | int event__process_event_type(event_t *self, | ||
112 | struct perf_session *session); | ||
113 | |||
114 | int event__synthesize_tracing_data(int fd, struct perf_event_attr *pattrs, | ||
115 | int nb_events, | ||
116 | event__handler_t process, | ||
117 | struct perf_session *session); | ||
118 | int event__process_tracing_data(event_t *self, | ||
119 | struct perf_session *session); | 82 | struct perf_session *session); |
83 | int perf_session__synthesize_attrs(struct perf_session *session, | ||
84 | perf_event__handler_t process); | ||
85 | int perf_event__process_attr(union perf_event *event, struct perf_session *session); | ||
86 | |||
87 | int perf_event__synthesize_event_type(u64 event_id, char *name, | ||
88 | perf_event__handler_t process, | ||
89 | struct perf_session *session); | ||
90 | int perf_event__synthesize_event_types(perf_event__handler_t process, | ||
91 | struct perf_session *session); | ||
92 | int perf_event__process_event_type(union perf_event *event, | ||
93 | struct perf_session *session); | ||
120 | 94 | ||
121 | int event__synthesize_build_id(struct dso *pos, u16 misc, | 95 | int perf_event__synthesize_tracing_data(int fd, struct perf_evlist *evlist, |
122 | event__handler_t process, | 96 | perf_event__handler_t process, |
123 | struct machine *machine, | 97 | struct perf_session *session); |
124 | struct perf_session *session); | 98 | int perf_event__process_tracing_data(union perf_event *event, |
125 | int event__process_build_id(event_t *self, struct perf_session *session); | 99 | struct perf_session *session); |
126 | 100 | ||
101 | int perf_event__synthesize_build_id(struct dso *pos, u16 misc, | ||
102 | perf_event__handler_t process, | ||
103 | struct machine *machine, | ||
104 | struct perf_session *session); | ||
105 | int perf_event__process_build_id(union perf_event *event, | ||
106 | struct perf_session *session); | ||
127 | #endif /* __PERF_HEADER_H */ | 107 | #endif /* __PERF_HEADER_H */ |
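With struct perf_header_attr gone from header.h, the header code now receives a struct perf_evlist from the caller and walks its entries list with list_for_each_entry() instead of indexing header->attr[]. The toy program below only illustrates that intrusive-list idiom; every name in it is made up for the example (GNU C, for typeof):

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each_entry(pos, head, member)				\
	for (pos = container_of((head)->next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = container_of(pos->member.next, typeof(*pos), member))

struct toy_evsel {			/* stand-in for struct perf_evsel */
	int idx;
	struct list_head node;
};

static void toy_list_add_tail(struct list_head *entry, struct list_head *head)
{
	entry->prev = head->prev;
	entry->next = head;
	head->prev->next = entry;
	head->prev = entry;
}

int main(void)
{
	struct list_head entries = { &entries, &entries };	/* empty list */
	struct toy_evsel a = { .idx = 0 }, b = { .idx = 1 };
	struct toy_evsel *evsel;

	toy_list_add_tail(&a.node, &entries);
	toy_list_add_tail(&b.node, &entries);

	list_for_each_entry(evsel, &entries, node)	/* walks a, then b */
		printf("evsel %d\n", evsel->idx);

	return 0;
}

The real code gets struct list_head and list_for_each_entry() from the list helpers shared with the kernel under tools/perf, so nothing like this needs to be reimplemented there.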
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index be22ae6ef055..627a02e03c57 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c | |||
@@ -1,3 +1,4 @@ | |||
1 | #include "annotate.h" | ||
1 | #include "util.h" | 2 | #include "util.h" |
2 | #include "build-id.h" | 3 | #include "build-id.h" |
3 | #include "hist.h" | 4 | #include "hist.h" |
@@ -49,6 +50,15 @@ static void hists__calc_col_len(struct hists *self, struct hist_entry *h) | |||
49 | 50 | ||
50 | if (h->ms.sym) | 51 | if (h->ms.sym) |
51 | hists__new_col_len(self, HISTC_SYMBOL, h->ms.sym->namelen); | 52 | hists__new_col_len(self, HISTC_SYMBOL, h->ms.sym->namelen); |
53 | else { | ||
54 | const unsigned int unresolved_col_width = BITS_PER_LONG / 4; | ||
55 | |||
56 | if (hists__col_len(self, HISTC_DSO) < unresolved_col_width && | ||
57 | !symbol_conf.col_width_list_str && !symbol_conf.field_sep && | ||
58 | !symbol_conf.dso_list) | ||
59 | hists__set_col_len(self, HISTC_DSO, | ||
60 | unresolved_col_width); | ||
61 | } | ||
52 | 62 | ||
53 | len = thread__comm_len(h->thread); | 63 | len = thread__comm_len(h->thread); |
54 | if (hists__new_col_len(self, HISTC_COMM, len)) | 64 | if (hists__new_col_len(self, HISTC_COMM, len)) |
@@ -87,7 +97,7 @@ static void hist_entry__add_cpumode_period(struct hist_entry *self, | |||
87 | 97 | ||
88 | static struct hist_entry *hist_entry__new(struct hist_entry *template) | 98 | static struct hist_entry *hist_entry__new(struct hist_entry *template) |
89 | { | 99 | { |
90 | size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_node) : 0; | 100 | size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0; |
91 | struct hist_entry *self = malloc(sizeof(*self) + callchain_size); | 101 | struct hist_entry *self = malloc(sizeof(*self) + callchain_size); |
92 | 102 | ||
93 | if (self != NULL) { | 103 | if (self != NULL) { |
@@ -211,7 +221,9 @@ void hist_entry__free(struct hist_entry *he) | |||
211 | * collapse the histogram | 221 | * collapse the histogram |
212 | */ | 222 | */ |
213 | 223 | ||
214 | static bool collapse__insert_entry(struct rb_root *root, struct hist_entry *he) | 224 | static bool hists__collapse_insert_entry(struct hists *self, |
225 | struct rb_root *root, | ||
226 | struct hist_entry *he) | ||
215 | { | 227 | { |
216 | struct rb_node **p = &root->rb_node; | 228 | struct rb_node **p = &root->rb_node; |
217 | struct rb_node *parent = NULL; | 229 | struct rb_node *parent = NULL; |
@@ -226,6 +238,11 @@ static bool collapse__insert_entry(struct rb_root *root, struct hist_entry *he) | |||
226 | 238 | ||
227 | if (!cmp) { | 239 | if (!cmp) { |
228 | iter->period += he->period; | 240 | iter->period += he->period; |
241 | if (symbol_conf.use_callchain) { | ||
242 | callchain_cursor_reset(&self->callchain_cursor); | ||
243 | callchain_merge(&self->callchain_cursor, iter->callchain, | ||
244 | he->callchain); | ||
245 | } | ||
229 | hist_entry__free(he); | 246 | hist_entry__free(he); |
230 | return false; | 247 | return false; |
231 | } | 248 | } |
@@ -260,7 +277,7 @@ void hists__collapse_resort(struct hists *self) | |||
260 | next = rb_next(&n->rb_node); | 277 | next = rb_next(&n->rb_node); |
261 | 278 | ||
262 | rb_erase(&n->rb_node, &self->entries); | 279 | rb_erase(&n->rb_node, &self->entries); |
263 | if (collapse__insert_entry(&tmp, n)) | 280 | if (hists__collapse_insert_entry(self, &tmp, n)) |
264 | hists__inc_nr_entries(self, n); | 281 | hists__inc_nr_entries(self, n); |
265 | } | 282 | } |
266 | 283 | ||
@@ -354,7 +371,7 @@ static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask, | |||
354 | 371 | ||
355 | static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain, | 372 | static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain, |
356 | int depth, int depth_mask, int period, | 373 | int depth, int depth_mask, int period, |
357 | u64 total_samples, int hits, | 374 | u64 total_samples, u64 hits, |
358 | int left_margin) | 375 | int left_margin) |
359 | { | 376 | { |
360 | int i; | 377 | int i; |
@@ -423,7 +440,7 @@ static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self, | |||
423 | u64 cumul; | 440 | u64 cumul; |
424 | 441 | ||
425 | child = rb_entry(node, struct callchain_node, rb_node); | 442 | child = rb_entry(node, struct callchain_node, rb_node); |
426 | cumul = cumul_hits(child); | 443 | cumul = callchain_cumul_hits(child); |
427 | remaining -= cumul; | 444 | remaining -= cumul; |
428 | 445 | ||
429 | /* | 446 | /* |
@@ -583,6 +600,7 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size, | |||
583 | { | 600 | { |
584 | struct sort_entry *se; | 601 | struct sort_entry *se; |
585 | u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us; | 602 | u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us; |
603 | u64 nr_events; | ||
586 | const char *sep = symbol_conf.field_sep; | 604 | const char *sep = symbol_conf.field_sep; |
587 | int ret; | 605 | int ret; |
588 | 606 | ||
@@ -591,6 +609,7 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size, | |||
591 | 609 | ||
592 | if (pair_hists) { | 610 | if (pair_hists) { |
593 | period = self->pair ? self->pair->period : 0; | 611 | period = self->pair ? self->pair->period : 0; |
612 | nr_events = self->pair ? self->pair->nr_events : 0; | ||
594 | total = pair_hists->stats.total_period; | 613 | total = pair_hists->stats.total_period; |
595 | period_sys = self->pair ? self->pair->period_sys : 0; | 614 | period_sys = self->pair ? self->pair->period_sys : 0; |
596 | period_us = self->pair ? self->pair->period_us : 0; | 615 | period_us = self->pair ? self->pair->period_us : 0; |
@@ -598,6 +617,7 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size, | |||
598 | period_guest_us = self->pair ? self->pair->period_guest_us : 0; | 617 | period_guest_us = self->pair ? self->pair->period_guest_us : 0; |
599 | } else { | 618 | } else { |
600 | period = self->period; | 619 | period = self->period; |
620 | nr_events = self->nr_events; | ||
601 | total = session_total; | 621 | total = session_total; |
602 | period_sys = self->period_sys; | 622 | period_sys = self->period_sys; |
603 | period_us = self->period_us; | 623 | period_us = self->period_us; |
@@ -634,13 +654,13 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size, | |||
634 | } | 654 | } |
635 | } | 655 | } |
636 | } else | 656 | } else |
637 | ret = snprintf(s, size, sep ? "%lld" : "%12lld ", period); | 657 | ret = snprintf(s, size, sep ? "%" PRIu64 : "%12" PRIu64 " ", period); |
638 | 658 | ||
639 | if (symbol_conf.show_nr_samples) { | 659 | if (symbol_conf.show_nr_samples) { |
640 | if (sep) | 660 | if (sep) |
641 | ret += snprintf(s + ret, size - ret, "%c%lld", *sep, period); | 661 | ret += snprintf(s + ret, size - ret, "%c%" PRIu64, *sep, nr_events); |
642 | else | 662 | else |
643 | ret += snprintf(s + ret, size - ret, "%11lld", period); | 663 | ret += snprintf(s + ret, size - ret, "%11" PRIu64, nr_events); |
644 | } | 664 | } |
645 | 665 | ||
646 | if (pair_hists) { | 666 | if (pair_hists) { |
@@ -942,216 +962,14 @@ void hists__filter_by_thread(struct hists *self, const struct thread *thread) | |||
942 | } | 962 | } |
943 | } | 963 | } |
944 | 964 | ||
945 | static int symbol__alloc_hist(struct symbol *self) | 965 | int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip) |
946 | { | ||
947 | struct sym_priv *priv = symbol__priv(self); | ||
948 | const int size = (sizeof(*priv->hist) + | ||
949 | (self->end - self->start) * sizeof(u64)); | ||
950 | |||
951 | priv->hist = zalloc(size); | ||
952 | return priv->hist == NULL ? -1 : 0; | ||
953 | } | ||
954 | |||
955 | int hist_entry__inc_addr_samples(struct hist_entry *self, u64 ip) | ||
956 | { | ||
957 | unsigned int sym_size, offset; | ||
958 | struct symbol *sym = self->ms.sym; | ||
959 | struct sym_priv *priv; | ||
960 | struct sym_hist *h; | ||
961 | |||
962 | if (!sym || !self->ms.map) | ||
963 | return 0; | ||
964 | |||
965 | priv = symbol__priv(sym); | ||
966 | if (priv->hist == NULL && symbol__alloc_hist(sym) < 0) | ||
967 | return -ENOMEM; | ||
968 | |||
969 | sym_size = sym->end - sym->start; | ||
970 | offset = ip - sym->start; | ||
971 | |||
972 | pr_debug3("%s: ip=%#Lx\n", __func__, self->ms.map->unmap_ip(self->ms.map, ip)); | ||
973 | |||
974 | if (offset >= sym_size) | ||
975 | return 0; | ||
976 | |||
977 | h = priv->hist; | ||
978 | h->sum++; | ||
979 | h->ip[offset]++; | ||
980 | |||
981 | pr_debug3("%#Lx %s: period++ [ip: %#Lx, %#Lx] => %Ld\n", self->ms.sym->start, | ||
982 | self->ms.sym->name, ip, ip - self->ms.sym->start, h->ip[offset]); | ||
983 | return 0; | ||
984 | } | ||
985 | |||
986 | static struct objdump_line *objdump_line__new(s64 offset, char *line, size_t privsize) | ||
987 | { | ||
988 | struct objdump_line *self = malloc(sizeof(*self) + privsize); | ||
989 | |||
990 | if (self != NULL) { | ||
991 | self->offset = offset; | ||
992 | self->line = line; | ||
993 | } | ||
994 | |||
995 | return self; | ||
996 | } | ||
997 | |||
998 | void objdump_line__free(struct objdump_line *self) | ||
999 | { | ||
1000 | free(self->line); | ||
1001 | free(self); | ||
1002 | } | ||
1003 | |||
1004 | static void objdump__add_line(struct list_head *head, struct objdump_line *line) | ||
1005 | { | ||
1006 | list_add_tail(&line->node, head); | ||
1007 | } | ||
1008 | |||
1009 | struct objdump_line *objdump__get_next_ip_line(struct list_head *head, | ||
1010 | struct objdump_line *pos) | ||
1011 | { | ||
1012 | list_for_each_entry_continue(pos, head, node) | ||
1013 | if (pos->offset >= 0) | ||
1014 | return pos; | ||
1015 | |||
1016 | return NULL; | ||
1017 | } | ||
1018 | |||
1019 | static int hist_entry__parse_objdump_line(struct hist_entry *self, FILE *file, | ||
1020 | struct list_head *head, size_t privsize) | ||
1021 | { | 966 | { |
1022 | struct symbol *sym = self->ms.sym; | 967 | return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip); |
1023 | struct objdump_line *objdump_line; | ||
1024 | char *line = NULL, *tmp, *tmp2, *c; | ||
1025 | size_t line_len; | ||
1026 | s64 line_ip, offset = -1; | ||
1027 | |||
1028 | if (getline(&line, &line_len, file) < 0) | ||
1029 | return -1; | ||
1030 | |||
1031 | if (!line) | ||
1032 | return -1; | ||
1033 | |||
1034 | while (line_len != 0 && isspace(line[line_len - 1])) | ||
1035 | line[--line_len] = '\0'; | ||
1036 | |||
1037 | c = strchr(line, '\n'); | ||
1038 | if (c) | ||
1039 | *c = 0; | ||
1040 | |||
1041 | line_ip = -1; | ||
1042 | |||
1043 | /* | ||
1044 | * Strip leading spaces: | ||
1045 | */ | ||
1046 | tmp = line; | ||
1047 | while (*tmp) { | ||
1048 | if (*tmp != ' ') | ||
1049 | break; | ||
1050 | tmp++; | ||
1051 | } | ||
1052 | |||
1053 | if (*tmp) { | ||
1054 | /* | ||
1055 | * Parse hexa addresses followed by ':' | ||
1056 | */ | ||
1057 | line_ip = strtoull(tmp, &tmp2, 16); | ||
1058 | if (*tmp2 != ':' || tmp == tmp2 || tmp2[1] == '\0') | ||
1059 | line_ip = -1; | ||
1060 | } | ||
1061 | |||
1062 | if (line_ip != -1) { | ||
1063 | u64 start = map__rip_2objdump(self->ms.map, sym->start), | ||
1064 | end = map__rip_2objdump(self->ms.map, sym->end); | ||
1065 | |||
1066 | offset = line_ip - start; | ||
1067 | if (offset < 0 || (u64)line_ip > end) | ||
1068 | offset = -1; | ||
1069 | } | ||
1070 | |||
1071 | objdump_line = objdump_line__new(offset, line, privsize); | ||
1072 | if (objdump_line == NULL) { | ||
1073 | free(line); | ||
1074 | return -1; | ||
1075 | } | ||
1076 | objdump__add_line(head, objdump_line); | ||
1077 | |||
1078 | return 0; | ||
1079 | } | 968 | } |
1080 | 969 | ||
1081 | int hist_entry__annotate(struct hist_entry *self, struct list_head *head, | 970 | int hist_entry__annotate(struct hist_entry *he, size_t privsize) |
1082 | size_t privsize) | ||
1083 | { | 971 | { |
1084 | struct symbol *sym = self->ms.sym; | 972 | return symbol__annotate(he->ms.sym, he->ms.map, privsize); |
1085 | struct map *map = self->ms.map; | ||
1086 | struct dso *dso = map->dso; | ||
1087 | char *filename = dso__build_id_filename(dso, NULL, 0); | ||
1088 | bool free_filename = true; | ||
1089 | char command[PATH_MAX * 2]; | ||
1090 | FILE *file; | ||
1091 | int err = 0; | ||
1092 | u64 len; | ||
1093 | |||
1094 | if (filename == NULL) { | ||
1095 | if (dso->has_build_id) { | ||
1096 | pr_err("Can't annotate %s: not enough memory\n", | ||
1097 | sym->name); | ||
1098 | return -ENOMEM; | ||
1099 | } | ||
1100 | goto fallback; | ||
1101 | } else if (readlink(filename, command, sizeof(command)) < 0 || | ||
1102 | strstr(command, "[kernel.kallsyms]") || | ||
1103 | access(filename, R_OK)) { | ||
1104 | free(filename); | ||
1105 | fallback: | ||
1106 | /* | ||
1107 | * If we don't have build-ids or the build-id file isn't in the | ||
1108 | * cache, or is just a kallsyms file, well, lets hope that this | ||
1109 | * DSO is the same as when 'perf record' ran. | ||
1110 | */ | ||
1111 | filename = dso->long_name; | ||
1112 | free_filename = false; | ||
1113 | } | ||
1114 | |||
1115 | if (dso->origin == DSO__ORIG_KERNEL) { | ||
1116 | if (dso->annotate_warned) | ||
1117 | goto out_free_filename; | ||
1118 | err = -ENOENT; | ||
1119 | dso->annotate_warned = 1; | ||
1120 | pr_err("Can't annotate %s: No vmlinux file was found in the " | ||
1121 | "path\n", sym->name); | ||
1122 | goto out_free_filename; | ||
1123 | } | ||
1124 | |||
1125 | pr_debug("%s: filename=%s, sym=%s, start=%#Lx, end=%#Lx\n", __func__, | ||
1126 | filename, sym->name, map->unmap_ip(map, sym->start), | ||
1127 | map->unmap_ip(map, sym->end)); | ||
1128 | |||
1129 | len = sym->end - sym->start; | ||
1130 | |||
1131 | pr_debug("annotating [%p] %30s : [%p] %30s\n", | ||
1132 | dso, dso->long_name, sym, sym->name); | ||
1133 | |||
1134 | snprintf(command, sizeof(command), | ||
1135 | "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS -C %s|grep -v %s|expand", | ||
1136 | map__rip_2objdump(map, sym->start), | ||
1137 | map__rip_2objdump(map, sym->end), | ||
1138 | filename, filename); | ||
1139 | |||
1140 | pr_debug("Executing: %s\n", command); | ||
1141 | |||
1142 | file = popen(command, "r"); | ||
1143 | if (!file) | ||
1144 | goto out_free_filename; | ||
1145 | |||
1146 | while (!feof(file)) | ||
1147 | if (hist_entry__parse_objdump_line(self, file, head, privsize) < 0) | ||
1148 | break; | ||
1149 | |||
1150 | pclose(file); | ||
1151 | out_free_filename: | ||
1152 | if (free_filename) | ||
1153 | free(filename); | ||
1154 | return err; | ||
1155 | } | 973 | } |
1156 | 974 | ||
1157 | void hists__inc_nr_events(struct hists *self, u32 type) | 975 | void hists__inc_nr_events(struct hists *self, u32 type) |
@@ -1166,10 +984,17 @@ size_t hists__fprintf_nr_events(struct hists *self, FILE *fp) | |||
1166 | size_t ret = 0; | 984 | size_t ret = 0; |
1167 | 985 | ||
1168 | for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) { | 986 | for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) { |
1169 | if (!event__name[i]) | 987 | const char *name; |
988 | |||
989 | if (self->stats.nr_events[i] == 0) | ||
990 | continue; | ||
991 | |||
992 | name = perf_event__name(i); | ||
993 | if (!strcmp(name, "UNKNOWN")) | ||
1170 | continue; | 994 | continue; |
1171 | ret += fprintf(fp, "%10s events: %10d\n", | 995 | |
1172 | event__name[i], self->stats.nr_events[i]); | 996 | ret += fprintf(fp, "%16s events: %10d\n", name, |
997 | self->stats.nr_events[i]); | ||
1173 | } | 998 | } |
1174 | 999 | ||
1175 | return ret; | 1000 | return ret; |
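
A side note on the "%lld" -> "%" PRIu64 conversions above (and the matching one in map__fprintf() further down): u64 does not map to the same underlying type on every libc/architecture combination perf builds on, so a hard-coded "%lld" draws format warnings. The <inttypes.h> macros pick the right conversion at compile time; a minimal sketch, outside the patch:

	#include <inttypes.h>
	#include <stdio.h>

	/* Illustrative only: PRIu64 expands to the specifier matching
	 * uint64_t on the current libc, so this builds warning-free on
	 * both 32-bit and 64-bit targets. */
	static void print_sample_count(uint64_t nr_events)
	{
		printf("%11" PRIu64 " samples\n", nr_events);
	}
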
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index 587d375d3430..3beb97c4d822 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h | |||
@@ -9,33 +9,6 @@ extern struct callchain_param callchain_param; | |||
9 | struct hist_entry; | 9 | struct hist_entry; |
10 | struct addr_location; | 10 | struct addr_location; |
11 | struct symbol; | 11 | struct symbol; |
12 | struct rb_root; | ||
13 | |||
14 | struct objdump_line { | ||
15 | struct list_head node; | ||
16 | s64 offset; | ||
17 | char *line; | ||
18 | }; | ||
19 | |||
20 | void objdump_line__free(struct objdump_line *self); | ||
21 | struct objdump_line *objdump__get_next_ip_line(struct list_head *head, | ||
22 | struct objdump_line *pos); | ||
23 | |||
24 | struct sym_hist { | ||
25 | u64 sum; | ||
26 | u64 ip[0]; | ||
27 | }; | ||
28 | |||
29 | struct sym_ext { | ||
30 | struct rb_node node; | ||
31 | double percent; | ||
32 | char *path; | ||
33 | }; | ||
34 | |||
35 | struct sym_priv { | ||
36 | struct sym_hist *hist; | ||
37 | struct sym_ext *ext; | ||
38 | }; | ||
39 | 12 | ||
40 | /* | 13 | /* |
41 | * The kernel collects the number of events it couldn't send in a stretch and | 14 | * The kernel collects the number of events it couldn't send in a stretch and |
@@ -52,8 +25,11 @@ struct sym_priv { | |||
52 | struct events_stats { | 25 | struct events_stats { |
53 | u64 total_period; | 26 | u64 total_period; |
54 | u64 total_lost; | 27 | u64 total_lost; |
28 | u64 total_invalid_chains; | ||
55 | u32 nr_events[PERF_RECORD_HEADER_MAX]; | 29 | u32 nr_events[PERF_RECORD_HEADER_MAX]; |
56 | u32 nr_unknown_events; | 30 | u32 nr_unknown_events; |
31 | u32 nr_invalid_chains; | ||
32 | u32 nr_unknown_id; | ||
57 | }; | 33 | }; |
58 | 34 | ||
59 | enum hist_column { | 35 | enum hist_column { |
@@ -67,14 +43,13 @@ enum hist_column { | |||
67 | }; | 43 | }; |
68 | 44 | ||
69 | struct hists { | 45 | struct hists { |
70 | struct rb_node rb_node; | ||
71 | struct rb_root entries; | 46 | struct rb_root entries; |
72 | u64 nr_entries; | 47 | u64 nr_entries; |
73 | struct events_stats stats; | 48 | struct events_stats stats; |
74 | u64 config; | ||
75 | u64 event_stream; | 49 | u64 event_stream; |
76 | u32 type; | ||
77 | u16 col_len[HISTC_NR_COLS]; | 50 | u16 col_len[HISTC_NR_COLS]; |
51 | /* Best would be to reuse the session callchain cursor */ | ||
52 | struct callchain_cursor callchain_cursor; | ||
78 | }; | 53 | }; |
79 | 54 | ||
80 | struct hist_entry *__hists__add_entry(struct hists *self, | 55 | struct hist_entry *__hists__add_entry(struct hists *self, |
@@ -100,9 +75,8 @@ size_t hists__fprintf_nr_events(struct hists *self, FILE *fp); | |||
100 | size_t hists__fprintf(struct hists *self, struct hists *pair, | 75 | size_t hists__fprintf(struct hists *self, struct hists *pair, |
101 | bool show_displacement, FILE *fp); | 76 | bool show_displacement, FILE *fp); |
102 | 77 | ||
103 | int hist_entry__inc_addr_samples(struct hist_entry *self, u64 ip); | 78 | int hist_entry__inc_addr_samples(struct hist_entry *self, int evidx, u64 addr); |
104 | int hist_entry__annotate(struct hist_entry *self, struct list_head *head, | 79 | int hist_entry__annotate(struct hist_entry *self, size_t privsize); |
105 | size_t privsize); | ||
106 | 80 | ||
107 | void hists__filter_by_dso(struct hists *self, const struct dso *dso); | 81 | void hists__filter_by_dso(struct hists *self, const struct dso *dso); |
108 | void hists__filter_by_thread(struct hists *self, const struct thread *thread); | 82 | void hists__filter_by_thread(struct hists *self, const struct thread *thread); |
@@ -111,21 +85,18 @@ u16 hists__col_len(struct hists *self, enum hist_column col); | |||
111 | void hists__set_col_len(struct hists *self, enum hist_column col, u16 len); | 85 | void hists__set_col_len(struct hists *self, enum hist_column col, u16 len); |
112 | bool hists__new_col_len(struct hists *self, enum hist_column col, u16 len); | 86 | bool hists__new_col_len(struct hists *self, enum hist_column col, u16 len); |
113 | 87 | ||
114 | #ifdef NO_NEWT_SUPPORT | 88 | struct perf_evlist; |
115 | static inline int hists__browse(struct hists *self __used, | ||
116 | const char *helpline __used, | ||
117 | const char *ev_name __used) | ||
118 | { | ||
119 | return 0; | ||
120 | } | ||
121 | 89 | ||
122 | static inline int hists__tui_browse_tree(struct rb_root *self __used, | 90 | #ifdef NO_NEWT_SUPPORT |
123 | const char *help __used) | 91 | static inline |
92 | int perf_evlist__tui_browse_hists(struct perf_evlist *evlist __used, | ||
93 | const char *help __used) | ||
124 | { | 94 | { |
125 | return 0; | 95 | return 0; |
126 | } | 96 | } |
127 | 97 | ||
128 | static inline int hist_entry__tui_annotate(struct hist_entry *self __used) | 98 | static inline int hist_entry__tui_annotate(struct hist_entry *self __used, |
99 | int evidx __used) | ||
129 | { | 100 | { |
130 | return 0; | 101 | return 0; |
131 | } | 102 | } |
@@ -133,14 +104,12 @@ static inline int hist_entry__tui_annotate(struct hist_entry *self __used) | |||
133 | #define KEY_RIGHT -2 | 104 | #define KEY_RIGHT -2 |
134 | #else | 105 | #else |
135 | #include <newt.h> | 106 | #include <newt.h> |
136 | int hists__browse(struct hists *self, const char *helpline, | 107 | int hist_entry__tui_annotate(struct hist_entry *self, int evidx); |
137 | const char *ev_name); | ||
138 | int hist_entry__tui_annotate(struct hist_entry *self); | ||
139 | 108 | ||
140 | #define KEY_LEFT NEWT_KEY_LEFT | 109 | #define KEY_LEFT NEWT_KEY_LEFT |
141 | #define KEY_RIGHT NEWT_KEY_RIGHT | 110 | #define KEY_RIGHT NEWT_KEY_RIGHT |
142 | 111 | ||
143 | int hists__tui_browse_tree(struct rb_root *self, const char *help); | 112 | int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help); |
144 | #endif | 113 | #endif |
145 | 114 | ||
146 | unsigned int hists__sort_list_width(struct hists *self); | 115 | unsigned int hists__sort_list_width(struct hists *self); |
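
The NO_NEWT_SUPPORT block above keeps callers #ifdef-free: when the TUI is not built, the browse/annotate entry points collapse into no-op inlines with the same signature. The same pattern, sketched with hypothetical names for anyone adding another UI hook:

	/* Hypothetical example of the stub-when-disabled pattern: callers
	 * always call ui__report(); the build decides whether it is the
	 * real browser or a do-nothing inline. */
	#ifdef NO_NEWT_SUPPORT
	static inline int ui__report(struct perf_evlist *evlist __used,
				     const char *help __used)
	{
		return 0;	/* behave as if the browser exited at once */
	}
	#else
	int ui__report(struct perf_evlist *evlist, const char *help);
	#endif
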
diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/perf/util/include/asm/alternative-asm.h new file mode 100644 index 000000000000..6789d788d494 --- /dev/null +++ b/tools/perf/util/include/asm/alternative-asm.h | |||
@@ -0,0 +1,8 @@ | |||
1 | #ifndef _PERF_ASM_ALTERNATIVE_ASM_H | ||
2 | #define _PERF_ASM_ALTERNATIVE_ASM_H | ||
3 | |||
4 | /* Just disable it so we can build arch/x86/lib/memcpy_64.S for perf bench: */ | ||
5 | |||
6 | #define altinstruction_entry # | ||
7 | |||
8 | #endif | ||
diff --git a/tools/perf/util/include/asm/cpufeature.h b/tools/perf/util/include/asm/cpufeature.h new file mode 100644 index 000000000000..acffd5e4d1d4 --- /dev/null +++ b/tools/perf/util/include/asm/cpufeature.h | |||
@@ -0,0 +1,9 @@ | |||
1 | |||
2 | #ifndef PERF_CPUFEATURE_H | ||
3 | #define PERF_CPUFEATURE_H | ||
4 | |||
5 | /* cpufeature.h ... dummy header file for including arch/x86/lib/memcpy_64.S */ | ||
6 | |||
7 | #define X86_FEATURE_REP_GOOD 0 | ||
8 | |||
9 | #endif /* PERF_CPUFEATURE_H */ | ||
diff --git a/tools/perf/util/include/asm/dwarf2.h b/tools/perf/util/include/asm/dwarf2.h new file mode 100644 index 000000000000..bb4198e7837a --- /dev/null +++ b/tools/perf/util/include/asm/dwarf2.h | |||
@@ -0,0 +1,11 @@ | |||
1 | |||
2 | #ifndef PERF_DWARF2_H | ||
3 | #define PERF_DWARF2_H | ||
4 | |||
5 | /* dwarf2.h ... dummy header file for including arch/x86/lib/memcpy_64.S */ | ||
6 | |||
7 | #define CFI_STARTPROC | ||
8 | #define CFI_ENDPROC | ||
9 | |||
10 | #endif /* PERF_DWARF2_H */ | ||
11 | |||
diff --git a/tools/perf/util/include/linux/bitops.h b/tools/perf/util/include/linux/bitops.h index bb4ac2e05385..305c8484f200 100644 --- a/tools/perf/util/include/linux/bitops.h +++ b/tools/perf/util/include/linux/bitops.h | |||
@@ -2,6 +2,7 @@ | |||
2 | #define _PERF_LINUX_BITOPS_H_ | 2 | #define _PERF_LINUX_BITOPS_H_ |
3 | 3 | ||
4 | #include <linux/kernel.h> | 4 | #include <linux/kernel.h> |
5 | #include <linux/compiler.h> | ||
5 | #include <asm/hweight.h> | 6 | #include <asm/hweight.h> |
6 | 7 | ||
7 | #define BITS_PER_LONG __WORDSIZE | 8 | #define BITS_PER_LONG __WORDSIZE |
@@ -13,6 +14,11 @@ static inline void set_bit(int nr, unsigned long *addr) | |||
13 | addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG); | 14 | addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG); |
14 | } | 15 | } |
15 | 16 | ||
17 | static inline void clear_bit(int nr, unsigned long *addr) | ||
18 | { | ||
19 | addr[nr / BITS_PER_LONG] &= ~(1UL << (nr % BITS_PER_LONG)); | ||
20 | } | ||
21 | |||
16 | static __always_inline int test_bit(unsigned int nr, const unsigned long *addr) | 22 | static __always_inline int test_bit(unsigned int nr, const unsigned long *addr) |
17 | { | 23 | { |
18 | return ((1UL << (nr % BITS_PER_LONG)) & | 24 | return ((1UL << (nr % BITS_PER_LONG)) & |
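
clear_bit() joins set_bit()/test_bit() in perf's userspace bitops. A small usage sketch, illustrative only and not part of the patch:

	/* Track a set of CPUs in a plain unsigned long bitmap using the
	 * helpers above; MAX_NR_CPUS is a made-up bound for the example. */
	#define MAX_NR_CPUS 4096
	static unsigned long cpu_bitmap[MAX_NR_CPUS / BITS_PER_LONG];

	static void mark_cpu(int cpu, int online)
	{
		if (online)
			set_bit(cpu, cpu_bitmap);
		else
			clear_bit(cpu, cpu_bitmap);
	}

	static int cpu_is_marked(int cpu)
	{
		return test_bit(cpu, cpu_bitmap) != 0;
	}
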
diff --git a/tools/perf/util/include/linux/const.h b/tools/perf/util/include/linux/const.h new file mode 100644 index 000000000000..1b476c9ae649 --- /dev/null +++ b/tools/perf/util/include/linux/const.h | |||
@@ -0,0 +1 @@ | |||
1 | #include "../../../../include/linux/const.h" | ||
diff --git a/tools/perf/util/include/linux/linkage.h b/tools/perf/util/include/linux/linkage.h new file mode 100644 index 000000000000..06387cffe125 --- /dev/null +++ b/tools/perf/util/include/linux/linkage.h | |||
@@ -0,0 +1,13 @@ | |||
1 | |||
2 | #ifndef PERF_LINUX_LINKAGE_H_ | ||
3 | #define PERF_LINUX_LINKAGE_H_ | ||
4 | |||
5 | /* linkage.h ... for including arch/x86/lib/memcpy_64.S */ | ||
6 | |||
7 | #define ENTRY(name) \ | ||
8 | .globl name; \ | ||
9 | name: | ||
10 | |||
11 | #define ENDPROC(name) | ||
12 | |||
13 | #endif /* PERF_LINUX_LINKAGE_H_ */ | ||
diff --git a/tools/perf/util/include/linux/list.h b/tools/perf/util/include/linux/list.h index f5ca26e53fbb..1d928a0ce997 100644 --- a/tools/perf/util/include/linux/list.h +++ b/tools/perf/util/include/linux/list.h | |||
@@ -1,3 +1,6 @@ | |||
1 | #include <linux/kernel.h> | ||
2 | #include <linux/prefetch.h> | ||
3 | |||
1 | #include "../../../../include/linux/list.h" | 4 | #include "../../../../include/linux/list.h" |
2 | 5 | ||
3 | #ifndef PERF_LIST_H | 6 | #ifndef PERF_LIST_H |
@@ -22,5 +25,5 @@ static inline void list_del_range(struct list_head *begin, | |||
22 | * @head: the head for your list. | 25 | * @head: the head for your list. |
23 | */ | 26 | */ |
24 | #define list_for_each_from(pos, head) \ | 27 | #define list_for_each_from(pos, head) \ |
25 | for (; prefetch(pos->next), pos != (head); pos = pos->next) | 28 | for (; pos != (head); pos = pos->next) |
26 | #endif | 29 | #endif |
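
With the prefetch() call gone, list_for_each_from() is a plain pointer walk that starts at a node the caller already holds instead of at the list head. Illustrative use on a list of struct objdump_line (hypothetical helper, error handling elided):

	static struct objdump_line *next_line_with_ip(struct list_head *head,
						      struct objdump_line *start)
	{
		struct list_head *pos = &start->node;

		list_for_each_from(pos, head) {
			struct objdump_line *l =
				list_entry(pos, struct objdump_line, node);

			if (l->offset >= 0)
				return l;	/* next line carrying an address */
		}
		return NULL;
	}
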
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index 3a7eb6ec0eec..a16ecab5229d 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c | |||
@@ -1,5 +1,6 @@ | |||
1 | #include "symbol.h" | 1 | #include "symbol.h" |
2 | #include <errno.h> | 2 | #include <errno.h> |
3 | #include <inttypes.h> | ||
3 | #include <limits.h> | 4 | #include <limits.h> |
4 | #include <stdlib.h> | 5 | #include <stdlib.h> |
5 | #include <string.h> | 6 | #include <string.h> |
@@ -195,7 +196,7 @@ int map__overlap(struct map *l, struct map *r) | |||
195 | 196 | ||
196 | size_t map__fprintf(struct map *self, FILE *fp) | 197 | size_t map__fprintf(struct map *self, FILE *fp) |
197 | { | 198 | { |
198 | return fprintf(fp, " %Lx-%Lx %Lx %s\n", | 199 | return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n", |
199 | self->start, self->end, self->pgoff, self->dso->name); | 200 | self->start, self->end, self->pgoff, self->dso->name); |
200 | } | 201 | } |
201 | 202 | ||
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h index 78575796d5f3..b397c0383728 100644 --- a/tools/perf/util/map.h +++ b/tools/perf/util/map.h | |||
@@ -215,6 +215,16 @@ struct symbol *map_groups__find_function_by_name(struct map_groups *self, | |||
215 | return map_groups__find_symbol_by_name(self, MAP__FUNCTION, name, mapp, filter); | 215 | return map_groups__find_symbol_by_name(self, MAP__FUNCTION, name, mapp, filter); |
216 | } | 216 | } |
217 | 217 | ||
218 | static inline | ||
219 | struct symbol *machine__find_kernel_function_by_name(struct machine *self, | ||
220 | const char *name, | ||
221 | struct map **mapp, | ||
222 | symbol_filter_t filter) | ||
223 | { | ||
224 | return map_groups__find_function_by_name(&self->kmaps, name, mapp, | ||
225 | filter); | ||
226 | } | ||
227 | |||
218 | int map_groups__fixup_overlappings(struct map_groups *self, struct map *map, | 228 | int map_groups__fixup_overlappings(struct map_groups *self, struct map *map, |
219 | int verbose, FILE *fp); | 229 | int verbose, FILE *fp); |
220 | 230 | ||
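
The new inline is a convenience wrapper over map_groups__find_function_by_name() on the host kernel's kmaps; the probe-event.c changes later in this patch build their __find_kernel_function_by_name() helper on top of it. A hedged usage sketch ("schedule" is just an example symbol):

	static void show_schedule(struct machine *machine)
	{
		struct map *map;
		struct symbol *sym;

		sym = machine__find_kernel_function_by_name(machine, "schedule",
							    &map, NULL);
		if (sym != NULL)
			pr_debug("schedule: %#" PRIx64 "-%#" PRIx64 "\n",
				 map->unmap_ip(map, sym->start),
				 map->unmap_ip(map, sym->end));
	}
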
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 4af5bd59cfd1..41982c373faf 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c | |||
@@ -1,6 +1,8 @@ | |||
1 | #include "../../../include/linux/hw_breakpoint.h" | 1 | #include "../../../include/linux/hw_breakpoint.h" |
2 | #include "util.h" | 2 | #include "util.h" |
3 | #include "../perf.h" | 3 | #include "../perf.h" |
4 | #include "evlist.h" | ||
5 | #include "evsel.h" | ||
4 | #include "parse-options.h" | 6 | #include "parse-options.h" |
5 | #include "parse-events.h" | 7 | #include "parse-events.h" |
6 | #include "exec_cmd.h" | 8 | #include "exec_cmd.h" |
@@ -10,11 +12,6 @@ | |||
10 | #include "header.h" | 12 | #include "header.h" |
11 | #include "debugfs.h" | 13 | #include "debugfs.h" |
12 | 14 | ||
13 | int nr_counters; | ||
14 | |||
15 | struct perf_event_attr attrs[MAX_COUNTERS]; | ||
16 | char *filters[MAX_COUNTERS]; | ||
17 | |||
18 | struct event_symbol { | 15 | struct event_symbol { |
19 | u8 type; | 16 | u8 type; |
20 | u64 config; | 17 | u64 config; |
@@ -34,34 +31,36 @@ char debugfs_path[MAXPATHLEN]; | |||
34 | #define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x | 31 | #define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x |
35 | 32 | ||
36 | static struct event_symbol event_symbols[] = { | 33 | static struct event_symbol event_symbols[] = { |
37 | { CHW(CPU_CYCLES), "cpu-cycles", "cycles" }, | 34 | { CHW(CPU_CYCLES), "cpu-cycles", "cycles" }, |
38 | { CHW(INSTRUCTIONS), "instructions", "" }, | 35 | { CHW(STALLED_CYCLES_FRONTEND), "stalled-cycles-frontend", "idle-cycles-frontend" }, |
39 | { CHW(CACHE_REFERENCES), "cache-references", "" }, | 36 | { CHW(STALLED_CYCLES_BACKEND), "stalled-cycles-backend", "idle-cycles-backend" }, |
40 | { CHW(CACHE_MISSES), "cache-misses", "" }, | 37 | { CHW(INSTRUCTIONS), "instructions", "" }, |
41 | { CHW(BRANCH_INSTRUCTIONS), "branch-instructions", "branches" }, | 38 | { CHW(CACHE_REFERENCES), "cache-references", "" }, |
42 | { CHW(BRANCH_MISSES), "branch-misses", "" }, | 39 | { CHW(CACHE_MISSES), "cache-misses", "" }, |
43 | { CHW(BUS_CYCLES), "bus-cycles", "" }, | 40 | { CHW(BRANCH_INSTRUCTIONS), "branch-instructions", "branches" }, |
44 | 41 | { CHW(BRANCH_MISSES), "branch-misses", "" }, | |
45 | { CSW(CPU_CLOCK), "cpu-clock", "" }, | 42 | { CHW(BUS_CYCLES), "bus-cycles", "" }, |
46 | { CSW(TASK_CLOCK), "task-clock", "" }, | 43 | |
47 | { CSW(PAGE_FAULTS), "page-faults", "faults" }, | 44 | { CSW(CPU_CLOCK), "cpu-clock", "" }, |
48 | { CSW(PAGE_FAULTS_MIN), "minor-faults", "" }, | 45 | { CSW(TASK_CLOCK), "task-clock", "" }, |
49 | { CSW(PAGE_FAULTS_MAJ), "major-faults", "" }, | 46 | { CSW(PAGE_FAULTS), "page-faults", "faults" }, |
50 | { CSW(CONTEXT_SWITCHES), "context-switches", "cs" }, | 47 | { CSW(PAGE_FAULTS_MIN), "minor-faults", "" }, |
51 | { CSW(CPU_MIGRATIONS), "cpu-migrations", "migrations" }, | 48 | { CSW(PAGE_FAULTS_MAJ), "major-faults", "" }, |
52 | { CSW(ALIGNMENT_FAULTS), "alignment-faults", "" }, | 49 | { CSW(CONTEXT_SWITCHES), "context-switches", "cs" }, |
53 | { CSW(EMULATION_FAULTS), "emulation-faults", "" }, | 50 | { CSW(CPU_MIGRATIONS), "cpu-migrations", "migrations" }, |
51 | { CSW(ALIGNMENT_FAULTS), "alignment-faults", "" }, | ||
52 | { CSW(EMULATION_FAULTS), "emulation-faults", "" }, | ||
54 | }; | 53 | }; |
55 | 54 | ||
56 | #define __PERF_EVENT_FIELD(config, name) \ | 55 | #define __PERF_EVENT_FIELD(config, name) \ |
57 | ((config & PERF_EVENT_##name##_MASK) >> PERF_EVENT_##name##_SHIFT) | 56 | ((config & PERF_EVENT_##name##_MASK) >> PERF_EVENT_##name##_SHIFT) |
58 | 57 | ||
59 | #define PERF_EVENT_RAW(config) __PERF_EVENT_FIELD(config, RAW) | 58 | #define PERF_EVENT_RAW(config) __PERF_EVENT_FIELD(config, RAW) |
60 | #define PERF_EVENT_CONFIG(config) __PERF_EVENT_FIELD(config, CONFIG) | 59 | #define PERF_EVENT_CONFIG(config) __PERF_EVENT_FIELD(config, CONFIG) |
61 | #define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE) | 60 | #define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE) |
62 | #define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT) | 61 | #define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT) |
63 | 62 | ||
64 | static const char *hw_event_names[] = { | 63 | static const char *hw_event_names[PERF_COUNT_HW_MAX] = { |
65 | "cycles", | 64 | "cycles", |
66 | "instructions", | 65 | "instructions", |
67 | "cache-references", | 66 | "cache-references", |
@@ -69,11 +68,13 @@ static const char *hw_event_names[] = { | |||
69 | "branches", | 68 | "branches", |
70 | "branch-misses", | 69 | "branch-misses", |
71 | "bus-cycles", | 70 | "bus-cycles", |
71 | "stalled-cycles-frontend", | ||
72 | "stalled-cycles-backend", | ||
72 | }; | 73 | }; |
73 | 74 | ||
74 | static const char *sw_event_names[] = { | 75 | static const char *sw_event_names[PERF_COUNT_SW_MAX] = { |
75 | "cpu-clock-msecs", | 76 | "cpu-clock", |
76 | "task-clock-msecs", | 77 | "task-clock", |
77 | "page-faults", | 78 | "page-faults", |
78 | "context-switches", | 79 | "context-switches", |
79 | "CPU-migrations", | 80 | "CPU-migrations", |
@@ -266,10 +267,35 @@ static char *event_cache_name(u8 cache_type, u8 cache_op, u8 cache_result) | |||
266 | return name; | 267 | return name; |
267 | } | 268 | } |
268 | 269 | ||
269 | const char *event_name(int counter) | 270 | const char *event_type(int type) |
270 | { | 271 | { |
271 | u64 config = attrs[counter].config; | 272 | switch (type) { |
272 | int type = attrs[counter].type; | 273 | case PERF_TYPE_HARDWARE: |
274 | return "hardware"; | ||
275 | |||
276 | case PERF_TYPE_SOFTWARE: | ||
277 | return "software"; | ||
278 | |||
279 | case PERF_TYPE_TRACEPOINT: | ||
280 | return "tracepoint"; | ||
281 | |||
282 | case PERF_TYPE_HW_CACHE: | ||
283 | return "hardware-cache"; | ||
284 | |||
285 | default: | ||
286 | break; | ||
287 | } | ||
288 | |||
289 | return "unknown"; | ||
290 | } | ||
291 | |||
292 | const char *event_name(struct perf_evsel *evsel) | ||
293 | { | ||
294 | u64 config = evsel->attr.config; | ||
295 | int type = evsel->attr.type; | ||
296 | |||
297 | if (evsel->name) | ||
298 | return evsel->name; | ||
273 | 299 | ||
274 | return __event_name(type, config); | 300 | return __event_name(type, config); |
275 | } | 301 | } |
@@ -279,13 +305,13 @@ const char *__event_name(int type, u64 config) | |||
279 | static char buf[32]; | 305 | static char buf[32]; |
280 | 306 | ||
281 | if (type == PERF_TYPE_RAW) { | 307 | if (type == PERF_TYPE_RAW) { |
282 | sprintf(buf, "raw 0x%llx", config); | 308 | sprintf(buf, "raw 0x%" PRIx64, config); |
283 | return buf; | 309 | return buf; |
284 | } | 310 | } |
285 | 311 | ||
286 | switch (type) { | 312 | switch (type) { |
287 | case PERF_TYPE_HARDWARE: | 313 | case PERF_TYPE_HARDWARE: |
288 | if (config < PERF_COUNT_HW_MAX) | 314 | if (config < PERF_COUNT_HW_MAX && hw_event_names[config]) |
289 | return hw_event_names[config]; | 315 | return hw_event_names[config]; |
290 | return "unknown-hardware"; | 316 | return "unknown-hardware"; |
291 | 317 | ||
@@ -311,7 +337,7 @@ const char *__event_name(int type, u64 config) | |||
311 | } | 337 | } |
312 | 338 | ||
313 | case PERF_TYPE_SOFTWARE: | 339 | case PERF_TYPE_SOFTWARE: |
314 | if (config < PERF_COUNT_SW_MAX) | 340 | if (config < PERF_COUNT_SW_MAX && sw_event_names[config]) |
315 | return sw_event_names[config]; | 341 | return sw_event_names[config]; |
316 | return "unknown-software"; | 342 | return "unknown-software"; |
317 | 343 | ||
@@ -434,7 +460,7 @@ parse_single_tracepoint_event(char *sys_name, | |||
434 | id = atoll(id_buf); | 460 | id = atoll(id_buf); |
435 | attr->config = id; | 461 | attr->config = id; |
436 | attr->type = PERF_TYPE_TRACEPOINT; | 462 | attr->type = PERF_TYPE_TRACEPOINT; |
437 | *strp = evt_name + evt_length; | 463 | *strp += strlen(sys_name) + evt_length + 1; /* + 1 for the ':' */ |
438 | 464 | ||
439 | attr->sample_type |= PERF_SAMPLE_RAW; | 465 | attr->sample_type |= PERF_SAMPLE_RAW; |
440 | attr->sample_type |= PERF_SAMPLE_TIME; | 466 | attr->sample_type |= PERF_SAMPLE_TIME; |
@@ -449,8 +475,8 @@ parse_single_tracepoint_event(char *sys_name, | |||
449 | /* sys + ':' + event + ':' + flags*/ | 475 | /* sys + ':' + event + ':' + flags*/ |
450 | #define MAX_EVOPT_LEN (MAX_EVENT_LENGTH * 2 + 2 + 128) | 476 | #define MAX_EVOPT_LEN (MAX_EVENT_LENGTH * 2 + 2 + 128) |
451 | static enum event_result | 477 | static enum event_result |
452 | parse_multiple_tracepoint_event(char *sys_name, const char *evt_exp, | 478 | parse_multiple_tracepoint_event(const struct option *opt, char *sys_name, |
453 | char *flags) | 479 | const char *evt_exp, char *flags) |
454 | { | 480 | { |
455 | char evt_path[MAXPATHLEN]; | 481 | char evt_path[MAXPATHLEN]; |
456 | struct dirent *evt_ent; | 482 | struct dirent *evt_ent; |
@@ -483,19 +509,19 @@ parse_multiple_tracepoint_event(char *sys_name, const char *evt_exp, | |||
483 | if (len < 0) | 509 | if (len < 0) |
484 | return EVT_FAILED; | 510 | return EVT_FAILED; |
485 | 511 | ||
486 | if (parse_events(NULL, event_opt, 0)) | 512 | if (parse_events(opt, event_opt, 0)) |
487 | return EVT_FAILED; | 513 | return EVT_FAILED; |
488 | } | 514 | } |
489 | 515 | ||
490 | return EVT_HANDLED_ALL; | 516 | return EVT_HANDLED_ALL; |
491 | } | 517 | } |
492 | 518 | ||
493 | 519 | static enum event_result | |
494 | static enum event_result parse_tracepoint_event(const char **strp, | 520 | parse_tracepoint_event(const struct option *opt, const char **strp, |
495 | struct perf_event_attr *attr) | 521 | struct perf_event_attr *attr) |
496 | { | 522 | { |
497 | const char *evt_name; | 523 | const char *evt_name; |
498 | char *flags; | 524 | char *flags = NULL, *comma_loc; |
499 | char sys_name[MAX_EVENT_LENGTH]; | 525 | char sys_name[MAX_EVENT_LENGTH]; |
500 | unsigned int sys_length, evt_length; | 526 | unsigned int sys_length, evt_length; |
501 | 527 | ||
@@ -514,6 +540,11 @@ static enum event_result parse_tracepoint_event(const char **strp, | |||
514 | sys_name[sys_length] = '\0'; | 540 | sys_name[sys_length] = '\0'; |
515 | evt_name = evt_name + 1; | 541 | evt_name = evt_name + 1; |
516 | 542 | ||
543 | comma_loc = strchr(evt_name, ','); | ||
544 | if (comma_loc) { | ||
545 | /* take the event name up to the comma */ | ||
546 | evt_name = strndup(evt_name, comma_loc - evt_name); | ||
547 | } | ||
517 | flags = strchr(evt_name, ':'); | 548 | flags = strchr(evt_name, ':'); |
518 | if (flags) { | 549 | if (flags) { |
519 | /* split it out: */ | 550 | /* split it out: */ |
@@ -524,14 +555,14 @@ static enum event_result parse_tracepoint_event(const char **strp, | |||
524 | evt_length = strlen(evt_name); | 555 | evt_length = strlen(evt_name); |
525 | if (evt_length >= MAX_EVENT_LENGTH) | 556 | if (evt_length >= MAX_EVENT_LENGTH) |
526 | return EVT_FAILED; | 557 | return EVT_FAILED; |
527 | |||
528 | if (strpbrk(evt_name, "*?")) { | 558 | if (strpbrk(evt_name, "*?")) { |
529 | *strp = evt_name + evt_length; | 559 | *strp += strlen(sys_name) + evt_length + 1; /* 1 == the ':' */ |
530 | return parse_multiple_tracepoint_event(sys_name, evt_name, | 560 | return parse_multiple_tracepoint_event(opt, sys_name, evt_name, |
531 | flags); | 561 | flags); |
532 | } else | 562 | } else { |
533 | return parse_single_tracepoint_event(sys_name, evt_name, | 563 | return parse_single_tracepoint_event(sys_name, evt_name, |
534 | evt_length, attr, strp); | 564 | evt_length, attr, strp); |
565 | } | ||
535 | } | 566 | } |
536 | 567 | ||
537 | static enum event_result | 568 | static enum event_result |
@@ -621,13 +652,15 @@ static int check_events(const char *str, unsigned int i) | |||
621 | int n; | 652 | int n; |
622 | 653 | ||
623 | n = strlen(event_symbols[i].symbol); | 654 | n = strlen(event_symbols[i].symbol); |
624 | if (!strncmp(str, event_symbols[i].symbol, n)) | 655 | if (!strncasecmp(str, event_symbols[i].symbol, n)) |
625 | return n; | 656 | return n; |
626 | 657 | ||
627 | n = strlen(event_symbols[i].alias); | 658 | n = strlen(event_symbols[i].alias); |
628 | if (n) | 659 | if (n) { |
629 | if (!strncmp(str, event_symbols[i].alias, n)) | 660 | if (!strncasecmp(str, event_symbols[i].alias, n)) |
630 | return n; | 661 | return n; |
662 | } | ||
663 | |||
631 | return 0; | 664 | return 0; |
632 | } | 665 | } |
633 | 666 | ||
@@ -691,15 +724,22 @@ parse_numeric_event(const char **strp, struct perf_event_attr *attr) | |||
691 | return EVT_FAILED; | 724 | return EVT_FAILED; |
692 | } | 725 | } |
693 | 726 | ||
694 | static enum event_result | 727 | static int |
695 | parse_event_modifier(const char **strp, struct perf_event_attr *attr) | 728 | parse_event_modifier(const char **strp, struct perf_event_attr *attr) |
696 | { | 729 | { |
697 | const char *str = *strp; | 730 | const char *str = *strp; |
698 | int exclude = 0; | 731 | int exclude = 0; |
699 | int eu = 0, ek = 0, eh = 0, precise = 0; | 732 | int eu = 0, ek = 0, eh = 0, precise = 0; |
700 | 733 | ||
701 | if (*str++ != ':') | 734 | if (!*str) |
702 | return 0; | 735 | return 0; |
736 | |||
737 | if (*str == ',') | ||
738 | return 0; | ||
739 | |||
740 | if (*str++ != ':') | ||
741 | return -1; | ||
742 | |||
703 | while (*str) { | 743 | while (*str) { |
704 | if (*str == 'u') { | 744 | if (*str == 'u') { |
705 | if (!exclude) | 745 | if (!exclude) |
@@ -720,14 +760,16 @@ parse_event_modifier(const char **strp, struct perf_event_attr *attr) | |||
720 | 760 | ||
721 | ++str; | 761 | ++str; |
722 | } | 762 | } |
723 | if (str >= *strp + 2) { | 763 | if (str < *strp + 2) |
724 | *strp = str; | 764 | return -1; |
725 | attr->exclude_user = eu; | 765 | |
726 | attr->exclude_kernel = ek; | 766 | *strp = str; |
727 | attr->exclude_hv = eh; | 767 | |
728 | attr->precise_ip = precise; | 768 | attr->exclude_user = eu; |
729 | return 1; | 769 | attr->exclude_kernel = ek; |
730 | } | 770 | attr->exclude_hv = eh; |
771 | attr->precise_ip = precise; | ||
772 | |||
731 | return 0; | 773 | return 0; |
732 | } | 774 | } |
733 | 775 | ||
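
The rework above turns a malformed modifier into a hard error (surfaced as the "invalid event modifier" message added to parse_event_symbols() in the next hunk) instead of silently ignoring it. Illustrative strings, assuming the usual u/k/h (user/kernel/hypervisor) and p (precise IP) modifiers:

	static const char * const accepted[] = {
		"cycles",		/* no modifier at all */
		"cycles:u",		/* user space only */
		"cache-misses:kp",	/* kernel only, precise IP requested */
	};

	static const char * const rejected[] = {
		"cycles:",		/* ':' with no modifier characters */
		"cycles:x",		/* unknown modifier character */
	};
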
@@ -736,11 +778,12 @@ parse_event_modifier(const char **strp, struct perf_event_attr *attr) | |||
736 | * Symbolic names are (almost) exactly matched. | 778 | * Symbolic names are (almost) exactly matched. |
737 | */ | 779 | */ |
738 | static enum event_result | 780 | static enum event_result |
739 | parse_event_symbols(const char **str, struct perf_event_attr *attr) | 781 | parse_event_symbols(const struct option *opt, const char **str, |
782 | struct perf_event_attr *attr) | ||
740 | { | 783 | { |
741 | enum event_result ret; | 784 | enum event_result ret; |
742 | 785 | ||
743 | ret = parse_tracepoint_event(str, attr); | 786 | ret = parse_tracepoint_event(opt, str, attr); |
744 | if (ret != EVT_FAILED) | 787 | if (ret != EVT_FAILED) |
745 | goto modifier; | 788 | goto modifier; |
746 | 789 | ||
@@ -769,52 +812,27 @@ parse_event_symbols(const char **str, struct perf_event_attr *attr) | |||
769 | return EVT_FAILED; | 812 | return EVT_FAILED; |
770 | 813 | ||
771 | modifier: | 814 | modifier: |
772 | parse_event_modifier(str, attr); | 815 | if (parse_event_modifier(str, attr) < 0) { |
773 | 816 | fprintf(stderr, "invalid event modifier: '%s'\n", *str); | |
774 | return ret; | 817 | fprintf(stderr, "Run 'perf list' for a list of valid events and modifiers\n"); |
775 | } | ||
776 | |||
777 | static int store_event_type(const char *orgname) | ||
778 | { | ||
779 | char filename[PATH_MAX], *c; | ||
780 | FILE *file; | ||
781 | int id, n; | ||
782 | 818 | ||
783 | sprintf(filename, "%s/", debugfs_path); | 819 | return EVT_FAILED; |
784 | strncat(filename, orgname, strlen(orgname)); | ||
785 | strcat(filename, "/id"); | ||
786 | |||
787 | c = strchr(filename, ':'); | ||
788 | if (c) | ||
789 | *c = '/'; | ||
790 | |||
791 | file = fopen(filename, "r"); | ||
792 | if (!file) | ||
793 | return 0; | ||
794 | n = fscanf(file, "%i", &id); | ||
795 | fclose(file); | ||
796 | if (n < 1) { | ||
797 | pr_err("cannot store event ID\n"); | ||
798 | return -EINVAL; | ||
799 | } | 820 | } |
800 | return perf_header__push_event(id, orgname); | 821 | |
822 | return ret; | ||
801 | } | 823 | } |
802 | 824 | ||
803 | int parse_events(const struct option *opt __used, const char *str, int unset __used) | 825 | int parse_events(const struct option *opt, const char *str, int unset __used) |
804 | { | 826 | { |
827 | struct perf_evlist *evlist = *(struct perf_evlist **)opt->value; | ||
805 | struct perf_event_attr attr; | 828 | struct perf_event_attr attr; |
806 | enum event_result ret; | 829 | enum event_result ret; |
807 | 830 | const char *ostr; | |
808 | if (strchr(str, ':')) | ||
809 | if (store_event_type(str) < 0) | ||
810 | return -1; | ||
811 | 831 | ||
812 | for (;;) { | 832 | for (;;) { |
813 | if (nr_counters == MAX_COUNTERS) | 833 | ostr = str; |
814 | return -1; | ||
815 | |||
816 | memset(&attr, 0, sizeof(attr)); | 834 | memset(&attr, 0, sizeof(attr)); |
817 | ret = parse_event_symbols(&str, &attr); | 835 | ret = parse_event_symbols(opt, &str, &attr); |
818 | if (ret == EVT_FAILED) | 836 | if (ret == EVT_FAILED) |
819 | return -1; | 837 | return -1; |
820 | 838 | ||
@@ -822,8 +840,16 @@ int parse_events(const struct option *opt __used, const char *str, int unset __u | |||
822 | return -1; | 840 | return -1; |
823 | 841 | ||
824 | if (ret != EVT_HANDLED_ALL) { | 842 | if (ret != EVT_HANDLED_ALL) { |
825 | attrs[nr_counters] = attr; | 843 | struct perf_evsel *evsel; |
826 | nr_counters++; | 844 | evsel = perf_evsel__new(&attr, evlist->nr_entries); |
845 | if (evsel == NULL) | ||
846 | return -1; | ||
847 | perf_evlist__add(evlist, evsel); | ||
848 | |||
849 | evsel->name = calloc(str - ostr + 1, 1); | ||
850 | if (!evsel->name) | ||
851 | return -1; | ||
852 | strncpy(evsel->name, ostr, str - ostr); | ||
827 | } | 853 | } |
828 | 854 | ||
829 | if (*str == 0) | 855 | if (*str == 0) |
@@ -837,24 +863,26 @@ int parse_events(const struct option *opt __used, const char *str, int unset __u | |||
837 | return 0; | 863 | return 0; |
838 | } | 864 | } |
839 | 865 | ||
840 | int parse_filter(const struct option *opt __used, const char *str, | 866 | int parse_filter(const struct option *opt, const char *str, |
841 | int unset __used) | 867 | int unset __used) |
842 | { | 868 | { |
843 | int i = nr_counters - 1; | 869 | struct perf_evlist *evlist = *(struct perf_evlist **)opt->value; |
844 | int len = strlen(str); | 870 | struct perf_evsel *last = NULL; |
871 | |||
872 | if (evlist->nr_entries > 0) | ||
873 | last = list_entry(evlist->entries.prev, struct perf_evsel, node); | ||
845 | 874 | ||
846 | if (i < 0 || attrs[i].type != PERF_TYPE_TRACEPOINT) { | 875 | if (last == NULL || last->attr.type != PERF_TYPE_TRACEPOINT) { |
847 | fprintf(stderr, | 876 | fprintf(stderr, |
848 | "-F option should follow a -e tracepoint option\n"); | 877 | "-F option should follow a -e tracepoint option\n"); |
849 | return -1; | 878 | return -1; |
850 | } | 879 | } |
851 | 880 | ||
852 | filters[i] = malloc(len + 1); | 881 | last->filter = strdup(str); |
853 | if (!filters[i]) { | 882 | if (last->filter == NULL) { |
854 | fprintf(stderr, "not enough memory to hold filter string\n"); | 883 | fprintf(stderr, "not enough memory to hold filter string\n"); |
855 | return -1; | 884 | return -1; |
856 | } | 885 | } |
857 | strcpy(filters[i], str); | ||
858 | 886 | ||
859 | return 0; | 887 | return 0; |
860 | } | 888 | } |
@@ -872,7 +900,7 @@ static const char * const event_type_descriptors[] = { | |||
872 | * Print the events from <debugfs_mount_point>/tracing/events | 900 | * Print the events from <debugfs_mount_point>/tracing/events |
873 | */ | 901 | */ |
874 | 902 | ||
875 | static void print_tracepoint_events(void) | 903 | void print_tracepoint_events(const char *subsys_glob, const char *event_glob) |
876 | { | 904 | { |
877 | DIR *sys_dir, *evt_dir; | 905 | DIR *sys_dir, *evt_dir; |
878 | struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent; | 906 | struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent; |
@@ -887,6 +915,9 @@ static void print_tracepoint_events(void) | |||
887 | return; | 915 | return; |
888 | 916 | ||
889 | for_each_subsystem(sys_dir, sys_dirent, sys_next) { | 917 | for_each_subsystem(sys_dir, sys_dirent, sys_next) { |
918 | if (subsys_glob != NULL && | ||
919 | !strglobmatch(sys_dirent.d_name, subsys_glob)) | ||
920 | continue; | ||
890 | 921 | ||
891 | snprintf(dir_path, MAXPATHLEN, "%s/%s", debugfs_path, | 922 | snprintf(dir_path, MAXPATHLEN, "%s/%s", debugfs_path, |
892 | sys_dirent.d_name); | 923 | sys_dirent.d_name); |
@@ -895,9 +926,13 @@ static void print_tracepoint_events(void) | |||
895 | continue; | 926 | continue; |
896 | 927 | ||
897 | for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) { | 928 | for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) { |
929 | if (event_glob != NULL && | ||
930 | !strglobmatch(evt_dirent.d_name, event_glob)) | ||
931 | continue; | ||
932 | |||
898 | snprintf(evt_path, MAXPATHLEN, "%s:%s", | 933 | snprintf(evt_path, MAXPATHLEN, "%s:%s", |
899 | sys_dirent.d_name, evt_dirent.d_name); | 934 | sys_dirent.d_name, evt_dirent.d_name); |
900 | printf(" %-42s [%s]\n", evt_path, | 935 | printf(" %-50s [%s]\n", evt_path, |
901 | event_type_descriptors[PERF_TYPE_TRACEPOINT]); | 936 | event_type_descriptors[PERF_TYPE_TRACEPOINT]); |
902 | } | 937 | } |
903 | closedir(evt_dir); | 938 | closedir(evt_dir); |
@@ -906,34 +941,71 @@ static void print_tracepoint_events(void) | |||
906 | } | 941 | } |
907 | 942 | ||
908 | /* | 943 | /* |
909 | * Print the help text for the event symbols: | 944 | * Check whether event is in <debugfs_mount_point>/tracing/events |
910 | */ | 945 | */ |
911 | void print_events(void) | 946 | |
947 | int is_valid_tracepoint(const char *event_string) | ||
912 | { | 948 | { |
913 | struct event_symbol *syms = event_symbols; | 949 | DIR *sys_dir, *evt_dir; |
914 | unsigned int i, type, op, prev_type = -1; | 950 | struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent; |
915 | char name[40]; | 951 | char evt_path[MAXPATHLEN]; |
952 | char dir_path[MAXPATHLEN]; | ||
916 | 953 | ||
917 | printf("\n"); | 954 | if (debugfs_valid_mountpoint(debugfs_path)) |
918 | printf("List of pre-defined events (to be used in -e):\n"); | 955 | return 0; |
919 | 956 | ||
920 | for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) { | 957 | sys_dir = opendir(debugfs_path); |
921 | type = syms->type; | 958 | if (!sys_dir) |
959 | return 0; | ||
922 | 960 | ||
923 | if (type != prev_type) | 961 | for_each_subsystem(sys_dir, sys_dirent, sys_next) { |
924 | printf("\n"); | 962 | |
963 | snprintf(dir_path, MAXPATHLEN, "%s/%s", debugfs_path, | ||
964 | sys_dirent.d_name); | ||
965 | evt_dir = opendir(dir_path); | ||
966 | if (!evt_dir) | ||
967 | continue; | ||
968 | |||
969 | for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) { | ||
970 | snprintf(evt_path, MAXPATHLEN, "%s:%s", | ||
971 | sys_dirent.d_name, evt_dirent.d_name); | ||
972 | if (!strcmp(evt_path, event_string)) { | ||
973 | closedir(evt_dir); | ||
974 | closedir(sys_dir); | ||
975 | return 1; | ||
976 | } | ||
977 | } | ||
978 | closedir(evt_dir); | ||
979 | } | ||
980 | closedir(sys_dir); | ||
981 | return 0; | ||
982 | } | ||
983 | |||
984 | void print_events_type(u8 type) | ||
985 | { | ||
986 | struct event_symbol *syms = event_symbols; | ||
987 | unsigned int i; | ||
988 | char name[64]; | ||
989 | |||
990 | for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) { | ||
991 | if (type != syms->type) | ||
992 | continue; | ||
925 | 993 | ||
926 | if (strlen(syms->alias)) | 994 | if (strlen(syms->alias)) |
927 | sprintf(name, "%s OR %s", syms->symbol, syms->alias); | 995 | snprintf(name, sizeof(name), "%s OR %s", |
996 | syms->symbol, syms->alias); | ||
928 | else | 997 | else |
929 | strcpy(name, syms->symbol); | 998 | snprintf(name, sizeof(name), "%s", syms->symbol); |
930 | printf(" %-42s [%s]\n", name, | ||
931 | event_type_descriptors[type]); | ||
932 | 999 | ||
933 | prev_type = type; | 1000 | printf(" %-50s [%s]\n", name, |
1001 | event_type_descriptors[type]); | ||
934 | } | 1002 | } |
1003 | } | ||
1004 | |||
1005 | int print_hwcache_events(const char *event_glob) | ||
1006 | { | ||
1007 | unsigned int type, op, i, printed = 0; | ||
935 | 1008 | ||
936 | printf("\n"); | ||
937 | for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) { | 1009 | for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) { |
938 | for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) { | 1010 | for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) { |
939 | /* skip invalid cache type */ | 1011 | /* skip invalid cache type */ |
@@ -941,25 +1013,81 @@ void print_events(void) | |||
941 | continue; | 1013 | continue; |
942 | 1014 | ||
943 | for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { | 1015 | for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { |
944 | printf(" %-42s [%s]\n", | 1016 | char *name = event_cache_name(type, op, i); |
945 | event_cache_name(type, op, i), | 1017 | |
1018 | if (event_glob != NULL && !strglobmatch(name, event_glob)) | ||
1019 | continue; | ||
1020 | |||
1021 | printf(" %-50s [%s]\n", name, | ||
946 | event_type_descriptors[PERF_TYPE_HW_CACHE]); | 1022 | event_type_descriptors[PERF_TYPE_HW_CACHE]); |
1023 | ++printed; | ||
947 | } | 1024 | } |
948 | } | 1025 | } |
949 | } | 1026 | } |
950 | 1027 | ||
1028 | return printed; | ||
1029 | } | ||
1030 | |||
1031 | #define MAX_NAME_LEN 100 | ||
1032 | |||
1033 | /* | ||
1034 | * Print the help text for the event symbols: | ||
1035 | */ | ||
1036 | void print_events(const char *event_glob) | ||
1037 | { | ||
1038 | unsigned int i, type, prev_type = -1, printed = 0, ntypes_printed = 0; | ||
1039 | struct event_symbol *syms = event_symbols; | ||
1040 | char name[MAX_NAME_LEN]; | ||
1041 | |||
1042 | printf("\n"); | ||
1043 | printf("List of pre-defined events (to be used in -e):\n"); | ||
1044 | |||
1045 | for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) { | ||
1046 | type = syms->type; | ||
1047 | |||
1048 | if (type != prev_type && printed) { | ||
1049 | printf("\n"); | ||
1050 | printed = 0; | ||
1051 | ntypes_printed++; | ||
1052 | } | ||
1053 | |||
1054 | if (event_glob != NULL && | ||
1055 | !(strglobmatch(syms->symbol, event_glob) || | ||
1056 | (syms->alias && strglobmatch(syms->alias, event_glob)))) | ||
1057 | continue; | ||
1058 | |||
1059 | if (strlen(syms->alias)) | ||
1060 | snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias); | ||
1061 | else | ||
1062 | strncpy(name, syms->symbol, MAX_NAME_LEN); | ||
1063 | printf(" %-50s [%s]\n", name, | ||
1064 | event_type_descriptors[type]); | ||
1065 | |||
1066 | prev_type = type; | ||
1067 | ++printed; | ||
1068 | } | ||
1069 | |||
1070 | if (ntypes_printed) { | ||
1071 | printed = 0; | ||
1072 | printf("\n"); | ||
1073 | } | ||
1074 | print_hwcache_events(event_glob); | ||
1075 | |||
1076 | if (event_glob != NULL) | ||
1077 | return; | ||
1078 | |||
951 | printf("\n"); | 1079 | printf("\n"); |
952 | printf(" %-42s [%s]\n", | 1080 | printf(" %-50s [%s]\n", |
953 | "rNNN (see 'perf list --help' on how to encode it)", | 1081 | "rNNN (see 'perf list --help' on how to encode it)", |
954 | event_type_descriptors[PERF_TYPE_RAW]); | 1082 | event_type_descriptors[PERF_TYPE_RAW]); |
955 | printf("\n"); | 1083 | printf("\n"); |
956 | 1084 | ||
957 | printf(" %-42s [%s]\n", | 1085 | printf(" %-50s [%s]\n", |
958 | "mem:<addr>[:access]", | 1086 | "mem:<addr>[:access]", |
959 | event_type_descriptors[PERF_TYPE_BREAKPOINT]); | 1087 | event_type_descriptors[PERF_TYPE_BREAKPOINT]); |
960 | printf("\n"); | 1088 | printf("\n"); |
961 | 1089 | ||
962 | print_tracepoint_events(); | 1090 | print_tracepoint_events(NULL, NULL); |
963 | 1091 | ||
964 | exit(129); | 1092 | exit(129); |
965 | } | 1093 | } |
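
Parsed events now land on a perf_evlist as perf_evsel entries (each carrying its own name and filter) rather than in the removed attrs[]/filters[]/nr_counters globals. A hedged sketch of walking that list with the naming helpers kept in this file:

	/* Illustrative only: print every parsed event the way the listing
	 * code above formats its output. */
	static void show_parsed_events(struct perf_evlist *evlist)
	{
		struct perf_evsel *evsel;

		list_for_each_entry(evsel, &evlist->entries, node)
			printf("  %-50s [%s]\n", event_name(evsel),
			       event_type(evsel->attr.type));
	}
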
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h index fc4ab3fe877a..746d3fcbfc2a 100644 --- a/tools/perf/util/parse-events.h +++ b/tools/perf/util/parse-events.h | |||
@@ -4,6 +4,11 @@ | |||
4 | * Parse symbolic events/counts passed in as options: | 4 | * Parse symbolic events/counts passed in as options: |
5 | */ | 5 | */ |
6 | 6 | ||
7 | #include "../../../include/linux/perf_event.h" | ||
8 | |||
9 | struct list_head; | ||
10 | struct perf_evsel; | ||
11 | |||
7 | struct option; | 12 | struct option; |
8 | 13 | ||
9 | struct tracepoint_path { | 14 | struct tracepoint_path { |
@@ -13,14 +18,10 @@ struct tracepoint_path { | |||
13 | }; | 18 | }; |
14 | 19 | ||
15 | extern struct tracepoint_path *tracepoint_id_to_path(u64 config); | 20 | extern struct tracepoint_path *tracepoint_id_to_path(u64 config); |
16 | extern bool have_tracepoints(struct perf_event_attr *pattrs, int nb_events); | 21 | extern bool have_tracepoints(struct list_head *evlist); |
17 | |||
18 | extern int nr_counters; | ||
19 | 22 | ||
20 | extern struct perf_event_attr attrs[MAX_COUNTERS]; | 23 | const char *event_type(int type); |
21 | extern char *filters[MAX_COUNTERS]; | 24 | const char *event_name(struct perf_evsel *event); |
22 | |||
23 | extern const char *event_name(int ctr); | ||
24 | extern const char *__event_name(int type, u64 config); | 25 | extern const char *__event_name(int type, u64 config); |
25 | 26 | ||
26 | extern int parse_events(const struct option *opt, const char *str, int unset); | 27 | extern int parse_events(const struct option *opt, const char *str, int unset); |
@@ -28,10 +29,13 @@ extern int parse_filter(const struct option *opt, const char *str, int unset); | |||
28 | 29 | ||
29 | #define EVENTS_HELP_MAX (128*1024) | 30 | #define EVENTS_HELP_MAX (128*1024) |
30 | 31 | ||
31 | extern void print_events(void); | 32 | void print_events(const char *event_glob); |
33 | void print_events_type(u8 type); | ||
34 | void print_tracepoint_events(const char *subsys_glob, const char *event_glob); | ||
35 | int print_hwcache_events(const char *event_glob); | ||
36 | extern int is_valid_tracepoint(const char *event_string); | ||
32 | 37 | ||
33 | extern char debugfs_path[]; | 38 | extern char debugfs_path[]; |
34 | extern int valid_debugfs_mount(const char *debugfs); | 39 | extern int valid_debugfs_mount(const char *debugfs); |
35 | 40 | ||
36 | |||
37 | #endif /* __PERF_PARSE_EVENTS_H */ | 41 | #endif /* __PERF_PARSE_EVENTS_H */ |
diff --git a/tools/perf/util/parse-options.h b/tools/perf/util/parse-options.h index c7d72dce54b2..abc31a1dac1a 100644 --- a/tools/perf/util/parse-options.h +++ b/tools/perf/util/parse-options.h | |||
@@ -119,6 +119,10 @@ struct option { | |||
119 | { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), (a), .help = (h), .callback = (f), .flags = PARSE_OPT_NOARG } | 119 | { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), (a), .help = (h), .callback = (f), .flags = PARSE_OPT_NOARG } |
120 | #define OPT_CALLBACK_DEFAULT(s, l, v, a, h, f, d) \ | 120 | #define OPT_CALLBACK_DEFAULT(s, l, v, a, h, f, d) \ |
121 | { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), (a), .help = (h), .callback = (f), .defval = (intptr_t)d, .flags = PARSE_OPT_LASTARG_DEFAULT } | 121 | { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), (a), .help = (h), .callback = (f), .defval = (intptr_t)d, .flags = PARSE_OPT_LASTARG_DEFAULT } |
122 | #define OPT_CALLBACK_DEFAULT_NOOPT(s, l, v, a, h, f, d) \ | ||
123 | { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l),\ | ||
124 | .value = (v), (a), .help = (h), .callback = (f), .defval = (intptr_t)d,\ | ||
125 | .flags = PARSE_OPT_LASTARG_DEFAULT | PARSE_OPT_NOARG} | ||
122 | 126 | ||
123 | /* parse_options() will filter out the processed options and leave the | 127 | /* parse_options() will filter out the processed options and leave the |
124 | * non-option argments in argv[]. | 128 | * non-option argments in argv[]. |
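
OPT_CALLBACK_DEFAULT_NOOPT combines PARSE_OPT_LASTARG_DEFAULT with PARSE_OPT_NOARG: the callback never consumes a command-line argument and is handed the default value instead. A hypothetical option-table entry (names invented for illustration):

	OPT_CALLBACK_DEFAULT_NOOPT('d', "detailed", &detail_level, NULL,
				   "print more detailed statistics",
				   parse_detail_level, "1"),
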
diff --git a/tools/perf/util/path.c b/tools/perf/util/path.c index 58a470d036dd..bd7497711424 100644 --- a/tools/perf/util/path.c +++ b/tools/perf/util/path.c | |||
@@ -22,6 +22,7 @@ static const char *get_perf_dir(void) | |||
22 | return "."; | 22 | return "."; |
23 | } | 23 | } |
24 | 24 | ||
25 | #ifdef NO_STRLCPY | ||
25 | size_t strlcpy(char *dest, const char *src, size_t size) | 26 | size_t strlcpy(char *dest, const char *src, size_t size) |
26 | { | 27 | { |
27 | size_t ret = strlen(src); | 28 | size_t ret = strlen(src); |
@@ -33,7 +34,7 @@ size_t strlcpy(char *dest, const char *src, size_t size) | |||
33 | } | 34 | } |
34 | return ret; | 35 | return ret; |
35 | } | 36 | } |
36 | 37 | #endif | |
37 | 38 | ||
38 | static char *get_pathname(void) | 39 | static char *get_pathname(void) |
39 | { | 40 | { |
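
Wrapping the local strlcpy() in NO_STRLCPY presumably lets a system-provided implementation take precedence where one exists (glibc has none, but other libcs and libbsd do), avoiding a redefinition clash. Behaviour sketch, illustrative only:

	static void copy_example(void)
	{
		char buf[8];

		/* strlcpy() always NUL-terminates and returns strlen(src),
		 * so truncation shows up as a return value >= the buffer
		 * size; here buf ends up holding "/sys/ke". */
		if (strlcpy(buf, "/sys/kernel/debug", sizeof(buf)) >= sizeof(buf))
			fprintf(stderr, "truncated to '%s'\n", buf);
	}
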
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index fcc16e4349df..f0223166e761 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <string.h> | 31 | #include <string.h> |
32 | #include <stdarg.h> | 32 | #include <stdarg.h> |
33 | #include <limits.h> | 33 | #include <limits.h> |
34 | #include <elf.h> | ||
34 | 35 | ||
35 | #undef _GNU_SOURCE | 36 | #undef _GNU_SOURCE |
36 | #include "util.h" | 37 | #include "util.h" |
@@ -74,10 +75,9 @@ static int e_snprintf(char *str, size_t size, const char *format, ...) | |||
74 | static char *synthesize_perf_probe_point(struct perf_probe_point *pp); | 75 | static char *synthesize_perf_probe_point(struct perf_probe_point *pp); |
75 | static struct machine machine; | 76 | static struct machine machine; |
76 | 77 | ||
77 | /* Initialize symbol maps and path of vmlinux */ | 78 | /* Initialize symbol maps and path of vmlinux/modules */ |
78 | static int init_vmlinux(void) | 79 | static int init_vmlinux(void) |
79 | { | 80 | { |
80 | struct dso *kernel; | ||
81 | int ret; | 81 | int ret; |
82 | 82 | ||
83 | symbol_conf.sort_by_name = true; | 83 | symbol_conf.sort_by_name = true; |
@@ -91,33 +91,95 @@ static int init_vmlinux(void) | |||
91 | goto out; | 91 | goto out; |
92 | } | 92 | } |
93 | 93 | ||
94 | ret = machine__init(&machine, "/", 0); | 94 | ret = machine__init(&machine, "", HOST_KERNEL_ID); |
95 | if (ret < 0) | 95 | if (ret < 0) |
96 | goto out; | 96 | goto out; |
97 | 97 | ||
98 | kernel = dso__new_kernel(symbol_conf.vmlinux_name); | 98 | if (machine__create_kernel_maps(&machine) < 0) { |
99 | if (kernel == NULL) | 99 | pr_debug("machine__create_kernel_maps() failed.\n"); |
100 | die("Failed to create kernel dso."); | 100 | goto out; |
101 | 101 | } | |
102 | ret = __machine__create_kernel_maps(&machine, kernel); | ||
103 | if (ret < 0) | ||
104 | pr_debug("Failed to create kernel maps.\n"); | ||
105 | |||
106 | out: | 102 | out: |
107 | if (ret < 0) | 103 | if (ret < 0) |
108 | pr_warning("Failed to init vmlinux path.\n"); | 104 | pr_warning("Failed to init vmlinux path.\n"); |
109 | return ret; | 105 | return ret; |
110 | } | 106 | } |
111 | 107 | ||
108 | static struct symbol *__find_kernel_function_by_name(const char *name, | ||
109 | struct map **mapp) | ||
110 | { | ||
111 | return machine__find_kernel_function_by_name(&machine, name, mapp, | ||
112 | NULL); | ||
113 | } | ||
114 | |||
115 | static struct map *kernel_get_module_map(const char *module) | ||
116 | { | ||
117 | struct rb_node *nd; | ||
118 | struct map_groups *grp = &machine.kmaps; | ||
119 | |||
120 | if (!module) | ||
121 | module = "kernel"; | ||
122 | |||
123 | for (nd = rb_first(&grp->maps[MAP__FUNCTION]); nd; nd = rb_next(nd)) { | ||
124 | struct map *pos = rb_entry(nd, struct map, rb_node); | ||
125 | if (strncmp(pos->dso->short_name + 1, module, | ||
126 | pos->dso->short_name_len - 2) == 0) { | ||
127 | return pos; | ||
128 | } | ||
129 | } | ||
130 | return NULL; | ||
131 | } | ||
132 | |||
133 | static struct dso *kernel_get_module_dso(const char *module) | ||
134 | { | ||
135 | struct dso *dso; | ||
136 | struct map *map; | ||
137 | const char *vmlinux_name; | ||
138 | |||
139 | if (module) { | ||
140 | list_for_each_entry(dso, &machine.kernel_dsos, node) { | ||
141 | if (strncmp(dso->short_name + 1, module, | ||
142 | dso->short_name_len - 2) == 0) | ||
143 | goto found; | ||
144 | } | ||
145 | pr_debug("Failed to find module %s.\n", module); | ||
146 | return NULL; | ||
147 | } | ||
148 | |||
149 | map = machine.vmlinux_maps[MAP__FUNCTION]; | ||
150 | dso = map->dso; | ||
151 | |||
152 | vmlinux_name = symbol_conf.vmlinux_name; | ||
153 | if (vmlinux_name) { | ||
154 | if (dso__load_vmlinux(dso, map, vmlinux_name, NULL) <= 0) | ||
155 | return NULL; | ||
156 | } else { | ||
157 | if (dso__load_vmlinux_path(dso, map, NULL) <= 0) { | ||
158 | pr_debug("Failed to load kernel map.\n"); | ||
159 | return NULL; | ||
160 | } | ||
161 | } | ||
162 | found: | ||
163 | return dso; | ||
164 | } | ||
165 | |||
166 | const char *kernel_get_module_path(const char *module) | ||
167 | { | ||
168 | struct dso *dso = kernel_get_module_dso(module); | ||
169 | return (dso) ? dso->long_name : NULL; | ||
170 | } | ||
171 | |||
112 | #ifdef DWARF_SUPPORT | 172 | #ifdef DWARF_SUPPORT |
113 | static int open_vmlinux(void) | 173 | static int open_vmlinux(const char *module) |
114 | { | 174 | { |
115 | if (map__load(machine.vmlinux_maps[MAP__FUNCTION], NULL) < 0) { | 175 | const char *path = kernel_get_module_path(module); |
116 | pr_debug("Failed to load kernel map.\n"); | 176 | if (!path) { |
117 | return -EINVAL; | 177 | pr_err("Failed to find path of %s module.\n", |
178 | module ?: "kernel"); | ||
179 | return -ENOENT; | ||
118 | } | 180 | } |
119 | pr_debug("Try to open %s\n", machine.vmlinux_maps[MAP__FUNCTION]->dso->long_name); | 181 | pr_debug("Try to open %s\n", path); |
120 | return open(machine.vmlinux_maps[MAP__FUNCTION]->dso->long_name, O_RDONLY); | 182 | return open(path, O_RDONLY); |
121 | } | 183 | } |
122 | 184 | ||
123 | /* | 185 | /* |
@@ -125,20 +187,19 @@ static int open_vmlinux(void) | |||
125 | * Currently only handles kprobes. | 187 | * Currently only handles kprobes. |
126 | */ | 188 | */ |
127 | static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp, | 189 | static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp, |
128 | struct perf_probe_point *pp) | 190 | struct perf_probe_point *pp) |
129 | { | 191 | { |
130 | struct symbol *sym; | 192 | struct symbol *sym; |
131 | int fd, ret = -ENOENT; | 193 | struct map *map; |
194 | u64 addr; | ||
195 | int ret = -ENOENT; | ||
132 | 196 | ||
133 | sym = map__find_symbol_by_name(machine.vmlinux_maps[MAP__FUNCTION], | 197 | sym = __find_kernel_function_by_name(tp->symbol, &map); |
134 | tp->symbol, NULL); | ||
135 | if (sym) { | 198 | if (sym) { |
136 | fd = open_vmlinux(); | 199 | addr = map->unmap_ip(map, sym->start + tp->offset); |
137 | if (fd >= 0) { | 200 | pr_debug("try to find %s+%ld@%" PRIx64 "\n", tp->symbol, |
138 | ret = find_perf_probe_point(fd, | 201 | tp->offset, addr); |
139 | sym->start + tp->offset, pp); | 202 | ret = find_perf_probe_point((unsigned long)addr, pp); |
140 | close(fd); | ||
141 | } | ||
142 | } | 203 | } |
143 | if (ret <= 0) { | 204 | if (ret <= 0) { |
144 | pr_debug("Failed to find corresponding probes from " | 205 | pr_debug("Failed to find corresponding probes from " |
@@ -156,12 +217,12 @@ static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp, | |||
156 | /* Try to find perf_probe_event with debuginfo */ | 217 | /* Try to find perf_probe_event with debuginfo */ |
157 | static int try_to_find_probe_trace_events(struct perf_probe_event *pev, | 218 | static int try_to_find_probe_trace_events(struct perf_probe_event *pev, |
158 | struct probe_trace_event **tevs, | 219 | struct probe_trace_event **tevs, |
159 | int max_tevs) | 220 | int max_tevs, const char *module) |
160 | { | 221 | { |
161 | bool need_dwarf = perf_probe_event_need_dwarf(pev); | 222 | bool need_dwarf = perf_probe_event_need_dwarf(pev); |
162 | int fd, ntevs; | 223 | int fd, ntevs; |
163 | 224 | ||
164 | fd = open_vmlinux(); | 225 | fd = open_vmlinux(module); |
165 | if (fd < 0) { | 226 | if (fd < 0) { |
166 | if (need_dwarf) { | 227 | if (need_dwarf) { |
167 | pr_warning("Failed to open debuginfo file.\n"); | 228 | pr_warning("Failed to open debuginfo file.\n"); |
@@ -173,7 +234,6 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev, | |||
173 | 234 | ||
174 | /* Searching trace events corresponding to probe event */ | 235 | /* Searching trace events corresponding to probe event */ |
175 | ntevs = find_probe_trace_events(fd, pev, tevs, max_tevs); | 236 | ntevs = find_probe_trace_events(fd, pev, tevs, max_tevs); |
176 | close(fd); | ||
177 | 237 | ||
178 | if (ntevs > 0) { /* Succeeded to find trace events */ | 238 | if (ntevs > 0) { /* Succeeded to find trace events */ |
179 | pr_debug("find %d probe_trace_events.\n", ntevs); | 239 | pr_debug("find %d probe_trace_events.\n", ntevs); |
@@ -191,7 +251,7 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev, | |||
191 | pr_warning("Warning: No dwarf info found in the vmlinux - " | 251 | pr_warning("Warning: No dwarf info found in the vmlinux - " |
192 | "please rebuild kernel with CONFIG_DEBUG_INFO=y.\n"); | 252 | "please rebuild kernel with CONFIG_DEBUG_INFO=y.\n"); |
193 | if (!need_dwarf) { | 253 | if (!need_dwarf) { |
194 | pr_debug("Trying to use symbols.\nn"); | 254 | pr_debug("Trying to use symbols.\n"); |
195 | return 0; | 255 | return 0; |
196 | } | 256 | } |
197 | } | 257 | } |
@@ -260,47 +320,54 @@ static int get_real_path(const char *raw_path, const char *comp_dir, | |||
260 | #define LINEBUF_SIZE 256 | 320 | #define LINEBUF_SIZE 256 |
261 | #define NR_ADDITIONAL_LINES 2 | 321 | #define NR_ADDITIONAL_LINES 2 |
262 | 322 | ||
263 | static int show_one_line(FILE *fp, int l, bool skip, bool show_num) | 323 | static int __show_one_line(FILE *fp, int l, bool skip, bool show_num) |
264 | { | 324 | { |
265 | char buf[LINEBUF_SIZE]; | 325 | char buf[LINEBUF_SIZE]; |
266 | const char *color = PERF_COLOR_BLUE; | 326 | const char *color = show_num ? "" : PERF_COLOR_BLUE; |
267 | 327 | const char *prefix = NULL; | |
268 | if (fgets(buf, LINEBUF_SIZE, fp) == NULL) | ||
269 | goto error; | ||
270 | if (!skip) { | ||
271 | if (show_num) | ||
272 | fprintf(stdout, "%7d %s", l, buf); | ||
273 | else | ||
274 | color_fprintf(stdout, color, " %s", buf); | ||
275 | } | ||
276 | 328 | ||
277 | while (strlen(buf) == LINEBUF_SIZE - 1 && | 329 | do { |
278 | buf[LINEBUF_SIZE - 2] != '\n') { | ||
279 | if (fgets(buf, LINEBUF_SIZE, fp) == NULL) | 330 | if (fgets(buf, LINEBUF_SIZE, fp) == NULL) |
280 | goto error; | 331 | goto error; |
281 | if (!skip) { | 332 | if (skip) |
282 | if (show_num) | 333 | continue; |
283 | fprintf(stdout, "%s", buf); | 334 | if (!prefix) { |
284 | else | 335 | prefix = show_num ? "%7d " : " "; |
285 | color_fprintf(stdout, color, "%s", buf); | 336 | color_fprintf(stdout, color, prefix, l); |
286 | } | 337 | } |
287 | } | 338 | color_fprintf(stdout, color, "%s", buf); |
288 | 339 | ||
289 | return 0; | 340 | } while (strchr(buf, '\n') == NULL); |
341 | |||
342 | return 1; | ||
290 | error: | 343 | error: |
291 | if (feof(fp)) | 344 | if (ferror(fp)) { |
292 | pr_warning("Source file is shorter than expected.\n"); | ||
293 | else | ||
294 | pr_warning("File read error: %s\n", strerror(errno)); | 345 | pr_warning("File read error: %s\n", strerror(errno)); |
346 | return -1; | ||
347 | } | ||
348 | return 0; | ||
349 | } | ||
295 | 350 | ||
296 | return -1; | 351 | static int _show_one_line(FILE *fp, int l, bool skip, bool show_num) |
352 | { | ||
353 | int rv = __show_one_line(fp, l, skip, show_num); | ||
354 | if (rv == 0) { | ||
355 | pr_warning("Source file is shorter than expected.\n"); | ||
356 | rv = -1; | ||
357 | } | ||
358 | return rv; | ||
297 | } | 359 | } |
298 | 360 | ||
361 | #define show_one_line_with_num(f,l) _show_one_line(f,l,false,true) | ||
362 | #define show_one_line(f,l) _show_one_line(f,l,false,false) | ||
363 | #define skip_one_line(f,l) _show_one_line(f,l,true,false) | ||
364 | #define show_one_line_or_eof(f,l) __show_one_line(f,l,false,false) | ||
365 | |||
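A minimal standalone sketch of the chunked read that __show_one_line() is built around, using plain stdio instead of perf's color output (the function name is hypothetical): fgets() is called repeatedly with a small buffer until the chunk contains a newline, so source lines longer than LINEBUF_SIZE are printed without truncation.

#include <stdio.h>
#include <string.h>

#define LINEBUF_SIZE 256

/* Hypothetical sketch: print one source line, however long it is.
 * Returns 1 when a line was printed, 0 on EOF, -1 on a read error. */
static int print_one_line(FILE *fp)
{
	char buf[LINEBUF_SIZE];

	do {
		if (fgets(buf, LINEBUF_SIZE, fp) == NULL)
			return ferror(fp) ? -1 : 0;
		fputs(buf, stdout);
	} while (strchr(buf, '\n') == NULL);	/* line spans another chunk */

	return 1;
}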
299 | /* | 366 | /* |
300 | * Show line-range always requires debuginfo to find source file and | 367 | * Show line-range always requires debuginfo to find source file and |
301 | * line number. | 368 | * line number. |
302 | */ | 369 | */ |
303 | int show_line_range(struct line_range *lr) | 370 | int show_line_range(struct line_range *lr, const char *module) |
304 | { | 371 | { |
305 | int l = 1; | 372 | int l = 1; |
306 | struct line_node *ln; | 373 | struct line_node *ln; |
@@ -313,14 +380,13 @@ int show_line_range(struct line_range *lr) | |||
313 | if (ret < 0) | 380 | if (ret < 0) |
314 | return ret; | 381 | return ret; |
315 | 382 | ||
316 | fd = open_vmlinux(); | 383 | fd = open_vmlinux(module); |
317 | if (fd < 0) { | 384 | if (fd < 0) { |
318 | pr_warning("Failed to open debuginfo file.\n"); | 385 | pr_warning("Failed to open debuginfo file.\n"); |
319 | return fd; | 386 | return fd; |
320 | } | 387 | } |
321 | 388 | ||
322 | ret = find_line_range(fd, lr); | 389 | ret = find_line_range(fd, lr); |
323 | close(fd); | ||
324 | if (ret == 0) { | 390 | if (ret == 0) { |
325 | pr_warning("Specified source line is not found.\n"); | 391 | pr_warning("Specified source line is not found.\n"); |
326 | return -ENOENT; | 392 | return -ENOENT; |
@@ -341,10 +407,10 @@ int show_line_range(struct line_range *lr) | |||
341 | setup_pager(); | 407 | setup_pager(); |
342 | 408 | ||
343 | if (lr->function) | 409 | if (lr->function) |
344 | fprintf(stdout, "<%s:%d>\n", lr->function, | 410 | fprintf(stdout, "<%s@%s:%d>\n", lr->function, lr->path, |
345 | lr->start - lr->offset); | 411 | lr->start - lr->offset); |
346 | else | 412 | else |
347 | fprintf(stdout, "<%s:%d>\n", lr->file, lr->start); | 413 | fprintf(stdout, "<%s:%d>\n", lr->path, lr->start); |
348 | 414 | ||
349 | fp = fopen(lr->path, "r"); | 415 | fp = fopen(lr->path, "r"); |
350 | if (fp == NULL) { | 416 | if (fp == NULL) { |
@@ -353,36 +419,124 @@ int show_line_range(struct line_range *lr) | |||
353 | return -errno; | 419 | return -errno; |
354 | } | 420 | } |
355 | /* Skip to starting line number */ | 421 | /* Skip to starting line number */ |
356 | while (l < lr->start && ret >= 0) | 422 | while (l < lr->start) { |
357 | ret = show_one_line(fp, l++, true, false); | 423 | ret = skip_one_line(fp, l++); |
358 | if (ret < 0) | 424 | if (ret < 0) |
359 | goto end; | 425 | goto end; |
426 | } | ||
360 | 427 | ||
361 | list_for_each_entry(ln, &lr->line_list, list) { | 428 | list_for_each_entry(ln, &lr->line_list, list) { |
362 | while (ln->line > l && ret >= 0) | 429 | for (; ln->line > l; l++) { |
363 | ret = show_one_line(fp, (l++) - lr->offset, | 430 | ret = show_one_line(fp, l - lr->offset); |
364 | false, false); | 431 | if (ret < 0) |
365 | if (ret >= 0) | 432 | goto end; |
366 | ret = show_one_line(fp, (l++) - lr->offset, | 433 | } |
367 | false, true); | 434 | ret = show_one_line_with_num(fp, l++ - lr->offset); |
368 | if (ret < 0) | 435 | if (ret < 0) |
369 | goto end; | 436 | goto end; |
370 | } | 437 | } |
371 | 438 | ||
372 | if (lr->end == INT_MAX) | 439 | if (lr->end == INT_MAX) |
373 | lr->end = l + NR_ADDITIONAL_LINES; | 440 | lr->end = l + NR_ADDITIONAL_LINES; |
374 | while (l <= lr->end && !feof(fp) && ret >= 0) | 441 | while (l <= lr->end) { |
375 | ret = show_one_line(fp, (l++) - lr->offset, false, false); | 442 | ret = show_one_line_or_eof(fp, l++ - lr->offset); |
443 | if (ret <= 0) | ||
444 | break; | ||
445 | } | ||
376 | end: | 446 | end: |
377 | fclose(fp); | 447 | fclose(fp); |
378 | return ret; | 448 | return ret; |
379 | } | 449 | } |
380 | 450 | ||
451 | static int show_available_vars_at(int fd, struct perf_probe_event *pev, | ||
452 | int max_vls, struct strfilter *_filter, | ||
453 | bool externs) | ||
454 | { | ||
455 | char *buf; | ||
456 | int ret, i, nvars; | ||
457 | struct str_node *node; | ||
458 | struct variable_list *vls = NULL, *vl; | ||
459 | const char *var; | ||
460 | |||
461 | buf = synthesize_perf_probe_point(&pev->point); | ||
462 | if (!buf) | ||
463 | return -EINVAL; | ||
464 | pr_debug("Searching variables at %s\n", buf); | ||
465 | |||
466 | ret = find_available_vars_at(fd, pev, &vls, max_vls, externs); | ||
467 | if (ret <= 0) { | ||
468 | pr_err("Failed to find variables at %s (%d)\n", buf, ret); | ||
469 | goto end; | ||
470 | } | ||
471 | /* Some variables are found */ | ||
472 | fprintf(stdout, "Available variables at %s\n", buf); | ||
473 | for (i = 0; i < ret; i++) { | ||
474 | vl = &vls[i]; | ||
475 | /* | ||
476 | * A probe point might be converted to | ||
477 | * several trace points. | ||
478 | */ | ||
479 | fprintf(stdout, "\t@<%s+%lu>\n", vl->point.symbol, | ||
480 | vl->point.offset); | ||
481 | free(vl->point.symbol); | ||
482 | nvars = 0; | ||
483 | if (vl->vars) { | ||
484 | strlist__for_each(node, vl->vars) { | ||
485 | var = strchr(node->s, '\t') + 1; | ||
486 | if (strfilter__compare(_filter, var)) { | ||
487 | fprintf(stdout, "\t\t%s\n", node->s); | ||
488 | nvars++; | ||
489 | } | ||
490 | } | ||
491 | strlist__delete(vl->vars); | ||
492 | } | ||
493 | if (nvars == 0) | ||
494 | fprintf(stdout, "\t\t(No matched variables)\n"); | ||
495 | } | ||
496 | free(vls); | ||
497 | end: | ||
498 | free(buf); | ||
499 | return ret; | ||
500 | } | ||
501 | |||
502 | /* Show available variables on given probe point */ | ||
503 | int show_available_vars(struct perf_probe_event *pevs, int npevs, | ||
504 | int max_vls, const char *module, | ||
505 | struct strfilter *_filter, bool externs) | ||
506 | { | ||
507 | int i, fd, ret = 0; | ||
508 | |||
509 | ret = init_vmlinux(); | ||
510 | if (ret < 0) | ||
511 | return ret; | ||
512 | |||
513 | setup_pager(); | ||
514 | |||
515 | for (i = 0; i < npevs && ret >= 0; i++) { | ||
516 | fd = open_vmlinux(module); | ||
517 | if (fd < 0) { | ||
518 | pr_warning("Failed to open debug information file.\n"); | ||
519 | ret = fd; | ||
520 | break; | ||
521 | } | ||
522 | ret = show_available_vars_at(fd, &pevs[i], max_vls, _filter, | ||
523 | externs); | ||
524 | } | ||
525 | return ret; | ||
526 | } | ||
527 | |||
381 | #else /* !DWARF_SUPPORT */ | 528 | #else /* !DWARF_SUPPORT */ |
382 | 529 | ||
383 | static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp, | 530 | static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp, |
384 | struct perf_probe_point *pp) | 531 | struct perf_probe_point *pp) |
385 | { | 532 | { |
533 | struct symbol *sym; | ||
534 | |||
535 | sym = __find_kernel_function_by_name(tp->symbol, NULL); | ||
536 | if (!sym) { | ||
537 | pr_err("Failed to find symbol %s in kernel.\n", tp->symbol); | ||
538 | return -ENOENT; | ||
539 | } | ||
386 | pp->function = strdup(tp->symbol); | 540 | pp->function = strdup(tp->symbol); |
387 | if (pp->function == NULL) | 541 | if (pp->function == NULL) |
388 | return -ENOMEM; | 542 | return -ENOMEM; |
@@ -394,7 +548,7 @@ static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp, | |||
394 | 548 | ||
395 | static int try_to_find_probe_trace_events(struct perf_probe_event *pev, | 549 | static int try_to_find_probe_trace_events(struct perf_probe_event *pev, |
396 | struct probe_trace_event **tevs __unused, | 550 | struct probe_trace_event **tevs __unused, |
397 | int max_tevs __unused) | 551 | int max_tevs __unused, const char *mod __unused) |
398 | { | 552 | { |
399 | if (perf_probe_event_need_dwarf(pev)) { | 553 | if (perf_probe_event_need_dwarf(pev)) { |
400 | pr_warning("Debuginfo-analysis is not supported.\n"); | 554 | pr_warning("Debuginfo-analysis is not supported.\n"); |
@@ -403,64 +557,113 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev, | |||
403 | return 0; | 557 | return 0; |
404 | } | 558 | } |
405 | 559 | ||
406 | int show_line_range(struct line_range *lr __unused) | 560 | int show_line_range(struct line_range *lr __unused, const char *module __unused) |
407 | { | 561 | { |
408 | pr_warning("Debuginfo-analysis is not supported.\n"); | 562 | pr_warning("Debuginfo-analysis is not supported.\n"); |
409 | return -ENOSYS; | 563 | return -ENOSYS; |
410 | } | 564 | } |
411 | 565 | ||
566 | int show_available_vars(struct perf_probe_event *pevs __unused, | ||
567 | int npevs __unused, int max_vls __unused, | ||
568 | const char *module __unused, | ||
569 | struct strfilter *filter __unused, | ||
570 | bool externs __unused) | ||
571 | { | ||
572 | pr_warning("Debuginfo-analysis is not supported.\n"); | ||
573 | return -ENOSYS; | ||
574 | } | ||
412 | #endif | 575 | #endif |
413 | 576 | ||
577 | static int parse_line_num(char **ptr, int *val, const char *what) | ||
578 | { | ||
579 | const char *start = *ptr; | ||
580 | |||
581 | errno = 0; | ||
582 | *val = strtol(*ptr, ptr, 0); | ||
583 | if (errno || *ptr == start) { | ||
584 | semantic_error("'%s' is not a valid number.\n", what); | ||
585 | return -EINVAL; | ||
586 | } | ||
587 | return 0; | ||
588 | } | ||
589 | |||
590 | /* | ||
591 | * Stuff 'lr' according to the line range described by 'arg'. | ||
592 | * The line range syntax is described by: | ||
593 | * | ||
594 | * SRC[:SLN[+NUM|-ELN]] | ||
595 | * FNC[@SRC][:SLN[+NUM|-ELN]] | ||
596 | */ | ||
414 | int parse_line_range_desc(const char *arg, struct line_range *lr) | 597 | int parse_line_range_desc(const char *arg, struct line_range *lr) |
415 | { | 598 | { |
416 | const char *ptr; | 599 | char *range, *file, *name = strdup(arg); |
417 | char *tmp; | 600 | int err; |
418 | /* | 601 | |
419 | * <Syntax> | 602 | if (!name) |
420 | * SRC:SLN[+NUM|-ELN] | 603 | return -ENOMEM; |
421 | * FUNC[:SLN[+NUM|-ELN]] | 604 | |
422 | */ | 605 | lr->start = 0; |
423 | ptr = strchr(arg, ':'); | 606 | lr->end = INT_MAX; |
424 | if (ptr) { | 607 | |
425 | lr->start = (int)strtoul(ptr + 1, &tmp, 0); | 608 | range = strchr(name, ':'); |
426 | if (*tmp == '+') { | 609 | if (range) { |
427 | lr->end = lr->start + (int)strtoul(tmp + 1, &tmp, 0); | 610 | *range++ = '\0'; |
428 | lr->end--; /* | 611 | |
429 | * Adjust the number of lines here. | 612 | err = parse_line_num(&range, &lr->start, "start line"); |
430 | * If the number of lines == 1, the | 613 | if (err) |
431 | * the end of line should be equal to | 614 | goto err; |
432 | * the start of line. | 615 | |
433 | */ | 616 | if (*range == '+' || *range == '-') { |
434 | } else if (*tmp == '-') | 617 | const char c = *range++; |
435 | lr->end = (int)strtoul(tmp + 1, &tmp, 0); | 618 | |
436 | else | 619 | err = parse_line_num(&range, &lr->end, "end line"); |
437 | lr->end = INT_MAX; | 620 | if (err) |
621 | goto err; | ||
622 | |||
623 | if (c == '+') { | ||
624 | lr->end += lr->start; | ||
625 | /* | ||
626 | * Adjust the number of lines here. | ||
627 | * If the number of lines == 1, the | ||
628 | * the end of line should be equal to | ||
629 | * the start of line. | ||
630 | */ | ||
631 | lr->end--; | ||
632 | } | ||
633 | } | ||
634 | |||
438 | pr_debug("Line range is %d to %d\n", lr->start, lr->end); | 635 | pr_debug("Line range is %d to %d\n", lr->start, lr->end); |
636 | |||
637 | err = -EINVAL; | ||
439 | if (lr->start > lr->end) { | 638 | if (lr->start > lr->end) { |
440 | semantic_error("Start line must be smaller" | 639 | semantic_error("Start line must be smaller" |
441 | " than end line.\n"); | 640 | " than end line.\n"); |
442 | return -EINVAL; | 641 | goto err; |
443 | } | 642 | } |
444 | if (*tmp != '\0') { | 643 | if (*range != '\0') { |
445 | semantic_error("Tailing with invalid character '%d'.\n", | 644 | semantic_error("Tailing with invalid str '%s'.\n", range); |
446 | *tmp); | 645 | goto err; |
447 | return -EINVAL; | ||
448 | } | 646 | } |
449 | tmp = strndup(arg, (ptr - arg)); | ||
450 | } else { | ||
451 | tmp = strdup(arg); | ||
452 | lr->end = INT_MAX; | ||
453 | } | 647 | } |
454 | 648 | ||
455 | if (tmp == NULL) | 649 | file = strchr(name, '@'); |
456 | return -ENOMEM; | 650 | if (file) { |
457 | 651 | *file = '\0'; | |
458 | if (strchr(tmp, '.')) | 652 | lr->file = strdup(++file); |
459 | lr->file = tmp; | 653 | if (lr->file == NULL) { |
654 | err = -ENOMEM; | ||
655 | goto err; | ||
656 | } | ||
657 | lr->function = name; | ||
658 | } else if (strchr(name, '.')) | ||
659 | lr->file = name; | ||
460 | else | 660 | else |
461 | lr->function = tmp; | 661 | lr->function = name; |
462 | 662 | ||
463 | return 0; | 663 | return 0; |
664 | err: | ||
665 | free(name); | ||
666 | return err; | ||
464 | } | 667 | } |
465 | 668 | ||
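A few illustrative argument strings the rewritten parser is meant to accept, following the SRC[:SLN[+NUM|-ELN]] and FNC[@SRC][:SLN[+NUM|-ELN]] grammar quoted above (the function and file names are examples only):

static const char * const line_range_examples[] = {
	"schedule",			/* whole function: lr->function is set */
	"schedule:10+5",		/* 5 lines starting at line 10 */
	"kernel/sched.c:100-120",	/* lr->file is set: the name contains '.' */
	"vfs_read@fs/read_write.c:25",	/* function narrowed down to one file */
};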
466 | /* Check the name is good for event/group */ | 669 | /* Check the name is good for event/group */ |
@@ -584,39 +787,40 @@ static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev) | |||
584 | 787 | ||
585 | /* Exclusion check */ | 788 | /* Exclusion check */ |
586 | if (pp->lazy_line && pp->line) { | 789 | if (pp->lazy_line && pp->line) { |
587 | semantic_error("Lazy pattern can't be used with line number."); | 790 | semantic_error("Lazy pattern can't be used with" |
791 | " line number.\n"); | ||
588 | return -EINVAL; | 792 | return -EINVAL; |
589 | } | 793 | } |
590 | 794 | ||
591 | if (pp->lazy_line && pp->offset) { | 795 | if (pp->lazy_line && pp->offset) { |
592 | semantic_error("Lazy pattern can't be used with offset."); | 796 | semantic_error("Lazy pattern can't be used with offset.\n"); |
593 | return -EINVAL; | 797 | return -EINVAL; |
594 | } | 798 | } |
595 | 799 | ||
596 | if (pp->line && pp->offset) { | 800 | if (pp->line && pp->offset) { |
597 | semantic_error("Offset can't be used with line number."); | 801 | semantic_error("Offset can't be used with line number.\n"); |
598 | return -EINVAL; | 802 | return -EINVAL; |
599 | } | 803 | } |
600 | 804 | ||
601 | if (!pp->line && !pp->lazy_line && pp->file && !pp->function) { | 805 | if (!pp->line && !pp->lazy_line && pp->file && !pp->function) { |
602 | semantic_error("File always requires line number or " | 806 | semantic_error("File always requires line number or " |
603 | "lazy pattern."); | 807 | "lazy pattern.\n"); |
604 | return -EINVAL; | 808 | return -EINVAL; |
605 | } | 809 | } |
606 | 810 | ||
607 | if (pp->offset && !pp->function) { | 811 | if (pp->offset && !pp->function) { |
608 | semantic_error("Offset requires an entry function."); | 812 | semantic_error("Offset requires an entry function.\n"); |
609 | return -EINVAL; | 813 | return -EINVAL; |
610 | } | 814 | } |
611 | 815 | ||
612 | if (pp->retprobe && !pp->function) { | 816 | if (pp->retprobe && !pp->function) { |
613 | semantic_error("Return probe requires an entry function."); | 817 | semantic_error("Return probe requires an entry function.\n"); |
614 | return -EINVAL; | 818 | return -EINVAL; |
615 | } | 819 | } |
616 | 820 | ||
617 | if ((pp->offset || pp->line || pp->lazy_line) && pp->retprobe) { | 821 | if ((pp->offset || pp->line || pp->lazy_line) && pp->retprobe) { |
618 | semantic_error("Offset/Line/Lazy pattern can't be used with " | 822 | semantic_error("Offset/Line/Lazy pattern can't be used with " |
619 | "return probe."); | 823 | "return probe.\n"); |
620 | return -EINVAL; | 824 | return -EINVAL; |
621 | } | 825 | } |
622 | 826 | ||
@@ -890,7 +1094,7 @@ int synthesize_perf_probe_arg(struct perf_probe_arg *pa, char *buf, size_t len) | |||
890 | 1094 | ||
891 | return tmp - buf; | 1095 | return tmp - buf; |
892 | error: | 1096 | error: |
893 | pr_debug("Failed to synthesize perf probe argument: %s", | 1097 | pr_debug("Failed to synthesize perf probe argument: %s\n", |
894 | strerror(-ret)); | 1098 | strerror(-ret)); |
895 | return ret; | 1099 | return ret; |
896 | } | 1100 | } |
@@ -918,13 +1122,13 @@ static char *synthesize_perf_probe_point(struct perf_probe_point *pp) | |||
918 | goto error; | 1122 | goto error; |
919 | } | 1123 | } |
920 | if (pp->file) { | 1124 | if (pp->file) { |
921 | len = strlen(pp->file) - 31; | 1125 | tmp = pp->file; |
922 | if (len < 0) | 1126 | len = strlen(tmp); |
923 | len = 0; | 1127 | if (len > 30) { |
924 | tmp = strchr(pp->file + len, '/'); | 1128 | tmp = strchr(pp->file + len - 30, '/'); |
925 | if (!tmp) | 1129 | tmp = tmp ? tmp + 1 : pp->file + len - 30; |
926 | tmp = pp->file + len; | 1130 | } |
927 | ret = e_snprintf(file, 32, "@%s", tmp + 1); | 1131 | ret = e_snprintf(file, 32, "@%s", tmp); |
928 | if (ret <= 0) | 1132 | if (ret <= 0) |
929 | goto error; | 1133 | goto error; |
930 | } | 1134 | } |
@@ -940,7 +1144,7 @@ static char *synthesize_perf_probe_point(struct perf_probe_point *pp) | |||
940 | 1144 | ||
941 | return buf; | 1145 | return buf; |
942 | error: | 1146 | error: |
943 | pr_debug("Failed to synthesize perf probe point: %s", | 1147 | pr_debug("Failed to synthesize perf probe point: %s\n", |
944 | strerror(-ret)); | 1148 | strerror(-ret)); |
945 | if (buf) | 1149 | if (buf) |
946 | free(buf); | 1150 | free(buf); |
@@ -1087,7 +1291,7 @@ error: | |||
1087 | } | 1291 | } |
1088 | 1292 | ||
1089 | static int convert_to_perf_probe_event(struct probe_trace_event *tev, | 1293 | static int convert_to_perf_probe_event(struct probe_trace_event *tev, |
1090 | struct perf_probe_event *pev) | 1294 | struct perf_probe_event *pev) |
1091 | { | 1295 | { |
1092 | char buf[64] = ""; | 1296 | char buf[64] = ""; |
1093 | int i, ret; | 1297 | int i, ret; |
@@ -1516,14 +1720,14 @@ static int __add_probe_trace_events(struct perf_probe_event *pev, | |||
1516 | 1720 | ||
1517 | static int convert_to_probe_trace_events(struct perf_probe_event *pev, | 1721 | static int convert_to_probe_trace_events(struct perf_probe_event *pev, |
1518 | struct probe_trace_event **tevs, | 1722 | struct probe_trace_event **tevs, |
1519 | int max_tevs) | 1723 | int max_tevs, const char *module) |
1520 | { | 1724 | { |
1521 | struct symbol *sym; | 1725 | struct symbol *sym; |
1522 | int ret = 0, i; | 1726 | int ret = 0, i; |
1523 | struct probe_trace_event *tev; | 1727 | struct probe_trace_event *tev; |
1524 | 1728 | ||
1525 | /* Convert perf_probe_event with debuginfo */ | 1729 | /* Convert perf_probe_event with debuginfo */ |
1526 | ret = try_to_find_probe_trace_events(pev, tevs, max_tevs); | 1730 | ret = try_to_find_probe_trace_events(pev, tevs, max_tevs, module); |
1527 | if (ret != 0) | 1731 | if (ret != 0) |
1528 | return ret; | 1732 | return ret; |
1529 | 1733 | ||
@@ -1572,8 +1776,7 @@ static int convert_to_probe_trace_events(struct perf_probe_event *pev, | |||
1572 | } | 1776 | } |
1573 | 1777 | ||
1574 | /* Currently just checking function name from symbol map */ | 1778 | /* Currently just checking function name from symbol map */ |
1575 | sym = map__find_symbol_by_name(machine.vmlinux_maps[MAP__FUNCTION], | 1779 | sym = __find_kernel_function_by_name(tev->point.symbol, NULL); |
1576 | tev->point.symbol, NULL); | ||
1577 | if (!sym) { | 1780 | if (!sym) { |
1578 | pr_warning("Kernel symbol \'%s\' not found.\n", | 1781 | pr_warning("Kernel symbol \'%s\' not found.\n", |
1579 | tev->point.symbol); | 1782 | tev->point.symbol); |
@@ -1596,7 +1799,7 @@ struct __event_package { | |||
1596 | }; | 1799 | }; |
1597 | 1800 | ||
1598 | int add_perf_probe_events(struct perf_probe_event *pevs, int npevs, | 1801 | int add_perf_probe_events(struct perf_probe_event *pevs, int npevs, |
1599 | bool force_add, int max_tevs) | 1802 | int max_tevs, const char *module, bool force_add) |
1600 | { | 1803 | { |
1601 | int i, j, ret; | 1804 | int i, j, ret; |
1602 | struct __event_package *pkgs; | 1805 | struct __event_package *pkgs; |
@@ -1617,16 +1820,21 @@ int add_perf_probe_events(struct perf_probe_event *pevs, int npevs, | |||
1617 | pkgs[i].pev = &pevs[i]; | 1820 | pkgs[i].pev = &pevs[i]; |
1618 | /* Convert with or without debuginfo */ | 1821 | /* Convert with or without debuginfo */ |
1619 | ret = convert_to_probe_trace_events(pkgs[i].pev, | 1822 | ret = convert_to_probe_trace_events(pkgs[i].pev, |
1620 | &pkgs[i].tevs, max_tevs); | 1823 | &pkgs[i].tevs, |
1824 | max_tevs, | ||
1825 | module); | ||
1621 | if (ret < 0) | 1826 | if (ret < 0) |
1622 | goto end; | 1827 | goto end; |
1623 | pkgs[i].ntevs = ret; | 1828 | pkgs[i].ntevs = ret; |
1624 | } | 1829 | } |
1625 | 1830 | ||
1626 | /* Loop 2: add all events */ | 1831 | /* Loop 2: add all events */ |
1627 | for (i = 0; i < npevs && ret >= 0; i++) | 1832 | for (i = 0; i < npevs; i++) { |
1628 | ret = __add_probe_trace_events(pkgs[i].pev, pkgs[i].tevs, | 1833 | ret = __add_probe_trace_events(pkgs[i].pev, pkgs[i].tevs, |
1629 | pkgs[i].ntevs, force_add); | 1834 | pkgs[i].ntevs, force_add); |
1835 | if (ret < 0) | ||
1836 | break; | ||
1837 | } | ||
1630 | end: | 1838 | end: |
1631 | /* Loop 3: cleanup and free trace events */ | 1839 | /* Loop 3: cleanup and free trace events */ |
1632 | for (i = 0; i < npevs; i++) { | 1840 | for (i = 0; i < npevs; i++) { |
@@ -1680,7 +1888,7 @@ static int del_trace_probe_event(int fd, const char *group, | |||
1680 | 1888 | ||
1681 | ret = e_snprintf(buf, 128, "%s:%s", group, event); | 1889 | ret = e_snprintf(buf, 128, "%s:%s", group, event); |
1682 | if (ret < 0) { | 1890 | if (ret < 0) { |
1683 | pr_err("Failed to copy event."); | 1891 | pr_err("Failed to copy event.\n"); |
1684 | return ret; | 1892 | return ret; |
1685 | } | 1893 | } |
1686 | 1894 | ||
@@ -1752,4 +1960,46 @@ int del_perf_probe_events(struct strlist *dellist) | |||
1752 | 1960 | ||
1753 | return ret; | 1961 | return ret; |
1754 | } | 1962 | } |
1963 | /* TODO: don't use a global variable for filter ... */ | ||
1964 | static struct strfilter *available_func_filter; | ||
1965 | |||
1966 | /* | ||
1967 | * If a symbol corresponds to a function with global binding and | ||
1968 | * matches filter return 0. For all others return 1. | ||
1969 | */ | ||
1970 | static int filter_available_functions(struct map *map __unused, | ||
1971 | struct symbol *sym) | ||
1972 | { | ||
1973 | if (sym->binding == STB_GLOBAL && | ||
1974 | strfilter__compare(available_func_filter, sym->name)) | ||
1975 | return 0; | ||
1976 | return 1; | ||
1977 | } | ||
1755 | 1978 | ||
1979 | int show_available_funcs(const char *module, struct strfilter *_filter) | ||
1980 | { | ||
1981 | struct map *map; | ||
1982 | int ret; | ||
1983 | |||
1984 | setup_pager(); | ||
1985 | |||
1986 | ret = init_vmlinux(); | ||
1987 | if (ret < 0) | ||
1988 | return ret; | ||
1989 | |||
1990 | map = kernel_get_module_map(module); | ||
1991 | if (!map) { | ||
1992 | pr_err("Failed to find %s map.\n", (module) ? : "kernel"); | ||
1993 | return -EINVAL; | ||
1994 | } | ||
1995 | available_func_filter = _filter; | ||
1996 | if (map__load(map, filter_available_functions)) { | ||
1997 | pr_err("Failed to load map.\n"); | ||
1998 | return -EINVAL; | ||
1999 | } | ||
2000 | if (!dso__sorted_by_name(map->dso, map->type)) | ||
2001 | dso__sort_by_name(map->dso, map->type); | ||
2002 | |||
2003 | dso__fprintf_symbols_by_name(map->dso, map->type, stdout); | ||
2004 | return 0; | ||
2005 | } | ||
diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h index 5af39243a25b..3434fc9d79d5 100644 --- a/tools/perf/util/probe-event.h +++ b/tools/perf/util/probe-event.h | |||
@@ -3,6 +3,7 @@ | |||
3 | 3 | ||
4 | #include <stdbool.h> | 4 | #include <stdbool.h> |
5 | #include "strlist.h" | 5 | #include "strlist.h" |
6 | #include "strfilter.h" | ||
6 | 7 | ||
7 | extern bool probe_event_dry_run; | 8 | extern bool probe_event_dry_run; |
8 | 9 | ||
@@ -90,6 +91,12 @@ struct line_range { | |||
90 | struct list_head line_list; /* Visible lines */ | 91 | struct list_head line_list; /* Visible lines */ |
91 | }; | 92 | }; |
92 | 93 | ||
94 | /* List of variables */ | ||
95 | struct variable_list { | ||
96 | struct probe_trace_point point; /* Actual probepoint */ | ||
97 | struct strlist *vars; /* Available variables */ | ||
98 | }; | ||
99 | |||
93 | /* Command string to events */ | 100 | /* Command string to events */ |
94 | extern int parse_perf_probe_command(const char *cmd, | 101 | extern int parse_perf_probe_command(const char *cmd, |
95 | struct perf_probe_event *pev); | 102 | struct perf_probe_event *pev); |
@@ -109,12 +116,19 @@ extern void clear_perf_probe_event(struct perf_probe_event *pev); | |||
109 | /* Command string to line-range */ | 116 | /* Command string to line-range */ |
110 | extern int parse_line_range_desc(const char *cmd, struct line_range *lr); | 117 | extern int parse_line_range_desc(const char *cmd, struct line_range *lr); |
111 | 118 | ||
119 | /* Internal use: Return kernel/module path */ | ||
120 | extern const char *kernel_get_module_path(const char *module); | ||
112 | 121 | ||
113 | extern int add_perf_probe_events(struct perf_probe_event *pevs, int npevs, | 122 | extern int add_perf_probe_events(struct perf_probe_event *pevs, int npevs, |
114 | bool force_add, int max_probe_points); | 123 | int max_probe_points, const char *module, |
124 | bool force_add); | ||
115 | extern int del_perf_probe_events(struct strlist *dellist); | 125 | extern int del_perf_probe_events(struct strlist *dellist); |
116 | extern int show_perf_probe_events(void); | 126 | extern int show_perf_probe_events(void); |
117 | extern int show_line_range(struct line_range *lr); | 127 | extern int show_line_range(struct line_range *lr, const char *module); |
128 | extern int show_available_vars(struct perf_probe_event *pevs, int npevs, | ||
129 | int max_probe_points, const char *module, | ||
130 | struct strfilter *filter, bool externs); | ||
131 | extern int show_available_funcs(const char *module, struct strfilter *filter); | ||
118 | 132 | ||
119 | 133 | ||
120 | /* Maximum index number of event-name postfix */ | 134 | /* Maximum index number of event-name postfix */ |
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index 32b81f707ff5..3b9d0b800d5c 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <ctype.h> | 33 | #include <ctype.h> |
34 | #include <dwarf-regs.h> | 34 | #include <dwarf-regs.h> |
35 | 35 | ||
36 | #include <linux/bitops.h> | ||
36 | #include "event.h" | 37 | #include "event.h" |
37 | #include "debug.h" | 38 | #include "debug.h" |
38 | #include "util.h" | 39 | #include "util.h" |
@@ -116,6 +117,126 @@ static void line_list__free(struct list_head *head) | |||
116 | } | 117 | } |
117 | } | 118 | } |
118 | 119 | ||
120 | /* Dwarf FL wrappers */ | ||
121 | static char *debuginfo_path; /* Currently dummy */ | ||
122 | |||
123 | static const Dwfl_Callbacks offline_callbacks = { | ||
124 | .find_debuginfo = dwfl_standard_find_debuginfo, | ||
125 | .debuginfo_path = &debuginfo_path, | ||
126 | |||
127 | .section_address = dwfl_offline_section_address, | ||
128 | |||
129 | /* We use this table for core files too. */ | ||
130 | .find_elf = dwfl_build_id_find_elf, | ||
131 | }; | ||
132 | |||
133 | /* Get a Dwarf from offline image */ | ||
134 | static Dwarf *dwfl_init_offline_dwarf(int fd, Dwfl **dwflp, Dwarf_Addr *bias) | ||
135 | { | ||
136 | Dwfl_Module *mod; | ||
137 | Dwarf *dbg = NULL; | ||
138 | |||
139 | if (!dwflp) | ||
140 | return NULL; | ||
141 | |||
142 | *dwflp = dwfl_begin(&offline_callbacks); | ||
143 | if (!*dwflp) | ||
144 | return NULL; | ||
145 | |||
146 | mod = dwfl_report_offline(*dwflp, "", "", fd); | ||
147 | if (!mod) | ||
148 | goto error; | ||
149 | |||
150 | dbg = dwfl_module_getdwarf(mod, bias); | ||
151 | if (!dbg) { | ||
152 | error: | ||
153 | dwfl_end(*dwflp); | ||
154 | *dwflp = NULL; | ||
155 | } | ||
156 | return dbg; | ||
157 | } | ||
158 | |||
159 | #if _ELFUTILS_PREREQ(0, 148) | ||
160 | /* This method is buggy if elfutils is older than 0.148 */ | ||
161 | static int __linux_kernel_find_elf(Dwfl_Module *mod, | ||
162 | void **userdata, | ||
163 | const char *module_name, | ||
164 | Dwarf_Addr base, | ||
165 | char **file_name, Elf **elfp) | ||
166 | { | ||
167 | int fd; | ||
168 | const char *path = kernel_get_module_path(module_name); | ||
169 | |||
170 | pr_debug2("Use file %s for %s\n", path, module_name); | ||
171 | if (path) { | ||
172 | fd = open(path, O_RDONLY); | ||
173 | if (fd >= 0) { | ||
174 | *file_name = strdup(path); | ||
175 | return fd; | ||
176 | } | ||
177 | } | ||
178 | /* If failed, try to call standard method */ | ||
179 | return dwfl_linux_kernel_find_elf(mod, userdata, module_name, base, | ||
180 | file_name, elfp); | ||
181 | } | ||
182 | |||
183 | static const Dwfl_Callbacks kernel_callbacks = { | ||
184 | .find_debuginfo = dwfl_standard_find_debuginfo, | ||
185 | .debuginfo_path = &debuginfo_path, | ||
186 | |||
187 | .find_elf = __linux_kernel_find_elf, | ||
188 | .section_address = dwfl_linux_kernel_module_section_address, | ||
189 | }; | ||
190 | |||
191 | /* Get a Dwarf from live kernel image */ | ||
192 | static Dwarf *dwfl_init_live_kernel_dwarf(Dwarf_Addr addr, Dwfl **dwflp, | ||
193 | Dwarf_Addr *bias) | ||
194 | { | ||
195 | Dwarf *dbg; | ||
196 | |||
197 | if (!dwflp) | ||
198 | return NULL; | ||
199 | |||
200 | *dwflp = dwfl_begin(&kernel_callbacks); | ||
201 | if (!*dwflp) | ||
202 | return NULL; | ||
203 | |||
204 | /* Load the kernel dwarves: Don't care the result here */ | ||
205 | dwfl_linux_kernel_report_kernel(*dwflp); | ||
206 | dwfl_linux_kernel_report_modules(*dwflp); | ||
207 | |||
208 | dbg = dwfl_addrdwarf(*dwflp, addr, bias); | ||
209 | /* Here, check whether we could get a real dwarf */ | ||
210 | if (!dbg) { | ||
211 | pr_debug("Failed to find kernel dwarf at %lx\n", | ||
212 | (unsigned long)addr); | ||
213 | dwfl_end(*dwflp); | ||
214 | *dwflp = NULL; | ||
215 | } | ||
216 | return dbg; | ||
217 | } | ||
218 | #else | ||
219 | /* With older elfutils, this just support kernel module... */ | ||
220 | static Dwarf *dwfl_init_live_kernel_dwarf(Dwarf_Addr addr __used, Dwfl **dwflp, | ||
221 | Dwarf_Addr *bias) | ||
222 | { | ||
223 | int fd; | ||
224 | const char *path = kernel_get_module_path("kernel"); | ||
225 | |||
226 | if (!path) { | ||
227 | pr_err("Failed to find vmlinux path\n"); | ||
228 | return NULL; | ||
229 | } | ||
230 | |||
231 | pr_debug2("Use file %s for debuginfo\n", path); | ||
232 | fd = open(path, O_RDONLY); | ||
233 | if (fd < 0) | ||
234 | return NULL; | ||
235 | |||
236 | return dwfl_init_offline_dwarf(fd, dwflp, bias); | ||
237 | } | ||
238 | #endif | ||
239 | |||
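A hedged usage sketch for the wrappers above: once either initializer hands back a Dwarf handle, compilation units are typically walked with the stock elfutils iteration below (the helper is hypothetical; only standard libdw calls are used).

#include <elfutils/libdw.h>
#include <stdio.h>

/* Hypothetical consumer: print the name of every CU found in the debuginfo. */
static void walk_cus(Dwarf *dbg)
{
	Dwarf_Off off = 0, noff;
	size_t cuhl;
	Dwarf_Die cudie;

	while (dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL) == 0) {
		if (dwarf_offdie(dbg, off + cuhl, &cudie) != NULL)
			printf("CU: %s\n", dwarf_diename(&cudie) ?: "(unnamed)");
		off = noff;
	}
}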
119 | /* Dwarf wrappers */ | 240 | /* Dwarf wrappers */ |
120 | 241 | ||
121 | /* Find the realpath of the target file. */ | 242 | /* Find the realpath of the target file. */ |
@@ -152,6 +273,25 @@ static const char *cu_get_comp_dir(Dwarf_Die *cu_die) | |||
152 | return dwarf_formstring(&attr); | 273 | return dwarf_formstring(&attr); |
153 | } | 274 | } |
154 | 275 | ||
276 | /* Get a line number and file name for given address */ | ||
277 | static int cu_find_lineinfo(Dwarf_Die *cudie, unsigned long addr, | ||
278 | const char **fname, int *lineno) | ||
279 | { | ||
280 | Dwarf_Line *line; | ||
281 | Dwarf_Addr laddr; | ||
282 | |||
283 | line = dwarf_getsrc_die(cudie, (Dwarf_Addr)addr); | ||
284 | if (line && dwarf_lineaddr(line, &laddr) == 0 && | ||
285 | addr == (unsigned long)laddr && dwarf_lineno(line, lineno) == 0) { | ||
286 | *fname = dwarf_linesrc(line, NULL, NULL); | ||
287 | if (!*fname) | ||
288 | /* line number is useless without filename */ | ||
289 | *lineno = 0; | ||
290 | } | ||
291 | |||
292 | return *lineno ?: -ENOENT; | ||
293 | } | ||
294 | |||
155 | /* Compare diename and tname */ | 295 | /* Compare diename and tname */ |
156 | static bool die_compare_name(Dwarf_Die *dw_die, const char *tname) | 296 | static bool die_compare_name(Dwarf_Die *dw_die, const char *tname) |
157 | { | 297 | { |
@@ -160,35 +300,76 @@ static bool die_compare_name(Dwarf_Die *dw_die, const char *tname) | |||
160 | return name ? (strcmp(tname, name) == 0) : false; | 300 | return name ? (strcmp(tname, name) == 0) : false; |
161 | } | 301 | } |
162 | 302 | ||
163 | /* Get type die, but skip qualifiers and typedef */ | 303 | /* Get callsite line number of inline-function instance */ |
164 | static Dwarf_Die *die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem) | 304 | static int die_get_call_lineno(Dwarf_Die *in_die) |
165 | { | 305 | { |
166 | Dwarf_Attribute attr; | 306 | Dwarf_Attribute attr; |
307 | Dwarf_Word ret; | ||
308 | |||
309 | if (!dwarf_attr(in_die, DW_AT_call_line, &attr)) | ||
310 | return -ENOENT; | ||
311 | |||
312 | dwarf_formudata(&attr, &ret); | ||
313 | return (int)ret; | ||
314 | } | ||
315 | |||
316 | /* Get type die */ | ||
317 | static Dwarf_Die *die_get_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem) | ||
318 | { | ||
319 | Dwarf_Attribute attr; | ||
320 | |||
321 | if (dwarf_attr_integrate(vr_die, DW_AT_type, &attr) && | ||
322 | dwarf_formref_die(&attr, die_mem)) | ||
323 | return die_mem; | ||
324 | else | ||
325 | return NULL; | ||
326 | } | ||
327 | |||
328 | /* Get a type die, but skip qualifiers */ | ||
329 | static Dwarf_Die *__die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem) | ||
330 | { | ||
167 | int tag; | 331 | int tag; |
168 | 332 | ||
169 | do { | 333 | do { |
170 | if (dwarf_attr(vr_die, DW_AT_type, &attr) == NULL || | 334 | vr_die = die_get_type(vr_die, die_mem); |
171 | dwarf_formref_die(&attr, die_mem) == NULL) | 335 | if (!vr_die) |
172 | return NULL; | 336 | break; |
173 | 337 | tag = dwarf_tag(vr_die); | |
174 | tag = dwarf_tag(die_mem); | ||
175 | vr_die = die_mem; | ||
176 | } while (tag == DW_TAG_const_type || | 338 | } while (tag == DW_TAG_const_type || |
177 | tag == DW_TAG_restrict_type || | 339 | tag == DW_TAG_restrict_type || |
178 | tag == DW_TAG_volatile_type || | 340 | tag == DW_TAG_volatile_type || |
179 | tag == DW_TAG_shared_type || | 341 | tag == DW_TAG_shared_type); |
180 | tag == DW_TAG_typedef); | ||
181 | 342 | ||
182 | return die_mem; | 343 | return vr_die; |
183 | } | 344 | } |
184 | 345 | ||
185 | static bool die_is_signed_type(Dwarf_Die *tp_die) | 346 | /* Get a type die, but skip qualifiers and typedef */ |
347 | static Dwarf_Die *die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem) | ||
348 | { | ||
349 | do { | ||
350 | vr_die = __die_get_real_type(vr_die, die_mem); | ||
351 | } while (vr_die && dwarf_tag(vr_die) == DW_TAG_typedef); | ||
352 | |||
353 | return vr_die; | ||
354 | } | ||
355 | |||
356 | static int die_get_attr_udata(Dwarf_Die *tp_die, unsigned int attr_name, | ||
357 | Dwarf_Word *result) | ||
186 | { | 358 | { |
187 | Dwarf_Attribute attr; | 359 | Dwarf_Attribute attr; |
360 | |||
361 | if (dwarf_attr(tp_die, attr_name, &attr) == NULL || | ||
362 | dwarf_formudata(&attr, result) != 0) | ||
363 | return -ENOENT; | ||
364 | |||
365 | return 0; | ||
366 | } | ||
367 | |||
368 | static bool die_is_signed_type(Dwarf_Die *tp_die) | ||
369 | { | ||
188 | Dwarf_Word ret; | 370 | Dwarf_Word ret; |
189 | 371 | ||
190 | if (dwarf_attr(tp_die, DW_AT_encoding, &attr) == NULL || | 372 | if (die_get_attr_udata(tp_die, DW_AT_encoding, &ret)) |
191 | dwarf_formudata(&attr, &ret) != 0) | ||
192 | return false; | 373 | return false; |
193 | 374 | ||
194 | return (ret == DW_ATE_signed_char || ret == DW_ATE_signed || | 375 | return (ret == DW_ATE_signed_char || ret == DW_ATE_signed || |
@@ -197,11 +378,29 @@ static bool die_is_signed_type(Dwarf_Die *tp_die) | |||
197 | 378 | ||
198 | static int die_get_byte_size(Dwarf_Die *tp_die) | 379 | static int die_get_byte_size(Dwarf_Die *tp_die) |
199 | { | 380 | { |
200 | Dwarf_Attribute attr; | ||
201 | Dwarf_Word ret; | 381 | Dwarf_Word ret; |
202 | 382 | ||
203 | if (dwarf_attr(tp_die, DW_AT_byte_size, &attr) == NULL || | 383 | if (die_get_attr_udata(tp_die, DW_AT_byte_size, &ret)) |
204 | dwarf_formudata(&attr, &ret) != 0) | 384 | return 0; |
385 | |||
386 | return (int)ret; | ||
387 | } | ||
388 | |||
389 | static int die_get_bit_size(Dwarf_Die *tp_die) | ||
390 | { | ||
391 | Dwarf_Word ret; | ||
392 | |||
393 | if (die_get_attr_udata(tp_die, DW_AT_bit_size, &ret)) | ||
394 | return 0; | ||
395 | |||
396 | return (int)ret; | ||
397 | } | ||
398 | |||
399 | static int die_get_bit_offset(Dwarf_Die *tp_die) | ||
400 | { | ||
401 | Dwarf_Word ret; | ||
402 | |||
403 | if (die_get_attr_udata(tp_die, DW_AT_bit_offset, &ret)) | ||
205 | return 0; | 404 | return 0; |
206 | 405 | ||
207 | return (int)ret; | 406 | return (int)ret; |
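The bit size/offset helpers above feed the bitfield handling added further down in convert_variable_type(), which emits kprobe-tracer's "b<bit-width>@<bit-offset>/<container-size>" type string with all three values in bits. A small sketch of that formatting (the helper name is hypothetical):

#include <stdio.h>

/* Hypothetical: a 3-bit field at bit offset 5 in a 4-byte word -> "b3@5/32" */
static void format_bitfield_type(char *buf, size_t len,
				 int bit_size, int bit_offset, int byte_size)
{
	snprintf(buf, len, "b%d@%d/%d", bit_size, bit_offset, byte_size * 8);
}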
@@ -317,28 +516,196 @@ static int __die_find_inline_cb(Dwarf_Die *die_mem, void *data) | |||
317 | static Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr, | 516 | static Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr, |
318 | Dwarf_Die *die_mem) | 517 | Dwarf_Die *die_mem) |
319 | { | 518 | { |
320 | return die_find_child(sp_die, __die_find_inline_cb, &addr, die_mem); | 519 | Dwarf_Die tmp_die; |
520 | |||
521 | sp_die = die_find_child(sp_die, __die_find_inline_cb, &addr, &tmp_die); | ||
522 | if (!sp_die) | ||
523 | return NULL; | ||
524 | |||
525 | /* Inlined functions can be nested; keep tracing until the lookup fails. */ | ||
526 | while (sp_die) { | ||
527 | memcpy(die_mem, sp_die, sizeof(Dwarf_Die)); | ||
528 | sp_die = die_find_child(sp_die, __die_find_inline_cb, &addr, | ||
529 | &tmp_die); | ||
530 | } | ||
531 | |||
532 | return die_mem; | ||
533 | } | ||
534 | |||
535 | /* Walker on lines (Note: line numbers are not sorted) */ | ||
536 | typedef int (* line_walk_handler_t) (const char *fname, int lineno, | ||
537 | Dwarf_Addr addr, void *data); | ||
538 | |||
539 | struct __line_walk_param { | ||
540 | const char *fname; | ||
541 | line_walk_handler_t handler; | ||
542 | void *data; | ||
543 | int retval; | ||
544 | }; | ||
545 | |||
546 | static int __die_walk_funclines_cb(Dwarf_Die *in_die, void *data) | ||
547 | { | ||
548 | struct __line_walk_param *lw = data; | ||
549 | Dwarf_Addr addr; | ||
550 | int lineno; | ||
551 | |||
552 | if (dwarf_tag(in_die) == DW_TAG_inlined_subroutine) { | ||
553 | lineno = die_get_call_lineno(in_die); | ||
554 | if (lineno > 0 && dwarf_entrypc(in_die, &addr) == 0) { | ||
555 | lw->retval = lw->handler(lw->fname, lineno, addr, | ||
556 | lw->data); | ||
557 | if (lw->retval != 0) | ||
558 | return DIE_FIND_CB_FOUND; | ||
559 | } | ||
560 | } | ||
561 | return DIE_FIND_CB_SIBLING; | ||
562 | } | ||
563 | |||
564 | /* Walk on lines of blocks included in given DIE */ | ||
565 | static int __die_walk_funclines(Dwarf_Die *sp_die, | ||
566 | line_walk_handler_t handler, void *data) | ||
567 | { | ||
568 | struct __line_walk_param lw = { | ||
569 | .handler = handler, | ||
570 | .data = data, | ||
571 | .retval = 0, | ||
572 | }; | ||
573 | Dwarf_Die die_mem; | ||
574 | Dwarf_Addr addr; | ||
575 | int lineno; | ||
576 | |||
577 | /* Handle function declaration line */ | ||
578 | lw.fname = dwarf_decl_file(sp_die); | ||
579 | if (lw.fname && dwarf_decl_line(sp_die, &lineno) == 0 && | ||
580 | dwarf_entrypc(sp_die, &addr) == 0) { | ||
581 | lw.retval = handler(lw.fname, lineno, addr, data); | ||
582 | if (lw.retval != 0) | ||
583 | goto done; | ||
584 | } | ||
585 | die_find_child(sp_die, __die_walk_funclines_cb, &lw, &die_mem); | ||
586 | done: | ||
587 | return lw.retval; | ||
588 | } | ||
589 | |||
590 | static int __die_walk_culines_cb(Dwarf_Die *sp_die, void *data) | ||
591 | { | ||
592 | struct __line_walk_param *lw = data; | ||
593 | |||
594 | lw->retval = __die_walk_funclines(sp_die, lw->handler, lw->data); | ||
595 | if (lw->retval != 0) | ||
596 | return DWARF_CB_ABORT; | ||
597 | |||
598 | return DWARF_CB_OK; | ||
321 | } | 599 | } |
322 | 600 | ||
601 | /* | ||
602 | * Walk on lines inside given PDIE. If the PDIE is subprogram, walk only on | ||
603 | * the lines inside the subprogram, otherwise PDIE must be a CU DIE. | ||
604 | */ | ||
605 | static int die_walk_lines(Dwarf_Die *pdie, line_walk_handler_t handler, | ||
606 | void *data) | ||
607 | { | ||
608 | Dwarf_Lines *lines; | ||
609 | Dwarf_Line *line; | ||
610 | Dwarf_Addr addr; | ||
611 | const char *fname; | ||
612 | int lineno, ret = 0; | ||
613 | Dwarf_Die die_mem, *cu_die; | ||
614 | size_t nlines, i; | ||
615 | |||
616 | /* Get the CU die */ | ||
617 | if (dwarf_tag(pdie) == DW_TAG_subprogram) | ||
618 | cu_die = dwarf_diecu(pdie, &die_mem, NULL, NULL); | ||
619 | else | ||
620 | cu_die = pdie; | ||
621 | if (!cu_die) { | ||
622 | pr_debug2("Failed to get CU from subprogram\n"); | ||
623 | return -EINVAL; | ||
624 | } | ||
625 | |||
626 | /* Get lines list in the CU */ | ||
627 | if (dwarf_getsrclines(cu_die, &lines, &nlines) != 0) { | ||
628 | pr_debug2("Failed to get source lines on this CU.\n"); | ||
629 | return -ENOENT; | ||
630 | } | ||
631 | pr_debug2("Get %zd lines from this CU\n", nlines); | ||
632 | |||
633 | /* Walk on the lines on lines list */ | ||
634 | for (i = 0; i < nlines; i++) { | ||
635 | line = dwarf_onesrcline(lines, i); | ||
636 | if (line == NULL || | ||
637 | dwarf_lineno(line, &lineno) != 0 || | ||
638 | dwarf_lineaddr(line, &addr) != 0) { | ||
639 | pr_debug2("Failed to get line info. " | ||
640 | "Possible error in debuginfo.\n"); | ||
641 | continue; | ||
642 | } | ||
643 | /* Filter lines based on address */ | ||
644 | if (pdie != cu_die) | ||
645 | /* | ||
646 | * Address filtering | ||
647 | * The line is included in given function, and | ||
648 | * no inline block includes it. | ||
649 | */ | ||
650 | if (!dwarf_haspc(pdie, addr) || | ||
651 | die_find_inlinefunc(pdie, addr, &die_mem)) | ||
652 | continue; | ||
653 | /* Get source line */ | ||
654 | fname = dwarf_linesrc(line, NULL, NULL); | ||
655 | |||
656 | ret = handler(fname, lineno, addr, data); | ||
657 | if (ret != 0) | ||
658 | return ret; | ||
659 | } | ||
660 | |||
661 | /* | ||
662 | * Dwarf lines don't include function declarations and inlined | ||
663 | * subroutines. We have to check the functions list or the given function. | ||
664 | */ | ||
665 | if (pdie != cu_die) | ||
666 | ret = __die_walk_funclines(pdie, handler, data); | ||
667 | else { | ||
668 | struct __line_walk_param param = { | ||
669 | .handler = handler, | ||
670 | .data = data, | ||
671 | .retval = 0, | ||
672 | }; | ||
673 | dwarf_getfuncs(cu_die, __die_walk_culines_cb, ¶m, 0); | ||
674 | ret = param.retval; | ||
675 | } | ||
676 | |||
677 | return ret; | ||
678 | } | ||
679 | |||
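For reference, a hypothetical line_walk_handler_t callback matching the typedef above (it assumes the usual probe-finder includes for pr_debug and __unused); returning non-zero from such a handler makes die_walk_lines() stop early.

static int dump_line_cb(const char *fname, int lineno, Dwarf_Addr addr,
			void *data __unused)
{
	pr_debug("%s:%d at %lx\n", fname ?: "??", lineno, (unsigned long)addr);
	return 0;	/* keep walking */
}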
680 | struct __find_variable_param { | ||
681 | const char *name; | ||
682 | Dwarf_Addr addr; | ||
683 | }; | ||
684 | |||
323 | static int __die_find_variable_cb(Dwarf_Die *die_mem, void *data) | 685 | static int __die_find_variable_cb(Dwarf_Die *die_mem, void *data) |
324 | { | 686 | { |
325 | const char *name = data; | 687 | struct __find_variable_param *fvp = data; |
326 | int tag; | 688 | int tag; |
327 | 689 | ||
328 | tag = dwarf_tag(die_mem); | 690 | tag = dwarf_tag(die_mem); |
329 | if ((tag == DW_TAG_formal_parameter || | 691 | if ((tag == DW_TAG_formal_parameter || |
330 | tag == DW_TAG_variable) && | 692 | tag == DW_TAG_variable) && |
331 | die_compare_name(die_mem, name)) | 693 | die_compare_name(die_mem, fvp->name)) |
332 | return DIE_FIND_CB_FOUND; | 694 | return DIE_FIND_CB_FOUND; |
333 | 695 | ||
334 | return DIE_FIND_CB_CONTINUE; | 696 | if (dwarf_haspc(die_mem, fvp->addr)) |
697 | return DIE_FIND_CB_CONTINUE; | ||
698 | else | ||
699 | return DIE_FIND_CB_SIBLING; | ||
335 | } | 700 | } |
336 | 701 | ||
337 | /* Find a variable called 'name' */ | 702 | /* Find a variable called 'name' at given address */ |
338 | static Dwarf_Die *die_find_variable(Dwarf_Die *sp_die, const char *name, | 703 | static Dwarf_Die *die_find_variable_at(Dwarf_Die *sp_die, const char *name, |
339 | Dwarf_Die *die_mem) | 704 | Dwarf_Addr addr, Dwarf_Die *die_mem) |
340 | { | 705 | { |
341 | return die_find_child(sp_die, __die_find_variable_cb, (void *)name, | 706 | struct __find_variable_param fvp = { .name = name, .addr = addr}; |
707 | |||
708 | return die_find_child(sp_die, __die_find_variable_cb, (void *)&fvp, | ||
342 | die_mem); | 709 | die_mem); |
343 | } | 710 | } |
344 | 711 | ||
@@ -361,6 +728,60 @@ static Dwarf_Die *die_find_member(Dwarf_Die *st_die, const char *name, | |||
361 | die_mem); | 728 | die_mem); |
362 | } | 729 | } |
363 | 730 | ||
731 | /* Get the type name of the given variable DIE */ | ||
732 | static int die_get_typename(Dwarf_Die *vr_die, char *buf, int len) | ||
733 | { | ||
734 | Dwarf_Die type; | ||
735 | int tag, ret, ret2; | ||
736 | const char *tmp = ""; | ||
737 | |||
738 | if (__die_get_real_type(vr_die, &type) == NULL) | ||
739 | return -ENOENT; | ||
740 | |||
741 | tag = dwarf_tag(&type); | ||
742 | if (tag == DW_TAG_array_type || tag == DW_TAG_pointer_type) | ||
743 | tmp = "*"; | ||
744 | else if (tag == DW_TAG_subroutine_type) { | ||
745 | /* Function pointer */ | ||
746 | ret = snprintf(buf, len, "(function_type)"); | ||
747 | return (ret >= len) ? -E2BIG : ret; | ||
748 | } else { | ||
749 | if (!dwarf_diename(&type)) | ||
750 | return -ENOENT; | ||
751 | if (tag == DW_TAG_union_type) | ||
752 | tmp = "union "; | ||
753 | else if (tag == DW_TAG_structure_type) | ||
754 | tmp = "struct "; | ||
755 | /* Write a base name */ | ||
756 | ret = snprintf(buf, len, "%s%s", tmp, dwarf_diename(&type)); | ||
757 | return (ret >= len) ? -E2BIG : ret; | ||
758 | } | ||
759 | ret = die_get_typename(&type, buf, len); | ||
760 | if (ret > 0) { | ||
761 | ret2 = snprintf(buf + ret, len - ret, "%s", tmp); | ||
762 | ret = (ret2 >= len - ret) ? -E2BIG : ret2 + ret; | ||
763 | } | ||
764 | return ret; | ||
765 | } | ||
766 | |||
767 | /* Get the name and type of given variable DIE, stored as "type\tname" */ | ||
768 | static int die_get_varname(Dwarf_Die *vr_die, char *buf, int len) | ||
769 | { | ||
770 | int ret, ret2; | ||
771 | |||
772 | ret = die_get_typename(vr_die, buf, len); | ||
773 | if (ret < 0) { | ||
774 | pr_debug("Failed to get type, make it unknown.\n"); | ||
775 | ret = snprintf(buf, len, "(unknown_type)"); | ||
776 | } | ||
777 | if (ret > 0) { | ||
778 | ret2 = snprintf(buf + ret, len - ret, "\t%s", | ||
779 | dwarf_diename(vr_die)); | ||
780 | ret = (ret2 >= len - ret) ? -E2BIG : ret2 + ret; | ||
781 | } | ||
782 | return ret; | ||
783 | } | ||
784 | |||
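A hedged usage sketch, assuming probe-finder's existing includes: for a DIE describing "struct task_struct *p", die_get_varname() above is expected to fill the buffer with "struct task_struct*", a tab, then "p".

/* Hypothetical caller: dump the "type\tname" string for one variable DIE. */
static void print_varname(Dwarf_Die *vr_die)
{
	char buf[128];

	if (die_get_varname(vr_die, buf, sizeof(buf)) > 0)
		pr_debug("variable: %s\n", buf);
}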
364 | /* | 785 | /* |
365 | * Probe finder related functions | 786 | * Probe finder related functions |
366 | */ | 787 | */ |
@@ -374,8 +795,13 @@ static struct probe_trace_arg_ref *alloc_trace_arg_ref(long offs) | |||
374 | return ref; | 795 | return ref; |
375 | } | 796 | } |
376 | 797 | ||
377 | /* Show a location */ | 798 | /* |
378 | static int convert_variable_location(Dwarf_Die *vr_die, struct probe_finder *pf) | 799 | * Convert a location into trace_arg. |
800 | * If tvar == NULL, this just checks variable can be converted. | ||
801 | */ | ||
802 | static int convert_variable_location(Dwarf_Die *vr_die, Dwarf_Addr addr, | ||
803 | Dwarf_Op *fb_ops, | ||
804 | struct probe_trace_arg *tvar) | ||
379 | { | 805 | { |
380 | Dwarf_Attribute attr; | 806 | Dwarf_Attribute attr; |
381 | Dwarf_Op *op; | 807 | Dwarf_Op *op; |
@@ -384,20 +810,23 @@ static int convert_variable_location(Dwarf_Die *vr_die, struct probe_finder *pf) | |||
384 | Dwarf_Word offs = 0; | 810 | Dwarf_Word offs = 0; |
385 | bool ref = false; | 811 | bool ref = false; |
386 | const char *regs; | 812 | const char *regs; |
387 | struct probe_trace_arg *tvar = pf->tvar; | ||
388 | int ret; | 813 | int ret; |
389 | 814 | ||
815 | if (dwarf_attr(vr_die, DW_AT_external, &attr) != NULL) | ||
816 | goto static_var; | ||
817 | |||
390 | /* TODO: handle more than 1 exprs */ | 818 | /* TODO: handle more than 1 exprs */ |
391 | if (dwarf_attr(vr_die, DW_AT_location, &attr) == NULL || | 819 | if (dwarf_attr(vr_die, DW_AT_location, &attr) == NULL || |
392 | dwarf_getlocation_addr(&attr, pf->addr, &op, &nops, 1) <= 0 || | 820 | dwarf_getlocation_addr(&attr, addr, &op, &nops, 1) <= 0 || |
393 | nops == 0) { | 821 | nops == 0) { |
394 | /* TODO: Support const_value */ | 822 | /* TODO: Support const_value */ |
395 | pr_err("Failed to find the location of %s at this address.\n" | ||
396 | " Perhaps, it has been optimized out.\n", pf->pvar->var); | ||
397 | return -ENOENT; | 823 | return -ENOENT; |
398 | } | 824 | } |
399 | 825 | ||
400 | if (op->atom == DW_OP_addr) { | 826 | if (op->atom == DW_OP_addr) { |
827 | static_var: | ||
828 | if (!tvar) | ||
829 | return 0; | ||
401 | /* Static variables on memory (not stack), make @varname */ | 830 | /* Static variables on memory (not stack), make @varname */ |
402 | ret = strlen(dwarf_diename(vr_die)); | 831 | ret = strlen(dwarf_diename(vr_die)); |
403 | tvar->value = zalloc(ret + 2); | 832 | tvar->value = zalloc(ret + 2); |
@@ -412,14 +841,11 @@ static int convert_variable_location(Dwarf_Die *vr_die, struct probe_finder *pf) | |||
412 | 841 | ||
413 | /* If this is based on frame buffer, set the offset */ | 842 | /* If this is based on frame buffer, set the offset */ |
414 | if (op->atom == DW_OP_fbreg) { | 843 | if (op->atom == DW_OP_fbreg) { |
415 | if (pf->fb_ops == NULL) { | 844 | if (fb_ops == NULL) |
416 | pr_warning("The attribute of frame base is not " | ||
417 | "supported.\n"); | ||
418 | return -ENOTSUP; | 845 | return -ENOTSUP; |
419 | } | ||
420 | ref = true; | 846 | ref = true; |
421 | offs = op->number; | 847 | offs = op->number; |
422 | op = &pf->fb_ops[0]; | 848 | op = &fb_ops[0]; |
423 | } | 849 | } |
424 | 850 | ||
425 | if (op->atom >= DW_OP_breg0 && op->atom <= DW_OP_breg31) { | 851 | if (op->atom >= DW_OP_breg0 && op->atom <= DW_OP_breg31) { |
@@ -435,13 +861,18 @@ static int convert_variable_location(Dwarf_Die *vr_die, struct probe_finder *pf) | |||
435 | } else if (op->atom == DW_OP_regx) { | 861 | } else if (op->atom == DW_OP_regx) { |
436 | regn = op->number; | 862 | regn = op->number; |
437 | } else { | 863 | } else { |
438 | pr_warning("DW_OP %x is not supported.\n", op->atom); | 864 | pr_debug("DW_OP %x is not supported.\n", op->atom); |
439 | return -ENOTSUP; | 865 | return -ENOTSUP; |
440 | } | 866 | } |
441 | 867 | ||
868 | if (!tvar) | ||
869 | return 0; | ||
870 | |||
442 | regs = get_arch_regstr(regn); | 871 | regs = get_arch_regstr(regn); |
443 | if (!regs) { | 872 | if (!regs) { |
444 | pr_warning("Mapping for DWARF register number %u missing on this architecture.", regn); | 873 | /* This should be a bug in DWARF or this tool */ |
874 | pr_warning("Mapping for the register number %u " | ||
875 | "missing on this architecture.\n", regn); | ||
445 | return -ERANGE; | 876 | return -ERANGE; |
446 | } | 877 | } |
447 | 878 | ||
@@ -457,6 +888,8 @@ static int convert_variable_location(Dwarf_Die *vr_die, struct probe_finder *pf) | |||
457 | return 0; | 888 | return 0; |
458 | } | 889 | } |
459 | 890 | ||
891 | #define BYTES_TO_BITS(nb) ((nb) * BITS_PER_LONG / sizeof(long)) | ||
892 | |||
460 | static int convert_variable_type(Dwarf_Die *vr_die, | 893 | static int convert_variable_type(Dwarf_Die *vr_die, |
461 | struct probe_trace_arg *tvar, | 894 | struct probe_trace_arg *tvar, |
462 | const char *cast) | 895 | const char *cast) |
@@ -473,6 +906,14 @@ static int convert_variable_type(Dwarf_Die *vr_die, | |||
473 | return (tvar->type == NULL) ? -ENOMEM : 0; | 906 | return (tvar->type == NULL) ? -ENOMEM : 0; |
474 | } | 907 | } |
475 | 908 | ||
909 | if (die_get_bit_size(vr_die) != 0) { | ||
910 | /* This is a bitfield */ | ||
911 | ret = snprintf(buf, 16, "b%d@%d/%zd", die_get_bit_size(vr_die), | ||
912 | die_get_bit_offset(vr_die), | ||
913 | BYTES_TO_BITS(die_get_byte_size(vr_die))); | ||
914 | goto formatted; | ||
915 | } | ||
916 | |||
476 | if (die_get_real_type(vr_die, &type) == NULL) { | 917 | if (die_get_real_type(vr_die, &type) == NULL) { |
477 | pr_warning("Failed to get type information of %s.\n", | 918 | pr_warning("Failed to get type information of %s.\n", |
478 | dwarf_diename(vr_die)); | 919 | dwarf_diename(vr_die)); |
@@ -487,13 +928,14 @@ static int convert_variable_type(Dwarf_Die *vr_die, | |||
487 | if (ret != DW_TAG_pointer_type && | 928 | if (ret != DW_TAG_pointer_type && |
488 | ret != DW_TAG_array_type) { | 929 | ret != DW_TAG_array_type) { |
489 | pr_warning("Failed to cast into string: " | 930 | pr_warning("Failed to cast into string: " |
490 | "%s(%s) is not a pointer nor array.", | 931 | "%s(%s) is not a pointer nor array.\n", |
491 | dwarf_diename(vr_die), dwarf_diename(&type)); | 932 | dwarf_diename(vr_die), dwarf_diename(&type)); |
492 | return -EINVAL; | 933 | return -EINVAL; |
493 | } | 934 | } |
494 | if (ret == DW_TAG_pointer_type) { | 935 | if (ret == DW_TAG_pointer_type) { |
495 | if (die_get_real_type(&type, &type) == NULL) { | 936 | if (die_get_real_type(&type, &type) == NULL) { |
496 | pr_warning("Failed to get type information."); | 937 | pr_warning("Failed to get type" |
938 | " information.\n"); | ||
497 | return -ENOENT; | 939 | return -ENOENT; |
498 | } | 940 | } |
499 | while (*ref_ptr) | 941 | while (*ref_ptr) |
@@ -508,7 +950,7 @@ static int convert_variable_type(Dwarf_Die *vr_die, | |||
508 | if (!die_compare_name(&type, "char") && | 950 | if (!die_compare_name(&type, "char") && |
509 | !die_compare_name(&type, "unsigned char")) { | 951 | !die_compare_name(&type, "unsigned char")) { |
510 | pr_warning("Failed to cast into string: " | 952 | pr_warning("Failed to cast into string: " |
511 | "%s is not (unsigned) char *.", | 953 | "%s is not (unsigned) char *.\n", |
512 | dwarf_diename(vr_die)); | 954 | dwarf_diename(vr_die)); |
513 | return -EINVAL; | 955 | return -EINVAL; |
514 | } | 956 | } |
@@ -516,29 +958,31 @@ static int convert_variable_type(Dwarf_Die *vr_die, | |||
516 | return (tvar->type == NULL) ? -ENOMEM : 0; | 958 | return (tvar->type == NULL) ? -ENOMEM : 0; |
517 | } | 959 | } |
518 | 960 | ||
519 | ret = die_get_byte_size(&type) * 8; | 961 | ret = BYTES_TO_BITS(die_get_byte_size(&type)); |
520 | if (ret) { | 962 | if (!ret) |
521 | /* Check the bitwidth */ | 963 | /* No size ... try to use default type */ |
522 | if (ret > MAX_BASIC_TYPE_BITS) { | 964 | return 0; |
523 | pr_info("%s exceeds max-bitwidth." | ||
524 | " Cut down to %d bits.\n", | ||
525 | dwarf_diename(&type), MAX_BASIC_TYPE_BITS); | ||
526 | ret = MAX_BASIC_TYPE_BITS; | ||
527 | } | ||
528 | 965 | ||
529 | ret = snprintf(buf, 16, "%c%d", | 966 | /* Check the bitwidth */ |
530 | die_is_signed_type(&type) ? 's' : 'u', ret); | 967 | if (ret > MAX_BASIC_TYPE_BITS) { |
531 | if (ret < 0 || ret >= 16) { | 968 | pr_info("%s exceeds max-bitwidth. Cut down to %d bits.\n", |
532 | if (ret >= 16) | 969 | dwarf_diename(&type), MAX_BASIC_TYPE_BITS); |
533 | ret = -E2BIG; | 970 | ret = MAX_BASIC_TYPE_BITS; |
534 | pr_warning("Failed to convert variable type: %s\n", | ||
535 | strerror(-ret)); | ||
536 | return ret; | ||
537 | } | ||
538 | tvar->type = strdup(buf); | ||
539 | if (tvar->type == NULL) | ||
540 | return -ENOMEM; | ||
541 | } | 971 | } |
972 | ret = snprintf(buf, 16, "%c%d", | ||
973 | die_is_signed_type(&type) ? 's' : 'u', ret); | ||
974 | |||
975 | formatted: | ||
976 | if (ret < 0 || ret >= 16) { | ||
977 | if (ret >= 16) | ||
978 | ret = -E2BIG; | ||
979 | pr_warning("Failed to convert variable type: %s\n", | ||
980 | strerror(-ret)); | ||
981 | return ret; | ||
982 | } | ||
983 | tvar->type = strdup(buf); | ||
984 | if (tvar->type == NULL) | ||
985 | return -ENOMEM; | ||
542 | return 0; | 986 | return 0; |
543 | } | 987 | } |
544 | 988 | ||
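For reference, the strings convert_variable_type() writes into buf follow the kprobe-tracer fetch-argument type syntax: a bitfield becomes "b<bits>@<bit-offset>/<container-size-in-bits>", while an ordinary base type becomes "s<N>" or "u<N>". A minimal standalone sketch of the same formatting, using made-up sizes rather than values read from DWARF:

	#include <stdio.h>
	#include <sys/types.h>

	int main(void)
	{
		char buf[16];
		ssize_t container_bits = 4 * 8;	/* a 4-byte storage unit */

		/* 3-bit bitfield at bit offset 5 inside that unit -> "b3@5/32" */
		snprintf(buf, sizeof(buf), "b%d@%d/%zd", 3, 5, container_bits);
		printf("%s\n", buf);

		/* plain signed 32-bit base type -> "s32" */
		snprintf(buf, sizeof(buf), "%c%d", 's', 32);
		printf("%s\n", buf);
		return 0;
	}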
@@ -618,8 +1062,8 @@ static int convert_variable_fields(Dwarf_Die *vr_die, const char *varname, | |||
618 | return -EINVAL; | 1062 | return -EINVAL; |
619 | } | 1063 | } |
620 | if (field->name[0] == '[') { | 1064 | if (field->name[0] == '[') { |
621 | pr_err("Semantic error: %s is not a pointer nor array.", | 1065 | pr_err("Semantic error: %s is not a pointer" |
622 | varname); | 1066 | " nor array.\n", varname); |
623 | return -EINVAL; | 1067 | return -EINVAL; |
624 | } | 1068 | } |
625 | if (field->ref) { | 1069 | if (field->ref) { |
@@ -666,8 +1110,14 @@ static int convert_variable(Dwarf_Die *vr_die, struct probe_finder *pf) | |||
666 | pr_debug("Converting variable %s into trace event.\n", | 1110 | pr_debug("Converting variable %s into trace event.\n", |
667 | dwarf_diename(vr_die)); | 1111 | dwarf_diename(vr_die)); |
668 | 1112 | ||
669 | ret = convert_variable_location(vr_die, pf); | 1113 | ret = convert_variable_location(vr_die, pf->addr, pf->fb_ops, |
670 | if (ret == 0 && pf->pvar->field) { | 1114 | pf->tvar); |
1115 | if (ret == -ENOENT) | ||
1116 | pr_err("Failed to find the location of %s at this address.\n" | ||
1117 | " Perhaps, it has been optimized out.\n", pf->pvar->var); | ||
1118 | else if (ret == -ENOTSUP) | ||
1119 | pr_err("Sorry, we don't support this variable location yet.\n"); | ||
1120 | else if (pf->pvar->field) { | ||
671 | ret = convert_variable_fields(vr_die, pf->pvar->var, | 1121 | ret = convert_variable_fields(vr_die, pf->pvar->var, |
672 | pf->pvar->field, &pf->tvar->ref, | 1122 | pf->pvar->field, &pf->tvar->ref, |
673 | &die_mem); | 1123 | &die_mem); |
@@ -722,85 +1172,87 @@ static int find_variable(Dwarf_Die *sp_die, struct probe_finder *pf) | |||
722 | pr_debug("Searching '%s' variable in context.\n", | 1172 | pr_debug("Searching '%s' variable in context.\n", |
723 | pf->pvar->var); | 1173 | pf->pvar->var); |
724 | /* Search child die for local variables and parameters. */ | 1174 | /* Search child die for local variables and parameters. */ |
725 | if (die_find_variable(sp_die, pf->pvar->var, &vr_die)) | 1175 | if (die_find_variable_at(sp_die, pf->pvar->var, pf->addr, &vr_die)) |
726 | ret = convert_variable(&vr_die, pf); | 1176 | ret = convert_variable(&vr_die, pf); |
727 | else { | 1177 | else { |
728 | /* Search upper class */ | 1178 | /* Search upper class */ |
729 | nscopes = dwarf_getscopes_die(sp_die, &scopes); | 1179 | nscopes = dwarf_getscopes_die(sp_die, &scopes); |
730 | if (nscopes > 0) { | 1180 | while (nscopes-- > 1) { |
731 | ret = dwarf_getscopevar(scopes, nscopes, pf->pvar->var, | 1181 | pr_debug("Searching variables in %s\n", |
732 | 0, NULL, 0, 0, &vr_die); | 1182 | dwarf_diename(&scopes[nscopes])); |
733 | if (ret >= 0) | 1183 | /* We should check this scope, so give dummy address */ |
1184 | if (die_find_variable_at(&scopes[nscopes], | ||
1185 | pf->pvar->var, 0, | ||
1186 | &vr_die)) { | ||
734 | ret = convert_variable(&vr_die, pf); | 1187 | ret = convert_variable(&vr_die, pf); |
735 | else | 1188 | goto found; |
736 | ret = -ENOENT; | 1189 | } |
1190 | } | ||
1191 | if (scopes) | ||
737 | free(scopes); | 1192 | free(scopes); |
738 | } else | 1193 | ret = -ENOENT; |
739 | ret = -ENOENT; | ||
740 | } | 1194 | } |
1195 | found: | ||
741 | if (ret < 0) | 1196 | if (ret < 0) |
742 | pr_warning("Failed to find '%s' in this function.\n", | 1197 | pr_warning("Failed to find '%s' in this function.\n", |
743 | pf->pvar->var); | 1198 | pf->pvar->var); |
744 | return ret; | 1199 | return ret; |
745 | } | 1200 | } |
746 | 1201 | ||
747 | /* Show a probe point to output buffer */ | 1202 | /* Convert subprogram DIE to trace point */ |
748 | static int convert_probe_point(Dwarf_Die *sp_die, struct probe_finder *pf) | 1203 | static int convert_to_trace_point(Dwarf_Die *sp_die, Dwarf_Addr paddr, |
1204 | bool retprobe, struct probe_trace_point *tp) | ||
749 | { | 1205 | { |
750 | struct probe_trace_event *tev; | ||
751 | Dwarf_Addr eaddr; | 1206 | Dwarf_Addr eaddr; |
752 | Dwarf_Die die_mem; | ||
753 | const char *name; | 1207 | const char *name; |
754 | int ret, i; | ||
755 | Dwarf_Attribute fb_attr; | ||
756 | size_t nops; | ||
757 | |||
758 | if (pf->ntevs == pf->max_tevs) { | ||
759 | pr_warning("Too many( > %d) probe point found.\n", | ||
760 | pf->max_tevs); | ||
761 | return -ERANGE; | ||
762 | } | ||
763 | tev = &pf->tevs[pf->ntevs++]; | ||
764 | |||
765 | /* If no real subprogram, find a real one */ | ||
766 | if (!sp_die || dwarf_tag(sp_die) != DW_TAG_subprogram) { | ||
767 | sp_die = die_find_real_subprogram(&pf->cu_die, | ||
768 | pf->addr, &die_mem); | ||
769 | if (!sp_die) { | ||
770 | pr_warning("Failed to find probe point in any " | ||
771 | "functions.\n"); | ||
772 | return -ENOENT; | ||
773 | } | ||
774 | } | ||
775 | 1208 | ||
776 | /* Copy the name of probe point */ | 1209 | /* Copy the name of probe point */ |
777 | name = dwarf_diename(sp_die); | 1210 | name = dwarf_diename(sp_die); |
778 | if (name) { | 1211 | if (name) { |
779 | if (dwarf_entrypc(sp_die, &eaddr) != 0) { | 1212 | if (dwarf_entrypc(sp_die, &eaddr) != 0) { |
780 | pr_warning("Failed to get entry pc of %s\n", | 1213 | pr_warning("Failed to get entry address of %s\n", |
781 | dwarf_diename(sp_die)); | 1214 | dwarf_diename(sp_die)); |
782 | return -ENOENT; | 1215 | return -ENOENT; |
783 | } | 1216 | } |
784 | tev->point.symbol = strdup(name); | 1217 | tp->symbol = strdup(name); |
785 | if (tev->point.symbol == NULL) | 1218 | if (tp->symbol == NULL) |
786 | return -ENOMEM; | 1219 | return -ENOMEM; |
787 | tev->point.offset = (unsigned long)(pf->addr - eaddr); | 1220 | tp->offset = (unsigned long)(paddr - eaddr); |
788 | } else | 1221 | } else |
789 | /* This function has no name. */ | 1222 | /* This function has no name. */ |
790 | tev->point.offset = (unsigned long)pf->addr; | 1223 | tp->offset = (unsigned long)paddr; |
791 | 1224 | ||
792 | /* Return probe must be on the head of a subprogram */ | 1225 | /* Return probe must be on the head of a subprogram */ |
793 | if (pf->pev->point.retprobe) { | 1226 | if (retprobe) { |
794 | if (tev->point.offset != 0) { | 1227 | if (eaddr != paddr) { |
795 | pr_warning("Return probe must be on the head of" | 1228 | pr_warning("Return probe must be on the head of" |
796 | " a real function\n"); | 1229 | " a real function.\n"); |
797 | return -EINVAL; | 1230 | return -EINVAL; |
798 | } | 1231 | } |
799 | tev->point.retprobe = true; | 1232 | tp->retprobe = true; |
800 | } | 1233 | } |
801 | 1234 | ||
802 | pr_debug("Probe point found: %s+%lu\n", tev->point.symbol, | 1235 | return 0; |
803 | tev->point.offset); | 1236 | } |
1237 | |||
1238 | /* Call probe_finder callback with real subprogram DIE */ | ||
1239 | static int call_probe_finder(Dwarf_Die *sp_die, struct probe_finder *pf) | ||
1240 | { | ||
1241 | Dwarf_Die die_mem; | ||
1242 | Dwarf_Attribute fb_attr; | ||
1243 | size_t nops; | ||
1244 | int ret; | ||
1245 | |||
1246 | /* If no real subprogram, find a real one */ | ||
1247 | if (!sp_die || dwarf_tag(sp_die) != DW_TAG_subprogram) { | ||
1248 | sp_die = die_find_real_subprogram(&pf->cu_die, | ||
1249 | pf->addr, &die_mem); | ||
1250 | if (!sp_die) { | ||
1251 | pr_warning("Failed to find probe point in any " | ||
1252 | "functions.\n"); | ||
1253 | return -ENOENT; | ||
1254 | } | ||
1255 | } | ||
804 | 1256 | ||
805 | /* Get the frame base attribute/ops */ | 1257 | /* Get the frame base attribute/ops */ |
806 | dwarf_attr(sp_die, DW_AT_frame_base, &fb_attr); | 1258 | dwarf_attr(sp_die, DW_AT_frame_base, &fb_attr); |
@@ -813,182 +1265,118 @@ static int convert_probe_point(Dwarf_Die *sp_die, struct probe_finder *pf) | |||
813 | Dwarf_Frame *frame; | 1265 | Dwarf_Frame *frame; |
814 | if (dwarf_cfi_addrframe(pf->cfi, pf->addr, &frame) != 0 || | 1266 | if (dwarf_cfi_addrframe(pf->cfi, pf->addr, &frame) != 0 || |
815 | dwarf_frame_cfa(frame, &pf->fb_ops, &nops) != 0) { | 1267 | dwarf_frame_cfa(frame, &pf->fb_ops, &nops) != 0) { |
816 | pr_warning("Failed to get CFA on 0x%jx\n", | 1268 | pr_warning("Failed to get call frame on 0x%jx\n", |
817 | (uintmax_t)pf->addr); | 1269 | (uintmax_t)pf->addr); |
818 | return -ENOENT; | 1270 | return -ENOENT; |
819 | } | 1271 | } |
820 | #endif | 1272 | #endif |
821 | } | 1273 | } |
822 | 1274 | ||
823 | /* Find each argument */ | 1275 | /* Call finder's callback handler */ |
824 | tev->nargs = pf->pev->nargs; | 1276 | ret = pf->callback(sp_die, pf); |
825 | tev->args = zalloc(sizeof(struct probe_trace_arg) * tev->nargs); | ||
826 | if (tev->args == NULL) | ||
827 | return -ENOMEM; | ||
828 | for (i = 0; i < pf->pev->nargs; i++) { | ||
829 | pf->pvar = &pf->pev->args[i]; | ||
830 | pf->tvar = &tev->args[i]; | ||
831 | ret = find_variable(sp_die, pf); | ||
832 | if (ret != 0) | ||
833 | return ret; | ||
834 | } | ||
835 | 1277 | ||
836 | /* *pf->fb_ops will be cached in libdw. Don't free it. */ | 1278 | /* *pf->fb_ops will be cached in libdw. Don't free it. */ |
837 | pf->fb_ops = NULL; | 1279 | pf->fb_ops = NULL; |
838 | return 0; | 1280 | |
1281 | return ret; | ||
839 | } | 1282 | } |
840 | 1283 | ||
841 | /* Find probe point from its line number */ | 1284 | static int probe_point_line_walker(const char *fname, int lineno, |
842 | static int find_probe_point_by_line(struct probe_finder *pf) | 1285 | Dwarf_Addr addr, void *data) |
843 | { | 1286 | { |
844 | Dwarf_Lines *lines; | 1287 | struct probe_finder *pf = data; |
845 | Dwarf_Line *line; | 1288 | int ret; |
846 | size_t nlines, i; | ||
847 | Dwarf_Addr addr; | ||
848 | int lineno; | ||
849 | int ret = 0; | ||
850 | |||
851 | if (dwarf_getsrclines(&pf->cu_die, &lines, &nlines) != 0) { | ||
852 | pr_warning("No source lines found in this CU.\n"); | ||
853 | return -ENOENT; | ||
854 | } | ||
855 | 1289 | ||
856 | for (i = 0; i < nlines && ret == 0; i++) { | 1290 | if (lineno != pf->lno || strtailcmp(fname, pf->fname) != 0) |
857 | line = dwarf_onesrcline(lines, i); | 1291 | return 0; |
858 | if (dwarf_lineno(line, &lineno) != 0 || | ||
859 | lineno != pf->lno) | ||
860 | continue; | ||
861 | 1292 | ||
862 | /* TODO: Get fileno from line, but how? */ | 1293 | pf->addr = addr; |
863 | if (strtailcmp(dwarf_linesrc(line, NULL, NULL), pf->fname) != 0) | 1294 | ret = call_probe_finder(NULL, pf); |
864 | continue; | ||
865 | 1295 | ||
866 | if (dwarf_lineaddr(line, &addr) != 0) { | 1296 | /* Continue if no error, because the line will be in inline function */ |
867 | pr_warning("Failed to get the address of the line.\n"); | 1297 | return ret < 0 ? ret : 0; |
868 | return -ENOENT; | 1298 | } |
869 | } | ||
870 | pr_debug("Probe line found: line[%d]:%d addr:0x%jx\n", | ||
871 | (int)i, lineno, (uintmax_t)addr); | ||
872 | pf->addr = addr; | ||
873 | 1299 | ||
874 | ret = convert_probe_point(NULL, pf); | 1300 | /* Find probe point from its line number */ |
875 | /* Continuing, because target line might be inlined. */ | 1301 | static int find_probe_point_by_line(struct probe_finder *pf) |
876 | } | 1302 | { |
877 | return ret; | 1303 | return die_walk_lines(&pf->cu_die, probe_point_line_walker, pf); |
878 | } | 1304 | } |
879 | 1305 | ||
880 | /* Find lines which match lazy pattern */ | 1306 | /* Find lines which match lazy pattern */ |
881 | static int find_lazy_match_lines(struct list_head *head, | 1307 | static int find_lazy_match_lines(struct list_head *head, |
882 | const char *fname, const char *pat) | 1308 | const char *fname, const char *pat) |
883 | { | 1309 | { |
884 | char *fbuf, *p1, *p2; | 1310 | FILE *fp; |
885 | int fd, line, nlines = -1; | 1311 | char *line = NULL; |
886 | struct stat st; | 1312 | size_t line_len; |
887 | 1313 | ssize_t len; | |
888 | fd = open(fname, O_RDONLY); | 1314 | int count = 0, linenum = 1; |
889 | if (fd < 0) { | 1315 | |
890 | pr_warning("Failed to open %s: %s\n", fname, strerror(-fd)); | 1316 | fp = fopen(fname, "r"); |
1317 | if (!fp) { | ||
1318 | pr_warning("Failed to open %s: %s\n", fname, strerror(errno)); | ||
891 | return -errno; | 1319 | return -errno; |
892 | } | 1320 | } |
893 | 1321 | ||
894 | if (fstat(fd, &st) < 0) { | 1322 | while ((len = getline(&line, &line_len, fp)) > 0) { |
895 | pr_warning("Failed to get the size of %s: %s\n", | 1323 | |
896 | fname, strerror(errno)); | 1324 | if (line[len - 1] == '\n') |
897 | nlines = -errno; | 1325 | line[len - 1] = '\0'; |
898 | goto out_close; | 1326 | |
899 | } | 1327 | if (strlazymatch(line, pat)) { |
900 | 1328 | line_list__add_line(head, linenum); | |
901 | nlines = -ENOMEM; | 1329 | count++; |
902 | fbuf = malloc(st.st_size + 2); | ||
903 | if (fbuf == NULL) | ||
904 | goto out_close; | ||
905 | if (read(fd, fbuf, st.st_size) < 0) { | ||
906 | pr_warning("Failed to read %s: %s\n", fname, strerror(errno)); | ||
907 | nlines = -errno; | ||
908 | goto out_free_fbuf; | ||
909 | } | ||
910 | fbuf[st.st_size] = '\n'; /* Dummy line */ | ||
911 | fbuf[st.st_size + 1] = '\0'; | ||
912 | p1 = fbuf; | ||
913 | line = 1; | ||
914 | nlines = 0; | ||
915 | while ((p2 = strchr(p1, '\n')) != NULL) { | ||
916 | *p2 = '\0'; | ||
917 | if (strlazymatch(p1, pat)) { | ||
918 | line_list__add_line(head, line); | ||
919 | nlines++; | ||
920 | } | 1330 | } |
921 | line++; | 1331 | linenum++; |
922 | p1 = p2 + 1; | ||
923 | } | 1332 | } |
924 | out_free_fbuf: | 1333 | |
925 | free(fbuf); | 1334 | if (ferror(fp)) |
926 | out_close: | 1335 | count = -errno; |
927 | close(fd); | 1336 | free(line); |
928 | return nlines; | 1337 | fclose(fp); |
1338 | |||
1339 | if (count == 0) | ||
1340 | pr_debug("No matched lines found in %s.\n", fname); | ||
1341 | return count; | ||
1342 | } | ||
1343 | |||
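The rewritten find_lazy_match_lines() above trades the open()/read()/strchr() buffer walk for POSIX getline(), which (re)allocates its line buffer as needed and only requires a single free() at the end. A reduced sketch of the same loop shape; strstr() stands in for perf's strlazymatch() and the file name is whatever the caller supplies:

	#define _GNU_SOURCE		/* for getline() on glibc */
	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/types.h>

	static int count_matching_lines(const char *fname, const char *pat)
	{
		char *line = NULL;
		size_t cap = 0;
		ssize_t len;
		int count = 0;
		FILE *fp = fopen(fname, "r");

		if (!fp)
			return -errno;
		while ((len = getline(&line, &cap, fp)) > 0) {
			if (line[len - 1] == '\n')
				line[len - 1] = '\0';	/* strip trailing newline */
			if (strstr(line, pat))		/* real code: strlazymatch() */
				count++;
		}
		free(line);	/* getline() allocated/grew this buffer */
		fclose(fp);
		return count;
	}

Returning the match count rather than a line count lets the caller treat 0 as "no matches" and negative values as errors, which is exactly what find_probe_point_lazy() relies on with its "if (ret <= 0) return ret" check.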
1344 | static int probe_point_lazy_walker(const char *fname, int lineno, | ||
1345 | Dwarf_Addr addr, void *data) | ||
1346 | { | ||
1347 | struct probe_finder *pf = data; | ||
1348 | int ret; | ||
1349 | |||
1350 | if (!line_list__has_line(&pf->lcache, lineno) || | ||
1351 | strtailcmp(fname, pf->fname) != 0) | ||
1352 | return 0; | ||
1353 | |||
1354 | pr_debug("Probe line found: line:%d addr:0x%llx\n", | ||
1355 | lineno, (unsigned long long)addr); | ||
1356 | pf->addr = addr; | ||
1357 | ret = call_probe_finder(NULL, pf); | ||
1358 | |||
1359 | /* | ||
1360 | * Continue if no error, because the lazy pattern will match | ||
1361 | * to other lines | ||
1362 | */ | ||
1363 | return ret < 0 ? ret : 0; | ||
929 | } | 1364 | } |
930 | 1365 | ||
931 | /* Find probe points from lazy pattern */ | 1366 | /* Find probe points from lazy pattern */ |
932 | static int find_probe_point_lazy(Dwarf_Die *sp_die, struct probe_finder *pf) | 1367 | static int find_probe_point_lazy(Dwarf_Die *sp_die, struct probe_finder *pf) |
933 | { | 1368 | { |
934 | Dwarf_Lines *lines; | ||
935 | Dwarf_Line *line; | ||
936 | size_t nlines, i; | ||
937 | Dwarf_Addr addr; | ||
938 | Dwarf_Die die_mem; | ||
939 | int lineno; | ||
940 | int ret = 0; | 1369 | int ret = 0; |
941 | 1370 | ||
942 | if (list_empty(&pf->lcache)) { | 1371 | if (list_empty(&pf->lcache)) { |
943 | /* Matching lazy line pattern */ | 1372 | /* Matching lazy line pattern */ |
944 | ret = find_lazy_match_lines(&pf->lcache, pf->fname, | 1373 | ret = find_lazy_match_lines(&pf->lcache, pf->fname, |
945 | pf->pev->point.lazy_line); | 1374 | pf->pev->point.lazy_line); |
946 | if (ret == 0) { | 1375 | if (ret <= 0) |
947 | pr_debug("No matched lines found in %s.\n", pf->fname); | ||
948 | return 0; | ||
949 | } else if (ret < 0) | ||
950 | return ret; | 1376 | return ret; |
951 | } | 1377 | } |
952 | 1378 | ||
953 | if (dwarf_getsrclines(&pf->cu_die, &lines, &nlines) != 0) { | 1379 | return die_walk_lines(sp_die, probe_point_lazy_walker, pf); |
954 | pr_warning("No source lines found in this CU.\n"); | ||
955 | return -ENOENT; | ||
956 | } | ||
957 | |||
958 | for (i = 0; i < nlines && ret >= 0; i++) { | ||
959 | line = dwarf_onesrcline(lines, i); | ||
960 | |||
961 | if (dwarf_lineno(line, &lineno) != 0 || | ||
962 | !line_list__has_line(&pf->lcache, lineno)) | ||
963 | continue; | ||
964 | |||
965 | /* TODO: Get fileno from line, but how? */ | ||
966 | if (strtailcmp(dwarf_linesrc(line, NULL, NULL), pf->fname) != 0) | ||
967 | continue; | ||
968 | |||
969 | if (dwarf_lineaddr(line, &addr) != 0) { | ||
970 | pr_debug("Failed to get the address of line %d.\n", | ||
971 | lineno); | ||
972 | continue; | ||
973 | } | ||
974 | if (sp_die) { | ||
975 | /* Address filtering 1: does sp_die include addr? */ | ||
976 | if (!dwarf_haspc(sp_die, addr)) | ||
977 | continue; | ||
978 | /* Address filtering 2: No child include addr? */ | ||
979 | if (die_find_inlinefunc(sp_die, addr, &die_mem)) | ||
980 | continue; | ||
981 | } | ||
982 | |||
983 | pr_debug("Probe line found: line[%d]:%d addr:0x%llx\n", | ||
984 | (int)i, lineno, (unsigned long long)addr); | ||
985 | pf->addr = addr; | ||
986 | |||
987 | ret = convert_probe_point(sp_die, pf); | ||
988 | /* Continuing, because target line might be inlined. */ | ||
989 | } | ||
990 | /* TODO: deallocate lines, but how? */ | ||
991 | return ret; | ||
992 | } | 1380 | } |
993 | 1381 | ||
994 | /* Callback parameter with return value */ | 1382 | /* Callback parameter with return value */ |
@@ -1009,7 +1397,7 @@ static int probe_point_inline_cb(Dwarf_Die *in_die, void *data) | |||
1009 | else { | 1397 | else { |
1010 | /* Get probe address */ | 1398 | /* Get probe address */ |
1011 | if (dwarf_entrypc(in_die, &addr) != 0) { | 1399 | if (dwarf_entrypc(in_die, &addr) != 0) { |
1012 | pr_warning("Failed to get entry pc of %s.\n", | 1400 | pr_warning("Failed to get entry address of %s.\n", |
1013 | dwarf_diename(in_die)); | 1401 | dwarf_diename(in_die)); |
1014 | param->retval = -ENOENT; | 1402 | param->retval = -ENOENT; |
1015 | return DWARF_CB_ABORT; | 1403 | return DWARF_CB_ABORT; |
@@ -1019,7 +1407,7 @@ static int probe_point_inline_cb(Dwarf_Die *in_die, void *data) | |||
1019 | pr_debug("found inline addr: 0x%jx\n", | 1407 | pr_debug("found inline addr: 0x%jx\n", |
1020 | (uintmax_t)pf->addr); | 1408 | (uintmax_t)pf->addr); |
1021 | 1409 | ||
1022 | param->retval = convert_probe_point(in_die, pf); | 1410 | param->retval = call_probe_finder(in_die, pf); |
1023 | if (param->retval < 0) | 1411 | if (param->retval < 0) |
1024 | return DWARF_CB_ABORT; | 1412 | return DWARF_CB_ABORT; |
1025 | } | 1413 | } |
@@ -1039,6 +1427,10 @@ static int probe_point_search_cb(Dwarf_Die *sp_die, void *data) | |||
1039 | !die_compare_name(sp_die, pp->function)) | 1427 | !die_compare_name(sp_die, pp->function)) |
1040 | return DWARF_CB_OK; | 1428 | return DWARF_CB_OK; |
1041 | 1429 | ||
1430 | /* Check declared file */ | ||
1431 | if (pp->file && strtailcmp(pp->file, dwarf_decl_file(sp_die))) | ||
1432 | return DWARF_CB_OK; | ||
1433 | |||
1042 | pf->fname = dwarf_decl_file(sp_die); | 1434 | pf->fname = dwarf_decl_file(sp_die); |
1043 | if (pp->line) { /* Function relative line */ | 1435 | if (pp->line) { /* Function relative line */ |
1044 | dwarf_decl_line(sp_die, &pf->lno); | 1436 | dwarf_decl_line(sp_die, &pf->lno); |
@@ -1050,14 +1442,14 @@ static int probe_point_search_cb(Dwarf_Die *sp_die, void *data) | |||
1050 | param->retval = find_probe_point_lazy(sp_die, pf); | 1442 | param->retval = find_probe_point_lazy(sp_die, pf); |
1051 | else { | 1443 | else { |
1052 | if (dwarf_entrypc(sp_die, &pf->addr) != 0) { | 1444 | if (dwarf_entrypc(sp_die, &pf->addr) != 0) { |
1053 | pr_warning("Failed to get entry pc of %s.\n", | 1445 | pr_warning("Failed to get entry address of " |
1054 | dwarf_diename(sp_die)); | 1446 | "%s.\n", dwarf_diename(sp_die)); |
1055 | param->retval = -ENOENT; | 1447 | param->retval = -ENOENT; |
1056 | return DWARF_CB_ABORT; | 1448 | return DWARF_CB_ABORT; |
1057 | } | 1449 | } |
1058 | pf->addr += pp->offset; | 1450 | pf->addr += pp->offset; |
1059 | /* TODO: Check the address in this function */ | 1451 | /* TODO: Check the address in this function */ |
1060 | param->retval = convert_probe_point(sp_die, pf); | 1452 | param->retval = call_probe_finder(sp_die, pf); |
1061 | } | 1453 | } |
1062 | } else { | 1454 | } else { |
1063 | struct dwarf_callback_param _param = {.data = (void *)pf, | 1455 | struct dwarf_callback_param _param = {.data = (void *)pf, |
@@ -1079,155 +1471,410 @@ static int find_probe_point_by_func(struct probe_finder *pf) | |||
1079 | return _param.retval; | 1471 | return _param.retval; |
1080 | } | 1472 | } |
1081 | 1473 | ||
1082 | /* Find probe_trace_events specified by perf_probe_event from debuginfo */ | 1474 | struct pubname_callback_param { |
1083 | int find_probe_trace_events(int fd, struct perf_probe_event *pev, | 1475 | char *function; |
1084 | struct probe_trace_event **tevs, int max_tevs) | 1476 | char *file; |
1477 | Dwarf_Die *cu_die; | ||
1478 | Dwarf_Die *sp_die; | ||
1479 | int found; | ||
1480 | }; | ||
1481 | |||
1482 | static int pubname_search_cb(Dwarf *dbg, Dwarf_Global *gl, void *data) | ||
1483 | { | ||
1484 | struct pubname_callback_param *param = data; | ||
1485 | |||
1486 | if (dwarf_offdie(dbg, gl->die_offset, param->sp_die)) { | ||
1487 | if (dwarf_tag(param->sp_die) != DW_TAG_subprogram) | ||
1488 | return DWARF_CB_OK; | ||
1489 | |||
1490 | if (die_compare_name(param->sp_die, param->function)) { | ||
1491 | if (!dwarf_offdie(dbg, gl->cu_offset, param->cu_die)) | ||
1492 | return DWARF_CB_OK; | ||
1493 | |||
1494 | if (param->file && | ||
1495 | strtailcmp(param->file, dwarf_decl_file(param->sp_die))) | ||
1496 | return DWARF_CB_OK; | ||
1497 | |||
1498 | param->found = 1; | ||
1499 | return DWARF_CB_ABORT; | ||
1500 | } | ||
1501 | } | ||
1502 | |||
1503 | return DWARF_CB_OK; | ||
1504 | } | ||
1505 | |||
1506 | /* Find probe points from debuginfo */ | ||
1507 | static int find_probes(int fd, struct probe_finder *pf) | ||
1085 | { | 1508 | { |
1086 | struct probe_finder pf = {.pev = pev, .max_tevs = max_tevs}; | 1509 | struct perf_probe_point *pp = &pf->pev->point; |
1087 | struct perf_probe_point *pp = &pev->point; | ||
1088 | Dwarf_Off off, noff; | 1510 | Dwarf_Off off, noff; |
1089 | size_t cuhl; | 1511 | size_t cuhl; |
1090 | Dwarf_Die *diep; | 1512 | Dwarf_Die *diep; |
1091 | Dwarf *dbg; | 1513 | Dwarf *dbg = NULL; |
1514 | Dwfl *dwfl; | ||
1515 | Dwarf_Addr bias; /* Currently ignored */ | ||
1092 | int ret = 0; | 1516 | int ret = 0; |
1093 | 1517 | ||
1094 | pf.tevs = zalloc(sizeof(struct probe_trace_event) * max_tevs); | 1518 | dbg = dwfl_init_offline_dwarf(fd, &dwfl, &bias); |
1095 | if (pf.tevs == NULL) | ||
1096 | return -ENOMEM; | ||
1097 | *tevs = pf.tevs; | ||
1098 | pf.ntevs = 0; | ||
1099 | |||
1100 | dbg = dwarf_begin(fd, DWARF_C_READ); | ||
1101 | if (!dbg) { | 1519 | if (!dbg) { |
1102 | pr_warning("No dwarf info found in the vmlinux - " | 1520 | pr_warning("No debug information found in the vmlinux - " |
1103 | "please rebuild with CONFIG_DEBUG_INFO=y.\n"); | 1521 | "please rebuild with CONFIG_DEBUG_INFO=y.\n"); |
1104 | free(pf.tevs); | 1522 | close(fd); /* Without dwfl_end(), fd isn't closed. */ |
1105 | *tevs = NULL; | ||
1106 | return -EBADF; | 1523 | return -EBADF; |
1107 | } | 1524 | } |
1108 | 1525 | ||
1109 | #if _ELFUTILS_PREREQ(0, 142) | 1526 | #if _ELFUTILS_PREREQ(0, 142) |
1110 | /* Get the call frame information from this dwarf */ | 1527 | /* Get the call frame information from this dwarf */ |
1111 | pf.cfi = dwarf_getcfi(dbg); | 1528 | pf->cfi = dwarf_getcfi(dbg); |
1112 | #endif | 1529 | #endif |
1113 | 1530 | ||
1114 | off = 0; | 1531 | off = 0; |
1115 | line_list__init(&pf.lcache); | 1532 | line_list__init(&pf->lcache); |
1533 | |||
1534 | /* Fastpath: lookup by function name from .debug_pubnames section */ | ||
1535 | if (pp->function) { | ||
1536 | struct pubname_callback_param pubname_param = { | ||
1537 | .function = pp->function, | ||
1538 | .file = pp->file, | ||
1539 | .cu_die = &pf->cu_die, | ||
1540 | .sp_die = &pf->sp_die, | ||
1541 | .found = 0, | ||
1542 | }; | ||
1543 | struct dwarf_callback_param probe_param = { | ||
1544 | .data = pf, | ||
1545 | }; | ||
1546 | |||
1547 | dwarf_getpubnames(dbg, pubname_search_cb, &pubname_param, 0); | ||
1548 | if (pubname_param.found) { | ||
1549 | ret = probe_point_search_cb(&pf->sp_die, &probe_param); | ||
1550 | if (ret) | ||
1551 | goto found; | ||
1552 | } | ||
1553 | } | ||
1554 | |||
1116 | /* Loop on CUs (Compilation Unit) */ | 1555 | /* Loop on CUs (Compilation Unit) */ |
1117 | while (!dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL) && | 1556 | while (!dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL)) { |
1118 | ret >= 0) { | ||
1119 | /* Get the DIE(Debugging Information Entry) of this CU */ | 1557 | /* Get the DIE(Debugging Information Entry) of this CU */ |
1120 | diep = dwarf_offdie(dbg, off + cuhl, &pf.cu_die); | 1558 | diep = dwarf_offdie(dbg, off + cuhl, &pf->cu_die); |
1121 | if (!diep) | 1559 | if (!diep) |
1122 | continue; | 1560 | continue; |
1123 | 1561 | ||
1124 | /* Check if target file is included. */ | 1562 | /* Check if target file is included. */ |
1125 | if (pp->file) | 1563 | if (pp->file) |
1126 | pf.fname = cu_find_realpath(&pf.cu_die, pp->file); | 1564 | pf->fname = cu_find_realpath(&pf->cu_die, pp->file); |
1127 | else | 1565 | else |
1128 | pf.fname = NULL; | 1566 | pf->fname = NULL; |
1129 | 1567 | ||
1130 | if (!pp->file || pf.fname) { | 1568 | if (!pp->file || pf->fname) { |
1131 | if (pp->function) | 1569 | if (pp->function) |
1132 | ret = find_probe_point_by_func(&pf); | 1570 | ret = find_probe_point_by_func(pf); |
1133 | else if (pp->lazy_line) | 1571 | else if (pp->lazy_line) |
1134 | ret = find_probe_point_lazy(NULL, &pf); | 1572 | ret = find_probe_point_lazy(NULL, pf); |
1135 | else { | 1573 | else { |
1136 | pf.lno = pp->line; | 1574 | pf->lno = pp->line; |
1137 | ret = find_probe_point_by_line(&pf); | 1575 | ret = find_probe_point_by_line(pf); |
1138 | } | 1576 | } |
1577 | if (ret < 0) | ||
1578 | break; | ||
1139 | } | 1579 | } |
1140 | off = noff; | 1580 | off = noff; |
1141 | } | 1581 | } |
1142 | line_list__free(&pf.lcache); | ||
1143 | dwarf_end(dbg); | ||
1144 | 1582 | ||
1145 | return (ret < 0) ? ret : pf.ntevs; | 1583 | found: |
1584 | line_list__free(&pf->lcache); | ||
1585 | if (dwfl) | ||
1586 | dwfl_end(dwfl); | ||
1587 | |||
1588 | return ret; | ||
1589 | } | ||
1590 | |||
1591 | /* Add a found probe point into trace event list */ | ||
1592 | static int add_probe_trace_event(Dwarf_Die *sp_die, struct probe_finder *pf) | ||
1593 | { | ||
1594 | struct trace_event_finder *tf = | ||
1595 | container_of(pf, struct trace_event_finder, pf); | ||
1596 | struct probe_trace_event *tev; | ||
1597 | int ret, i; | ||
1598 | |||
1599 | /* Check number of tevs */ | ||
1600 | if (tf->ntevs == tf->max_tevs) { | ||
1601 | pr_warning("Too many (> %d) probe points found.\n", | ||
1602 | tf->max_tevs); | ||
1603 | return -ERANGE; | ||
1604 | } | ||
1605 | tev = &tf->tevs[tf->ntevs++]; | ||
1606 | |||
1607 | ret = convert_to_trace_point(sp_die, pf->addr, pf->pev->point.retprobe, | ||
1608 | &tev->point); | ||
1609 | if (ret < 0) | ||
1610 | return ret; | ||
1611 | |||
1612 | pr_debug("Probe point found: %s+%lu\n", tev->point.symbol, | ||
1613 | tev->point.offset); | ||
1614 | |||
1615 | /* Find each argument */ | ||
1616 | tev->nargs = pf->pev->nargs; | ||
1617 | tev->args = zalloc(sizeof(struct probe_trace_arg) * tev->nargs); | ||
1618 | if (tev->args == NULL) | ||
1619 | return -ENOMEM; | ||
1620 | for (i = 0; i < pf->pev->nargs; i++) { | ||
1621 | pf->pvar = &pf->pev->args[i]; | ||
1622 | pf->tvar = &tev->args[i]; | ||
1623 | ret = find_variable(sp_die, pf); | ||
1624 | if (ret != 0) | ||
1625 | return ret; | ||
1626 | } | ||
1627 | |||
1628 | return 0; | ||
1629 | } | ||
1630 | |||
1631 | /* Find probe_trace_events specified by perf_probe_event from debuginfo */ | ||
1632 | int find_probe_trace_events(int fd, struct perf_probe_event *pev, | ||
1633 | struct probe_trace_event **tevs, int max_tevs) | ||
1634 | { | ||
1635 | struct trace_event_finder tf = { | ||
1636 | .pf = {.pev = pev, .callback = add_probe_trace_event}, | ||
1637 | .max_tevs = max_tevs}; | ||
1638 | int ret; | ||
1639 | |||
1640 | /* Allocate result tevs array */ | ||
1641 | *tevs = zalloc(sizeof(struct probe_trace_event) * max_tevs); | ||
1642 | if (*tevs == NULL) | ||
1643 | return -ENOMEM; | ||
1644 | |||
1645 | tf.tevs = *tevs; | ||
1646 | tf.ntevs = 0; | ||
1647 | |||
1648 | ret = find_probes(fd, &tf.pf); | ||
1649 | if (ret < 0) { | ||
1650 | free(*tevs); | ||
1651 | *tevs = NULL; | ||
1652 | return ret; | ||
1653 | } | ||
1654 | |||
1655 | return (ret < 0) ? ret : tf.ntevs; | ||
1656 | } | ||
1657 | |||
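With the callback split in place, find_probe_trace_events() keeps its original external contract: it returns the number of trace events on success, a negative error code on failure, and hands the caller an array it must free. A hypothetical caller sketch (the real users live in probe-event.c, outside this hunk, and it assumes the tools/perf probe headers for the types and prototypes; the cap of 128 is arbitrary):

	/* Assumes tools/perf probe headers and <stdio.h>/<stdlib.h>. */
	static int show_found_probes(int fd, struct perf_probe_event *pev)
	{
		struct probe_trace_event *tevs = NULL;
		int i, ntevs;

		ntevs = find_probe_trace_events(fd, pev, &tevs, 128);
		if (ntevs < 0)
			return ntevs;		/* negative error code */

		for (i = 0; i < ntevs; i++)
			printf("probe: %s+%lu (%d args)\n",
			       tevs[i].point.symbol, tevs[i].point.offset,
			       tevs[i].nargs);

		/* Real callers also release each event's strings; omitted here. */
		free(tevs);
		return 0;
	}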
1658 | #define MAX_VAR_LEN 64 | ||
1659 | |||
1660 | /* Collect available variables in this scope */ | ||
1661 | static int collect_variables_cb(Dwarf_Die *die_mem, void *data) | ||
1662 | { | ||
1663 | struct available_var_finder *af = data; | ||
1664 | struct variable_list *vl; | ||
1665 | char buf[MAX_VAR_LEN]; | ||
1666 | int tag, ret; | ||
1667 | |||
1668 | vl = &af->vls[af->nvls - 1]; | ||
1669 | |||
1670 | tag = dwarf_tag(die_mem); | ||
1671 | if (tag == DW_TAG_formal_parameter || | ||
1672 | tag == DW_TAG_variable) { | ||
1673 | ret = convert_variable_location(die_mem, af->pf.addr, | ||
1674 | af->pf.fb_ops, NULL); | ||
1675 | if (ret == 0) { | ||
1676 | ret = die_get_varname(die_mem, buf, MAX_VAR_LEN); | ||
1677 | pr_debug2("Add new var: %s\n", buf); | ||
1678 | if (ret > 0) | ||
1679 | strlist__add(vl->vars, buf); | ||
1680 | } | ||
1681 | } | ||
1682 | |||
1683 | if (af->child && dwarf_haspc(die_mem, af->pf.addr)) | ||
1684 | return DIE_FIND_CB_CONTINUE; | ||
1685 | else | ||
1686 | return DIE_FIND_CB_SIBLING; | ||
1687 | } | ||
1688 | |||
1689 | /* Add a found vars into available variables list */ | ||
1690 | static int add_available_vars(Dwarf_Die *sp_die, struct probe_finder *pf) | ||
1691 | { | ||
1692 | struct available_var_finder *af = | ||
1693 | container_of(pf, struct available_var_finder, pf); | ||
1694 | struct variable_list *vl; | ||
1695 | Dwarf_Die die_mem, *scopes = NULL; | ||
1696 | int ret, nscopes; | ||
1697 | |||
1698 | /* Check number of tevs */ | ||
1699 | if (af->nvls == af->max_vls) { | ||
1700 | pr_warning("Too many (> %d) probe points found.\n", af->max_vls); | ||
1701 | return -ERANGE; | ||
1702 | } | ||
1703 | vl = &af->vls[af->nvls++]; | ||
1704 | |||
1705 | ret = convert_to_trace_point(sp_die, pf->addr, pf->pev->point.retprobe, | ||
1706 | &vl->point); | ||
1707 | if (ret < 0) | ||
1708 | return ret; | ||
1709 | |||
1710 | pr_debug("Probe point found: %s+%lu\n", vl->point.symbol, | ||
1711 | vl->point.offset); | ||
1712 | |||
1713 | /* Find local variables */ | ||
1714 | vl->vars = strlist__new(true, NULL); | ||
1715 | if (vl->vars == NULL) | ||
1716 | return -ENOMEM; | ||
1717 | af->child = true; | ||
1718 | die_find_child(sp_die, collect_variables_cb, (void *)af, &die_mem); | ||
1719 | |||
1720 | /* Find external variables */ | ||
1721 | if (!af->externs) | ||
1722 | goto out; | ||
1723 | /* Don't need to search child DIE for externs. */ | ||
1724 | af->child = false; | ||
1725 | nscopes = dwarf_getscopes_die(sp_die, &scopes); | ||
1726 | while (nscopes-- > 1) | ||
1727 | die_find_child(&scopes[nscopes], collect_variables_cb, | ||
1728 | (void *)af, &die_mem); | ||
1729 | if (scopes) | ||
1730 | free(scopes); | ||
1731 | |||
1732 | out: | ||
1733 | if (strlist__empty(vl->vars)) { | ||
1734 | strlist__delete(vl->vars); | ||
1735 | vl->vars = NULL; | ||
1736 | } | ||
1737 | |||
1738 | return ret; | ||
1739 | } | ||
1740 | |||
1741 | /* Find available variables at given probe point */ | ||
1742 | int find_available_vars_at(int fd, struct perf_probe_event *pev, | ||
1743 | struct variable_list **vls, int max_vls, | ||
1744 | bool externs) | ||
1745 | { | ||
1746 | struct available_var_finder af = { | ||
1747 | .pf = {.pev = pev, .callback = add_available_vars}, | ||
1748 | .max_vls = max_vls, .externs = externs}; | ||
1749 | int ret; | ||
1750 | |||
1751 | /* Allocate result vls array */ | ||
1752 | *vls = zalloc(sizeof(struct variable_list) * max_vls); | ||
1753 | if (*vls == NULL) | ||
1754 | return -ENOMEM; | ||
1755 | |||
1756 | af.vls = *vls; | ||
1757 | af.nvls = 0; | ||
1758 | |||
1759 | ret = find_probes(fd, &af.pf); | ||
1760 | if (ret < 0) { | ||
1761 | /* Free vlist for error */ | ||
1762 | while (af.nvls--) { | ||
1763 | if (af.vls[af.nvls].point.symbol) | ||
1764 | free(af.vls[af.nvls].point.symbol); | ||
1765 | if (af.vls[af.nvls].vars) | ||
1766 | strlist__delete(af.vls[af.nvls].vars); | ||
1767 | } | ||
1768 | free(af.vls); | ||
1769 | *vls = NULL; | ||
1770 | return ret; | ||
1771 | } | ||
1772 | |||
1773 | return (ret < 0) ? ret : af.nvls; | ||
1146 | } | 1774 | } |
1147 | 1775 | ||
1148 | /* Reverse search */ | 1776 | /* Reverse search */ |
1149 | int find_perf_probe_point(int fd, unsigned long addr, | 1777 | int find_perf_probe_point(unsigned long addr, struct perf_probe_point *ppt) |
1150 | struct perf_probe_point *ppt) | ||
1151 | { | 1778 | { |
1152 | Dwarf_Die cudie, spdie, indie; | 1779 | Dwarf_Die cudie, spdie, indie; |
1153 | Dwarf *dbg; | 1780 | Dwarf *dbg = NULL; |
1154 | Dwarf_Line *line; | 1781 | Dwfl *dwfl = NULL; |
1155 | Dwarf_Addr laddr, eaddr; | 1782 | Dwarf_Addr _addr, baseaddr, bias = 0; |
1156 | const char *tmp; | 1783 | const char *fname = NULL, *func = NULL, *tmp; |
1157 | int lineno, ret = 0; | 1784 | int baseline = 0, lineno = 0, ret = 0; |
1158 | bool found = false; | 1785 | |
1159 | 1786 | /* Open the live linux kernel */ | |
1160 | dbg = dwarf_begin(fd, DWARF_C_READ); | 1787 | dbg = dwfl_init_live_kernel_dwarf(addr, &dwfl, &bias); |
1161 | if (!dbg) | 1788 | if (!dbg) { |
1162 | return -EBADF; | 1789 | pr_warning("No debug information found in the vmlinux - " |
1790 | "please rebuild with CONFIG_DEBUG_INFO=y.\n"); | ||
1791 | ret = -EINVAL; | ||
1792 | goto end; | ||
1793 | } | ||
1163 | 1794 | ||
1795 | /* Adjust address with bias */ | ||
1796 | addr += bias; | ||
1164 | /* Find cu die */ | 1797 | /* Find cu die */ |
1165 | if (!dwarf_addrdie(dbg, (Dwarf_Addr)addr, &cudie)) { | 1798 | if (!dwarf_addrdie(dbg, (Dwarf_Addr)addr - bias, &cudie)) { |
1799 | pr_warning("Failed to find debug information for address %lx\n", | ||
1800 | addr); | ||
1166 | ret = -EINVAL; | 1801 | ret = -EINVAL; |
1167 | goto end; | 1802 | goto end; |
1168 | } | 1803 | } |
1169 | 1804 | ||
1170 | /* Find a corresponding line */ | 1805 | /* Find a corresponding line (filename and lineno) */ |
1171 | line = dwarf_getsrc_die(&cudie, (Dwarf_Addr)addr); | 1806 | cu_find_lineinfo(&cudie, addr, &fname, &lineno); |
1172 | if (line) { | 1807 | /* Don't care whether it failed or not */ |
1173 | if (dwarf_lineaddr(line, &laddr) == 0 && | ||
1174 | (Dwarf_Addr)addr == laddr && | ||
1175 | dwarf_lineno(line, &lineno) == 0) { | ||
1176 | tmp = dwarf_linesrc(line, NULL, NULL); | ||
1177 | if (tmp) { | ||
1178 | ppt->line = lineno; | ||
1179 | ppt->file = strdup(tmp); | ||
1180 | if (ppt->file == NULL) { | ||
1181 | ret = -ENOMEM; | ||
1182 | goto end; | ||
1183 | } | ||
1184 | found = true; | ||
1185 | } | ||
1186 | } | ||
1187 | } | ||
1188 | 1808 | ||
1189 | /* Find a corresponding function */ | 1809 | /* Find a corresponding function (name, baseline and baseaddr) */ |
1190 | if (die_find_real_subprogram(&cudie, (Dwarf_Addr)addr, &spdie)) { | 1810 | if (die_find_real_subprogram(&cudie, (Dwarf_Addr)addr, &spdie)) { |
1811 | /* Get function entry information */ | ||
1191 | tmp = dwarf_diename(&spdie); | 1812 | tmp = dwarf_diename(&spdie); |
1192 | if (!tmp || dwarf_entrypc(&spdie, &eaddr) != 0) | 1813 | if (!tmp || |
1193 | goto end; | 1814 | dwarf_entrypc(&spdie, &baseaddr) != 0 || |
1194 | 1815 | dwarf_decl_line(&spdie, &baseline) != 0) | |
1195 | if (ppt->line) { | 1816 | goto post; |
1196 | if (die_find_inlinefunc(&spdie, (Dwarf_Addr)addr, | 1817 | func = tmp; |
1197 | &indie)) { | 1818 | |
1198 | /* addr in an inline function */ | 1819 | if (addr == (unsigned long)baseaddr) |
1820 | /* Function entry - Relative line number is 0 */ | ||
1821 | lineno = baseline; | ||
1822 | else if (die_find_inlinefunc(&spdie, (Dwarf_Addr)addr, | ||
1823 | &indie)) { | ||
1824 | if (dwarf_entrypc(&indie, &_addr) == 0 && | ||
1825 | _addr == addr) | ||
1826 | /* | ||
1827 | * addr is at an inline function entry. | ||
1828 | * In this case, lineno should be the call-site | ||
1829 | * line number. | ||
1830 | */ | ||
1831 | lineno = die_get_call_lineno(&indie); | ||
1832 | else { | ||
1833 | /* | ||
1834 | * addr is in an inline function body. | ||
1835 | * Since lineno points one of the lines | ||
1836 | * of the inline function, baseline should | ||
1837 | * be the entry line of the inline function. | ||
1838 | */ | ||
1199 | tmp = dwarf_diename(&indie); | 1839 | tmp = dwarf_diename(&indie); |
1200 | if (!tmp) | 1840 | if (tmp && |
1201 | goto end; | 1841 | dwarf_decl_line(&spdie, &baseline) == 0) |
1202 | ret = dwarf_decl_line(&indie, &lineno); | 1842 | func = tmp; |
1203 | } else { | ||
1204 | if (eaddr == addr) { /* Function entry */ | ||
1205 | lineno = ppt->line; | ||
1206 | ret = 0; | ||
1207 | } else | ||
1208 | ret = dwarf_decl_line(&spdie, &lineno); | ||
1209 | } | ||
1210 | if (ret == 0) { | ||
1211 | /* Make a relative line number */ | ||
1212 | ppt->line -= lineno; | ||
1213 | goto found; | ||
1214 | } | 1843 | } |
1215 | } | 1844 | } |
1216 | /* We don't have a line number, let's use offset */ | 1845 | } |
1217 | ppt->offset = addr - (unsigned long)eaddr; | 1846 | |
1218 | found: | 1847 | post: |
1219 | ppt->function = strdup(tmp); | 1848 | /* Make a relative line number or an offset */ |
1849 | if (lineno) | ||
1850 | ppt->line = lineno - baseline; | ||
1851 | else if (func) | ||
1852 | ppt->offset = addr - (unsigned long)baseaddr; | ||
1853 | |||
1854 | /* Duplicate strings */ | ||
1855 | if (func) { | ||
1856 | ppt->function = strdup(func); | ||
1220 | if (ppt->function == NULL) { | 1857 | if (ppt->function == NULL) { |
1221 | ret = -ENOMEM; | 1858 | ret = -ENOMEM; |
1222 | goto end; | 1859 | goto end; |
1223 | } | 1860 | } |
1224 | found = true; | ||
1225 | } | 1861 | } |
1226 | 1862 | if (fname) { | |
1863 | ppt->file = strdup(fname); | ||
1864 | if (ppt->file == NULL) { | ||
1865 | if (ppt->function) { | ||
1866 | free(ppt->function); | ||
1867 | ppt->function = NULL; | ||
1868 | } | ||
1869 | ret = -ENOMEM; | ||
1870 | goto end; | ||
1871 | } | ||
1872 | } | ||
1227 | end: | 1873 | end: |
1228 | dwarf_end(dbg); | 1874 | if (dwfl) |
1229 | if (ret >= 0) | 1875 | dwfl_end(dwfl); |
1230 | ret = found ? 1 : 0; | 1876 | if (ret == 0 && (fname || func)) |
1877 | ret = 1; /* Found a point */ | ||
1231 | return ret; | 1878 | return ret; |
1232 | } | 1879 | } |
1233 | 1880 | ||
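The post: label reduces the reverse lookup to one of two encodings: a line number relative to the function's declaration line when line information was resolved, or a raw byte offset from the function entry otherwise. A toy illustration with made-up numbers (these are not values from the patch):

	#include <stdio.h>

	int main(void)
	{
		unsigned long addr     = 0xa3c0;	/* probed address           */
		unsigned long baseaddr = 0xa390;	/* dwarf_entrypc() of func  */
		int lineno = 1853, baseline = 1840;	/* resolved / declared line */

		if (lineno)				/* line info available      */
			printf("func:%d\n", lineno - baseline);	  /* func:13   */
		else					/* fall back to an offset   */
			printf("func+0x%lx\n", addr - baseaddr);  /* func+0x30 */
		return 0;
	}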
@@ -1244,91 +1891,28 @@ static int line_range_add_line(const char *src, unsigned int lineno, | |||
1244 | return line_list__add_line(&lr->line_list, lineno); | 1891 | return line_list__add_line(&lr->line_list, lineno); |
1245 | } | 1892 | } |
1246 | 1893 | ||
1247 | /* Search function declaration lines */ | 1894 | static int line_range_walk_cb(const char *fname, int lineno, |
1248 | static int line_range_funcdecl_cb(Dwarf_Die *sp_die, void *data) | 1895 | Dwarf_Addr addr __used, |
1896 | void *data) | ||
1249 | { | 1897 | { |
1250 | struct dwarf_callback_param *param = data; | 1898 | struct line_finder *lf = data; |
1251 | struct line_finder *lf = param->data; | ||
1252 | const char *src; | ||
1253 | int lineno; | ||
1254 | 1899 | ||
1255 | src = dwarf_decl_file(sp_die); | 1900 | if ((strtailcmp(fname, lf->fname) != 0) || |
1256 | if (src && strtailcmp(src, lf->fname) != 0) | ||
1257 | return DWARF_CB_OK; | ||
1258 | |||
1259 | if (dwarf_decl_line(sp_die, &lineno) != 0 || | ||
1260 | (lf->lno_s > lineno || lf->lno_e < lineno)) | 1901 | (lf->lno_s > lineno || lf->lno_e < lineno)) |
1261 | return DWARF_CB_OK; | 1902 | return 0; |
1262 | 1903 | ||
1263 | param->retval = line_range_add_line(src, lineno, lf->lr); | 1904 | if (line_range_add_line(fname, lineno, lf->lr) < 0) |
1264 | if (param->retval < 0) | 1905 | return -EINVAL; |
1265 | return DWARF_CB_ABORT; | ||
1266 | return DWARF_CB_OK; | ||
1267 | } | ||
1268 | 1906 | ||
1269 | static int find_line_range_func_decl_lines(struct line_finder *lf) | 1907 | return 0; |
1270 | { | ||
1271 | struct dwarf_callback_param param = {.data = (void *)lf, .retval = 0}; | ||
1272 | dwarf_getfuncs(&lf->cu_die, line_range_funcdecl_cb, ¶m, 0); | ||
1273 | return param.retval; | ||
1274 | } | 1908 | } |
1275 | 1909 | ||
1276 | /* Find line range from its line number */ | 1910 | /* Find line range from its line number */ |
1277 | static int find_line_range_by_line(Dwarf_Die *sp_die, struct line_finder *lf) | 1911 | static int find_line_range_by_line(Dwarf_Die *sp_die, struct line_finder *lf) |
1278 | { | 1912 | { |
1279 | Dwarf_Lines *lines; | 1913 | int ret; |
1280 | Dwarf_Line *line; | ||
1281 | size_t nlines, i; | ||
1282 | Dwarf_Addr addr; | ||
1283 | int lineno, ret = 0; | ||
1284 | const char *src; | ||
1285 | Dwarf_Die die_mem; | ||
1286 | |||
1287 | line_list__init(&lf->lr->line_list); | ||
1288 | if (dwarf_getsrclines(&lf->cu_die, &lines, &nlines) != 0) { | ||
1289 | pr_warning("No source lines found in this CU.\n"); | ||
1290 | return -ENOENT; | ||
1291 | } | ||
1292 | |||
1293 | /* Search probable lines on lines list */ | ||
1294 | for (i = 0; i < nlines; i++) { | ||
1295 | line = dwarf_onesrcline(lines, i); | ||
1296 | if (dwarf_lineno(line, &lineno) != 0 || | ||
1297 | (lf->lno_s > lineno || lf->lno_e < lineno)) | ||
1298 | continue; | ||
1299 | |||
1300 | if (sp_die) { | ||
1301 | /* Address filtering 1: does sp_die include addr? */ | ||
1302 | if (dwarf_lineaddr(line, &addr) != 0 || | ||
1303 | !dwarf_haspc(sp_die, addr)) | ||
1304 | continue; | ||
1305 | |||
1306 | /* Address filtering 2: No child include addr? */ | ||
1307 | if (die_find_inlinefunc(sp_die, addr, &die_mem)) | ||
1308 | continue; | ||
1309 | } | ||
1310 | |||
1311 | /* TODO: Get fileno from line, but how? */ | ||
1312 | src = dwarf_linesrc(line, NULL, NULL); | ||
1313 | if (strtailcmp(src, lf->fname) != 0) | ||
1314 | continue; | ||
1315 | 1914 | ||
1316 | ret = line_range_add_line(src, lineno, lf->lr); | 1915 | ret = die_walk_lines(sp_die ?: &lf->cu_die, line_range_walk_cb, lf); |
1317 | if (ret < 0) | ||
1318 | return ret; | ||
1319 | } | ||
1320 | |||
1321 | /* | ||
1322 | * Dwarf lines doesn't include function declarations. We have to | ||
1323 | * check functions list or given function. | ||
1324 | */ | ||
1325 | if (sp_die) { | ||
1326 | src = dwarf_decl_file(sp_die); | ||
1327 | if (src && dwarf_decl_line(sp_die, &lineno) == 0 && | ||
1328 | (lf->lno_s <= lineno && lf->lno_e >= lineno)) | ||
1329 | ret = line_range_add_line(src, lineno, lf->lr); | ||
1330 | } else | ||
1331 | ret = find_line_range_func_decl_lines(lf); | ||
1332 | 1916 | ||
1333 | /* Update status */ | 1917 | /* Update status */ |
1334 | if (ret >= 0) | 1918 | if (ret >= 0) |
@@ -1358,6 +1942,10 @@ static int line_range_search_cb(Dwarf_Die *sp_die, void *data) | |||
1358 | struct line_finder *lf = param->data; | 1942 | struct line_finder *lf = param->data; |
1359 | struct line_range *lr = lf->lr; | 1943 | struct line_range *lr = lf->lr; |
1360 | 1944 | ||
1945 | /* Check declared file */ | ||
1946 | if (lr->file && strtailcmp(lr->file, dwarf_decl_file(sp_die))) | ||
1947 | return DWARF_CB_OK; | ||
1948 | |||
1361 | if (dwarf_tag(sp_die) == DW_TAG_subprogram && | 1949 | if (dwarf_tag(sp_die) == DW_TAG_subprogram && |
1362 | die_compare_name(sp_die, lr->function)) { | 1950 | die_compare_name(sp_die, lr->function)) { |
1363 | lf->fname = dwarf_decl_file(sp_die); | 1951 | lf->fname = dwarf_decl_file(sp_die); |
@@ -1401,16 +1989,35 @@ int find_line_range(int fd, struct line_range *lr) | |||
1401 | Dwarf_Off off = 0, noff; | 1989 | Dwarf_Off off = 0, noff; |
1402 | size_t cuhl; | 1990 | size_t cuhl; |
1403 | Dwarf_Die *diep; | 1991 | Dwarf_Die *diep; |
1404 | Dwarf *dbg; | 1992 | Dwarf *dbg = NULL; |
1993 | Dwfl *dwfl; | ||
1994 | Dwarf_Addr bias; /* Currently ignored */ | ||
1405 | const char *comp_dir; | 1995 | const char *comp_dir; |
1406 | 1996 | ||
1407 | dbg = dwarf_begin(fd, DWARF_C_READ); | 1997 | dbg = dwfl_init_offline_dwarf(fd, &dwfl, &bias); |
1408 | if (!dbg) { | 1998 | if (!dbg) { |
1409 | pr_warning("No dwarf info found in the vmlinux - " | 1999 | pr_warning("No debug information found in the vmlinux - " |
1410 | "please rebuild with CONFIG_DEBUG_INFO=y.\n"); | 2000 | "please rebuild with CONFIG_DEBUG_INFO=y.\n"); |
2001 | close(fd); /* Without dwfl_end(), fd isn't closed. */ | ||
1411 | return -EBADF; | 2002 | return -EBADF; |
1412 | } | 2003 | } |
1413 | 2004 | ||
2005 | /* Fastpath: lookup by function name from .debug_pubnames section */ | ||
2006 | if (lr->function) { | ||
2007 | struct pubname_callback_param pubname_param = { | ||
2008 | .function = lr->function, .file = lr->file, | ||
2009 | .cu_die = &lf.cu_die, .sp_die = &lf.sp_die, .found = 0}; | ||
2010 | struct dwarf_callback_param line_range_param = { | ||
2011 | .data = (void *)&lf, .retval = 0}; | ||
2012 | |||
2013 | dwarf_getpubnames(dbg, pubname_search_cb, &pubname_param, 0); | ||
2014 | if (pubname_param.found) { | ||
2015 | line_range_search_cb(&lf.sp_die, &line_range_param); | ||
2016 | if (lf.found) | ||
2017 | goto found; | ||
2018 | } | ||
2019 | } | ||
2020 | |||
1414 | /* Loop on CUs (Compilation Unit) */ | 2021 | /* Loop on CUs (Compilation Unit) */ |
1415 | while (!lf.found && ret >= 0) { | 2022 | while (!lf.found && ret >= 0) { |
1416 | if (dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL) != 0) | 2023 | if (dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL) != 0) |
@@ -1439,6 +2046,7 @@ int find_line_range(int fd, struct line_range *lr) | |||
1439 | off = noff; | 2046 | off = noff; |
1440 | } | 2047 | } |
1441 | 2048 | ||
2049 | found: | ||
1442 | /* Store comp_dir */ | 2050 | /* Store comp_dir */ |
1443 | if (lf.found) { | 2051 | if (lf.found) { |
1444 | comp_dir = cu_get_comp_dir(&lf.cu_die); | 2052 | comp_dir = cu_get_comp_dir(&lf.cu_die); |
@@ -1450,8 +2058,7 @@ int find_line_range(int fd, struct line_range *lr) | |||
1450 | } | 2058 | } |
1451 | 2059 | ||
1452 | pr_debug("path: %s\n", lr->path); | 2060 | pr_debug("path: %s\n", lr->path); |
1453 | dwarf_end(dbg); | 2061 | dwfl_end(dwfl); |
1454 | |||
1455 | return (ret < 0) ? ret : lf.found; | 2062 | return (ret < 0) ? ret : lf.found; |
1456 | } | 2063 | } |
1457 | 2064 | ||
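Both find_probes() and find_line_range() now go through dwfl_init_offline_dwarf() instead of calling dwarf_begin() on the raw fd; that helper is added elsewhere in this series and is not shown here. In rough outline it wraps the standard elfutils offline-reporting sequence; the sketch below is an approximation under that assumption, not the actual perf implementation:

	#include <elfutils/libdwfl.h>

	static const Dwfl_Callbacks offline_callbacks = {
		.find_debuginfo  = dwfl_standard_find_debuginfo,
		.debuginfo_path  = NULL,
		.section_address = dwfl_offline_section_address,
	};

	/* Open DWARF for an already-opened object file descriptor. */
	static Dwarf *init_offline_dwarf(int fd, Dwfl **dwflp, Dwarf_Addr *bias)
	{
		Dwfl_Module *mod;
		Dwfl *dwfl = dwfl_begin(&offline_callbacks);

		if (!dwfl)
			return NULL;
		/* dwfl takes over the descriptor once reporting succeeds */
		mod = dwfl_report_offline(dwfl, "", "", fd);
		if (!mod)
			goto error;
		dwfl_report_end(dwfl, NULL, NULL);
		*dwflp = dwfl;
		return dwfl_module_getdwarf(mod, bias);
	error:
		dwfl_end(dwfl);
		return NULL;
	}

Because the descriptor is only handed over to libdwfl on success, the callers above still close(fd) themselves when the helper returns NULL, as the comments in find_probes() and find_line_range() note.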
diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h index 4507d519f183..605730a366db 100644 --- a/tools/perf/util/probe-finder.h +++ b/tools/perf/util/probe-finder.h | |||
@@ -22,26 +22,34 @@ extern int find_probe_trace_events(int fd, struct perf_probe_event *pev, | |||
22 | int max_tevs); | 22 | int max_tevs); |
23 | 23 | ||
24 | /* Find a perf_probe_point from debuginfo */ | 24 | /* Find a perf_probe_point from debuginfo */ |
25 | extern int find_perf_probe_point(int fd, unsigned long addr, | 25 | extern int find_perf_probe_point(unsigned long addr, |
26 | struct perf_probe_point *ppt); | 26 | struct perf_probe_point *ppt); |
27 | 27 | ||
28 | /* Find a line range */ | ||
28 | extern int find_line_range(int fd, struct line_range *lr); | 29 | extern int find_line_range(int fd, struct line_range *lr); |
29 | 30 | ||
31 | /* Find available variables */ | ||
32 | extern int find_available_vars_at(int fd, struct perf_probe_event *pev, | ||
33 | struct variable_list **vls, int max_points, | ||
34 | bool externs); | ||
35 | |||
30 | #include <dwarf.h> | 36 | #include <dwarf.h> |
31 | #include <libdw.h> | 37 | #include <elfutils/libdw.h> |
32 | #include <version.h> | 38 | #include <elfutils/libdwfl.h> |
39 | #include <elfutils/version.h> | ||
33 | 40 | ||
34 | struct probe_finder { | 41 | struct probe_finder { |
35 | struct perf_probe_event *pev; /* Target probe event */ | 42 | struct perf_probe_event *pev; /* Target probe event */ |
36 | struct probe_trace_event *tevs; /* Result trace events */ | 43 | |
37 | int ntevs; /* Number of trace events */ | 44 | /* Callback when a probe point is found */ |
38 | int max_tevs; /* Max number of trace events */ | 45 | int (*callback)(Dwarf_Die *sp_die, struct probe_finder *pf); |
39 | 46 | ||
40 | /* For function searching */ | 47 | /* For function searching */ |
41 | int lno; /* Line number */ | 48 | int lno; /* Line number */ |
42 | Dwarf_Addr addr; /* Address */ | 49 | Dwarf_Addr addr; /* Address */ |
43 | const char *fname; /* Real file name */ | 50 | const char *fname; /* Real file name */ |
44 | Dwarf_Die cu_die; /* Current CU */ | 51 | Dwarf_Die cu_die; /* Current CU */ |
52 | Dwarf_Die sp_die; | ||
45 | struct list_head lcache; /* Line cache for lazy match */ | 53 | struct list_head lcache; /* Line cache for lazy match */ |
46 | 54 | ||
47 | /* For variable searching */ | 55 | /* For variable searching */ |
@@ -53,6 +61,22 @@ struct probe_finder { | |||
53 | struct probe_trace_arg *tvar; /* Current result variable */ | 61 | struct probe_trace_arg *tvar; /* Current result variable */ |
54 | }; | 62 | }; |
55 | 63 | ||
64 | struct trace_event_finder { | ||
65 | struct probe_finder pf; | ||
66 | struct probe_trace_event *tevs; /* Found trace events */ | ||
67 | int ntevs; /* Number of trace events */ | ||
68 | int max_tevs; /* Max number of trace events */ | ||
69 | }; | ||
70 | |||
71 | struct available_var_finder { | ||
72 | struct probe_finder pf; | ||
73 | struct variable_list *vls; /* Found variable lists */ | ||
74 | int nvls; /* Number of variable lists */ | ||
75 | int max_vls; /* Max no. of variable lists */ | ||
76 | bool externs; /* Find external vars too */ | ||
77 | bool child; /* Search child scopes */ | ||
78 | }; | ||
79 | |||
56 | struct line_finder { | 80 | struct line_finder { |
57 | struct line_range *lr; /* Target line range */ | 81 | struct line_range *lr; /* Target line range */ |
58 | 82 | ||
@@ -60,6 +84,7 @@ struct line_finder { | |||
60 | int lno_s; /* Start line number */ | 84 | int lno_s; /* Start line number */ |
61 | int lno_e; /* End line number */ | 85 | int lno_e; /* End line number */ |
62 | Dwarf_Die cu_die; /* Current CU */ | 86 | Dwarf_Die cu_die; /* Current CU */ |
87 | Dwarf_Die sp_die; | ||
63 | int found; | 88 | int found; |
64 | }; | 89 | }; |
65 | 90 | ||
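The header change is the core of the refactoring: struct probe_finder now carries only the search state plus a callback, and the two consumers embed it as a member so the callback can recover the outer object with container_of(). A minimal, self-contained sketch of that embedding pattern (names are illustrative, not perf's; the kernel tree provides its own container_of()):

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct finder {
		int (*callback)(struct finder *f);	/* invoked per hit */
		int hits;				/* shared search state */
	};

	struct counting_finder {
		struct finder f;			/* embedded base */
		int max;				/* user-specific state */
	};

	static int count_cb(struct finder *f)
	{
		struct counting_finder *cf =
			container_of(f, struct counting_finder, f);

		if (f->hits >= cf->max)
			return -1;
		f->hits++;
		return 0;
	}

	int main(void)
	{
		struct counting_finder cf = {
			.f = { .callback = count_cb }, .max = 2 };

		while (cf.f.callback(&cf.f) == 0)
			;
		printf("hits=%d\n", cf.f.hits);	/* prints hits=2 */
		return 0;
	}

This is the same idiom add_probe_trace_event() and add_available_vars() use in probe-finder.c to get back at their tevs/vls arrays from the shared probe_finder pointer.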
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c new file mode 100644 index 000000000000..a9ac0504aabd --- /dev/null +++ b/tools/perf/util/python.c | |||
@@ -0,0 +1,905 @@ | |||
1 | #include <Python.h> | ||
2 | #include <structmember.h> | ||
3 | #include <inttypes.h> | ||
4 | #include <poll.h> | ||
5 | #include "evlist.h" | ||
6 | #include "evsel.h" | ||
7 | #include "event.h" | ||
8 | #include "cpumap.h" | ||
9 | #include "thread_map.h" | ||
10 | |||
11 | /* Define PyVarObject_HEAD_INIT for python 2.5 */ | ||
12 | #ifndef PyVarObject_HEAD_INIT | ||
13 | # define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size, | ||
14 | #endif | ||
15 | |||
16 | struct throttle_event { | ||
17 | struct perf_event_header header; | ||
18 | u64 time; | ||
19 | u64 id; | ||
20 | u64 stream_id; | ||
21 | }; | ||
22 | |||
23 | PyMODINIT_FUNC initperf(void); | ||
24 | |||
25 | #define member_def(type, member, ptype, help) \ | ||
26 | { #member, ptype, \ | ||
27 | offsetof(struct pyrf_event, event) + offsetof(struct type, member), \ | ||
28 | 0, help } | ||
29 | |||
30 | #define sample_member_def(name, member, ptype, help) \ | ||
31 | { #name, ptype, \ | ||
32 | offsetof(struct pyrf_event, sample) + offsetof(struct perf_sample, member), \ | ||
33 | 0, help } | ||
34 | |||
35 | struct pyrf_event { | ||
36 | PyObject_HEAD | ||
37 | struct perf_sample sample; | ||
38 | union perf_event event; | ||
39 | }; | ||
40 | |||
41 | #define sample_members \ | ||
42 | sample_member_def(sample_ip, ip, T_ULONGLONG, "event ip"), \ | ||
43 | sample_member_def(sample_pid, pid, T_INT, "event pid"), \ | ||
44 | sample_member_def(sample_tid, tid, T_INT, "event tid"), \ | ||
45 | sample_member_def(sample_time, time, T_ULONGLONG, "event timestamp"), \ | ||
46 | sample_member_def(sample_addr, addr, T_ULONGLONG, "event addr"), \ | ||
47 | sample_member_def(sample_id, id, T_ULONGLONG, "event id"), \ | ||
48 | sample_member_def(sample_stream_id, stream_id, T_ULONGLONG, "event stream id"), \ | ||
49 | sample_member_def(sample_period, period, T_ULONGLONG, "event period"), \ | ||
50 | sample_member_def(sample_cpu, cpu, T_UINT, "event cpu"), | ||
51 | |||
52 | static char pyrf_mmap_event__doc[] = PyDoc_STR("perf mmap event object."); | ||
53 | |||
54 | static PyMemberDef pyrf_mmap_event__members[] = { | ||
55 | sample_members | ||
56 | member_def(perf_event_header, type, T_UINT, "event type"), | ||
57 | member_def(mmap_event, pid, T_UINT, "event pid"), | ||
58 | member_def(mmap_event, tid, T_UINT, "event tid"), | ||
59 | member_def(mmap_event, start, T_ULONGLONG, "start of the map"), | ||
60 | member_def(mmap_event, len, T_ULONGLONG, "map length"), | ||
61 | member_def(mmap_event, pgoff, T_ULONGLONG, "page offset"), | ||
62 | member_def(mmap_event, filename, T_STRING_INPLACE, "backing store"), | ||
63 | { .name = NULL, }, | ||
64 | }; | ||
65 | |||
66 | static PyObject *pyrf_mmap_event__repr(struct pyrf_event *pevent) | ||
67 | { | ||
68 | PyObject *ret; | ||
69 | char *s; | ||
70 | |||
71 | if (asprintf(&s, "{ type: mmap, pid: %u, tid: %u, start: %#" PRIx64 ", " | ||
72 | "length: %#" PRIx64 ", offset: %#" PRIx64 ", " | ||
73 | "filename: %s }", | ||
74 | pevent->event.mmap.pid, pevent->event.mmap.tid, | ||
75 | pevent->event.mmap.start, pevent->event.mmap.len, | ||
76 | pevent->event.mmap.pgoff, pevent->event.mmap.filename) < 0) { | ||
77 | ret = PyErr_NoMemory(); | ||
78 | } else { | ||
79 | ret = PyString_FromString(s); | ||
80 | free(s); | ||
81 | } | ||
82 | return ret; | ||
83 | } | ||
84 | |||
85 | static PyTypeObject pyrf_mmap_event__type = { | ||
86 | PyVarObject_HEAD_INIT(NULL, 0) | ||
87 | .tp_name = "perf.mmap_event", | ||
88 | .tp_basicsize = sizeof(struct pyrf_event), | ||
89 | .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, | ||
90 | .tp_doc = pyrf_mmap_event__doc, | ||
91 | .tp_members = pyrf_mmap_event__members, | ||
92 | .tp_repr = (reprfunc)pyrf_mmap_event__repr, | ||
93 | }; | ||
94 | |||
95 | static char pyrf_task_event__doc[] = PyDoc_STR("perf task (fork/exit) event object."); | ||
96 | |||
97 | static PyMemberDef pyrf_task_event__members[] = { | ||
98 | sample_members | ||
99 | member_def(perf_event_header, type, T_UINT, "event type"), | ||
100 | member_def(fork_event, pid, T_UINT, "event pid"), | ||
101 | member_def(fork_event, ppid, T_UINT, "event ppid"), | ||
102 | member_def(fork_event, tid, T_UINT, "event tid"), | ||
103 | member_def(fork_event, ptid, T_UINT, "event ptid"), | ||
104 | member_def(fork_event, time, T_ULONGLONG, "timestamp"), | ||
105 | { .name = NULL, }, | ||
106 | }; | ||
107 | |||
108 | static PyObject *pyrf_task_event__repr(struct pyrf_event *pevent) | ||
109 | { | ||
110 | return PyString_FromFormat("{ type: %s, pid: %u, ppid: %u, tid: %u, " | ||
111 | "ptid: %u, time: %" PRIu64 "}", | ||
112 | pevent->event.header.type == PERF_RECORD_FORK ? "fork" : "exit", | ||
113 | pevent->event.fork.pid, | ||
114 | pevent->event.fork.ppid, | ||
115 | pevent->event.fork.tid, | ||
116 | pevent->event.fork.ptid, | ||
117 | pevent->event.fork.time); | ||
118 | } | ||
119 | |||
120 | static PyTypeObject pyrf_task_event__type = { | ||
121 | PyVarObject_HEAD_INIT(NULL, 0) | ||
122 | .tp_name = "perf.task_event", | ||
123 | .tp_basicsize = sizeof(struct pyrf_event), | ||
124 | .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, | ||
125 | .tp_doc = pyrf_task_event__doc, | ||
126 | .tp_members = pyrf_task_event__members, | ||
127 | .tp_repr = (reprfunc)pyrf_task_event__repr, | ||
128 | }; | ||
129 | |||
130 | static char pyrf_comm_event__doc[] = PyDoc_STR("perf comm event object."); | ||
131 | |||
132 | static PyMemberDef pyrf_comm_event__members[] = { | ||
133 | sample_members | ||
134 | member_def(perf_event_header, type, T_UINT, "event type"), | ||
135 | member_def(comm_event, pid, T_UINT, "event pid"), | ||
136 | member_def(comm_event, tid, T_UINT, "event tid"), | ||
137 | member_def(comm_event, comm, T_STRING_INPLACE, "process name"), | ||
138 | { .name = NULL, }, | ||
139 | }; | ||
140 | |||
141 | static PyObject *pyrf_comm_event__repr(struct pyrf_event *pevent) | ||
142 | { | ||
143 | return PyString_FromFormat("{ type: comm, pid: %u, tid: %u, comm: %s }", | ||
144 | pevent->event.comm.pid, | ||
145 | pevent->event.comm.tid, | ||
146 | pevent->event.comm.comm); | ||
147 | } | ||
148 | |||
149 | static PyTypeObject pyrf_comm_event__type = { | ||
150 | PyVarObject_HEAD_INIT(NULL, 0) | ||
151 | .tp_name = "perf.comm_event", | ||
152 | .tp_basicsize = sizeof(struct pyrf_event), | ||
153 | .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, | ||
154 | .tp_doc = pyrf_comm_event__doc, | ||
155 | .tp_members = pyrf_comm_event__members, | ||
156 | .tp_repr = (reprfunc)pyrf_comm_event__repr, | ||
157 | }; | ||
158 | |||
159 | static char pyrf_throttle_event__doc[] = PyDoc_STR("perf throttle event object."); | ||
160 | |||
161 | static PyMemberDef pyrf_throttle_event__members[] = { | ||
162 | sample_members | ||
163 | member_def(perf_event_header, type, T_UINT, "event type"), | ||
164 | member_def(throttle_event, time, T_ULONGLONG, "timestamp"), | ||
165 | member_def(throttle_event, id, T_ULONGLONG, "event id"), | ||
166 | member_def(throttle_event, stream_id, T_ULONGLONG, "event stream id"), | ||
167 | { .name = NULL, }, | ||
168 | }; | ||
169 | |||
170 | static PyObject *pyrf_throttle_event__repr(struct pyrf_event *pevent) | ||
171 | { | ||
172 | struct throttle_event *te = (struct throttle_event *)(&pevent->event.header + 1); | ||
173 | |||
174 | return PyString_FromFormat("{ type: %sthrottle, time: %" PRIu64 ", id: %" PRIu64 | ||
175 | ", stream_id: %" PRIu64 " }", | ||
176 | pevent->event.header.type == PERF_RECORD_THROTTLE ? "" : "un", | ||
177 | te->time, te->id, te->stream_id); | ||
178 | } | ||
179 | |||
180 | static PyTypeObject pyrf_throttle_event__type = { | ||
181 | PyVarObject_HEAD_INIT(NULL, 0) | ||
182 | .tp_name = "perf.throttle_event", | ||
183 | .tp_basicsize = sizeof(struct pyrf_event), | ||
184 | .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, | ||
185 | .tp_doc = pyrf_throttle_event__doc, | ||
186 | .tp_members = pyrf_throttle_event__members, | ||
187 | .tp_repr = (reprfunc)pyrf_throttle_event__repr, | ||
188 | }; | ||
189 | |||
190 | static int pyrf_event__setup_types(void) | ||
191 | { | ||
192 | int err; | ||
193 | pyrf_mmap_event__type.tp_new = | ||
194 | pyrf_task_event__type.tp_new = | ||
195 | pyrf_comm_event__type.tp_new = | ||
196 | pyrf_throttle_event__type.tp_new = PyType_GenericNew; | ||
197 | err = PyType_Ready(&pyrf_mmap_event__type); | ||
198 | if (err < 0) | ||
199 | goto out; | ||
200 | err = PyType_Ready(&pyrf_task_event__type); | ||
201 | if (err < 0) | ||
202 | goto out; | ||
203 | err = PyType_Ready(&pyrf_comm_event__type); | ||
204 | if (err < 0) | ||
205 | goto out; | ||
206 | err = PyType_Ready(&pyrf_throttle_event__type); | ||
207 | if (err < 0) | ||
208 | goto out; | ||
209 | out: | ||
210 | return err; | ||
211 | } | ||
212 | |||
213 | static PyTypeObject *pyrf_event__type[] = { | ||
214 | [PERF_RECORD_MMAP] = &pyrf_mmap_event__type, | ||
215 | [PERF_RECORD_LOST] = &pyrf_mmap_event__type, | ||
216 | [PERF_RECORD_COMM] = &pyrf_comm_event__type, | ||
217 | [PERF_RECORD_EXIT] = &pyrf_task_event__type, | ||
218 | [PERF_RECORD_THROTTLE] = &pyrf_throttle_event__type, | ||
219 | [PERF_RECORD_UNTHROTTLE] = &pyrf_throttle_event__type, | ||
220 | [PERF_RECORD_FORK] = &pyrf_task_event__type, | ||
221 | [PERF_RECORD_READ] = &pyrf_mmap_event__type, | ||
222 | [PERF_RECORD_SAMPLE] = &pyrf_mmap_event__type, | ||
223 | }; | ||
224 | |||
225 | static PyObject *pyrf_event__new(union perf_event *event) | ||
226 | { | ||
227 | struct pyrf_event *pevent; | ||
228 | PyTypeObject *ptype; | ||
229 | |||
230 | if (event->header.type < PERF_RECORD_MMAP || | ||
231 | event->header.type > PERF_RECORD_SAMPLE) | ||
232 | return NULL; | ||
233 | |||
234 | ptype = pyrf_event__type[event->header.type]; | ||
235 | pevent = PyObject_New(struct pyrf_event, ptype); | ||
236 | if (pevent != NULL) | ||
237 | memcpy(&pevent->event, event, event->header.size); | ||
238 | return (PyObject *)pevent; | ||
239 | } | ||
240 | |||
241 | struct pyrf_cpu_map { | ||
242 | PyObject_HEAD | ||
243 | |||
244 | struct cpu_map *cpus; | ||
245 | }; | ||
246 | |||
247 | static int pyrf_cpu_map__init(struct pyrf_cpu_map *pcpus, | ||
248 | PyObject *args, PyObject *kwargs) | ||
249 | { | ||
250 | static char *kwlist[] = { "cpustr", NULL, NULL, }; | ||
251 | char *cpustr = NULL; | ||
252 | |||
253 | if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|s", | ||
254 | kwlist, &cpustr)) | ||
255 | return -1; | ||
256 | |||
257 | pcpus->cpus = cpu_map__new(cpustr); | ||
258 | if (pcpus->cpus == NULL) | ||
259 | return -1; | ||
260 | return 0; | ||
261 | } | ||
262 | |||
263 | static void pyrf_cpu_map__delete(struct pyrf_cpu_map *pcpus) | ||
264 | { | ||
265 | cpu_map__delete(pcpus->cpus); | ||
266 | pcpus->ob_type->tp_free((PyObject*)pcpus); | ||
267 | } | ||
268 | |||
269 | static Py_ssize_t pyrf_cpu_map__length(PyObject *obj) | ||
270 | { | ||
271 | struct pyrf_cpu_map *pcpus = (void *)obj; | ||
272 | |||
273 | return pcpus->cpus->nr; | ||
274 | } | ||
275 | |||
276 | static PyObject *pyrf_cpu_map__item(PyObject *obj, Py_ssize_t i) | ||
277 | { | ||
278 | struct pyrf_cpu_map *pcpus = (void *)obj; | ||
279 | |||
280 | if (i >= pcpus->cpus->nr) | ||
281 | return NULL; | ||
282 | |||
283 | return Py_BuildValue("i", pcpus->cpus->map[i]); | ||
284 | } | ||
285 | |||
286 | static PySequenceMethods pyrf_cpu_map__sequence_methods = { | ||
287 | .sq_length = pyrf_cpu_map__length, | ||
288 | .sq_item = pyrf_cpu_map__item, | ||
289 | }; | ||
290 | |||
291 | static char pyrf_cpu_map__doc[] = PyDoc_STR("cpu map object."); | ||
292 | |||
293 | static PyTypeObject pyrf_cpu_map__type = { | ||
294 | PyVarObject_HEAD_INIT(NULL, 0) | ||
295 | .tp_name = "perf.cpu_map", | ||
296 | .tp_basicsize = sizeof(struct pyrf_cpu_map), | ||
297 | .tp_dealloc = (destructor)pyrf_cpu_map__delete, | ||
298 | .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, | ||
299 | .tp_doc = pyrf_cpu_map__doc, | ||
300 | .tp_as_sequence = &pyrf_cpu_map__sequence_methods, | ||
301 | .tp_init = (initproc)pyrf_cpu_map__init, | ||
302 | }; | ||
303 | |||
304 | static int pyrf_cpu_map__setup_types(void) | ||
305 | { | ||
306 | pyrf_cpu_map__type.tp_new = PyType_GenericNew; | ||
307 | return PyType_Ready(&pyrf_cpu_map__type); | ||
308 | } | ||
309 | |||
310 | struct pyrf_thread_map { | ||
311 | PyObject_HEAD | ||
312 | |||
313 | struct thread_map *threads; | ||
314 | }; | ||
315 | |||
316 | static int pyrf_thread_map__init(struct pyrf_thread_map *pthreads, | ||
317 | PyObject *args, PyObject *kwargs) | ||
318 | { | ||
319 | static char *kwlist[] = { "pid", "tid", NULL, NULL, }; | ||
320 | int pid = -1, tid = -1; | ||
321 | |||
322 | if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii", | ||
323 | kwlist, &pid, &tid)) | ||
324 | return -1; | ||
325 | |||
326 | pthreads->threads = thread_map__new(pid, tid); | ||
327 | if (pthreads->threads == NULL) | ||
328 | return -1; | ||
329 | return 0; | ||
330 | } | ||
331 | |||
332 | static void pyrf_thread_map__delete(struct pyrf_thread_map *pthreads) | ||
333 | { | ||
334 | thread_map__delete(pthreads->threads); | ||
335 | pthreads->ob_type->tp_free((PyObject*)pthreads); | ||
336 | } | ||
337 | |||
338 | static Py_ssize_t pyrf_thread_map__length(PyObject *obj) | ||
339 | { | ||
340 | struct pyrf_thread_map *pthreads = (void *)obj; | ||
341 | |||
342 | return pthreads->threads->nr; | ||
343 | } | ||
344 | |||
345 | static PyObject *pyrf_thread_map__item(PyObject *obj, Py_ssize_t i) | ||
346 | { | ||
347 | struct pyrf_thread_map *pthreads = (void *)obj; | ||
348 | |||
349 | if (i >= pthreads->threads->nr) | ||
350 | return NULL; | ||
351 | |||
352 | return Py_BuildValue("i", pthreads->threads->map[i]); | ||
353 | } | ||
354 | |||
355 | static PySequenceMethods pyrf_thread_map__sequence_methods = { | ||
356 | .sq_length = pyrf_thread_map__length, | ||
357 | .sq_item = pyrf_thread_map__item, | ||
358 | }; | ||
359 | |||
360 | static char pyrf_thread_map__doc[] = PyDoc_STR("thread map object."); | ||
361 | |||
362 | static PyTypeObject pyrf_thread_map__type = { | ||
363 | PyVarObject_HEAD_INIT(NULL, 0) | ||
364 | .tp_name = "perf.thread_map", | ||
365 | .tp_basicsize = sizeof(struct pyrf_thread_map), | ||
366 | .tp_dealloc = (destructor)pyrf_thread_map__delete, | ||
367 | .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, | ||
368 | .tp_doc = pyrf_thread_map__doc, | ||
369 | .tp_as_sequence = &pyrf_thread_map__sequence_methods, | ||
370 | .tp_init = (initproc)pyrf_thread_map__init, | ||
371 | }; | ||
372 | |||
373 | static int pyrf_thread_map__setup_types(void) | ||
374 | { | ||
375 | pyrf_thread_map__type.tp_new = PyType_GenericNew; | ||
376 | return PyType_Ready(&pyrf_thread_map__type); | ||
377 | } | ||
378 | |||
379 | struct pyrf_evsel { | ||
380 | PyObject_HEAD | ||
381 | |||
382 | struct perf_evsel evsel; | ||
383 | }; | ||
384 | |||
385 | static int pyrf_evsel__init(struct pyrf_evsel *pevsel, | ||
386 | PyObject *args, PyObject *kwargs) | ||
387 | { | ||
388 | struct perf_event_attr attr = { | ||
389 | .type = PERF_TYPE_HARDWARE, | ||
390 | .config = PERF_COUNT_HW_CPU_CYCLES, | ||
391 | .sample_type = PERF_SAMPLE_PERIOD | PERF_SAMPLE_TID, | ||
392 | }; | ||
393 | static char *kwlist[] = { | ||
394 | "type", | ||
395 | "config", | ||
396 | "sample_freq", | ||
397 | "sample_period", | ||
398 | "sample_type", | ||
399 | "read_format", | ||
400 | "disabled", | ||
401 | "inherit", | ||
402 | "pinned", | ||
403 | "exclusive", | ||
404 | "exclude_user", | ||
405 | "exclude_kernel", | ||
406 | "exclude_hv", | ||
407 | "exclude_idle", | ||
408 | "mmap", | ||
409 | "comm", | ||
410 | "freq", | ||
411 | "inherit_stat", | ||
412 | "enable_on_exec", | ||
413 | "task", | ||
414 | "watermark", | ||
415 | "precise_ip", | ||
416 | "mmap_data", | ||
417 | "sample_id_all", | ||
418 | "wakeup_events", | ||
419 | "bp_type", | ||
420 | "bp_addr", | ||
421 | "bp_len", NULL, NULL, }; | ||
422 | u64 sample_period = 0; | ||
423 | u32 disabled = 0, | ||
424 | inherit = 0, | ||
425 | pinned = 0, | ||
426 | exclusive = 0, | ||
427 | exclude_user = 0, | ||
428 | exclude_kernel = 0, | ||
429 | exclude_hv = 0, | ||
430 | exclude_idle = 0, | ||
431 | mmap = 0, | ||
432 | comm = 0, | ||
433 | freq = 1, | ||
434 | inherit_stat = 0, | ||
435 | enable_on_exec = 0, | ||
436 | task = 0, | ||
437 | watermark = 0, | ||
438 | precise_ip = 0, | ||
439 | mmap_data = 0, | ||
440 | sample_id_all = 1; | ||
441 | int idx = 0; | ||
442 | |||
443 | if (!PyArg_ParseTupleAndKeywords(args, kwargs, | ||
444 | "|iKiKKiiiiiiiiiiiiiiiiiiiiiKK", kwlist, | ||
445 | &attr.type, &attr.config, &attr.sample_freq, | ||
446 | &sample_period, &attr.sample_type, | ||
447 | &attr.read_format, &disabled, &inherit, | ||
448 | &pinned, &exclusive, &exclude_user, | ||
449 | &exclude_kernel, &exclude_hv, &exclude_idle, | ||
450 | &mmap, &comm, &freq, &inherit_stat, | ||
451 | &enable_on_exec, &task, &watermark, | ||
452 | &precise_ip, &mmap_data, &sample_id_all, | ||
453 | &attr.wakeup_events, &attr.bp_type, | ||
454 | &attr.bp_addr, &attr.bp_len, &idx)) | ||
455 | return -1; | ||
456 | |||
457 | /* union... */ | ||
458 | if (sample_period != 0) { | ||
459 | if (attr.sample_freq != 0) | ||
460 | return -1; /* FIXME: throw right exception */ | ||
461 | attr.sample_period = sample_period; | ||
462 | } | ||
463 | |||
464 | /* Bitfields */ | ||
465 | attr.disabled = disabled; | ||
466 | attr.inherit = inherit; | ||
467 | attr.pinned = pinned; | ||
468 | attr.exclusive = exclusive; | ||
469 | attr.exclude_user = exclude_user; | ||
470 | attr.exclude_kernel = exclude_kernel; | ||
471 | attr.exclude_hv = exclude_hv; | ||
472 | attr.exclude_idle = exclude_idle; | ||
473 | attr.mmap = mmap; | ||
474 | attr.comm = comm; | ||
475 | attr.freq = freq; | ||
476 | attr.inherit_stat = inherit_stat; | ||
477 | attr.enable_on_exec = enable_on_exec; | ||
478 | attr.task = task; | ||
479 | attr.watermark = watermark; | ||
480 | attr.precise_ip = precise_ip; | ||
481 | attr.mmap_data = mmap_data; | ||
482 | attr.sample_id_all = sample_id_all; | ||
483 | |||
484 | perf_evsel__init(&pevsel->evsel, &attr, idx); | ||
485 | return 0; | ||
486 | } | ||
487 | |||
488 | static void pyrf_evsel__delete(struct pyrf_evsel *pevsel) | ||
489 | { | ||
490 | perf_evsel__exit(&pevsel->evsel); | ||
491 | pevsel->ob_type->tp_free((PyObject*)pevsel); | ||
492 | } | ||
493 | |||
494 | static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel, | ||
495 | PyObject *args, PyObject *kwargs) | ||
496 | { | ||
497 | struct perf_evsel *evsel = &pevsel->evsel; | ||
498 | struct cpu_map *cpus = NULL; | ||
499 | struct thread_map *threads = NULL; | ||
500 | PyObject *pcpus = NULL, *pthreads = NULL; | ||
501 | int group = 0, inherit = 0; | ||
502 | static char *kwlist[] = {"cpus", "threads", "group", "inherit", NULL, NULL}; | ||
503 | |||
504 | if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist, | ||
505 | &pcpus, &pthreads, &group, &inherit)) | ||
506 | return NULL; | ||
507 | |||
508 | if (pthreads != NULL) | ||
509 | threads = ((struct pyrf_thread_map *)pthreads)->threads; | ||
510 | |||
511 | if (pcpus != NULL) | ||
512 | cpus = ((struct pyrf_cpu_map *)pcpus)->cpus; | ||
513 | |||
514 | evsel->attr.inherit = inherit; | ||
515 | if (perf_evsel__open(evsel, cpus, threads, group) < 0) { | ||
516 | PyErr_SetFromErrno(PyExc_OSError); | ||
517 | return NULL; | ||
518 | } | ||
519 | |||
520 | Py_INCREF(Py_None); | ||
521 | return Py_None; | ||
522 | } | ||
523 | |||
524 | static PyMethodDef pyrf_evsel__methods[] = { | ||
525 | { | ||
526 | .ml_name = "open", | ||
527 | .ml_meth = (PyCFunction)pyrf_evsel__open, | ||
528 | .ml_flags = METH_VARARGS | METH_KEYWORDS, | ||
529 | .ml_doc = PyDoc_STR("open the event selector file descriptor table.") | ||
530 | }, | ||
531 | { .ml_name = NULL, } | ||
532 | }; | ||
533 | |||
534 | static char pyrf_evsel__doc[] = PyDoc_STR("perf event selector list object."); | ||
535 | |||
536 | static PyTypeObject pyrf_evsel__type = { | ||
537 | PyVarObject_HEAD_INIT(NULL, 0) | ||
538 | .tp_name = "perf.evsel", | ||
539 | .tp_basicsize = sizeof(struct pyrf_evsel), | ||
540 | .tp_dealloc = (destructor)pyrf_evsel__delete, | ||
541 | .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, | ||
542 | .tp_doc = pyrf_evsel__doc, | ||
543 | .tp_methods = pyrf_evsel__methods, | ||
544 | .tp_init = (initproc)pyrf_evsel__init, | ||
545 | }; | ||
546 | |||
547 | static int pyrf_evsel__setup_types(void) | ||
548 | { | ||
549 | pyrf_evsel__type.tp_new = PyType_GenericNew; | ||
550 | return PyType_Ready(&pyrf_evsel__type); | ||
551 | } | ||
552 | |||
553 | struct pyrf_evlist { | ||
554 | PyObject_HEAD | ||
555 | |||
556 | struct perf_evlist evlist; | ||
557 | }; | ||
558 | |||
559 | static int pyrf_evlist__init(struct pyrf_evlist *pevlist, | ||
560 | PyObject *args, PyObject *kwargs __used) | ||
561 | { | ||
562 | PyObject *pcpus = NULL, *pthreads = NULL; | ||
563 | struct cpu_map *cpus; | ||
564 | struct thread_map *threads; | ||
565 | |||
566 | if (!PyArg_ParseTuple(args, "OO", &pcpus, &pthreads)) | ||
567 | return -1; | ||
568 | |||
569 | threads = ((struct pyrf_thread_map *)pthreads)->threads; | ||
570 | cpus = ((struct pyrf_cpu_map *)pcpus)->cpus; | ||
571 | perf_evlist__init(&pevlist->evlist, cpus, threads); | ||
572 | return 0; | ||
573 | } | ||
574 | |||
575 | static void pyrf_evlist__delete(struct pyrf_evlist *pevlist) | ||
576 | { | ||
577 | perf_evlist__exit(&pevlist->evlist); | ||
578 | pevlist->ob_type->tp_free((PyObject*)pevlist); | ||
579 | } | ||
580 | |||
581 | static PyObject *pyrf_evlist__mmap(struct pyrf_evlist *pevlist, | ||
582 | PyObject *args, PyObject *kwargs) | ||
583 | { | ||
584 | struct perf_evlist *evlist = &pevlist->evlist; | ||
585 | static char *kwlist[] = {"pages", "overwrite", | ||
586 | NULL, NULL}; | ||
587 | int pages = 128, overwrite = false; | ||
588 | |||
589 | if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii", kwlist, | ||
590 | &pages, &overwrite)) | ||
591 | return NULL; | ||
592 | |||
593 | if (perf_evlist__mmap(evlist, pages, overwrite) < 0) { | ||
594 | PyErr_SetFromErrno(PyExc_OSError); | ||
595 | return NULL; | ||
596 | } | ||
597 | |||
598 | Py_INCREF(Py_None); | ||
599 | return Py_None; | ||
600 | } | ||
601 | |||
602 | static PyObject *pyrf_evlist__poll(struct pyrf_evlist *pevlist, | ||
603 | PyObject *args, PyObject *kwargs) | ||
604 | { | ||
605 | struct perf_evlist *evlist = &pevlist->evlist; | ||
606 | static char *kwlist[] = {"timeout", NULL, NULL}; | ||
607 | int timeout = -1, n; | ||
608 | |||
609 | if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, &timeout)) | ||
610 | return NULL; | ||
611 | |||
612 | n = poll(evlist->pollfd, evlist->nr_fds, timeout); | ||
613 | if (n < 0) { | ||
614 | PyErr_SetFromErrno(PyExc_OSError); | ||
615 | return NULL; | ||
616 | } | ||
617 | |||
618 | return Py_BuildValue("i", n); | ||
619 | } | ||
620 | |||
621 | static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist, | ||
622 | PyObject *args __used, PyObject *kwargs __used) | ||
623 | { | ||
624 | struct perf_evlist *evlist = &pevlist->evlist; | ||
625 | PyObject *list = PyList_New(0); | ||
626 | int i; | ||
627 | |||
628 | for (i = 0; i < evlist->nr_fds; ++i) { | ||
629 | PyObject *file; | ||
630 | FILE *fp = fdopen(evlist->pollfd[i].fd, "r"); | ||
631 | |||
632 | if (fp == NULL) | ||
633 | goto free_list; | ||
634 | |||
635 | file = PyFile_FromFile(fp, "perf", "r", NULL); | ||
636 | if (file == NULL) | ||
637 | goto free_list; | ||
638 | |||
639 | if (PyList_Append(list, file) != 0) { | ||
640 | Py_DECREF(file); | ||
641 | goto free_list; | ||
642 | } | ||
643 | |||
644 | Py_DECREF(file); | ||
645 | } | ||
646 | |||
647 | return list; | ||
648 | free_list: | ||
649 | return PyErr_NoMemory(); | ||
650 | } | ||
651 | |||
652 | |||
653 | static PyObject *pyrf_evlist__add(struct pyrf_evlist *pevlist, | ||
654 | PyObject *args, PyObject *kwargs __used) | ||
655 | { | ||
656 | struct perf_evlist *evlist = &pevlist->evlist; | ||
657 | PyObject *pevsel; | ||
658 | struct perf_evsel *evsel; | ||
659 | |||
660 | if (!PyArg_ParseTuple(args, "O", &pevsel)) | ||
661 | return NULL; | ||
662 | |||
663 | Py_INCREF(pevsel); | ||
664 | evsel = &((struct pyrf_evsel *)pevsel)->evsel; | ||
665 | evsel->idx = evlist->nr_entries; | ||
666 | perf_evlist__add(evlist, evsel); | ||
667 | |||
668 | return Py_BuildValue("i", evlist->nr_entries); | ||
669 | } | ||
670 | |||
671 | static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist, | ||
672 | PyObject *args, PyObject *kwargs) | ||
673 | { | ||
674 | struct perf_evlist *evlist = &pevlist->evlist; | ||
675 | union perf_event *event; | ||
676 | int sample_id_all = 1, cpu; | ||
677 | static char *kwlist[] = {"cpu", "sample_id_all", NULL, NULL}; | ||
678 | int err; | ||
679 | |||
680 | if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|i", kwlist, | ||
681 | &cpu, &sample_id_all)) | ||
682 | return NULL; | ||
683 | |||
684 | event = perf_evlist__mmap_read(evlist, cpu); | ||
685 | if (event != NULL) { | ||
686 | struct perf_evsel *first; | ||
687 | PyObject *pyevent = pyrf_event__new(event); | ||
688 | struct pyrf_event *pevent = (struct pyrf_event *)pyevent; | ||
689 | |||
690 | if (pyevent == NULL) | ||
691 | return PyErr_NoMemory(); | ||
692 | |||
693 | first = list_entry(evlist->entries.next, struct perf_evsel, node); | ||
694 | err = perf_event__parse_sample(event, first->attr.sample_type, | ||
695 | perf_evsel__sample_size(first), | ||
696 | sample_id_all, &pevent->sample); | ||
697 | if (err) | ||
698 | return PyErr_Format(PyExc_OSError, | ||
699 | "perf: can't parse sample, err=%d", err); | ||
700 | return pyevent; | ||
701 | } | ||
702 | |||
703 | Py_INCREF(Py_None); | ||
704 | return Py_None; | ||
705 | } | ||
706 | |||
707 | static PyMethodDef pyrf_evlist__methods[] = { | ||
708 | { | ||
709 | .ml_name = "mmap", | ||
710 | .ml_meth = (PyCFunction)pyrf_evlist__mmap, | ||
711 | .ml_flags = METH_VARARGS | METH_KEYWORDS, | ||
712 | .ml_doc = PyDoc_STR("mmap the file descriptor table.") | ||
713 | }, | ||
714 | { | ||
715 | .ml_name = "poll", | ||
716 | .ml_meth = (PyCFunction)pyrf_evlist__poll, | ||
717 | .ml_flags = METH_VARARGS | METH_KEYWORDS, | ||
718 | .ml_doc = PyDoc_STR("poll the file descriptor table.") | ||
719 | }, | ||
720 | { | ||
721 | .ml_name = "get_pollfd", | ||
722 | .ml_meth = (PyCFunction)pyrf_evlist__get_pollfd, | ||
723 | .ml_flags = METH_VARARGS | METH_KEYWORDS, | ||
724 | .ml_doc = PyDoc_STR("get the poll file descriptor table.") | ||
725 | }, | ||
726 | { | ||
727 | .ml_name = "add", | ||
728 | .ml_meth = (PyCFunction)pyrf_evlist__add, | ||
729 | .ml_flags = METH_VARARGS | METH_KEYWORDS, | ||
730 | .ml_doc = PyDoc_STR("adds an event selector to the list.") | ||
731 | }, | ||
732 | { | ||
733 | .ml_name = "read_on_cpu", | ||
734 | .ml_meth = (PyCFunction)pyrf_evlist__read_on_cpu, | ||
735 | .ml_flags = METH_VARARGS | METH_KEYWORDS, | ||
736 | .ml_doc = PyDoc_STR("reads an event.") | ||
737 | }, | ||
738 | { .ml_name = NULL, } | ||
739 | }; | ||
740 | |||
741 | static Py_ssize_t pyrf_evlist__length(PyObject *obj) | ||
742 | { | ||
743 | struct pyrf_evlist *pevlist = (void *)obj; | ||
744 | |||
745 | return pevlist->evlist.nr_entries; | ||
746 | } | ||
747 | |||
748 | static PyObject *pyrf_evlist__item(PyObject *obj, Py_ssize_t i) | ||
749 | { | ||
750 | struct pyrf_evlist *pevlist = (void *)obj; | ||
751 | struct perf_evsel *pos; | ||
752 | |||
753 | if (i >= pevlist->evlist.nr_entries) | ||
754 | return NULL; | ||
755 | |||
756 | list_for_each_entry(pos, &pevlist->evlist.entries, node) | ||
757 | if (i-- == 0) | ||
758 | break; | ||
759 | |||
760 | return Py_BuildValue("O", container_of(pos, struct pyrf_evsel, evsel)); | ||
761 | } | ||
762 | |||
763 | static PySequenceMethods pyrf_evlist__sequence_methods = { | ||
764 | .sq_length = pyrf_evlist__length, | ||
765 | .sq_item = pyrf_evlist__item, | ||
766 | }; | ||
767 | |||
768 | static char pyrf_evlist__doc[] = PyDoc_STR("perf event selector list object."); | ||
769 | |||
770 | static PyTypeObject pyrf_evlist__type = { | ||
771 | PyVarObject_HEAD_INIT(NULL, 0) | ||
772 | .tp_name = "perf.evlist", | ||
773 | .tp_basicsize = sizeof(struct pyrf_evlist), | ||
774 | .tp_dealloc = (destructor)pyrf_evlist__delete, | ||
775 | .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, | ||
776 | .tp_as_sequence = &pyrf_evlist__sequence_methods, | ||
777 | .tp_doc = pyrf_evlist__doc, | ||
778 | .tp_methods = pyrf_evlist__methods, | ||
779 | .tp_init = (initproc)pyrf_evlist__init, | ||
780 | }; | ||
781 | |||
782 | static int pyrf_evlist__setup_types(void) | ||
783 | { | ||
784 | pyrf_evlist__type.tp_new = PyType_GenericNew; | ||
785 | return PyType_Ready(&pyrf_evlist__type); | ||
786 | } | ||
787 | |||
788 | static struct { | ||
789 | const char *name; | ||
790 | int value; | ||
791 | } perf__constants[] = { | ||
792 | { "TYPE_HARDWARE", PERF_TYPE_HARDWARE }, | ||
793 | { "TYPE_SOFTWARE", PERF_TYPE_SOFTWARE }, | ||
794 | { "TYPE_TRACEPOINT", PERF_TYPE_TRACEPOINT }, | ||
795 | { "TYPE_HW_CACHE", PERF_TYPE_HW_CACHE }, | ||
796 | { "TYPE_RAW", PERF_TYPE_RAW }, | ||
797 | { "TYPE_BREAKPOINT", PERF_TYPE_BREAKPOINT }, | ||
798 | |||
799 | { "COUNT_HW_CPU_CYCLES", PERF_COUNT_HW_CPU_CYCLES }, | ||
800 | { "COUNT_HW_INSTRUCTIONS", PERF_COUNT_HW_INSTRUCTIONS }, | ||
801 | { "COUNT_HW_CACHE_REFERENCES", PERF_COUNT_HW_CACHE_REFERENCES }, | ||
802 | { "COUNT_HW_CACHE_MISSES", PERF_COUNT_HW_CACHE_MISSES }, | ||
803 | { "COUNT_HW_BRANCH_INSTRUCTIONS", PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, | ||
804 | { "COUNT_HW_BRANCH_MISSES", PERF_COUNT_HW_BRANCH_MISSES }, | ||
805 | { "COUNT_HW_BUS_CYCLES", PERF_COUNT_HW_BUS_CYCLES }, | ||
806 | { "COUNT_HW_CACHE_L1D", PERF_COUNT_HW_CACHE_L1D }, | ||
807 | { "COUNT_HW_CACHE_L1I", PERF_COUNT_HW_CACHE_L1I }, | ||
808 | { "COUNT_HW_CACHE_LL", PERF_COUNT_HW_CACHE_LL }, | ||
809 | { "COUNT_HW_CACHE_DTLB", PERF_COUNT_HW_CACHE_DTLB }, | ||
810 | { "COUNT_HW_CACHE_ITLB", PERF_COUNT_HW_CACHE_ITLB }, | ||
811 | { "COUNT_HW_CACHE_BPU", PERF_COUNT_HW_CACHE_BPU }, | ||
812 | { "COUNT_HW_CACHE_OP_READ", PERF_COUNT_HW_CACHE_OP_READ }, | ||
813 | { "COUNT_HW_CACHE_OP_WRITE", PERF_COUNT_HW_CACHE_OP_WRITE }, | ||
814 | { "COUNT_HW_CACHE_OP_PREFETCH", PERF_COUNT_HW_CACHE_OP_PREFETCH }, | ||
815 | { "COUNT_HW_CACHE_RESULT_ACCESS", PERF_COUNT_HW_CACHE_RESULT_ACCESS }, | ||
816 | { "COUNT_HW_CACHE_RESULT_MISS", PERF_COUNT_HW_CACHE_RESULT_MISS }, | ||
817 | |||
818 | { "COUNT_HW_STALLED_CYCLES_FRONTEND", PERF_COUNT_HW_STALLED_CYCLES_FRONTEND }, | ||
819 | { "COUNT_HW_STALLED_CYCLES_BACKEND", PERF_COUNT_HW_STALLED_CYCLES_BACKEND }, | ||
820 | |||
821 | { "COUNT_SW_CPU_CLOCK", PERF_COUNT_SW_CPU_CLOCK }, | ||
822 | { "COUNT_SW_TASK_CLOCK", PERF_COUNT_SW_TASK_CLOCK }, | ||
823 | { "COUNT_SW_PAGE_FAULTS", PERF_COUNT_SW_PAGE_FAULTS }, | ||
824 | { "COUNT_SW_CONTEXT_SWITCHES", PERF_COUNT_SW_CONTEXT_SWITCHES }, | ||
825 | { "COUNT_SW_CPU_MIGRATIONS", PERF_COUNT_SW_CPU_MIGRATIONS }, | ||
826 | { "COUNT_SW_PAGE_FAULTS_MIN", PERF_COUNT_SW_PAGE_FAULTS_MIN }, | ||
827 | { "COUNT_SW_PAGE_FAULTS_MAJ", PERF_COUNT_SW_PAGE_FAULTS_MAJ }, | ||
828 | { "COUNT_SW_ALIGNMENT_FAULTS", PERF_COUNT_SW_ALIGNMENT_FAULTS }, | ||
829 | { "COUNT_SW_EMULATION_FAULTS", PERF_COUNT_SW_EMULATION_FAULTS }, | ||
830 | |||
831 | { "SAMPLE_IP", PERF_SAMPLE_IP }, | ||
832 | { "SAMPLE_TID", PERF_SAMPLE_TID }, | ||
833 | { "SAMPLE_TIME", PERF_SAMPLE_TIME }, | ||
834 | { "SAMPLE_ADDR", PERF_SAMPLE_ADDR }, | ||
835 | { "SAMPLE_READ", PERF_SAMPLE_READ }, | ||
836 | { "SAMPLE_CALLCHAIN", PERF_SAMPLE_CALLCHAIN }, | ||
837 | { "SAMPLE_ID", PERF_SAMPLE_ID }, | ||
838 | { "SAMPLE_CPU", PERF_SAMPLE_CPU }, | ||
839 | { "SAMPLE_PERIOD", PERF_SAMPLE_PERIOD }, | ||
840 | { "SAMPLE_STREAM_ID", PERF_SAMPLE_STREAM_ID }, | ||
841 | { "SAMPLE_RAW", PERF_SAMPLE_RAW }, | ||
842 | |||
843 | { "FORMAT_TOTAL_TIME_ENABLED", PERF_FORMAT_TOTAL_TIME_ENABLED }, | ||
844 | { "FORMAT_TOTAL_TIME_RUNNING", PERF_FORMAT_TOTAL_TIME_RUNNING }, | ||
845 | { "FORMAT_ID", PERF_FORMAT_ID }, | ||
846 | { "FORMAT_GROUP", PERF_FORMAT_GROUP }, | ||
847 | |||
848 | { "RECORD_MMAP", PERF_RECORD_MMAP }, | ||
849 | { "RECORD_LOST", PERF_RECORD_LOST }, | ||
850 | { "RECORD_COMM", PERF_RECORD_COMM }, | ||
851 | { "RECORD_EXIT", PERF_RECORD_EXIT }, | ||
852 | { "RECORD_THROTTLE", PERF_RECORD_THROTTLE }, | ||
853 | { "RECORD_UNTHROTTLE", PERF_RECORD_UNTHROTTLE }, | ||
854 | { "RECORD_FORK", PERF_RECORD_FORK }, | ||
855 | { "RECORD_READ", PERF_RECORD_READ }, | ||
856 | { "RECORD_SAMPLE", PERF_RECORD_SAMPLE }, | ||
857 | { .name = NULL, }, | ||
858 | }; | ||
859 | |||
860 | static PyMethodDef perf__methods[] = { | ||
861 | { .ml_name = NULL, } | ||
862 | }; | ||
863 | |||
864 | PyMODINIT_FUNC initperf(void) | ||
865 | { | ||
866 | PyObject *obj; | ||
867 | int i; | ||
868 | PyObject *dict, *module = Py_InitModule("perf", perf__methods); | ||
869 | |||
870 | if (module == NULL || | ||
871 | pyrf_event__setup_types() < 0 || | ||
872 | pyrf_evlist__setup_types() < 0 || | ||
873 | pyrf_evsel__setup_types() < 0 || | ||
874 | pyrf_thread_map__setup_types() < 0 || | ||
875 | pyrf_cpu_map__setup_types() < 0) | ||
876 | return; | ||
877 | |||
878 | Py_INCREF(&pyrf_evlist__type); | ||
879 | PyModule_AddObject(module, "evlist", (PyObject*)&pyrf_evlist__type); | ||
880 | |||
881 | Py_INCREF(&pyrf_evsel__type); | ||
882 | PyModule_AddObject(module, "evsel", (PyObject*)&pyrf_evsel__type); | ||
883 | |||
884 | Py_INCREF(&pyrf_thread_map__type); | ||
885 | PyModule_AddObject(module, "thread_map", (PyObject*)&pyrf_thread_map__type); | ||
886 | |||
887 | Py_INCREF(&pyrf_cpu_map__type); | ||
888 | PyModule_AddObject(module, "cpu_map", (PyObject*)&pyrf_cpu_map__type); | ||
889 | |||
890 | dict = PyModule_GetDict(module); | ||
891 | if (dict == NULL) | ||
892 | goto error; | ||
893 | |||
894 | for (i = 0; perf__constants[i].name != NULL; i++) { | ||
895 | obj = PyInt_FromLong(perf__constants[i].value); | ||
896 | if (obj == NULL) | ||
897 | goto error; | ||
898 | PyDict_SetItemString(dict, perf__constants[i].name, obj); | ||
899 | Py_DECREF(obj); | ||
900 | } | ||
901 | |||
902 | error: | ||
903 | if (PyErr_Occurred()) | ||
904 | PyErr_SetString(PyExc_ImportError, "perf: Init failed!"); | ||
905 | } | ||
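
Note: the new util/python.c above builds a Python 2 extension module named "perf" (Py_InitModule, PyString) around the evlist/evsel/cpu_map/thread_map code. A minimal usage sketch follows, in the spirit of the twatch.py example shipped with perf around this time; the counter configuration, sampling frequency and printed fields are illustrative assumptions, not part of this diff:

import perf

def main():
    cpus = perf.cpu_map()          # default: all online CPUs
    threads = perf.thread_map()    # default: pid = tid = -1, i.e. all threads
    # Cycles at a modest frequency, with task/comm side-band records;
    # sample_id_all makes non-sample records carry pid/tid/cpu/time too.
    evsel = perf.evsel(task = 1, comm = 1, wakeup_events = 1,
                       sample_freq = 4000, sample_id_all = 1,
                       sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
    evsel.open(cpus = cpus, threads = threads)
    evlist = perf.evlist(cpus, threads)
    evlist.add(evsel)
    evlist.mmap()
    while True:
        evlist.poll(timeout = -1)
        for cpu in cpus:
            event = evlist.read_on_cpu(cpu)
            if not event:
                continue
            # Sample members parsed by perf_event__parse_sample() in read_on_cpu().
            print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
                                                    event.sample_pid,
                                                    event.sample_tid),
            print event

if __name__ == '__main__':
    main()

read_on_cpu() hands back typed objects (perf.mmap_event, perf.comm_event, perf.task_event, perf.throttle_event) whose repr comes from the pyrf_*_event__repr functions above.
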
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c index b059dc50cc2d..74350ffb57fe 100644 --- a/tools/perf/util/scripting-engines/trace-event-perl.c +++ b/tools/perf/util/scripting-engines/trace-event-perl.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * trace-event-perl. Feed perf trace events to an embedded Perl interpreter. | 2 | * trace-event-perl. Feed perf script events to an embedded Perl interpreter. |
3 | * | 3 | * |
4 | * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com> | 4 | * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com> |
5 | * | 5 | * |
@@ -245,9 +245,11 @@ static inline struct event *find_cache_event(int type) | |||
245 | return event; | 245 | return event; |
246 | } | 246 | } |
247 | 247 | ||
248 | static void perl_process_event(int cpu, void *data, | 248 | static void perl_process_event(union perf_event *pevent __unused, |
249 | int size __unused, | 249 | struct perf_sample *sample, |
250 | unsigned long long nsecs, char *comm) | 250 | struct perf_evsel *evsel, |
251 | struct perf_session *session __unused, | ||
252 | struct thread *thread) | ||
251 | { | 253 | { |
252 | struct format_field *field; | 254 | struct format_field *field; |
253 | static char handler[256]; | 255 | static char handler[256]; |
@@ -256,6 +258,10 @@ static void perl_process_event(int cpu, void *data, | |||
256 | struct event *event; | 258 | struct event *event; |
257 | int type; | 259 | int type; |
258 | int pid; | 260 | int pid; |
261 | int cpu = sample->cpu; | ||
262 | void *data = sample->raw_data; | ||
263 | unsigned long long nsecs = sample->time; | ||
264 | char *comm = thread->comm; | ||
259 | 265 | ||
260 | dSP; | 266 | dSP; |
261 | 267 | ||
@@ -411,8 +417,8 @@ static int perl_generate_script(const char *outfile) | |||
411 | return -1; | 417 | return -1; |
412 | } | 418 | } |
413 | 419 | ||
414 | fprintf(ofp, "# perf trace event handlers, " | 420 | fprintf(ofp, "# perf script event handlers, " |
415 | "generated by perf trace -g perl\n"); | 421 | "generated by perf script -g perl\n"); |
416 | 422 | ||
417 | fprintf(ofp, "# Licensed under the terms of the GNU GPL" | 423 | fprintf(ofp, "# Licensed under the terms of the GNU GPL" |
418 | " License version 2\n\n"); | 424 | " License version 2\n\n"); |
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c index 33a632523743..6ccf70e8d8f2 100644 --- a/tools/perf/util/scripting-engines/trace-event-python.c +++ b/tools/perf/util/scripting-engines/trace-event-python.c | |||
@@ -204,9 +204,11 @@ static inline struct event *find_cache_event(int type) | |||
204 | return event; | 204 | return event; |
205 | } | 205 | } |
206 | 206 | ||
207 | static void python_process_event(int cpu, void *data, | 207 | static void python_process_event(union perf_event *pevent __unused, |
208 | int size __unused, | 208 | struct perf_sample *sample, |
209 | unsigned long long nsecs, char *comm) | 209 | struct perf_evsel *evsel __unused, |
210 | struct perf_session *session __unused, | ||
211 | struct thread *thread) | ||
210 | { | 212 | { |
211 | PyObject *handler, *retval, *context, *t, *obj, *dict = NULL; | 213 | PyObject *handler, *retval, *context, *t, *obj, *dict = NULL; |
212 | static char handler_name[256]; | 214 | static char handler_name[256]; |
@@ -217,6 +219,10 @@ static void python_process_event(int cpu, void *data, | |||
217 | unsigned n = 0; | 219 | unsigned n = 0; |
218 | int type; | 220 | int type; |
219 | int pid; | 221 | int pid; |
222 | int cpu = sample->cpu; | ||
223 | void *data = sample->raw_data; | ||
224 | unsigned long long nsecs = sample->time; | ||
225 | char *comm = thread->comm; | ||
220 | 226 | ||
221 | t = PyTuple_New(MAX_FIELDS); | 227 | t = PyTuple_New(MAX_FIELDS); |
222 | if (!t) | 228 | if (!t) |
@@ -248,8 +254,7 @@ static void python_process_event(int cpu, void *data, | |||
248 | context = PyCObject_FromVoidPtr(scripting_context, NULL); | 254 | context = PyCObject_FromVoidPtr(scripting_context, NULL); |
249 | 255 | ||
250 | PyTuple_SetItem(t, n++, PyString_FromString(handler_name)); | 256 | PyTuple_SetItem(t, n++, PyString_FromString(handler_name)); |
251 | PyTuple_SetItem(t, n++, | 257 | PyTuple_SetItem(t, n++, context); |
252 | PyCObject_FromVoidPtr(scripting_context, NULL)); | ||
253 | 258 | ||
254 | if (handler) { | 259 | if (handler) { |
255 | PyTuple_SetItem(t, n++, PyInt_FromLong(cpu)); | 260 | PyTuple_SetItem(t, n++, PyInt_FromLong(cpu)); |
@@ -442,8 +447,8 @@ static int python_generate_script(const char *outfile) | |||
442 | fprintf(stderr, "couldn't open %s\n", fname); | 447 | fprintf(stderr, "couldn't open %s\n", fname); |
443 | return -1; | 448 | return -1; |
444 | } | 449 | } |
445 | fprintf(ofp, "# perf trace event handlers, " | 450 | fprintf(ofp, "# perf script event handlers, " |
446 | "generated by perf trace -g python\n"); | 451 | "generated by perf script -g python\n"); |
447 | 452 | ||
448 | fprintf(ofp, "# Licensed under the terms of the GNU GPL" | 453 | fprintf(ofp, "# Licensed under the terms of the GNU GPL" |
449 | " License version 2\n\n"); | 454 | " License version 2\n\n"); |
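
Note: the Perl and Python scripting engines now receive the event as union perf_event / struct perf_sample / struct perf_evsel / struct thread and derive cpu, raw_data, time and comm from those, and the generated-script banners now say "perf script" instead of "perf trace". For reference, a hand-written sketch of the kind of handler script these engines call into; the PERF_EXEC_PATH preamble matches the usual generated boilerplate, while the sched_switch handler and its field list are placeholders, not taken from this diff:

# perf script event handlers, generated by perf script -g python
# (hand-written sketch of the same shape)

import os
import sys

sys.path.append(os.environ['PERF_EXEC_PATH'] +
                '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from perf_trace_context import *
from Core import *

def trace_begin():
    print "in trace_begin"

def trace_end():
    print "in trace_end"

# One handler per "subsystem__event" tracepoint; the common_* arguments
# correspond to the cpu/nsecs/pid/comm values python_process_event() now
# pulls out of struct perf_sample and struct thread.
def sched__sched_switch(event_name, context, common_cpu,
                        common_secs, common_nsecs, common_pid, common_comm,
                        prev_comm, prev_pid, prev_prio, prev_state,
                        next_comm, next_pid, next_prio):
    print "%-16s %5d [%03d] %s -> %s" % (common_comm, common_pid, common_cpu,
                                         prev_comm, next_comm)

def trace_unhandled(event_name, context, event_fields_dict):
    print ' '.join(['%s=%s' % (k, str(v))
                    for k, v in sorted(event_fields_dict.items())])
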
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index fa9d652c2dc3..f5a8fbdd3f76 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c | |||
@@ -7,6 +7,8 @@ | |||
7 | #include <sys/types.h> | 7 | #include <sys/types.h> |
8 | #include <sys/mman.h> | 8 | #include <sys/mman.h> |
9 | 9 | ||
10 | #include "evlist.h" | ||
11 | #include "evsel.h" | ||
10 | #include "session.h" | 12 | #include "session.h" |
11 | #include "sort.h" | 13 | #include "sort.h" |
12 | #include "util.h" | 14 | #include "util.h" |
@@ -19,7 +21,7 @@ static int perf_session__open(struct perf_session *self, bool force) | |||
19 | self->fd_pipe = true; | 21 | self->fd_pipe = true; |
20 | self->fd = STDIN_FILENO; | 22 | self->fd = STDIN_FILENO; |
21 | 23 | ||
22 | if (perf_header__read(self, self->fd) < 0) | 24 | if (perf_session__read_header(self, self->fd) < 0) |
23 | pr_err("incompatible file format"); | 25 | pr_err("incompatible file format"); |
24 | 26 | ||
25 | return 0; | 27 | return 0; |
@@ -51,11 +53,21 @@ static int perf_session__open(struct perf_session *self, bool force) | |||
51 | goto out_close; | 53 | goto out_close; |
52 | } | 54 | } |
53 | 55 | ||
54 | if (perf_header__read(self, self->fd) < 0) { | 56 | if (perf_session__read_header(self, self->fd) < 0) { |
55 | pr_err("incompatible file format"); | 57 | pr_err("incompatible file format"); |
56 | goto out_close; | 58 | goto out_close; |
57 | } | 59 | } |
58 | 60 | ||
61 | if (!perf_evlist__valid_sample_type(self->evlist)) { | ||
62 | pr_err("non matching sample_type"); | ||
63 | goto out_close; | ||
64 | } | ||
65 | |||
66 | if (!perf_evlist__valid_sample_id_all(self->evlist)) { | ||
67 | pr_err("non matching sample_id_all"); | ||
68 | goto out_close; | ||
69 | } | ||
70 | |||
59 | self->size = input_stat.st_size; | 71 | self->size = input_stat.st_size; |
60 | return 0; | 72 | return 0; |
61 | 73 | ||
@@ -65,9 +77,39 @@ out_close: | |||
65 | return -1; | 77 | return -1; |
66 | } | 78 | } |
67 | 79 | ||
80 | static void perf_session__id_header_size(struct perf_session *session) | ||
81 | { | ||
82 | struct perf_sample *data; | ||
83 | u64 sample_type = session->sample_type; | ||
84 | u16 size = 0; | ||
85 | |||
86 | if (!session->sample_id_all) | ||
87 | goto out; | ||
88 | |||
89 | if (sample_type & PERF_SAMPLE_TID) | ||
90 | size += sizeof(data->tid) * 2; | ||
91 | |||
92 | if (sample_type & PERF_SAMPLE_TIME) | ||
93 | size += sizeof(data->time); | ||
94 | |||
95 | if (sample_type & PERF_SAMPLE_ID) | ||
96 | size += sizeof(data->id); | ||
97 | |||
98 | if (sample_type & PERF_SAMPLE_STREAM_ID) | ||
99 | size += sizeof(data->stream_id); | ||
100 | |||
101 | if (sample_type & PERF_SAMPLE_CPU) | ||
102 | size += sizeof(data->cpu) * 2; | ||
103 | out: | ||
104 | session->id_hdr_size = size; | ||
105 | } | ||
106 | |||
68 | void perf_session__update_sample_type(struct perf_session *self) | 107 | void perf_session__update_sample_type(struct perf_session *self) |
69 | { | 108 | { |
70 | self->sample_type = perf_header__sample_type(&self->header); | 109 | self->sample_type = perf_evlist__sample_type(self->evlist); |
110 | self->sample_size = __perf_evsel__sample_size(self->sample_type); | ||
111 | self->sample_id_all = perf_evlist__sample_id_all(self->evlist); | ||
112 | perf_session__id_header_size(self); | ||
71 | } | 113 | } |
72 | 114 | ||
73 | int perf_session__create_kernel_maps(struct perf_session *self) | 115 | int perf_session__create_kernel_maps(struct perf_session *self) |
@@ -85,7 +127,9 @@ static void perf_session__destroy_kernel_maps(struct perf_session *self) | |||
85 | machines__destroy_guest_kernel_maps(&self->machines); | 127 | machines__destroy_guest_kernel_maps(&self->machines); |
86 | } | 128 | } |
87 | 129 | ||
88 | struct perf_session *perf_session__new(const char *filename, int mode, bool force, bool repipe) | 130 | struct perf_session *perf_session__new(const char *filename, int mode, |
131 | bool force, bool repipe, | ||
132 | struct perf_event_ops *ops) | ||
89 | { | 133 | { |
90 | size_t len = filename ? strlen(filename) + 1 : 0; | 134 | size_t len = filename ? strlen(filename) + 1 : 0; |
91 | struct perf_session *self = zalloc(sizeof(*self) + len); | 135 | struct perf_session *self = zalloc(sizeof(*self) + len); |
@@ -93,38 +137,47 @@ struct perf_session *perf_session__new(const char *filename, int mode, bool forc | |||
93 | if (self == NULL) | 137 | if (self == NULL) |
94 | goto out; | 138 | goto out; |
95 | 139 | ||
96 | if (perf_header__init(&self->header) < 0) | ||
97 | goto out_free; | ||
98 | |||
99 | memcpy(self->filename, filename, len); | 140 | memcpy(self->filename, filename, len); |
100 | self->threads = RB_ROOT; | 141 | self->threads = RB_ROOT; |
101 | INIT_LIST_HEAD(&self->dead_threads); | 142 | INIT_LIST_HEAD(&self->dead_threads); |
102 | self->hists_tree = RB_ROOT; | ||
103 | self->last_match = NULL; | 143 | self->last_match = NULL; |
104 | self->mmap_window = 32; | 144 | /* |
145 | * On 64bit we can mmap the data file in one go. No need for tiny mmap | ||
146 | * slices. On 32bit we use 32MB. | ||
147 | */ | ||
148 | #if BITS_PER_LONG == 64 | ||
149 | self->mmap_window = ULLONG_MAX; | ||
150 | #else | ||
151 | self->mmap_window = 32 * 1024 * 1024ULL; | ||
152 | #endif | ||
105 | self->machines = RB_ROOT; | 153 | self->machines = RB_ROOT; |
106 | self->repipe = repipe; | 154 | self->repipe = repipe; |
107 | INIT_LIST_HEAD(&self->ordered_samples.samples_head); | 155 | INIT_LIST_HEAD(&self->ordered_samples.samples); |
156 | INIT_LIST_HEAD(&self->ordered_samples.sample_cache); | ||
157 | INIT_LIST_HEAD(&self->ordered_samples.to_free); | ||
108 | machine__init(&self->host_machine, "", HOST_KERNEL_ID); | 158 | machine__init(&self->host_machine, "", HOST_KERNEL_ID); |
109 | 159 | ||
110 | if (mode == O_RDONLY) { | 160 | if (mode == O_RDONLY) { |
111 | if (perf_session__open(self, force) < 0) | 161 | if (perf_session__open(self, force) < 0) |
112 | goto out_delete; | 162 | goto out_delete; |
163 | perf_session__update_sample_type(self); | ||
113 | } else if (mode == O_WRONLY) { | 164 | } else if (mode == O_WRONLY) { |
114 | /* | 165 | /* |
115 | * In O_RDONLY mode this will be performed when reading the | 166 | * In O_RDONLY mode this will be performed when reading the |
116 | * kernel MMAP event, in event__process_mmap(). | 167 | * kernel MMAP event, in perf_event__process_mmap(). |
117 | */ | 168 | */ |
118 | if (perf_session__create_kernel_maps(self) < 0) | 169 | if (perf_session__create_kernel_maps(self) < 0) |
119 | goto out_delete; | 170 | goto out_delete; |
120 | } | 171 | } |
121 | 172 | ||
122 | perf_session__update_sample_type(self); | 173 | if (ops && ops->ordering_requires_timestamps && |
174 | ops->ordered_samples && !self->sample_id_all) { | ||
175 | dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n"); | ||
176 | ops->ordered_samples = false; | ||
177 | } | ||
178 | |||
123 | out: | 179 | out: |
124 | return self; | 180 | return self; |
125 | out_free: | ||
126 | free(self); | ||
127 | return NULL; | ||
128 | out_delete: | 181 | out_delete: |
129 | perf_session__delete(self); | 182 | perf_session__delete(self); |
130 | return NULL; | 183 | return NULL; |
@@ -155,7 +208,6 @@ static void perf_session__delete_threads(struct perf_session *self) | |||
155 | 208 | ||
156 | void perf_session__delete(struct perf_session *self) | 209 | void perf_session__delete(struct perf_session *self) |
157 | { | 210 | { |
158 | perf_header__exit(&self->header); | ||
159 | perf_session__destroy_kernel_maps(self); | 211 | perf_session__destroy_kernel_maps(self); |
160 | perf_session__delete_dead_threads(self); | 212 | perf_session__delete_dead_threads(self); |
161 | perf_session__delete_threads(self); | 213 | perf_session__delete_threads(self); |
@@ -183,17 +235,16 @@ static bool symbol__match_parent_regex(struct symbol *sym) | |||
183 | return 0; | 235 | return 0; |
184 | } | 236 | } |
185 | 237 | ||
186 | struct map_symbol *perf_session__resolve_callchain(struct perf_session *self, | 238 | int perf_session__resolve_callchain(struct perf_session *self, |
187 | struct thread *thread, | 239 | struct thread *thread, |
188 | struct ip_callchain *chain, | 240 | struct ip_callchain *chain, |
189 | struct symbol **parent) | 241 | struct symbol **parent) |
190 | { | 242 | { |
191 | u8 cpumode = PERF_RECORD_MISC_USER; | 243 | u8 cpumode = PERF_RECORD_MISC_USER; |
192 | unsigned int i; | 244 | unsigned int i; |
193 | struct map_symbol *syms = calloc(chain->nr, sizeof(*syms)); | 245 | int err; |
194 | 246 | ||
195 | if (!syms) | 247 | callchain_cursor_reset(&self->callchain_cursor); |
196 | return NULL; | ||
197 | 248 | ||
198 | for (i = 0; i < chain->nr; i++) { | 249 | for (i = 0; i < chain->nr; i++) { |
199 | u64 ip = chain->ips[i]; | 250 | u64 ip = chain->ips[i]; |
@@ -222,22 +273,42 @@ struct map_symbol *perf_session__resolve_callchain(struct perf_session *self, | |||
222 | *parent = al.sym; | 273 | *parent = al.sym; |
223 | if (!symbol_conf.use_callchain) | 274 | if (!symbol_conf.use_callchain) |
224 | break; | 275 | break; |
225 | syms[i].map = al.map; | ||
226 | syms[i].sym = al.sym; | ||
227 | } | 276 | } |
277 | |||
278 | err = callchain_cursor_append(&self->callchain_cursor, | ||
279 | ip, al.map, al.sym); | ||
280 | if (err) | ||
281 | return err; | ||
228 | } | 282 | } |
229 | 283 | ||
230 | return syms; | 284 | return 0; |
285 | } | ||
286 | |||
287 | static int process_event_synth_stub(union perf_event *event __used, | ||
288 | struct perf_session *session __used) | ||
289 | { | ||
290 | dump_printf(": unhandled!\n"); | ||
291 | return 0; | ||
292 | } | ||
293 | |||
294 | static int process_event_sample_stub(union perf_event *event __used, | ||
295 | struct perf_sample *sample __used, | ||
296 | struct perf_evsel *evsel __used, | ||
297 | struct perf_session *session __used) | ||
298 | { | ||
299 | dump_printf(": unhandled!\n"); | ||
300 | return 0; | ||
231 | } | 301 | } |
232 | 302 | ||
233 | static int process_event_stub(event_t *event __used, | 303 | static int process_event_stub(union perf_event *event __used, |
304 | struct perf_sample *sample __used, | ||
234 | struct perf_session *session __used) | 305 | struct perf_session *session __used) |
235 | { | 306 | { |
236 | dump_printf(": unhandled!\n"); | 307 | dump_printf(": unhandled!\n"); |
237 | return 0; | 308 | return 0; |
238 | } | 309 | } |
239 | 310 | ||
240 | static int process_finished_round_stub(event_t *event __used, | 311 | static int process_finished_round_stub(union perf_event *event __used, |
241 | struct perf_session *session __used, | 312 | struct perf_session *session __used, |
242 | struct perf_event_ops *ops __used) | 313 | struct perf_event_ops *ops __used) |
243 | { | 314 | { |
@@ -245,14 +316,14 @@ static int process_finished_round_stub(event_t *event __used, | |||
245 | return 0; | 316 | return 0; |
246 | } | 317 | } |
247 | 318 | ||
248 | static int process_finished_round(event_t *event, | 319 | static int process_finished_round(union perf_event *event, |
249 | struct perf_session *session, | 320 | struct perf_session *session, |
250 | struct perf_event_ops *ops); | 321 | struct perf_event_ops *ops); |
251 | 322 | ||
252 | static void perf_event_ops__fill_defaults(struct perf_event_ops *handler) | 323 | static void perf_event_ops__fill_defaults(struct perf_event_ops *handler) |
253 | { | 324 | { |
254 | if (handler->sample == NULL) | 325 | if (handler->sample == NULL) |
255 | handler->sample = process_event_stub; | 326 | handler->sample = process_event_sample_stub; |
256 | if (handler->mmap == NULL) | 327 | if (handler->mmap == NULL) |
257 | handler->mmap = process_event_stub; | 328 | handler->mmap = process_event_stub; |
258 | if (handler->comm == NULL) | 329 | if (handler->comm == NULL) |
@@ -262,7 +333,7 @@ static void perf_event_ops__fill_defaults(struct perf_event_ops *handler) | |||
262 | if (handler->exit == NULL) | 333 | if (handler->exit == NULL) |
263 | handler->exit = process_event_stub; | 334 | handler->exit = process_event_stub; |
264 | if (handler->lost == NULL) | 335 | if (handler->lost == NULL) |
265 | handler->lost = process_event_stub; | 336 | handler->lost = perf_event__process_lost; |
266 | if (handler->read == NULL) | 337 | if (handler->read == NULL) |
267 | handler->read = process_event_stub; | 338 | handler->read = process_event_stub; |
268 | if (handler->throttle == NULL) | 339 | if (handler->throttle == NULL) |
@@ -270,13 +341,13 @@ static void perf_event_ops__fill_defaults(struct perf_event_ops *handler) | |||
270 | if (handler->unthrottle == NULL) | 341 | if (handler->unthrottle == NULL) |
271 | handler->unthrottle = process_event_stub; | 342 | handler->unthrottle = process_event_stub; |
272 | if (handler->attr == NULL) | 343 | if (handler->attr == NULL) |
273 | handler->attr = process_event_stub; | 344 | handler->attr = process_event_synth_stub; |
274 | if (handler->event_type == NULL) | 345 | if (handler->event_type == NULL) |
275 | handler->event_type = process_event_stub; | 346 | handler->event_type = process_event_synth_stub; |
276 | if (handler->tracing_data == NULL) | 347 | if (handler->tracing_data == NULL) |
277 | handler->tracing_data = process_event_stub; | 348 | handler->tracing_data = process_event_synth_stub; |
278 | if (handler->build_id == NULL) | 349 | if (handler->build_id == NULL) |
279 | handler->build_id = process_event_stub; | 350 | handler->build_id = process_event_synth_stub; |
280 | if (handler->finished_round == NULL) { | 351 | if (handler->finished_round == NULL) { |
281 | if (handler->ordered_samples) | 352 | if (handler->ordered_samples) |
282 | handler->finished_round = process_finished_round; | 353 | handler->finished_round = process_finished_round; |
@@ -296,123 +367,155 @@ void mem_bswap_64(void *src, int byte_size) | |||
296 | } | 367 | } |
297 | } | 368 | } |
298 | 369 | ||
299 | static void event__all64_swap(event_t *self) | 370 | static void perf_event__all64_swap(union perf_event *event) |
300 | { | 371 | { |
301 | struct perf_event_header *hdr = &self->header; | 372 | struct perf_event_header *hdr = &event->header; |
302 | mem_bswap_64(hdr + 1, self->header.size - sizeof(*hdr)); | 373 | mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr)); |
303 | } | 374 | } |
304 | 375 | ||
305 | static void event__comm_swap(event_t *self) | 376 | static void perf_event__comm_swap(union perf_event *event) |
306 | { | 377 | { |
307 | self->comm.pid = bswap_32(self->comm.pid); | 378 | event->comm.pid = bswap_32(event->comm.pid); |
308 | self->comm.tid = bswap_32(self->comm.tid); | 379 | event->comm.tid = bswap_32(event->comm.tid); |
309 | } | 380 | } |
310 | 381 | ||
311 | static void event__mmap_swap(event_t *self) | 382 | static void perf_event__mmap_swap(union perf_event *event) |
312 | { | 383 | { |
313 | self->mmap.pid = bswap_32(self->mmap.pid); | 384 | event->mmap.pid = bswap_32(event->mmap.pid); |
314 | self->mmap.tid = bswap_32(self->mmap.tid); | 385 | event->mmap.tid = bswap_32(event->mmap.tid); |
315 | self->mmap.start = bswap_64(self->mmap.start); | 386 | event->mmap.start = bswap_64(event->mmap.start); |
316 | self->mmap.len = bswap_64(self->mmap.len); | 387 | event->mmap.len = bswap_64(event->mmap.len); |
317 | self->mmap.pgoff = bswap_64(self->mmap.pgoff); | 388 | event->mmap.pgoff = bswap_64(event->mmap.pgoff); |
318 | } | 389 | } |
319 | 390 | ||
320 | static void event__task_swap(event_t *self) | 391 | static void perf_event__task_swap(union perf_event *event) |
321 | { | 392 | { |
322 | self->fork.pid = bswap_32(self->fork.pid); | 393 | event->fork.pid = bswap_32(event->fork.pid); |
323 | self->fork.tid = bswap_32(self->fork.tid); | 394 | event->fork.tid = bswap_32(event->fork.tid); |
324 | self->fork.ppid = bswap_32(self->fork.ppid); | 395 | event->fork.ppid = bswap_32(event->fork.ppid); |
325 | self->fork.ptid = bswap_32(self->fork.ptid); | 396 | event->fork.ptid = bswap_32(event->fork.ptid); |
326 | self->fork.time = bswap_64(self->fork.time); | 397 | event->fork.time = bswap_64(event->fork.time); |
327 | } | 398 | } |
328 | 399 | ||
329 | static void event__read_swap(event_t *self) | 400 | static void perf_event__read_swap(union perf_event *event) |
330 | { | 401 | { |
331 | self->read.pid = bswap_32(self->read.pid); | 402 | event->read.pid = bswap_32(event->read.pid); |
332 | self->read.tid = bswap_32(self->read.tid); | 403 | event->read.tid = bswap_32(event->read.tid); |
333 | self->read.value = bswap_64(self->read.value); | 404 | event->read.value = bswap_64(event->read.value); |
334 | self->read.time_enabled = bswap_64(self->read.time_enabled); | 405 | event->read.time_enabled = bswap_64(event->read.time_enabled); |
335 | self->read.time_running = bswap_64(self->read.time_running); | 406 | event->read.time_running = bswap_64(event->read.time_running); |
336 | self->read.id = bswap_64(self->read.id); | 407 | event->read.id = bswap_64(event->read.id); |
337 | } | 408 | } |
338 | 409 | ||
339 | static void event__attr_swap(event_t *self) | 410 | static void perf_event__attr_swap(union perf_event *event) |
340 | { | 411 | { |
341 | size_t size; | 412 | size_t size; |
342 | 413 | ||
343 | self->attr.attr.type = bswap_32(self->attr.attr.type); | 414 | event->attr.attr.type = bswap_32(event->attr.attr.type); |
344 | self->attr.attr.size = bswap_32(self->attr.attr.size); | 415 | event->attr.attr.size = bswap_32(event->attr.attr.size); |
345 | self->attr.attr.config = bswap_64(self->attr.attr.config); | 416 | event->attr.attr.config = bswap_64(event->attr.attr.config); |
346 | self->attr.attr.sample_period = bswap_64(self->attr.attr.sample_period); | 417 | event->attr.attr.sample_period = bswap_64(event->attr.attr.sample_period); |
347 | self->attr.attr.sample_type = bswap_64(self->attr.attr.sample_type); | 418 | event->attr.attr.sample_type = bswap_64(event->attr.attr.sample_type); |
348 | self->attr.attr.read_format = bswap_64(self->attr.attr.read_format); | 419 | event->attr.attr.read_format = bswap_64(event->attr.attr.read_format); |
349 | self->attr.attr.wakeup_events = bswap_32(self->attr.attr.wakeup_events); | 420 | event->attr.attr.wakeup_events = bswap_32(event->attr.attr.wakeup_events); |
350 | self->attr.attr.bp_type = bswap_32(self->attr.attr.bp_type); | 421 | event->attr.attr.bp_type = bswap_32(event->attr.attr.bp_type); |
351 | self->attr.attr.bp_addr = bswap_64(self->attr.attr.bp_addr); | 422 | event->attr.attr.bp_addr = bswap_64(event->attr.attr.bp_addr); |
352 | self->attr.attr.bp_len = bswap_64(self->attr.attr.bp_len); | 423 | event->attr.attr.bp_len = bswap_64(event->attr.attr.bp_len); |
353 | 424 | ||
354 | size = self->header.size; | 425 | size = event->header.size; |
355 | size -= (void *)&self->attr.id - (void *)self; | 426 | size -= (void *)&event->attr.id - (void *)event; |
356 | mem_bswap_64(self->attr.id, size); | 427 | mem_bswap_64(event->attr.id, size); |
357 | } | 428 | } |
358 | 429 | ||
359 | static void event__event_type_swap(event_t *self) | 430 | static void perf_event__event_type_swap(union perf_event *event) |
360 | { | 431 | { |
361 | self->event_type.event_type.event_id = | 432 | event->event_type.event_type.event_id = |
362 | bswap_64(self->event_type.event_type.event_id); | 433 | bswap_64(event->event_type.event_type.event_id); |
363 | } | 434 | } |
364 | 435 | ||
365 | static void event__tracing_data_swap(event_t *self) | 436 | static void perf_event__tracing_data_swap(union perf_event *event) |
366 | { | 437 | { |
367 | self->tracing_data.size = bswap_32(self->tracing_data.size); | 438 | event->tracing_data.size = bswap_32(event->tracing_data.size); |
368 | } | 439 | } |
369 | 440 | ||
370 | typedef void (*event__swap_op)(event_t *self); | 441 | typedef void (*perf_event__swap_op)(union perf_event *event); |
371 | 442 | ||
372 | static event__swap_op event__swap_ops[] = { | 443 | static perf_event__swap_op perf_event__swap_ops[] = { |
373 | [PERF_RECORD_MMAP] = event__mmap_swap, | 444 | [PERF_RECORD_MMAP] = perf_event__mmap_swap, |
374 | [PERF_RECORD_COMM] = event__comm_swap, | 445 | [PERF_RECORD_COMM] = perf_event__comm_swap, |
375 | [PERF_RECORD_FORK] = event__task_swap, | 446 | [PERF_RECORD_FORK] = perf_event__task_swap, |
376 | [PERF_RECORD_EXIT] = event__task_swap, | 447 | [PERF_RECORD_EXIT] = perf_event__task_swap, |
377 | [PERF_RECORD_LOST] = event__all64_swap, | 448 | [PERF_RECORD_LOST] = perf_event__all64_swap, |
378 | [PERF_RECORD_READ] = event__read_swap, | 449 | [PERF_RECORD_READ] = perf_event__read_swap, |
379 | [PERF_RECORD_SAMPLE] = event__all64_swap, | 450 | [PERF_RECORD_SAMPLE] = perf_event__all64_swap, |
380 | [PERF_RECORD_HEADER_ATTR] = event__attr_swap, | 451 | [PERF_RECORD_HEADER_ATTR] = perf_event__attr_swap, |
381 | [PERF_RECORD_HEADER_EVENT_TYPE] = event__event_type_swap, | 452 | [PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap, |
382 | [PERF_RECORD_HEADER_TRACING_DATA] = event__tracing_data_swap, | 453 | [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap, |
383 | [PERF_RECORD_HEADER_BUILD_ID] = NULL, | 454 | [PERF_RECORD_HEADER_BUILD_ID] = NULL, |
384 | [PERF_RECORD_HEADER_MAX] = NULL, | 455 | [PERF_RECORD_HEADER_MAX] = NULL, |
385 | }; | 456 | }; |
386 | 457 | ||
387 | struct sample_queue { | 458 | struct sample_queue { |
388 | u64 timestamp; | 459 | u64 timestamp; |
389 | struct sample_event *event; | 460 | u64 file_offset; |
461 | union perf_event *event; | ||
390 | struct list_head list; | 462 | struct list_head list; |
391 | }; | 463 | }; |
392 | 464 | ||
465 | static void perf_session_free_sample_buffers(struct perf_session *session) | ||
466 | { | ||
467 | struct ordered_samples *os = &session->ordered_samples; | ||
468 | |||
469 | while (!list_empty(&os->to_free)) { | ||
470 | struct sample_queue *sq; | ||
471 | |||
472 | sq = list_entry(os->to_free.next, struct sample_queue, list); | ||
473 | list_del(&sq->list); | ||
474 | free(sq); | ||
475 | } | ||
476 | } | ||
477 | |||
478 | static int perf_session_deliver_event(struct perf_session *session, | ||
479 | union perf_event *event, | ||
480 | struct perf_sample *sample, | ||
481 | struct perf_event_ops *ops, | ||
482 | u64 file_offset); | ||
483 | |||
393 | static void flush_sample_queue(struct perf_session *s, | 484 | static void flush_sample_queue(struct perf_session *s, |
394 | struct perf_event_ops *ops) | 485 | struct perf_event_ops *ops) |
395 | { | 486 | { |
396 | struct list_head *head = &s->ordered_samples.samples_head; | 487 | struct ordered_samples *os = &s->ordered_samples; |
397 | u64 limit = s->ordered_samples.next_flush; | 488 | struct list_head *head = &os->samples; |
398 | struct sample_queue *tmp, *iter; | 489 | struct sample_queue *tmp, *iter; |
490 | struct perf_sample sample; | ||
491 | u64 limit = os->next_flush; | ||
492 | u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL; | ||
493 | int ret; | ||
399 | 494 | ||
400 | if (!ops->ordered_samples || !limit) | 495 | if (!ops->ordered_samples || !limit) |
401 | return; | 496 | return; |
402 | 497 | ||
403 | list_for_each_entry_safe(iter, tmp, head, list) { | 498 | list_for_each_entry_safe(iter, tmp, head, list) { |
404 | if (iter->timestamp > limit) | 499 | if (iter->timestamp > limit) |
405 | return; | 500 | break; |
406 | |||
407 | if (iter == s->ordered_samples.last_inserted) | ||
408 | s->ordered_samples.last_inserted = NULL; | ||
409 | 501 | ||
410 | ops->sample((event_t *)iter->event, s); | 502 | ret = perf_session__parse_sample(s, iter->event, &sample); |
503 | if (ret) | ||
504 | pr_err("Can't parse sample, err = %d\n", ret); | ||
505 | else | ||
506 | perf_session_deliver_event(s, iter->event, &sample, ops, | ||
507 | iter->file_offset); | ||
411 | 508 | ||
412 | s->ordered_samples.last_flush = iter->timestamp; | 509 | os->last_flush = iter->timestamp; |
413 | list_del(&iter->list); | 510 | list_del(&iter->list); |
414 | free(iter->event); | 511 | list_add(&iter->list, &os->sample_cache); |
415 | free(iter); | 512 | } |
513 | |||
514 | if (list_empty(head)) { | ||
515 | os->last_sample = NULL; | ||
516 | } else if (last_ts <= limit) { | ||
517 | os->last_sample = | ||
518 | list_entry(head->prev, struct sample_queue, list); | ||
416 | } | 519 | } |
417 | } | 520 | } |
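The rewritten flush loop above walks the time-ordered queue only while entries are at or below ->next_flush, re-parses each queued event into a struct perf_sample, delivers it, and then recycles the node onto ->sample_cache rather than freeing it. A minimal sketch of that drain-and-recycle pattern, using a hypothetical entry type (not the patch's own struct sample_queue) and the same kernel list helpers, could look like:

/* Sketch: drain entries with ts <= limit and recycle the nodes onto a cache list. */
struct entry {
	u64 ts;
	struct list_head list;
};

static void drain(struct list_head *queue, struct list_head *cache, u64 limit)
{
	struct entry *iter, *tmp;

	list_for_each_entry_safe(iter, tmp, queue, list) {
		if (iter->ts > limit)
			break;	/* queue is sorted: nothing later can qualify */
		deliver(iter);			/* hypothetical per-entry callback */
		list_del(&iter->list);
		list_add(&iter->list, cache);	/* reuse the node instead of free() */
	}
}

The real function additionally tracks ->last_flush and re-validates ->last_sample once the queue shrinks; those details are omitted here.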
418 | 521 | ||
@@ -455,7 +558,7 @@ static void flush_sample_queue(struct perf_session *s, | |||
455 | * Flush all events below timestamp 7 | 558 | * Flush all events below timestamp 7 |
456 | * etc... | 559 | * etc... |
457 | */ | 560 | */ |
458 | static int process_finished_round(event_t *event __used, | 561 | static int process_finished_round(union perf_event *event __used, |
459 | struct perf_session *session, | 562 | struct perf_session *session, |
460 | struct perf_event_ops *ops) | 563 | struct perf_event_ops *ops) |
461 | { | 564 | { |
@@ -465,178 +568,277 @@ static int process_finished_round(event_t *event __used, | |||
465 | return 0; | 568 | return 0; |
466 | } | 569 | } |
467 | 570 | ||
468 | static void __queue_sample_end(struct sample_queue *new, struct list_head *head) | ||
469 | { | ||
470 | struct sample_queue *iter; | ||
471 | |||
472 | list_for_each_entry_reverse(iter, head, list) { | ||
473 | if (iter->timestamp < new->timestamp) { | ||
474 | list_add(&new->list, &iter->list); | ||
475 | return; | ||
476 | } | ||
477 | } | ||
478 | |||
479 | list_add(&new->list, head); | ||
480 | } | ||
481 | |||
482 | static void __queue_sample_before(struct sample_queue *new, | ||
483 | struct sample_queue *iter, | ||
484 | struct list_head *head) | ||
485 | { | ||
486 | list_for_each_entry_continue_reverse(iter, head, list) { | ||
487 | if (iter->timestamp < new->timestamp) { | ||
488 | list_add(&new->list, &iter->list); | ||
489 | return; | ||
490 | } | ||
491 | } | ||
492 | |||
493 | list_add(&new->list, head); | ||
494 | } | ||
495 | |||
496 | static void __queue_sample_after(struct sample_queue *new, | ||
497 | struct sample_queue *iter, | ||
498 | struct list_head *head) | ||
499 | { | ||
500 | list_for_each_entry_continue(iter, head, list) { | ||
501 | if (iter->timestamp > new->timestamp) { | ||
502 | list_add_tail(&new->list, &iter->list); | ||
503 | return; | ||
504 | } | ||
505 | } | ||
506 | list_add_tail(&new->list, head); | ||
507 | } | ||
508 | |||
509 | /* The queue is ordered by time */ | 571 | /* The queue is ordered by time */ |
510 | static void __queue_sample_event(struct sample_queue *new, | 572 | static void __queue_event(struct sample_queue *new, struct perf_session *s) |
511 | struct perf_session *s) | ||
512 | { | 573 | { |
513 | struct sample_queue *last_inserted = s->ordered_samples.last_inserted; | 574 | struct ordered_samples *os = &s->ordered_samples; |
514 | struct list_head *head = &s->ordered_samples.samples_head; | 575 | struct sample_queue *sample = os->last_sample; |
576 | u64 timestamp = new->timestamp; | ||
577 | struct list_head *p; | ||
515 | 578 | ||
579 | os->last_sample = new; | ||
516 | 580 | ||
517 | if (!last_inserted) { | 581 | if (!sample) { |
518 | __queue_sample_end(new, head); | 582 | list_add(&new->list, &os->samples); |
583 | os->max_timestamp = timestamp; | ||
519 | return; | 584 | return; |
520 | } | 585 | } |
521 | 586 | ||
522 | /* | 587 | /* |
523 | * Most of the time the current event has a timestamp | 588 | * last_sample might point to some random place in the list as it's |
524 | * very close to the last event inserted, unless we just switched | 589 | * the last queued event. We expect that the new event is close to |
525 | * to another event buffer. Having a sorting based on a list and | 590 | * this. |
526 | * on the last inserted event that is close to the current one is | ||
527 | * probably more efficient than an rbtree based sorting. | ||
528 | */ | 591 | */ |
529 | if (last_inserted->timestamp >= new->timestamp) | 592 | if (sample->timestamp <= timestamp) { |
530 | __queue_sample_before(new, last_inserted, head); | 593 | while (sample->timestamp <= timestamp) { |
531 | else | 594 | p = sample->list.next; |
532 | __queue_sample_after(new, last_inserted, head); | 595 | if (p == &os->samples) { |
596 | list_add_tail(&new->list, &os->samples); | ||
597 | os->max_timestamp = timestamp; | ||
598 | return; | ||
599 | } | ||
600 | sample = list_entry(p, struct sample_queue, list); | ||
601 | } | ||
602 | list_add_tail(&new->list, &sample->list); | ||
603 | } else { | ||
604 | while (sample->timestamp > timestamp) { | ||
605 | p = sample->list.prev; | ||
606 | if (p == &os->samples) { | ||
607 | list_add(&new->list, &os->samples); | ||
608 | return; | ||
609 | } | ||
610 | sample = list_entry(p, struct sample_queue, list); | ||
611 | } | ||
612 | list_add(&new->list, &sample->list); | ||
613 | } | ||
533 | } | 614 | } |
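__queue_event() keeps the pending-sample list sorted by scanning forward or backward from the most recently inserted entry, on the assumption that timestamps arrive nearly in order, so the scan is usually only a few steps. A reduced sketch of that bidirectional insertion (hypothetical entry type, same list primitives; the real code also updates ->max_timestamp):

/* Sketch: insert 'new' into a timestamp-sorted list, starting the scan at 'last'. */
static void insert_near_last(struct entry *new, struct entry *last,
			     struct list_head *head)
{
	struct entry *pos = last;

	if (pos->ts <= new->ts) {
		/* walk forward until we pass 'new' or hit the list head */
		while (&pos->list != head && pos->ts <= new->ts)
			pos = list_entry(pos->list.next, struct entry, list);
		list_add_tail(&new->list, &pos->list);	/* before first later entry */
	} else {
		/* walk backward until we find an entry not later than 'new' */
		while (&pos->list != head && pos->ts > new->ts)
			pos = list_entry(pos->list.prev, struct entry, list);
		list_add(&new->list, &pos->list);	/* after last earlier entry */
	}
}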
534 | 615 | ||
535 | static int queue_sample_event(event_t *event, struct sample_data *data, | 616 | #define MAX_SAMPLE_BUFFER (64 * 1024 / sizeof(struct sample_queue)) |
536 | struct perf_session *s) | 617 | |
618 | static int perf_session_queue_event(struct perf_session *s, union perf_event *event, | ||
619 | struct perf_sample *sample, u64 file_offset) | ||
537 | { | 620 | { |
538 | u64 timestamp = data->time; | 621 | struct ordered_samples *os = &s->ordered_samples; |
622 | struct list_head *sc = &os->sample_cache; | ||
623 | u64 timestamp = sample->time; | ||
539 | struct sample_queue *new; | 624 | struct sample_queue *new; |
540 | 625 | ||
626 | if (!timestamp || timestamp == ~0ULL) | ||
627 | return -ETIME; | ||
541 | 628 | ||
542 | if (timestamp < s->ordered_samples.last_flush) { | 629 | if (timestamp < s->ordered_samples.last_flush) { |
543 | printf("Warning: Timestamp below last timeslice flush\n"); | 630 | printf("Warning: Timestamp below last timeslice flush\n"); |
544 | return -EINVAL; | 631 | return -EINVAL; |
545 | } | 632 | } |
546 | 633 | ||
547 | new = malloc(sizeof(*new)); | 634 | if (!list_empty(sc)) { |
548 | if (!new) | 635 | new = list_entry(sc->next, struct sample_queue, list); |
549 | return -ENOMEM; | 636 | list_del(&new->list); |
637 | } else if (os->sample_buffer) { | ||
638 | new = os->sample_buffer + os->sample_buffer_idx; | ||
639 | if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER) | ||
640 | os->sample_buffer = NULL; | ||
641 | } else { | ||
642 | os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new)); | ||
643 | if (!os->sample_buffer) | ||
644 | return -ENOMEM; | ||
645 | list_add(&os->sample_buffer->list, &os->to_free); | ||
646 | os->sample_buffer_idx = 2; | ||
647 | new = os->sample_buffer + 1; | ||
648 | } | ||
550 | 649 | ||
551 | new->timestamp = timestamp; | 650 | new->timestamp = timestamp; |
651 | new->file_offset = file_offset; | ||
652 | new->event = event; | ||
552 | 653 | ||
553 | new->event = malloc(event->header.size); | 654 | __queue_event(new, s); |
554 | if (!new->event) { | ||
555 | free(new); | ||
556 | return -ENOMEM; | ||
557 | } | ||
558 | 655 | ||
559 | memcpy(new->event, event, event->header.size); | 656 | return 0; |
657 | } | ||
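perf_session_queue_event() avoids a malloc per event: queue nodes are carved out of 64KB chunks (MAX_SAMPLE_BUFFER entries each), previously flushed nodes are reused from ->sample_cache, and slot 0 of every chunk doubles as the list link that keeps the chunk on ->to_free for perf_session_free_sample_buffers(). A sketch of that allocation scheme, with hypothetical names:

/* Sketch: chunked node allocator; slot 0 of each chunk anchors it on a to-free list. */
struct node_alloc {
	struct list_head cache;		/* recycled entries */
	struct list_head to_free;	/* one link per allocated chunk */
	struct entry *chunk;		/* chunk currently being carved up */
	int idx;			/* next free slot in 'chunk' */
};

#define CHUNK_ENTRIES (64 * 1024 / sizeof(struct entry))

static struct entry *node_alloc__get(struct node_alloc *a)
{
	struct entry *e;

	if (!list_empty(&a->cache)) {			/* 1) reuse a flushed entry */
		e = list_entry(a->cache.next, struct entry, list);
		list_del(&e->list);
	} else if (a->chunk) {				/* 2) next slot of the current chunk */
		e = a->chunk + a->idx;
		if (++a->idx == CHUNK_ENTRIES)
			a->chunk = NULL;
	} else {					/* 3) allocate a fresh chunk */
		a->chunk = malloc(CHUNK_ENTRIES * sizeof(*e));
		if (!a->chunk)
			return NULL;
		list_add(&a->chunk->list, &a->to_free);	/* slot 0 links the chunk */
		a->idx = 2;
		e = a->chunk + 1;			/* slot 1 is the first usable entry */
	}
	return e;
}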
560 | 658 | ||
561 | __queue_sample_event(new, s); | 659 | static void callchain__printf(struct perf_sample *sample) |
562 | s->ordered_samples.last_inserted = new; | 660 | { |
661 | unsigned int i; | ||
563 | 662 | ||
564 | if (new->timestamp > s->ordered_samples.max_timestamp) | 663 | printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr); |
565 | s->ordered_samples.max_timestamp = new->timestamp; | ||
566 | 664 | ||
567 | return 0; | 665 | for (i = 0; i < sample->callchain->nr; i++) |
666 | printf("..... %2d: %016" PRIx64 "\n", | ||
667 | i, sample->callchain->ips[i]); | ||
568 | } | 668 | } |
569 | 669 | ||
570 | static int perf_session__process_sample(event_t *event, struct perf_session *s, | 670 | static void perf_session__print_tstamp(struct perf_session *session, |
571 | struct perf_event_ops *ops) | 671 | union perf_event *event, |
672 | struct perf_sample *sample) | ||
572 | { | 673 | { |
573 | struct sample_data data; | 674 | if (event->header.type != PERF_RECORD_SAMPLE && |
675 | !session->sample_id_all) { | ||
676 | fputs("-1 -1 ", stdout); | ||
677 | return; | ||
678 | } | ||
574 | 679 | ||
575 | if (!ops->ordered_samples) | 680 | if ((session->sample_type & PERF_SAMPLE_CPU)) |
576 | return ops->sample(event, s); | 681 | printf("%u ", sample->cpu); |
577 | 682 | ||
578 | bzero(&data, sizeof(struct sample_data)); | 683 | if (session->sample_type & PERF_SAMPLE_TIME) |
579 | event__parse_sample(event, s->sample_type, &data); | 684 | printf("%" PRIu64 " ", sample->time); |
685 | } | ||
580 | 686 | ||
581 | queue_sample_event(event, &data, s); | 687 | static void dump_event(struct perf_session *session, union perf_event *event, |
688 | u64 file_offset, struct perf_sample *sample) | ||
689 | { | ||
690 | if (!dump_trace) | ||
691 | return; | ||
582 | 692 | ||
583 | return 0; | 693 | printf("\n%#" PRIx64 " [%#x]: event: %d\n", |
694 | file_offset, event->header.size, event->header.type); | ||
695 | |||
696 | trace_event(event); | ||
697 | |||
698 | if (sample) | ||
699 | perf_session__print_tstamp(session, event, sample); | ||
700 | |||
701 | printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset, | ||
702 | event->header.size, perf_event__name(event->header.type)); | ||
584 | } | 703 | } |
585 | 704 | ||
586 | static int perf_session__process_event(struct perf_session *self, | 705 | static void dump_sample(struct perf_session *session, union perf_event *event, |
587 | event_t *event, | 706 | struct perf_sample *sample) |
588 | struct perf_event_ops *ops, | ||
589 | u64 offset, u64 head) | ||
590 | { | 707 | { |
591 | trace_event(event); | 708 | if (!dump_trace) |
709 | return; | ||
592 | 710 | ||
593 | if (event->header.type < PERF_RECORD_HEADER_MAX) { | 711 | printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 "\n", |
594 | dump_printf("%#Lx [%#x]: PERF_RECORD_%s", | 712 | event->header.misc, sample->pid, sample->tid, sample->ip, |
595 | offset + head, event->header.size, | 713 | sample->period); |
596 | event__name[event->header.type]); | ||
597 | hists__inc_nr_events(&self->hists, event->header.type); | ||
598 | } | ||
599 | 714 | ||
600 | if (self->header.needs_swap && event__swap_ops[event->header.type]) | 715 | if (session->sample_type & PERF_SAMPLE_CALLCHAIN) |
601 | event__swap_ops[event->header.type](event); | 716 | callchain__printf(sample); |
717 | } | ||
718 | |||
719 | static int perf_session_deliver_event(struct perf_session *session, | ||
720 | union perf_event *event, | ||
721 | struct perf_sample *sample, | ||
722 | struct perf_event_ops *ops, | ||
723 | u64 file_offset) | ||
724 | { | ||
725 | struct perf_evsel *evsel; | ||
726 | |||
727 | dump_event(session, event, file_offset, sample); | ||
602 | 728 | ||
603 | switch (event->header.type) { | 729 | switch (event->header.type) { |
604 | case PERF_RECORD_SAMPLE: | 730 | case PERF_RECORD_SAMPLE: |
605 | return perf_session__process_sample(event, self, ops); | 731 | dump_sample(session, event, sample); |
732 | evsel = perf_evlist__id2evsel(session->evlist, sample->id); | ||
733 | if (evsel == NULL) { | ||
734 | ++session->hists.stats.nr_unknown_id; | ||
735 | return -1; | ||
736 | } | ||
737 | return ops->sample(event, sample, evsel, session); | ||
606 | case PERF_RECORD_MMAP: | 738 | case PERF_RECORD_MMAP: |
607 | return ops->mmap(event, self); | 739 | return ops->mmap(event, sample, session); |
608 | case PERF_RECORD_COMM: | 740 | case PERF_RECORD_COMM: |
609 | return ops->comm(event, self); | 741 | return ops->comm(event, sample, session); |
610 | case PERF_RECORD_FORK: | 742 | case PERF_RECORD_FORK: |
611 | return ops->fork(event, self); | 743 | return ops->fork(event, sample, session); |
612 | case PERF_RECORD_EXIT: | 744 | case PERF_RECORD_EXIT: |
613 | return ops->exit(event, self); | 745 | return ops->exit(event, sample, session); |
614 | case PERF_RECORD_LOST: | 746 | case PERF_RECORD_LOST: |
615 | return ops->lost(event, self); | 747 | return ops->lost(event, sample, session); |
616 | case PERF_RECORD_READ: | 748 | case PERF_RECORD_READ: |
617 | return ops->read(event, self); | 749 | return ops->read(event, sample, session); |
618 | case PERF_RECORD_THROTTLE: | 750 | case PERF_RECORD_THROTTLE: |
619 | return ops->throttle(event, self); | 751 | return ops->throttle(event, sample, session); |
620 | case PERF_RECORD_UNTHROTTLE: | 752 | case PERF_RECORD_UNTHROTTLE: |
621 | return ops->unthrottle(event, self); | 753 | return ops->unthrottle(event, sample, session); |
754 | default: | ||
755 | ++session->hists.stats.nr_unknown_events; | ||
756 | return -1; | ||
757 | } | ||
758 | } | ||
759 | |||
760 | static int perf_session__preprocess_sample(struct perf_session *session, | ||
761 | union perf_event *event, struct perf_sample *sample) | ||
762 | { | ||
763 | if (event->header.type != PERF_RECORD_SAMPLE || | ||
764 | !(session->sample_type & PERF_SAMPLE_CALLCHAIN)) | ||
765 | return 0; | ||
766 | |||
767 | if (!ip_callchain__valid(sample->callchain, event)) { | ||
768 | pr_debug("call-chain problem with event, skipping it.\n"); | ||
769 | ++session->hists.stats.nr_invalid_chains; | ||
770 | session->hists.stats.total_invalid_chains += sample->period; | ||
771 | return -EINVAL; | ||
772 | } | ||
773 | return 0; | ||
774 | } | ||
775 | |||
776 | static int perf_session__process_user_event(struct perf_session *session, union perf_event *event, | ||
777 | struct perf_event_ops *ops, u64 file_offset) | ||
778 | { | ||
779 | dump_event(session, event, file_offset, NULL); | ||
780 | |||
781 | /* These events are processed right away */ | ||
782 | switch (event->header.type) { | ||
622 | case PERF_RECORD_HEADER_ATTR: | 783 | case PERF_RECORD_HEADER_ATTR: |
623 | return ops->attr(event, self); | 784 | return ops->attr(event, session); |
624 | case PERF_RECORD_HEADER_EVENT_TYPE: | 785 | case PERF_RECORD_HEADER_EVENT_TYPE: |
625 | return ops->event_type(event, self); | 786 | return ops->event_type(event, session); |
626 | case PERF_RECORD_HEADER_TRACING_DATA: | 787 | case PERF_RECORD_HEADER_TRACING_DATA: |
627 | /* setup for reading amidst mmap */ | 788 | /* setup for reading amidst mmap */ |
628 | lseek(self->fd, offset + head, SEEK_SET); | 789 | lseek(session->fd, file_offset, SEEK_SET); |
629 | return ops->tracing_data(event, self); | 790 | return ops->tracing_data(event, session); |
630 | case PERF_RECORD_HEADER_BUILD_ID: | 791 | case PERF_RECORD_HEADER_BUILD_ID: |
631 | return ops->build_id(event, self); | 792 | return ops->build_id(event, session); |
632 | case PERF_RECORD_FINISHED_ROUND: | 793 | case PERF_RECORD_FINISHED_ROUND: |
633 | return ops->finished_round(event, self, ops); | 794 | return ops->finished_round(event, session, ops); |
634 | default: | 795 | default: |
635 | ++self->hists.stats.nr_unknown_events; | 796 | return -EINVAL; |
636 | return -1; | ||
637 | } | 797 | } |
638 | } | 798 | } |
639 | 799 | ||
800 | static int perf_session__process_event(struct perf_session *session, | ||
801 | union perf_event *event, | ||
802 | struct perf_event_ops *ops, | ||
803 | u64 file_offset) | ||
804 | { | ||
805 | struct perf_sample sample; | ||
806 | int ret; | ||
807 | |||
808 | if (session->header.needs_swap && | ||
809 | perf_event__swap_ops[event->header.type]) | ||
810 | perf_event__swap_ops[event->header.type](event); | ||
811 | |||
812 | if (event->header.type >= PERF_RECORD_HEADER_MAX) | ||
813 | return -EINVAL; | ||
814 | |||
815 | hists__inc_nr_events(&session->hists, event->header.type); | ||
816 | |||
817 | if (event->header.type >= PERF_RECORD_USER_TYPE_START) | ||
818 | return perf_session__process_user_event(session, event, ops, file_offset); | ||
819 | |||
820 | /* | ||
821 | * For all kernel events we get the sample data | ||
822 | */ | ||
823 | ret = perf_session__parse_sample(session, event, &sample); | ||
824 | if (ret) | ||
825 | return ret; | ||
826 | |||
827 | /* Preprocess sample records - precheck callchains */ | ||
828 | if (perf_session__preprocess_sample(session, event, &sample)) | ||
829 | return 0; | ||
830 | |||
831 | if (ops->ordered_samples) { | ||
832 | ret = perf_session_queue_event(session, event, &sample, | ||
833 | file_offset); | ||
834 | if (ret != -ETIME) | ||
835 | return ret; | ||
836 | } | ||
837 | |||
838 | return perf_session_deliver_event(session, event, &sample, ops, | ||
839 | file_offset); | ||
840 | } | ||
841 | |||
640 | void perf_event_header__bswap(struct perf_event_header *self) | 842 | void perf_event_header__bswap(struct perf_event_header *self) |
641 | { | 843 | { |
642 | self->type = bswap_32(self->type); | 844 | self->type = bswap_32(self->type); |
@@ -656,21 +858,38 @@ static struct thread *perf_session__register_idle_thread(struct perf_session *se | |||
656 | return thread; | 858 | return thread; |
657 | } | 859 | } |
658 | 860 | ||
659 | int do_read(int fd, void *buf, size_t size) | 861 | static void perf_session__warn_about_errors(const struct perf_session *session, |
862 | const struct perf_event_ops *ops) | ||
660 | { | 863 | { |
661 | void *buf_start = buf; | 864 | if (ops->lost == perf_event__process_lost && |
662 | 865 | session->hists.stats.total_lost != 0) { | |
663 | while (size) { | 866 | ui__warning("Processed %" PRIu64 " events and LOST %" PRIu64 |
664 | int ret = read(fd, buf, size); | 867 | "!\n\nCheck IO/CPU overload!\n\n", |
868 | session->hists.stats.total_period, | ||
869 | session->hists.stats.total_lost); | ||
870 | } | ||
665 | 871 | ||
666 | if (ret <= 0) | 872 | if (session->hists.stats.nr_unknown_events != 0) { |
667 | return ret; | 873 | ui__warning("Found %u unknown events!\n\n" |
874 | "Is this an older tool processing a perf.data " | ||
875 | "file generated by a more recent tool?\n\n" | ||
876 | "If that is not the case, consider " | ||
877 | "reporting to linux-kernel@vger.kernel.org.\n\n", | ||
878 | session->hists.stats.nr_unknown_events); | ||
879 | } | ||
668 | 880 | ||
669 | size -= ret; | 881 | if (session->hists.stats.nr_unknown_id != 0) { |
670 | buf += ret; | 882 | ui__warning("%u samples with id not present in the header\n", |
883 | session->hists.stats.nr_unknown_id); | ||
671 | } | 884 | } |
672 | 885 | ||
673 | return buf - buf_start; | 886 | if (session->hists.stats.nr_invalid_chains != 0) { |
887 | ui__warning("Found invalid callchains!\n\n" | ||
888 | "%u out of %u events were discarded for this reason.\n\n" | ||
889 | "Consider reporting to linux-kernel@vger.kernel.org.\n\n", | ||
890 | session->hists.stats.nr_invalid_chains, | ||
891 | session->hists.stats.nr_events[PERF_RECORD_SAMPLE]); | ||
892 | } | ||
674 | } | 893 | } |
675 | 894 | ||
676 | #define session_done() (*(volatile int *)(&session_done)) | 895 | #define session_done() (*(volatile int *)(&session_done)) |
@@ -679,7 +898,7 @@ volatile int session_done; | |||
679 | static int __perf_session__process_pipe_events(struct perf_session *self, | 898 | static int __perf_session__process_pipe_events(struct perf_session *self, |
680 | struct perf_event_ops *ops) | 899 | struct perf_event_ops *ops) |
681 | { | 900 | { |
682 | event_t event; | 901 | union perf_event event; |
683 | uint32_t size; | 902 | uint32_t size; |
684 | int skip = 0; | 903 | int skip = 0; |
685 | u64 head; | 904 | u64 head; |
@@ -690,7 +909,7 @@ static int __perf_session__process_pipe_events(struct perf_session *self, | |||
690 | 909 | ||
691 | head = 0; | 910 | head = 0; |
692 | more: | 911 | more: |
693 | err = do_read(self->fd, &event, sizeof(struct perf_event_header)); | 912 | err = readn(self->fd, &event, sizeof(struct perf_event_header)); |
694 | if (err <= 0) { | 913 | if (err <= 0) { |
695 | if (err == 0) | 914 | if (err == 0) |
696 | goto done; | 915 | goto done; |
@@ -710,8 +929,7 @@ more: | |||
710 | p += sizeof(struct perf_event_header); | 929 | p += sizeof(struct perf_event_header); |
711 | 930 | ||
712 | if (size - sizeof(struct perf_event_header)) { | 931 | if (size - sizeof(struct perf_event_header)) { |
713 | err = do_read(self->fd, p, | 932 | err = readn(self->fd, p, size - sizeof(struct perf_event_header)); |
714 | size - sizeof(struct perf_event_header)); | ||
715 | if (err <= 0) { | 933 | if (err <= 0) { |
716 | if (err == 0) { | 934 | if (err == 0) { |
717 | pr_err("unexpected end of event stream\n"); | 935 | pr_err("unexpected end of event stream\n"); |
@@ -724,9 +942,8 @@ more: | |||
724 | } | 942 | } |
725 | 943 | ||
726 | if (size == 0 || | 944 | if (size == 0 || |
727 | (skip = perf_session__process_event(self, &event, ops, | 945 | (skip = perf_session__process_event(self, &event, ops, head)) < 0) { |
728 | 0, head)) < 0) { | 946 | dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n", |
729 | dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n", | ||
730 | head, event.header.size, event.header.type); | 947 | head, event.header.size, event.header.type); |
731 | /* | 948 | /* |
732 | * assume we lost track of the stream, check alignment, and | 949 | * assume we lost track of the stream, check alignment, and |
@@ -740,9 +957,6 @@ more: | |||
740 | 957 | ||
741 | head += size; | 958 | head += size; |
742 | 959 | ||
743 | dump_printf("\n%#Lx [%#x]: event: %d\n", | ||
744 | head, event.header.size, event.header.type); | ||
745 | |||
746 | if (skip > 0) | 960 | if (skip > 0) |
747 | head += skip; | 961 | head += skip; |
748 | 962 | ||
@@ -751,82 +965,108 @@ more: | |||
751 | done: | 965 | done: |
752 | err = 0; | 966 | err = 0; |
753 | out_err: | 967 | out_err: |
968 | perf_session__warn_about_errors(self, ops); | ||
969 | perf_session_free_sample_buffers(self); | ||
754 | return err; | 970 | return err; |
755 | } | 971 | } |
756 | 972 | ||
757 | int __perf_session__process_events(struct perf_session *self, | 973 | static union perf_event * |
974 | fetch_mmaped_event(struct perf_session *session, | ||
975 | u64 head, size_t mmap_size, char *buf) | ||
976 | { | ||
977 | union perf_event *event; | ||
978 | |||
979 | /* | ||
980 | * Ensure we have enough space remaining to read | ||
981 | * the size of the event in the headers. | ||
982 | */ | ||
983 | if (head + sizeof(event->header) > mmap_size) | ||
984 | return NULL; | ||
985 | |||
986 | event = (union perf_event *)(buf + head); | ||
987 | |||
988 | if (session->header.needs_swap) | ||
989 | perf_event_header__bswap(&event->header); | ||
990 | |||
991 | if (head + event->header.size > mmap_size) | ||
992 | return NULL; | ||
993 | |||
994 | return event; | ||
995 | } | ||
996 | |||
997 | int __perf_session__process_events(struct perf_session *session, | ||
758 | u64 data_offset, u64 data_size, | 998 | u64 data_offset, u64 data_size, |
759 | u64 file_size, struct perf_event_ops *ops) | 999 | u64 file_size, struct perf_event_ops *ops) |
760 | { | 1000 | { |
761 | int err, mmap_prot, mmap_flags; | 1001 | u64 head, page_offset, file_offset, file_pos, progress_next; |
762 | u64 head, shift; | 1002 | int err, mmap_prot, mmap_flags, map_idx = 0; |
763 | u64 offset = 0; | 1003 | struct ui_progress *progress; |
764 | size_t page_size; | 1004 | size_t page_size, mmap_size; |
765 | event_t *event; | 1005 | char *buf, *mmaps[8]; |
1006 | union perf_event *event; | ||
766 | uint32_t size; | 1007 | uint32_t size; |
767 | char *buf; | ||
768 | struct ui_progress *progress = ui_progress__new("Processing events...", | ||
769 | self->size); | ||
770 | if (progress == NULL) | ||
771 | return -1; | ||
772 | 1008 | ||
773 | perf_event_ops__fill_defaults(ops); | 1009 | perf_event_ops__fill_defaults(ops); |
774 | 1010 | ||
775 | page_size = sysconf(_SC_PAGESIZE); | 1011 | page_size = sysconf(_SC_PAGESIZE); |
776 | 1012 | ||
777 | head = data_offset; | 1013 | page_offset = page_size * (data_offset / page_size); |
778 | shift = page_size * (head / page_size); | 1014 | file_offset = page_offset; |
779 | offset += shift; | 1015 | head = data_offset - page_offset; |
780 | head -= shift; | 1016 | |
1017 | if (data_offset + data_size < file_size) | ||
1018 | file_size = data_offset + data_size; | ||
1019 | |||
1020 | progress_next = file_size / 16; | ||
1021 | progress = ui_progress__new("Processing events...", file_size); | ||
1022 | if (progress == NULL) | ||
1023 | return -1; | ||
1024 | |||
1025 | mmap_size = session->mmap_window; | ||
1026 | if (mmap_size > file_size) | ||
1027 | mmap_size = file_size; | ||
1028 | |||
1029 | memset(mmaps, 0, sizeof(mmaps)); | ||
781 | 1030 | ||
782 | mmap_prot = PROT_READ; | 1031 | mmap_prot = PROT_READ; |
783 | mmap_flags = MAP_SHARED; | 1032 | mmap_flags = MAP_SHARED; |
784 | 1033 | ||
785 | if (self->header.needs_swap) { | 1034 | if (session->header.needs_swap) { |
786 | mmap_prot |= PROT_WRITE; | 1035 | mmap_prot |= PROT_WRITE; |
787 | mmap_flags = MAP_PRIVATE; | 1036 | mmap_flags = MAP_PRIVATE; |
788 | } | 1037 | } |
789 | remap: | 1038 | remap: |
790 | buf = mmap(NULL, page_size * self->mmap_window, mmap_prot, | 1039 | buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, session->fd, |
791 | mmap_flags, self->fd, offset); | 1040 | file_offset); |
792 | if (buf == MAP_FAILED) { | 1041 | if (buf == MAP_FAILED) { |
793 | pr_err("failed to mmap file\n"); | 1042 | pr_err("failed to mmap file\n"); |
794 | err = -errno; | 1043 | err = -errno; |
795 | goto out_err; | 1044 | goto out_err; |
796 | } | 1045 | } |
1046 | mmaps[map_idx] = buf; | ||
1047 | map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1); | ||
1048 | file_pos = file_offset + head; | ||
797 | 1049 | ||
798 | more: | 1050 | more: |
799 | event = (event_t *)(buf + head); | 1051 | event = fetch_mmaped_event(session, head, mmap_size, buf); |
800 | ui_progress__update(progress, offset); | 1052 | if (!event) { |
801 | 1053 | if (mmaps[map_idx]) { | |
802 | if (self->header.needs_swap) | 1054 | munmap(mmaps[map_idx], mmap_size); |
803 | perf_event_header__bswap(&event->header); | 1055 | mmaps[map_idx] = NULL; |
804 | size = event->header.size; | 1056 | } |
805 | if (size == 0) | ||
806 | size = 8; | ||
807 | |||
808 | if (head + event->header.size >= page_size * self->mmap_window) { | ||
809 | int munmap_ret; | ||
810 | |||
811 | shift = page_size * (head / page_size); | ||
812 | |||
813 | munmap_ret = munmap(buf, page_size * self->mmap_window); | ||
814 | assert(munmap_ret == 0); | ||
815 | 1057 | ||
816 | offset += shift; | 1058 | page_offset = page_size * (head / page_size); |
817 | head -= shift; | 1059 | file_offset += page_offset; |
1060 | head -= page_offset; | ||
818 | goto remap; | 1061 | goto remap; |
819 | } | 1062 | } |
820 | 1063 | ||
821 | size = event->header.size; | 1064 | size = event->header.size; |
822 | 1065 | ||
823 | dump_printf("\n%#Lx [%#x]: event: %d\n", | ||
824 | offset + head, event->header.size, event->header.type); | ||
825 | |||
826 | if (size == 0 || | 1066 | if (size == 0 || |
827 | perf_session__process_event(self, event, ops, offset, head) < 0) { | 1067 | perf_session__process_event(session, event, ops, file_pos) < 0) { |
828 | dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n", | 1068 | dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n", |
829 | offset + head, event->header.size, | 1069 | file_offset + head, event->header.size, |
830 | event->header.type); | 1070 | event->header.type); |
831 | /* | 1071 | /* |
832 | * assume we lost track of the stream, check alignment, and | 1072 | * assume we lost track of the stream, check alignment, and |
@@ -839,19 +1079,24 @@ more: | |||
839 | } | 1079 | } |
840 | 1080 | ||
841 | head += size; | 1081 | head += size; |
1082 | file_pos += size; | ||
842 | 1083 | ||
843 | if (offset + head >= data_offset + data_size) | 1084 | if (file_pos >= progress_next) { |
844 | goto done; | 1085 | progress_next += file_size / 16; |
1086 | ui_progress__update(progress, file_pos); | ||
1087 | } | ||
845 | 1088 | ||
846 | if (offset + head < file_size) | 1089 | if (file_pos < file_size) |
847 | goto more; | 1090 | goto more; |
848 | done: | 1091 | |
849 | err = 0; | 1092 | err = 0; |
850 | /* do the final flush for ordered samples */ | 1093 | /* do the final flush for ordered samples */ |
851 | self->ordered_samples.next_flush = ULLONG_MAX; | 1094 | session->ordered_samples.next_flush = ULLONG_MAX; |
852 | flush_sample_queue(self, ops); | 1095 | flush_sample_queue(session, ops); |
853 | out_err: | 1096 | out_err: |
854 | ui_progress__delete(progress); | 1097 | ui_progress__delete(progress); |
1098 | perf_session__warn_about_errors(session, ops); | ||
1099 | perf_session_free_sample_buffers(session); | ||
855 | return err; | 1100 | return err; |
856 | } | 1101 | } |
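The on-disk reader now walks the file through a sliding, page-aligned mmap window: fetch_mmaped_event() returns NULL as soon as a record header or body would cross the window's end, at which point the window is advanced and the read restarted, and up to eight old mappings are parked in mmaps[] so a record still being referenced by the ordered-samples queue is not unmapped underneath it. A condensed sketch of the window-advance logic, with hypothetical remap_window()/process_record() helpers standing in for the mmap bookkeeping and event dispatch (the real loop also byte-swaps headers when needed and treats size == 0 as corruption):

/* Sketch: walk variable-length records through a sliding, page-aligned mmap window. */
static int walk_records(int fd, u64 data_offset, u64 file_size,
			size_t page_size, size_t mmap_size)
{
	u64 page_offset = page_size * (data_offset / page_size);
	u64 file_offset = page_offset;
	u64 head = data_offset - page_offset;
	char *buf = remap_window(fd, mmap_size, file_offset);	/* hypothetical */

	while (file_offset + head < file_size) {
		struct perf_event_header *hdr = (void *)(buf + head);

		if (head + sizeof(*hdr) > mmap_size ||
		    head + hdr->size > mmap_size) {
			/* Record would cross the window: slide it forward. */
			page_offset = page_size * (head / page_size);
			file_offset += page_offset;
			head -= page_offset;
			buf = remap_window(fd, mmap_size, file_offset);
			continue;
		}

		process_record(hdr, file_offset + head);	/* hypothetical */
		head += hdr->size;
	}
	return 0;
}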
857 | 1102 | ||
@@ -929,3 +1174,91 @@ size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp, | |||
929 | size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits); | 1174 | size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits); |
930 | return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits); | 1175 | return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits); |
931 | } | 1176 | } |
1177 | |||
1178 | size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp) | ||
1179 | { | ||
1180 | struct perf_evsel *pos; | ||
1181 | size_t ret = fprintf(fp, "Aggregated stats:\n"); | ||
1182 | |||
1183 | ret += hists__fprintf_nr_events(&session->hists, fp); | ||
1184 | |||
1185 | list_for_each_entry(pos, &session->evlist->entries, node) { | ||
1186 | ret += fprintf(fp, "%s stats:\n", event_name(pos)); | ||
1187 | ret += hists__fprintf_nr_events(&pos->hists, fp); | ||
1188 | } | ||
1189 | |||
1190 | return ret; | ||
1191 | } | ||
1192 | |||
1193 | struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, | ||
1194 | unsigned int type) | ||
1195 | { | ||
1196 | struct perf_evsel *pos; | ||
1197 | |||
1198 | list_for_each_entry(pos, &session->evlist->entries, node) { | ||
1199 | if (pos->attr.type == type) | ||
1200 | return pos; | ||
1201 | } | ||
1202 | return NULL; | ||
1203 | } | ||
1204 | |||
1205 | void perf_session__print_symbols(union perf_event *event, | ||
1206 | struct perf_sample *sample, | ||
1207 | struct perf_session *session) | ||
1208 | { | ||
1209 | struct addr_location al; | ||
1210 | const char *symname, *dsoname; | ||
1211 | struct callchain_cursor *cursor = &session->callchain_cursor; | ||
1212 | struct callchain_cursor_node *node; | ||
1213 | |||
1214 | if (perf_event__preprocess_sample(event, session, &al, sample, | ||
1215 | NULL) < 0) { | ||
1216 | error("problem processing %d event, skipping it.\n", | ||
1217 | event->header.type); | ||
1218 | return; | ||
1219 | } | ||
1220 | |||
1221 | if (symbol_conf.use_callchain && sample->callchain) { | ||
1222 | |||
1223 | if (perf_session__resolve_callchain(session, al.thread, | ||
1224 | sample->callchain, NULL) != 0) { | ||
1225 | if (verbose) | ||
1226 | error("Failed to resolve callchain. Skipping\n"); | ||
1227 | return; | ||
1228 | } | ||
1229 | callchain_cursor_commit(cursor); | ||
1230 | |||
1231 | while (1) { | ||
1232 | node = callchain_cursor_current(cursor); | ||
1233 | if (!node) | ||
1234 | break; | ||
1235 | |||
1236 | if (node->sym && node->sym->name) | ||
1237 | symname = node->sym->name; | ||
1238 | else | ||
1239 | symname = ""; | ||
1240 | |||
1241 | if (node->map && node->map->dso && node->map->dso->name) | ||
1242 | dsoname = node->map->dso->name; | ||
1243 | else | ||
1244 | dsoname = ""; | ||
1245 | |||
1246 | printf("\t%16" PRIx64 " %s (%s)\n", node->ip, symname, dsoname); | ||
1247 | |||
1248 | callchain_cursor_advance(cursor); | ||
1249 | } | ||
1250 | |||
1251 | } else { | ||
1252 | if (al.sym && al.sym->name) | ||
1253 | symname = al.sym->name; | ||
1254 | else | ||
1255 | symname = ""; | ||
1256 | |||
1257 | if (al.map && al.map->dso && al.map->dso->name) | ||
1258 | dsoname = al.map->dso->name; | ||
1259 | else | ||
1260 | dsoname = ""; | ||
1261 | |||
1262 | printf("%16" PRIx64 " %s (%s)", al.addr, symname, dsoname); | ||
1263 | } | ||
1264 | } | ||
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h index 9fa0fc2a863f..66d4e1490879 100644 --- a/tools/perf/util/session.h +++ b/tools/perf/util/session.h | |||
@@ -17,8 +17,12 @@ struct ordered_samples { | |||
17 | u64 last_flush; | 17 | u64 last_flush; |
18 | u64 next_flush; | 18 | u64 next_flush; |
19 | u64 max_timestamp; | 19 | u64 max_timestamp; |
20 | struct list_head samples_head; | 20 | struct list_head samples; |
21 | struct sample_queue *last_inserted; | 21 | struct list_head sample_cache; |
22 | struct list_head to_free; | ||
23 | struct sample_queue *sample_buffer; | ||
24 | struct sample_queue *last_sample; | ||
25 | int sample_buffer_idx; | ||
22 | }; | 26 | }; |
23 | 27 | ||
24 | struct perf_session { | 28 | struct perf_session { |
@@ -30,49 +34,62 @@ struct perf_session { | |||
30 | struct thread *last_match; | 34 | struct thread *last_match; |
31 | struct machine host_machine; | 35 | struct machine host_machine; |
32 | struct rb_root machines; | 36 | struct rb_root machines; |
33 | struct rb_root hists_tree; | 37 | struct perf_evlist *evlist; |
34 | /* | 38 | /* |
35 | * FIXME: should point to the first entry in hists_tree and | 39 | * FIXME: Need to split this up further, we need global |
36 | * be a hists instance. Right now its only 'report' | 40 | * stats + per event stats. 'perf diff' also needs |
37 | * that is using ->hists_tree while all the rest use | 41 | * to properly support multiple events in a single |
38 | * ->hists. | 42 | * perf.data file. |
39 | */ | 43 | */ |
40 | struct hists hists; | 44 | struct hists hists; |
41 | u64 sample_type; | 45 | u64 sample_type; |
46 | int sample_size; | ||
42 | int fd; | 47 | int fd; |
43 | bool fd_pipe; | 48 | bool fd_pipe; |
44 | bool repipe; | 49 | bool repipe; |
50 | bool sample_id_all; | ||
51 | u16 id_hdr_size; | ||
45 | int cwdlen; | 52 | int cwdlen; |
46 | char *cwd; | 53 | char *cwd; |
47 | struct ordered_samples ordered_samples; | 54 | struct ordered_samples ordered_samples; |
48 | char filename[0]; | 55 | struct callchain_cursor callchain_cursor; |
56 | char filename[0]; | ||
49 | }; | 57 | }; |
50 | 58 | ||
59 | struct perf_evsel; | ||
51 | struct perf_event_ops; | 60 | struct perf_event_ops; |
52 | 61 | ||
53 | typedef int (*event_op)(event_t *self, struct perf_session *session); | 62 | typedef int (*event_sample)(union perf_event *event, struct perf_sample *sample, |
54 | typedef int (*event_op2)(event_t *self, struct perf_session *session, | 63 | struct perf_evsel *evsel, struct perf_session *session); |
64 | typedef int (*event_op)(union perf_event *self, struct perf_sample *sample, | ||
65 | struct perf_session *session); | ||
66 | typedef int (*event_synth_op)(union perf_event *self, | ||
67 | struct perf_session *session); | ||
68 | typedef int (*event_op2)(union perf_event *self, struct perf_session *session, | ||
55 | struct perf_event_ops *ops); | 69 | struct perf_event_ops *ops); |
56 | 70 | ||
57 | struct perf_event_ops { | 71 | struct perf_event_ops { |
58 | event_op sample, | 72 | event_sample sample; |
59 | mmap, | 73 | event_op mmap, |
60 | comm, | 74 | comm, |
61 | fork, | 75 | fork, |
62 | exit, | 76 | exit, |
63 | lost, | 77 | lost, |
64 | read, | 78 | read, |
65 | throttle, | 79 | throttle, |
66 | unthrottle, | 80 | unthrottle; |
67 | attr, | 81 | event_synth_op attr, |
68 | event_type, | 82 | event_type, |
69 | tracing_data, | 83 | tracing_data, |
70 | build_id; | 84 | build_id; |
71 | event_op2 finished_round; | 85 | event_op2 finished_round; |
72 | bool ordered_samples; | 86 | bool ordered_samples; |
87 | bool ordering_requires_timestamps; | ||
73 | }; | 88 | }; |
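With the reworked callback signatures, a tool hands its per-record handlers to the session layer through struct perf_event_ops and lets it parse samples and, when ->ordered_samples is set, deliver them in timestamp order. A minimal, illustrative consumer is sketched below; the handler body, the O_RDONLY mode value and the reliance on perf_event_ops__fill_defaults() to supply the callbacks left unset are assumptions, not part of this patch:

static int my_sample(union perf_event *event __used, struct perf_sample *sample,
		     struct perf_evsel *evsel __used, struct perf_session *session __used)
{
	printf("sample: pid=%d time=%" PRIu64 "\n", sample->pid, sample->time);
	return 0;
}

static struct perf_event_ops my_ops = {
	.sample		 = my_sample,
	.ordered_samples = true,	/* queue and re-sort by timestamp */
};

/* ... */
struct perf_session *session = perf_session__new("perf.data", O_RDONLY,
						 false, false, &my_ops);
if (session != NULL) {
	perf_session__process_events(session, &my_ops);
	perf_session__delete(session);
}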
74 | 89 | ||
75 | struct perf_session *perf_session__new(const char *filename, int mode, bool force, bool repipe); | 90 | struct perf_session *perf_session__new(const char *filename, int mode, |
91 | bool force, bool repipe, | ||
92 | struct perf_event_ops *ops); | ||
76 | void perf_session__delete(struct perf_session *self); | 93 | void perf_session__delete(struct perf_session *self); |
77 | 94 | ||
78 | void perf_event_header__bswap(struct perf_event_header *self); | 95 | void perf_event_header__bswap(struct perf_event_header *self); |
@@ -83,10 +100,10 @@ int __perf_session__process_events(struct perf_session *self, | |||
83 | int perf_session__process_events(struct perf_session *self, | 100 | int perf_session__process_events(struct perf_session *self, |
84 | struct perf_event_ops *event_ops); | 101 | struct perf_event_ops *event_ops); |
85 | 102 | ||
86 | struct map_symbol *perf_session__resolve_callchain(struct perf_session *self, | 103 | int perf_session__resolve_callchain(struct perf_session *self, |
87 | struct thread *thread, | 104 | struct thread *thread, |
88 | struct ip_callchain *chain, | 105 | struct ip_callchain *chain, |
89 | struct symbol **parent); | 106 | struct symbol **parent); |
90 | 107 | ||
91 | bool perf_session__has_traces(struct perf_session *self, const char *msg); | 108 | bool perf_session__has_traces(struct perf_session *self, const char *msg); |
92 | 109 | ||
@@ -98,7 +115,6 @@ void mem_bswap_64(void *src, int byte_size); | |||
98 | 115 | ||
99 | int perf_session__create_kernel_maps(struct perf_session *self); | 116 | int perf_session__create_kernel_maps(struct perf_session *self); |
100 | 117 | ||
101 | int do_read(int fd, void *buf, size_t size); | ||
102 | void perf_session__update_sample_type(struct perf_session *self); | 118 | void perf_session__update_sample_type(struct perf_session *self); |
103 | void perf_session__remove_thread(struct perf_session *self, struct thread *th); | 119 | void perf_session__remove_thread(struct perf_session *self, struct thread *th); |
104 | 120 | ||
@@ -137,9 +153,22 @@ size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp); | |||
137 | size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, | 153 | size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, |
138 | FILE *fp, bool with_hits); | 154 | FILE *fp, bool with_hits); |
139 | 155 | ||
140 | static inline | 156 | size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp); |
141 | size_t perf_session__fprintf_nr_events(struct perf_session *self, FILE *fp) | 157 | |
158 | static inline int perf_session__parse_sample(struct perf_session *session, | ||
159 | const union perf_event *event, | ||
160 | struct perf_sample *sample) | ||
142 | { | 161 | { |
143 | return hists__fprintf_nr_events(&self->hists, fp); | 162 | return perf_event__parse_sample(event, session->sample_type, |
163 | session->sample_size, | ||
164 | session->sample_id_all, sample); | ||
144 | } | 165 | } |
166 | |||
167 | struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, | ||
168 | unsigned int type); | ||
169 | |||
170 | void perf_session__print_symbols(union perf_event *event, | ||
171 | struct perf_sample *sample, | ||
172 | struct perf_session *session); | ||
173 | |||
145 | #endif /* __PERF_SESSION_H */ | 174 | #endif /* __PERF_SESSION_H */ |
diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py new file mode 100644 index 000000000000..bbc982f5dd8b --- /dev/null +++ b/tools/perf/util/setup.py | |||
@@ -0,0 +1,24 @@ | |||
1 | #!/usr/bin/python2 | ||
2 | |||
3 | from distutils.core import setup, Extension | ||
4 | from os import getenv | ||
5 | |||
6 | cflags = ['-fno-strict-aliasing', '-Wno-write-strings'] | ||
7 | cflags += getenv('CFLAGS', '').split() | ||
8 | |||
9 | perf = Extension('perf', | ||
10 | sources = ['util/python.c', 'util/ctype.c', 'util/evlist.c', | ||
11 | 'util/evsel.c', 'util/cpumap.c', 'util/thread_map.c', | ||
12 | 'util/util.c', 'util/xyarray.c', 'util/cgroup.c'], | ||
13 | include_dirs = ['util/include'], | ||
14 | extra_compile_args = cflags, | ||
15 | ) | ||
16 | |||
17 | setup(name='perf', | ||
18 | version='0.1', | ||
19 | description='Interface with the Linux profiling infrastructure', | ||
20 | author='Arnaldo Carvalho de Melo', | ||
21 | author_email='acme@redhat.com', | ||
22 | license='GPLv2', | ||
23 | url='http://perf.wiki.kernel.org', | ||
24 | ext_modules=[perf]) | ||
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index b62a553cc67d..f44fa541d56e 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c | |||
@@ -170,7 +170,7 @@ static int hist_entry__dso_snprintf(struct hist_entry *self, char *bf, | |||
170 | return repsep_snprintf(bf, size, "%-*s", width, dso_name); | 170 | return repsep_snprintf(bf, size, "%-*s", width, dso_name); |
171 | } | 171 | } |
172 | 172 | ||
173 | return repsep_snprintf(bf, size, "%*Lx", width, self->ip); | 173 | return repsep_snprintf(bf, size, "%-*s", width, "[unknown]"); |
174 | } | 174 | } |
175 | 175 | ||
176 | /* --sort symbol */ | 176 | /* --sort symbol */ |
@@ -196,7 +196,7 @@ static int hist_entry__sym_snprintf(struct hist_entry *self, char *bf, | |||
196 | 196 | ||
197 | if (verbose) { | 197 | if (verbose) { |
198 | char o = self->ms.map ? dso__symtab_origin(self->ms.map->dso) : '!'; | 198 | char o = self->ms.map ? dso__symtab_origin(self->ms.map->dso) : '!'; |
199 | ret += repsep_snprintf(bf, size, "%*Lx %c ", | 199 | ret += repsep_snprintf(bf, size, "%-#*llx %c ", |
200 | BITS_PER_LONG / 4, self->ip, o); | 200 | BITS_PER_LONG / 4, self->ip, o); |
201 | } | 201 | } |
202 | 202 | ||
@@ -205,7 +205,7 @@ static int hist_entry__sym_snprintf(struct hist_entry *self, char *bf, | |||
205 | ret += repsep_snprintf(bf + ret, size - ret, "%s", | 205 | ret += repsep_snprintf(bf + ret, size - ret, "%s", |
206 | self->ms.sym->name); | 206 | self->ms.sym->name); |
207 | else | 207 | else |
208 | ret += repsep_snprintf(bf + ret, size - ret, "%*Lx", | 208 | ret += repsep_snprintf(bf + ret, size - ret, "%-#*llx", |
209 | BITS_PER_LONG / 4, self->ip); | 209 | BITS_PER_LONG / 4, self->ip); |
210 | 210 | ||
211 | return ret; | 211 | return ret; |
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h index 46e531d09e8b..0b91053a7d11 100644 --- a/tools/perf/util/sort.h +++ b/tools/perf/util/sort.h | |||
@@ -70,7 +70,7 @@ struct hist_entry { | |||
70 | struct hist_entry *pair; | 70 | struct hist_entry *pair; |
71 | struct rb_root sorted_chain; | 71 | struct rb_root sorted_chain; |
72 | }; | 72 | }; |
73 | struct callchain_node callchain[0]; | 73 | struct callchain_root callchain[0]; |
74 | }; | 74 | }; |
75 | 75 | ||
76 | enum sort_type { | 76 | enum sort_type { |
diff --git a/tools/perf/util/strfilter.c b/tools/perf/util/strfilter.c new file mode 100644 index 000000000000..834c8ebfe38e --- /dev/null +++ b/tools/perf/util/strfilter.c | |||
@@ -0,0 +1,199 @@ | |||
1 | #include "util.h" | ||
2 | #include "string.h" | ||
3 | #include "strfilter.h" | ||
4 | |||
5 | /* Operators */ | ||
6 | static const char *OP_and = "&"; /* Logical AND */ | ||
7 | static const char *OP_or = "|"; /* Logical OR */ | ||
8 | static const char *OP_not = "!"; /* Logical NOT */ | ||
9 | |||
10 | #define is_operator(c) ((c) == '|' || (c) == '&' || (c) == '!') | ||
11 | #define is_separator(c) (is_operator(c) || (c) == '(' || (c) == ')') | ||
12 | |||
13 | static void strfilter_node__delete(struct strfilter_node *self) | ||
14 | { | ||
15 | if (self) { | ||
16 | if (self->p && !is_operator(*self->p)) | ||
17 | free((char *)self->p); | ||
18 | strfilter_node__delete(self->l); | ||
19 | strfilter_node__delete(self->r); | ||
20 | free(self); | ||
21 | } | ||
22 | } | ||
23 | |||
24 | void strfilter__delete(struct strfilter *self) | ||
25 | { | ||
26 | if (self) { | ||
27 | strfilter_node__delete(self->root); | ||
28 | free(self); | ||
29 | } | ||
30 | } | ||
31 | |||
32 | static const char *get_token(const char *s, const char **e) | ||
33 | { | ||
34 | const char *p; | ||
35 | |||
36 | while (isspace(*s)) /* Skip spaces */ | ||
37 | s++; | ||
38 | |||
39 | if (*s == '\0') { | ||
40 | p = s; | ||
41 | goto end; | ||
42 | } | ||
43 | |||
44 | p = s + 1; | ||
45 | if (!is_separator(*s)) { | ||
46 | /* End search */ | ||
47 | retry: | ||
48 | while (*p && !is_separator(*p) && !isspace(*p)) | ||
49 | p++; | ||
50 | /* Escape and special case: '!' is also used in glob pattern */ | ||
51 | if (*(p - 1) == '\\' || (*p == '!' && *(p - 1) == '[')) { | ||
52 | p++; | ||
53 | goto retry; | ||
54 | } | ||
55 | } | ||
56 | end: | ||
57 | *e = p; | ||
58 | return s; | ||
59 | } | ||
60 | |||
61 | static struct strfilter_node *strfilter_node__alloc(const char *op, | ||
62 | struct strfilter_node *l, | ||
63 | struct strfilter_node *r) | ||
64 | { | ||
65 | struct strfilter_node *ret = zalloc(sizeof(struct strfilter_node)); | ||
66 | |||
67 | if (ret) { | ||
68 | ret->p = op; | ||
69 | ret->l = l; | ||
70 | ret->r = r; | ||
71 | } | ||
72 | |||
73 | return ret; | ||
74 | } | ||
75 | |||
76 | static struct strfilter_node *strfilter_node__new(const char *s, | ||
77 | const char **ep) | ||
78 | { | ||
79 | struct strfilter_node root, *cur, *last_op; | ||
80 | const char *e; | ||
81 | |||
82 | if (!s) | ||
83 | return NULL; | ||
84 | |||
85 | memset(&root, 0, sizeof(root)); | ||
86 | last_op = cur = &root; | ||
87 | |||
88 | s = get_token(s, &e); | ||
89 | while (*s != '\0' && *s != ')') { | ||
90 | switch (*s) { | ||
91 | case '&': /* Exchg last OP->r with AND */ | ||
92 | if (!cur->r || !last_op->r) | ||
93 | goto error; | ||
94 | cur = strfilter_node__alloc(OP_and, last_op->r, NULL); | ||
95 | if (!cur) | ||
96 | goto nomem; | ||
97 | last_op->r = cur; | ||
98 | last_op = cur; | ||
99 | break; | ||
100 | case '|': /* Exchange the root with OR */ | ||
101 | if (!cur->r || !root.r) | ||
102 | goto error; | ||
103 | cur = strfilter_node__alloc(OP_or, root.r, NULL); | ||
104 | if (!cur) | ||
105 | goto nomem; | ||
106 | root.r = cur; | ||
107 | last_op = cur; | ||
108 | break; | ||
109 | case '!': /* Add NOT as a leaf node */ | ||
110 | if (cur->r) | ||
111 | goto error; | ||
112 | cur->r = strfilter_node__alloc(OP_not, NULL, NULL); | ||
113 | if (!cur->r) | ||
114 | goto nomem; | ||
115 | cur = cur->r; | ||
116 | break; | ||
117 | case '(': /* Recursively parse inside the parentheses */ | ||
118 | if (cur->r) | ||
119 | goto error; | ||
120 | cur->r = strfilter_node__new(s + 1, &s); | ||
121 | if (!s) | ||
122 | goto nomem; | ||
123 | if (!cur->r || *s != ')') | ||
124 | goto error; | ||
125 | e = s + 1; | ||
126 | break; | ||
127 | default: | ||
128 | if (cur->r) | ||
129 | goto error; | ||
130 | cur->r = strfilter_node__alloc(NULL, NULL, NULL); | ||
131 | if (!cur->r) | ||
132 | goto nomem; | ||
133 | cur->r->p = strndup(s, e - s); | ||
134 | if (!cur->r->p) | ||
135 | goto nomem; | ||
136 | } | ||
137 | s = get_token(e, &e); | ||
138 | } | ||
139 | if (!cur->r) | ||
140 | goto error; | ||
141 | *ep = s; | ||
142 | return root.r; | ||
143 | nomem: | ||
144 | s = NULL; | ||
145 | error: | ||
146 | *ep = s; | ||
147 | strfilter_node__delete(root.r); | ||
148 | return NULL; | ||
149 | } | ||
150 | |||
151 | /* | ||
152 | * Parse a filter rule and return a new strfilter. | ||
153 | * Return NULL on failure; *ep == NULL if memory allocation failed. | ||
154 | */ | ||
155 | struct strfilter *strfilter__new(const char *rules, const char **err) | ||
156 | { | ||
157 | struct strfilter *ret = zalloc(sizeof(struct strfilter)); | ||
158 | const char *ep = NULL; | ||
159 | |||
160 | if (ret) | ||
161 | ret->root = strfilter_node__new(rules, &ep); | ||
162 | |||
163 | if (!ret || !ret->root || *ep != '\0') { | ||
164 | if (err) | ||
165 | *err = ep; | ||
166 | strfilter__delete(ret); | ||
167 | ret = NULL; | ||
168 | } | ||
169 | |||
170 | return ret; | ||
171 | } | ||
172 | |||
173 | static bool strfilter_node__compare(struct strfilter_node *self, | ||
174 | const char *str) | ||
175 | { | ||
176 | if (!self || !self->p) | ||
177 | return false; | ||
178 | |||
179 | switch (*self->p) { | ||
180 | case '|': /* OR */ | ||
181 | return strfilter_node__compare(self->l, str) || | ||
182 | strfilter_node__compare(self->r, str); | ||
183 | case '&': /* AND */ | ||
184 | return strfilter_node__compare(self->l, str) && | ||
185 | strfilter_node__compare(self->r, str); | ||
186 | case '!': /* NOT */ | ||
187 | return !strfilter_node__compare(self->r, str); | ||
188 | default: | ||
189 | return strglobmatch(str, self->p); | ||
190 | } | ||
191 | } | ||
192 | |||
193 | /* Return true if STR matches the filter rules */ | ||
194 | bool strfilter__compare(struct strfilter *self, const char *str) | ||
195 | { | ||
196 | if (!self) | ||
197 | return false; | ||
198 | return strfilter_node__compare(self->root, str); | ||
199 | } | ||
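The new strfilter combines glob patterns with '!', '&', '|' and parentheses into a small expression tree, which strfilter_node__compare() then evaluates against a candidate string. A short, hypothetical usage example of the two entry points added here (the rule string and the wrapper function are illustrative only):

#include "strfilter.h"

/* Accept names that glob-match "sys_*" but do not end in "64". */
static bool want_symbol(const char *name)
{
	const char *err = NULL;
	struct strfilter *filter = strfilter__new("sys_*&!*64", &err);
	bool ret;

	if (filter == NULL) {
		/* err == NULL means allocation failure; otherwise it points
		 * at the character where parsing of the rule went wrong. */
		return false;
	}

	ret = strfilter__compare(filter, name);
	strfilter__delete(filter);
	return ret;
}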
diff --git a/tools/perf/util/strfilter.h b/tools/perf/util/strfilter.h new file mode 100644 index 000000000000..00f58a7506de --- /dev/null +++ b/tools/perf/util/strfilter.h | |||
@@ -0,0 +1,48 @@ | |||
1 | #ifndef __PERF_STRFILTER_H | ||
2 | #define __PERF_STRFILTER_H | ||
3 | /* General purpose glob matching filter */ | ||
4 | |||
5 | #include <linux/list.h> | ||
6 | #include <stdbool.h> | ||
7 | |||
8 | /* A node of a string filter */ | ||
9 | struct strfilter_node { | ||
10 | struct strfilter_node *l; /* Tree left branch (for &,|) */ | ||
11 | struct strfilter_node *r; /* Tree right branch (for !,&,|) */ | ||
12 | const char *p; /* Operator or rule */ | ||
13 | }; | ||
14 | |||
15 | /* String filter */ | ||
16 | struct strfilter { | ||
17 | struct strfilter_node *root; | ||
18 | }; | ||
19 | |||
20 | /** | ||
21 | * strfilter__new - Create a new string filter | ||
22 | * @rules: Filter rule, which is a combination of glob expressions. | ||
23 | * @err: Pointer that is set to the location of an error detected in @rules | ||
24 | * | ||
25 | * Parse @rules and return a new strfilter. Return NULL if an error is detected. | ||
26 | * In that case, *@err will indicate where the error was detected; *@err is NULL | ||
27 | * if a memory allocation failed. | ||
28 | */ | ||
29 | struct strfilter *strfilter__new(const char *rules, const char **err); | ||
30 | |||
31 | /** | ||
32 | * strfilter__compare - compare a given string against a string filter | ||
33 | * @self: String filter | ||
34 | * @str: target string | ||
35 | * | ||
36 | * Compare @str against @self. Return true if @str matches the rule. | ||
37 | */ | ||
38 | bool strfilter__compare(struct strfilter *self, const char *str); | ||
39 | |||
40 | /** | ||
41 | * strfilter__delete - delete a string filter | ||
42 | * @self: String filter to delete | ||
43 | * | ||
44 | * Delete @self. | ||
45 | */ | ||
46 | void strfilter__delete(struct strfilter *self); | ||
47 | |||
48 | #endif | ||
diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c index 0409fc7c0058..b9a985dadd08 100644 --- a/tools/perf/util/string.c +++ b/tools/perf/util/string.c | |||
@@ -85,7 +85,7 @@ out: | |||
85 | 85 | ||
86 | /* | 86 | /* |
87 | * Helper function for splitting a string into an argv-like array. | 87 | * Helper function for splitting a string into an argv-like array. |
88 | * originaly copied from lib/argv_split.c | 88 | * originally copied from lib/argv_split.c |
89 | */ | 89 | */ |
90 | static const char *skip_sep(const char *cp) | 90 | static const char *skip_sep(const char *cp) |
91 | { | 91 | { |
@@ -259,7 +259,7 @@ static bool __match_glob(const char *str, const char *pat, bool ignore_space) | |||
259 | if (!*pat) /* Tail wild card matches all */ | 259 | if (!*pat) /* Tail wild card matches all */ |
260 | return true; | 260 | return true; |
261 | while (*str) | 261 | while (*str) |
262 | if (strglobmatch(str++, pat)) | 262 | if (__match_glob(str++, pat, ignore_space)) |
263 | return true; | 263 | return true; |
264 | } | 264 | } |
265 | return !*str && !*pat; | 265 | return !*str && !*pat; |
diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c index b3637db025a2..96c866045d60 100644 --- a/tools/perf/util/svghelper.c +++ b/tools/perf/util/svghelper.c | |||
@@ -12,6 +12,7 @@ | |||
12 | * of the License. | 12 | * of the License. |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <inttypes.h> | ||
15 | #include <stdio.h> | 16 | #include <stdio.h> |
16 | #include <stdlib.h> | 17 | #include <stdlib.h> |
17 | #include <unistd.h> | 18 | #include <unistd.h> |
@@ -43,11 +44,11 @@ static double cpu2y(int cpu) | |||
43 | return cpu2slot(cpu) * SLOT_MULT; | 44 | return cpu2slot(cpu) * SLOT_MULT; |
44 | } | 45 | } |
45 | 46 | ||
46 | static double time2pixels(u64 time) | 47 | static double time2pixels(u64 __time) |
47 | { | 48 | { |
48 | double X; | 49 | double X; |
49 | 50 | ||
50 | X = 1.0 * svg_page_width * (time - first_time) / (last_time - first_time); | 51 | X = 1.0 * svg_page_width * (__time - first_time) / (last_time - first_time); |
51 | return X; | 52 | return X; |
52 | } | 53 | } |
53 | 54 | ||
@@ -94,7 +95,7 @@ void open_svg(const char *filename, int cpus, int rows, u64 start, u64 end) | |||
94 | 95 | ||
95 | total_height = (1 + rows + cpu2slot(cpus)) * SLOT_MULT; | 96 | total_height = (1 + rows + cpu2slot(cpus)) * SLOT_MULT; |
96 | fprintf(svgfile, "<?xml version=\"1.0\" standalone=\"no\"?> \n"); | 97 | fprintf(svgfile, "<?xml version=\"1.0\" standalone=\"no\"?> \n"); |
97 | fprintf(svgfile, "<svg width=\"%i\" height=\"%llu\" version=\"1.1\" xmlns=\"http://www.w3.org/2000/svg\">\n", svg_page_width, total_height); | 98 | fprintf(svgfile, "<svg width=\"%i\" height=\"%" PRIu64 "\" version=\"1.1\" xmlns=\"http://www.w3.org/2000/svg\">\n", svg_page_width, total_height); |
98 | 99 | ||
99 | fprintf(svgfile, "<defs>\n <style type=\"text/css\">\n <![CDATA[\n"); | 100 | fprintf(svgfile, "<defs>\n <style type=\"text/css\">\n <![CDATA[\n"); |
100 | 101 | ||
@@ -455,9 +456,9 @@ void svg_legenda(void) | |||
455 | return; | 456 | return; |
456 | 457 | ||
457 | svg_legenda_box(0, "Running", "sample"); | 458 | svg_legenda_box(0, "Running", "sample"); |
458 | svg_legenda_box(100, "Idle","rect.c1"); | 459 | svg_legenda_box(100, "Idle","c1"); |
459 | svg_legenda_box(200, "Deeper Idle", "rect.c3"); | 460 | svg_legenda_box(200, "Deeper Idle", "c3"); |
460 | svg_legenda_box(350, "Deepest Idle", "rect.c6"); | 461 | svg_legenda_box(350, "Deepest Idle", "c6"); |
461 | svg_legenda_box(550, "Sleeping", "process2"); | 462 | svg_legenda_box(550, "Sleeping", "process2"); |
462 | svg_legenda_box(650, "Waiting for cpu", "waiting"); | 463 | svg_legenda_box(650, "Waiting for cpu", "waiting"); |
463 | svg_legenda_box(800, "Blocked on IO", "blocked"); | 464 | svg_legenda_box(800, "Blocked on IO", "blocked"); |
@@ -483,7 +484,7 @@ void svg_time_grid(void) | |||
483 | color = 128; | 484 | color = 128; |
484 | } | 485 | } |
485 | 486 | ||
486 | fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%llu\" style=\"stroke:rgb(%i,%i,%i);stroke-width:%1.3f\"/>\n", | 487 | fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%" PRIu64 "\" style=\"stroke:rgb(%i,%i,%i);stroke-width:%1.3f\"/>\n", |
487 | time2pixels(i), SLOT_MULT/2, time2pixels(i), total_height, color, color, color, thickness); | 488 | time2pixels(i), SLOT_MULT/2, time2pixels(i), total_height, color, color, color, thickness); |
488 | 489 | ||
489 | i += 10000000; | 490 | i += 10000000; |
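The %llu to PRIu64 conversions above are needed because the tools' u64 can map to different underlying C types (unsigned long vs. unsigned long long) depending on the architecture and headers, so the printf length modifier has to come from <inttypes.h>. A minimal illustration, using the standard uint64_t in place of u64:

#include <inttypes.h>
#include <stdio.h>

static void print_height(uint64_t total_height)
{
	/* PRIu64 expands to the correct conversion for this platform's
	 * 64-bit unsigned type, e.g. "lu" or "llu". */
	printf("<svg height=\"%" PRIu64 "\">\n", total_height);
}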
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index b2f5ae97f33d..eec196329fd9 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <sys/param.h> | 11 | #include <sys/param.h> |
12 | #include <fcntl.h> | 12 | #include <fcntl.h> |
13 | #include <unistd.h> | 13 | #include <unistd.h> |
14 | #include <inttypes.h> | ||
14 | #include "build-id.h" | 15 | #include "build-id.h" |
15 | #include "debug.h" | 16 | #include "debug.h" |
16 | #include "symbol.h" | 17 | #include "symbol.h" |
@@ -22,17 +23,21 @@ | |||
22 | #include <limits.h> | 23 | #include <limits.h> |
23 | #include <sys/utsname.h> | 24 | #include <sys/utsname.h> |
24 | 25 | ||
26 | #ifndef KSYM_NAME_LEN | ||
27 | #define KSYM_NAME_LEN 128 | ||
28 | #endif | ||
29 | |||
25 | #ifndef NT_GNU_BUILD_ID | 30 | #ifndef NT_GNU_BUILD_ID |
26 | #define NT_GNU_BUILD_ID 3 | 31 | #define NT_GNU_BUILD_ID 3 |
27 | #endif | 32 | #endif |
28 | 33 | ||
29 | static bool dso__build_id_equal(const struct dso *self, u8 *build_id); | 34 | static bool dso__build_id_equal(const struct dso *dso, u8 *build_id); |
30 | static int elf_read_build_id(Elf *elf, void *bf, size_t size); | 35 | static int elf_read_build_id(Elf *elf, void *bf, size_t size); |
31 | static void dsos__add(struct list_head *head, struct dso *dso); | 36 | static void dsos__add(struct list_head *head, struct dso *dso); |
32 | static struct map *map__new2(u64 start, struct dso *dso, enum map_type type); | 37 | static struct map *map__new2(u64 start, struct dso *dso, enum map_type type); |
33 | static int dso__load_kernel_sym(struct dso *self, struct map *map, | 38 | static int dso__load_kernel_sym(struct dso *dso, struct map *map, |
34 | symbol_filter_t filter); | 39 | symbol_filter_t filter); |
35 | static int dso__load_guest_kernel_sym(struct dso *self, struct map *map, | 40 | static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map, |
36 | symbol_filter_t filter); | 41 | symbol_filter_t filter); |
37 | static int vmlinux_path__nr_entries; | 42 | static int vmlinux_path__nr_entries; |
38 | static char **vmlinux_path; | 43 | static char **vmlinux_path; |
@@ -41,29 +46,30 @@ struct symbol_conf symbol_conf = { | |||
41 | .exclude_other = true, | 46 | .exclude_other = true, |
42 | .use_modules = true, | 47 | .use_modules = true, |
43 | .try_vmlinux_path = true, | 48 | .try_vmlinux_path = true, |
49 | .symfs = "", | ||
44 | }; | 50 | }; |
45 | 51 | ||
46 | int dso__name_len(const struct dso *self) | 52 | int dso__name_len(const struct dso *dso) |
47 | { | 53 | { |
48 | if (verbose) | 54 | if (verbose) |
49 | return self->long_name_len; | 55 | return dso->long_name_len; |
50 | 56 | ||
51 | return self->short_name_len; | 57 | return dso->short_name_len; |
52 | } | 58 | } |
53 | 59 | ||
54 | bool dso__loaded(const struct dso *self, enum map_type type) | 60 | bool dso__loaded(const struct dso *dso, enum map_type type) |
55 | { | 61 | { |
56 | return self->loaded & (1 << type); | 62 | return dso->loaded & (1 << type); |
57 | } | 63 | } |
58 | 64 | ||
59 | bool dso__sorted_by_name(const struct dso *self, enum map_type type) | 65 | bool dso__sorted_by_name(const struct dso *dso, enum map_type type) |
60 | { | 66 | { |
61 | return self->sorted_by_name & (1 << type); | 67 | return dso->sorted_by_name & (1 << type); |
62 | } | 68 | } |
63 | 69 | ||
64 | static void dso__set_sorted_by_name(struct dso *self, enum map_type type) | 70 | static void dso__set_sorted_by_name(struct dso *dso, enum map_type type) |
65 | { | 71 | { |
66 | self->sorted_by_name |= (1 << type); | 72 | dso->sorted_by_name |= (1 << type); |
67 | } | 73 | } |
68 | 74 | ||
69 | bool symbol_type__is_a(char symbol_type, enum map_type map_type) | 75 | bool symbol_type__is_a(char symbol_type, enum map_type map_type) |
@@ -78,9 +84,9 @@ bool symbol_type__is_a(char symbol_type, enum map_type map_type) | |||
78 | } | 84 | } |
79 | } | 85 | } |
80 | 86 | ||
81 | static void symbols__fixup_end(struct rb_root *self) | 87 | static void symbols__fixup_end(struct rb_root *symbols) |
82 | { | 88 | { |
83 | struct rb_node *nd, *prevnd = rb_first(self); | 89 | struct rb_node *nd, *prevnd = rb_first(symbols); |
84 | struct symbol *curr, *prev; | 90 | struct symbol *curr, *prev; |
85 | 91 | ||
86 | if (prevnd == NULL) | 92 | if (prevnd == NULL) |
@@ -92,7 +98,7 @@ static void symbols__fixup_end(struct rb_root *self) | |||
92 | prev = curr; | 98 | prev = curr; |
93 | curr = rb_entry(nd, struct symbol, rb_node); | 99 | curr = rb_entry(nd, struct symbol, rb_node); |
94 | 100 | ||
95 | if (prev->end == prev->start) | 101 | if (prev->end == prev->start && prev->end != curr->start) |
96 | prev->end = curr->start - 1; | 102 | prev->end = curr->start - 1; |
97 | } | 103 | } |
98 | 104 | ||
@@ -101,10 +107,10 @@ static void symbols__fixup_end(struct rb_root *self) | |||
101 | curr->end = roundup(curr->start, 4096); | 107 | curr->end = roundup(curr->start, 4096); |
102 | } | 108 | } |
103 | 109 | ||
104 | static void __map_groups__fixup_end(struct map_groups *self, enum map_type type) | 110 | static void __map_groups__fixup_end(struct map_groups *mg, enum map_type type) |
105 | { | 111 | { |
106 | struct map *prev, *curr; | 112 | struct map *prev, *curr; |
107 | struct rb_node *nd, *prevnd = rb_first(&self->maps[type]); | 113 | struct rb_node *nd, *prevnd = rb_first(&mg->maps[type]); |
108 | 114 | ||
109 | if (prevnd == NULL) | 115 | if (prevnd == NULL) |
110 | return; | 116 | return; |
@@ -121,132 +127,131 @@ static void __map_groups__fixup_end(struct map_groups *self, enum map_type type) | |||
121 | * We still haven't the actual symbols, so guess the | 127 | * We still haven't the actual symbols, so guess the |
122 | * last map final address. | 128 | * last map final address. |
123 | */ | 129 | */ |
124 | curr->end = ~0UL; | 130 | curr->end = ~0ULL; |
125 | } | 131 | } |
126 | 132 | ||
127 | static void map_groups__fixup_end(struct map_groups *self) | 133 | static void map_groups__fixup_end(struct map_groups *mg) |
128 | { | 134 | { |
129 | int i; | 135 | int i; |
130 | for (i = 0; i < MAP__NR_TYPES; ++i) | 136 | for (i = 0; i < MAP__NR_TYPES; ++i) |
131 | __map_groups__fixup_end(self, i); | 137 | __map_groups__fixup_end(mg, i); |
132 | } | 138 | } |
133 | 139 | ||
134 | static struct symbol *symbol__new(u64 start, u64 len, u8 binding, | 140 | static struct symbol *symbol__new(u64 start, u64 len, u8 binding, |
135 | const char *name) | 141 | const char *name) |
136 | { | 142 | { |
137 | size_t namelen = strlen(name) + 1; | 143 | size_t namelen = strlen(name) + 1; |
138 | struct symbol *self = calloc(1, (symbol_conf.priv_size + | 144 | struct symbol *sym = calloc(1, (symbol_conf.priv_size + |
139 | sizeof(*self) + namelen)); | 145 | sizeof(*sym) + namelen)); |
140 | if (self == NULL) | 146 | if (sym == NULL) |
141 | return NULL; | 147 | return NULL; |
142 | 148 | ||
143 | if (symbol_conf.priv_size) | 149 | if (symbol_conf.priv_size) |
144 | self = ((void *)self) + symbol_conf.priv_size; | 150 | sym = ((void *)sym) + symbol_conf.priv_size; |
145 | |||
146 | self->start = start; | ||
147 | self->end = len ? start + len - 1 : start; | ||
148 | self->binding = binding; | ||
149 | self->namelen = namelen - 1; | ||
150 | 151 | ||
151 | pr_debug4("%s: %s %#Lx-%#Lx\n", __func__, name, start, self->end); | 152 | sym->start = start; |
153 | sym->end = len ? start + len - 1 : start; | ||
154 | sym->binding = binding; | ||
155 | sym->namelen = namelen - 1; | ||
152 | 156 | ||
153 | memcpy(self->name, name, namelen); | 157 | pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n", |
158 | __func__, name, start, sym->end); | ||
159 | memcpy(sym->name, name, namelen); | ||
154 | 160 | ||
155 | return self; | 161 | return sym; |
156 | } | 162 | } |
157 | 163 | ||
158 | void symbol__delete(struct symbol *self) | 164 | void symbol__delete(struct symbol *sym) |
159 | { | 165 | { |
160 | free(((void *)self) - symbol_conf.priv_size); | 166 | free(((void *)sym) - symbol_conf.priv_size); |
161 | } | 167 | } |
162 | 168 | ||
163 | static size_t symbol__fprintf(struct symbol *self, FILE *fp) | 169 | static size_t symbol__fprintf(struct symbol *sym, FILE *fp) |
164 | { | 170 | { |
165 | return fprintf(fp, " %llx-%llx %c %s\n", | 171 | return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %c %s\n", |
166 | self->start, self->end, | 172 | sym->start, sym->end, |
167 | self->binding == STB_GLOBAL ? 'g' : | 173 | sym->binding == STB_GLOBAL ? 'g' : |
168 | self->binding == STB_LOCAL ? 'l' : 'w', | 174 | sym->binding == STB_LOCAL ? 'l' : 'w', |
169 | self->name); | 175 | sym->name); |
170 | } | 176 | } |
171 | 177 | ||
172 | void dso__set_long_name(struct dso *self, char *name) | 178 | void dso__set_long_name(struct dso *dso, char *name) |
173 | { | 179 | { |
174 | if (name == NULL) | 180 | if (name == NULL) |
175 | return; | 181 | return; |
176 | self->long_name = name; | 182 | dso->long_name = name; |
177 | self->long_name_len = strlen(name); | 183 | dso->long_name_len = strlen(name); |
178 | } | 184 | } |
179 | 185 | ||
180 | static void dso__set_short_name(struct dso *self, const char *name) | 186 | static void dso__set_short_name(struct dso *dso, const char *name) |
181 | { | 187 | { |
182 | if (name == NULL) | 188 | if (name == NULL) |
183 | return; | 189 | return; |
184 | self->short_name = name; | 190 | dso->short_name = name; |
185 | self->short_name_len = strlen(name); | 191 | dso->short_name_len = strlen(name); |
186 | } | 192 | } |
187 | 193 | ||
188 | static void dso__set_basename(struct dso *self) | 194 | static void dso__set_basename(struct dso *dso) |
189 | { | 195 | { |
190 | dso__set_short_name(self, basename(self->long_name)); | 196 | dso__set_short_name(dso, basename(dso->long_name)); |
191 | } | 197 | } |
192 | 198 | ||
193 | struct dso *dso__new(const char *name) | 199 | struct dso *dso__new(const char *name) |
194 | { | 200 | { |
195 | struct dso *self = calloc(1, sizeof(*self) + strlen(name) + 1); | 201 | struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1); |
196 | 202 | ||
197 | if (self != NULL) { | 203 | if (dso != NULL) { |
198 | int i; | 204 | int i; |
199 | strcpy(self->name, name); | 205 | strcpy(dso->name, name); |
200 | dso__set_long_name(self, self->name); | 206 | dso__set_long_name(dso, dso->name); |
201 | dso__set_short_name(self, self->name); | 207 | dso__set_short_name(dso, dso->name); |
202 | for (i = 0; i < MAP__NR_TYPES; ++i) | 208 | for (i = 0; i < MAP__NR_TYPES; ++i) |
203 | self->symbols[i] = self->symbol_names[i] = RB_ROOT; | 209 | dso->symbols[i] = dso->symbol_names[i] = RB_ROOT; |
204 | self->slen_calculated = 0; | 210 | dso->symtab_type = SYMTAB__NOT_FOUND; |
205 | self->origin = DSO__ORIG_NOT_FOUND; | 211 | dso->loaded = 0; |
206 | self->loaded = 0; | 212 | dso->sorted_by_name = 0; |
207 | self->sorted_by_name = 0; | 213 | dso->has_build_id = 0; |
208 | self->has_build_id = 0; | 214 | dso->kernel = DSO_TYPE_USER; |
209 | self->kernel = DSO_TYPE_USER; | 215 | INIT_LIST_HEAD(&dso->node); |
210 | INIT_LIST_HEAD(&self->node); | ||
211 | } | 216 | } |
212 | 217 | ||
213 | return self; | 218 | return dso; |
214 | } | 219 | } |
215 | 220 | ||
216 | static void symbols__delete(struct rb_root *self) | 221 | static void symbols__delete(struct rb_root *symbols) |
217 | { | 222 | { |
218 | struct symbol *pos; | 223 | struct symbol *pos; |
219 | struct rb_node *next = rb_first(self); | 224 | struct rb_node *next = rb_first(symbols); |
220 | 225 | ||
221 | while (next) { | 226 | while (next) { |
222 | pos = rb_entry(next, struct symbol, rb_node); | 227 | pos = rb_entry(next, struct symbol, rb_node); |
223 | next = rb_next(&pos->rb_node); | 228 | next = rb_next(&pos->rb_node); |
224 | rb_erase(&pos->rb_node, self); | 229 | rb_erase(&pos->rb_node, symbols); |
225 | symbol__delete(pos); | 230 | symbol__delete(pos); |
226 | } | 231 | } |
227 | } | 232 | } |
228 | 233 | ||
229 | void dso__delete(struct dso *self) | 234 | void dso__delete(struct dso *dso) |
230 | { | 235 | { |
231 | int i; | 236 | int i; |
232 | for (i = 0; i < MAP__NR_TYPES; ++i) | 237 | for (i = 0; i < MAP__NR_TYPES; ++i) |
233 | symbols__delete(&self->symbols[i]); | 238 | symbols__delete(&dso->symbols[i]); |
234 | if (self->sname_alloc) | 239 | if (dso->sname_alloc) |
235 | free((char *)self->short_name); | 240 | free((char *)dso->short_name); |
236 | if (self->lname_alloc) | 241 | if (dso->lname_alloc) |
237 | free(self->long_name); | 242 | free(dso->long_name); |
238 | free(self); | 243 | free(dso); |
239 | } | 244 | } |
240 | 245 | ||
241 | void dso__set_build_id(struct dso *self, void *build_id) | 246 | void dso__set_build_id(struct dso *dso, void *build_id) |
242 | { | 247 | { |
243 | memcpy(self->build_id, build_id, sizeof(self->build_id)); | 248 | memcpy(dso->build_id, build_id, sizeof(dso->build_id)); |
244 | self->has_build_id = 1; | 249 | dso->has_build_id = 1; |
245 | } | 250 | } |
246 | 251 | ||
247 | static void symbols__insert(struct rb_root *self, struct symbol *sym) | 252 | static void symbols__insert(struct rb_root *symbols, struct symbol *sym) |
248 | { | 253 | { |
249 | struct rb_node **p = &self->rb_node; | 254 | struct rb_node **p = &symbols->rb_node; |
250 | struct rb_node *parent = NULL; | 255 | struct rb_node *parent = NULL; |
251 | const u64 ip = sym->start; | 256 | const u64 ip = sym->start; |
252 | struct symbol *s; | 257 | struct symbol *s; |
@@ -260,17 +265,17 @@ static void symbols__insert(struct rb_root *self, struct symbol *sym) | |||
260 | p = &(*p)->rb_right; | 265 | p = &(*p)->rb_right; |
261 | } | 266 | } |
262 | rb_link_node(&sym->rb_node, parent, p); | 267 | rb_link_node(&sym->rb_node, parent, p); |
263 | rb_insert_color(&sym->rb_node, self); | 268 | rb_insert_color(&sym->rb_node, symbols); |
264 | } | 269 | } |
265 | 270 | ||
266 | static struct symbol *symbols__find(struct rb_root *self, u64 ip) | 271 | static struct symbol *symbols__find(struct rb_root *symbols, u64 ip) |
267 | { | 272 | { |
268 | struct rb_node *n; | 273 | struct rb_node *n; |
269 | 274 | ||
270 | if (self == NULL) | 275 | if (symbols == NULL) |
271 | return NULL; | 276 | return NULL; |
272 | 277 | ||
273 | n = self->rb_node; | 278 | n = symbols->rb_node; |
274 | 279 | ||
275 | while (n) { | 280 | while (n) { |
276 | struct symbol *s = rb_entry(n, struct symbol, rb_node); | 281 | struct symbol *s = rb_entry(n, struct symbol, rb_node); |
@@ -291,11 +296,13 @@ struct symbol_name_rb_node { | |||
291 | struct symbol sym; | 296 | struct symbol sym; |
292 | }; | 297 | }; |
293 | 298 | ||
294 | static void symbols__insert_by_name(struct rb_root *self, struct symbol *sym) | 299 | static void symbols__insert_by_name(struct rb_root *symbols, struct symbol *sym) |
295 | { | 300 | { |
296 | struct rb_node **p = &self->rb_node; | 301 | struct rb_node **p = &symbols->rb_node; |
297 | struct rb_node *parent = NULL; | 302 | struct rb_node *parent = NULL; |
298 | struct symbol_name_rb_node *symn = ((void *)sym) - sizeof(*parent), *s; | 303 | struct symbol_name_rb_node *symn, *s; |
304 | |||
305 | symn = container_of(sym, struct symbol_name_rb_node, sym); | ||
299 | 306 | ||
300 | while (*p != NULL) { | 307 | while (*p != NULL) { |
301 | parent = *p; | 308 | parent = *p; |
@@ -306,27 +313,29 @@ static void symbols__insert_by_name(struct rb_root *self, struct symbol *sym) | |||
306 | p = &(*p)->rb_right; | 313 | p = &(*p)->rb_right; |
307 | } | 314 | } |
308 | rb_link_node(&symn->rb_node, parent, p); | 315 | rb_link_node(&symn->rb_node, parent, p); |
309 | rb_insert_color(&symn->rb_node, self); | 316 | rb_insert_color(&symn->rb_node, symbols); |
310 | } | 317 | } |
311 | 318 | ||
312 | static void symbols__sort_by_name(struct rb_root *self, struct rb_root *source) | 319 | static void symbols__sort_by_name(struct rb_root *symbols, |
320 | struct rb_root *source) | ||
313 | { | 321 | { |
314 | struct rb_node *nd; | 322 | struct rb_node *nd; |
315 | 323 | ||
316 | for (nd = rb_first(source); nd; nd = rb_next(nd)) { | 324 | for (nd = rb_first(source); nd; nd = rb_next(nd)) { |
317 | struct symbol *pos = rb_entry(nd, struct symbol, rb_node); | 325 | struct symbol *pos = rb_entry(nd, struct symbol, rb_node); |
318 | symbols__insert_by_name(self, pos); | 326 | symbols__insert_by_name(symbols, pos); |
319 | } | 327 | } |
320 | } | 328 | } |
321 | 329 | ||
322 | static struct symbol *symbols__find_by_name(struct rb_root *self, const char *name) | 330 | static struct symbol *symbols__find_by_name(struct rb_root *symbols, |
331 | const char *name) | ||
323 | { | 332 | { |
324 | struct rb_node *n; | 333 | struct rb_node *n; |
325 | 334 | ||
326 | if (self == NULL) | 335 | if (symbols == NULL) |
327 | return NULL; | 336 | return NULL; |
328 | 337 | ||
329 | n = self->rb_node; | 338 | n = symbols->rb_node; |
330 | 339 | ||
331 | while (n) { | 340 | while (n) { |
332 | struct symbol_name_rb_node *s; | 341 | struct symbol_name_rb_node *s; |
@@ -346,29 +355,29 @@ static struct symbol *symbols__find_by_name(struct rb_root *self, const char *na | |||
346 | return NULL; | 355 | return NULL; |
347 | } | 356 | } |
348 | 357 | ||
349 | struct symbol *dso__find_symbol(struct dso *self, | 358 | struct symbol *dso__find_symbol(struct dso *dso, |
350 | enum map_type type, u64 addr) | 359 | enum map_type type, u64 addr) |
351 | { | 360 | { |
352 | return symbols__find(&self->symbols[type], addr); | 361 | return symbols__find(&dso->symbols[type], addr); |
353 | } | 362 | } |
354 | 363 | ||
355 | struct symbol *dso__find_symbol_by_name(struct dso *self, enum map_type type, | 364 | struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type, |
356 | const char *name) | 365 | const char *name) |
357 | { | 366 | { |
358 | return symbols__find_by_name(&self->symbol_names[type], name); | 367 | return symbols__find_by_name(&dso->symbol_names[type], name); |
359 | } | 368 | } |
360 | 369 | ||
361 | void dso__sort_by_name(struct dso *self, enum map_type type) | 370 | void dso__sort_by_name(struct dso *dso, enum map_type type) |
362 | { | 371 | { |
363 | dso__set_sorted_by_name(self, type); | 372 | dso__set_sorted_by_name(dso, type); |
364 | return symbols__sort_by_name(&self->symbol_names[type], | 373 | return symbols__sort_by_name(&dso->symbol_names[type], |
365 | &self->symbols[type]); | 374 | &dso->symbols[type]); |
366 | } | 375 | } |
367 | 376 | ||
368 | int build_id__sprintf(const u8 *self, int len, char *bf) | 377 | int build_id__sprintf(const u8 *build_id, int len, char *bf) |
369 | { | 378 | { |
370 | char *bid = bf; | 379 | char *bid = bf; |
371 | const u8 *raw = self; | 380 | const u8 *raw = build_id; |
372 | int i; | 381 | int i; |
373 | 382 | ||
374 | for (i = 0; i < len; ++i) { | 383 | for (i = 0; i < len; ++i) { |
@@ -377,29 +386,44 @@ int build_id__sprintf(const u8 *self, int len, char *bf) | |||
377 | bid += 2; | 386 | bid += 2; |
378 | } | 387 | } |
379 | 388 | ||
380 | return raw - self; | 389 | return raw - build_id; |
381 | } | 390 | } |
382 | 391 | ||
383 | size_t dso__fprintf_buildid(struct dso *self, FILE *fp) | 392 | size_t dso__fprintf_buildid(struct dso *dso, FILE *fp) |
384 | { | 393 | { |
385 | char sbuild_id[BUILD_ID_SIZE * 2 + 1]; | 394 | char sbuild_id[BUILD_ID_SIZE * 2 + 1]; |
386 | 395 | ||
387 | build_id__sprintf(self->build_id, sizeof(self->build_id), sbuild_id); | 396 | build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id); |
388 | return fprintf(fp, "%s", sbuild_id); | 397 | return fprintf(fp, "%s", sbuild_id); |
389 | } | 398 | } |
390 | 399 | ||
391 | size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp) | 400 | size_t dso__fprintf_symbols_by_name(struct dso *dso, |
401 | enum map_type type, FILE *fp) | ||
402 | { | ||
403 | size_t ret = 0; | ||
404 | struct rb_node *nd; | ||
405 | struct symbol_name_rb_node *pos; | ||
406 | |||
407 | for (nd = rb_first(&dso->symbol_names[type]); nd; nd = rb_next(nd)) { | ||
408 | pos = rb_entry(nd, struct symbol_name_rb_node, rb_node); | ||
409 | fprintf(fp, "%s\n", pos->sym.name); | ||
410 | } | ||
411 | |||
412 | return ret; | ||
413 | } | ||
414 | |||
415 | size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp) | ||
392 | { | 416 | { |
393 | struct rb_node *nd; | 417 | struct rb_node *nd; |
394 | size_t ret = fprintf(fp, "dso: %s (", self->short_name); | 418 | size_t ret = fprintf(fp, "dso: %s (", dso->short_name); |
395 | 419 | ||
396 | if (self->short_name != self->long_name) | 420 | if (dso->short_name != dso->long_name) |
397 | ret += fprintf(fp, "%s, ", self->long_name); | 421 | ret += fprintf(fp, "%s, ", dso->long_name); |
398 | ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type], | 422 | ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type], |
399 | self->loaded ? "" : "NOT "); | 423 | dso->loaded ? "" : "NOT "); |
400 | ret += dso__fprintf_buildid(self, fp); | 424 | ret += dso__fprintf_buildid(dso, fp); |
401 | ret += fprintf(fp, ")\n"); | 425 | ret += fprintf(fp, ")\n"); |
402 | for (nd = rb_first(&self->symbols[type]); nd; nd = rb_next(nd)) { | 426 | for (nd = rb_first(&dso->symbols[type]); nd; nd = rb_next(nd)) { |
403 | struct symbol *pos = rb_entry(nd, struct symbol, rb_node); | 427 | struct symbol *pos = rb_entry(nd, struct symbol, rb_node); |
404 | ret += symbol__fprintf(pos, fp); | 428 | ret += symbol__fprintf(pos, fp); |
405 | } | 429 | } |
@@ -409,16 +433,25 @@ size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp) | |||
409 | 433 | ||
410 | int kallsyms__parse(const char *filename, void *arg, | 434 | int kallsyms__parse(const char *filename, void *arg, |
411 | int (*process_symbol)(void *arg, const char *name, | 435 | int (*process_symbol)(void *arg, const char *name, |
412 | char type, u64 start)) | 436 | char type, u64 start, u64 end)) |
413 | { | 437 | { |
414 | char *line = NULL; | 438 | char *line = NULL; |
415 | size_t n; | 439 | size_t n; |
416 | int err = 0; | 440 | int err = -1; |
441 | u64 prev_start = 0; | ||
442 | char prev_symbol_type = 0; | ||
443 | char *prev_symbol_name; | ||
417 | FILE *file = fopen(filename, "r"); | 444 | FILE *file = fopen(filename, "r"); |
418 | 445 | ||
419 | if (file == NULL) | 446 | if (file == NULL) |
420 | goto out_failure; | 447 | goto out_failure; |
421 | 448 | ||
449 | prev_symbol_name = malloc(KSYM_NAME_LEN); | ||
450 | if (prev_symbol_name == NULL) | ||
451 | goto out_close; | ||
452 | |||
453 | err = 0; | ||
454 | |||
422 | while (!feof(file)) { | 455 | while (!feof(file)) { |
423 | u64 start; | 456 | u64 start; |
424 | int line_len, len; | 457 | int line_len, len; |
@@ -438,14 +471,33 @@ int kallsyms__parse(const char *filename, void *arg, | |||
438 | continue; | 471 | continue; |
439 | 472 | ||
440 | symbol_type = toupper(line[len]); | 473 | symbol_type = toupper(line[len]); |
441 | symbol_name = line + len + 2; | 474 | len += 2; |
475 | symbol_name = line + len; | ||
476 | len = line_len - len; | ||
442 | 477 | ||
443 | err = process_symbol(arg, symbol_name, symbol_type, start); | 478 | if (len >= KSYM_NAME_LEN) { |
444 | if (err) | 479 | err = -1; |
445 | break; | 480 | break; |
481 | } | ||
482 | |||
483 | if (prev_symbol_type) { | ||
484 | u64 end = start; | ||
485 | if (end != prev_start) | ||
486 | --end; | ||
487 | err = process_symbol(arg, prev_symbol_name, | ||
488 | prev_symbol_type, prev_start, end); | ||
489 | if (err) | ||
490 | break; | ||
491 | } | ||
492 | |||
493 | memcpy(prev_symbol_name, symbol_name, len + 1); | ||
494 | prev_symbol_type = symbol_type; | ||
495 | prev_start = start; | ||
446 | } | 496 | } |
447 | 497 | ||
498 | free(prev_symbol_name); | ||
448 | free(line); | 499 | free(line); |
500 | out_close: | ||
449 | fclose(file); | 501 | fclose(file); |
450 | return err; | 502 | return err; |
451 | 503 | ||
@@ -467,7 +519,7 @@ static u8 kallsyms2elf_type(char type) | |||
467 | } | 519 | } |
468 | 520 | ||
469 | static int map__process_kallsym_symbol(void *arg, const char *name, | 521 | static int map__process_kallsym_symbol(void *arg, const char *name, |
470 | char type, u64 start) | 522 | char type, u64 start, u64 end) |
471 | { | 523 | { |
472 | struct symbol *sym; | 524 | struct symbol *sym; |
473 | struct process_kallsyms_args *a = arg; | 525 | struct process_kallsyms_args *a = arg; |
@@ -476,11 +528,8 @@ static int map__process_kallsym_symbol(void *arg, const char *name, | |||
476 | if (!symbol_type__is_a(type, a->map->type)) | 528 | if (!symbol_type__is_a(type, a->map->type)) |
477 | return 0; | 529 | return 0; |
478 | 530 | ||
479 | /* | 531 | sym = symbol__new(start, end - start + 1, |
480 | * Will fix up the end later, when we have all symbols sorted. | 532 | kallsyms2elf_type(type), name); |
481 | */ | ||
482 | sym = symbol__new(start, 0, kallsyms2elf_type(type), name); | ||
483 | |||
484 | if (sym == NULL) | 533 | if (sym == NULL) |
485 | return -ENOMEM; | 534 | return -ENOMEM; |
486 | /* | 535 | /* |
@@ -497,10 +546,10 @@ static int map__process_kallsym_symbol(void *arg, const char *name, | |||
497 | * so that we can in the next step set the symbol ->end address and then | 546 | * so that we can in the next step set the symbol ->end address and then |
498 | * call kernel_maps__split_kallsyms. | 547 | * call kernel_maps__split_kallsyms. |
499 | */ | 548 | */ |
500 | static int dso__load_all_kallsyms(struct dso *self, const char *filename, | 549 | static int dso__load_all_kallsyms(struct dso *dso, const char *filename, |
501 | struct map *map) | 550 | struct map *map) |
502 | { | 551 | { |
503 | struct process_kallsyms_args args = { .map = map, .dso = self, }; | 552 | struct process_kallsyms_args args = { .map = map, .dso = dso, }; |
504 | return kallsyms__parse(filename, &args, map__process_kallsym_symbol); | 553 | return kallsyms__parse(filename, &args, map__process_kallsym_symbol); |
505 | } | 554 | } |
506 | 555 | ||
@@ -509,15 +558,15 @@ static int dso__load_all_kallsyms(struct dso *self, const char *filename, | |||
509 | * kernel range is broken in several maps, named [kernel].N, as we don't have | 558 | * kernel range is broken in several maps, named [kernel].N, as we don't have |
510 | * the original ELF section names vmlinux have. | 559 | * the original ELF section names vmlinux have. |
511 | */ | 560 | */ |
512 | static int dso__split_kallsyms(struct dso *self, struct map *map, | 561 | static int dso__split_kallsyms(struct dso *dso, struct map *map, |
513 | symbol_filter_t filter) | 562 | symbol_filter_t filter) |
514 | { | 563 | { |
515 | struct map_groups *kmaps = map__kmap(map)->kmaps; | 564 | struct map_groups *kmaps = map__kmap(map)->kmaps; |
516 | struct machine *machine = kmaps->machine; | 565 | struct machine *machine = kmaps->machine; |
517 | struct map *curr_map = map; | 566 | struct map *curr_map = map; |
518 | struct symbol *pos; | 567 | struct symbol *pos; |
519 | int count = 0; | 568 | int count = 0, moved = 0; |
520 | struct rb_root *root = &self->symbols[map->type]; | 569 | struct rb_root *root = &dso->symbols[map->type]; |
521 | struct rb_node *next = rb_first(root); | 570 | struct rb_node *next = rb_first(root); |
522 | int kernel_range = 0; | 571 | int kernel_range = 0; |
523 | 572 | ||
@@ -536,7 +585,7 @@ static int dso__split_kallsyms(struct dso *self, struct map *map, | |||
536 | 585 | ||
537 | if (strcmp(curr_map->dso->short_name, module)) { | 586 | if (strcmp(curr_map->dso->short_name, module)) { |
538 | if (curr_map != map && | 587 | if (curr_map != map && |
539 | self->kernel == DSO_TYPE_GUEST_KERNEL && | 588 | dso->kernel == DSO_TYPE_GUEST_KERNEL && |
540 | machine__is_default_guest(machine)) { | 589 | machine__is_default_guest(machine)) { |
541 | /* | 590 | /* |
542 | * We assume all symbols of a module are | 591 | * We assume all symbols of a module are |
@@ -572,9 +621,14 @@ static int dso__split_kallsyms(struct dso *self, struct map *map, | |||
572 | pos->end = curr_map->map_ip(curr_map, pos->end); | 621 | pos->end = curr_map->map_ip(curr_map, pos->end); |
573 | } else if (curr_map != map) { | 622 | } else if (curr_map != map) { |
574 | char dso_name[PATH_MAX]; | 623 | char dso_name[PATH_MAX]; |
575 | struct dso *dso; | 624 | struct dso *ndso; |
576 | 625 | ||
577 | if (self->kernel == DSO_TYPE_GUEST_KERNEL) | 626 | if (count == 0) { |
627 | curr_map = map; | ||
628 | goto filter_symbol; | ||
629 | } | ||
630 | |||
631 | if (dso->kernel == DSO_TYPE_GUEST_KERNEL) | ||
578 | snprintf(dso_name, sizeof(dso_name), | 632 | snprintf(dso_name, sizeof(dso_name), |
579 | "[guest.kernel].%d", | 633 | "[guest.kernel].%d", |
580 | kernel_range++); | 634 | kernel_range++); |
@@ -583,15 +637,15 @@ static int dso__split_kallsyms(struct dso *self, struct map *map, | |||
583 | "[kernel].%d", | 637 | "[kernel].%d", |
584 | kernel_range++); | 638 | kernel_range++); |
585 | 639 | ||
586 | dso = dso__new(dso_name); | 640 | ndso = dso__new(dso_name); |
587 | if (dso == NULL) | 641 | if (ndso == NULL) |
588 | return -1; | 642 | return -1; |
589 | 643 | ||
590 | dso->kernel = self->kernel; | 644 | ndso->kernel = dso->kernel; |
591 | 645 | ||
592 | curr_map = map__new2(pos->start, dso, map->type); | 646 | curr_map = map__new2(pos->start, ndso, map->type); |
593 | if (curr_map == NULL) { | 647 | if (curr_map == NULL) { |
594 | dso__delete(dso); | 648 | dso__delete(ndso); |
595 | return -1; | 649 | return -1; |
596 | } | 650 | } |
597 | 651 | ||
@@ -599,7 +653,7 @@ static int dso__split_kallsyms(struct dso *self, struct map *map, | |||
599 | map_groups__insert(kmaps, curr_map); | 653 | map_groups__insert(kmaps, curr_map); |
600 | ++kernel_range; | 654 | ++kernel_range; |
601 | } | 655 | } |
602 | 656 | filter_symbol: | |
603 | if (filter && filter(curr_map, pos)) { | 657 | if (filter && filter(curr_map, pos)) { |
604 | discard_symbol: rb_erase(&pos->rb_node, root); | 658 | discard_symbol: rb_erase(&pos->rb_node, root); |
605 | symbol__delete(pos); | 659 | symbol__delete(pos); |
@@ -607,36 +661,57 @@ discard_symbol: rb_erase(&pos->rb_node, root); | |||
607 | if (curr_map != map) { | 661 | if (curr_map != map) { |
608 | rb_erase(&pos->rb_node, root); | 662 | rb_erase(&pos->rb_node, root); |
609 | symbols__insert(&curr_map->dso->symbols[curr_map->type], pos); | 663 | symbols__insert(&curr_map->dso->symbols[curr_map->type], pos); |
610 | } | 664 | ++moved; |
611 | count++; | 665 | } else |
666 | ++count; | ||
612 | } | 667 | } |
613 | } | 668 | } |
614 | 669 | ||
615 | if (curr_map != map && | 670 | if (curr_map != map && |
616 | self->kernel == DSO_TYPE_GUEST_KERNEL && | 671 | dso->kernel == DSO_TYPE_GUEST_KERNEL && |
617 | machine__is_default_guest(kmaps->machine)) { | 672 | machine__is_default_guest(kmaps->machine)) { |
618 | dso__set_loaded(curr_map->dso, curr_map->type); | 673 | dso__set_loaded(curr_map->dso, curr_map->type); |
619 | } | 674 | } |
620 | 675 | ||
621 | return count; | 676 | return count + moved; |
622 | } | 677 | } |
623 | 678 | ||
624 | int dso__load_kallsyms(struct dso *self, const char *filename, | 679 | static bool symbol__restricted_filename(const char *filename, |
680 | const char *restricted_filename) | ||
681 | { | ||
682 | bool restricted = false; | ||
683 | |||
684 | if (symbol_conf.kptr_restrict) { | ||
685 | char *r = realpath(filename, NULL); | ||
686 | |||
687 | if (r != NULL) { | ||
688 | restricted = strcmp(r, restricted_filename) == 0; | ||
689 | free(r); | ||
690 | return restricted; | ||
691 | } | ||
692 | } | ||
693 | |||
694 | return restricted; | ||
695 | } | ||
696 | |||
697 | int dso__load_kallsyms(struct dso *dso, const char *filename, | ||
625 | struct map *map, symbol_filter_t filter) | 698 | struct map *map, symbol_filter_t filter) |
626 | { | 699 | { |
627 | if (dso__load_all_kallsyms(self, filename, map) < 0) | 700 | if (symbol__restricted_filename(filename, "/proc/kallsyms")) |
701 | return -1; | ||
702 | |||
703 | if (dso__load_all_kallsyms(dso, filename, map) < 0) | ||
628 | return -1; | 704 | return -1; |
629 | 705 | ||
630 | symbols__fixup_end(&self->symbols[map->type]); | 706 | if (dso->kernel == DSO_TYPE_GUEST_KERNEL) |
631 | if (self->kernel == DSO_TYPE_GUEST_KERNEL) | 707 | dso->symtab_type = SYMTAB__GUEST_KALLSYMS; |
632 | self->origin = DSO__ORIG_GUEST_KERNEL; | ||
633 | else | 708 | else |
634 | self->origin = DSO__ORIG_KERNEL; | 709 | dso->symtab_type = SYMTAB__KALLSYMS; |
635 | 710 | ||
636 | return dso__split_kallsyms(self, map, filter); | 711 | return dso__split_kallsyms(dso, map, filter); |
637 | } | 712 | } |
638 | 713 | ||
639 | static int dso__load_perf_map(struct dso *self, struct map *map, | 714 | static int dso__load_perf_map(struct dso *dso, struct map *map, |
640 | symbol_filter_t filter) | 715 | symbol_filter_t filter) |
641 | { | 716 | { |
642 | char *line = NULL; | 717 | char *line = NULL; |
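The symbol__restricted_filename() helper added above canonicalizes the requested path before comparing it, so a user-supplied copy of kallsyms is still accepted while the live /proc/kallsyms is refused when kptr_restrict hides kernel addresses. A stand-alone sketch of that check; realpath() with a NULL buffer is the POSIX.1-2008 allocating form:

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

static bool is_restricted_file(const char *filename, const char *restricted)
{
	char *r = realpath(filename, NULL);	/* resolves symlinks, "..", etc. */
	bool ret = false;

	if (r != NULL) {
		ret = strcmp(r, restricted) == 0;
		free(r);
	}
	return ret;
}

/* is_restricted_file("/proc/kallsyms", "/proc/kallsyms") -> true */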
@@ -644,7 +719,7 @@ static int dso__load_perf_map(struct dso *self, struct map *map, | |||
644 | FILE *file; | 719 | FILE *file; |
645 | int nr_syms = 0; | 720 | int nr_syms = 0; |
646 | 721 | ||
647 | file = fopen(self->long_name, "r"); | 722 | file = fopen(dso->long_name, "r"); |
648 | if (file == NULL) | 723 | if (file == NULL) |
649 | goto out_failure; | 724 | goto out_failure; |
650 | 725 | ||
@@ -682,7 +757,7 @@ static int dso__load_perf_map(struct dso *self, struct map *map, | |||
682 | if (filter && filter(map, sym)) | 757 | if (filter && filter(map, sym)) |
683 | symbol__delete(sym); | 758 | symbol__delete(sym); |
684 | else { | 759 | else { |
685 | symbols__insert(&self->symbols[map->type], sym); | 760 | symbols__insert(&dso->symbols[map->type], sym); |
686 | nr_syms++; | 761 | nr_syms++; |
687 | } | 762 | } |
688 | } | 763 | } |
@@ -701,7 +776,7 @@ out_failure: | |||
701 | /** | 776 | /** |
702 | * elf_symtab__for_each_symbol - iterate thru all the symbols | 777 | * elf_symtab__for_each_symbol - iterate thru all the symbols |
703 | * | 778 | * |
704 | * @self: struct elf_symtab instance to iterate | 779 | * @syms: struct elf_symtab instance to iterate |
705 | * @idx: uint32_t idx | 780 | * @idx: uint32_t idx |
706 | * @sym: GElf_Sym iterator | 781 | * @sym: GElf_Sym iterator |
707 | */ | 782 | */ |
@@ -801,7 +876,7 @@ static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep, | |||
801 | * And always look at the original dso, not at debuginfo packages, that | 876 | * And always look at the original dso, not at debuginfo packages, that |
802 | * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS). | 877 | * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS). |
803 | */ | 878 | */ |
804 | static int dso__synthesize_plt_symbols(struct dso *self, struct map *map, | 879 | static int dso__synthesize_plt_symbols(struct dso *dso, struct map *map, |
805 | symbol_filter_t filter) | 880 | symbol_filter_t filter) |
806 | { | 881 | { |
807 | uint32_t nr_rel_entries, idx; | 882 | uint32_t nr_rel_entries, idx; |
@@ -817,8 +892,11 @@ static int dso__synthesize_plt_symbols(struct dso *self, struct map *map, | |||
817 | char sympltname[1024]; | 892 | char sympltname[1024]; |
818 | Elf *elf; | 893 | Elf *elf; |
819 | int nr = 0, symidx, fd, err = 0; | 894 | int nr = 0, symidx, fd, err = 0; |
895 | char name[PATH_MAX]; | ||
820 | 896 | ||
821 | fd = open(self->long_name, O_RDONLY); | 897 | snprintf(name, sizeof(name), "%s%s", |
898 | symbol_conf.symfs, dso->long_name); | ||
899 | fd = open(name, O_RDONLY); | ||
822 | if (fd < 0) | 900 | if (fd < 0) |
823 | goto out; | 901 | goto out; |
824 | 902 | ||
@@ -893,7 +971,7 @@ static int dso__synthesize_plt_symbols(struct dso *self, struct map *map, | |||
893 | if (filter && filter(map, f)) | 971 | if (filter && filter(map, f)) |
894 | symbol__delete(f); | 972 | symbol__delete(f); |
895 | else { | 973 | else { |
896 | symbols__insert(&self->symbols[map->type], f); | 974 | symbols__insert(&dso->symbols[map->type], f); |
897 | ++nr; | 975 | ++nr; |
898 | } | 976 | } |
899 | } | 977 | } |
@@ -915,7 +993,7 @@ static int dso__synthesize_plt_symbols(struct dso *self, struct map *map, | |||
915 | if (filter && filter(map, f)) | 993 | if (filter && filter(map, f)) |
916 | symbol__delete(f); | 994 | symbol__delete(f); |
917 | else { | 995 | else { |
918 | symbols__insert(&self->symbols[map->type], f); | 996 | symbols__insert(&dso->symbols[map->type], f); |
919 | ++nr; | 997 | ++nr; |
920 | } | 998 | } |
921 | } | 999 | } |
@@ -931,29 +1009,30 @@ out_close: | |||
931 | return nr; | 1009 | return nr; |
932 | out: | 1010 | out: |
933 | pr_debug("%s: problems reading %s PLT info.\n", | 1011 | pr_debug("%s: problems reading %s PLT info.\n", |
934 | __func__, self->long_name); | 1012 | __func__, dso->long_name); |
935 | return 0; | 1013 | return 0; |
936 | } | 1014 | } |
937 | 1015 | ||
938 | static bool elf_sym__is_a(GElf_Sym *self, enum map_type type) | 1016 | static bool elf_sym__is_a(GElf_Sym *sym, enum map_type type) |
939 | { | 1017 | { |
940 | switch (type) { | 1018 | switch (type) { |
941 | case MAP__FUNCTION: | 1019 | case MAP__FUNCTION: |
942 | return elf_sym__is_function(self); | 1020 | return elf_sym__is_function(sym); |
943 | case MAP__VARIABLE: | 1021 | case MAP__VARIABLE: |
944 | return elf_sym__is_object(self); | 1022 | return elf_sym__is_object(sym); |
945 | default: | 1023 | default: |
946 | return false; | 1024 | return false; |
947 | } | 1025 | } |
948 | } | 1026 | } |
949 | 1027 | ||
950 | static bool elf_sec__is_a(GElf_Shdr *self, Elf_Data *secstrs, enum map_type type) | 1028 | static bool elf_sec__is_a(GElf_Shdr *shdr, Elf_Data *secstrs, |
1029 | enum map_type type) | ||
951 | { | 1030 | { |
952 | switch (type) { | 1031 | switch (type) { |
953 | case MAP__FUNCTION: | 1032 | case MAP__FUNCTION: |
954 | return elf_sec__is_text(self, secstrs); | 1033 | return elf_sec__is_text(shdr, secstrs); |
955 | case MAP__VARIABLE: | 1034 | case MAP__VARIABLE: |
956 | return elf_sec__is_data(self, secstrs); | 1035 | return elf_sec__is_data(shdr, secstrs); |
957 | default: | 1036 | default: |
958 | return false; | 1037 | return false; |
959 | } | 1038 | } |
@@ -978,13 +1057,13 @@ static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr) | |||
978 | return -1; | 1057 | return -1; |
979 | } | 1058 | } |
980 | 1059 | ||
981 | static int dso__load_sym(struct dso *self, struct map *map, const char *name, | 1060 | static int dso__load_sym(struct dso *dso, struct map *map, const char *name, |
982 | int fd, symbol_filter_t filter, int kmodule, | 1061 | int fd, symbol_filter_t filter, int kmodule, |
983 | int want_symtab) | 1062 | int want_symtab) |
984 | { | 1063 | { |
985 | struct kmap *kmap = self->kernel ? map__kmap(map) : NULL; | 1064 | struct kmap *kmap = dso->kernel ? map__kmap(map) : NULL; |
986 | struct map *curr_map = map; | 1065 | struct map *curr_map = map; |
987 | struct dso *curr_dso = self; | 1066 | struct dso *curr_dso = dso; |
988 | Elf_Data *symstrs, *secstrs; | 1067 | Elf_Data *symstrs, *secstrs; |
989 | uint32_t nr_syms; | 1068 | uint32_t nr_syms; |
990 | int err = -1; | 1069 | int err = -1; |
@@ -1010,14 +1089,14 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, | |||
1010 | } | 1089 | } |
1011 | 1090 | ||
1012 | /* Always reject images with a mismatched build-id: */ | 1091 | /* Always reject images with a mismatched build-id: */ |
1013 | if (self->has_build_id) { | 1092 | if (dso->has_build_id) { |
1014 | u8 build_id[BUILD_ID_SIZE]; | 1093 | u8 build_id[BUILD_ID_SIZE]; |
1015 | 1094 | ||
1016 | if (elf_read_build_id(elf, build_id, | 1095 | if (elf_read_build_id(elf, build_id, |
1017 | BUILD_ID_SIZE) != BUILD_ID_SIZE) | 1096 | BUILD_ID_SIZE) != BUILD_ID_SIZE) |
1018 | goto out_elf_end; | 1097 | goto out_elf_end; |
1019 | 1098 | ||
1020 | if (!dso__build_id_equal(self, build_id)) | 1099 | if (!dso__build_id_equal(dso, build_id)) |
1021 | goto out_elf_end; | 1100 | goto out_elf_end; |
1022 | } | 1101 | } |
1023 | 1102 | ||
@@ -1058,13 +1137,14 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, | |||
1058 | nr_syms = shdr.sh_size / shdr.sh_entsize; | 1137 | nr_syms = shdr.sh_size / shdr.sh_entsize; |
1059 | 1138 | ||
1060 | memset(&sym, 0, sizeof(sym)); | 1139 | memset(&sym, 0, sizeof(sym)); |
1061 | if (self->kernel == DSO_TYPE_USER) { | 1140 | if (dso->kernel == DSO_TYPE_USER) { |
1062 | self->adjust_symbols = (ehdr.e_type == ET_EXEC || | 1141 | dso->adjust_symbols = (ehdr.e_type == ET_EXEC || |
1063 | elf_section_by_name(elf, &ehdr, &shdr, | 1142 | elf_section_by_name(elf, &ehdr, &shdr, |
1064 | ".gnu.prelink_undo", | 1143 | ".gnu.prelink_undo", |
1065 | NULL) != NULL); | 1144 | NULL) != NULL); |
1066 | } else self->adjust_symbols = 0; | 1145 | } else { |
1067 | 1146 | dso->adjust_symbols = 0; | |
1147 | } | ||
1068 | elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) { | 1148 | elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) { |
1069 | struct symbol *f; | 1149 | struct symbol *f; |
1070 | const char *elf_name = elf_sym__name(&sym, symstrs); | 1150 | const char *elf_name = elf_sym__name(&sym, symstrs); |
@@ -1107,22 +1187,29 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, | |||
1107 | 1187 | ||
1108 | section_name = elf_sec__name(&shdr, secstrs); | 1188 | section_name = elf_sec__name(&shdr, secstrs); |
1109 | 1189 | ||
1110 | if (self->kernel != DSO_TYPE_USER || kmodule) { | 1190 | /* On ARM, symbols for thumb functions have 1 added to |
1191 | * the symbol address as a flag - remove it */ | ||
1192 | if ((ehdr.e_machine == EM_ARM) && | ||
1193 | (map->type == MAP__FUNCTION) && | ||
1194 | (sym.st_value & 1)) | ||
1195 | --sym.st_value; | ||
1196 | |||
1197 | if (dso->kernel != DSO_TYPE_USER || kmodule) { | ||
1111 | char dso_name[PATH_MAX]; | 1198 | char dso_name[PATH_MAX]; |
1112 | 1199 | ||
1113 | if (strcmp(section_name, | 1200 | if (strcmp(section_name, |
1114 | (curr_dso->short_name + | 1201 | (curr_dso->short_name + |
1115 | self->short_name_len)) == 0) | 1202 | dso->short_name_len)) == 0) |
1116 | goto new_symbol; | 1203 | goto new_symbol; |
1117 | 1204 | ||
1118 | if (strcmp(section_name, ".text") == 0) { | 1205 | if (strcmp(section_name, ".text") == 0) { |
1119 | curr_map = map; | 1206 | curr_map = map; |
1120 | curr_dso = self; | 1207 | curr_dso = dso; |
1121 | goto new_symbol; | 1208 | goto new_symbol; |
1122 | } | 1209 | } |
1123 | 1210 | ||
1124 | snprintf(dso_name, sizeof(dso_name), | 1211 | snprintf(dso_name, sizeof(dso_name), |
1125 | "%s%s", self->short_name, section_name); | 1212 | "%s%s", dso->short_name, section_name); |
1126 | 1213 | ||
1127 | curr_map = map_groups__find_by_name(kmap->kmaps, map->type, dso_name); | 1214 | curr_map = map_groups__find_by_name(kmap->kmaps, map->type, dso_name); |
1128 | if (curr_map == NULL) { | 1215 | if (curr_map == NULL) { |
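The ARM hunk above strips the Thumb flag: for functions compiled as Thumb, bit 0 of the ELF symbol's st_value only marks the instruction set and is not part of the address. A small stand-alone sketch of the same adjustment; EM_ARM comes from <elf.h>, and uint64_t stands in for u64:

#include <elf.h>
#include <stdint.h>

static uint64_t arm_function_addr(uint16_t e_machine, uint64_t st_value)
{
	/* Bit 0 only says "this is Thumb code"; the real address is even. */
	if (e_machine == EM_ARM && (st_value & 1))
		return st_value - 1;
	return st_value;
}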
@@ -1134,7 +1221,9 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, | |||
1134 | curr_dso = dso__new(dso_name); | 1221 | curr_dso = dso__new(dso_name); |
1135 | if (curr_dso == NULL) | 1222 | if (curr_dso == NULL) |
1136 | goto out_elf_end; | 1223 | goto out_elf_end; |
1137 | curr_dso->kernel = self->kernel; | 1224 | curr_dso->kernel = dso->kernel; |
1225 | curr_dso->long_name = dso->long_name; | ||
1226 | curr_dso->long_name_len = dso->long_name_len; | ||
1138 | curr_map = map__new2(start, curr_dso, | 1227 | curr_map = map__new2(start, curr_dso, |
1139 | map->type); | 1228 | map->type); |
1140 | if (curr_map == NULL) { | 1229 | if (curr_map == NULL) { |
@@ -1143,9 +1232,9 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, | |||
1143 | } | 1232 | } |
1144 | curr_map->map_ip = identity__map_ip; | 1233 | curr_map->map_ip = identity__map_ip; |
1145 | curr_map->unmap_ip = identity__map_ip; | 1234 | curr_map->unmap_ip = identity__map_ip; |
1146 | curr_dso->origin = self->origin; | 1235 | curr_dso->symtab_type = dso->symtab_type; |
1147 | map_groups__insert(kmap->kmaps, curr_map); | 1236 | map_groups__insert(kmap->kmaps, curr_map); |
1148 | dsos__add(&self->node, curr_dso); | 1237 | dsos__add(&dso->node, curr_dso); |
1149 | dso__set_loaded(curr_dso, map->type); | 1238 | dso__set_loaded(curr_dso, map->type); |
1150 | } else | 1239 | } else |
1151 | curr_dso = curr_map->dso; | 1240 | curr_dso = curr_map->dso; |
@@ -1154,8 +1243,8 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, | |||
1154 | } | 1243 | } |
1155 | 1244 | ||
1156 | if (curr_dso->adjust_symbols) { | 1245 | if (curr_dso->adjust_symbols) { |
1157 | pr_debug4("%s: adjusting symbol: st_value: %#Lx " | 1246 | pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " " |
1158 | "sh_addr: %#Lx sh_offset: %#Lx\n", __func__, | 1247 | "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", __func__, |
1159 | (u64)sym.st_value, (u64)shdr.sh_addr, | 1248 | (u64)sym.st_value, (u64)shdr.sh_addr, |
1160 | (u64)shdr.sh_offset); | 1249 | (u64)shdr.sh_offset); |
1161 | sym.st_value -= shdr.sh_addr - shdr.sh_offset; | 1250 | sym.st_value -= shdr.sh_addr - shdr.sh_offset; |
@@ -1187,7 +1276,7 @@ new_symbol: | |||
1187 | * For misannotated, zeroed, ASM function sizes. | 1276 | * For misannotated, zeroed, ASM function sizes. |
1188 | */ | 1277 | */ |
1189 | if (nr > 0) { | 1278 | if (nr > 0) { |
1190 | symbols__fixup_end(&self->symbols[map->type]); | 1279 | symbols__fixup_end(&dso->symbols[map->type]); |
1191 | if (kmap) { | 1280 | if (kmap) { |
1192 | /* | 1281 | /* |
1193 | * We need to fixup this here too because we create new | 1282 | * We need to fixup this here too because we create new |
@@ -1203,9 +1292,9 @@ out_close: | |||
1203 | return err; | 1292 | return err; |
1204 | } | 1293 | } |
1205 | 1294 | ||
1206 | static bool dso__build_id_equal(const struct dso *self, u8 *build_id) | 1295 | static bool dso__build_id_equal(const struct dso *dso, u8 *build_id) |
1207 | { | 1296 | { |
1208 | return memcmp(self->build_id, build_id, sizeof(self->build_id)) == 0; | 1297 | return memcmp(dso->build_id, build_id, sizeof(dso->build_id)) == 0; |
1209 | } | 1298 | } |
1210 | 1299 | ||
1211 | bool __dsos__read_build_ids(struct list_head *head, bool with_hits) | 1300 | bool __dsos__read_build_ids(struct list_head *head, bool with_hits) |
@@ -1366,27 +1455,27 @@ out: | |||
1366 | return err; | 1455 | return err; |
1367 | } | 1456 | } |
1368 | 1457 | ||
1369 | char dso__symtab_origin(const struct dso *self) | 1458 | char dso__symtab_origin(const struct dso *dso) |
1370 | { | 1459 | { |
1371 | static const char origin[] = { | 1460 | static const char origin[] = { |
1372 | [DSO__ORIG_KERNEL] = 'k', | 1461 | [SYMTAB__KALLSYMS] = 'k', |
1373 | [DSO__ORIG_JAVA_JIT] = 'j', | 1462 | [SYMTAB__JAVA_JIT] = 'j', |
1374 | [DSO__ORIG_BUILD_ID_CACHE] = 'B', | 1463 | [SYMTAB__BUILD_ID_CACHE] = 'B', |
1375 | [DSO__ORIG_FEDORA] = 'f', | 1464 | [SYMTAB__FEDORA_DEBUGINFO] = 'f', |
1376 | [DSO__ORIG_UBUNTU] = 'u', | 1465 | [SYMTAB__UBUNTU_DEBUGINFO] = 'u', |
1377 | [DSO__ORIG_BUILDID] = 'b', | 1466 | [SYMTAB__BUILDID_DEBUGINFO] = 'b', |
1378 | [DSO__ORIG_DSO] = 'd', | 1467 | [SYMTAB__SYSTEM_PATH_DSO] = 'd', |
1379 | [DSO__ORIG_KMODULE] = 'K', | 1468 | [SYMTAB__SYSTEM_PATH_KMODULE] = 'K', |
1380 | [DSO__ORIG_GUEST_KERNEL] = 'g', | 1469 | [SYMTAB__GUEST_KALLSYMS] = 'g', |
1381 | [DSO__ORIG_GUEST_KMODULE] = 'G', | 1470 | [SYMTAB__GUEST_KMODULE] = 'G', |
1382 | }; | 1471 | }; |
1383 | 1472 | ||
1384 | if (self == NULL || self->origin == DSO__ORIG_NOT_FOUND) | 1473 | if (dso == NULL || dso->symtab_type == SYMTAB__NOT_FOUND) |
1385 | return '!'; | 1474 | return '!'; |
1386 | return origin[self->origin]; | 1475 | return origin[dso->symtab_type]; |
1387 | } | 1476 | } |
1388 | 1477 | ||
1389 | int dso__load(struct dso *self, struct map *map, symbol_filter_t filter) | 1478 | int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) |
1390 | { | 1479 | { |
1391 | int size = PATH_MAX; | 1480 | int size = PATH_MAX; |
1392 | char *name; | 1481 | char *name; |
@@ -1396,12 +1485,12 @@ int dso__load(struct dso *self, struct map *map, symbol_filter_t filter) | |||
1396 | const char *root_dir; | 1485 | const char *root_dir; |
1397 | int want_symtab; | 1486 | int want_symtab; |
1398 | 1487 | ||
1399 | dso__set_loaded(self, map->type); | 1488 | dso__set_loaded(dso, map->type); |
1400 | 1489 | ||
1401 | if (self->kernel == DSO_TYPE_KERNEL) | 1490 | if (dso->kernel == DSO_TYPE_KERNEL) |
1402 | return dso__load_kernel_sym(self, map, filter); | 1491 | return dso__load_kernel_sym(dso, map, filter); |
1403 | else if (self->kernel == DSO_TYPE_GUEST_KERNEL) | 1492 | else if (dso->kernel == DSO_TYPE_GUEST_KERNEL) |
1404 | return dso__load_guest_kernel_sym(self, map, filter); | 1493 | return dso__load_guest_kernel_sym(dso, map, filter); |
1405 | 1494 | ||
1406 | if (map->groups && map->groups->machine) | 1495 | if (map->groups && map->groups->machine) |
1407 | machine = map->groups->machine; | 1496 | machine = map->groups->machine; |
@@ -1412,12 +1501,12 @@ int dso__load(struct dso *self, struct map *map, symbol_filter_t filter) | |||
1412 | if (!name) | 1501 | if (!name) |
1413 | return -1; | 1502 | return -1; |
1414 | 1503 | ||
1415 | self->adjust_symbols = 0; | 1504 | dso->adjust_symbols = 0; |
1416 | 1505 | ||
1417 | if (strncmp(self->name, "/tmp/perf-", 10) == 0) { | 1506 | if (strncmp(dso->name, "/tmp/perf-", 10) == 0) { |
1418 | ret = dso__load_perf_map(self, map, filter); | 1507 | ret = dso__load_perf_map(dso, map, filter); |
1419 | self->origin = ret > 0 ? DSO__ORIG_JAVA_JIT : | 1508 | dso->symtab_type = ret > 0 ? SYMTAB__JAVA_JIT : |
1420 | DSO__ORIG_NOT_FOUND; | 1509 | SYMTAB__NOT_FOUND; |
1421 | return ret; | 1510 | return ret; |
1422 | } | 1511 | } |
1423 | 1512 | ||
@@ -1425,57 +1514,59 @@ int dso__load(struct dso *self, struct map *map, symbol_filter_t filter) | |||
1425 | * On the first pass, only load images if they have a full symtab. | 1514 | * On the first pass, only load images if they have a full symtab. |
1426 | * Failing that, do a second pass where we accept .dynsym also | 1515 | * Failing that, do a second pass where we accept .dynsym also |
1427 | */ | 1516 | */ |
1428 | for (self->origin = DSO__ORIG_BUILD_ID_CACHE, want_symtab = 1; | 1517 | want_symtab = 1; |
1429 | self->origin != DSO__ORIG_NOT_FOUND; | 1518 | restart: |
1430 | self->origin++) { | 1519 | for (dso->symtab_type = SYMTAB__BUILD_ID_CACHE; |
1431 | switch (self->origin) { | 1520 | dso->symtab_type != SYMTAB__NOT_FOUND; |
1432 | case DSO__ORIG_BUILD_ID_CACHE: | 1521 | dso->symtab_type++) { |
1433 | if (dso__build_id_filename(self, name, size) == NULL) | 1522 | switch (dso->symtab_type) { |
1523 | case SYMTAB__BUILD_ID_CACHE: | ||
1524 | /* skip the locally configured cache if a symfs is given */ | ||
1525 | if (symbol_conf.symfs[0] || | ||
1526 | (dso__build_id_filename(dso, name, size) == NULL)) { | ||
1434 | continue; | 1527 | continue; |
1528 | } | ||
1435 | break; | 1529 | break; |
1436 | case DSO__ORIG_FEDORA: | 1530 | case SYMTAB__FEDORA_DEBUGINFO: |
1437 | snprintf(name, size, "/usr/lib/debug%s.debug", | 1531 | snprintf(name, size, "%s/usr/lib/debug%s.debug", |
1438 | self->long_name); | 1532 | symbol_conf.symfs, dso->long_name); |
1439 | break; | 1533 | break; |
1440 | case DSO__ORIG_UBUNTU: | 1534 | case SYMTAB__UBUNTU_DEBUGINFO: |
1441 | snprintf(name, size, "/usr/lib/debug%s", | 1535 | snprintf(name, size, "%s/usr/lib/debug%s", |
1442 | self->long_name); | 1536 | symbol_conf.symfs, dso->long_name); |
1443 | break; | 1537 | break; |
1444 | case DSO__ORIG_BUILDID: { | 1538 | case SYMTAB__BUILDID_DEBUGINFO: { |
1445 | char build_id_hex[BUILD_ID_SIZE * 2 + 1]; | 1539 | char build_id_hex[BUILD_ID_SIZE * 2 + 1]; |
1446 | 1540 | ||
1447 | if (!self->has_build_id) | 1541 | if (!dso->has_build_id) |
1448 | continue; | 1542 | continue; |
1449 | 1543 | ||
1450 | build_id__sprintf(self->build_id, | 1544 | build_id__sprintf(dso->build_id, |
1451 | sizeof(self->build_id), | 1545 | sizeof(dso->build_id), |
1452 | build_id_hex); | 1546 | build_id_hex); |
1453 | snprintf(name, size, | 1547 | snprintf(name, size, |
1454 | "/usr/lib/debug/.build-id/%.2s/%s.debug", | 1548 | "%s/usr/lib/debug/.build-id/%.2s/%s.debug", |
1455 | build_id_hex, build_id_hex + 2); | 1549 | symbol_conf.symfs, build_id_hex, build_id_hex + 2); |
1456 | } | 1550 | } |
1457 | break; | 1551 | break; |
1458 | case DSO__ORIG_DSO: | 1552 | case SYMTAB__SYSTEM_PATH_DSO: |
1459 | snprintf(name, size, "%s", self->long_name); | 1553 | snprintf(name, size, "%s%s", |
1554 | symbol_conf.symfs, dso->long_name); | ||
1460 | break; | 1555 | break; |
1461 | case DSO__ORIG_GUEST_KMODULE: | 1556 | case SYMTAB__GUEST_KMODULE: |
1462 | if (map->groups && map->groups->machine) | 1557 | if (map->groups && machine) |
1463 | root_dir = map->groups->machine->root_dir; | 1558 | root_dir = machine->root_dir; |
1464 | else | 1559 | else |
1465 | root_dir = ""; | 1560 | root_dir = ""; |
1466 | snprintf(name, size, "%s%s", root_dir, self->long_name); | 1561 | snprintf(name, size, "%s%s%s", symbol_conf.symfs, |
1562 | root_dir, dso->long_name); | ||
1467 | break; | 1563 | break; |
1468 | 1564 | ||
1469 | default: | 1565 | case SYMTAB__SYSTEM_PATH_KMODULE: |
1470 | /* | 1566 | snprintf(name, size, "%s%s", symbol_conf.symfs, |
1471 | * If we wanted a full symtab but no image had one, | 1567 | dso->long_name); |
1472 | * relax our requirements and repeat the search. | 1568 | break; |
1473 | */ | 1569 | default:; |
1474 | if (want_symtab) { | ||
1475 | want_symtab = 0; | ||
1476 | self->origin = DSO__ORIG_BUILD_ID_CACHE; | ||
1477 | } else | ||
1478 | continue; | ||
1479 | } | 1570 | } |
1480 | 1571 | ||
1481 | /* Name is now the name of the next image to try */ | 1572 | /* Name is now the name of the next image to try */ |
@@ -1483,7 +1574,7 @@ int dso__load(struct dso *self, struct map *map, symbol_filter_t filter) | |||
1483 | if (fd < 0) | 1574 | if (fd < 0) |
1484 | continue; | 1575 | continue; |
1485 | 1576 | ||
1486 | ret = dso__load_sym(self, map, name, fd, filter, 0, | 1577 | ret = dso__load_sym(dso, map, name, fd, filter, 0, |
1487 | want_symtab); | 1578 | want_symtab); |
1488 | close(fd); | 1579 | close(fd); |
1489 | 1580 | ||
@@ -1495,25 +1586,35 @@ int dso__load(struct dso *self, struct map *map, symbol_filter_t filter) | |||
1495 | continue; | 1586 | continue; |
1496 | 1587 | ||
1497 | if (ret > 0) { | 1588 | if (ret > 0) { |
1498 | int nr_plt = dso__synthesize_plt_symbols(self, map, filter); | 1589 | int nr_plt = dso__synthesize_plt_symbols(dso, map, |
1590 | filter); | ||
1499 | if (nr_plt > 0) | 1591 | if (nr_plt > 0) |
1500 | ret += nr_plt; | 1592 | ret += nr_plt; |
1501 | break; | 1593 | break; |
1502 | } | 1594 | } |
1503 | } | 1595 | } |
1504 | 1596 | ||
1597 | /* | ||
1598 | * If we wanted a full symtab but no image had one, | ||
1599 | * relax our requirements and repeat the search. | ||
1600 | */ | ||
1601 | if (ret <= 0 && want_symtab) { | ||
1602 | want_symtab = 0; | ||
1603 | goto restart; | ||
1604 | } | ||
1605 | |||
1505 | free(name); | 1606 | free(name); |
1506 | if (ret < 0 && strstr(self->name, " (deleted)") != NULL) | 1607 | if (ret < 0 && strstr(dso->name, " (deleted)") != NULL) |
1507 | return 0; | 1608 | return 0; |
1508 | return ret; | 1609 | return ret; |
1509 | } | 1610 | } |
1510 | 1611 | ||
1511 | struct map *map_groups__find_by_name(struct map_groups *self, | 1612 | struct map *map_groups__find_by_name(struct map_groups *mg, |
1512 | enum map_type type, const char *name) | 1613 | enum map_type type, const char *name) |
1513 | { | 1614 | { |
1514 | struct rb_node *nd; | 1615 | struct rb_node *nd; |
1515 | 1616 | ||
1516 | for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) { | 1617 | for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) { |
1517 | struct map *map = rb_entry(nd, struct map, rb_node); | 1618 | struct map *map = rb_entry(nd, struct map, rb_node); |
1518 | 1619 | ||
1519 | if (map->dso && strcmp(map->dso->short_name, name) == 0) | 1620 | if (map->dso && strcmp(map->dso->short_name, name) == 0) |
@@ -1523,28 +1624,28 @@ struct map *map_groups__find_by_name(struct map_groups *self, | |||
1523 | return NULL; | 1624 | return NULL; |
1524 | } | 1625 | } |
1525 | 1626 | ||
1526 | static int dso__kernel_module_get_build_id(struct dso *self, | 1627 | static int dso__kernel_module_get_build_id(struct dso *dso, |
1527 | const char *root_dir) | 1628 | const char *root_dir) |
1528 | { | 1629 | { |
1529 | char filename[PATH_MAX]; | 1630 | char filename[PATH_MAX]; |
1530 | /* | 1631 | /* |
1531 | * kernel module short names are of the form "[module]" and | 1632 | * kernel module short names are of the form "[module]" and |
1532 | * we need just "module" here. | 1633 | * we need just "module" here. |
1533 | */ | 1634 | */ |
1534 | const char *name = self->short_name + 1; | 1635 | const char *name = dso->short_name + 1; |
1535 | 1636 | ||
1536 | snprintf(filename, sizeof(filename), | 1637 | snprintf(filename, sizeof(filename), |
1537 | "%s/sys/module/%.*s/notes/.note.gnu.build-id", | 1638 | "%s/sys/module/%.*s/notes/.note.gnu.build-id", |
1538 | root_dir, (int)strlen(name) - 1, name); | 1639 | root_dir, (int)strlen(name) - 1, name); |
1539 | 1640 | ||
1540 | if (sysfs__read_build_id(filename, self->build_id, | 1641 | if (sysfs__read_build_id(filename, dso->build_id, |
1541 | sizeof(self->build_id)) == 0) | 1642 | sizeof(dso->build_id)) == 0) |
1542 | self->has_build_id = true; | 1643 | dso->has_build_id = true; |
1543 | 1644 | ||
1544 | return 0; | 1645 | return 0; |
1545 | } | 1646 | } |
1546 | 1647 | ||
1547 | static int map_groups__set_modules_path_dir(struct map_groups *self, | 1648 | static int map_groups__set_modules_path_dir(struct map_groups *mg, |
1548 | const char *dir_name) | 1649 | const char *dir_name) |
1549 | { | 1650 | { |
1550 | struct dirent *dent; | 1651 | struct dirent *dent; |
@@ -1572,7 +1673,7 @@ static int map_groups__set_modules_path_dir(struct map_groups *self, | |||
1572 | 1673 | ||
1573 | snprintf(path, sizeof(path), "%s/%s", | 1674 | snprintf(path, sizeof(path), "%s/%s", |
1574 | dir_name, dent->d_name); | 1675 | dir_name, dent->d_name); |
1575 | ret = map_groups__set_modules_path_dir(self, path); | 1676 | ret = map_groups__set_modules_path_dir(mg, path); |
1576 | if (ret < 0) | 1677 | if (ret < 0) |
1577 | goto out; | 1678 | goto out; |
1578 | } else { | 1679 | } else { |
@@ -1587,7 +1688,8 @@ static int map_groups__set_modules_path_dir(struct map_groups *self, | |||
1587 | (int)(dot - dent->d_name), dent->d_name); | 1688 | (int)(dot - dent->d_name), dent->d_name); |
1588 | 1689 | ||
1589 | strxfrchar(dso_name, '-', '_'); | 1690 | strxfrchar(dso_name, '-', '_'); |
1590 | map = map_groups__find_by_name(self, MAP__FUNCTION, dso_name); | 1691 | map = map_groups__find_by_name(mg, MAP__FUNCTION, |
1692 | dso_name); | ||
1591 | if (map == NULL) | 1693 | if (map == NULL) |
1592 | continue; | 1694 | continue; |
1593 | 1695 | ||
@@ -1637,20 +1739,20 @@ static char *get_kernel_version(const char *root_dir) | |||
1637 | return strdup(name); | 1739 | return strdup(name); |
1638 | } | 1740 | } |
1639 | 1741 | ||
1640 | static int machine__set_modules_path(struct machine *self) | 1742 | static int machine__set_modules_path(struct machine *machine) |
1641 | { | 1743 | { |
1642 | char *version; | 1744 | char *version; |
1643 | char modules_path[PATH_MAX]; | 1745 | char modules_path[PATH_MAX]; |
1644 | 1746 | ||
1645 | version = get_kernel_version(self->root_dir); | 1747 | version = get_kernel_version(machine->root_dir); |
1646 | if (!version) | 1748 | if (!version) |
1647 | return -1; | 1749 | return -1; |
1648 | 1750 | ||
1649 | snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s/kernel", | 1751 | snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s/kernel", |
1650 | self->root_dir, version); | 1752 | machine->root_dir, version); |
1651 | free(version); | 1753 | free(version); |
1652 | 1754 | ||
1653 | return map_groups__set_modules_path_dir(&self->kmaps, modules_path); | 1755 | return map_groups__set_modules_path_dir(&machine->kmaps, modules_path); |
1654 | } | 1756 | } |
1655 | 1757 | ||
1656 | /* | 1758 | /* |
@@ -1660,23 +1762,23 @@ static int machine__set_modules_path(struct machine *self) | |||
1660 | */ | 1762 | */ |
1661 | static struct map *map__new2(u64 start, struct dso *dso, enum map_type type) | 1763 | static struct map *map__new2(u64 start, struct dso *dso, enum map_type type) |
1662 | { | 1764 | { |
1663 | struct map *self = calloc(1, (sizeof(*self) + | 1765 | struct map *map = calloc(1, (sizeof(*map) + |
1664 | (dso->kernel ? sizeof(struct kmap) : 0))); | 1766 | (dso->kernel ? sizeof(struct kmap) : 0))); |
1665 | if (self != NULL) { | 1767 | if (map != NULL) { |
1666 | /* | 1768 | /* |
1667 | * ->end will be filled after we load all the symbols | 1769 | * ->end will be filled after we load all the symbols |
1668 | */ | 1770 | */ |
1669 | map__init(self, type, start, 0, 0, dso); | 1771 | map__init(map, type, start, 0, 0, dso); |
1670 | } | 1772 | } |
1671 | 1773 | ||
1672 | return self; | 1774 | return map; |
1673 | } | 1775 | } |
1674 | 1776 | ||
1675 | struct map *machine__new_module(struct machine *self, u64 start, | 1777 | struct map *machine__new_module(struct machine *machine, u64 start, |
1676 | const char *filename) | 1778 | const char *filename) |
1677 | { | 1779 | { |
1678 | struct map *map; | 1780 | struct map *map; |
1679 | struct dso *dso = __dsos__findnew(&self->kernel_dsos, filename); | 1781 | struct dso *dso = __dsos__findnew(&machine->kernel_dsos, filename); |
1680 | 1782 | ||
1681 | if (dso == NULL) | 1783 | if (dso == NULL) |
1682 | return NULL; | 1784 | return NULL; |
@@ -1685,15 +1787,15 @@ struct map *machine__new_module(struct machine *self, u64 start, | |||
1685 | if (map == NULL) | 1787 | if (map == NULL) |
1686 | return NULL; | 1788 | return NULL; |
1687 | 1789 | ||
1688 | if (machine__is_host(self)) | 1790 | if (machine__is_host(machine)) |
1689 | dso->origin = DSO__ORIG_KMODULE; | 1791 | dso->symtab_type = SYMTAB__SYSTEM_PATH_KMODULE; |
1690 | else | 1792 | else |
1691 | dso->origin = DSO__ORIG_GUEST_KMODULE; | 1793 | dso->symtab_type = SYMTAB__GUEST_KMODULE; |
1692 | map_groups__insert(&self->kmaps, map); | 1794 | map_groups__insert(&machine->kmaps, map); |
1693 | return map; | 1795 | return map; |
1694 | } | 1796 | } |
1695 | 1797 | ||
1696 | static int machine__create_modules(struct machine *self) | 1798 | static int machine__create_modules(struct machine *machine) |
1697 | { | 1799 | { |
1698 | char *line = NULL; | 1800 | char *line = NULL; |
1699 | size_t n; | 1801 | size_t n; |
@@ -1702,13 +1804,16 @@ static int machine__create_modules(struct machine *self) | |||
1702 | const char *modules; | 1804 | const char *modules; |
1703 | char path[PATH_MAX]; | 1805 | char path[PATH_MAX]; |
1704 | 1806 | ||
1705 | if (machine__is_default_guest(self)) | 1807 | if (machine__is_default_guest(machine)) |
1706 | modules = symbol_conf.default_guest_modules; | 1808 | modules = symbol_conf.default_guest_modules; |
1707 | else { | 1809 | else { |
1708 | sprintf(path, "%s/proc/modules", self->root_dir); | 1810 | sprintf(path, "%s/proc/modules", machine->root_dir); |
1709 | modules = path; | 1811 | modules = path; |
1710 | } | 1812 | } |
1711 | 1813 | ||
1814 | if (symbol__restricted_filename(path, "/proc/modules")) | ||
1815 | return -1; | ||
1816 | |||
1712 | file = fopen(modules, "r"); | 1817 | file = fopen(modules, "r"); |
1713 | if (file == NULL) | 1818 | if (file == NULL) |
1714 | return -1; | 1819 | return -1; |
@@ -1741,16 +1846,16 @@ static int machine__create_modules(struct machine *self) | |||
1741 | *sep = '\0'; | 1846 | *sep = '\0'; |
1742 | 1847 | ||
1743 | snprintf(name, sizeof(name), "[%s]", line); | 1848 | snprintf(name, sizeof(name), "[%s]", line); |
1744 | map = machine__new_module(self, start, name); | 1849 | map = machine__new_module(machine, start, name); |
1745 | if (map == NULL) | 1850 | if (map == NULL) |
1746 | goto out_delete_line; | 1851 | goto out_delete_line; |
1747 | dso__kernel_module_get_build_id(map->dso, self->root_dir); | 1852 | dso__kernel_module_get_build_id(map->dso, machine->root_dir); |
1748 | } | 1853 | } |
1749 | 1854 | ||
1750 | free(line); | 1855 | free(line); |
1751 | fclose(file); | 1856 | fclose(file); |
1752 | 1857 | ||
1753 | return machine__set_modules_path(self); | 1858 | return machine__set_modules_path(machine); |
1754 | 1859 | ||
1755 | out_delete_line: | 1860 | out_delete_line: |
1756 | free(line); | 1861 | free(line); |
@@ -1758,26 +1863,30 @@ out_failure: | |||
1758 | return -1; | 1863 | return -1; |
1759 | } | 1864 | } |
1760 | 1865 | ||
1761 | static int dso__load_vmlinux(struct dso *self, struct map *map, | 1866 | int dso__load_vmlinux(struct dso *dso, struct map *map, |
1762 | const char *vmlinux, symbol_filter_t filter) | 1867 | const char *vmlinux, symbol_filter_t filter) |
1763 | { | 1868 | { |
1764 | int err = -1, fd; | 1869 | int err = -1, fd; |
1870 | char symfs_vmlinux[PATH_MAX]; | ||
1765 | 1871 | ||
1766 | fd = open(vmlinux, O_RDONLY); | 1872 | snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s%s", |
1873 | symbol_conf.symfs, vmlinux); | ||
1874 | fd = open(symfs_vmlinux, O_RDONLY); | ||
1767 | if (fd < 0) | 1875 | if (fd < 0) |
1768 | return -1; | 1876 | return -1; |
1769 | 1877 | ||
1770 | dso__set_loaded(self, map->type); | 1878 | dso__set_long_name(dso, (char *)vmlinux); |
1771 | err = dso__load_sym(self, map, vmlinux, fd, filter, 0, 0); | 1879 | dso__set_loaded(dso, map->type); |
1880 | err = dso__load_sym(dso, map, symfs_vmlinux, fd, filter, 0, 0); | ||
1772 | close(fd); | 1881 | close(fd); |
1773 | 1882 | ||
1774 | if (err > 0) | 1883 | if (err > 0) |
1775 | pr_debug("Using %s for symbols\n", vmlinux); | 1884 | pr_debug("Using %s for symbols\n", symfs_vmlinux); |
1776 | 1885 | ||
1777 | return err; | 1886 | return err; |
1778 | } | 1887 | } |
1779 | 1888 | ||
1780 | int dso__load_vmlinux_path(struct dso *self, struct map *map, | 1889 | int dso__load_vmlinux_path(struct dso *dso, struct map *map, |
1781 | symbol_filter_t filter) | 1890 | symbol_filter_t filter) |
1782 | { | 1891 | { |
1783 | int i, err = 0; | 1892 | int i, err = 0; |
@@ -1786,20 +1895,20 @@ int dso__load_vmlinux_path(struct dso *self, struct map *map, | |||
1786 | pr_debug("Looking at the vmlinux_path (%d entries long)\n", | 1895 | pr_debug("Looking at the vmlinux_path (%d entries long)\n", |
1787 | vmlinux_path__nr_entries + 1); | 1896 | vmlinux_path__nr_entries + 1); |
1788 | 1897 | ||
1789 | filename = dso__build_id_filename(self, NULL, 0); | 1898 | filename = dso__build_id_filename(dso, NULL, 0); |
1790 | if (filename != NULL) { | 1899 | if (filename != NULL) { |
1791 | err = dso__load_vmlinux(self, map, filename, filter); | 1900 | err = dso__load_vmlinux(dso, map, filename, filter); |
1792 | if (err > 0) { | 1901 | if (err > 0) { |
1793 | dso__set_long_name(self, filename); | 1902 | dso__set_long_name(dso, filename); |
1794 | goto out; | 1903 | goto out; |
1795 | } | 1904 | } |
1796 | free(filename); | 1905 | free(filename); |
1797 | } | 1906 | } |
1798 | 1907 | ||
1799 | for (i = 0; i < vmlinux_path__nr_entries; ++i) { | 1908 | for (i = 0; i < vmlinux_path__nr_entries; ++i) { |
1800 | err = dso__load_vmlinux(self, map, vmlinux_path[i], filter); | 1909 | err = dso__load_vmlinux(dso, map, vmlinux_path[i], filter); |
1801 | if (err > 0) { | 1910 | if (err > 0) { |
1802 | dso__set_long_name(self, strdup(vmlinux_path[i])); | 1911 | dso__set_long_name(dso, strdup(vmlinux_path[i])); |
1803 | break; | 1912 | break; |
1804 | } | 1913 | } |
1805 | } | 1914 | } |
@@ -1807,15 +1916,15 @@ out: | |||
1807 | return err; | 1916 | return err; |
1808 | } | 1917 | } |
1809 | 1918 | ||
1810 | static int dso__load_kernel_sym(struct dso *self, struct map *map, | 1919 | static int dso__load_kernel_sym(struct dso *dso, struct map *map, |
1811 | symbol_filter_t filter) | 1920 | symbol_filter_t filter) |
1812 | { | 1921 | { |
1813 | int err; | 1922 | int err; |
1814 | const char *kallsyms_filename = NULL; | 1923 | const char *kallsyms_filename = NULL; |
1815 | char *kallsyms_allocated_filename = NULL; | 1924 | char *kallsyms_allocated_filename = NULL; |
1816 | /* | 1925 | /* |
1817 | * Step 1: if the user specified a vmlinux filename, use it and only | 1926 | * Step 1: if the user specified a kallsyms or vmlinux filename, use |
1818 | * it, reporting errors to the user if it cannot be used. | 1927 | * it and only it, reporting errors to the user if it cannot be used. |
1819 | * | 1928 | * |
1820 | * For instance, try to analyse an ARM perf.data file _without_ a | 1929 | * For instance, try to analyse an ARM perf.data file _without_ a |
1821 | * build-id, or if the user specifies the wrong path to the right | 1930 | * build-id, or if the user specifies the wrong path to the right |
@@ -1828,11 +1937,16 @@ static int dso__load_kernel_sym(struct dso *self, struct map *map, | |||
1828 | * validation in dso__load_vmlinux and will bail out if they don't | 1937 | * validation in dso__load_vmlinux and will bail out if they don't |
1829 | * match. | 1938 | * match. |
1830 | */ | 1939 | */ |
1940 | if (symbol_conf.kallsyms_name != NULL) { | ||
1941 | kallsyms_filename = symbol_conf.kallsyms_name; | ||
1942 | goto do_kallsyms; | ||
1943 | } | ||
1944 | |||
1831 | if (symbol_conf.vmlinux_name != NULL) { | 1945 | if (symbol_conf.vmlinux_name != NULL) { |
1832 | err = dso__load_vmlinux(self, map, | 1946 | err = dso__load_vmlinux(dso, map, |
1833 | symbol_conf.vmlinux_name, filter); | 1947 | symbol_conf.vmlinux_name, filter); |
1834 | if (err > 0) { | 1948 | if (err > 0) { |
1835 | dso__set_long_name(self, | 1949 | dso__set_long_name(dso, |
1836 | strdup(symbol_conf.vmlinux_name)); | 1950 | strdup(symbol_conf.vmlinux_name)); |
1837 | goto out_fixup; | 1951 | goto out_fixup; |
1838 | } | 1952 | } |
@@ -1840,23 +1954,27 @@ static int dso__load_kernel_sym(struct dso *self, struct map *map, | |||
1840 | } | 1954 | } |
1841 | 1955 | ||
1842 | if (vmlinux_path != NULL) { | 1956 | if (vmlinux_path != NULL) { |
1843 | err = dso__load_vmlinux_path(self, map, filter); | 1957 | err = dso__load_vmlinux_path(dso, map, filter); |
1844 | if (err > 0) | 1958 | if (err > 0) |
1845 | goto out_fixup; | 1959 | goto out_fixup; |
1846 | } | 1960 | } |
1847 | 1961 | ||
1962 | /* do not try local files if a symfs was given */ | ||
1963 | if (symbol_conf.symfs[0] != 0) | ||
1964 | return -1; | ||
1965 | |||
1848 | /* | 1966 | /* |
1849 | * Say the kernel DSO was created when processing the build-id header table, | 1967 | * Say the kernel DSO was created when processing the build-id header table, |
1850 | * we have a build-id, so check if it is the same as the running kernel, | 1968 | * we have a build-id, so check if it is the same as the running kernel, |
1851 | * using it if it is. | 1969 | * using it if it is. |
1852 | */ | 1970 | */ |
1853 | if (self->has_build_id) { | 1971 | if (dso->has_build_id) { |
1854 | u8 kallsyms_build_id[BUILD_ID_SIZE]; | 1972 | u8 kallsyms_build_id[BUILD_ID_SIZE]; |
1855 | char sbuild_id[BUILD_ID_SIZE * 2 + 1]; | 1973 | char sbuild_id[BUILD_ID_SIZE * 2 + 1]; |
1856 | 1974 | ||
1857 | if (sysfs__read_build_id("/sys/kernel/notes", kallsyms_build_id, | 1975 | if (sysfs__read_build_id("/sys/kernel/notes", kallsyms_build_id, |
1858 | sizeof(kallsyms_build_id)) == 0) { | 1976 | sizeof(kallsyms_build_id)) == 0) { |
1859 | if (dso__build_id_equal(self, kallsyms_build_id)) { | 1977 | if (dso__build_id_equal(dso, kallsyms_build_id)) { |
1860 | kallsyms_filename = "/proc/kallsyms"; | 1978 | kallsyms_filename = "/proc/kallsyms"; |
1861 | goto do_kallsyms; | 1979 | goto do_kallsyms; |
1862 | } | 1980 | } |
@@ -1865,7 +1983,7 @@ static int dso__load_kernel_sym(struct dso *self, struct map *map, | |||
1865 | * Now look if we have it on the build-id cache in | 1983 | * Now look if we have it on the build-id cache in |
1866 | * $HOME/.debug/[kernel.kallsyms]. | 1984 | * $HOME/.debug/[kernel.kallsyms]. |
1867 | */ | 1985 | */ |
1868 | build_id__sprintf(self->build_id, sizeof(self->build_id), | 1986 | build_id__sprintf(dso->build_id, sizeof(dso->build_id), |
1869 | sbuild_id); | 1987 | sbuild_id); |
1870 | 1988 | ||
1871 | if (asprintf(&kallsyms_allocated_filename, | 1989 | if (asprintf(&kallsyms_allocated_filename, |
@@ -1892,7 +2010,7 @@ static int dso__load_kernel_sym(struct dso *self, struct map *map, | |||
1892 | } | 2010 | } |
1893 | 2011 | ||
1894 | do_kallsyms: | 2012 | do_kallsyms: |
1895 | err = dso__load_kallsyms(self, kallsyms_filename, map, filter); | 2013 | err = dso__load_kallsyms(dso, kallsyms_filename, map, filter); |
1896 | if (err > 0) | 2014 | if (err > 0) |
1897 | pr_debug("Using %s for symbols\n", kallsyms_filename); | 2015 | pr_debug("Using %s for symbols\n", kallsyms_filename); |
1898 | free(kallsyms_allocated_filename); | 2016 | free(kallsyms_allocated_filename); |
@@ -1900,7 +2018,7 @@ do_kallsyms: | |||
1900 | if (err > 0) { | 2018 | if (err > 0) { |
1901 | out_fixup: | 2019 | out_fixup: |
1902 | if (kallsyms_filename != NULL) | 2020 | if (kallsyms_filename != NULL) |
1903 | dso__set_long_name(self, strdup("[kernel.kallsyms]")); | 2021 | dso__set_long_name(dso, strdup("[kernel.kallsyms]")); |
1904 | map__fixup_start(map); | 2022 | map__fixup_start(map); |
1905 | map__fixup_end(map); | 2023 | map__fixup_end(map); |
1906 | } | 2024 | } |
@@ -1908,8 +2026,8 @@ out_fixup: | |||
1908 | return err; | 2026 | return err; |
1909 | } | 2027 | } |
1910 | 2028 | ||
1911 | static int dso__load_guest_kernel_sym(struct dso *self, struct map *map, | 2029 | static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map, |
1912 | symbol_filter_t filter) | 2030 | symbol_filter_t filter) |
1913 | { | 2031 | { |
1914 | int err; | 2032 | int err; |
1915 | const char *kallsyms_filename = NULL; | 2033 | const char *kallsyms_filename = NULL; |
@@ -1929,7 +2047,7 @@ static int dso__load_guest_kernel_sym(struct dso *self, struct map *map, | |||
1929 | * Or use file guest_kallsyms inputted by user on commandline | 2047 | * Or use file guest_kallsyms inputted by user on commandline |
1930 | */ | 2048 | */ |
1931 | if (symbol_conf.default_guest_vmlinux_name != NULL) { | 2049 | if (symbol_conf.default_guest_vmlinux_name != NULL) { |
1932 | err = dso__load_vmlinux(self, map, | 2050 | err = dso__load_vmlinux(dso, map, |
1933 | symbol_conf.default_guest_vmlinux_name, filter); | 2051 | symbol_conf.default_guest_vmlinux_name, filter); |
1934 | goto out_try_fixup; | 2052 | goto out_try_fixup; |
1935 | } | 2053 | } |
@@ -1942,7 +2060,7 @@ static int dso__load_guest_kernel_sym(struct dso *self, struct map *map, | |||
1942 | kallsyms_filename = path; | 2060 | kallsyms_filename = path; |
1943 | } | 2061 | } |
1944 | 2062 | ||
1945 | err = dso__load_kallsyms(self, kallsyms_filename, map, filter); | 2063 | err = dso__load_kallsyms(dso, kallsyms_filename, map, filter); |
1946 | if (err > 0) | 2064 | if (err > 0) |
1947 | pr_debug("Using %s for symbols\n", kallsyms_filename); | 2065 | pr_debug("Using %s for symbols\n", kallsyms_filename); |
1948 | 2066 | ||
@@ -1950,7 +2068,7 @@ out_try_fixup: | |||
1950 | if (err > 0) { | 2068 | if (err > 0) { |
1951 | if (kallsyms_filename != NULL) { | 2069 | if (kallsyms_filename != NULL) { |
1952 | machine__mmap_name(machine, path, sizeof(path)); | 2070 | machine__mmap_name(machine, path, sizeof(path)); |
1953 | dso__set_long_name(self, strdup(path)); | 2071 | dso__set_long_name(dso, strdup(path)); |
1954 | } | 2072 | } |
1955 | map__fixup_start(map); | 2073 | map__fixup_start(map); |
1956 | map__fixup_end(map); | 2074 | map__fixup_end(map); |
@@ -2003,12 +2121,12 @@ size_t __dsos__fprintf(struct list_head *head, FILE *fp) | |||
2003 | return ret; | 2121 | return ret; |
2004 | } | 2122 | } |
2005 | 2123 | ||
2006 | size_t machines__fprintf_dsos(struct rb_root *self, FILE *fp) | 2124 | size_t machines__fprintf_dsos(struct rb_root *machines, FILE *fp) |
2007 | { | 2125 | { |
2008 | struct rb_node *nd; | 2126 | struct rb_node *nd; |
2009 | size_t ret = 0; | 2127 | size_t ret = 0; |
2010 | 2128 | ||
2011 | for (nd = rb_first(self); nd; nd = rb_next(nd)) { | 2129 | for (nd = rb_first(machines); nd; nd = rb_next(nd)) { |
2012 | struct machine *pos = rb_entry(nd, struct machine, rb_node); | 2130 | struct machine *pos = rb_entry(nd, struct machine, rb_node); |
2013 | ret += __dsos__fprintf(&pos->kernel_dsos, fp); | 2131 | ret += __dsos__fprintf(&pos->kernel_dsos, fp); |
2014 | ret += __dsos__fprintf(&pos->user_dsos, fp); | 2132 | ret += __dsos__fprintf(&pos->user_dsos, fp); |
@@ -2032,18 +2150,20 @@ static size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp, | |||
2032 | return ret; | 2150 | return ret; |
2033 | } | 2151 | } |
2034 | 2152 | ||
2035 | size_t machine__fprintf_dsos_buildid(struct machine *self, FILE *fp, bool with_hits) | 2153 | size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp, |
2154 | bool with_hits) | ||
2036 | { | 2155 | { |
2037 | return __dsos__fprintf_buildid(&self->kernel_dsos, fp, with_hits) + | 2156 | return __dsos__fprintf_buildid(&machine->kernel_dsos, fp, with_hits) + |
2038 | __dsos__fprintf_buildid(&self->user_dsos, fp, with_hits); | 2157 | __dsos__fprintf_buildid(&machine->user_dsos, fp, with_hits); |
2039 | } | 2158 | } |
2040 | 2159 | ||
2041 | size_t machines__fprintf_dsos_buildid(struct rb_root *self, FILE *fp, bool with_hits) | 2160 | size_t machines__fprintf_dsos_buildid(struct rb_root *machines, |
2161 | FILE *fp, bool with_hits) | ||
2042 | { | 2162 | { |
2043 | struct rb_node *nd; | 2163 | struct rb_node *nd; |
2044 | size_t ret = 0; | 2164 | size_t ret = 0; |
2045 | 2165 | ||
2046 | for (nd = rb_first(self); nd; nd = rb_next(nd)) { | 2166 | for (nd = rb_first(machines); nd; nd = rb_next(nd)) { |
2047 | struct machine *pos = rb_entry(nd, struct machine, rb_node); | 2167 | struct machine *pos = rb_entry(nd, struct machine, rb_node); |
2048 | ret += machine__fprintf_dsos_buildid(pos, fp, with_hits); | 2168 | ret += machine__fprintf_dsos_buildid(pos, fp, with_hits); |
2049 | } | 2169 | } |
@@ -2052,97 +2172,143 @@ size_t machines__fprintf_dsos_buildid(struct rb_root *self, FILE *fp, bool with_ | |||
2052 | 2172 | ||
2053 | struct dso *dso__new_kernel(const char *name) | 2173 | struct dso *dso__new_kernel(const char *name) |
2054 | { | 2174 | { |
2055 | struct dso *self = dso__new(name ?: "[kernel.kallsyms]"); | 2175 | struct dso *dso = dso__new(name ?: "[kernel.kallsyms]"); |
2056 | 2176 | ||
2057 | if (self != NULL) { | 2177 | if (dso != NULL) { |
2058 | dso__set_short_name(self, "[kernel]"); | 2178 | dso__set_short_name(dso, "[kernel]"); |
2059 | self->kernel = DSO_TYPE_KERNEL; | 2179 | dso->kernel = DSO_TYPE_KERNEL; |
2060 | } | 2180 | } |
2061 | 2181 | ||
2062 | return self; | 2182 | return dso; |
2063 | } | 2183 | } |
2064 | 2184 | ||
2065 | static struct dso *dso__new_guest_kernel(struct machine *machine, | 2185 | static struct dso *dso__new_guest_kernel(struct machine *machine, |
2066 | const char *name) | 2186 | const char *name) |
2067 | { | 2187 | { |
2068 | char bf[PATH_MAX]; | 2188 | char bf[PATH_MAX]; |
2069 | struct dso *self = dso__new(name ?: machine__mmap_name(machine, bf, sizeof(bf))); | 2189 | struct dso *dso = dso__new(name ?: machine__mmap_name(machine, bf, |
2070 | 2190 | sizeof(bf))); | |
2071 | if (self != NULL) { | 2191 | if (dso != NULL) { |
2072 | dso__set_short_name(self, "[guest.kernel]"); | 2192 | dso__set_short_name(dso, "[guest.kernel]"); |
2073 | self->kernel = DSO_TYPE_GUEST_KERNEL; | 2193 | dso->kernel = DSO_TYPE_GUEST_KERNEL; |
2074 | } | 2194 | } |
2075 | 2195 | ||
2076 | return self; | 2196 | return dso; |
2077 | } | 2197 | } |
2078 | 2198 | ||
2079 | void dso__read_running_kernel_build_id(struct dso *self, struct machine *machine) | 2199 | void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine) |
2080 | { | 2200 | { |
2081 | char path[PATH_MAX]; | 2201 | char path[PATH_MAX]; |
2082 | 2202 | ||
2083 | if (machine__is_default_guest(machine)) | 2203 | if (machine__is_default_guest(machine)) |
2084 | return; | 2204 | return; |
2085 | sprintf(path, "%s/sys/kernel/notes", machine->root_dir); | 2205 | sprintf(path, "%s/sys/kernel/notes", machine->root_dir); |
2086 | if (sysfs__read_build_id(path, self->build_id, | 2206 | if (sysfs__read_build_id(path, dso->build_id, |
2087 | sizeof(self->build_id)) == 0) | 2207 | sizeof(dso->build_id)) == 0) |
2088 | self->has_build_id = true; | 2208 | dso->has_build_id = true; |
2089 | } | 2209 | } |
2090 | 2210 | ||
2091 | static struct dso *machine__create_kernel(struct machine *self) | 2211 | static struct dso *machine__create_kernel(struct machine *machine) |
2092 | { | 2212 | { |
2093 | const char *vmlinux_name = NULL; | 2213 | const char *vmlinux_name = NULL; |
2094 | struct dso *kernel; | 2214 | struct dso *kernel; |
2095 | 2215 | ||
2096 | if (machine__is_host(self)) { | 2216 | if (machine__is_host(machine)) { |
2097 | vmlinux_name = symbol_conf.vmlinux_name; | 2217 | vmlinux_name = symbol_conf.vmlinux_name; |
2098 | kernel = dso__new_kernel(vmlinux_name); | 2218 | kernel = dso__new_kernel(vmlinux_name); |
2099 | } else { | 2219 | } else { |
2100 | if (machine__is_default_guest(self)) | 2220 | if (machine__is_default_guest(machine)) |
2101 | vmlinux_name = symbol_conf.default_guest_vmlinux_name; | 2221 | vmlinux_name = symbol_conf.default_guest_vmlinux_name; |
2102 | kernel = dso__new_guest_kernel(self, vmlinux_name); | 2222 | kernel = dso__new_guest_kernel(machine, vmlinux_name); |
2103 | } | 2223 | } |
2104 | 2224 | ||
2105 | if (kernel != NULL) { | 2225 | if (kernel != NULL) { |
2106 | dso__read_running_kernel_build_id(kernel, self); | 2226 | dso__read_running_kernel_build_id(kernel, machine); |
2107 | dsos__add(&self->kernel_dsos, kernel); | 2227 | dsos__add(&machine->kernel_dsos, kernel); |
2108 | } | 2228 | } |
2109 | return kernel; | 2229 | return kernel; |
2110 | } | 2230 | } |
2111 | 2231 | ||
2112 | int __machine__create_kernel_maps(struct machine *self, struct dso *kernel) | 2232 | struct process_args { |
2233 | u64 start; | ||
2234 | }; | ||
2235 | |||
2236 | static int symbol__in_kernel(void *arg, const char *name, | ||
2237 | char type __used, u64 start, u64 end __used) | ||
2238 | { | ||
2239 | struct process_args *args = arg; | ||
2240 | |||
2241 | if (strchr(name, '[')) | ||
2242 | return 0; | ||
2243 | |||
2244 | args->start = start; | ||
2245 | return 1; | ||
2246 | } | ||
2247 | |||
2248 | /* Figure out the start address of kernel map from /proc/kallsyms */ | ||
2249 | static u64 machine__get_kernel_start_addr(struct machine *machine) | ||
2250 | { | ||
2251 | const char *filename; | ||
2252 | char path[PATH_MAX]; | ||
2253 | struct process_args args; | ||
2254 | |||
2255 | if (machine__is_host(machine)) { | ||
2256 | filename = "/proc/kallsyms"; | ||
2257 | } else { | ||
2258 | if (machine__is_default_guest(machine)) | ||
2259 | filename = (char *)symbol_conf.default_guest_kallsyms; | ||
2260 | else { | ||
2261 | sprintf(path, "%s/proc/kallsyms", machine->root_dir); | ||
2262 | filename = path; | ||
2263 | } | ||
2264 | } | ||
2265 | |||
2266 | if (symbol__restricted_filename(filename, "/proc/kallsyms")) | ||
2267 | return 0; | ||
2268 | |||
2269 | if (kallsyms__parse(filename, &args, symbol__in_kernel) <= 0) | ||
2270 | return 0; | ||
2271 | |||
2272 | return args.start; | ||
2273 | } | ||
2274 | |||
2275 | int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel) | ||
2113 | { | 2276 | { |
2114 | enum map_type type; | 2277 | enum map_type type; |
2278 | u64 start = machine__get_kernel_start_addr(machine); | ||
2115 | 2279 | ||
2116 | for (type = 0; type < MAP__NR_TYPES; ++type) { | 2280 | for (type = 0; type < MAP__NR_TYPES; ++type) { |
2117 | struct kmap *kmap; | 2281 | struct kmap *kmap; |
2118 | 2282 | ||
2119 | self->vmlinux_maps[type] = map__new2(0, kernel, type); | 2283 | machine->vmlinux_maps[type] = map__new2(start, kernel, type); |
2120 | if (self->vmlinux_maps[type] == NULL) | 2284 | if (machine->vmlinux_maps[type] == NULL) |
2121 | return -1; | 2285 | return -1; |
2122 | 2286 | ||
2123 | self->vmlinux_maps[type]->map_ip = | 2287 | machine->vmlinux_maps[type]->map_ip = |
2124 | self->vmlinux_maps[type]->unmap_ip = identity__map_ip; | 2288 | machine->vmlinux_maps[type]->unmap_ip = |
2125 | 2289 | identity__map_ip; | |
2126 | kmap = map__kmap(self->vmlinux_maps[type]); | 2290 | kmap = map__kmap(machine->vmlinux_maps[type]); |
2127 | kmap->kmaps = &self->kmaps; | 2291 | kmap->kmaps = &machine->kmaps; |
2128 | map_groups__insert(&self->kmaps, self->vmlinux_maps[type]); | 2292 | map_groups__insert(&machine->kmaps, |
2293 | machine->vmlinux_maps[type]); | ||
2129 | } | 2294 | } |
2130 | 2295 | ||
2131 | return 0; | 2296 | return 0; |
2132 | } | 2297 | } |
2133 | 2298 | ||
2134 | void machine__destroy_kernel_maps(struct machine *self) | 2299 | void machine__destroy_kernel_maps(struct machine *machine) |
2135 | { | 2300 | { |
2136 | enum map_type type; | 2301 | enum map_type type; |
2137 | 2302 | ||
2138 | for (type = 0; type < MAP__NR_TYPES; ++type) { | 2303 | for (type = 0; type < MAP__NR_TYPES; ++type) { |
2139 | struct kmap *kmap; | 2304 | struct kmap *kmap; |
2140 | 2305 | ||
2141 | if (self->vmlinux_maps[type] == NULL) | 2306 | if (machine->vmlinux_maps[type] == NULL) |
2142 | continue; | 2307 | continue; |
2143 | 2308 | ||
2144 | kmap = map__kmap(self->vmlinux_maps[type]); | 2309 | kmap = map__kmap(machine->vmlinux_maps[type]); |
2145 | map_groups__remove(&self->kmaps, self->vmlinux_maps[type]); | 2310 | map_groups__remove(&machine->kmaps, |
2311 | machine->vmlinux_maps[type]); | ||
2146 | if (kmap->ref_reloc_sym) { | 2312 | if (kmap->ref_reloc_sym) { |
2147 | /* | 2313 | /* |
2148 | * ref_reloc_sym is shared among all maps, so free just | 2314 | * ref_reloc_sym is shared among all maps, so free just |
@@ -2156,25 +2322,25 @@ void machine__destroy_kernel_maps(struct machine *self) | |||
2156 | kmap->ref_reloc_sym = NULL; | 2322 | kmap->ref_reloc_sym = NULL; |
2157 | } | 2323 | } |
2158 | 2324 | ||
2159 | map__delete(self->vmlinux_maps[type]); | 2325 | map__delete(machine->vmlinux_maps[type]); |
2160 | self->vmlinux_maps[type] = NULL; | 2326 | machine->vmlinux_maps[type] = NULL; |
2161 | } | 2327 | } |
2162 | } | 2328 | } |
2163 | 2329 | ||
2164 | int machine__create_kernel_maps(struct machine *self) | 2330 | int machine__create_kernel_maps(struct machine *machine) |
2165 | { | 2331 | { |
2166 | struct dso *kernel = machine__create_kernel(self); | 2332 | struct dso *kernel = machine__create_kernel(machine); |
2167 | 2333 | ||
2168 | if (kernel == NULL || | 2334 | if (kernel == NULL || |
2169 | __machine__create_kernel_maps(self, kernel) < 0) | 2335 | __machine__create_kernel_maps(machine, kernel) < 0) |
2170 | return -1; | 2336 | return -1; |
2171 | 2337 | ||
2172 | if (symbol_conf.use_modules && machine__create_modules(self) < 0) | 2338 | if (symbol_conf.use_modules && machine__create_modules(machine) < 0) |
2173 | pr_debug("Problems creating module maps, continuing anyway...\n"); | 2339 | pr_debug("Problems creating module maps, continuing anyway...\n"); |
2174 | /* | 2340 | /* |
2175 | * Now that we have all the maps created, just set the ->end of them: | 2341 | * Now that we have all the maps created, just set the ->end of them: |
2176 | */ | 2342 | */ |
2177 | map_groups__fixup_end(&self->kmaps); | 2343 | map_groups__fixup_end(&machine->kmaps); |
2178 | return 0; | 2344 | return 0; |
2179 | } | 2345 | } |
2180 | 2346 | ||
@@ -2194,9 +2360,6 @@ static int vmlinux_path__init(void) | |||
2194 | struct utsname uts; | 2360 | struct utsname uts; |
2195 | char bf[PATH_MAX]; | 2361 | char bf[PATH_MAX]; |
2196 | 2362 | ||
2197 | if (uname(&uts) < 0) | ||
2198 | return -1; | ||
2199 | |||
2200 | vmlinux_path = malloc(sizeof(char *) * 5); | 2363 | vmlinux_path = malloc(sizeof(char *) * 5); |
2201 | if (vmlinux_path == NULL) | 2364 | if (vmlinux_path == NULL) |
2202 | return -1; | 2365 | return -1; |
@@ -2209,6 +2372,14 @@ static int vmlinux_path__init(void) | |||
2209 | if (vmlinux_path[vmlinux_path__nr_entries] == NULL) | 2372 | if (vmlinux_path[vmlinux_path__nr_entries] == NULL) |
2210 | goto out_fail; | 2373 | goto out_fail; |
2211 | ++vmlinux_path__nr_entries; | 2374 | ++vmlinux_path__nr_entries; |
2375 | |||
2376 | /* only try running kernel version if no symfs was given */ | ||
2377 | if (symbol_conf.symfs[0] != 0) | ||
2378 | return 0; | ||
2379 | |||
2380 | if (uname(&uts) < 0) | ||
2381 | return -1; | ||
2382 | |||
2212 | snprintf(bf, sizeof(bf), "/boot/vmlinux-%s", uts.release); | 2383 | snprintf(bf, sizeof(bf), "/boot/vmlinux-%s", uts.release); |
2213 | vmlinux_path[vmlinux_path__nr_entries] = strdup(bf); | 2384 | vmlinux_path[vmlinux_path__nr_entries] = strdup(bf); |
2214 | if (vmlinux_path[vmlinux_path__nr_entries] == NULL) | 2385 | if (vmlinux_path[vmlinux_path__nr_entries] == NULL) |
@@ -2233,11 +2404,11 @@ out_fail: | |||
2233 | return -1; | 2404 | return -1; |
2234 | } | 2405 | } |
2235 | 2406 | ||
2236 | size_t machine__fprintf_vmlinux_path(struct machine *self, FILE *fp) | 2407 | size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp) |
2237 | { | 2408 | { |
2238 | int i; | 2409 | int i; |
2239 | size_t printed = 0; | 2410 | size_t printed = 0; |
2240 | struct dso *kdso = self->vmlinux_maps[MAP__FUNCTION]->dso; | 2411 | struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso; |
2241 | 2412 | ||
2242 | if (kdso->has_build_id) { | 2413 | if (kdso->has_build_id) { |
2243 | char filename[PATH_MAX]; | 2414 | char filename[PATH_MAX]; |
@@ -2266,11 +2437,34 @@ static int setup_list(struct strlist **list, const char *list_str, | |||
2266 | return 0; | 2437 | return 0; |
2267 | } | 2438 | } |
2268 | 2439 | ||
2440 | static bool symbol__read_kptr_restrict(void) | ||
2441 | { | ||
2442 | bool value = false; | ||
2443 | |||
2444 | if (geteuid() != 0) { | ||
2445 | FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r"); | ||
2446 | if (fp != NULL) { | ||
2447 | char line[8]; | ||
2448 | |||
2449 | if (fgets(line, sizeof(line), fp) != NULL) | ||
2450 | value = atoi(line) != 0; | ||
2451 | |||
2452 | fclose(fp); | ||
2453 | } | ||
2454 | } | ||
2455 | |||
2456 | return value; | ||
2457 | } | ||
2458 | |||
2269 | int symbol__init(void) | 2459 | int symbol__init(void) |
2270 | { | 2460 | { |
2461 | const char *symfs; | ||
2462 | |||
2271 | if (symbol_conf.initialized) | 2463 | if (symbol_conf.initialized) |
2272 | return 0; | 2464 | return 0; |
2273 | 2465 | ||
2466 | symbol_conf.priv_size = ALIGN(symbol_conf.priv_size, sizeof(u64)); | ||
2467 | |||
2274 | elf_version(EV_CURRENT); | 2468 | elf_version(EV_CURRENT); |
2275 | if (symbol_conf.sort_by_name) | 2469 | if (symbol_conf.sort_by_name) |
2276 | symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) - | 2470 | symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) - |
@@ -2296,6 +2490,20 @@ int symbol__init(void) | |||
2296 | symbol_conf.sym_list_str, "symbol") < 0) | 2490 | symbol_conf.sym_list_str, "symbol") < 0) |
2297 | goto out_free_comm_list; | 2491 | goto out_free_comm_list; |
2298 | 2492 | ||
2493 | /* | ||
2494 | * A path to symbols of "/" is identical to "" | ||
2495 | * reset here for simplicity. | ||
2496 | */ | ||
2497 | symfs = realpath(symbol_conf.symfs, NULL); | ||
2498 | if (symfs == NULL) | ||
2499 | symfs = symbol_conf.symfs; | ||
2500 | if (strcmp(symfs, "/") == 0) | ||
2501 | symbol_conf.symfs = ""; | ||
2502 | if (symfs != symbol_conf.symfs) | ||
2503 | free((void *)symfs); | ||
2504 | |||
2505 | symbol_conf.kptr_restrict = symbol__read_kptr_restrict(); | ||
2506 | |||
2299 | symbol_conf.initialized = true; | 2507 | symbol_conf.initialized = true; |
2300 | return 0; | 2508 | return 0; |
2301 | 2509 | ||
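The symbol__init() hunk above normalizes a symfs of "/" (or anything that resolves to "/") back to the empty string, so the later "%s%s" concatenations with symbol_conf.symfs degrade to plain system paths. A minimal standalone sketch of that normalization outside of perf (normalize_symfs and its caller are illustrative stand-ins, not perf code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Collapse a symfs that resolves to "/" down to "", so "%s%s" prefixing is a no-op. */
static const char *normalize_symfs(const char *symfs)
{
	char *real = realpath(symfs, NULL);	/* NULL if the path does not resolve */
	const char *resolved = real ? real : symfs;
	const char *ret = symfs;

	if (strcmp(resolved, "/") == 0)
		ret = "";

	free(real);
	return ret;
}

int main(void)
{
	printf("'%s'\n", normalize_symfs("/"));            /* '' */
	printf("'%s'\n", normalize_symfs("/mnt/sysroot")); /* '/mnt/sysroot', unless it links to "/" */
	return 0;
}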
@@ -2318,9 +2526,9 @@ void symbol__exit(void) | |||
2318 | symbol_conf.initialized = false; | 2526 | symbol_conf.initialized = false; |
2319 | } | 2527 | } |
2320 | 2528 | ||
2321 | int machines__create_kernel_maps(struct rb_root *self, pid_t pid) | 2529 | int machines__create_kernel_maps(struct rb_root *machines, pid_t pid) |
2322 | { | 2530 | { |
2323 | struct machine *machine = machines__findnew(self, pid); | 2531 | struct machine *machine = machines__findnew(machines, pid); |
2324 | 2532 | ||
2325 | if (machine == NULL) | 2533 | if (machine == NULL) |
2326 | return -1; | 2534 | return -1; |
@@ -2371,7 +2579,7 @@ char *strxfrchar(char *s, char from, char to) | |||
2371 | return s; | 2579 | return s; |
2372 | } | 2580 | } |
2373 | 2581 | ||
2374 | int machines__create_guest_kernel_maps(struct rb_root *self) | 2582 | int machines__create_guest_kernel_maps(struct rb_root *machines) |
2375 | { | 2583 | { |
2376 | int ret = 0; | 2584 | int ret = 0; |
2377 | struct dirent **namelist = NULL; | 2585 | struct dirent **namelist = NULL; |
@@ -2382,7 +2590,7 @@ int machines__create_guest_kernel_maps(struct rb_root *self) | |||
2382 | if (symbol_conf.default_guest_vmlinux_name || | 2590 | if (symbol_conf.default_guest_vmlinux_name || |
2383 | symbol_conf.default_guest_modules || | 2591 | symbol_conf.default_guest_modules || |
2384 | symbol_conf.default_guest_kallsyms) { | 2592 | symbol_conf.default_guest_kallsyms) { |
2385 | machines__create_kernel_maps(self, DEFAULT_GUEST_KERNEL_ID); | 2593 | machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID); |
2386 | } | 2594 | } |
2387 | 2595 | ||
2388 | if (symbol_conf.guestmount) { | 2596 | if (symbol_conf.guestmount) { |
@@ -2403,7 +2611,7 @@ int machines__create_guest_kernel_maps(struct rb_root *self) | |||
2403 | pr_debug("Can't access file %s\n", path); | 2611 | pr_debug("Can't access file %s\n", path); |
2404 | goto failure; | 2612 | goto failure; |
2405 | } | 2613 | } |
2406 | machines__create_kernel_maps(self, pid); | 2614 | machines__create_kernel_maps(machines, pid); |
2407 | } | 2615 | } |
2408 | failure: | 2616 | failure: |
2409 | free(namelist); | 2617 | free(namelist); |
@@ -2412,23 +2620,23 @@ failure: | |||
2412 | return ret; | 2620 | return ret; |
2413 | } | 2621 | } |
2414 | 2622 | ||
2415 | void machines__destroy_guest_kernel_maps(struct rb_root *self) | 2623 | void machines__destroy_guest_kernel_maps(struct rb_root *machines) |
2416 | { | 2624 | { |
2417 | struct rb_node *next = rb_first(self); | 2625 | struct rb_node *next = rb_first(machines); |
2418 | 2626 | ||
2419 | while (next) { | 2627 | while (next) { |
2420 | struct machine *pos = rb_entry(next, struct machine, rb_node); | 2628 | struct machine *pos = rb_entry(next, struct machine, rb_node); |
2421 | 2629 | ||
2422 | next = rb_next(&pos->rb_node); | 2630 | next = rb_next(&pos->rb_node); |
2423 | rb_erase(&pos->rb_node, self); | 2631 | rb_erase(&pos->rb_node, machines); |
2424 | machine__delete(pos); | 2632 | machine__delete(pos); |
2425 | } | 2633 | } |
2426 | } | 2634 | } |
2427 | 2635 | ||
2428 | int machine__load_kallsyms(struct machine *self, const char *filename, | 2636 | int machine__load_kallsyms(struct machine *machine, const char *filename, |
2429 | enum map_type type, symbol_filter_t filter) | 2637 | enum map_type type, symbol_filter_t filter) |
2430 | { | 2638 | { |
2431 | struct map *map = self->vmlinux_maps[type]; | 2639 | struct map *map = machine->vmlinux_maps[type]; |
2432 | int ret = dso__load_kallsyms(map->dso, filename, map, filter); | 2640 | int ret = dso__load_kallsyms(map->dso, filename, map, filter); |
2433 | 2641 | ||
2434 | if (ret > 0) { | 2642 | if (ret > 0) { |
@@ -2438,16 +2646,16 @@ int machine__load_kallsyms(struct machine *self, const char *filename, | |||
2438 | * kernel, with modules between them, fixup the end of all | 2646 | * kernel, with modules between them, fixup the end of all |
2439 | * sections. | 2647 | * sections. |
2440 | */ | 2648 | */ |
2441 | __map_groups__fixup_end(&self->kmaps, type); | 2649 | __map_groups__fixup_end(&machine->kmaps, type); |
2442 | } | 2650 | } |
2443 | 2651 | ||
2444 | return ret; | 2652 | return ret; |
2445 | } | 2653 | } |
2446 | 2654 | ||
2447 | int machine__load_vmlinux_path(struct machine *self, enum map_type type, | 2655 | int machine__load_vmlinux_path(struct machine *machine, enum map_type type, |
2448 | symbol_filter_t filter) | 2656 | symbol_filter_t filter) |
2449 | { | 2657 | { |
2450 | struct map *map = self->vmlinux_maps[type]; | 2658 | struct map *map = machine->vmlinux_maps[type]; |
2451 | int ret = dso__load_vmlinux_path(map->dso, map, filter); | 2659 | int ret = dso__load_vmlinux_path(map->dso, map, filter); |
2452 | 2660 | ||
2453 | if (ret > 0) { | 2661 | if (ret > 0) { |
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index ea95c2756f05..325ee36a9d29 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h | |||
@@ -48,16 +48,21 @@ char *strxfrchar(char *s, char from, char to); | |||
48 | 48 | ||
49 | #define BUILD_ID_SIZE 20 | 49 | #define BUILD_ID_SIZE 20 |
50 | 50 | ||
51 | /** struct symbol - symtab entry | ||
52 | * | ||
53 | * @ignore - resolvable but tools ignore it (e.g. idle routines) | ||
54 | */ | ||
51 | struct symbol { | 55 | struct symbol { |
52 | struct rb_node rb_node; | 56 | struct rb_node rb_node; |
53 | u64 start; | 57 | u64 start; |
54 | u64 end; | 58 | u64 end; |
55 | u16 namelen; | 59 | u16 namelen; |
56 | u8 binding; | 60 | u8 binding; |
61 | bool ignore; | ||
57 | char name[0]; | 62 | char name[0]; |
58 | }; | 63 | }; |
59 | 64 | ||
60 | void symbol__delete(struct symbol *self); | 65 | void symbol__delete(struct symbol *sym); |
61 | 66 | ||
62 | struct strlist; | 67 | struct strlist; |
63 | 68 | ||
@@ -70,8 +75,10 @@ struct symbol_conf { | |||
70 | use_callchain, | 75 | use_callchain, |
71 | exclude_other, | 76 | exclude_other, |
72 | show_cpu_utilization, | 77 | show_cpu_utilization, |
73 | initialized; | 78 | initialized, |
79 | kptr_restrict; | ||
74 | const char *vmlinux_name, | 80 | const char *vmlinux_name, |
81 | *kallsyms_name, | ||
75 | *source_prefix, | 82 | *source_prefix, |
76 | *field_sep; | 83 | *field_sep; |
77 | const char *default_guest_vmlinux_name, | 84 | const char *default_guest_vmlinux_name, |
@@ -85,13 +92,14 @@ struct symbol_conf { | |||
85 | struct strlist *dso_list, | 92 | struct strlist *dso_list, |
86 | *comm_list, | 93 | *comm_list, |
87 | *sym_list; | 94 | *sym_list; |
95 | const char *symfs; | ||
88 | }; | 96 | }; |
89 | 97 | ||
90 | extern struct symbol_conf symbol_conf; | 98 | extern struct symbol_conf symbol_conf; |
91 | 99 | ||
92 | static inline void *symbol__priv(struct symbol *self) | 100 | static inline void *symbol__priv(struct symbol *sym) |
93 | { | 101 | { |
94 | return ((void *)self) - symbol_conf.priv_size; | 102 | return ((void *)sym) - symbol_conf.priv_size; |
95 | } | 103 | } |
96 | 104 | ||
97 | struct ref_reloc_sym { | 105 | struct ref_reloc_sym { |
@@ -130,13 +138,12 @@ struct dso { | |||
130 | struct rb_root symbol_names[MAP__NR_TYPES]; | 138 | struct rb_root symbol_names[MAP__NR_TYPES]; |
131 | enum dso_kernel_type kernel; | 139 | enum dso_kernel_type kernel; |
132 | u8 adjust_symbols:1; | 140 | u8 adjust_symbols:1; |
133 | u8 slen_calculated:1; | ||
134 | u8 has_build_id:1; | 141 | u8 has_build_id:1; |
135 | u8 hit:1; | 142 | u8 hit:1; |
136 | u8 annotate_warned:1; | 143 | u8 annotate_warned:1; |
137 | u8 sname_alloc:1; | 144 | u8 sname_alloc:1; |
138 | u8 lname_alloc:1; | 145 | u8 lname_alloc:1; |
139 | unsigned char origin; | 146 | unsigned char symtab_type; |
140 | u8 sorted_by_name; | 147 | u8 sorted_by_name; |
141 | u8 loaded; | 148 | u8 loaded; |
142 | u8 build_id[BUILD_ID_SIZE]; | 149 | u8 build_id[BUILD_ID_SIZE]; |
@@ -149,83 +156,90 @@ struct dso { | |||
149 | 156 | ||
150 | struct dso *dso__new(const char *name); | 157 | struct dso *dso__new(const char *name); |
151 | struct dso *dso__new_kernel(const char *name); | 158 | struct dso *dso__new_kernel(const char *name); |
152 | void dso__delete(struct dso *self); | 159 | void dso__delete(struct dso *dso); |
153 | 160 | ||
154 | int dso__name_len(const struct dso *self); | 161 | int dso__name_len(const struct dso *dso); |
155 | 162 | ||
156 | bool dso__loaded(const struct dso *self, enum map_type type); | 163 | bool dso__loaded(const struct dso *dso, enum map_type type); |
157 | bool dso__sorted_by_name(const struct dso *self, enum map_type type); | 164 | bool dso__sorted_by_name(const struct dso *dso, enum map_type type); |
158 | 165 | ||
159 | static inline void dso__set_loaded(struct dso *self, enum map_type type) | 166 | static inline void dso__set_loaded(struct dso *dso, enum map_type type) |
160 | { | 167 | { |
161 | self->loaded |= (1 << type); | 168 | dso->loaded |= (1 << type); |
162 | } | 169 | } |
163 | 170 | ||
164 | void dso__sort_by_name(struct dso *self, enum map_type type); | 171 | void dso__sort_by_name(struct dso *dso, enum map_type type); |
165 | 172 | ||
166 | struct dso *__dsos__findnew(struct list_head *head, const char *name); | 173 | struct dso *__dsos__findnew(struct list_head *head, const char *name); |
167 | 174 | ||
168 | int dso__load(struct dso *self, struct map *map, symbol_filter_t filter); | 175 | int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter); |
169 | int dso__load_vmlinux_path(struct dso *self, struct map *map, | 176 | int dso__load_vmlinux(struct dso *dso, struct map *map, |
177 | const char *vmlinux, symbol_filter_t filter); | ||
178 | int dso__load_vmlinux_path(struct dso *dso, struct map *map, | ||
170 | symbol_filter_t filter); | 179 | symbol_filter_t filter); |
171 | int dso__load_kallsyms(struct dso *self, const char *filename, struct map *map, | 180 | int dso__load_kallsyms(struct dso *dso, const char *filename, struct map *map, |
172 | symbol_filter_t filter); | 181 | symbol_filter_t filter); |
173 | int machine__load_kallsyms(struct machine *self, const char *filename, | 182 | int machine__load_kallsyms(struct machine *machine, const char *filename, |
174 | enum map_type type, symbol_filter_t filter); | 183 | enum map_type type, symbol_filter_t filter); |
175 | int machine__load_vmlinux_path(struct machine *self, enum map_type type, | 184 | int machine__load_vmlinux_path(struct machine *machine, enum map_type type, |
176 | symbol_filter_t filter); | 185 | symbol_filter_t filter); |
177 | 186 | ||
178 | size_t __dsos__fprintf(struct list_head *head, FILE *fp); | 187 | size_t __dsos__fprintf(struct list_head *head, FILE *fp); |
179 | 188 | ||
180 | size_t machine__fprintf_dsos_buildid(struct machine *self, FILE *fp, bool with_hits); | 189 | size_t machine__fprintf_dsos_buildid(struct machine *machine, |
181 | size_t machines__fprintf_dsos(struct rb_root *self, FILE *fp); | 190 | FILE *fp, bool with_hits); |
182 | size_t machines__fprintf_dsos_buildid(struct rb_root *self, FILE *fp, bool with_hits); | 191 | size_t machines__fprintf_dsos(struct rb_root *machines, FILE *fp); |
183 | 192 | size_t machines__fprintf_dsos_buildid(struct rb_root *machines, | |
184 | size_t dso__fprintf_buildid(struct dso *self, FILE *fp); | 193 | FILE *fp, bool with_hits); |
185 | size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp); | 194 | size_t dso__fprintf_buildid(struct dso *dso, FILE *fp); |
186 | 195 | size_t dso__fprintf_symbols_by_name(struct dso *dso, | |
187 | enum dso_origin { | 196 | enum map_type type, FILE *fp); |
188 | DSO__ORIG_KERNEL = 0, | 197 | size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp); |
189 | DSO__ORIG_GUEST_KERNEL, | 198 | |
190 | DSO__ORIG_JAVA_JIT, | 199 | enum symtab_type { |
191 | DSO__ORIG_BUILD_ID_CACHE, | 200 | SYMTAB__KALLSYMS = 0, |
192 | DSO__ORIG_FEDORA, | 201 | SYMTAB__GUEST_KALLSYMS, |
193 | DSO__ORIG_UBUNTU, | 202 | SYMTAB__JAVA_JIT, |
194 | DSO__ORIG_BUILDID, | 203 | SYMTAB__BUILD_ID_CACHE, |
195 | DSO__ORIG_DSO, | 204 | SYMTAB__FEDORA_DEBUGINFO, |
196 | DSO__ORIG_GUEST_KMODULE, | 205 | SYMTAB__UBUNTU_DEBUGINFO, |
197 | DSO__ORIG_KMODULE, | 206 | SYMTAB__BUILDID_DEBUGINFO, |
198 | DSO__ORIG_NOT_FOUND, | 207 | SYMTAB__SYSTEM_PATH_DSO, |
208 | SYMTAB__GUEST_KMODULE, | ||
209 | SYMTAB__SYSTEM_PATH_KMODULE, | ||
210 | SYMTAB__NOT_FOUND, | ||
199 | }; | 211 | }; |
200 | 212 | ||
201 | char dso__symtab_origin(const struct dso *self); | 213 | char dso__symtab_origin(const struct dso *dso); |
202 | void dso__set_long_name(struct dso *self, char *name); | 214 | void dso__set_long_name(struct dso *dso, char *name); |
203 | void dso__set_build_id(struct dso *self, void *build_id); | 215 | void dso__set_build_id(struct dso *dso, void *build_id); |
204 | void dso__read_running_kernel_build_id(struct dso *self, struct machine *machine); | 216 | void dso__read_running_kernel_build_id(struct dso *dso, |
205 | struct symbol *dso__find_symbol(struct dso *self, enum map_type type, u64 addr); | 217 | struct machine *machine); |
206 | struct symbol *dso__find_symbol_by_name(struct dso *self, enum map_type type, | 218 | struct symbol *dso__find_symbol(struct dso *dso, enum map_type type, |
219 | u64 addr); | ||
220 | struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type, | ||
207 | const char *name); | 221 | const char *name); |
208 | 222 | ||
209 | int filename__read_build_id(const char *filename, void *bf, size_t size); | 223 | int filename__read_build_id(const char *filename, void *bf, size_t size); |
210 | int sysfs__read_build_id(const char *filename, void *bf, size_t size); | 224 | int sysfs__read_build_id(const char *filename, void *bf, size_t size); |
211 | bool __dsos__read_build_ids(struct list_head *head, bool with_hits); | 225 | bool __dsos__read_build_ids(struct list_head *head, bool with_hits); |
212 | int build_id__sprintf(const u8 *self, int len, char *bf); | 226 | int build_id__sprintf(const u8 *build_id, int len, char *bf); |
213 | int kallsyms__parse(const char *filename, void *arg, | 227 | int kallsyms__parse(const char *filename, void *arg, |
214 | int (*process_symbol)(void *arg, const char *name, | 228 | int (*process_symbol)(void *arg, const char *name, |
215 | char type, u64 start)); | 229 | char type, u64 start, u64 end)); |
216 | 230 | ||
217 | void machine__destroy_kernel_maps(struct machine *self); | 231 | void machine__destroy_kernel_maps(struct machine *machine); |
218 | int __machine__create_kernel_maps(struct machine *self, struct dso *kernel); | 232 | int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel); |
219 | int machine__create_kernel_maps(struct machine *self); | 233 | int machine__create_kernel_maps(struct machine *machine); |
220 | 234 | ||
221 | int machines__create_kernel_maps(struct rb_root *self, pid_t pid); | 235 | int machines__create_kernel_maps(struct rb_root *machines, pid_t pid); |
222 | int machines__create_guest_kernel_maps(struct rb_root *self); | 236 | int machines__create_guest_kernel_maps(struct rb_root *machines); |
223 | void machines__destroy_guest_kernel_maps(struct rb_root *self); | 237 | void machines__destroy_guest_kernel_maps(struct rb_root *machines); |
224 | 238 | ||
225 | int symbol__init(void); | 239 | int symbol__init(void); |
226 | void symbol__exit(void); | 240 | void symbol__exit(void); |
227 | bool symbol_type__is_a(char symbol_type, enum map_type map_type); | 241 | bool symbol_type__is_a(char symbol_type, enum map_type map_type); |
228 | 242 | ||
229 | size_t machine__fprintf_vmlinux_path(struct machine *self, FILE *fp); | 243 | size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp); |
230 | 244 | ||
231 | #endif /* __PERF_SYMBOL */ | 245 | #endif /* __PERF_SYMBOL */ |
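The enum dso_origin -> enum symtab_type rename above pairs with the symbol.c changes that root every candidate image path under symbol_conf.symfs. A rough sketch of that lookup pattern, reusing the snprintf formats from the hunk with an abbreviated enum and example paths that are purely illustrative:

#include <stdio.h>

enum symtab_type {			/* subset of the enum above, for illustration */
	SYMTAB__FEDORA_DEBUGINFO,
	SYMTAB__UBUNTU_DEBUGINFO,
	SYMTAB__SYSTEM_PATH_DSO,
};

/* Build one candidate path; every location is prefixed with the symfs root. */
static void symtab_candidate(char *buf, size_t size, enum symtab_type type,
			     const char *symfs, const char *long_name)
{
	switch (type) {
	case SYMTAB__FEDORA_DEBUGINFO:
		snprintf(buf, size, "%s/usr/lib/debug%s.debug", symfs, long_name);
		break;
	case SYMTAB__UBUNTU_DEBUGINFO:
		snprintf(buf, size, "%s/usr/lib/debug%s", symfs, long_name);
		break;
	case SYMTAB__SYSTEM_PATH_DSO:
	default:
		snprintf(buf, size, "%s%s", symfs, long_name);
		break;
	}
}

int main(void)
{
	char path[4096];

	symtab_candidate(path, sizeof(path), SYMTAB__FEDORA_DEBUGINFO,
			 "/mnt/target-root", "/lib/libfoo.so");
	puts(path);	/* /mnt/target-root/usr/lib/debug/lib/libfoo.so.debug */
	return 0;
}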
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index 8c72d888e449..d5d3b22250f3 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c | |||
@@ -7,46 +7,6 @@ | |||
7 | #include "util.h" | 7 | #include "util.h" |
8 | #include "debug.h" | 8 | #include "debug.h" |
9 | 9 | ||
10 | /* Skip "." and ".." directories */ | ||
11 | static int filter(const struct dirent *dir) | ||
12 | { | ||
13 | if (dir->d_name[0] == '.') | ||
14 | return 0; | ||
15 | else | ||
16 | return 1; | ||
17 | } | ||
18 | |||
19 | int find_all_tid(int pid, pid_t ** all_tid) | ||
20 | { | ||
21 | char name[256]; | ||
22 | int items; | ||
23 | struct dirent **namelist = NULL; | ||
24 | int ret = 0; | ||
25 | int i; | ||
26 | |||
27 | sprintf(name, "/proc/%d/task", pid); | ||
28 | items = scandir(name, &namelist, filter, NULL); | ||
29 | if (items <= 0) | ||
30 | return -ENOENT; | ||
31 | *all_tid = malloc(sizeof(pid_t) * items); | ||
32 | if (!*all_tid) { | ||
33 | ret = -ENOMEM; | ||
34 | goto failure; | ||
35 | } | ||
36 | |||
37 | for (i = 0; i < items; i++) | ||
38 | (*all_tid)[i] = atoi(namelist[i]->d_name); | ||
39 | |||
40 | ret = items; | ||
41 | |||
42 | failure: | ||
43 | for (i=0; i<items; i++) | ||
44 | free(namelist[i]); | ||
45 | free(namelist); | ||
46 | |||
47 | return ret; | ||
48 | } | ||
49 | |||
50 | static struct thread *thread__new(pid_t pid) | 10 | static struct thread *thread__new(pid_t pid) |
51 | { | 11 | { |
52 | struct thread *self = zalloc(sizeof(*self)); | 12 | struct thread *self = zalloc(sizeof(*self)); |
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h index 688500ff826f..e5f2401c1b5e 100644 --- a/tools/perf/util/thread.h +++ b/tools/perf/util/thread.h | |||
@@ -22,7 +22,6 @@ struct perf_session; | |||
22 | 22 | ||
23 | void thread__delete(struct thread *self); | 23 | void thread__delete(struct thread *self); |
24 | 24 | ||
25 | int find_all_tid(int pid, pid_t ** all_tid); | ||
26 | int thread__set_comm(struct thread *self, const char *comm); | 25 | int thread__set_comm(struct thread *self, const char *comm); |
27 | int thread__comm_len(struct thread *self); | 26 | int thread__comm_len(struct thread *self); |
28 | struct thread *perf_session__findnew(struct perf_session *self, pid_t pid); | 27 | struct thread *perf_session__findnew(struct perf_session *self, pid_t pid); |
diff --git a/tools/perf/util/thread_map.c b/tools/perf/util/thread_map.c new file mode 100644 index 000000000000..a5df131b77c3 --- /dev/null +++ b/tools/perf/util/thread_map.c | |||
@@ -0,0 +1,64 @@ | |||
1 | #include <dirent.h> | ||
2 | #include <stdlib.h> | ||
3 | #include <stdio.h> | ||
4 | #include "thread_map.h" | ||
5 | |||
6 | /* Skip "." and ".." directories */ | ||
7 | static int filter(const struct dirent *dir) | ||
8 | { | ||
9 | if (dir->d_name[0] == '.') | ||
10 | return 0; | ||
11 | else | ||
12 | return 1; | ||
13 | } | ||
14 | |||
15 | struct thread_map *thread_map__new_by_pid(pid_t pid) | ||
16 | { | ||
17 | struct thread_map *threads; | ||
18 | char name[256]; | ||
19 | int items; | ||
20 | struct dirent **namelist = NULL; | ||
21 | int i; | ||
22 | |||
23 | sprintf(name, "/proc/%d/task", pid); | ||
24 | items = scandir(name, &namelist, filter, NULL); | ||
25 | if (items <= 0) | ||
26 | return NULL; | ||
27 | |||
28 | threads = malloc(sizeof(*threads) + sizeof(pid_t) * items); | ||
29 | if (threads != NULL) { | ||
30 | for (i = 0; i < items; i++) | ||
31 | threads->map[i] = atoi(namelist[i]->d_name); | ||
32 | threads->nr = items; | ||
33 | } | ||
34 | |||
35 | for (i=0; i<items; i++) | ||
36 | free(namelist[i]); | ||
37 | free(namelist); | ||
38 | |||
39 | return threads; | ||
40 | } | ||
41 | |||
42 | struct thread_map *thread_map__new_by_tid(pid_t tid) | ||
43 | { | ||
44 | struct thread_map *threads = malloc(sizeof(*threads) + sizeof(pid_t)); | ||
45 | |||
46 | if (threads != NULL) { | ||
47 | threads->map[0] = tid; | ||
48 | threads->nr = 1; | ||
49 | } | ||
50 | |||
51 | return threads; | ||
52 | } | ||
53 | |||
54 | struct thread_map *thread_map__new(pid_t pid, pid_t tid) | ||
55 | { | ||
56 | if (pid != -1) | ||
57 | return thread_map__new_by_pid(pid); | ||
58 | return thread_map__new_by_tid(tid); | ||
59 | } | ||
60 | |||
61 | void thread_map__delete(struct thread_map *threads) | ||
62 | { | ||
63 | free(threads); | ||
64 | } | ||
diff --git a/tools/perf/util/thread_map.h b/tools/perf/util/thread_map.h new file mode 100644 index 000000000000..3cb907311409 --- /dev/null +++ b/tools/perf/util/thread_map.h | |||
@@ -0,0 +1,15 @@ | |||
1 | #ifndef __PERF_THREAD_MAP_H | ||
2 | #define __PERF_THREAD_MAP_H | ||
3 | |||
4 | #include <sys/types.h> | ||
5 | |||
6 | struct thread_map { | ||
7 | int nr; | ||
8 | int map[]; | ||
9 | }; | ||
10 | |||
11 | struct thread_map *thread_map__new_by_pid(pid_t pid); | ||
12 | struct thread_map *thread_map__new_by_tid(pid_t tid); | ||
13 | struct thread_map *thread_map__new(pid_t pid, pid_t tid); | ||
14 | void thread_map__delete(struct thread_map *threads); | ||
15 | #endif /* __PERF_THREAD_MAP_H */ | ||
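A minimal usage sketch of the new interface, assuming it is compiled inside tools/perf so that thread_map.h is on the include path; thread_map__new() falls back to a single-tid map when no pid is given, as thread_map.c above shows:

    #include <stdio.h>
    #include "thread_map.h"

    static void dump_threads(pid_t pid, pid_t tid)
    {
            struct thread_map *threads = thread_map__new(pid, tid);
            int i;

            if (threads == NULL)
                    return;

            for (i = 0; i < threads->nr; i++)
                    printf("thread %d: %d\n", i, threads->map[i]);

            thread_map__delete(threads);
    }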
diff --git a/tools/perf/util/top.c b/tools/perf/util/top.c new file mode 100644 index 000000000000..a11f60735a18 --- /dev/null +++ b/tools/perf/util/top.c | |||
@@ -0,0 +1,238 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com> | ||
3 | * | ||
4 | * Refactored from builtin-top.c, see that file for further copyright notes. | ||
5 | * | ||
6 | * Released under the GPL v2. (and only v2, not any later version) | ||
7 | */ | ||
8 | |||
9 | #include "cpumap.h" | ||
10 | #include "event.h" | ||
11 | #include "evlist.h" | ||
12 | #include "evsel.h" | ||
13 | #include "parse-events.h" | ||
14 | #include "symbol.h" | ||
15 | #include "top.h" | ||
16 | #include <inttypes.h> | ||
17 | |||
18 | /* | ||
19 | * Ordering weight: count-1 * count-2 * ... / count-n | ||
20 | */ | ||
21 | static double sym_weight(const struct sym_entry *sym, struct perf_top *top) | ||
22 | { | ||
23 | double weight = sym->snap_count; | ||
24 | int counter; | ||
25 | |||
26 | if (!top->display_weighted) | ||
27 | return weight; | ||
28 | |||
29 | for (counter = 1; counter < top->evlist->nr_entries - 1; counter++) | ||
30 | weight *= sym->count[counter]; | ||
31 | |||
32 | weight /= (sym->count[counter] + 1); | ||
33 | |||
34 | return weight; | ||
35 | } | ||
36 | |||
37 | static void perf_top__remove_active_sym(struct perf_top *top, struct sym_entry *syme) | ||
38 | { | ||
39 | pthread_mutex_lock(&top->active_symbols_lock); | ||
40 | list_del_init(&syme->node); | ||
41 | pthread_mutex_unlock(&top->active_symbols_lock); | ||
42 | } | ||
43 | |||
44 | static void rb_insert_active_sym(struct rb_root *tree, struct sym_entry *se) | ||
45 | { | ||
46 | struct rb_node **p = &tree->rb_node; | ||
47 | struct rb_node *parent = NULL; | ||
48 | struct sym_entry *iter; | ||
49 | |||
50 | while (*p != NULL) { | ||
51 | parent = *p; | ||
52 | iter = rb_entry(parent, struct sym_entry, rb_node); | ||
53 | |||
54 | if (se->weight > iter->weight) | ||
55 | p = &(*p)->rb_left; | ||
56 | else | ||
57 | p = &(*p)->rb_right; | ||
58 | } | ||
59 | |||
60 | rb_link_node(&se->rb_node, parent, p); | ||
61 | rb_insert_color(&se->rb_node, tree); | ||
62 | } | ||
63 | |||
64 | #define SNPRINTF(buf, size, fmt, args...) \ | ||
65 | ({ \ | ||
66 | size_t r = snprintf(buf, size, fmt, ## args); \ | ||
67 | r > size ? size : r; \ | ||
68 | }) | ||
69 | |||
70 | size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size) | ||
71 | { | ||
72 | struct perf_evsel *counter; | ||
73 | float samples_per_sec = top->samples / top->delay_secs; | ||
74 | float ksamples_per_sec = top->kernel_samples / top->delay_secs; | ||
75 | float esamples_percent = (100.0 * top->exact_samples) / top->samples; | ||
76 | size_t ret = 0; | ||
77 | |||
78 | if (!perf_guest) { | ||
79 | ret = SNPRINTF(bf, size, | ||
80 | " PerfTop:%8.0f irqs/sec kernel:%4.1f%%" | ||
81 | " exact: %4.1f%% [", samples_per_sec, | ||
82 | 100.0 - (100.0 * ((samples_per_sec - ksamples_per_sec) / | ||
83 | samples_per_sec)), | ||
84 | esamples_percent); | ||
85 | } else { | ||
86 | float us_samples_per_sec = top->us_samples / top->delay_secs; | ||
87 | float guest_kernel_samples_per_sec = top->guest_kernel_samples / top->delay_secs; | ||
88 | float guest_us_samples_per_sec = top->guest_us_samples / top->delay_secs; | ||
89 | |||
90 | ret = SNPRINTF(bf, size, | ||
91 | " PerfTop:%8.0f irqs/sec kernel:%4.1f%% us:%4.1f%%" | ||
92 | " guest kernel:%4.1f%% guest us:%4.1f%%" | ||
93 | " exact: %4.1f%% [", samples_per_sec, | ||
94 | 100.0 - (100.0 * ((samples_per_sec - ksamples_per_sec) / | ||
95 | samples_per_sec)), | ||
96 | 100.0 - (100.0 * ((samples_per_sec - us_samples_per_sec) / | ||
97 | samples_per_sec)), | ||
98 | 100.0 - (100.0 * ((samples_per_sec - | ||
99 | guest_kernel_samples_per_sec) / | ||
100 | samples_per_sec)), | ||
101 | 100.0 - (100.0 * ((samples_per_sec - | ||
102 | guest_us_samples_per_sec) / | ||
103 | samples_per_sec)), | ||
104 | esamples_percent); | ||
105 | } | ||
106 | |||
107 | if (top->evlist->nr_entries == 1 || !top->display_weighted) { | ||
108 | struct perf_evsel *first; | ||
109 | first = list_entry(top->evlist->entries.next, struct perf_evsel, node); | ||
110 | ret += SNPRINTF(bf + ret, size - ret, "%" PRIu64 "%s ", | ||
111 | (uint64_t)first->attr.sample_period, | ||
112 | top->freq ? "Hz" : ""); | ||
113 | } | ||
114 | |||
115 | if (!top->display_weighted) { | ||
116 | ret += SNPRINTF(bf + ret, size - ret, "%s", | ||
117 | event_name(top->sym_evsel)); | ||
118 | } else { | ||
119 | /* | ||
120 | * Don't let events eat all the space. Leaving 30 bytes | ||
121 | * for the rest should be enough. | ||
122 | */ | ||
123 | size_t last_pos = size - 30; | ||
124 | |||
125 | list_for_each_entry(counter, &top->evlist->entries, node) { | ||
126 | ret += SNPRINTF(bf + ret, size - ret, "%s%s", | ||
127 | counter->idx ? "/" : "", | ||
128 | event_name(counter)); | ||
129 | if (ret > last_pos) { | ||
130 | sprintf(bf + last_pos - 3, ".."); | ||
131 | ret = last_pos - 1; | ||
132 | break; | ||
133 | } | ||
134 | } | ||
135 | } | ||
136 | |||
137 | ret += SNPRINTF(bf + ret, size - ret, "], "); | ||
138 | |||
139 | if (top->target_pid != -1) | ||
140 | ret += SNPRINTF(bf + ret, size - ret, " (target_pid: %d", | ||
141 | top->target_pid); | ||
142 | else if (top->target_tid != -1) | ||
143 | ret += SNPRINTF(bf + ret, size - ret, " (target_tid: %d", | ||
144 | top->target_tid); | ||
145 | else | ||
146 | ret += SNPRINTF(bf + ret, size - ret, " (all"); | ||
147 | |||
148 | if (top->cpu_list) | ||
149 | ret += SNPRINTF(bf + ret, size - ret, ", CPU%s: %s)", | ||
150 | top->evlist->cpus->nr > 1 ? "s" : "", top->cpu_list); | ||
151 | else { | ||
152 | if (top->target_tid != -1) | ||
153 | ret += SNPRINTF(bf + ret, size - ret, ")"); | ||
154 | else | ||
155 | ret += SNPRINTF(bf + ret, size - ret, ", %d CPU%s)", | ||
156 | top->evlist->cpus->nr, | ||
157 | top->evlist->cpus->nr > 1 ? "s" : ""); | ||
158 | } | ||
159 | |||
160 | return ret; | ||
161 | } | ||
162 | |||
163 | void perf_top__reset_sample_counters(struct perf_top *top) | ||
164 | { | ||
165 | top->samples = top->us_samples = top->kernel_samples = | ||
166 | top->exact_samples = top->guest_kernel_samples = | ||
167 | top->guest_us_samples = 0; | ||
168 | } | ||
169 | |||
170 | float perf_top__decay_samples(struct perf_top *top, struct rb_root *root) | ||
171 | { | ||
172 | struct sym_entry *syme, *n; | ||
173 | float sum_ksamples = 0.0; | ||
174 | int snap = !top->display_weighted ? top->sym_evsel->idx : 0, j; | ||
175 | |||
176 | /* Sort the active symbols */ | ||
177 | pthread_mutex_lock(&top->active_symbols_lock); | ||
178 | syme = list_entry(top->active_symbols.next, struct sym_entry, node); | ||
179 | pthread_mutex_unlock(&top->active_symbols_lock); | ||
180 | |||
181 | top->rb_entries = 0; | ||
182 | list_for_each_entry_safe_from(syme, n, &top->active_symbols, node) { | ||
183 | syme->snap_count = syme->count[snap]; | ||
184 | if (syme->snap_count != 0) { | ||
185 | |||
186 | if ((top->hide_user_symbols && | ||
187 | syme->map->dso->kernel == DSO_TYPE_USER) || | ||
188 | (top->hide_kernel_symbols && | ||
189 | syme->map->dso->kernel == DSO_TYPE_KERNEL)) { | ||
190 | perf_top__remove_active_sym(top, syme); | ||
191 | continue; | ||
192 | } | ||
193 | syme->weight = sym_weight(syme, top); | ||
194 | |||
195 | if ((int)syme->snap_count >= top->count_filter) { | ||
196 | rb_insert_active_sym(root, syme); | ||
197 | ++top->rb_entries; | ||
198 | } | ||
199 | sum_ksamples += syme->snap_count; | ||
200 | |||
201 | for (j = 0; j < top->evlist->nr_entries; j++) | ||
202 | syme->count[j] = top->zero ? 0 : syme->count[j] * 7 / 8; | ||
203 | } else | ||
204 | perf_top__remove_active_sym(top, syme); | ||
205 | } | ||
206 | |||
207 | return sum_ksamples; | ||
208 | } | ||
209 | |||
210 | /* | ||
211 | * Find the longest symbol name that will be displayed | ||
212 | */ | ||
213 | void perf_top__find_widths(struct perf_top *top, struct rb_root *root, | ||
214 | int *dso_width, int *dso_short_width, int *sym_width) | ||
215 | { | ||
216 | struct rb_node *nd; | ||
217 | int printed = 0; | ||
218 | |||
219 | *sym_width = *dso_width = *dso_short_width = 0; | ||
220 | |||
221 | for (nd = rb_first(root); nd; nd = rb_next(nd)) { | ||
222 | struct sym_entry *syme = rb_entry(nd, struct sym_entry, rb_node); | ||
223 | struct symbol *sym = sym_entry__symbol(syme); | ||
224 | |||
225 | if (++printed > top->print_entries || | ||
226 | (int)syme->snap_count < top->count_filter) | ||
227 | continue; | ||
228 | |||
229 | if (syme->map->dso->long_name_len > *dso_width) | ||
230 | *dso_width = syme->map->dso->long_name_len; | ||
231 | |||
232 | if (syme->map->dso->short_name_len > *dso_short_width) | ||
233 | *dso_short_width = syme->map->dso->short_name_len; | ||
234 | |||
235 | if (sym->namelen > *sym_width) | ||
236 | *sym_width = sym->namelen; | ||
237 | } | ||
238 | } | ||
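Two details worth noting in the refactored helpers: sym_weight() multiplies the counts of all but the last event and divides by the last count plus one when weighted display is in effect, and perf_top__decay_samples() scales every per-event count by 7/8 on each refresh unless zeroing was requested, so idle symbols fade out geometrically rather than vanishing. A tiny standalone illustration of that integer decay (not part of the patch):

    #include <stdio.h>

    int main(void)
    {
            unsigned long count = 1024;
            int i;

            for (i = 0; i < 4; i++) {
                    count = count * 7 / 8;  /* same integer decay as top.c */
                    printf("after refresh %d: %lu\n", i + 1, count);
            }
            return 0;       /* prints 896, 784, 686, 600 */
    }

Four refreshes take 1024 samples down to 600, which is why a symbol keeps showing on screen for a while after its last sample.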
diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h new file mode 100644 index 000000000000..bfbf95bcc603 --- /dev/null +++ b/tools/perf/util/top.h | |||
@@ -0,0 +1,64 @@ | |||
1 | #ifndef __PERF_TOP_H | ||
2 | #define __PERF_TOP_H 1 | ||
3 | |||
4 | #include "types.h" | ||
5 | #include "../perf.h" | ||
6 | #include <stddef.h> | ||
7 | #include <pthread.h> | ||
8 | #include <linux/list.h> | ||
9 | #include <linux/rbtree.h> | ||
10 | |||
11 | struct perf_evlist; | ||
12 | struct perf_evsel; | ||
13 | |||
14 | struct sym_entry { | ||
15 | struct rb_node rb_node; | ||
16 | struct list_head node; | ||
17 | unsigned long snap_count; | ||
18 | double weight; | ||
19 | struct map *map; | ||
20 | unsigned long count[0]; | ||
21 | }; | ||
22 | |||
23 | static inline struct symbol *sym_entry__symbol(struct sym_entry *self) | ||
24 | { | ||
25 | return ((void *)self) + symbol_conf.priv_size; | ||
26 | } | ||
27 | |||
28 | struct perf_top { | ||
29 | struct perf_evlist *evlist; | ||
30 | /* | ||
31 | * Symbols will be added here in perf_event__process_sample and will | ||
32 | * be removed once they decay. | ||
33 | */ | ||
34 | struct list_head active_symbols; | ||
35 | pthread_mutex_t active_symbols_lock; | ||
36 | pthread_cond_t active_symbols_cond; | ||
37 | u64 samples; | ||
38 | u64 kernel_samples, us_samples; | ||
39 | u64 exact_samples; | ||
40 | u64 guest_us_samples, guest_kernel_samples; | ||
41 | int print_entries, count_filter, delay_secs; | ||
42 | int display_weighted, freq, rb_entries; | ||
43 | pid_t target_pid, target_tid; | ||
44 | bool hide_kernel_symbols, hide_user_symbols, zero; | ||
45 | const char *cpu_list; | ||
46 | struct sym_entry *sym_filter_entry; | ||
47 | struct perf_evsel *sym_evsel; | ||
48 | }; | ||
49 | |||
50 | size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size); | ||
51 | void perf_top__reset_sample_counters(struct perf_top *top); | ||
52 | float perf_top__decay_samples(struct perf_top *top, struct rb_root *root); | ||
53 | void perf_top__find_widths(struct perf_top *top, struct rb_root *root, | ||
54 | int *dso_width, int *dso_short_width, int *sym_width); | ||
55 | |||
56 | #ifdef NO_NEWT_SUPPORT | ||
57 | static inline int perf_top__tui_browser(struct perf_top *top __used) | ||
58 | { | ||
59 | return 0; | ||
60 | } | ||
61 | #else | ||
62 | int perf_top__tui_browser(struct perf_top *top); | ||
63 | #endif | ||
64 | #endif /* __PERF_TOP_H */ | ||
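A hedged sketch of how a front end might drive these helpers once per refresh; the struct perf_top is assumed to have been filled in by the caller (evlist, delay_secs, print_entries, ...), and the rb-tree produced by the decay pass is what the display code then walks:

    #include <stdio.h>
    #include "top.h"

    static void example_refresh(struct perf_top *top)
    {
            struct rb_root root = RB_ROOT;
            char bf[512];
            int dso_w, dso_short_w, sym_w;

            perf_top__header_snprintf(top, bf, sizeof(bf));
            puts(bf);
            perf_top__reset_sample_counters(top);

            perf_top__decay_samples(top, &root);
            perf_top__find_widths(top, &root, &dso_w, &dso_short_w, &sym_w);
            /* ... walk `root`, printing at most top->print_entries rows ... */
    }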
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c index b1572601286c..35729f4c40cb 100644 --- a/tools/perf/util/trace-event-info.c +++ b/tools/perf/util/trace-event-info.c | |||
@@ -34,11 +34,13 @@ | |||
34 | #include <ctype.h> | 34 | #include <ctype.h> |
35 | #include <errno.h> | 35 | #include <errno.h> |
36 | #include <stdbool.h> | 36 | #include <stdbool.h> |
37 | #include <linux/list.h> | ||
37 | #include <linux/kernel.h> | 38 | #include <linux/kernel.h> |
38 | 39 | ||
39 | #include "../perf.h" | 40 | #include "../perf.h" |
40 | #include "trace-event.h" | 41 | #include "trace-event.h" |
41 | #include "debugfs.h" | 42 | #include "debugfs.h" |
43 | #include "evsel.h" | ||
42 | 44 | ||
43 | #define VERSION "0.5" | 45 | #define VERSION "0.5" |
44 | 46 | ||
@@ -469,16 +471,17 @@ out: | |||
469 | } | 471 | } |
470 | 472 | ||
471 | static struct tracepoint_path * | 473 | static struct tracepoint_path * |
472 | get_tracepoints_path(struct perf_event_attr *pattrs, int nb_events) | 474 | get_tracepoints_path(struct list_head *pattrs) |
473 | { | 475 | { |
474 | struct tracepoint_path path, *ppath = &path; | 476 | struct tracepoint_path path, *ppath = &path; |
475 | int i, nr_tracepoints = 0; | 477 | struct perf_evsel *pos; |
478 | int nr_tracepoints = 0; | ||
476 | 479 | ||
477 | for (i = 0; i < nb_events; i++) { | 480 | list_for_each_entry(pos, pattrs, node) { |
478 | if (pattrs[i].type != PERF_TYPE_TRACEPOINT) | 481 | if (pos->attr.type != PERF_TYPE_TRACEPOINT) |
479 | continue; | 482 | continue; |
480 | ++nr_tracepoints; | 483 | ++nr_tracepoints; |
481 | ppath->next = tracepoint_id_to_path(pattrs[i].config); | 484 | ppath->next = tracepoint_id_to_path(pos->attr.config); |
482 | if (!ppath->next) | 485 | if (!ppath->next) |
483 | die("%s\n", "No memory to alloc tracepoints list"); | 486 | die("%s\n", "No memory to alloc tracepoints list"); |
484 | ppath = ppath->next; | 487 | ppath = ppath->next; |
@@ -487,21 +490,21 @@ get_tracepoints_path(struct perf_event_attr *pattrs, int nb_events) | |||
487 | return nr_tracepoints > 0 ? path.next : NULL; | 490 | return nr_tracepoints > 0 ? path.next : NULL; |
488 | } | 491 | } |
489 | 492 | ||
490 | bool have_tracepoints(struct perf_event_attr *pattrs, int nb_events) | 493 | bool have_tracepoints(struct list_head *pattrs) |
491 | { | 494 | { |
492 | int i; | 495 | struct perf_evsel *pos; |
493 | 496 | ||
494 | for (i = 0; i < nb_events; i++) | 497 | list_for_each_entry(pos, pattrs, node) |
495 | if (pattrs[i].type == PERF_TYPE_TRACEPOINT) | 498 | if (pos->attr.type == PERF_TYPE_TRACEPOINT) |
496 | return true; | 499 | return true; |
497 | 500 | ||
498 | return false; | 501 | return false; |
499 | } | 502 | } |
500 | 503 | ||
501 | int read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events) | 504 | int read_tracing_data(int fd, struct list_head *pattrs) |
502 | { | 505 | { |
503 | char buf[BUFSIZ]; | 506 | char buf[BUFSIZ]; |
504 | struct tracepoint_path *tps = get_tracepoints_path(pattrs, nb_events); | 507 | struct tracepoint_path *tps = get_tracepoints_path(pattrs); |
505 | 508 | ||
506 | /* | 509 | /* |
507 | * What? No tracepoints? No sense writing anything here, bail out. | 510 | * What? No tracepoints? No sense writing anything here, bail out. |
@@ -545,14 +548,13 @@ int read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events) | |||
545 | return 0; | 548 | return 0; |
546 | } | 549 | } |
547 | 550 | ||
548 | ssize_t read_tracing_data_size(int fd, struct perf_event_attr *pattrs, | 551 | ssize_t read_tracing_data_size(int fd, struct list_head *pattrs) |
549 | int nb_events) | ||
550 | { | 552 | { |
551 | ssize_t size; | 553 | ssize_t size; |
552 | int err = 0; | 554 | int err = 0; |
553 | 555 | ||
554 | calc_data_size = 1; | 556 | calc_data_size = 1; |
555 | err = read_tracing_data(fd, pattrs, nb_events); | 557 | err = read_tracing_data(fd, pattrs); |
556 | size = calc_data_size - 1; | 558 | size = calc_data_size - 1; |
557 | calc_data_size = 0; | 559 | calc_data_size = 0; |
558 | 560 | ||
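With the perf_event_attr array gone, callers only hand over the evlist's entries list; a sketch of what a builtin-record style call site looks like after this change (`evlist` and `fd` are assumed to exist in the caller):

    if (have_tracepoints(&evlist->entries))
            read_tracing_data(fd, &evlist->entries);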
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index 73a02223c629..0a7ed5b5e281 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c | |||
@@ -153,7 +153,7 @@ void parse_proc_kallsyms(char *file, unsigned int size __unused) | |||
153 | char *next = NULL; | 153 | char *next = NULL; |
154 | char *addr_str; | 154 | char *addr_str; |
155 | char ch; | 155 | char ch; |
156 | int ret; | 156 | int ret __used; |
157 | int i; | 157 | int i; |
158 | 158 | ||
159 | line = strtok_r(file, "\n", &next); | 159 | line = strtok_r(file, "\n", &next); |
@@ -2643,68 +2643,13 @@ static void print_lat_fmt(void *data, int size __unused) | |||
2643 | printf("."); | 2643 | printf("."); |
2644 | 2644 | ||
2645 | if (lock_depth < 0) | 2645 | if (lock_depth < 0) |
2646 | printf("."); | 2646 | printf(". "); |
2647 | else | 2647 | else |
2648 | printf("%d", lock_depth); | 2648 | printf("%d ", lock_depth); |
2649 | } | ||
2650 | |||
2651 | /* taken from Linux, written by Frederic Weisbecker */ | ||
2652 | static void print_graph_cpu(int cpu) | ||
2653 | { | ||
2654 | int i; | ||
2655 | int log10_this = log10_cpu(cpu); | ||
2656 | int log10_all = log10_cpu(cpus); | ||
2657 | |||
2658 | |||
2659 | /* | ||
2660 | * Start with a space character - to make it stand out | ||
2661 | * to the right a bit when trace output is pasted into | ||
2662 | * email: | ||
2663 | */ | ||
2664 | printf(" "); | ||
2665 | |||
2666 | /* | ||
2667 | * Tricky - we space the CPU field according to the max | ||
2668 | * number of online CPUs. On a 2-cpu system it would take | ||
2669 | * a maximum of 1 digit - on a 128 cpu system it would | ||
2670 | * take up to 3 digits: | ||
2671 | */ | ||
2672 | for (i = 0; i < log10_all - log10_this; i++) | ||
2673 | printf(" "); | ||
2674 | |||
2675 | printf("%d) ", cpu); | ||
2676 | } | 2649 | } |
2677 | 2650 | ||
2678 | #define TRACE_GRAPH_PROCINFO_LENGTH 14 | ||
2679 | #define TRACE_GRAPH_INDENT 2 | 2651 | #define TRACE_GRAPH_INDENT 2 |
2680 | 2652 | ||
2681 | static void print_graph_proc(int pid, const char *comm) | ||
2682 | { | ||
2683 | /* sign + log10(MAX_INT) + '\0' */ | ||
2684 | char pid_str[11]; | ||
2685 | int spaces = 0; | ||
2686 | int len; | ||
2687 | int i; | ||
2688 | |||
2689 | sprintf(pid_str, "%d", pid); | ||
2690 | |||
2691 | /* 1 stands for the "-" character */ | ||
2692 | len = strlen(comm) + strlen(pid_str) + 1; | ||
2693 | |||
2694 | if (len < TRACE_GRAPH_PROCINFO_LENGTH) | ||
2695 | spaces = TRACE_GRAPH_PROCINFO_LENGTH - len; | ||
2696 | |||
2697 | /* First spaces to align center */ | ||
2698 | for (i = 0; i < spaces / 2; i++) | ||
2699 | printf(" "); | ||
2700 | |||
2701 | printf("%s-%s", comm, pid_str); | ||
2702 | |||
2703 | /* Last spaces to align center */ | ||
2704 | for (i = 0; i < spaces - (spaces / 2); i++) | ||
2705 | printf(" "); | ||
2706 | } | ||
2707 | |||
2708 | static struct record * | 2653 | static struct record * |
2709 | get_return_for_leaf(int cpu, int cur_pid, unsigned long long cur_func, | 2654 | get_return_for_leaf(int cpu, int cur_pid, unsigned long long cur_func, |
2710 | struct record *next) | 2655 | struct record *next) |
@@ -2876,21 +2821,13 @@ static void print_graph_nested(struct event *event, void *data) | |||
2876 | 2821 | ||
2877 | static void | 2822 | static void |
2878 | pretty_print_func_ent(void *data, int size, struct event *event, | 2823 | pretty_print_func_ent(void *data, int size, struct event *event, |
2879 | int cpu, int pid, const char *comm, | 2824 | int cpu, int pid) |
2880 | unsigned long secs, unsigned long usecs) | ||
2881 | { | 2825 | { |
2882 | struct format_field *field; | 2826 | struct format_field *field; |
2883 | struct record *rec; | 2827 | struct record *rec; |
2884 | void *copy_data; | 2828 | void *copy_data; |
2885 | unsigned long val; | 2829 | unsigned long val; |
2886 | 2830 | ||
2887 | printf("%5lu.%06lu | ", secs, usecs); | ||
2888 | |||
2889 | print_graph_cpu(cpu); | ||
2890 | print_graph_proc(pid, comm); | ||
2891 | |||
2892 | printf(" | "); | ||
2893 | |||
2894 | if (latency_format) { | 2831 | if (latency_format) { |
2895 | print_lat_fmt(data, size); | 2832 | print_lat_fmt(data, size); |
2896 | printf(" | "); | 2833 | printf(" | "); |
@@ -2923,22 +2860,13 @@ out_free: | |||
2923 | } | 2860 | } |
2924 | 2861 | ||
2925 | static void | 2862 | static void |
2926 | pretty_print_func_ret(void *data, int size __unused, struct event *event, | 2863 | pretty_print_func_ret(void *data, int size __unused, struct event *event) |
2927 | int cpu, int pid, const char *comm, | ||
2928 | unsigned long secs, unsigned long usecs) | ||
2929 | { | 2864 | { |
2930 | unsigned long long rettime, calltime; | 2865 | unsigned long long rettime, calltime; |
2931 | unsigned long long duration, depth; | 2866 | unsigned long long duration, depth; |
2932 | struct format_field *field; | 2867 | struct format_field *field; |
2933 | int i; | 2868 | int i; |
2934 | 2869 | ||
2935 | printf("%5lu.%06lu | ", secs, usecs); | ||
2936 | |||
2937 | print_graph_cpu(cpu); | ||
2938 | print_graph_proc(pid, comm); | ||
2939 | |||
2940 | printf(" | "); | ||
2941 | |||
2942 | if (latency_format) { | 2870 | if (latency_format) { |
2943 | print_lat_fmt(data, size); | 2871 | print_lat_fmt(data, size); |
2944 | printf(" | "); | 2872 | printf(" | "); |
@@ -2976,31 +2904,21 @@ pretty_print_func_ret(void *data, int size __unused, struct event *event, | |||
2976 | 2904 | ||
2977 | static void | 2905 | static void |
2978 | pretty_print_func_graph(void *data, int size, struct event *event, | 2906 | pretty_print_func_graph(void *data, int size, struct event *event, |
2979 | int cpu, int pid, const char *comm, | 2907 | int cpu, int pid) |
2980 | unsigned long secs, unsigned long usecs) | ||
2981 | { | 2908 | { |
2982 | if (event->flags & EVENT_FL_ISFUNCENT) | 2909 | if (event->flags & EVENT_FL_ISFUNCENT) |
2983 | pretty_print_func_ent(data, size, event, | 2910 | pretty_print_func_ent(data, size, event, cpu, pid); |
2984 | cpu, pid, comm, secs, usecs); | ||
2985 | else if (event->flags & EVENT_FL_ISFUNCRET) | 2911 | else if (event->flags & EVENT_FL_ISFUNCRET) |
2986 | pretty_print_func_ret(data, size, event, | 2912 | pretty_print_func_ret(data, size, event); |
2987 | cpu, pid, comm, secs, usecs); | ||
2988 | printf("\n"); | 2913 | printf("\n"); |
2989 | } | 2914 | } |
2990 | 2915 | ||
2991 | void print_event(int cpu, void *data, int size, unsigned long long nsecs, | 2916 | void print_trace_event(int cpu, void *data, int size) |
2992 | char *comm) | ||
2993 | { | 2917 | { |
2994 | struct event *event; | 2918 | struct event *event; |
2995 | unsigned long secs; | ||
2996 | unsigned long usecs; | ||
2997 | int type; | 2919 | int type; |
2998 | int pid; | 2920 | int pid; |
2999 | 2921 | ||
3000 | secs = nsecs / NSECS_PER_SEC; | ||
3001 | nsecs -= secs * NSECS_PER_SEC; | ||
3002 | usecs = nsecs / NSECS_PER_USEC; | ||
3003 | |||
3004 | type = trace_parse_common_type(data); | 2922 | type = trace_parse_common_type(data); |
3005 | 2923 | ||
3006 | event = trace_find_event(type); | 2924 | event = trace_find_event(type); |
@@ -3012,17 +2930,10 @@ void print_event(int cpu, void *data, int size, unsigned long long nsecs, | |||
3012 | pid = trace_parse_common_pid(data); | 2930 | pid = trace_parse_common_pid(data); |
3013 | 2931 | ||
3014 | if (event->flags & (EVENT_FL_ISFUNCENT | EVENT_FL_ISFUNCRET)) | 2932 | if (event->flags & (EVENT_FL_ISFUNCENT | EVENT_FL_ISFUNCRET)) |
3015 | return pretty_print_func_graph(data, size, event, cpu, | 2933 | return pretty_print_func_graph(data, size, event, cpu, pid); |
3016 | pid, comm, secs, usecs); | ||
3017 | 2934 | ||
3018 | if (latency_format) { | 2935 | if (latency_format) |
3019 | printf("%8.8s-%-5d %3d", | ||
3020 | comm, pid, cpu); | ||
3021 | print_lat_fmt(data, size); | 2936 | print_lat_fmt(data, size); |
3022 | } else | ||
3023 | printf("%16s-%-5d [%03d]", comm, pid, cpu); | ||
3024 | |||
3025 | printf(" %5lu.%06lu: %s: ", secs, usecs, event->name); | ||
3026 | 2937 | ||
3027 | if (event->flags & EVENT_FL_FAILED) { | 2938 | if (event->flags & EVENT_FL_FAILED) { |
3028 | printf("EVENT '%s' FAILED TO PARSE\n", | 2939 | printf("EVENT '%s' FAILED TO PARSE\n", |
@@ -3031,7 +2942,6 @@ void print_event(int cpu, void *data, int size, unsigned long long nsecs, | |||
3031 | } | 2942 | } |
3032 | 2943 | ||
3033 | pretty_print(data, size, event); | 2944 | pretty_print(data, size, event); |
3034 | printf("\n"); | ||
3035 | } | 2945 | } |
3036 | 2946 | ||
3037 | static void print_fields(struct print_flag_sym *field) | 2947 | static void print_fields(struct print_flag_sym *field) |
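print_event() used to print the comm/pid/cpu/timestamp prefix (and the trailing newline) itself; after the rename to print_trace_event() that is the caller's job, and only the cpu and raw payload are passed in. An illustrative call site (the surrounding comm/pid/cpu/secs/usecs variables are assumptions about the caller, not part of this patch):

    printf("%16s-%-5d [%03d] %5lu.%06lu: ", comm, pid, cpu, secs, usecs);
    print_trace_event(cpu, sample->raw_data, sample->raw_size);
    printf("\n");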
diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c index f7af2fca965d..c9dcbec7d800 100644 --- a/tools/perf/util/trace-event-scripting.c +++ b/tools/perf/util/trace-event-scripting.c | |||
@@ -36,11 +36,11 @@ static int stop_script_unsupported(void) | |||
36 | return 0; | 36 | return 0; |
37 | } | 37 | } |
38 | 38 | ||
39 | static void process_event_unsupported(int cpu __unused, | 39 | static void process_event_unsupported(union perf_event *event __unused, |
40 | void *data __unused, | 40 | struct perf_sample *sample __unused, |
41 | int size __unused, | 41 | struct perf_evsel *evsel __unused, |
42 | unsigned long long nsecs __unused, | 42 | struct perf_session *session __unused, |
43 | char *comm __unused) | 43 | struct thread *thread __unused) |
44 | { | 44 | { |
45 | } | 45 | } |
46 | 46 | ||
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h index b3e86b1e4444..f674dda3363b 100644 --- a/tools/perf/util/trace-event.h +++ b/tools/perf/util/trace-event.h | |||
@@ -3,6 +3,7 @@ | |||
3 | 3 | ||
4 | #include <stdbool.h> | 4 | #include <stdbool.h> |
5 | #include "parse-events.h" | 5 | #include "parse-events.h" |
6 | #include "session.h" | ||
6 | 7 | ||
7 | #define __unused __attribute__((unused)) | 8 | #define __unused __attribute__((unused)) |
8 | 9 | ||
@@ -176,8 +177,7 @@ void print_printk(void); | |||
176 | 177 | ||
177 | int parse_ftrace_file(char *buf, unsigned long size); | 178 | int parse_ftrace_file(char *buf, unsigned long size); |
178 | int parse_event_file(char *buf, unsigned long size, char *sys); | 179 | int parse_event_file(char *buf, unsigned long size, char *sys); |
179 | void print_event(int cpu, void *data, int size, unsigned long long nsecs, | 180 | void print_trace_event(int cpu, void *data, int size); |
180 | char *comm); | ||
181 | 181 | ||
182 | extern int file_bigendian; | 182 | extern int file_bigendian; |
183 | extern int host_bigendian; | 183 | extern int host_bigendian; |
@@ -262,9 +262,8 @@ raw_field_value(struct event *event, const char *name, void *data); | |||
262 | void *raw_field_ptr(struct event *event, const char *name, void *data); | 262 | void *raw_field_ptr(struct event *event, const char *name, void *data); |
263 | unsigned long long eval_flag(const char *flag); | 263 | unsigned long long eval_flag(const char *flag); |
264 | 264 | ||
265 | int read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events); | 265 | int read_tracing_data(int fd, struct list_head *pattrs); |
266 | ssize_t read_tracing_data_size(int fd, struct perf_event_attr *pattrs, | 266 | ssize_t read_tracing_data_size(int fd, struct list_head *pattrs); |
267 | int nb_events); | ||
268 | 267 | ||
269 | /* taken from kernel/trace/trace.h */ | 268 | /* taken from kernel/trace/trace.h */ |
270 | enum trace_flag_type { | 269 | enum trace_flag_type { |
@@ -279,8 +278,11 @@ struct scripting_ops { | |||
279 | const char *name; | 278 | const char *name; |
280 | int (*start_script) (const char *script, int argc, const char **argv); | 279 | int (*start_script) (const char *script, int argc, const char **argv); |
281 | int (*stop_script) (void); | 280 | int (*stop_script) (void); |
282 | void (*process_event) (int cpu, void *data, int size, | 281 | void (*process_event) (union perf_event *event, |
283 | unsigned long long nsecs, char *comm); | 282 | struct perf_sample *sample, |
283 | struct perf_evsel *evsel, | ||
284 | struct perf_session *session, | ||
285 | struct thread *thread); | ||
284 | int (*generate_script) (const char *outfile); | 286 | int (*generate_script) (const char *outfile); |
285 | }; | 287 | }; |
286 | 288 | ||
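Scripting back ends now receive the full context (event, sample, evsel, session, thread) instead of pre-cooked cpu/data/size/time/comm arguments. A sketch of what a backend hook could look like under the new struct scripting_ops signature; the function and the fields it prints are illustrative only:

    static void example_process_event(union perf_event *event __unused,
                                      struct perf_sample *sample,
                                      struct perf_evsel *evsel __unused,
                                      struct perf_session *session __unused,
                                      struct thread *thread)
    {
            printf("%s: cpu %u, %u bytes of raw payload\n",
                   thread->comm, sample->cpu, sample->raw_size);
    }

The unsupported stubs in trace-event-scripting.c above follow exactly this prototype.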
diff --git a/tools/perf/util/types.h b/tools/perf/util/types.h index 7d6b8331f898..5f3689a3d085 100644 --- a/tools/perf/util/types.h +++ b/tools/perf/util/types.h | |||
@@ -1,12 +1,14 @@ | |||
1 | #ifndef __PERF_TYPES_H | 1 | #ifndef __PERF_TYPES_H |
2 | #define __PERF_TYPES_H | 2 | #define __PERF_TYPES_H |
3 | 3 | ||
4 | #include <stdint.h> | ||
5 | |||
4 | /* | 6 | /* |
5 | * We define u64 as unsigned long long for every architecture | 7 | * We define u64 as uint64_t for every architecture |
6 | * so that we can print it with %Lx without getting warnings. | 8 | * so that we can print it with "%"PRIx64 without getting warnings. |
7 | */ | 9 | */ |
8 | typedef unsigned long long u64; | 10 | typedef uint64_t u64; |
9 | typedef signed long long s64; | 11 | typedef int64_t s64; |
10 | typedef unsigned int u32; | 12 | typedef unsigned int u32; |
11 | typedef signed int s32; | 13 | typedef signed int s32; |
12 | typedef unsigned short u16; | 14 | typedef unsigned short u16; |
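Since u64 is now uint64_t everywhere, format strings move from %Lx to the inttypes.h PRI macros, which stay warning-free whether the C library defines uint64_t as unsigned long or unsigned long long. A small standalone example (the relative include assumes building inside tools/perf/util):

    #include <inttypes.h>
    #include <stdio.h>
    #include "types.h"

    int main(void)
    {
            u64 ip = 0xffffffff81000000ULL;

            printf("ip: %#" PRIx64 "\n", ip);
            return 0;
    }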
diff --git a/tools/perf/util/ui/browser.c b/tools/perf/util/ui/browser.c index 66f2d583d8c4..611219f80680 100644 --- a/tools/perf/util/ui/browser.c +++ b/tools/perf/util/ui/browser.c | |||
@@ -1,16 +1,6 @@ | |||
1 | #define _GNU_SOURCE | 1 | #include "libslang.h" |
2 | #include <stdio.h> | 2 | #include "ui.h" |
3 | #undef _GNU_SOURCE | 3 | #include <linux/compiler.h> |
4 | /* | ||
5 | * slang versions <= 2.0.6 have a "#if HAVE_LONG_LONG" that breaks | ||
6 | * the build if it isn't defined. Use the equivalent one that glibc | ||
7 | * has on features.h. | ||
8 | */ | ||
9 | #include <features.h> | ||
10 | #ifndef HAVE_LONG_LONG | ||
11 | #define HAVE_LONG_LONG __GLIBC_HAVE_LONG_LONG | ||
12 | #endif | ||
13 | #include <slang.h> | ||
14 | #include <linux/list.h> | 4 | #include <linux/list.h> |
15 | #include <linux/rbtree.h> | 5 | #include <linux/rbtree.h> |
16 | #include <stdlib.h> | 6 | #include <stdlib.h> |
@@ -19,17 +9,9 @@ | |||
19 | #include "helpline.h" | 9 | #include "helpline.h" |
20 | #include "../color.h" | 10 | #include "../color.h" |
21 | #include "../util.h" | 11 | #include "../util.h" |
12 | #include <stdio.h> | ||
22 | 13 | ||
23 | #if SLANG_VERSION < 20104 | 14 | static int ui_browser__percent_color(double percent, bool current) |
24 | #define sltt_set_color(obj, name, fg, bg) \ | ||
25 | SLtt_set_color(obj,(char *)name, (char *)fg, (char *)bg) | ||
26 | #else | ||
27 | #define sltt_set_color SLtt_set_color | ||
28 | #endif | ||
29 | |||
30 | newtComponent newt_form__new(void); | ||
31 | |||
32 | int ui_browser__percent_color(double percent, bool current) | ||
33 | { | 15 | { |
34 | if (current) | 16 | if (current) |
35 | return HE_COLORSET_SELECTED; | 17 | return HE_COLORSET_SELECTED; |
@@ -40,6 +22,23 @@ int ui_browser__percent_color(double percent, bool current) | |||
40 | return HE_COLORSET_NORMAL; | 22 | return HE_COLORSET_NORMAL; |
41 | } | 23 | } |
42 | 24 | ||
25 | void ui_browser__set_color(struct ui_browser *self __used, int color) | ||
26 | { | ||
27 | SLsmg_set_color(color); | ||
28 | } | ||
29 | |||
30 | void ui_browser__set_percent_color(struct ui_browser *self, | ||
31 | double percent, bool current) | ||
32 | { | ||
33 | int color = ui_browser__percent_color(percent, current); | ||
34 | ui_browser__set_color(self, color); | ||
35 | } | ||
36 | |||
37 | void ui_browser__gotorc(struct ui_browser *self, int y, int x) | ||
38 | { | ||
39 | SLsmg_gotorc(self->y + y, self->x + x); | ||
40 | } | ||
41 | |||
43 | void ui_browser__list_head_seek(struct ui_browser *self, off_t offset, int whence) | 42 | void ui_browser__list_head_seek(struct ui_browser *self, off_t offset, int whence) |
44 | { | 43 | { |
45 | struct list_head *head = self->entries; | 44 | struct list_head *head = self->entries; |
@@ -111,7 +110,7 @@ unsigned int ui_browser__rb_tree_refresh(struct ui_browser *self) | |||
111 | nd = self->top; | 110 | nd = self->top; |
112 | 111 | ||
113 | while (nd != NULL) { | 112 | while (nd != NULL) { |
114 | SLsmg_gotorc(self->y + row, self->x); | 113 | ui_browser__gotorc(self, row, 0); |
115 | self->write(self, nd, row); | 114 | self->write(self, nd, row); |
116 | if (++row == self->height) | 115 | if (++row == self->height) |
117 | break; | 116 | break; |
@@ -131,13 +130,10 @@ void ui_browser__refresh_dimensions(struct ui_browser *self) | |||
131 | int cols, rows; | 130 | int cols, rows; |
132 | newtGetScreenSize(&cols, &rows); | 131 | newtGetScreenSize(&cols, &rows); |
133 | 132 | ||
134 | if (self->width > cols - 4) | 133 | self->width = cols - 1; |
135 | self->width = cols - 4; | 134 | self->height = rows - 2; |
136 | self->height = rows - 5; | 135 | self->y = 1; |
137 | if (self->height > self->nr_entries) | 136 | self->x = 0; |
138 | self->height = self->nr_entries; | ||
139 | self->y = (rows - self->height) / 2; | ||
140 | self->x = (cols - self->width) / 2; | ||
141 | } | 137 | } |
142 | 138 | ||
143 | void ui_browser__reset_index(struct ui_browser *self) | 139 | void ui_browser__reset_index(struct ui_browser *self) |
@@ -146,78 +142,109 @@ void ui_browser__reset_index(struct ui_browser *self) | |||
146 | self->seek(self, 0, SEEK_SET); | 142 | self->seek(self, 0, SEEK_SET); |
147 | } | 143 | } |
148 | 144 | ||
145 | void ui_browser__add_exit_key(struct ui_browser *self, int key) | ||
146 | { | ||
147 | newtFormAddHotKey(self->form, key); | ||
148 | } | ||
149 | |||
150 | void ui_browser__add_exit_keys(struct ui_browser *self, int keys[]) | ||
151 | { | ||
152 | int i = 0; | ||
153 | |||
154 | while (keys[i] && i < 64) { | ||
155 | ui_browser__add_exit_key(self, keys[i]); | ||
156 | ++i; | ||
157 | } | ||
158 | } | ||
159 | |||
160 | void __ui_browser__show_title(struct ui_browser *browser, const char *title) | ||
161 | { | ||
162 | SLsmg_gotorc(0, 0); | ||
163 | ui_browser__set_color(browser, NEWT_COLORSET_ROOT); | ||
164 | slsmg_write_nstring(title, browser->width); | ||
165 | } | ||
166 | |||
167 | void ui_browser__show_title(struct ui_browser *browser, const char *title) | ||
168 | { | ||
169 | pthread_mutex_lock(&ui__lock); | ||
170 | __ui_browser__show_title(browser, title); | ||
171 | pthread_mutex_unlock(&ui__lock); | ||
172 | } | ||
173 | |||
149 | int ui_browser__show(struct ui_browser *self, const char *title, | 174 | int ui_browser__show(struct ui_browser *self, const char *title, |
150 | const char *helpline, ...) | 175 | const char *helpline, ...) |
151 | { | 176 | { |
152 | va_list ap; | 177 | va_list ap; |
178 | int keys[] = { NEWT_KEY_UP, NEWT_KEY_DOWN, NEWT_KEY_PGUP, | ||
179 | NEWT_KEY_PGDN, NEWT_KEY_HOME, NEWT_KEY_END, ' ', | ||
180 | NEWT_KEY_LEFT, NEWT_KEY_ESCAPE, 'q', CTRL('c'), 0 }; | ||
153 | 181 | ||
154 | if (self->form != NULL) { | 182 | if (self->form != NULL) |
155 | newtFormDestroy(self->form); | 183 | newtFormDestroy(self->form); |
156 | newtPopWindow(); | 184 | |
157 | } | ||
158 | ui_browser__refresh_dimensions(self); | 185 | ui_browser__refresh_dimensions(self); |
159 | newtCenteredWindow(self->width, self->height, title); | 186 | self->form = newtForm(NULL, NULL, 0); |
160 | self->form = newt_form__new(); | ||
161 | if (self->form == NULL) | 187 | if (self->form == NULL) |
162 | return -1; | 188 | return -1; |
163 | 189 | ||
164 | self->sb = newtVerticalScrollbar(self->width, 0, self->height, | 190 | self->sb = newtVerticalScrollbar(self->width, 1, self->height, |
165 | HE_COLORSET_NORMAL, | 191 | HE_COLORSET_NORMAL, |
166 | HE_COLORSET_SELECTED); | 192 | HE_COLORSET_SELECTED); |
167 | if (self->sb == NULL) | 193 | if (self->sb == NULL) |
168 | return -1; | 194 | return -1; |
169 | 195 | ||
170 | newtFormAddHotKey(self->form, NEWT_KEY_UP); | 196 | pthread_mutex_lock(&ui__lock); |
171 | newtFormAddHotKey(self->form, NEWT_KEY_DOWN); | 197 | __ui_browser__show_title(self, title); |
172 | newtFormAddHotKey(self->form, NEWT_KEY_PGUP); | 198 | |
173 | newtFormAddHotKey(self->form, NEWT_KEY_PGDN); | 199 | ui_browser__add_exit_keys(self, keys); |
174 | newtFormAddHotKey(self->form, NEWT_KEY_HOME); | ||
175 | newtFormAddHotKey(self->form, NEWT_KEY_END); | ||
176 | newtFormAddHotKey(self->form, ' '); | ||
177 | newtFormAddComponent(self->form, self->sb); | 200 | newtFormAddComponent(self->form, self->sb); |
178 | 201 | ||
179 | va_start(ap, helpline); | 202 | va_start(ap, helpline); |
180 | ui_helpline__vpush(helpline, ap); | 203 | ui_helpline__vpush(helpline, ap); |
181 | va_end(ap); | 204 | va_end(ap); |
205 | pthread_mutex_unlock(&ui__lock); | ||
182 | return 0; | 206 | return 0; |
183 | } | 207 | } |
184 | 208 | ||
185 | void ui_browser__hide(struct ui_browser *self) | 209 | void ui_browser__hide(struct ui_browser *self) |
186 | { | 210 | { |
211 | pthread_mutex_lock(&ui__lock); | ||
187 | newtFormDestroy(self->form); | 212 | newtFormDestroy(self->form); |
188 | newtPopWindow(); | ||
189 | self->form = NULL; | 213 | self->form = NULL; |
190 | ui_helpline__pop(); | 214 | ui_helpline__pop(); |
215 | pthread_mutex_unlock(&ui__lock); | ||
191 | } | 216 | } |
192 | 217 | ||
193 | int ui_browser__refresh(struct ui_browser *self) | 218 | int ui_browser__refresh(struct ui_browser *self) |
194 | { | 219 | { |
195 | int row; | 220 | int row; |
196 | 221 | ||
222 | pthread_mutex_lock(&ui__lock); | ||
197 | newtScrollbarSet(self->sb, self->index, self->nr_entries - 1); | 223 | newtScrollbarSet(self->sb, self->index, self->nr_entries - 1); |
198 | row = self->refresh(self); | 224 | row = self->refresh(self); |
199 | SLsmg_set_color(HE_COLORSET_NORMAL); | 225 | ui_browser__set_color(self, HE_COLORSET_NORMAL); |
200 | SLsmg_fill_region(self->y + row, self->x, | 226 | SLsmg_fill_region(self->y + row, self->x, |
201 | self->height - row, self->width, ' '); | 227 | self->height - row, self->width, ' '); |
228 | pthread_mutex_unlock(&ui__lock); | ||
202 | 229 | ||
203 | return 0; | 230 | return 0; |
204 | } | 231 | } |
205 | 232 | ||
206 | int ui_browser__run(struct ui_browser *self, struct newtExitStruct *es) | 233 | int ui_browser__run(struct ui_browser *self) |
207 | { | 234 | { |
235 | struct newtExitStruct es; | ||
236 | |||
208 | if (ui_browser__refresh(self) < 0) | 237 | if (ui_browser__refresh(self) < 0) |
209 | return -1; | 238 | return -1; |
210 | 239 | ||
211 | while (1) { | 240 | while (1) { |
212 | off_t offset; | 241 | off_t offset; |
213 | 242 | ||
214 | newtFormRun(self->form, es); | 243 | newtFormRun(self->form, &es); |
215 | 244 | ||
216 | if (es->reason != NEWT_EXIT_HOTKEY) | 245 | if (es.reason != NEWT_EXIT_HOTKEY) |
217 | break; | 246 | break; |
218 | if (is_exit_key(es->u.key)) | 247 | switch (es.u.key) { |
219 | return es->u.key; | ||
220 | switch (es->u.key) { | ||
221 | case NEWT_KEY_DOWN: | 248 | case NEWT_KEY_DOWN: |
222 | if (self->index == self->nr_entries - 1) | 249 | if (self->index == self->nr_entries - 1) |
223 | break; | 250 | break; |
@@ -274,12 +301,12 @@ int ui_browser__run(struct ui_browser *self, struct newtExitStruct *es) | |||
274 | self->seek(self, -offset, SEEK_END); | 301 | self->seek(self, -offset, SEEK_END); |
275 | break; | 302 | break; |
276 | default: | 303 | default: |
277 | return es->u.key; | 304 | return es.u.key; |
278 | } | 305 | } |
279 | if (ui_browser__refresh(self) < 0) | 306 | if (ui_browser__refresh(self) < 0) |
280 | return -1; | 307 | return -1; |
281 | } | 308 | } |
282 | return 0; | 309 | return -1; |
283 | } | 310 | } |
284 | 311 | ||
285 | unsigned int ui_browser__list_head_refresh(struct ui_browser *self) | 312 | unsigned int ui_browser__list_head_refresh(struct ui_browser *self) |
@@ -294,7 +321,7 @@ unsigned int ui_browser__list_head_refresh(struct ui_browser *self) | |||
294 | pos = self->top; | 321 | pos = self->top; |
295 | 322 | ||
296 | list_for_each_from(pos, head) { | 323 | list_for_each_from(pos, head) { |
297 | SLsmg_gotorc(self->y + row, self->x); | 324 | ui_browser__gotorc(self, row, 0); |
298 | self->write(self, pos, row); | 325 | self->write(self, pos, row); |
299 | if (++row == self->height) | 326 | if (++row == self->height) |
300 | break; | 327 | break; |
diff --git a/tools/perf/util/ui/browser.h b/tools/perf/util/ui/browser.h index 0b9f829214f7..fc63dda10910 100644 --- a/tools/perf/util/ui/browser.h +++ b/tools/perf/util/ui/browser.h | |||
@@ -24,17 +24,23 @@ struct ui_browser { | |||
24 | u32 nr_entries; | 24 | u32 nr_entries; |
25 | }; | 25 | }; |
26 | 26 | ||
27 | 27 | void ui_browser__set_color(struct ui_browser *self, int color); | |
28 | int ui_browser__percent_color(double percent, bool current); | 28 | void ui_browser__set_percent_color(struct ui_browser *self, |
29 | double percent, bool current); | ||
29 | bool ui_browser__is_current_entry(struct ui_browser *self, unsigned row); | 30 | bool ui_browser__is_current_entry(struct ui_browser *self, unsigned row); |
30 | void ui_browser__refresh_dimensions(struct ui_browser *self); | 31 | void ui_browser__refresh_dimensions(struct ui_browser *self); |
31 | void ui_browser__reset_index(struct ui_browser *self); | 32 | void ui_browser__reset_index(struct ui_browser *self); |
32 | 33 | ||
34 | void ui_browser__gotorc(struct ui_browser *self, int y, int x); | ||
35 | void ui_browser__add_exit_key(struct ui_browser *self, int key); | ||
36 | void ui_browser__add_exit_keys(struct ui_browser *self, int keys[]); | ||
37 | void __ui_browser__show_title(struct ui_browser *browser, const char *title); | ||
38 | void ui_browser__show_title(struct ui_browser *browser, const char *title); | ||
33 | int ui_browser__show(struct ui_browser *self, const char *title, | 39 | int ui_browser__show(struct ui_browser *self, const char *title, |
34 | const char *helpline, ...); | 40 | const char *helpline, ...); |
35 | void ui_browser__hide(struct ui_browser *self); | 41 | void ui_browser__hide(struct ui_browser *self); |
36 | int ui_browser__refresh(struct ui_browser *self); | 42 | int ui_browser__refresh(struct ui_browser *self); |
37 | int ui_browser__run(struct ui_browser *self, struct newtExitStruct *es); | 43 | int ui_browser__run(struct ui_browser *self); |
38 | 44 | ||
39 | void ui_browser__rb_tree_seek(struct ui_browser *self, off_t offset, int whence); | 45 | void ui_browser__rb_tree_seek(struct ui_browser *self, off_t offset, int whence); |
40 | unsigned int ui_browser__rb_tree_refresh(struct ui_browser *self); | 46 | unsigned int ui_browser__rb_tree_refresh(struct ui_browser *self); |
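The browser run-loop contract changed: exit keys are registered up front with ui_browser__add_exit_keys(), ui_browser__run() no longer takes a newtExitStruct and simply returns the key that ended the form (or -1), and show/hide/refresh serialize on ui__lock internally. A hedged sketch of a caller written against the new interface (the browser instance and its write/seek/refresh callbacks are assumed to be set up elsewhere):

    static int example_browser__run(struct ui_browser *self)
    {
            int exit_keys[] = { 'a', NEWT_KEY_ENTER, 0 };
            int key;

            if (ui_browser__show(self, "Example",
                                 "Press ESC to exit, 'a' or Enter to act") < 0)
                    return -1;

            ui_browser__add_exit_keys(self, exit_keys);

            while (1) {
                    key = ui_browser__run(self);

                    switch (key) {
                    case 'a':
                    case NEWT_KEY_ENTER:
                            /* act on the entry under the cursor, keep browsing */
                            continue;
                    default:
                            goto out;   /* ESC, 'q', CTRL('c') or -1 */
                    }
            }
    out:
            ui_browser__hide(self);
            return key;
    }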
diff --git a/tools/perf/util/ui/browsers/annotate.c b/tools/perf/util/ui/browsers/annotate.c index a90273e63f4f..0229723aceb3 100644 --- a/tools/perf/util/ui/browsers/annotate.c +++ b/tools/perf/util/ui/browsers/annotate.c | |||
@@ -1,9 +1,11 @@ | |||
1 | #include "../browser.h" | 1 | #include "../browser.h" |
2 | #include "../helpline.h" | 2 | #include "../helpline.h" |
3 | #include "../libslang.h" | 3 | #include "../libslang.h" |
4 | #include "../../annotate.h" | ||
4 | #include "../../hist.h" | 5 | #include "../../hist.h" |
5 | #include "../../sort.h" | 6 | #include "../../sort.h" |
6 | #include "../../symbol.h" | 7 | #include "../../symbol.h" |
8 | #include <pthread.h> | ||
7 | 9 | ||
8 | static void ui__error_window(const char *fmt, ...) | 10 | static void ui__error_window(const char *fmt, ...) |
9 | { | 11 | { |
@@ -40,14 +42,10 @@ static void annotate_browser__write(struct ui_browser *self, void *entry, int ro | |||
40 | 42 | ||
41 | if (ol->offset != -1) { | 43 | if (ol->offset != -1) { |
42 | struct objdump_line_rb_node *olrb = objdump_line__rb(ol); | 44 | struct objdump_line_rb_node *olrb = objdump_line__rb(ol); |
43 | int color = ui_browser__percent_color(olrb->percent, current_entry); | 45 | ui_browser__set_percent_color(self, olrb->percent, current_entry); |
44 | SLsmg_set_color(color); | ||
45 | slsmg_printf(" %7.2f ", olrb->percent); | 46 | slsmg_printf(" %7.2f ", olrb->percent); |
46 | if (!current_entry) | ||
47 | SLsmg_set_color(HE_COLORSET_CODE); | ||
48 | } else { | 47 | } else { |
49 | int color = ui_browser__percent_color(0, current_entry); | 48 | ui_browser__set_percent_color(self, 0, current_entry); |
50 | SLsmg_set_color(color); | ||
51 | slsmg_write_nstring(" ", 9); | 49 | slsmg_write_nstring(" ", 9); |
52 | } | 50 | } |
53 | 51 | ||
@@ -57,35 +55,40 @@ static void annotate_browser__write(struct ui_browser *self, void *entry, int ro | |||
57 | slsmg_write_nstring(" ", width - 18); | 55 | slsmg_write_nstring(" ", width - 18); |
58 | else | 56 | else |
59 | slsmg_write_nstring(ol->line, width - 18); | 57 | slsmg_write_nstring(ol->line, width - 18); |
58 | |||
59 | if (!current_entry) | ||
60 | ui_browser__set_color(self, HE_COLORSET_CODE); | ||
60 | } | 61 | } |
61 | 62 | ||
62 | static double objdump_line__calc_percent(struct objdump_line *self, | 63 | static double objdump_line__calc_percent(struct objdump_line *self, |
63 | struct list_head *head, | 64 | struct symbol *sym, int evidx) |
64 | struct symbol *sym) | ||
65 | { | 65 | { |
66 | double percent = 0.0; | 66 | double percent = 0.0; |
67 | 67 | ||
68 | if (self->offset != -1) { | 68 | if (self->offset != -1) { |
69 | int len = sym->end - sym->start; | 69 | int len = sym->end - sym->start; |
70 | unsigned int hits = 0; | 70 | unsigned int hits = 0; |
71 | struct sym_priv *priv = symbol__priv(sym); | 71 | struct annotation *notes = symbol__annotation(sym); |
72 | struct sym_ext *sym_ext = priv->ext; | 72 | struct source_line *src_line = notes->src->lines; |
73 | struct sym_hist *h = priv->hist; | 73 | struct sym_hist *h = annotation__histogram(notes, evidx); |
74 | s64 offset = self->offset; | 74 | s64 offset = self->offset; |
75 | struct objdump_line *next = objdump__get_next_ip_line(head, self); | 75 | struct objdump_line *next; |
76 | |||
77 | 76 | ||
77 | next = objdump__get_next_ip_line(¬es->src->source, self); | ||
78 | while (offset < (s64)len && | 78 | while (offset < (s64)len && |
79 | (next == NULL || offset < next->offset)) { | 79 | (next == NULL || offset < next->offset)) { |
80 | if (sym_ext) { | 80 | if (src_line) { |
81 | percent += sym_ext[offset].percent; | 81 | percent += src_line[offset].percent; |
82 | } else | 82 | } else |
83 | hits += h->ip[offset]; | 83 | hits += h->addr[offset]; |
84 | 84 | ||
85 | ++offset; | 85 | ++offset; |
86 | } | 86 | } |
87 | 87 | /* | |
88 | if (sym_ext == NULL && h->sum) | 88 | * If the percentage wasn't already calculated in |
89 | * symbol__get_source_line, do it now: | ||
90 | */ | ||
91 | if (src_line == NULL && h->sum) | ||
89 | percent = 100.0 * hits / h->sum; | 92 | percent = 100.0 * hits / h->sum; |
90 | } | 93 | } |
91 | 94 | ||
@@ -135,105 +138,163 @@ static void annotate_browser__set_top(struct annotate_browser *self, | |||
135 | self->curr_hot = nd; | 138 | self->curr_hot = nd; |
136 | } | 139 | } |
137 | 140 | ||
138 | static int annotate_browser__run(struct annotate_browser *self, | 141 | static void annotate_browser__calc_percent(struct annotate_browser *browser, |
139 | struct newtExitStruct *es) | 142 | int evidx) |
143 | { | ||
144 | struct symbol *sym = browser->b.priv; | ||
145 | struct annotation *notes = symbol__annotation(sym); | ||
146 | struct objdump_line *pos; | ||
147 | |||
148 | browser->entries = RB_ROOT; | ||
149 | |||
150 | pthread_mutex_lock(¬es->lock); | ||
151 | |||
152 | list_for_each_entry(pos, ¬es->src->source, node) { | ||
153 | struct objdump_line_rb_node *rbpos = objdump_line__rb(pos); | ||
154 | rbpos->percent = objdump_line__calc_percent(pos, sym, evidx); | ||
155 | if (rbpos->percent < 0.01) { | ||
156 | RB_CLEAR_NODE(&rbpos->rb_node); | ||
157 | continue; | ||
158 | } | ||
159 | objdump__insert_line(&browser->entries, rbpos); | ||
160 | } | ||
161 | pthread_mutex_unlock(¬es->lock); | ||
162 | |||
163 | browser->curr_hot = rb_last(&browser->entries); | ||
164 | } | ||
165 | |||
166 | static int annotate_browser__run(struct annotate_browser *self, int evidx, | ||
167 | int refresh) | ||
140 | { | 168 | { |
141 | struct rb_node *nd; | 169 | struct rb_node *nd = NULL; |
142 | struct hist_entry *he = self->b.priv; | 170 | struct symbol *sym = self->b.priv; |
171 | /* | ||
172 | * RIGHT To allow builtin-annotate to cycle thru multiple symbols by | ||
173 | * examining the exit key for this function. | ||
174 | */ | ||
175 | int exit_keys[] = { 'H', NEWT_KEY_TAB, NEWT_KEY_UNTAB, | ||
176 | NEWT_KEY_RIGHT, 0 }; | ||
177 | int key; | ||
143 | 178 | ||
144 | if (ui_browser__show(&self->b, he->ms.sym->name, | 179 | if (ui_browser__show(&self->b, sym->name, |
145 | "<- or ESC: exit, TAB/shift+TAB: cycle thru samples") < 0) | 180 | "<-, -> or ESC: exit, TAB/shift+TAB: " |
181 | "cycle hottest lines, H: Hottest") < 0) | ||
146 | return -1; | 182 | return -1; |
147 | 183 | ||
148 | newtFormAddHotKey(self->b.form, NEWT_KEY_LEFT); | 184 | ui_browser__add_exit_keys(&self->b, exit_keys); |
149 | newtFormAddHotKey(self->b.form, NEWT_KEY_RIGHT); | 185 | annotate_browser__calc_percent(self, evidx); |
186 | |||
187 | if (self->curr_hot) | ||
188 | annotate_browser__set_top(self, self->curr_hot); | ||
150 | 189 | ||
151 | nd = self->curr_hot; | 190 | nd = self->curr_hot; |
152 | if (nd) { | ||
153 | newtFormAddHotKey(self->b.form, NEWT_KEY_TAB); | ||
154 | newtFormAddHotKey(self->b.form, NEWT_KEY_UNTAB); | ||
155 | } | ||
156 | 191 | ||
157 | while (1) { | 192 | if (refresh != 0) |
158 | ui_browser__run(&self->b, es); | 193 | newtFormSetTimer(self->b.form, refresh); |
159 | 194 | ||
160 | if (es->reason != NEWT_EXIT_HOTKEY) | 195 | while (1) { |
161 | break; | 196 | key = ui_browser__run(&self->b); |
197 | |||
198 | if (refresh != 0) { | ||
199 | annotate_browser__calc_percent(self, evidx); | ||
200 | /* | ||
201 | * Current line focus got out of the list of most active | ||
202 | * lines, NULL it so that if TAB|UNTAB is pressed, we | ||
203 | * move to curr_hot (current hottest line). | ||
204 | */ | ||
205 | if (nd != NULL && RB_EMPTY_NODE(nd)) | ||
206 | nd = NULL; | ||
207 | } | ||
162 | 208 | ||
163 | switch (es->u.key) { | 209 | switch (key) { |
210 | case -1: | ||
211 | /* | ||
212 | * FIXME we need to check if it was | ||
213 | * es.reason == NEWT_EXIT_TIMER | ||
214 | */ | ||
215 | if (refresh != 0) | ||
216 | symbol__annotate_decay_histogram(sym, evidx); | ||
217 | continue; | ||
164 | case NEWT_KEY_TAB: | 218 | case NEWT_KEY_TAB: |
165 | nd = rb_prev(nd); | 219 | if (nd != NULL) { |
166 | if (nd == NULL) | 220 | nd = rb_prev(nd); |
167 | nd = rb_last(&self->entries); | 221 | if (nd == NULL) |
168 | annotate_browser__set_top(self, nd); | 222 | nd = rb_last(&self->entries); |
223 | } else | ||
224 | nd = self->curr_hot; | ||
169 | break; | 225 | break; |
170 | case NEWT_KEY_UNTAB: | 226 | case NEWT_KEY_UNTAB: |
171 | nd = rb_next(nd); | 227 | if (nd != NULL) |
172 | if (nd == NULL) | 228 | nd = rb_next(nd); |
173 | nd = rb_first(&self->entries); | 229 | if (nd == NULL) |
174 | annotate_browser__set_top(self, nd); | 230 | nd = rb_first(&self->entries); |
231 | else | ||
232 | nd = self->curr_hot; | ||
233 | break; | ||
234 | case 'H': | ||
235 | nd = self->curr_hot; | ||
175 | break; | 236 | break; |
176 | default: | 237 | default: |
177 | goto out; | 238 | goto out; |
178 | } | 239 | } |
240 | |||
241 | if (nd != NULL) | ||
242 | annotate_browser__set_top(self, nd); | ||
179 | } | 243 | } |
180 | out: | 244 | out: |
181 | ui_browser__hide(&self->b); | 245 | ui_browser__hide(&self->b); |
182 | return es->u.key; | 246 | return key; |
183 | } | 247 | } |
184 | 248 | ||
185 | int hist_entry__tui_annotate(struct hist_entry *self) | 249 | int hist_entry__tui_annotate(struct hist_entry *he, int evidx) |
250 | { | ||
251 | return symbol__tui_annotate(he->ms.sym, he->ms.map, evidx, 0); | ||
252 | } | ||
253 | |||
254 | int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx, | ||
255 | int refresh) | ||
186 | { | 256 | { |
187 | struct newtExitStruct es; | ||
188 | struct objdump_line *pos, *n; | 257 | struct objdump_line *pos, *n; |
189 | struct objdump_line_rb_node *rbpos; | 258 | struct annotation *notes; |
190 | LIST_HEAD(head); | ||
191 | struct annotate_browser browser = { | 259 | struct annotate_browser browser = { |
192 | .b = { | 260 | .b = { |
193 | .entries = &head, | ||
194 | .refresh = ui_browser__list_head_refresh, | 261 | .refresh = ui_browser__list_head_refresh, |
195 | .seek = ui_browser__list_head_seek, | 262 | .seek = ui_browser__list_head_seek, |
196 | .write = annotate_browser__write, | 263 | .write = annotate_browser__write, |
197 | .priv = self, | 264 | .priv = sym, |
198 | }, | 265 | }, |
199 | }; | 266 | }; |
200 | int ret; | 267 | int ret; |
201 | 268 | ||
202 | if (self->ms.sym == NULL) | 269 | if (sym == NULL) |
203 | return -1; | 270 | return -1; |
204 | 271 | ||
205 | if (self->ms.map->dso->annotate_warned) | 272 | if (map->dso->annotate_warned) |
206 | return -1; | 273 | return -1; |
207 | 274 | ||
208 | if (hist_entry__annotate(self, &head, sizeof(*rbpos)) < 0) { | 275 | if (symbol__annotate(sym, map, sizeof(struct objdump_line_rb_node)) < 0) { |
209 | ui__error_window(ui_helpline__last_msg); | 276 | ui__error_window(ui_helpline__last_msg); |
210 | return -1; | 277 | return -1; |
211 | } | 278 | } |
212 | 279 | ||
213 | ui_helpline__push("Press <- or ESC to exit"); | 280 | ui_helpline__push("Press <- or ESC to exit"); |
214 | 281 | ||
215 | list_for_each_entry(pos, &head, node) { | 282 | notes = symbol__annotation(sym); |
283 | |||
284 | list_for_each_entry(pos, ¬es->src->source, node) { | ||
285 | struct objdump_line_rb_node *rbpos; | ||
216 | size_t line_len = strlen(pos->line); | 286 | size_t line_len = strlen(pos->line); |
287 | |||
217 | if (browser.b.width < line_len) | 288 | if (browser.b.width < line_len) |
218 | browser.b.width = line_len; | 289 | browser.b.width = line_len; |
219 | rbpos = objdump_line__rb(pos); | 290 | rbpos = objdump_line__rb(pos); |
220 | rbpos->idx = browser.b.nr_entries++; | 291 | rbpos->idx = browser.b.nr_entries++; |
221 | rbpos->percent = objdump_line__calc_percent(pos, &head, self->ms.sym); | ||
222 | if (rbpos->percent < 0.01) | ||
223 | continue; | ||
224 | objdump__insert_line(&browser.entries, rbpos); | ||
225 | } | 292 | } |
226 | 293 | ||
227 | /* | 294 | browser.b.entries = ¬es->src->source, |
228 | * Position the browser at the hottest line. | ||
229 | */ | ||
230 | browser.curr_hot = rb_last(&browser.entries); | ||
231 | if (browser.curr_hot) | ||
232 | annotate_browser__set_top(&browser, browser.curr_hot); | ||
233 | |||
234 | browser.b.width += 18; /* Percentage */ | 295 | browser.b.width += 18; /* Percentage */ |
235 | ret = annotate_browser__run(&browser, &es); | 296 | ret = annotate_browser__run(&browser, evidx, refresh); |
236 | list_for_each_entry_safe(pos, n, &head, node) { | 297 | list_for_each_entry_safe(pos, n, ¬es->src->source, node) { |
237 | list_del(&pos->node); | 298 | list_del(&pos->node); |
238 | objdump_line__free(pos); | 299 | objdump_line__free(pos); |
239 | } | 300 | } |
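The annotate browser is now addressed by symbol and map plus the index of the event being annotated, and the extra refresh argument (a newtFormSetTimer period) lets perf top re-read the live histograms while the browser is open. Two illustrative call sites, assuming `he`, `sym`, `map`, `evidx` and `delay_secs` are provided by the caller:

    /* perf report style: annotate once, no timer */
    hist_entry__tui_annotate(he, evidx);

    /* perf top style: recompute the per-line percentages periodically */
    symbol__tui_annotate(sym, map, evidx, delay_secs * 1000);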
diff --git a/tools/perf/util/ui/browsers/hists.c b/tools/perf/util/ui/browsers/hists.c index 6866aa4c41e0..5d767c622dfc 100644 --- a/tools/perf/util/ui/browsers/hists.c +++ b/tools/perf/util/ui/browsers/hists.c | |||
@@ -7,6 +7,8 @@ | |||
7 | #include <newt.h> | 7 | #include <newt.h> |
8 | #include <linux/rbtree.h> | 8 | #include <linux/rbtree.h> |
9 | 9 | ||
10 | #include "../../evsel.h" | ||
11 | #include "../../evlist.h" | ||
10 | #include "../../hist.h" | 12 | #include "../../hist.h" |
11 | #include "../../pstack.h" | 13 | #include "../../pstack.h" |
12 | #include "../../sort.h" | 14 | #include "../../sort.h" |
@@ -58,6 +60,11 @@ static char callchain_list__folded(const struct callchain_list *self) | |||
58 | return map_symbol__folded(&self->ms); | 60 | return map_symbol__folded(&self->ms); |
59 | } | 61 | } |
60 | 62 | ||
63 | static void map_symbol__set_folding(struct map_symbol *self, bool unfold) | ||
64 | { | ||
65 | self->unfolded = unfold ? self->has_children : false; | ||
66 | } | ||
67 | |||
61 | static int callchain_node__count_rows_rb_tree(struct callchain_node *self) | 68 | static int callchain_node__count_rows_rb_tree(struct callchain_node *self) |
62 | { | 69 | { |
63 | int n = 0; | 70 | int n = 0; |
@@ -129,16 +136,16 @@ static void callchain_node__init_have_children_rb_tree(struct callchain_node *se | |||
129 | for (nd = rb_first(&self->rb_root); nd; nd = rb_next(nd)) { | 136 | for (nd = rb_first(&self->rb_root); nd; nd = rb_next(nd)) { |
130 | struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node); | 137 | struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node); |
131 | struct callchain_list *chain; | 138 | struct callchain_list *chain; |
132 | int first = true; | 139 | bool first = true; |
133 | 140 | ||
134 | list_for_each_entry(chain, &child->val, list) { | 141 | list_for_each_entry(chain, &child->val, list) { |
135 | if (first) { | 142 | if (first) { |
136 | first = false; | 143 | first = false; |
137 | chain->ms.has_children = chain->list.next != &child->val || | 144 | chain->ms.has_children = chain->list.next != &child->val || |
138 | rb_first(&child->rb_root) != NULL; | 145 | !RB_EMPTY_ROOT(&child->rb_root); |
139 | } else | 146 | } else |
140 | chain->ms.has_children = chain->list.next == &child->val && | 147 | chain->ms.has_children = chain->list.next == &child->val && |
141 | rb_first(&child->rb_root) != NULL; | 148 | !RB_EMPTY_ROOT(&child->rb_root); |
142 | } | 149 | } |
143 | 150 | ||
144 | callchain_node__init_have_children_rb_tree(child); | 151 | callchain_node__init_have_children_rb_tree(child); |
@@ -150,7 +157,7 @@ static void callchain_node__init_have_children(struct callchain_node *self) | |||
150 | struct callchain_list *chain; | 157 | struct callchain_list *chain; |
151 | 158 | ||
152 | list_for_each_entry(chain, &self->val, list) | 159 | list_for_each_entry(chain, &self->val, list) |
153 | chain->ms.has_children = rb_first(&self->rb_root) != NULL; | 160 | chain->ms.has_children = !RB_EMPTY_ROOT(&self->rb_root); |
154 | 161 | ||
155 | callchain_node__init_have_children_rb_tree(self); | 162 | callchain_node__init_have_children_rb_tree(self); |
156 | } | 163 | } |
@@ -168,6 +175,7 @@ static void callchain__init_have_children(struct rb_root *self) | |||
168 | static void hist_entry__init_have_children(struct hist_entry *self) | 175 | static void hist_entry__init_have_children(struct hist_entry *self) |
169 | { | 176 | { |
170 | if (!self->init_have_children) { | 177 | if (!self->init_have_children) { |
178 | self->ms.has_children = !RB_EMPTY_ROOT(&self->sorted_chain); | ||
171 | callchain__init_have_children(&self->sorted_chain); | 179 | callchain__init_have_children(&self->sorted_chain); |
172 | self->init_have_children = true; | 180 | self->init_have_children = true; |
173 | } | 181 | } |
@@ -195,43 +203,115 @@ static bool hist_browser__toggle_fold(struct hist_browser *self) | |||
195 | return false; | 203 | return false; |
196 | } | 204 | } |
197 | 205 | ||
198 | static int hist_browser__run(struct hist_browser *self, const char *title, | 206 | static int callchain_node__set_folding_rb_tree(struct callchain_node *self, bool unfold) |
199 | struct newtExitStruct *es) | ||
200 | { | 207 | { |
201 | char str[256], unit; | 208 | int n = 0; |
202 | unsigned long nr_events = self->hists->stats.nr_events[PERF_RECORD_SAMPLE]; | 209 | struct rb_node *nd; |
210 | |||
211 | for (nd = rb_first(&self->rb_root); nd; nd = rb_next(nd)) { | ||
212 | struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node); | ||
213 | struct callchain_list *chain; | ||
214 | bool has_children = false; | ||
215 | |||
216 | list_for_each_entry(chain, &child->val, list) { | ||
217 | ++n; | ||
218 | map_symbol__set_folding(&chain->ms, unfold); | ||
219 | has_children = chain->ms.has_children; | ||
220 | } | ||
221 | |||
222 | if (has_children) | ||
223 | n += callchain_node__set_folding_rb_tree(child, unfold); | ||
224 | } | ||
225 | |||
226 | return n; | ||
227 | } | ||
228 | |||
229 | static int callchain_node__set_folding(struct callchain_node *node, bool unfold) | ||
230 | { | ||
231 | struct callchain_list *chain; | ||
232 | bool has_children = false; | ||
233 | int n = 0; | ||
234 | |||
235 | list_for_each_entry(chain, &node->val, list) { | ||
236 | ++n; | ||
237 | map_symbol__set_folding(&chain->ms, unfold); | ||
238 | has_children = chain->ms.has_children; | ||
239 | } | ||
240 | |||
241 | if (has_children) | ||
242 | n += callchain_node__set_folding_rb_tree(node, unfold); | ||
243 | |||
244 | return n; | ||
245 | } | ||
246 | |||
247 | static int callchain__set_folding(struct rb_root *chain, bool unfold) | ||
248 | { | ||
249 | struct rb_node *nd; | ||
250 | int n = 0; | ||
251 | |||
252 | for (nd = rb_first(chain); nd; nd = rb_next(nd)) { | ||
253 | struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node); | ||
254 | n += callchain_node__set_folding(node, unfold); | ||
255 | } | ||
256 | |||
257 | return n; | ||
258 | } | ||
259 | |||
260 | static void hist_entry__set_folding(struct hist_entry *self, bool unfold) | ||
261 | { | ||
262 | hist_entry__init_have_children(self); | ||
263 | map_symbol__set_folding(&self->ms, unfold); | ||
264 | |||
265 | if (self->ms.has_children) { | ||
266 | int n = callchain__set_folding(&self->sorted_chain, unfold); | ||
267 | self->nr_rows = unfold ? n : 0; | ||
268 | } else | ||
269 | self->nr_rows = 0; | ||
270 | } | ||
271 | |||
272 | static void hists__set_folding(struct hists *self, bool unfold) | ||
273 | { | ||
274 | struct rb_node *nd; | ||
275 | |||
276 | self->nr_entries = 0; | ||
277 | |||
278 | for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) { | ||
279 | struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node); | ||
280 | hist_entry__set_folding(he, unfold); | ||
281 | self->nr_entries += 1 + he->nr_rows; | ||
282 | } | ||
283 | } | ||
284 | |||
285 | static void hist_browser__set_folding(struct hist_browser *self, bool unfold) | ||
286 | { | ||
287 | hists__set_folding(self->hists, unfold); | ||
288 | self->b.nr_entries = self->hists->nr_entries; | ||
289 | /* Go to the start, we may be way after valid entries after a collapse */ | ||
290 | ui_browser__reset_index(&self->b); | ||
291 | } | ||
292 | |||
293 | static int hist_browser__run(struct hist_browser *self, const char *title) | ||
294 | { | ||
295 | int key; | ||
296 | int exit_keys[] = { 'a', '?', 'h', 'C', 'd', 'D', 'E', 't', | ||
297 | NEWT_KEY_ENTER, NEWT_KEY_RIGHT, NEWT_KEY_LEFT, | ||
298 | NEWT_KEY_TAB, NEWT_KEY_UNTAB, 0, }; | ||
203 | 299 | ||
204 | self->b.entries = &self->hists->entries; | 300 | self->b.entries = &self->hists->entries; |
205 | self->b.nr_entries = self->hists->nr_entries; | 301 | self->b.nr_entries = self->hists->nr_entries; |
206 | 302 | ||
207 | hist_browser__refresh_dimensions(self); | 303 | hist_browser__refresh_dimensions(self); |
208 | 304 | ||
209 | nr_events = convert_unit(nr_events, &unit); | ||
210 | snprintf(str, sizeof(str), "Events: %lu%c ", | ||
211 | nr_events, unit); | ||
212 | newtDrawRootText(0, 0, str); | ||
213 | |||
214 | if (ui_browser__show(&self->b, title, | 305 | if (ui_browser__show(&self->b, title, |
215 | "Press '?' for help on key bindings") < 0) | 306 | "Press '?' for help on key bindings") < 0) |
216 | return -1; | 307 | return -1; |
217 | 308 | ||
218 | newtFormAddHotKey(self->b.form, 'a'); | 309 | ui_browser__add_exit_keys(&self->b, exit_keys); |
219 | newtFormAddHotKey(self->b.form, '?'); | ||
220 | newtFormAddHotKey(self->b.form, 'h'); | ||
221 | newtFormAddHotKey(self->b.form, 'd'); | ||
222 | newtFormAddHotKey(self->b.form, 'D'); | ||
223 | newtFormAddHotKey(self->b.form, 't'); | ||
224 | |||
225 | newtFormAddHotKey(self->b.form, NEWT_KEY_LEFT); | ||
226 | newtFormAddHotKey(self->b.form, NEWT_KEY_RIGHT); | ||
227 | newtFormAddHotKey(self->b.form, NEWT_KEY_ENTER); | ||
228 | 310 | ||
229 | while (1) { | 311 | while (1) { |
230 | ui_browser__run(&self->b, es); | 312 | key = ui_browser__run(&self->b); |
231 | 313 | ||
232 | if (es->reason != NEWT_EXIT_HOTKEY) | 314 | switch (key) { |
233 | break; | ||
234 | switch (es->u.key) { | ||
235 | case 'D': { /* Debug */ | 315 | case 'D': { /* Debug */ |
236 | static int seq; | 316 | static int seq; |
237 | struct hist_entry *h = rb_entry(self->b.top, | 317 | struct hist_entry *h = rb_entry(self->b.top, |
@@ -245,18 +325,26 @@ static int hist_browser__run(struct hist_browser *self, const char *title, | |||
245 | self->b.top_idx, | 325 | self->b.top_idx, |
246 | h->row_offset, h->nr_rows); | 326 | h->row_offset, h->nr_rows); |
247 | } | 327 | } |
248 | continue; | 328 | break; |
329 | case 'C': | ||
330 | /* Collapse the whole world. */ | ||
331 | hist_browser__set_folding(self, false); | ||
332 | break; | ||
333 | case 'E': | ||
334 | /* Expand the whole world. */ | ||
335 | hist_browser__set_folding(self, true); | ||
336 | break; | ||
249 | case NEWT_KEY_ENTER: | 337 | case NEWT_KEY_ENTER: |
250 | if (hist_browser__toggle_fold(self)) | 338 | if (hist_browser__toggle_fold(self)) |
251 | break; | 339 | break; |
252 | /* fall thru */ | 340 | /* fall thru */ |
253 | default: | 341 | default: |
254 | return 0; | 342 | goto out; |
255 | } | 343 | } |
256 | } | 344 | } |
257 | 345 | out: | |
258 | ui_browser__hide(&self->b); | 346 | ui_browser__hide(&self->b); |
259 | return 0; | 347 | return key; |
260 | } | 348 | } |
261 | 349 | ||
262 | static char *callchain_list__sym_name(struct callchain_list *self, | 350 | static char *callchain_list__sym_name(struct callchain_list *self, |
@@ -265,7 +353,7 @@ static char *callchain_list__sym_name(struct callchain_list *self, | |||
265 | if (self->ms.sym) | 353 | if (self->ms.sym) |
266 | return self->ms.sym->name; | 354 | return self->ms.sym->name; |
267 | 355 | ||
268 | snprintf(bf, bfsize, "%#Lx", self->ip); | 356 | snprintf(bf, bfsize, "%#" PRIx64, self->ip); |
269 | return bf; | 357 | return bf; |
270 | } | 358 | } |
271 | 359 | ||
@@ -292,7 +380,7 @@ static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *self, | |||
292 | while (node) { | 380 | while (node) { |
293 | struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node); | 381 | struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node); |
294 | struct rb_node *next = rb_next(node); | 382 | struct rb_node *next = rb_next(node); |
295 | u64 cumul = cumul_hits(child); | 383 | u64 cumul = callchain_cumul_hits(child); |
296 | struct callchain_list *chain; | 384 | struct callchain_list *chain; |
297 | char folded_sign = ' '; | 385 | char folded_sign = ' '; |
298 | int first = true; | 386 | int first = true; |
@@ -306,15 +394,10 @@ static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *self, | |||
306 | int color; | 394 | int color; |
307 | bool was_first = first; | 395 | bool was_first = first; |
308 | 396 | ||
309 | if (first) { | 397 | if (first) |
310 | first = false; | 398 | first = false; |
311 | chain->ms.has_children = chain->list.next != &child->val || | 399 | else |
312 | rb_first(&child->rb_root) != NULL; | ||
313 | } else { | ||
314 | extra_offset = LEVEL_OFFSET_STEP; | 400 | extra_offset = LEVEL_OFFSET_STEP; |
315 | chain->ms.has_children = chain->list.next == &child->val && | ||
316 | rb_first(&child->rb_root) != NULL; | ||
317 | } | ||
318 | 401 | ||
319 | folded_sign = callchain_list__folded(chain); | 402 | folded_sign = callchain_list__folded(chain); |
320 | if (*row_offset != 0) { | 403 | if (*row_offset != 0) { |
@@ -341,8 +424,8 @@ static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *self, | |||
341 | *is_current_entry = true; | 424 | *is_current_entry = true; |
342 | } | 425 | } |
343 | 426 | ||
344 | SLsmg_set_color(color); | 427 | ui_browser__set_color(&self->b, color); |
345 | SLsmg_gotorc(self->b.y + row, self->b.x); | 428 | ui_browser__gotorc(&self->b, row, 0); |
346 | slsmg_write_nstring(" ", offset + extra_offset); | 429 | slsmg_write_nstring(" ", offset + extra_offset); |
347 | slsmg_printf("%c ", folded_sign); | 430 | slsmg_printf("%c ", folded_sign); |
348 | slsmg_write_nstring(str, width); | 431 | slsmg_write_nstring(str, width); |
@@ -384,12 +467,7 @@ static int hist_browser__show_callchain_node(struct hist_browser *self, | |||
384 | list_for_each_entry(chain, &node->val, list) { | 467 | list_for_each_entry(chain, &node->val, list) { |
385 | char ipstr[BITS_PER_LONG / 4 + 1], *s; | 468 | char ipstr[BITS_PER_LONG / 4 + 1], *s; |
386 | int color; | 469 | int color; |
387 | /* | 470 | |
388 | * FIXME: This should be moved to somewhere else, | ||
389 | * probably when the callchain is created, so as not to | ||
390 | * traverse it all over again | ||
391 | */ | ||
392 | chain->ms.has_children = rb_first(&node->rb_root) != NULL; | ||
393 | folded_sign = callchain_list__folded(chain); | 471 | folded_sign = callchain_list__folded(chain); |
394 | 472 | ||
395 | if (*row_offset != 0) { | 473 | if (*row_offset != 0) { |
@@ -405,8 +483,8 @@ static int hist_browser__show_callchain_node(struct hist_browser *self, | |||
405 | } | 483 | } |
406 | 484 | ||
407 | s = callchain_list__sym_name(chain, ipstr, sizeof(ipstr)); | 485 | s = callchain_list__sym_name(chain, ipstr, sizeof(ipstr)); |
408 | SLsmg_gotorc(self->b.y + row, self->b.x); | 486 | ui_browser__gotorc(&self->b, row, 0); |
409 | SLsmg_set_color(color); | 487 | ui_browser__set_color(&self->b, color); |
410 | slsmg_write_nstring(" ", offset); | 488 | slsmg_write_nstring(" ", offset); |
411 | slsmg_printf("%c ", folded_sign); | 489 | slsmg_printf("%c ", folded_sign); |
412 | slsmg_write_nstring(s, width - 2); | 490 | slsmg_write_nstring(s, width - 2); |
@@ -465,7 +543,7 @@ static int hist_browser__show_entry(struct hist_browser *self, | |||
465 | } | 543 | } |
466 | 544 | ||
467 | if (symbol_conf.use_callchain) { | 545 | if (symbol_conf.use_callchain) { |
468 | entry->ms.has_children = !RB_EMPTY_ROOT(&entry->sorted_chain); | 546 | hist_entry__init_have_children(entry); |
469 | folded_sign = hist_entry__folded(entry); | 547 | folded_sign = hist_entry__folded(entry); |
470 | } | 548 | } |
471 | 549 | ||
@@ -484,8 +562,8 @@ static int hist_browser__show_entry(struct hist_browser *self, | |||
484 | color = HE_COLORSET_NORMAL; | 562 | color = HE_COLORSET_NORMAL; |
485 | } | 563 | } |
486 | 564 | ||
487 | SLsmg_set_color(color); | 565 | ui_browser__set_color(&self->b, color); |
488 | SLsmg_gotorc(self->b.y + row, self->b.x); | 566 | ui_browser__gotorc(&self->b, row, 0); |
489 | if (symbol_conf.use_callchain) { | 567 | if (symbol_conf.use_callchain) { |
490 | slsmg_printf("%c ", folded_sign); | 568 | slsmg_printf("%c ", folded_sign); |
491 | width -= 2; | 569 | width -= 2; |
@@ -563,6 +641,9 @@ static void ui_browser__hists_seek(struct ui_browser *self, | |||
563 | struct rb_node *nd; | 641 | struct rb_node *nd; |
564 | bool first = true; | 642 | bool first = true; |
565 | 643 | ||
644 | if (self->nr_entries == 0) | ||
645 | return; | ||
646 | |||
566 | switch (whence) { | 647 | switch (whence) { |
567 | case SEEK_SET: | 648 | case SEEK_SET: |
568 | nd = hists__filter_entries(rb_first(self->entries)); | 649 | nd = hists__filter_entries(rb_first(self->entries)); |
@@ -687,8 +768,6 @@ static struct hist_browser *hist_browser__new(struct hists *hists) | |||
687 | 768 | ||
688 | static void hist_browser__delete(struct hist_browser *self) | 769 | static void hist_browser__delete(struct hist_browser *self) |
689 | { | 770 | { |
690 | newtFormDestroy(self->b.form); | ||
691 | newtPopWindow(); | ||
692 | free(self); | 771 | free(self); |
693 | } | 772 | } |
694 | 773 | ||
@@ -702,30 +781,37 @@ static struct thread *hist_browser__selected_thread(struct hist_browser *self) | |||
702 | return self->he_selection->thread; | 781 | return self->he_selection->thread; |
703 | } | 782 | } |
704 | 783 | ||
705 | static int hist_browser__title(char *bf, size_t size, const char *ev_name, | 784 | static int hists__browser_title(struct hists *self, char *bf, size_t size, |
706 | const struct dso *dso, const struct thread *thread) | 785 | const char *ev_name, const struct dso *dso, |
786 | const struct thread *thread) | ||
707 | { | 787 | { |
708 | int printed = 0; | 788 | char unit; |
789 | int printed; | ||
790 | unsigned long nr_events = self->stats.nr_events[PERF_RECORD_SAMPLE]; | ||
791 | |||
792 | nr_events = convert_unit(nr_events, &unit); | ||
793 | printed = snprintf(bf, size, "Events: %lu%c %s", nr_events, unit, ev_name); | ||
709 | 794 | ||
710 | if (thread) | 795 | if (thread) |
711 | printed += snprintf(bf + printed, size - printed, | 796 | printed += snprintf(bf + printed, size - printed, |
712 | "Thread: %s(%d)", | 797 | ", Thread: %s(%d)", |
713 | (thread->comm_set ? thread->comm : ""), | 798 | (thread->comm_set ? thread->comm : ""), |
714 | thread->pid); | 799 | thread->pid); |
715 | if (dso) | 800 | if (dso) |
716 | printed += snprintf(bf + printed, size - printed, | 801 | printed += snprintf(bf + printed, size - printed, |
717 | "%sDSO: %s", thread ? " " : "", | 802 | ", DSO: %s", dso->short_name); |
718 | dso->short_name); | 803 | return printed; |
719 | return printed ?: snprintf(bf, size, "Event: %s", ev_name); | ||
720 | } | 804 | } |
721 | 805 | ||
722 | int hists__browse(struct hists *self, const char *helpline, const char *ev_name) | 806 | static int perf_evsel__hists_browse(struct perf_evsel *evsel, |
807 | const char *helpline, const char *ev_name, | ||
808 | bool left_exits) | ||
723 | { | 809 | { |
810 | struct hists *self = &evsel->hists; | ||
724 | struct hist_browser *browser = hist_browser__new(self); | 811 | struct hist_browser *browser = hist_browser__new(self); |
725 | struct pstack *fstack; | 812 | struct pstack *fstack; |
726 | const struct thread *thread_filter = NULL; | 813 | const struct thread *thread_filter = NULL; |
727 | const struct dso *dso_filter = NULL; | 814 | const struct dso *dso_filter = NULL; |
728 | struct newtExitStruct es; | ||
729 | char msg[160]; | 815 | char msg[160]; |
730 | int key = -1; | 816 | int key = -1; |
731 | 817 | ||
@@ -738,84 +824,88 @@ int hists__browse(struct hists *self, const char *helpline, const char *ev_name) | |||
738 | 824 | ||
739 | ui_helpline__push(helpline); | 825 | ui_helpline__push(helpline); |
740 | 826 | ||
741 | hist_browser__title(msg, sizeof(msg), ev_name, | 827 | hists__browser_title(self, msg, sizeof(msg), ev_name, |
742 | dso_filter, thread_filter); | 828 | dso_filter, thread_filter); |
743 | |||
744 | while (1) { | 829 | while (1) { |
745 | const struct thread *thread; | 830 | const struct thread *thread = NULL; |
746 | const struct dso *dso; | 831 | const struct dso *dso = NULL; |
747 | char *options[16]; | 832 | char *options[16]; |
748 | int nr_options = 0, choice = 0, i, | 833 | int nr_options = 0, choice = 0, i, |
749 | annotate = -2, zoom_dso = -2, zoom_thread = -2, | 834 | annotate = -2, zoom_dso = -2, zoom_thread = -2, |
750 | browse_map = -2; | 835 | browse_map = -2; |
751 | 836 | ||
752 | if (hist_browser__run(browser, msg, &es)) | 837 | key = hist_browser__run(browser, msg); |
753 | break; | ||
754 | 838 | ||
755 | thread = hist_browser__selected_thread(browser); | 839 | if (browser->he_selection != NULL) { |
756 | dso = browser->selection->map ? browser->selection->map->dso : NULL; | 840 | thread = hist_browser__selected_thread(browser); |
841 | dso = browser->selection->map ? browser->selection->map->dso : NULL; | ||
842 | } | ||
757 | 843 | ||
758 | if (es.reason == NEWT_EXIT_HOTKEY) { | 844 | switch (key) { |
759 | key = es.u.key; | 845 | case NEWT_KEY_TAB: |
846 | case NEWT_KEY_UNTAB: | ||
847 | /* | ||
848 | * Exit the browser, let hists__browser_tree | ||
849 | * go to the next or previous | ||
850 | */ | ||
851 | goto out_free_stack; | ||
852 | case 'a': | ||
853 | if (browser->selection == NULL || | ||
854 | browser->selection->sym == NULL || | ||
855 | browser->selection->map->dso->annotate_warned) | ||
856 | continue; | ||
857 | goto do_annotate; | ||
858 | case 'd': | ||
859 | goto zoom_dso; | ||
860 | case 't': | ||
861 | goto zoom_thread; | ||
862 | case NEWT_KEY_F1: | ||
863 | case 'h': | ||
864 | case '?': | ||
865 | ui__help_window("-> Zoom into DSO/Threads & Annotate current symbol\n" | ||
866 | "<- Zoom out\n" | ||
867 | "a Annotate current symbol\n" | ||
868 | "h/?/F1 Show this window\n" | ||
869 | "C Collapse all callchains\n" | ||
870 | "E Expand all callchains\n" | ||
871 | "d Zoom into current DSO\n" | ||
872 | "t Zoom into current Thread\n" | ||
873 | "TAB/UNTAB Switch events\n" | ||
874 | "q/CTRL+C Exit browser"); | ||
875 | continue; | ||
876 | case NEWT_KEY_ENTER: | ||
877 | case NEWT_KEY_RIGHT: | ||
878 | /* menu */ | ||
879 | break; | ||
880 | case NEWT_KEY_LEFT: { | ||
881 | const void *top; | ||
760 | 882 | ||
761 | switch (key) { | 883 | if (pstack__empty(fstack)) { |
762 | case NEWT_KEY_F1: | ||
763 | goto do_help; | ||
764 | case NEWT_KEY_TAB: | ||
765 | case NEWT_KEY_UNTAB: | ||
766 | /* | 884 | /* |
767 | * Exit the browser, let hists__browser_tree | 885 | * Go back to the perf_evsel_menu__run or other user |
768 | * go to the next or previous | ||
769 | */ | 886 | */ |
770 | goto out_free_stack; | 887 | if (left_exits) |
771 | default:; | 888 | goto out_free_stack; |
772 | } | ||
773 | |||
774 | switch (key) { | ||
775 | case 'a': | ||
776 | if (browser->selection->map == NULL || | ||
777 | browser->selection->map->dso->annotate_warned) | ||
778 | continue; | ||
779 | goto do_annotate; | ||
780 | case 'd': | ||
781 | goto zoom_dso; | ||
782 | case 't': | ||
783 | goto zoom_thread; | ||
784 | case 'h': | ||
785 | case '?': | ||
786 | do_help: | ||
787 | ui__help_window("-> Zoom into DSO/Threads & Annotate current symbol\n" | ||
788 | "<- Zoom out\n" | ||
789 | "a Annotate current symbol\n" | ||
790 | "h/?/F1 Show this window\n" | ||
791 | "d Zoom into current DSO\n" | ||
792 | "t Zoom into current Thread\n" | ||
793 | "q/CTRL+C Exit browser"); | ||
794 | continue; | 889 | continue; |
795 | default:; | ||
796 | } | 890 | } |
797 | if (is_exit_key(key)) { | 891 | top = pstack__pop(fstack); |
798 | if (key == NEWT_KEY_ESCAPE && | 892 | if (top == &dso_filter) |
799 | !ui__dialog_yesno("Do you really want to exit?")) | 893 | goto zoom_out_dso; |
800 | continue; | 894 | if (top == &thread_filter) |
801 | break; | 895 | goto zoom_out_thread; |
802 | } | 896 | continue; |
803 | 897 | } | |
804 | if (es.u.key == NEWT_KEY_LEFT) { | 898 | case NEWT_KEY_ESCAPE: |
805 | const void *top; | 899 | if (!left_exits && |
806 | 900 | !ui__dialog_yesno("Do you really want to exit?")) | |
807 | if (pstack__empty(fstack)) | ||
808 | continue; | ||
809 | top = pstack__pop(fstack); | ||
810 | if (top == &dso_filter) | ||
811 | goto zoom_out_dso; | ||
812 | if (top == &thread_filter) | ||
813 | goto zoom_out_thread; | ||
814 | continue; | 901 | continue; |
815 | } | 902 | /* Fall thru */ |
903 | default: | ||
904 | goto out_free_stack; | ||
816 | } | 905 | } |
817 | 906 | ||
818 | if (browser->selection->sym != NULL && | 907 | if (browser->selection != NULL && |
908 | browser->selection->sym != NULL && | ||
819 | !browser->selection->map->dso->annotate_warned && | 909 | !browser->selection->map->dso->annotate_warned && |
820 | asprintf(&options[nr_options], "Annotate %s", | 910 | asprintf(&options[nr_options], "Annotate %s", |
821 | browser->selection->sym->name) > 0) | 911 | browser->selection->sym->name) > 0) |
@@ -834,7 +924,8 @@ do_help: | |||
834 | (dso->kernel ? "the Kernel" : dso->short_name)) > 0) | 924 | (dso->kernel ? "the Kernel" : dso->short_name)) > 0) |
835 | zoom_dso = nr_options++; | 925 | zoom_dso = nr_options++; |
836 | 926 | ||
837 | if (browser->selection->map != NULL && | 927 | if (browser->selection != NULL && |
928 | browser->selection->map != NULL && | ||
838 | asprintf(&options[nr_options], "Browse map details") > 0) | 929 | asprintf(&options[nr_options], "Browse map details") > 0) |
839 | browse_map = nr_options++; | 930 | browse_map = nr_options++; |
840 | 931 | ||
@@ -854,19 +945,11 @@ do_help: | |||
854 | if (choice == annotate) { | 945 | if (choice == annotate) { |
855 | struct hist_entry *he; | 946 | struct hist_entry *he; |
856 | do_annotate: | 947 | do_annotate: |
857 | if (browser->selection->map->dso->origin == DSO__ORIG_KERNEL) { | ||
858 | browser->selection->map->dso->annotate_warned = 1; | ||
859 | ui_helpline__puts("No vmlinux file found, can't " | ||
860 | "annotate with just a " | ||
861 | "kallsyms file"); | ||
862 | continue; | ||
863 | } | ||
864 | |||
865 | he = hist_browser__selected_entry(browser); | 948 | he = hist_browser__selected_entry(browser); |
866 | if (he == NULL) | 949 | if (he == NULL) |
867 | continue; | 950 | continue; |
868 | 951 | ||
869 | hist_entry__tui_annotate(he); | 952 | hist_entry__tui_annotate(he, evsel->idx); |
870 | } else if (choice == browse_map) | 953 | } else if (choice == browse_map) |
871 | map__browse(browser->selection->map); | 954 | map__browse(browser->selection->map); |
872 | else if (choice == zoom_dso) { | 955 | else if (choice == zoom_dso) { |
@@ -885,8 +968,8 @@ zoom_out_dso: | |||
885 | pstack__push(fstack, &dso_filter); | 968 | pstack__push(fstack, &dso_filter); |
886 | } | 969 | } |
887 | hists__filter_by_dso(self, dso_filter); | 970 | hists__filter_by_dso(self, dso_filter); |
888 | hist_browser__title(msg, sizeof(msg), ev_name, | 971 | hists__browser_title(self, msg, sizeof(msg), ev_name, |
889 | dso_filter, thread_filter); | 972 | dso_filter, thread_filter); |
890 | hist_browser__reset(browser); | 973 | hist_browser__reset(browser); |
891 | } else if (choice == zoom_thread) { | 974 | } else if (choice == zoom_thread) { |
892 | zoom_thread: | 975 | zoom_thread: |
@@ -903,8 +986,8 @@ zoom_out_thread: | |||
903 | pstack__push(fstack, &thread_filter); | 986 | pstack__push(fstack, &thread_filter); |
904 | } | 987 | } |
905 | hists__filter_by_thread(self, thread_filter); | 988 | hists__filter_by_thread(self, thread_filter); |
906 | hist_browser__title(msg, sizeof(msg), ev_name, | 989 | hists__browser_title(self, msg, sizeof(msg), ev_name, |
907 | dso_filter, thread_filter); | 990 | dso_filter, thread_filter); |
908 | hist_browser__reset(browser); | 991 | hist_browser__reset(browser); |
909 | } | 992 | } |
910 | } | 993 | } |
@@ -915,34 +998,141 @@ out: | |||
915 | return key; | 998 | return key; |
916 | } | 999 | } |
917 | 1000 | ||
918 | int hists__tui_browse_tree(struct rb_root *self, const char *help) | 1001 | struct perf_evsel_menu { |
1002 | struct ui_browser b; | ||
1003 | struct perf_evsel *selection; | ||
1004 | }; | ||
1005 | |||
1006 | static void perf_evsel_menu__write(struct ui_browser *browser, | ||
1007 | void *entry, int row) | ||
919 | { | 1008 | { |
920 | struct rb_node *first = rb_first(self), *nd = first, *next; | 1009 | struct perf_evsel_menu *menu = container_of(browser, |
921 | int key = 0; | 1010 | struct perf_evsel_menu, b); |
1011 | struct perf_evsel *evsel = list_entry(entry, struct perf_evsel, node); | ||
1012 | bool current_entry = ui_browser__is_current_entry(browser, row); | ||
1013 | unsigned long nr_events = evsel->hists.stats.nr_events[PERF_RECORD_SAMPLE]; | ||
1014 | const char *ev_name = event_name(evsel); | ||
1015 | char bf[256], unit; | ||
1016 | |||
1017 | ui_browser__set_color(browser, current_entry ? HE_COLORSET_SELECTED : | ||
1018 | HE_COLORSET_NORMAL); | ||
1019 | |||
1020 | nr_events = convert_unit(nr_events, &unit); | ||
1021 | snprintf(bf, sizeof(bf), "%lu%c%s%s", nr_events, | ||
1022 | unit, unit == ' ' ? "" : " ", ev_name); | ||
1023 | slsmg_write_nstring(bf, browser->width); | ||
922 | 1024 | ||
923 | while (nd) { | 1025 | if (current_entry) |
924 | struct hists *hists = rb_entry(nd, struct hists, rb_node); | 1026 | menu->selection = evsel; |
925 | const char *ev_name = __event_name(hists->type, hists->config); | 1027 | } |
926 | 1028 | ||
927 | key = hists__browse(hists, help, ev_name); | 1029 | static int perf_evsel_menu__run(struct perf_evsel_menu *menu, const char *help) |
1030 | { | ||
1031 | int exit_keys[] = { NEWT_KEY_ENTER, NEWT_KEY_RIGHT, 0, }; | ||
1032 | struct perf_evlist *evlist = menu->b.priv; | ||
1033 | struct perf_evsel *pos; | ||
1034 | const char *ev_name, *title = "Available samples"; | ||
1035 | int key; | ||
1036 | |||
1037 | if (ui_browser__show(&menu->b, title, | ||
1038 | "ESC: exit, ENTER|->: Browse histograms") < 0) | ||
1039 | return -1; | ||
928 | 1040 | ||
929 | if (is_exit_key(key)) | 1041 | ui_browser__add_exit_keys(&menu->b, exit_keys); |
1042 | |||
1043 | while (1) { | ||
1044 | key = ui_browser__run(&menu->b); | ||
1045 | |||
1046 | switch (key) { | ||
1047 | case NEWT_KEY_RIGHT: | ||
1048 | case NEWT_KEY_ENTER: | ||
1049 | if (!menu->selection) | ||
1050 | continue; | ||
1051 | pos = menu->selection; | ||
1052 | browse_hists: | ||
1053 | ev_name = event_name(pos); | ||
1054 | key = perf_evsel__hists_browse(pos, help, ev_name, true); | ||
1055 | ui_browser__show_title(&menu->b, title); | ||
930 | break; | 1056 | break; |
1057 | case NEWT_KEY_LEFT: | ||
1058 | continue; | ||
1059 | case NEWT_KEY_ESCAPE: | ||
1060 | if (!ui__dialog_yesno("Do you really want to exit?")) | ||
1061 | continue; | ||
1062 | /* Fall thru */ | ||
1063 | default: | ||
1064 | goto out; | ||
1065 | } | ||
931 | 1066 | ||
932 | switch (key) { | 1067 | switch (key) { |
933 | case NEWT_KEY_TAB: | 1068 | case NEWT_KEY_TAB: |
934 | next = rb_next(nd); | 1069 | if (pos->node.next == &evlist->entries) |
935 | if (next) | 1070 | pos = list_entry(evlist->entries.next, struct perf_evsel, node); |
936 | nd = next; | 1071 | else |
937 | break; | 1072 | pos = list_entry(pos->node.next, struct perf_evsel, node); |
1073 | goto browse_hists; | ||
938 | case NEWT_KEY_UNTAB: | 1074 | case NEWT_KEY_UNTAB: |
939 | if (nd == first) | 1075 | if (pos->node.prev == &evlist->entries) |
940 | continue; | 1076 | pos = list_entry(evlist->entries.prev, struct perf_evsel, node); |
941 | nd = rb_prev(nd); | 1077 | else |
1078 | pos = list_entry(pos->node.prev, struct perf_evsel, node); | ||
1079 | goto browse_hists; | ||
1080 | case 'q': | ||
1081 | case CTRL('c'): | ||
1082 | goto out; | ||
942 | default: | 1083 | default: |
943 | break; | 1084 | break; |
944 | } | 1085 | } |
945 | } | 1086 | } |
946 | 1087 | ||
1088 | out: | ||
1089 | ui_browser__hide(&menu->b); | ||
947 | return key; | 1090 | return key; |
948 | } | 1091 | } |
1092 | |||
1093 | static int __perf_evlist__tui_browse_hists(struct perf_evlist *evlist, | ||
1094 | const char *help) | ||
1095 | { | ||
1096 | struct perf_evsel *pos; | ||
1097 | struct perf_evsel_menu menu = { | ||
1098 | .b = { | ||
1099 | .entries = &evlist->entries, | ||
1100 | .refresh = ui_browser__list_head_refresh, | ||
1101 | .seek = ui_browser__list_head_seek, | ||
1102 | .write = perf_evsel_menu__write, | ||
1103 | .nr_entries = evlist->nr_entries, | ||
1104 | .priv = evlist, | ||
1105 | }, | ||
1106 | }; | ||
1107 | |||
1108 | ui_helpline__push("Press ESC to exit"); | ||
1109 | |||
1110 | list_for_each_entry(pos, &evlist->entries, node) { | ||
1111 | const char *ev_name = event_name(pos); | ||
1112 | size_t line_len = strlen(ev_name) + 7; | ||
1113 | |||
1114 | if (menu.b.width < line_len) | ||
1115 | menu.b.width = line_len; | ||
1116 | /* | ||
1117 | * Cache the evsel name, tracepoints have a _high_ cost per | ||
1118 | * event_name() call. | ||
1119 | */ | ||
1120 | if (pos->name == NULL) | ||
1121 | pos->name = strdup(ev_name); | ||
1122 | } | ||
1123 | |||
1124 | return perf_evsel_menu__run(&menu, help); | ||
1125 | } | ||
1126 | |||
1127 | int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help) | ||
1128 | { | ||
1129 | |||
1130 | if (evlist->nr_entries == 1) { | ||
1131 | struct perf_evsel *first = list_entry(evlist->entries.next, | ||
1132 | struct perf_evsel, node); | ||
1133 | const char *ev_name = event_name(first); | ||
1134 | return perf_evsel__hists_browse(first, help, ev_name, false); | ||
1135 | } | ||
1136 | |||
1137 | return __perf_evlist__tui_browse_hists(evlist, help); | ||
1138 | } | ||
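The TAB/UNTAB cases in perf_evsel_menu__run() above cycle through evlist->entries as a ring: when the next (or previous) node is the list head itself, they wrap around to the first (or last) entry. The same wrap-around on a hand-rolled doubly linked list, so it compiles on its own; node, ring_next and ring_prev are illustrative names, not kernel list.h API:

#include <stdio.h>

struct node {
	struct node *next, *prev;
	const char *name;
};

/* step forward, skipping the sentinel head so the walk wraps around */
static struct node *ring_next(struct node *head, struct node *pos)
{
	return pos->next == head ? head->next : pos->next;
}

static struct node *ring_prev(struct node *head, struct node *pos)
{
	return pos->prev == head ? head->prev : pos->prev;
}

int main(void)
{
	struct node head, a = { .name = "cycles" }, b = { .name = "instructions" };
	struct node *pos = &b;

	/* head <-> a <-> b <-> head */
	head.next = &a; a.prev = &head;
	a.next = &b;    b.prev = &a;
	b.next = &head; head.prev = &b;

	pos = ring_next(&head, pos);	/* wraps from "instructions" to "cycles" */
	printf("%s\n", pos->name);
	pos = ring_prev(&head, pos);	/* and back again */
	printf("%s\n", pos->name);
	return 0;
}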
diff --git a/tools/perf/util/ui/browsers/map.c b/tools/perf/util/ui/browsers/map.c index 142b825b42bf..8462bffe20bc 100644 --- a/tools/perf/util/ui/browsers/map.c +++ b/tools/perf/util/ui/browsers/map.c | |||
@@ -1,6 +1,6 @@ | |||
1 | #include "../libslang.h" | 1 | #include "../libslang.h" |
2 | #include <elf.h> | 2 | #include <elf.h> |
3 | #include <newt.h> | 3 | #include <inttypes.h> |
4 | #include <sys/ttydefaults.h> | 4 | #include <sys/ttydefaults.h> |
5 | #include <ctype.h> | 5 | #include <ctype.h> |
6 | #include <string.h> | 6 | #include <string.h> |
@@ -41,13 +41,12 @@ static int ui_entry__read(const char *title, char *bf, size_t size, int width) | |||
41 | out_free_form: | 41 | out_free_form: |
42 | newtPopWindow(); | 42 | newtPopWindow(); |
43 | newtFormDestroy(form); | 43 | newtFormDestroy(form); |
44 | return 0; | 44 | return err; |
45 | } | 45 | } |
46 | 46 | ||
47 | struct map_browser { | 47 | struct map_browser { |
48 | struct ui_browser b; | 48 | struct ui_browser b; |
49 | struct map *map; | 49 | struct map *map; |
50 | u16 namelen; | ||
51 | u8 addrlen; | 50 | u8 addrlen; |
52 | }; | 51 | }; |
53 | 52 | ||
@@ -56,14 +55,16 @@ static void map_browser__write(struct ui_browser *self, void *nd, int row) | |||
56 | struct symbol *sym = rb_entry(nd, struct symbol, rb_node); | 55 | struct symbol *sym = rb_entry(nd, struct symbol, rb_node); |
57 | struct map_browser *mb = container_of(self, struct map_browser, b); | 56 | struct map_browser *mb = container_of(self, struct map_browser, b); |
58 | bool current_entry = ui_browser__is_current_entry(self, row); | 57 | bool current_entry = ui_browser__is_current_entry(self, row); |
59 | int color = ui_browser__percent_color(0, current_entry); | 58 | int width; |
60 | 59 | ||
61 | SLsmg_set_color(color); | 60 | ui_browser__set_percent_color(self, 0, current_entry); |
62 | slsmg_printf("%*llx %*llx %c ", | 61 | slsmg_printf("%*" PRIx64 " %*" PRIx64 " %c ", |
63 | mb->addrlen, sym->start, mb->addrlen, sym->end, | 62 | mb->addrlen, sym->start, mb->addrlen, sym->end, |
64 | sym->binding == STB_GLOBAL ? 'g' : | 63 | sym->binding == STB_GLOBAL ? 'g' : |
65 | sym->binding == STB_LOCAL ? 'l' : 'w'); | 64 | sym->binding == STB_LOCAL ? 'l' : 'w'); |
66 | slsmg_write_nstring(sym->name, mb->namelen); | 65 | width = self->width - ((mb->addrlen * 2) + 4); |
66 | if (width > 0) | ||
67 | slsmg_write_nstring(sym->name, width); | ||
67 | } | 68 | } |
68 | 69 | ||
69 | /* FIXME uber-kludgy, see comment on cmd_report... */ | 70 | /* FIXME uber-kludgy, see comment on cmd_report... */ |
@@ -98,31 +99,29 @@ static int map_browser__search(struct map_browser *self) | |||
98 | return 0; | 99 | return 0; |
99 | } | 100 | } |
100 | 101 | ||
101 | static int map_browser__run(struct map_browser *self, struct newtExitStruct *es) | 102 | static int map_browser__run(struct map_browser *self) |
102 | { | 103 | { |
104 | int key; | ||
105 | |||
103 | if (ui_browser__show(&self->b, self->map->dso->long_name, | 106 | if (ui_browser__show(&self->b, self->map->dso->long_name, |
104 | "Press <- or ESC to exit, %s / to search", | 107 | "Press <- or ESC to exit, %s / to search", |
105 | verbose ? "" : "restart with -v to use") < 0) | 108 | verbose ? "" : "restart with -v to use") < 0) |
106 | return -1; | 109 | return -1; |
107 | 110 | ||
108 | newtFormAddHotKey(self->b.form, NEWT_KEY_LEFT); | ||
109 | newtFormAddHotKey(self->b.form, NEWT_KEY_ENTER); | ||
110 | if (verbose) | 111 | if (verbose) |
111 | newtFormAddHotKey(self->b.form, '/'); | 112 | ui_browser__add_exit_key(&self->b, '/'); |
112 | 113 | ||
113 | while (1) { | 114 | while (1) { |
114 | ui_browser__run(&self->b, es); | 115 | key = ui_browser__run(&self->b); |
115 | 116 | ||
116 | if (es->reason != NEWT_EXIT_HOTKEY) | 117 | if (verbose && key == '/') |
117 | break; | ||
118 | if (verbose && es->u.key == '/') | ||
119 | map_browser__search(self); | 118 | map_browser__search(self); |
120 | else | 119 | else |
121 | break; | 120 | break; |
122 | } | 121 | } |
123 | 122 | ||
124 | ui_browser__hide(&self->b); | 123 | ui_browser__hide(&self->b); |
125 | return 0; | 124 | return key; |
126 | } | 125 | } |
127 | 126 | ||
128 | int map__browse(struct map *self) | 127 | int map__browse(struct map *self) |
@@ -136,7 +135,6 @@ int map__browse(struct map *self) | |||
136 | }, | 135 | }, |
137 | .map = self, | 136 | .map = self, |
138 | }; | 137 | }; |
139 | struct newtExitStruct es; | ||
140 | struct rb_node *nd; | 138 | struct rb_node *nd; |
141 | char tmp[BITS_PER_LONG / 4]; | 139 | char tmp[BITS_PER_LONG / 4]; |
142 | u64 maxaddr = 0; | 140 | u64 maxaddr = 0; |
@@ -144,8 +142,6 @@ int map__browse(struct map *self) | |||
144 | for (nd = rb_first(mb.b.entries); nd; nd = rb_next(nd)) { | 142 | for (nd = rb_first(mb.b.entries); nd; nd = rb_next(nd)) { |
145 | struct symbol *pos = rb_entry(nd, struct symbol, rb_node); | 143 | struct symbol *pos = rb_entry(nd, struct symbol, rb_node); |
146 | 144 | ||
147 | if (mb.namelen < pos->namelen) | ||
148 | mb.namelen = pos->namelen; | ||
149 | if (maxaddr < pos->end) | 145 | if (maxaddr < pos->end) |
150 | maxaddr = pos->end; | 146 | maxaddr = pos->end; |
151 | if (verbose) { | 147 | if (verbose) { |
@@ -155,7 +151,6 @@ int map__browse(struct map *self) | |||
155 | ++mb.b.nr_entries; | 151 | ++mb.b.nr_entries; |
156 | } | 152 | } |
157 | 153 | ||
158 | mb.addrlen = snprintf(tmp, sizeof(tmp), "%llx", maxaddr); | 154 | mb.addrlen = snprintf(tmp, sizeof(tmp), "%" PRIx64, maxaddr); |
159 | mb.b.width += mb.addrlen * 2 + 4 + mb.namelen; | 155 | return map_browser__run(&mb); |
160 | return map_browser__run(&mb, &es); | ||
161 | } | 156 | } |
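map__browse() now sizes its address columns with snprintf(NULL, 0, ...), which returns how many characters the largest address would need without writing anything, and map_browser__write() pads every row to that width. In isolation:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t maxaddr = 0xffffffff81000000ULL;
	int addrlen = snprintf(NULL, 0, "%" PRIx64, maxaddr);	/* 16 here */

	/* every address padded to the width of the largest one */
	printf("%*" PRIx64 "\n", addrlen, (uint64_t)0x1000);
	printf("%*" PRIx64 "\n", addrlen, maxaddr);
	return 0;
}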
diff --git a/tools/perf/util/ui/browsers/top.c b/tools/perf/util/ui/browsers/top.c new file mode 100644 index 000000000000..5a06538532af --- /dev/null +++ b/tools/perf/util/ui/browsers/top.c | |||
@@ -0,0 +1,213 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com> | ||
3 | * | ||
4 | * Parts came from builtin-{top,stat,record}.c, see those files for further | ||
5 | * copyright notes. | ||
6 | * | ||
7 | * Released under the GPL v2. (and only v2, not any later version) | ||
8 | */ | ||
9 | #include "../browser.h" | ||
10 | #include "../../annotate.h" | ||
11 | #include "../helpline.h" | ||
12 | #include "../libslang.h" | ||
13 | #include "../util.h" | ||
14 | #include "../../evlist.h" | ||
15 | #include "../../hist.h" | ||
16 | #include "../../sort.h" | ||
17 | #include "../../symbol.h" | ||
18 | #include "../../top.h" | ||
19 | |||
20 | struct perf_top_browser { | ||
21 | struct ui_browser b; | ||
22 | struct rb_root root; | ||
23 | struct sym_entry *selection; | ||
24 | float sum_ksamples; | ||
25 | int dso_width; | ||
26 | int dso_short_width; | ||
27 | int sym_width; | ||
28 | }; | ||
29 | |||
30 | static void perf_top_browser__write(struct ui_browser *browser, void *entry, int row) | ||
31 | { | ||
32 | struct perf_top_browser *top_browser = container_of(browser, struct perf_top_browser, b); | ||
33 | struct sym_entry *syme = rb_entry(entry, struct sym_entry, rb_node); | ||
34 | bool current_entry = ui_browser__is_current_entry(browser, row); | ||
35 | struct symbol *symbol = sym_entry__symbol(syme); | ||
36 | struct perf_top *top = browser->priv; | ||
37 | int width = browser->width; | ||
38 | double pcnt; | ||
39 | |||
40 | pcnt = 100.0 - (100.0 * ((top_browser->sum_ksamples - syme->snap_count) / | ||
41 | top_browser->sum_ksamples)); | ||
42 | ui_browser__set_percent_color(browser, pcnt, current_entry); | ||
43 | |||
44 | if (top->evlist->nr_entries == 1 || !top->display_weighted) { | ||
45 | slsmg_printf("%20.2f ", syme->weight); | ||
46 | width -= 24; | ||
47 | } else { | ||
48 | slsmg_printf("%9.1f %10ld ", syme->weight, syme->snap_count); | ||
49 | width -= 23; | ||
50 | } | ||
51 | |||
52 | slsmg_printf("%4.1f%%", pcnt); | ||
53 | width -= 7; | ||
54 | |||
55 | if (verbose) { | ||
56 | slsmg_printf(" %016" PRIx64, symbol->start); | ||
57 | width -= 17; | ||
58 | } | ||
59 | |||
60 | slsmg_printf(" %-*.*s ", top_browser->sym_width, top_browser->sym_width, | ||
61 | symbol->name); | ||
62 | width -= top_browser->sym_width; | ||
63 | slsmg_write_nstring(width >= syme->map->dso->long_name_len ? | ||
64 | syme->map->dso->long_name : | ||
65 | syme->map->dso->short_name, width); | ||
66 | |||
67 | if (current_entry) | ||
68 | top_browser->selection = syme; | ||
69 | } | ||
70 | |||
71 | static void perf_top_browser__update_rb_tree(struct perf_top_browser *browser) | ||
72 | { | ||
73 | struct perf_top *top = browser->b.priv; | ||
74 | u64 top_idx = browser->b.top_idx; | ||
75 | |||
76 | browser->root = RB_ROOT; | ||
77 | browser->b.top = NULL; | ||
78 | browser->sum_ksamples = perf_top__decay_samples(top, &browser->root); | ||
79 | /* | ||
80 | * No active symbols | ||
81 | */ | ||
82 | if (top->rb_entries == 0) | ||
83 | return; | ||
84 | |||
85 | perf_top__find_widths(top, &browser->root, &browser->dso_width, | ||
86 | &browser->dso_short_width, | ||
87 | &browser->sym_width); | ||
88 | if (browser->sym_width + browser->dso_width > browser->b.width - 29) { | ||
89 | browser->dso_width = browser->dso_short_width; | ||
90 | if (browser->sym_width + browser->dso_width > browser->b.width - 29) | ||
91 | browser->sym_width = browser->b.width - browser->dso_width - 29; | ||
92 | } | ||
93 | |||
94 | /* | ||
95 | * Adjust the ui_browser indexes since the entries in the browser->root | ||
96 | * rb_tree may have changed, then seek it from start, so that we get a | ||
97 | * possible new top of the screen. | ||
98 | */ | ||
99 | browser->b.nr_entries = top->rb_entries; | ||
100 | |||
101 | if (top_idx >= browser->b.nr_entries) { | ||
102 | if (browser->b.height >= browser->b.nr_entries) | ||
103 | top_idx = browser->b.nr_entries - browser->b.height; | ||
104 | else | ||
105 | top_idx = 0; | ||
106 | } | ||
107 | |||
108 | if (browser->b.index >= top_idx + browser->b.height) | ||
109 | browser->b.index = top_idx + browser->b.index - browser->b.top_idx; | ||
110 | |||
111 | if (browser->b.index >= browser->b.nr_entries) | ||
112 | browser->b.index = browser->b.nr_entries - 1; | ||
113 | |||
114 | browser->b.top_idx = top_idx; | ||
115 | browser->b.seek(&browser->b, top_idx, SEEK_SET); | ||
116 | } | ||
117 | |||
118 | static void perf_top_browser__annotate(struct perf_top_browser *browser) | ||
119 | { | ||
120 | struct sym_entry *syme = browser->selection; | ||
121 | struct symbol *sym = sym_entry__symbol(syme); | ||
122 | struct annotation *notes = symbol__annotation(sym); | ||
123 | struct perf_top *top = browser->b.priv; | ||
124 | |||
125 | if (notes->src != NULL) | ||
126 | goto do_annotation; | ||
127 | |||
128 | pthread_mutex_lock(¬es->lock); | ||
129 | |||
130 | top->sym_filter_entry = NULL; | ||
131 | |||
132 | if (symbol__alloc_hist(sym, top->evlist->nr_entries) < 0) { | ||
133 | pr_err("Not enough memory for annotating '%s' symbol!\n", | ||
134 | sym->name); | ||
135 | pthread_mutex_unlock(¬es->lock); | ||
136 | return; | ||
137 | } | ||
138 | |||
139 | top->sym_filter_entry = syme; | ||
140 | |||
141 | pthread_mutex_unlock(¬es->lock); | ||
142 | do_annotation: | ||
143 | symbol__tui_annotate(sym, syme->map, 0, top->delay_secs * 1000); | ||
144 | } | ||
145 | |||
146 | static int perf_top_browser__run(struct perf_top_browser *browser) | ||
147 | { | ||
148 | int key; | ||
149 | char title[160]; | ||
150 | struct perf_top *top = browser->b.priv; | ||
151 | int delay_msecs = top->delay_secs * 1000; | ||
152 | int exit_keys[] = { 'a', NEWT_KEY_ENTER, NEWT_KEY_RIGHT, 0, }; | ||
153 | |||
154 | perf_top_browser__update_rb_tree(browser); | ||
155 | perf_top__header_snprintf(top, title, sizeof(title)); | ||
156 | perf_top__reset_sample_counters(top); | ||
157 | |||
158 | if (ui_browser__show(&browser->b, title, | ||
159 | "ESC: exit, ENTER|->|a: Live Annotate") < 0) | ||
160 | return -1; | ||
161 | |||
162 | newtFormSetTimer(browser->b.form, delay_msecs); | ||
163 | ui_browser__add_exit_keys(&browser->b, exit_keys); | ||
164 | |||
165 | while (1) { | ||
166 | key = ui_browser__run(&browser->b); | ||
167 | |||
168 | switch (key) { | ||
169 | case -1: | ||
170 | /* FIXME we need to check if it was es.reason == NEWT_EXIT_TIMER */ | ||
171 | perf_top_browser__update_rb_tree(browser); | ||
172 | perf_top__header_snprintf(top, title, sizeof(title)); | ||
173 | perf_top__reset_sample_counters(top); | ||
174 | ui_browser__set_color(&browser->b, NEWT_COLORSET_ROOT); | ||
175 | SLsmg_gotorc(0, 0); | ||
176 | slsmg_write_nstring(title, browser->b.width); | ||
177 | break; | ||
178 | case 'a': | ||
179 | case NEWT_KEY_RIGHT: | ||
180 | case NEWT_KEY_ENTER: | ||
181 | if (browser->selection) | ||
182 | perf_top_browser__annotate(browser); | ||
183 | break; | ||
184 | case NEWT_KEY_LEFT: | ||
185 | continue; | ||
186 | case NEWT_KEY_ESCAPE: | ||
187 | if (!ui__dialog_yesno("Do you really want to exit?")) | ||
188 | continue; | ||
189 | /* Fall thru */ | ||
190 | default: | ||
191 | goto out; | ||
192 | } | ||
193 | } | ||
194 | out: | ||
195 | ui_browser__hide(&browser->b); | ||
196 | return key; | ||
197 | } | ||
198 | |||
199 | int perf_top__tui_browser(struct perf_top *top) | ||
200 | { | ||
201 | struct perf_top_browser browser = { | ||
202 | .b = { | ||
203 | .entries = &browser.root, | ||
204 | .refresh = ui_browser__rb_tree_refresh, | ||
205 | .seek = ui_browser__rb_tree_seek, | ||
206 | .write = perf_top_browser__write, | ||
207 | .priv = top, | ||
208 | }, | ||
209 | }; | ||
210 | |||
211 | ui_helpline__push("Press <- or ESC to exit"); | ||
212 | return perf_top_browser__run(&browser); | ||
213 | } | ||
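The per-row percentage in perf_top_browser__write() is computed as 100 - 100*(sum_ksamples - snap_count)/sum_ksamples, which is simply this symbol's share of the decayed sample total. A two-line check with made-up counts:

#include <stdio.h>

int main(void)
{
	double sum_ksamples = 2500.0, snap_count = 125.0;
	double pcnt = 100.0 - (100.0 * ((sum_ksamples - snap_count) / sum_ksamples));

	printf("%4.1f%%\n", pcnt);				/* 5.0% */
	printf("%4.1f%%\n", 100.0 * snap_count / sum_ksamples);	/* same value */
	return 0;
}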
diff --git a/tools/perf/util/ui/helpline.c b/tools/perf/util/ui/helpline.c index 8d79daa4458a..f36d2ff509ed 100644 --- a/tools/perf/util/ui/helpline.c +++ b/tools/perf/util/ui/helpline.c | |||
@@ -5,6 +5,7 @@ | |||
5 | 5 | ||
6 | #include "../debug.h" | 6 | #include "../debug.h" |
7 | #include "helpline.h" | 7 | #include "helpline.h" |
8 | #include "ui.h" | ||
8 | 9 | ||
9 | void ui_helpline__pop(void) | 10 | void ui_helpline__pop(void) |
10 | { | 11 | { |
@@ -55,7 +56,8 @@ int ui_helpline__show_help(const char *format, va_list ap) | |||
55 | int ret; | 56 | int ret; |
56 | static int backlog; | 57 | static int backlog; |
57 | 58 | ||
58 | ret = vsnprintf(ui_helpline__last_msg + backlog, | 59 | pthread_mutex_lock(&ui__lock); |
60 | ret = vsnprintf(ui_helpline__last_msg + backlog, | ||
59 | sizeof(ui_helpline__last_msg) - backlog, format, ap); | 61 | sizeof(ui_helpline__last_msg) - backlog, format, ap); |
60 | backlog += ret; | 62 | backlog += ret; |
61 | 63 | ||
@@ -64,6 +66,7 @@ int ui_helpline__show_help(const char *format, va_list ap) | |||
64 | newtRefresh(); | 66 | newtRefresh(); |
65 | backlog = 0; | 67 | backlog = 0; |
66 | } | 68 | } |
69 | pthread_mutex_unlock(&ui__lock); | ||
67 | 70 | ||
68 | return ret; | 71 | return ret; |
69 | } | 72 | } |
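ui_helpline__show_help() appends into a static buffer and only resets its backlog once a full line ending in '\n' has been shown, with the new ui__lock keeping concurrent writers from interleaving. A stripped-down sketch of the accumulate-then-flush part, with stdout standing in for newtRefresh() and the locking left out:

#include <stdarg.h>
#include <stdio.h>

static char last_msg[512];
static int backlog;

static void helpline_vappend(const char *fmt, va_list ap)
{
	int ret = vsnprintf(last_msg + backlog, sizeof(last_msg) - backlog, fmt, ap);

	backlog += ret;
	if (backlog > 0 && last_msg[backlog - 1] == '\n') {
		fputs(last_msg, stdout);	/* stand-in for the newt refresh */
		backlog = 0;
	}
}

static void helpline_append(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	helpline_vappend(fmt, ap);
	va_end(ap);
}

int main(void)
{
	helpline_append("objdump: ");
	helpline_append("no symbols\n");	/* only now is the line flushed */
	return 0;
}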
diff --git a/tools/perf/util/ui/libslang.h b/tools/perf/util/ui/libslang.h index 5623da8e8080..2b63e1c9b181 100644 --- a/tools/perf/util/ui/libslang.h +++ b/tools/perf/util/ui/libslang.h | |||
@@ -13,11 +13,11 @@ | |||
13 | 13 | ||
14 | #if SLANG_VERSION < 20104 | 14 | #if SLANG_VERSION < 20104 |
15 | #define slsmg_printf(msg, args...) \ | 15 | #define slsmg_printf(msg, args...) \ |
16 | SLsmg_printf((char *)msg, ##args) | 16 | SLsmg_printf((char *)(msg), ##args) |
17 | #define slsmg_write_nstring(msg, len) \ | 17 | #define slsmg_write_nstring(msg, len) \ |
18 | SLsmg_write_nstring((char *)msg, len) | 18 | SLsmg_write_nstring((char *)(msg), len) |
19 | #define sltt_set_color(obj, name, fg, bg) \ | 19 | #define sltt_set_color(obj, name, fg, bg) \ |
20 | SLtt_set_color(obj,(char *)name, (char *)fg, (char *)bg) | 20 | SLtt_set_color(obj,(char *)(name), (char *)(fg), (char *)(bg)) |
21 | #else | 21 | #else |
22 | #define slsmg_printf SLsmg_printf | 22 | #define slsmg_printf SLsmg_printf |
23 | #define slsmg_write_nstring SLsmg_write_nstring | 23 | #define slsmg_write_nstring SLsmg_write_nstring |
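The libslang.h change only adds parentheses around the macro arguments. That matters because a cast binds tighter than ?:, so with the old expansion an argument that is itself a conditional expression would have only its first operand cast. A small demonstration with made-up names:

#include <stdio.h>

#define BAD_CAST(msg)  ((char *)msg)
#define GOOD_CAST(msg) ((char *)(msg))

int main(void)
{
	const char *long_name = "/usr/lib/libc.so.6", *short_name = "libc";
	int verbose = 0;

	/* BAD_CAST(verbose ? long_name : short_name) expands to
	 * ((char *)verbose ? long_name : short_name), casting the flag
	 * instead of the chosen string; GOOD_CAST casts the whole thing. */
	char *s = GOOD_CAST(verbose ? long_name : short_name);

	printf("%s\n", s);
	return 0;
}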
diff --git a/tools/perf/util/ui/setup.c b/tools/perf/util/ui/setup.c index 662085032eb7..ee46d671db59 100644 --- a/tools/perf/util/ui/setup.c +++ b/tools/perf/util/ui/setup.c | |||
@@ -6,6 +6,9 @@ | |||
6 | #include "../debug.h" | 6 | #include "../debug.h" |
7 | #include "browser.h" | 7 | #include "browser.h" |
8 | #include "helpline.h" | 8 | #include "helpline.h" |
9 | #include "ui.h" | ||
10 | |||
11 | pthread_mutex_t ui__lock = PTHREAD_MUTEX_INITIALIZER; | ||
9 | 12 | ||
10 | static void newt_suspend(void *d __used) | 13 | static void newt_suspend(void *d __used) |
11 | { | 14 | { |
@@ -14,11 +17,12 @@ static void newt_suspend(void *d __used) | |||
14 | newtResume(); | 17 | newtResume(); |
15 | } | 18 | } |
16 | 19 | ||
17 | void setup_browser(void) | 20 | void setup_browser(bool fallback_to_pager) |
18 | { | 21 | { |
19 | if (!isatty(1) || !use_browser || dump_trace) { | 22 | if (!isatty(1) || !use_browser || dump_trace) { |
20 | use_browser = 0; | 23 | use_browser = 0; |
21 | setup_pager(); | 24 | if (fallback_to_pager) |
25 | setup_pager(); | ||
22 | return; | 26 | return; |
23 | } | 27 | } |
24 | 28 | ||
diff --git a/tools/perf/util/ui/ui.h b/tools/perf/util/ui/ui.h new file mode 100644 index 000000000000..d264e059c829 --- /dev/null +++ b/tools/perf/util/ui/ui.h | |||
@@ -0,0 +1,8 @@ | |||
1 | #ifndef _PERF_UI_H_ | ||
2 | #define _PERF_UI_H_ 1 | ||
3 | |||
4 | #include <pthread.h> | ||
5 | |||
6 | extern pthread_mutex_t ui__lock; | ||
7 | |||
8 | #endif /* _PERF_UI_H_ */ | ||
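setup.c now defines ui__lock and this header exports it, so any code path touching the slang/newt screen can serialize against the top browser's timer-driven refresh. The arrangement in miniature, with illustrative names and printf standing in for the drawing calls:

#include <pthread.h>
#include <stdio.h>

pthread_mutex_t ui_lock = PTHREAD_MUTEX_INITIALIZER;	/* one shared definition */

static void draw_status(const char *msg)
{
	pthread_mutex_lock(&ui_lock);
	printf("\r%s", msg);		/* stand-in for the SLsmg/newt drawing calls */
	fflush(stdout);
	pthread_mutex_unlock(&ui_lock);
}

int main(void)
{
	draw_status("Events: 10K cycles");
	printf("\n");
	return 0;
}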
diff --git a/tools/perf/util/ui/util.c b/tools/perf/util/ui/util.c index 04600e26ceea..fdf1fc8f08bc 100644 --- a/tools/perf/util/ui/util.c +++ b/tools/perf/util/ui/util.c | |||
@@ -9,10 +9,9 @@ | |||
9 | #include "../debug.h" | 9 | #include "../debug.h" |
10 | #include "browser.h" | 10 | #include "browser.h" |
11 | #include "helpline.h" | 11 | #include "helpline.h" |
12 | #include "ui.h" | ||
12 | #include "util.h" | 13 | #include "util.h" |
13 | 14 | ||
14 | newtComponent newt_form__new(void); | ||
15 | |||
16 | static void newt_form__set_exit_keys(newtComponent self) | 15 | static void newt_form__set_exit_keys(newtComponent self) |
17 | { | 16 | { |
18 | newtFormAddHotKey(self, NEWT_KEY_LEFT); | 17 | newtFormAddHotKey(self, NEWT_KEY_LEFT); |
@@ -22,7 +21,7 @@ static void newt_form__set_exit_keys(newtComponent self) | |||
22 | newtFormAddHotKey(self, CTRL('c')); | 21 | newtFormAddHotKey(self, CTRL('c')); |
23 | } | 22 | } |
24 | 23 | ||
25 | newtComponent newt_form__new(void) | 24 | static newtComponent newt_form__new(void) |
26 | { | 25 | { |
27 | newtComponent self = newtForm(NULL, NULL, 0); | 26 | newtComponent self = newtForm(NULL, NULL, 0); |
28 | if (self) | 27 | if (self) |
@@ -106,9 +105,26 @@ out_destroy_form: | |||
106 | return rc; | 105 | return rc; |
107 | } | 106 | } |
108 | 107 | ||
108 | static const char yes[] = "Yes", no[] = "No", | ||
109 | warning_str[] = "Warning!", ok[] = "Ok"; | ||
110 | |||
109 | bool ui__dialog_yesno(const char *msg) | 111 | bool ui__dialog_yesno(const char *msg) |
110 | { | 112 | { |
111 | /* newtWinChoice should really be accepting const char pointers... */ | 113 | /* newtWinChoice should really be accepting const char pointers... */ |
112 | char yes[] = "Yes", no[] = "No"; | 114 | return newtWinChoice(NULL, (char *)yes, (char *)no, (char *)msg) == 1; |
113 | return newtWinChoice(NULL, yes, no, (char *)msg) == 1; | 115 | } |
116 | |||
117 | void ui__warning(const char *format, ...) | ||
118 | { | ||
119 | va_list args; | ||
120 | |||
121 | va_start(args, format); | ||
122 | if (use_browser > 0) { | ||
123 | pthread_mutex_lock(&ui__lock); | ||
124 | newtWinMessagev((char *)warning_str, (char *)ok, | ||
125 | (char *)format, args); | ||
126 | pthread_mutex_unlock(&ui__lock); | ||
127 | } else | ||
128 | vfprintf(stderr, format, args); | ||
129 | va_end(args); | ||
114 | } | 130 | } |
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c index 214265674ddd..5b3ea49aa63e 100644 --- a/tools/perf/util/util.c +++ b/tools/perf/util/util.c | |||
@@ -114,3 +114,20 @@ unsigned long convert_unit(unsigned long value, char *unit) | |||
114 | 114 | ||
115 | return value; | 115 | return value; |
116 | } | 116 | } |
117 | |||
118 | int readn(int fd, void *buf, size_t n) | ||
119 | { | ||
120 | void *buf_start = buf; | ||
121 | |||
122 | while (n) { | ||
123 | int ret = read(fd, buf, n); | ||
124 | |||
125 | if (ret <= 0) | ||
126 | return ret; | ||
127 | |||
128 | n -= ret; | ||
129 | buf += ret; | ||
130 | } | ||
131 | |||
132 | return buf - buf_start; | ||
133 | } | ||
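readn() above keeps calling read() across short reads until the requested byte count is complete, and propagates the zero or negative result otherwise. A usage sketch with a portable restatement (plain char pointer arithmetic instead of the GNU void * extension); reading a perf.data header magic is just an example:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* same retry loop as the hunk above, restated with a char pointer */
static ssize_t readn_sketch(int fd, void *buf, size_t n)
{
	char *p = buf;
	size_t left = n;

	while (left) {
		ssize_t ret = read(fd, p, left);

		if (ret <= 0)		/* EOF or error before n bytes arrived */
			return ret;
		left -= ret;
		p += ret;
	}
	return n;
}

int main(void)
{
	char magic[8];
	int fd = open("perf.data", O_RDONLY);
	ssize_t ret;

	if (fd < 0)
		return 1;
	ret = readn_sketch(fd, magic, sizeof(magic));
	if (ret != (ssize_t)sizeof(magic))
		fprintf(stderr, "short or failed read (%zd)\n", ret);
	close(fd);
	return 0;
}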
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h index f380fed74359..fc784284ac8b 100644 --- a/tools/perf/util/util.h +++ b/tools/perf/util/util.h | |||
@@ -70,9 +70,7 @@ | |||
70 | #include <sys/poll.h> | 70 | #include <sys/poll.h> |
71 | #include <sys/socket.h> | 71 | #include <sys/socket.h> |
72 | #include <sys/ioctl.h> | 72 | #include <sys/ioctl.h> |
73 | #ifndef NO_SYS_SELECT_H | ||
74 | #include <sys/select.h> | 73 | #include <sys/select.h> |
75 | #endif | ||
76 | #include <netinet/in.h> | 74 | #include <netinet/in.h> |
77 | #include <netinet/tcp.h> | 75 | #include <netinet/tcp.h> |
78 | #include <arpa/inet.h> | 76 | #include <arpa/inet.h> |
@@ -83,10 +81,6 @@ | |||
83 | #include "types.h" | 81 | #include "types.h" |
84 | #include <sys/ttydefaults.h> | 82 | #include <sys/ttydefaults.h> |
85 | 83 | ||
86 | #ifndef NO_ICONV | ||
87 | #include <iconv.h> | ||
88 | #endif | ||
89 | |||
90 | extern const char *graph_line; | 84 | extern const char *graph_line; |
91 | extern const char *graph_dotted_line; | 85 | extern const char *graph_dotted_line; |
92 | extern char buildid_dir[]; | 86 | extern char buildid_dir[]; |
@@ -236,26 +230,6 @@ static inline int sane_case(int x, int high) | |||
236 | return x; | 230 | return x; |
237 | } | 231 | } |
238 | 232 | ||
239 | #ifndef DIR_HAS_BSD_GROUP_SEMANTICS | ||
240 | # define FORCE_DIR_SET_GID S_ISGID | ||
241 | #else | ||
242 | # define FORCE_DIR_SET_GID 0 | ||
243 | #endif | ||
244 | |||
245 | #ifdef NO_NSEC | ||
246 | #undef USE_NSEC | ||
247 | #define ST_CTIME_NSEC(st) 0 | ||
248 | #define ST_MTIME_NSEC(st) 0 | ||
249 | #else | ||
250 | #ifdef USE_ST_TIMESPEC | ||
251 | #define ST_CTIME_NSEC(st) ((unsigned int)((st).st_ctimespec.tv_nsec)) | ||
252 | #define ST_MTIME_NSEC(st) ((unsigned int)((st).st_mtimespec.tv_nsec)) | ||
253 | #else | ||
254 | #define ST_CTIME_NSEC(st) ((unsigned int)((st).st_ctim.tv_nsec)) | ||
255 | #define ST_MTIME_NSEC(st) ((unsigned int)((st).st_mtim.tv_nsec)) | ||
256 | #endif | ||
257 | #endif | ||
258 | |||
259 | int mkdir_p(char *path, mode_t mode); | 233 | int mkdir_p(char *path, mode_t mode); |
260 | int copyfile(const char *from, const char *to); | 234 | int copyfile(const char *from, const char *to); |
261 | 235 | ||
@@ -265,19 +239,7 @@ void argv_free(char **argv); | |||
265 | bool strglobmatch(const char *str, const char *pat); | 239 | bool strglobmatch(const char *str, const char *pat); |
266 | bool strlazymatch(const char *str, const char *pat); | 240 | bool strlazymatch(const char *str, const char *pat); |
267 | unsigned long convert_unit(unsigned long value, char *unit); | 241 | unsigned long convert_unit(unsigned long value, char *unit); |
268 | 242 | int readn(int fd, void *buf, size_t size); | |
269 | #ifndef ESC | ||
270 | #define ESC 27 | ||
271 | #endif | ||
272 | |||
273 | static inline bool is_exit_key(int key) | ||
274 | { | ||
275 | char up; | ||
276 | if (key == CTRL('c') || key == ESC) | ||
277 | return true; | ||
278 | up = toupper(key); | ||
279 | return up == 'Q'; | ||
280 | } | ||
281 | 243 | ||
282 | #define _STR(x) #x | 244 | #define _STR(x) #x |
283 | #define STR(x) _STR(x) | 245 | #define STR(x) _STR(x) |
diff --git a/tools/perf/util/values.c b/tools/perf/util/values.c index cfa55d686e3b..bdd33470b235 100644 --- a/tools/perf/util/values.c +++ b/tools/perf/util/values.c | |||
@@ -150,7 +150,7 @@ static void perf_read_values__display_pretty(FILE *fp, | |||
150 | if (width > tidwidth) | 150 | if (width > tidwidth) |
151 | tidwidth = width; | 151 | tidwidth = width; |
152 | for (j = 0; j < values->counters; j++) { | 152 | for (j = 0; j < values->counters; j++) { |
153 | width = snprintf(NULL, 0, "%Lu", values->value[i][j]); | 153 | width = snprintf(NULL, 0, "%" PRIu64, values->value[i][j]); |
154 | if (width > counterwidth[j]) | 154 | if (width > counterwidth[j]) |
155 | counterwidth[j] = width; | 155 | counterwidth[j] = width; |
156 | } | 156 | } |
@@ -165,7 +165,7 @@ static void perf_read_values__display_pretty(FILE *fp, | |||
165 | fprintf(fp, " %*d %*d", pidwidth, values->pid[i], | 165 | fprintf(fp, " %*d %*d", pidwidth, values->pid[i], |
166 | tidwidth, values->tid[i]); | 166 | tidwidth, values->tid[i]); |
167 | for (j = 0; j < values->counters; j++) | 167 | for (j = 0; j < values->counters; j++) |
168 | fprintf(fp, " %*Lu", | 168 | fprintf(fp, " %*" PRIu64, |
169 | counterwidth[j], values->value[i][j]); | 169 | counterwidth[j], values->value[i][j]); |
170 | fprintf(fp, "\n"); | 170 | fprintf(fp, "\n"); |
171 | } | 171 | } |
@@ -196,13 +196,13 @@ static void perf_read_values__display_raw(FILE *fp, | |||
196 | width = strlen(values->countername[j]); | 196 | width = strlen(values->countername[j]); |
197 | if (width > namewidth) | 197 | if (width > namewidth) |
198 | namewidth = width; | 198 | namewidth = width; |
199 | width = snprintf(NULL, 0, "%llx", values->counterrawid[j]); | 199 | width = snprintf(NULL, 0, "%" PRIx64, values->counterrawid[j]); |
200 | if (width > rawwidth) | 200 | if (width > rawwidth) |
201 | rawwidth = width; | 201 | rawwidth = width; |
202 | } | 202 | } |
203 | for (i = 0; i < values->threads; i++) { | 203 | for (i = 0; i < values->threads; i++) { |
204 | for (j = 0; j < values->counters; j++) { | 204 | for (j = 0; j < values->counters; j++) { |
205 | width = snprintf(NULL, 0, "%Lu", values->value[i][j]); | 205 | width = snprintf(NULL, 0, "%" PRIu64, values->value[i][j]); |
206 | if (width > countwidth) | 206 | if (width > countwidth) |
207 | countwidth = width; | 207 | countwidth = width; |
208 | } | 208 | } |
@@ -214,7 +214,7 @@ static void perf_read_values__display_raw(FILE *fp, | |||
214 | countwidth, "Count"); | 214 | countwidth, "Count"); |
215 | for (i = 0; i < values->threads; i++) | 215 | for (i = 0; i < values->threads; i++) |
216 | for (j = 0; j < values->counters; j++) | 216 | for (j = 0; j < values->counters; j++) |
217 | fprintf(fp, " %*d %*d %*s %*llx %*Lu\n", | 217 | fprintf(fp, " %*d %*d %*s %*" PRIx64 " %*" PRIu64, |
218 | pidwidth, values->pid[i], | 218 | pidwidth, values->pid[i], |
219 | tidwidth, values->tid[i], | 219 | tidwidth, values->tid[i], |
220 | namewidth, values->countername[j], | 220 | namewidth, values->countername[j], |
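The values.c hunks swap %Lu/%llx for the <inttypes.h> PRIu64/PRIx64 macros, which expand to the correct length modifier for a 64-bit integer on both 32-bit and 64-bit builds instead of assuming long long. For reference:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t count = 18446744073709551615ULL;	/* UINT64_MAX */

	printf("dec:    %" PRIu64 "\n", count);
	printf("hex:    %#" PRIx64 "\n", count);
	printf("padded: %*" PRIu64 "\n", 24, count);	/* field width, as in the table code */
	return 0;
}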
diff --git a/tools/perf/util/xyarray.c b/tools/perf/util/xyarray.c new file mode 100644 index 000000000000..22afbf6c536a --- /dev/null +++ b/tools/perf/util/xyarray.c | |||
@@ -0,0 +1,20 @@ | |||
1 | #include "xyarray.h" | ||
2 | #include "util.h" | ||
3 | |||
4 | struct xyarray *xyarray__new(int xlen, int ylen, size_t entry_size) | ||
5 | { | ||
6 | size_t row_size = ylen * entry_size; | ||
7 | struct xyarray *xy = zalloc(sizeof(*xy) + xlen * row_size); | ||
8 | |||
9 | if (xy != NULL) { | ||
10 | xy->entry_size = entry_size; | ||
11 | xy->row_size = row_size; | ||
12 | } | ||
13 | |||
14 | return xy; | ||
15 | } | ||
16 | |||
17 | void xyarray__delete(struct xyarray *xy) | ||
18 | { | ||
19 | free(xy); | ||
20 | } | ||
diff --git a/tools/perf/util/xyarray.h b/tools/perf/util/xyarray.h new file mode 100644 index 000000000000..c488a07275dd --- /dev/null +++ b/tools/perf/util/xyarray.h | |||
@@ -0,0 +1,20 @@ | |||
1 | #ifndef _PERF_XYARRAY_H_ | ||
2 | #define _PERF_XYARRAY_H_ 1 | ||
3 | |||
4 | #include <sys/types.h> | ||
5 | |||
6 | struct xyarray { | ||
7 | size_t row_size; | ||
8 | size_t entry_size; | ||
9 | char contents[]; | ||
10 | }; | ||
11 | |||
12 | struct xyarray *xyarray__new(int xlen, int ylen, size_t entry_size); | ||
13 | void xyarray__delete(struct xyarray *xy); | ||
14 | |||
15 | static inline void *xyarray__entry(struct xyarray *xy, int x, int y) | ||
16 | { | ||
17 | return &xy->contents[x * xy->row_size + y * xy->entry_size]; | ||
18 | } | ||
19 | |||
20 | #endif /* _PERF_XYARRAY_H_ */ | ||
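A usage sketch for the new xyarray: it is a flat 2-D table addressed by (x, y) with a caller-chosen entry size, the kind of structure perf can use for per-cpu/per-thread values. This assumes xyarray.c above (and the util.h it takes zalloc() from) is built alongside:

#include <stdio.h>
#include "xyarray.h"

int main(void)
{
	struct xyarray *fds = xyarray__new(2 /* cpus */, 3 /* threads */, sizeof(int));
	int cpu, thread;

	if (fds == NULL)
		return 1;

	for (cpu = 0; cpu < 2; cpu++)
		for (thread = 0; thread < 3; thread++)
			*(int *)xyarray__entry(fds, cpu, thread) = cpu * 10 + thread;

	printf("entry[1][2] = %d\n", *(int *)xyarray__entry(fds, 1, 2));	/* 12 */
	xyarray__delete(fds);
	return 0;
}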