author		Linus Torvalds <torvalds@linux-foundation.org>	2011-03-15 21:31:30 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-03-15 21:31:30 -0400
commit		a926021cb1f8a99a275eaf6eb546102e9469dc59 (patch)
tree		c6d0300cd4b1a1fd658708476db4577b68b4de31 /tools/perf/util
parent		0586bed3e8563c2eb89bc7256e30ce633ae06cfb (diff)
parent		5e814dd597c42daeb8d2a276e64a6ec986ad0e2a (diff)
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (184 commits)
perf probe: Clean up probe_point_lazy_walker() return value
tracing: Fix irqoff selftest expanding max buffer
tracing: Align 4 byte ints together in struct tracer
tracing: Export trace_set_clr_event()
tracing: Explain about unstable clock on resume with ring buffer warning
ftrace/graph: Trace function entry before updating index
ftrace: Add .ref.text as one of the safe areas to trace
tracing: Adjust conditional expression latency formatting.
tracing: Fix event alignment: skb:kfree_skb
tracing: Fix event alignment: mce:mce_record
tracing: Fix event alignment: kvm:kvm_hv_hypercall
tracing: Fix event alignment: module:module_request
tracing: Fix event alignment: ftrace:context_switch and ftrace:wakeup
tracing: Remove lock_depth from event entry
perf header: Stop using 'self'
perf session: Use evlist/evsel for managing perf.data attributes
perf top: Don't let events to eat up whole header line
perf top: Fix events overflow in top command
ring-buffer: Remove unused #include <linux/trace_irq.h>
tracing: Add an 'overwrite' trace_option.
...
Diffstat (limited to 'tools/perf/util')
57 files changed, 5072 insertions, 1821 deletions
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
new file mode 100644
index 000000000000..0d0830c98cd7
--- /dev/null
+++ b/tools/perf/util/annotate.c
@@ -0,0 +1,605 @@
1 | /* | ||
2 | * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com> | ||
3 | * | ||
4 | * Parts came from builtin-annotate.c, see those files for further | ||
5 | * copyright notes. | ||
6 | * | ||
7 | * Released under the GPL v2. (and only v2, not any later version) | ||
8 | */ | ||
9 | |||
10 | #include "util.h" | ||
11 | #include "build-id.h" | ||
12 | #include "color.h" | ||
13 | #include "cache.h" | ||
14 | #include "symbol.h" | ||
15 | #include "debug.h" | ||
16 | #include "annotate.h" | ||
17 | #include <pthread.h> | ||
18 | |||
19 | int symbol__annotate_init(struct map *map __used, struct symbol *sym) | ||
20 | { | ||
21 | struct annotation *notes = symbol__annotation(sym); | ||
22 | pthread_mutex_init(¬es->lock, NULL); | ||
23 | return 0; | ||
24 | } | ||
25 | |||
26 | int symbol__alloc_hist(struct symbol *sym, int nevents) | ||
27 | { | ||
28 | struct annotation *notes = symbol__annotation(sym); | ||
29 | size_t sizeof_sym_hist = (sizeof(struct sym_hist) + | ||
30 | (sym->end - sym->start) * sizeof(u64)); | ||
31 | |||
32 | notes->src = zalloc(sizeof(*notes->src) + nevents * sizeof_sym_hist); | ||
33 | if (notes->src == NULL) | ||
34 | return -1; | ||
35 | notes->src->sizeof_sym_hist = sizeof_sym_hist; | ||
36 | notes->src->nr_histograms = nevents; | ||
37 | INIT_LIST_HEAD(¬es->src->source); | ||
38 | return 0; | ||
39 | } | ||
40 | |||
41 | void symbol__annotate_zero_histograms(struct symbol *sym) | ||
42 | { | ||
43 | struct annotation *notes = symbol__annotation(sym); | ||
44 | |||
45 | pthread_mutex_lock(¬es->lock); | ||
46 | if (notes->src != NULL) | ||
47 | memset(notes->src->histograms, 0, | ||
48 | notes->src->nr_histograms * notes->src->sizeof_sym_hist); | ||
49 | pthread_mutex_unlock(¬es->lock); | ||
50 | } | ||
51 | |||
52 | int symbol__inc_addr_samples(struct symbol *sym, struct map *map, | ||
53 | int evidx, u64 addr) | ||
54 | { | ||
55 | unsigned offset; | ||
56 | struct annotation *notes; | ||
57 | struct sym_hist *h; | ||
58 | |||
59 | notes = symbol__annotation(sym); | ||
60 | if (notes->src == NULL) | ||
61 | return -ENOMEM; | ||
62 | |||
63 | pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map->unmap_ip(map, addr)); | ||
64 | |||
65 | if (addr >= sym->end) | ||
66 | return 0; | ||
67 | |||
68 | offset = addr - sym->start; | ||
69 | h = annotation__histogram(notes, evidx); | ||
70 | h->sum++; | ||
71 | h->addr[offset]++; | ||
72 | |||
73 | pr_debug3("%#" PRIx64 " %s: period++ [addr: %#" PRIx64 ", %#" PRIx64 | ||
74 | ", evidx=%d] => %" PRIu64 "\n", sym->start, sym->name, | ||
75 | addr, addr - sym->start, evidx, h->addr[offset]); | ||
76 | return 0; | ||
77 | } | ||
78 | |||
79 | static struct objdump_line *objdump_line__new(s64 offset, char *line, size_t privsize) | ||
80 | { | ||
81 | struct objdump_line *self = malloc(sizeof(*self) + privsize); | ||
82 | |||
83 | if (self != NULL) { | ||
84 | self->offset = offset; | ||
85 | self->line = line; | ||
86 | } | ||
87 | |||
88 | return self; | ||
89 | } | ||
90 | |||
91 | void objdump_line__free(struct objdump_line *self) | ||
92 | { | ||
93 | free(self->line); | ||
94 | free(self); | ||
95 | } | ||
96 | |||
97 | static void objdump__add_line(struct list_head *head, struct objdump_line *line) | ||
98 | { | ||
99 | list_add_tail(&line->node, head); | ||
100 | } | ||
101 | |||
102 | struct objdump_line *objdump__get_next_ip_line(struct list_head *head, | ||
103 | struct objdump_line *pos) | ||
104 | { | ||
105 | list_for_each_entry_continue(pos, head, node) | ||
106 | if (pos->offset >= 0) | ||
107 | return pos; | ||
108 | |||
109 | return NULL; | ||
110 | } | ||
111 | |||
112 | static int objdump_line__print(struct objdump_line *oline, struct symbol *sym, | ||
113 | int evidx, u64 len, int min_pcnt, | ||
114 | int printed, int max_lines, | ||
115 | struct objdump_line *queue) | ||
116 | { | ||
117 | static const char *prev_line; | ||
118 | static const char *prev_color; | ||
119 | |||
120 | if (oline->offset != -1) { | ||
121 | const char *path = NULL; | ||
122 | unsigned int hits = 0; | ||
123 | double percent = 0.0; | ||
124 | const char *color; | ||
125 | struct annotation *notes = symbol__annotation(sym); | ||
126 | struct source_line *src_line = notes->src->lines; | ||
127 | struct sym_hist *h = annotation__histogram(notes, evidx); | ||
128 | s64 offset = oline->offset; | ||
129 | struct objdump_line *next; | ||
130 | |||
131 | next = objdump__get_next_ip_line(¬es->src->source, oline); | ||
132 | |||
133 | while (offset < (s64)len && | ||
134 | (next == NULL || offset < next->offset)) { | ||
135 | if (src_line) { | ||
136 | if (path == NULL) | ||
137 | path = src_line[offset].path; | ||
138 | percent += src_line[offset].percent; | ||
139 | } else | ||
140 | hits += h->addr[offset]; | ||
141 | |||
142 | ++offset; | ||
143 | } | ||
144 | |||
145 | if (src_line == NULL && h->sum) | ||
146 | percent = 100.0 * hits / h->sum; | ||
147 | |||
148 | if (percent < min_pcnt) | ||
149 | return -1; | ||
150 | |||
151 | if (max_lines && printed >= max_lines) | ||
152 | return 1; | ||
153 | |||
154 | if (queue != NULL) { | ||
155 | list_for_each_entry_from(queue, ¬es->src->source, node) { | ||
156 | if (queue == oline) | ||
157 | break; | ||
158 | objdump_line__print(queue, sym, evidx, len, | ||
159 | 0, 0, 1, NULL); | ||
160 | } | ||
161 | } | ||
162 | |||
163 | color = get_percent_color(percent); | ||
164 | |||
165 | /* | ||
166 | * Also color the filename and line if needed, with | ||
167 | * the same color as the percentage. Don't print it | ||
168 | * twice for nearby colored addresses with the same filename:line | ||
169 | */ | ||
170 | if (path) { | ||
171 | if (!prev_line || strcmp(prev_line, path) | ||
172 | || color != prev_color) { | ||
173 | color_fprintf(stdout, color, " %s", path); | ||
174 | prev_line = path; | ||
175 | prev_color = color; | ||
176 | } | ||
177 | } | ||
178 | |||
179 | color_fprintf(stdout, color, " %7.2f", percent); | ||
180 | printf(" : "); | ||
181 | color_fprintf(stdout, PERF_COLOR_BLUE, "%s\n", oline->line); | ||
182 | } else if (max_lines && printed >= max_lines) | ||
183 | return 1; | ||
184 | else { | ||
185 | if (queue) | ||
186 | return -1; | ||
187 | |||
188 | if (!*oline->line) | ||
189 | printf(" :\n"); | ||
190 | else | ||
191 | printf(" : %s\n", oline->line); | ||
192 | } | ||
193 | |||
194 | return 0; | ||
195 | } | ||
196 | |||
197 | static int symbol__parse_objdump_line(struct symbol *sym, struct map *map, | ||
198 | FILE *file, size_t privsize) | ||
199 | { | ||
200 | struct annotation *notes = symbol__annotation(sym); | ||
201 | struct objdump_line *objdump_line; | ||
202 | char *line = NULL, *tmp, *tmp2, *c; | ||
203 | size_t line_len; | ||
204 | s64 line_ip, offset = -1; | ||
205 | |||
206 | if (getline(&line, &line_len, file) < 0) | ||
207 | return -1; | ||
208 | |||
209 | if (!line) | ||
210 | return -1; | ||
211 | |||
212 | while (line_len != 0 && isspace(line[line_len - 1])) | ||
213 | line[--line_len] = '\0'; | ||
214 | |||
215 | c = strchr(line, '\n'); | ||
216 | if (c) | ||
217 | *c = 0; | ||
218 | |||
219 | line_ip = -1; | ||
220 | |||
221 | /* | ||
222 | * Strip leading spaces: | ||
223 | */ | ||
224 | tmp = line; | ||
225 | while (*tmp) { | ||
226 | if (*tmp != ' ') | ||
227 | break; | ||
228 | tmp++; | ||
229 | } | ||
230 | |||
231 | if (*tmp) { | ||
232 | /* | ||
233 | * Parse hexadecimal addresses followed by ':' | ||
234 | */ | ||
235 | line_ip = strtoull(tmp, &tmp2, 16); | ||
236 | if (*tmp2 != ':' || tmp == tmp2 || tmp2[1] == '\0') | ||
237 | line_ip = -1; | ||
238 | } | ||
239 | |||
240 | if (line_ip != -1) { | ||
241 | u64 start = map__rip_2objdump(map, sym->start), | ||
242 | end = map__rip_2objdump(map, sym->end); | ||
243 | |||
244 | offset = line_ip - start; | ||
245 | if (offset < 0 || (u64)line_ip > end) | ||
246 | offset = -1; | ||
247 | } | ||
248 | |||
249 | objdump_line = objdump_line__new(offset, line, privsize); | ||
250 | if (objdump_line == NULL) { | ||
251 | free(line); | ||
252 | return -1; | ||
253 | } | ||
254 | objdump__add_line(¬es->src->source, objdump_line); | ||
255 | |||
256 | return 0; | ||
257 | } | ||
258 | |||
259 | int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize) | ||
260 | { | ||
261 | struct dso *dso = map->dso; | ||
262 | char *filename = dso__build_id_filename(dso, NULL, 0); | ||
263 | bool free_filename = true; | ||
264 | char command[PATH_MAX * 2]; | ||
265 | FILE *file; | ||
266 | int err = 0; | ||
267 | char symfs_filename[PATH_MAX]; | ||
268 | |||
269 | if (filename) { | ||
270 | snprintf(symfs_filename, sizeof(symfs_filename), "%s%s", | ||
271 | symbol_conf.symfs, filename); | ||
272 | } | ||
273 | |||
274 | if (filename == NULL) { | ||
275 | if (dso->has_build_id) { | ||
276 | pr_err("Can't annotate %s: not enough memory\n", | ||
277 | sym->name); | ||
278 | return -ENOMEM; | ||
279 | } | ||
280 | goto fallback; | ||
281 | } else if (readlink(symfs_filename, command, sizeof(command)) < 0 || | ||
282 | strstr(command, "[kernel.kallsyms]") || | ||
283 | access(symfs_filename, R_OK)) { | ||
284 | free(filename); | ||
285 | fallback: | ||
286 | /* | ||
287 | * If we don't have build-ids or the build-id file isn't in the | ||
288 | * cache, or is just a kallsyms file, well, let's hope that this | ||
289 | * DSO is the same as when 'perf record' ran. | ||
290 | */ | ||
291 | filename = dso->long_name; | ||
292 | snprintf(symfs_filename, sizeof(symfs_filename), "%s%s", | ||
293 | symbol_conf.symfs, filename); | ||
294 | free_filename = false; | ||
295 | } | ||
296 | |||
297 | if (dso->origin == DSO__ORIG_KERNEL) { | ||
298 | char bf[BUILD_ID_SIZE * 2 + 16] = " with build id "; | ||
299 | char *build_id_msg = NULL; | ||
300 | |||
301 | if (dso->annotate_warned) | ||
302 | goto out_free_filename; | ||
303 | |||
304 | if (dso->has_build_id) { | ||
305 | build_id__sprintf(dso->build_id, | ||
306 | sizeof(dso->build_id), bf + 15); | ||
307 | build_id_msg = bf; | ||
308 | } | ||
309 | err = -ENOENT; | ||
310 | dso->annotate_warned = 1; | ||
311 | pr_err("Can't annotate %s: No vmlinux file%s was found in the " | ||
312 | "path.\nPlease use 'perf buildid-cache -av vmlinux' or " | ||
313 | "--vmlinux vmlinux.\n", | ||
314 | sym->name, build_id_msg ?: ""); | ||
315 | goto out_free_filename; | ||
316 | } | ||
317 | |||
318 | pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__, | ||
319 | filename, sym->name, map->unmap_ip(map, sym->start), | ||
320 | map->unmap_ip(map, sym->end)); | ||
321 | |||
322 | pr_debug("annotating [%p] %30s : [%p] %30s\n", | ||
323 | dso, dso->long_name, sym, sym->name); | ||
324 | |||
325 | snprintf(command, sizeof(command), | ||
326 | "objdump --start-address=0x%016" PRIx64 | ||
327 | " --stop-address=0x%016" PRIx64 " -dS -C %s|grep -v %s|expand", | ||
328 | map__rip_2objdump(map, sym->start), | ||
329 | map__rip_2objdump(map, sym->end), | ||
330 | symfs_filename, filename); | ||
331 | |||
332 | pr_debug("Executing: %s\n", command); | ||
333 | |||
334 | file = popen(command, "r"); | ||
335 | if (!file) | ||
336 | goto out_free_filename; | ||
337 | |||
338 | while (!feof(file)) | ||
339 | if (symbol__parse_objdump_line(sym, map, file, privsize) < 0) | ||
340 | break; | ||
341 | |||
342 | pclose(file); | ||
343 | out_free_filename: | ||
344 | if (free_filename) | ||
345 | free(filename); | ||
346 | return err; | ||
347 | } | ||
348 | |||
349 | static void insert_source_line(struct rb_root *root, struct source_line *src_line) | ||
350 | { | ||
351 | struct source_line *iter; | ||
352 | struct rb_node **p = &root->rb_node; | ||
353 | struct rb_node *parent = NULL; | ||
354 | |||
355 | while (*p != NULL) { | ||
356 | parent = *p; | ||
357 | iter = rb_entry(parent, struct source_line, node); | ||
358 | |||
359 | if (src_line->percent > iter->percent) | ||
360 | p = &(*p)->rb_left; | ||
361 | else | ||
362 | p = &(*p)->rb_right; | ||
363 | } | ||
364 | |||
365 | rb_link_node(&src_line->node, parent, p); | ||
366 | rb_insert_color(&src_line->node, root); | ||
367 | } | ||
368 | |||
369 | static void symbol__free_source_line(struct symbol *sym, int len) | ||
370 | { | ||
371 | struct annotation *notes = symbol__annotation(sym); | ||
372 | struct source_line *src_line = notes->src->lines; | ||
373 | int i; | ||
374 | |||
375 | for (i = 0; i < len; i++) | ||
376 | free(src_line[i].path); | ||
377 | |||
378 | free(src_line); | ||
379 | notes->src->lines = NULL; | ||
380 | } | ||
381 | |||
382 | /* Get the filename:line for the colored entries */ | ||
383 | static int symbol__get_source_line(struct symbol *sym, struct map *map, | ||
384 | int evidx, struct rb_root *root, int len, | ||
385 | const char *filename) | ||
386 | { | ||
387 | u64 start; | ||
388 | int i; | ||
389 | char cmd[PATH_MAX * 2]; | ||
390 | struct source_line *src_line; | ||
391 | struct annotation *notes = symbol__annotation(sym); | ||
392 | struct sym_hist *h = annotation__histogram(notes, evidx); | ||
393 | |||
394 | if (!h->sum) | ||
395 | return 0; | ||
396 | |||
397 | src_line = notes->src->lines = calloc(len, sizeof(struct source_line)); | ||
398 | if (!notes->src->lines) | ||
399 | return -1; | ||
400 | |||
401 | start = map->unmap_ip(map, sym->start); | ||
402 | |||
403 | for (i = 0; i < len; i++) { | ||
404 | char *path = NULL; | ||
405 | size_t line_len; | ||
406 | u64 offset; | ||
407 | FILE *fp; | ||
408 | |||
409 | src_line[i].percent = 100.0 * h->addr[i] / h->sum; | ||
410 | if (src_line[i].percent <= 0.5) | ||
411 | continue; | ||
412 | |||
413 | offset = start + i; | ||
414 | sprintf(cmd, "addr2line -e %s %016" PRIx64, filename, offset); | ||
415 | fp = popen(cmd, "r"); | ||
416 | if (!fp) | ||
417 | continue; | ||
418 | |||
419 | if (getline(&path, &line_len, fp) < 0 || !line_len) | ||
420 | goto next; | ||
421 | |||
422 | src_line[i].path = malloc(sizeof(char) * line_len + 1); | ||
423 | if (!src_line[i].path) | ||
424 | goto next; | ||
425 | |||
426 | strcpy(src_line[i].path, path); | ||
427 | insert_source_line(root, &src_line[i]); | ||
428 | |||
429 | next: | ||
430 | pclose(fp); | ||
431 | } | ||
432 | |||
433 | return 0; | ||
434 | } | ||
435 | |||
436 | static void print_summary(struct rb_root *root, const char *filename) | ||
437 | { | ||
438 | struct source_line *src_line; | ||
439 | struct rb_node *node; | ||
440 | |||
441 | printf("\nSorted summary for file %s\n", filename); | ||
442 | printf("----------------------------------------------\n\n"); | ||
443 | |||
444 | if (RB_EMPTY_ROOT(root)) { | ||
445 | printf(" Nothing higher than %1.1f%%\n", MIN_GREEN); | ||
446 | return; | ||
447 | } | ||
448 | |||
449 | node = rb_first(root); | ||
450 | while (node) { | ||
451 | double percent; | ||
452 | const char *color; | ||
453 | char *path; | ||
454 | |||
455 | src_line = rb_entry(node, struct source_line, node); | ||
456 | percent = src_line->percent; | ||
457 | color = get_percent_color(percent); | ||
458 | path = src_line->path; | ||
459 | |||
460 | color_fprintf(stdout, color, " %7.2f %s", percent, path); | ||
461 | node = rb_next(node); | ||
462 | } | ||
463 | } | ||
464 | |||
465 | static void symbol__annotate_hits(struct symbol *sym, int evidx) | ||
466 | { | ||
467 | struct annotation *notes = symbol__annotation(sym); | ||
468 | struct sym_hist *h = annotation__histogram(notes, evidx); | ||
469 | u64 len = sym->end - sym->start, offset; | ||
470 | |||
471 | for (offset = 0; offset < len; ++offset) | ||
472 | if (h->addr[offset] != 0) | ||
473 | printf("%*" PRIx64 ": %" PRIu64 "\n", BITS_PER_LONG / 2, | ||
474 | sym->start + offset, h->addr[offset]); | ||
475 | printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->sum", h->sum); | ||
476 | } | ||
477 | |||
478 | int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx, | ||
479 | bool full_paths, int min_pcnt, int max_lines, | ||
480 | int context) | ||
481 | { | ||
482 | struct dso *dso = map->dso; | ||
483 | const char *filename = dso->long_name, *d_filename; | ||
484 | struct annotation *notes = symbol__annotation(sym); | ||
485 | struct objdump_line *pos, *queue = NULL; | ||
486 | int printed = 2, queue_len = 0; | ||
487 | int more = 0; | ||
488 | u64 len; | ||
489 | |||
490 | if (full_paths) | ||
491 | d_filename = filename; | ||
492 | else | ||
493 | d_filename = basename(filename); | ||
494 | |||
495 | len = sym->end - sym->start; | ||
496 | |||
497 | printf(" Percent | Source code & Disassembly of %s\n", d_filename); | ||
498 | printf("------------------------------------------------\n"); | ||
499 | |||
500 | if (verbose) | ||
501 | symbol__annotate_hits(sym, evidx); | ||
502 | |||
503 | list_for_each_entry(pos, ¬es->src->source, node) { | ||
504 | if (context && queue == NULL) { | ||
505 | queue = pos; | ||
506 | queue_len = 0; | ||
507 | } | ||
508 | |||
509 | switch (objdump_line__print(pos, sym, evidx, len, min_pcnt, | ||
510 | printed, max_lines, queue)) { | ||
511 | case 0: | ||
512 | ++printed; | ||
513 | if (context) { | ||
514 | printed += queue_len; | ||
515 | queue = NULL; | ||
516 | queue_len = 0; | ||
517 | } | ||
518 | break; | ||
519 | case 1: | ||
520 | /* filtered by max_lines */ | ||
521 | ++more; | ||
522 | break; | ||
523 | case -1: | ||
524 | default: | ||
525 | /* | ||
526 | * Filtered by min_pcnt or non IP lines when | ||
527 | * context != 0 | ||
528 | */ | ||
529 | if (!context) | ||
530 | break; | ||
531 | if (queue_len == context) | ||
532 | queue = list_entry(queue->node.next, typeof(*queue), node); | ||
533 | else | ||
534 | ++queue_len; | ||
535 | break; | ||
536 | } | ||
537 | } | ||
538 | |||
539 | return more; | ||
540 | } | ||
541 | |||
542 | void symbol__annotate_zero_histogram(struct symbol *sym, int evidx) | ||
543 | { | ||
544 | struct annotation *notes = symbol__annotation(sym); | ||
545 | struct sym_hist *h = annotation__histogram(notes, evidx); | ||
546 | |||
547 | memset(h, 0, notes->src->sizeof_sym_hist); | ||
548 | } | ||
549 | |||
550 | void symbol__annotate_decay_histogram(struct symbol *sym, int evidx) | ||
551 | { | ||
552 | struct annotation *notes = symbol__annotation(sym); | ||
553 | struct sym_hist *h = annotation__histogram(notes, evidx); | ||
554 | struct objdump_line *pos; | ||
555 | int len = sym->end - sym->start; | ||
556 | |||
557 | h->sum = 0; | ||
558 | |||
559 | list_for_each_entry(pos, ¬es->src->source, node) { | ||
560 | if (pos->offset != -1 && pos->offset < len) { | ||
561 | h->addr[pos->offset] = h->addr[pos->offset] * 7 / 8; | ||
562 | h->sum += h->addr[pos->offset]; | ||
563 | } | ||
564 | } | ||
565 | } | ||
566 | |||
567 | void objdump_line_list__purge(struct list_head *head) | ||
568 | { | ||
569 | struct objdump_line *pos, *n; | ||
570 | |||
571 | list_for_each_entry_safe(pos, n, head, node) { | ||
572 | list_del(&pos->node); | ||
573 | objdump_line__free(pos); | ||
574 | } | ||
575 | } | ||
576 | |||
577 | int symbol__tty_annotate(struct symbol *sym, struct map *map, int evidx, | ||
578 | bool print_lines, bool full_paths, int min_pcnt, | ||
579 | int max_lines) | ||
580 | { | ||
581 | struct dso *dso = map->dso; | ||
582 | const char *filename = dso->long_name; | ||
583 | struct rb_root source_line = RB_ROOT; | ||
584 | u64 len; | ||
585 | |||
586 | if (symbol__annotate(sym, map, 0) < 0) | ||
587 | return -1; | ||
588 | |||
589 | len = sym->end - sym->start; | ||
590 | |||
591 | if (print_lines) { | ||
592 | symbol__get_source_line(sym, map, evidx, &source_line, | ||
593 | len, filename); | ||
594 | print_summary(&source_line, filename); | ||
595 | } | ||
596 | |||
597 | symbol__annotate_printf(sym, map, evidx, full_paths, | ||
598 | min_pcnt, max_lines, 0); | ||
599 | if (print_lines) | ||
600 | symbol__free_source_line(sym, len); | ||
601 | |||
602 | objdump_line_list__purge(&symbol__annotation(sym)->src->source); | ||
603 | |||
604 | return 0; | ||
605 | } | ||
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
new file mode 100644
index 000000000000..c2c286896801
--- /dev/null
+++ b/tools/perf/util/annotate.h
@@ -0,0 +1,103 @@
1 | #ifndef __PERF_ANNOTATE_H | ||
2 | #define __PERF_ANNOTATE_H | ||
3 | |||
4 | #include <stdbool.h> | ||
5 | #include "types.h" | ||
6 | #include "symbol.h" | ||
7 | #include <linux/list.h> | ||
8 | #include <linux/rbtree.h> | ||
9 | |||
10 | struct objdump_line { | ||
11 | struct list_head node; | ||
12 | s64 offset; | ||
13 | char *line; | ||
14 | }; | ||
15 | |||
16 | void objdump_line__free(struct objdump_line *self); | ||
17 | struct objdump_line *objdump__get_next_ip_line(struct list_head *head, | ||
18 | struct objdump_line *pos); | ||
19 | |||
20 | struct sym_hist { | ||
21 | u64 sum; | ||
22 | u64 addr[0]; | ||
23 | }; | ||
24 | |||
25 | struct source_line { | ||
26 | struct rb_node node; | ||
27 | double percent; | ||
28 | char *path; | ||
29 | }; | ||
30 | |||
31 | /** struct annotated_source - symbols with hits have this attached, embedded in struct sannotation | ||
32 | * | ||
33 | * @histogram: Array of addr hit histograms per event being monitored | ||
34 | * @lines: If 'print_lines' is specified, per source code line percentages | ||
35 | * @source: source parsed from objdump -dS | ||
36 | * | ||
37 | * lines is allocated, percentages calculated and all sorted by percentage | ||
38 | * when the annotation is about to be presented, so the percentages are for | ||
39 | * one of the entries in the histogram array, i.e. for the event/counter being | ||
40 | * presented. It is deallocated right after symbol__{tui,tty,etc}_annotate | ||
41 | * returns. | ||
42 | */ | ||
43 | struct annotated_source { | ||
44 | struct list_head source; | ||
45 | struct source_line *lines; | ||
46 | int nr_histograms; | ||
47 | int sizeof_sym_hist; | ||
48 | struct sym_hist histograms[0]; | ||
49 | }; | ||
50 | |||
51 | struct annotation { | ||
52 | pthread_mutex_t lock; | ||
53 | struct annotated_source *src; | ||
54 | }; | ||
55 | |||
56 | struct sannotation { | ||
57 | struct annotation annotation; | ||
58 | struct symbol symbol; | ||
59 | }; | ||
60 | |||
61 | static inline struct sym_hist *annotation__histogram(struct annotation *notes, int idx) | ||
62 | { | ||
63 | return (((void *)¬es->src->histograms) + | ||
64 | (notes->src->sizeof_sym_hist * idx)); | ||
65 | } | ||
66 | |||
67 | static inline struct annotation *symbol__annotation(struct symbol *sym) | ||
68 | { | ||
69 | struct sannotation *a = container_of(sym, struct sannotation, symbol); | ||
70 | return &a->annotation; | ||
71 | } | ||
72 | |||
73 | int symbol__inc_addr_samples(struct symbol *sym, struct map *map, | ||
74 | int evidx, u64 addr); | ||
75 | int symbol__alloc_hist(struct symbol *sym, int nevents); | ||
76 | void symbol__annotate_zero_histograms(struct symbol *sym); | ||
77 | |||
78 | int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize); | ||
79 | int symbol__annotate_init(struct map *map __used, struct symbol *sym); | ||
80 | int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx, | ||
81 | bool full_paths, int min_pcnt, int max_lines, | ||
82 | int context); | ||
83 | void symbol__annotate_zero_histogram(struct symbol *sym, int evidx); | ||
84 | void symbol__annotate_decay_histogram(struct symbol *sym, int evidx); | ||
85 | void objdump_line_list__purge(struct list_head *head); | ||
86 | |||
87 | int symbol__tty_annotate(struct symbol *sym, struct map *map, int evidx, | ||
88 | bool print_lines, bool full_paths, int min_pcnt, | ||
89 | int max_lines); | ||
90 | |||
91 | #ifdef NO_NEWT_SUPPORT | ||
92 | static inline int symbol__tui_annotate(struct symbol *sym __used, | ||
93 | struct map *map __used, | ||
94 | int evidx __used, int refresh __used) | ||
95 | { | ||
96 | return 0; | ||
97 | } | ||
98 | #else | ||
99 | int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx, | ||
100 | int refresh); | ||
101 | #endif | ||
102 | |||
103 | #endif /* __PERF_ANNOTATE_H */ | ||
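Taken together, annotate.h defines the whole flow: allocate per-event histograms, bump them as samples arrive, then render the objdump listing. The following is a minimal illustrative sketch of that sequence, not part of this patch; it assumes the symbol was allocated with annotation space so that symbol__annotation()'s container_of is valid, and uses a single event at index 0 with error handling elided.

/* Illustrative only: drive the annotation API for one symbol. */
#include "annotate.h"

static int annotate_one_symbol(struct symbol *sym, struct map *map, u64 addr)
{
	/* One sym_hist per monitored event; here just one. */
	if (symbol__alloc_hist(sym, 1) < 0)
		return -1;

	/* Per sample: addr is in the same address space as sym->start. */
	if (symbol__inc_addr_samples(sym, map, 0, addr) < 0)
		return -1;

	/* Run objdump, parse its output and print the annotated listing. */
	return symbol__tty_annotate(sym, map, 0, false, false, 0, 0);
}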
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index deffb8c96071..31f934af9861 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -14,8 +14,8 @@
14 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
15 | #include "debug.h" | 15 | #include "debug.h" |
16 | 16 | ||
17 | static int build_id__mark_dso_hit(event_t *event, | 17 | static int build_id__mark_dso_hit(union perf_event *event, |
18 | struct sample_data *sample __used, | 18 | struct perf_sample *sample __used, |
19 | struct perf_session *session) | 19 | struct perf_session *session) |
20 | { | 20 | { |
21 | struct addr_location al; | 21 | struct addr_location al; |
@@ -37,13 +37,14 @@ static int build_id__mark_dso_hit(event_t *event, | |||
37 | return 0; | 37 | return 0; |
38 | } | 38 | } |
39 | 39 | ||
40 | static int event__exit_del_thread(event_t *self, struct sample_data *sample __used, | 40 | static int perf_event__exit_del_thread(union perf_event *event, |
41 | struct perf_session *session) | 41 | struct perf_sample *sample __used, |
42 | struct perf_session *session) | ||
42 | { | 43 | { |
43 | struct thread *thread = perf_session__findnew(session, self->fork.tid); | 44 | struct thread *thread = perf_session__findnew(session, event->fork.tid); |
44 | 45 | ||
45 | dump_printf("(%d:%d):(%d:%d)\n", self->fork.pid, self->fork.tid, | 46 | dump_printf("(%d:%d):(%d:%d)\n", event->fork.pid, event->fork.tid, |
46 | self->fork.ppid, self->fork.ptid); | 47 | event->fork.ppid, event->fork.ptid); |
47 | 48 | ||
48 | if (thread) { | 49 | if (thread) { |
49 | rb_erase(&thread->rb_node, &session->threads); | 50 | rb_erase(&thread->rb_node, &session->threads); |
@@ -56,9 +57,9 @@ static int event__exit_del_thread(event_t *self, struct sample_data *sample __us | |||
56 | 57 | ||
57 | struct perf_event_ops build_id__mark_dso_hit_ops = { | 58 | struct perf_event_ops build_id__mark_dso_hit_ops = { |
58 | .sample = build_id__mark_dso_hit, | 59 | .sample = build_id__mark_dso_hit, |
59 | .mmap = event__process_mmap, | 60 | .mmap = perf_event__process_mmap, |
60 | .fork = event__process_task, | 61 | .fork = perf_event__process_task, |
61 | .exit = event__exit_del_thread, | 62 | .exit = perf_event__exit_del_thread, |
62 | }; | 63 | }; |
63 | 64 | ||
64 | char *dso__build_id_filename(struct dso *self, char *bf, size_t size) | 65 | char *dso__build_id_filename(struct dso *self, char *bf, size_t size) |
diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h
index a7729797fd96..fc5e5a09d5b9 100644
--- a/tools/perf/util/cache.h
+++ b/tools/perf/util/cache.h
@@ -34,13 +34,14 @@ extern int pager_use_color;
34 | extern int use_browser; | 34 | extern int use_browser; |
35 | 35 | ||
36 | #ifdef NO_NEWT_SUPPORT | 36 | #ifdef NO_NEWT_SUPPORT |
37 | static inline void setup_browser(void) | 37 | static inline void setup_browser(bool fallback_to_pager) |
38 | { | 38 | { |
39 | setup_pager(); | 39 | if (fallback_to_pager) |
40 | setup_pager(); | ||
40 | } | 41 | } |
41 | static inline void exit_browser(bool wait_for_ok __used) {} | 42 | static inline void exit_browser(bool wait_for_ok __used) {} |
42 | #else | 43 | #else |
43 | void setup_browser(void); | 44 | void setup_browser(bool fallback_to_pager); |
44 | void exit_browser(bool wait_for_ok); | 45 | void exit_browser(bool wait_for_ok); |
45 | #endif | 46 | #endif |
46 | 47 | ||
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index e12d539417b2..9f7106a8d9a4 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -1,5 +1,5 @@
1 | /* | 1 | /* |
2 | * Copyright (C) 2009-2010, Frederic Weisbecker <fweisbec@gmail.com> | 2 | * Copyright (C) 2009-2011, Frederic Weisbecker <fweisbec@gmail.com> |
3 | * | 3 | * |
4 | * Handle the callchains from the stream in an ad-hoc radix tree and then | 4 | * Handle the callchains from the stream in an ad-hoc radix tree and then |
5 | * sort them in an rbtree. | 5 | * sort them in an rbtree. |
@@ -18,7 +18,8 @@ | |||
18 | #include "util.h" | 18 | #include "util.h" |
19 | #include "callchain.h" | 19 | #include "callchain.h" |
20 | 20 | ||
21 | bool ip_callchain__valid(struct ip_callchain *chain, const event_t *event) | 21 | bool ip_callchain__valid(struct ip_callchain *chain, |
22 | const union perf_event *event) | ||
22 | { | 23 | { |
23 | unsigned int chain_size = event->header.size; | 24 | unsigned int chain_size = event->header.size; |
24 | chain_size -= (unsigned long)&event->ip.__more_data - (unsigned long)event; | 25 | chain_size -= (unsigned long)&event->ip.__more_data - (unsigned long)event; |
@@ -26,10 +27,10 @@ bool ip_callchain__valid(struct ip_callchain *chain, const event_t *event) | |||
26 | } | 27 | } |
27 | 28 | ||
28 | #define chain_for_each_child(child, parent) \ | 29 | #define chain_for_each_child(child, parent) \ |
29 | list_for_each_entry(child, &parent->children, brothers) | 30 | list_for_each_entry(child, &parent->children, siblings) |
30 | 31 | ||
31 | #define chain_for_each_child_safe(child, next, parent) \ | 32 | #define chain_for_each_child_safe(child, next, parent) \ |
32 | list_for_each_entry_safe(child, next, &parent->children, brothers) | 33 | list_for_each_entry_safe(child, next, &parent->children, siblings) |
33 | 34 | ||
34 | static void | 35 | static void |
35 | rb_insert_callchain(struct rb_root *root, struct callchain_node *chain, | 36 | rb_insert_callchain(struct rb_root *root, struct callchain_node *chain, |
@@ -38,14 +39,14 @@ rb_insert_callchain(struct rb_root *root, struct callchain_node *chain, | |||
38 | struct rb_node **p = &root->rb_node; | 39 | struct rb_node **p = &root->rb_node; |
39 | struct rb_node *parent = NULL; | 40 | struct rb_node *parent = NULL; |
40 | struct callchain_node *rnode; | 41 | struct callchain_node *rnode; |
41 | u64 chain_cumul = cumul_hits(chain); | 42 | u64 chain_cumul = callchain_cumul_hits(chain); |
42 | 43 | ||
43 | while (*p) { | 44 | while (*p) { |
44 | u64 rnode_cumul; | 45 | u64 rnode_cumul; |
45 | 46 | ||
46 | parent = *p; | 47 | parent = *p; |
47 | rnode = rb_entry(parent, struct callchain_node, rb_node); | 48 | rnode = rb_entry(parent, struct callchain_node, rb_node); |
48 | rnode_cumul = cumul_hits(rnode); | 49 | rnode_cumul = callchain_cumul_hits(rnode); |
49 | 50 | ||
50 | switch (mode) { | 51 | switch (mode) { |
51 | case CHAIN_FLAT: | 52 | case CHAIN_FLAT: |
@@ -104,7 +105,7 @@ static void __sort_chain_graph_abs(struct callchain_node *node, | |||
104 | 105 | ||
105 | chain_for_each_child(child, node) { | 106 | chain_for_each_child(child, node) { |
106 | __sort_chain_graph_abs(child, min_hit); | 107 | __sort_chain_graph_abs(child, min_hit); |
107 | if (cumul_hits(child) >= min_hit) | 108 | if (callchain_cumul_hits(child) >= min_hit) |
108 | rb_insert_callchain(&node->rb_root, child, | 109 | rb_insert_callchain(&node->rb_root, child, |
109 | CHAIN_GRAPH_ABS); | 110 | CHAIN_GRAPH_ABS); |
110 | } | 111 | } |
@@ -129,7 +130,7 @@ static void __sort_chain_graph_rel(struct callchain_node *node, | |||
129 | 130 | ||
130 | chain_for_each_child(child, node) { | 131 | chain_for_each_child(child, node) { |
131 | __sort_chain_graph_rel(child, min_percent); | 132 | __sort_chain_graph_rel(child, min_percent); |
132 | if (cumul_hits(child) >= min_hit) | 133 | if (callchain_cumul_hits(child) >= min_hit) |
133 | rb_insert_callchain(&node->rb_root, child, | 134 | rb_insert_callchain(&node->rb_root, child, |
134 | CHAIN_GRAPH_REL); | 135 | CHAIN_GRAPH_REL); |
135 | } | 136 | } |
@@ -143,7 +144,7 @@ sort_chain_graph_rel(struct rb_root *rb_root, struct callchain_root *chain_root, | |||
143 | rb_root->rb_node = chain_root->node.rb_root.rb_node; | 144 | rb_root->rb_node = chain_root->node.rb_root.rb_node; |
144 | } | 145 | } |
145 | 146 | ||
146 | int register_callchain_param(struct callchain_param *param) | 147 | int callchain_register_param(struct callchain_param *param) |
147 | { | 148 | { |
148 | switch (param->mode) { | 149 | switch (param->mode) { |
149 | case CHAIN_GRAPH_ABS: | 150 | case CHAIN_GRAPH_ABS: |
@@ -189,32 +190,27 @@ create_child(struct callchain_node *parent, bool inherit_children) | |||
189 | chain_for_each_child(next, new) | 190 | chain_for_each_child(next, new) |
190 | next->parent = new; | 191 | next->parent = new; |
191 | } | 192 | } |
192 | list_add_tail(&new->brothers, &parent->children); | 193 | list_add_tail(&new->siblings, &parent->children); |
193 | 194 | ||
194 | return new; | 195 | return new; |
195 | } | 196 | } |
196 | 197 | ||
197 | 198 | ||
198 | struct resolved_ip { | ||
199 | u64 ip; | ||
200 | struct map_symbol ms; | ||
201 | }; | ||
202 | |||
203 | struct resolved_chain { | ||
204 | u64 nr; | ||
205 | struct resolved_ip ips[0]; | ||
206 | }; | ||
207 | |||
208 | |||
209 | /* | 199 | /* |
210 | * Fill the node with callchain values | 200 | * Fill the node with callchain values |
211 | */ | 201 | */ |
212 | static void | 202 | static void |
213 | fill_node(struct callchain_node *node, struct resolved_chain *chain, int start) | 203 | fill_node(struct callchain_node *node, struct callchain_cursor *cursor) |
214 | { | 204 | { |
215 | unsigned int i; | 205 | struct callchain_cursor_node *cursor_node; |
206 | |||
207 | node->val_nr = cursor->nr - cursor->pos; | ||
208 | if (!node->val_nr) | ||
209 | pr_warning("Warning: empty node in callchain tree\n"); | ||
216 | 210 | ||
217 | for (i = start; i < chain->nr; i++) { | 211 | cursor_node = callchain_cursor_current(cursor); |
212 | |||
213 | while (cursor_node) { | ||
218 | struct callchain_list *call; | 214 | struct callchain_list *call; |
219 | 215 | ||
220 | call = zalloc(sizeof(*call)); | 216 | call = zalloc(sizeof(*call)); |
@@ -222,23 +218,25 @@ fill_node(struct callchain_node *node, struct resolved_chain *chain, int start) | |||
222 | perror("not enough memory for the code path tree"); | 218 | perror("not enough memory for the code path tree"); |
223 | return; | 219 | return; |
224 | } | 220 | } |
225 | call->ip = chain->ips[i].ip; | 221 | call->ip = cursor_node->ip; |
226 | call->ms = chain->ips[i].ms; | 222 | call->ms.sym = cursor_node->sym; |
223 | call->ms.map = cursor_node->map; | ||
227 | list_add_tail(&call->list, &node->val); | 224 | list_add_tail(&call->list, &node->val); |
225 | |||
226 | callchain_cursor_advance(cursor); | ||
227 | cursor_node = callchain_cursor_current(cursor); | ||
228 | } | 228 | } |
229 | node->val_nr = chain->nr - start; | ||
230 | if (!node->val_nr) | ||
231 | pr_warning("Warning: empty node in callchain tree\n"); | ||
232 | } | 229 | } |
233 | 230 | ||
234 | static void | 231 | static void |
235 | add_child(struct callchain_node *parent, struct resolved_chain *chain, | 232 | add_child(struct callchain_node *parent, |
236 | int start, u64 period) | 233 | struct callchain_cursor *cursor, |
234 | u64 period) | ||
237 | { | 235 | { |
238 | struct callchain_node *new; | 236 | struct callchain_node *new; |
239 | 237 | ||
240 | new = create_child(parent, false); | 238 | new = create_child(parent, false); |
241 | fill_node(new, chain, start); | 239 | fill_node(new, cursor); |
242 | 240 | ||
243 | new->children_hit = 0; | 241 | new->children_hit = 0; |
244 | new->hit = period; | 242 | new->hit = period; |
@@ -250,9 +248,10 @@ add_child(struct callchain_node *parent, struct resolved_chain *chain, | |||
250 | * Then create another child to host the given callchain of new branch | 248 | * Then create another child to host the given callchain of new branch |
251 | */ | 249 | */ |
252 | static void | 250 | static void |
253 | split_add_child(struct callchain_node *parent, struct resolved_chain *chain, | 251 | split_add_child(struct callchain_node *parent, |
254 | struct callchain_list *to_split, int idx_parents, int idx_local, | 252 | struct callchain_cursor *cursor, |
255 | u64 period) | 253 | struct callchain_list *to_split, |
254 | u64 idx_parents, u64 idx_local, u64 period) | ||
256 | { | 255 | { |
257 | struct callchain_node *new; | 256 | struct callchain_node *new; |
258 | struct list_head *old_tail; | 257 | struct list_head *old_tail; |
@@ -272,14 +271,14 @@ split_add_child(struct callchain_node *parent, struct resolved_chain *chain, | |||
272 | /* split the hits */ | 271 | /* split the hits */ |
273 | new->hit = parent->hit; | 272 | new->hit = parent->hit; |
274 | new->children_hit = parent->children_hit; | 273 | new->children_hit = parent->children_hit; |
275 | parent->children_hit = cumul_hits(new); | 274 | parent->children_hit = callchain_cumul_hits(new); |
276 | new->val_nr = parent->val_nr - idx_local; | 275 | new->val_nr = parent->val_nr - idx_local; |
277 | parent->val_nr = idx_local; | 276 | parent->val_nr = idx_local; |
278 | 277 | ||
279 | /* create a new child for the new branch if any */ | 278 | /* create a new child for the new branch if any */ |
280 | if (idx_total < chain->nr) { | 279 | if (idx_total < cursor->nr) { |
281 | parent->hit = 0; | 280 | parent->hit = 0; |
282 | add_child(parent, chain, idx_total, period); | 281 | add_child(parent, cursor, period); |
283 | parent->children_hit += period; | 282 | parent->children_hit += period; |
284 | } else { | 283 | } else { |
285 | parent->hit = period; | 284 | parent->hit = period; |
@@ -287,36 +286,41 @@ split_add_child(struct callchain_node *parent, struct resolved_chain *chain, | |||
287 | } | 286 | } |
288 | 287 | ||
289 | static int | 288 | static int |
290 | append_chain(struct callchain_node *root, struct resolved_chain *chain, | 289 | append_chain(struct callchain_node *root, |
291 | unsigned int start, u64 period); | 290 | struct callchain_cursor *cursor, |
291 | u64 period); | ||
292 | 292 | ||
293 | static void | 293 | static void |
294 | append_chain_children(struct callchain_node *root, struct resolved_chain *chain, | 294 | append_chain_children(struct callchain_node *root, |
295 | unsigned int start, u64 period) | 295 | struct callchain_cursor *cursor, |
296 | u64 period) | ||
296 | { | 297 | { |
297 | struct callchain_node *rnode; | 298 | struct callchain_node *rnode; |
298 | 299 | ||
299 | /* lookup in childrens */ | 300 | /* lookup in childrens */ |
300 | chain_for_each_child(rnode, root) { | 301 | chain_for_each_child(rnode, root) { |
301 | unsigned int ret = append_chain(rnode, chain, start, period); | 302 | unsigned int ret = append_chain(rnode, cursor, period); |
302 | 303 | ||
303 | if (!ret) | 304 | if (!ret) |
304 | goto inc_children_hit; | 305 | goto inc_children_hit; |
305 | } | 306 | } |
306 | /* nothing in children, add to the current node */ | 307 | /* nothing in children, add to the current node */ |
307 | add_child(root, chain, start, period); | 308 | add_child(root, cursor, period); |
308 | 309 | ||
309 | inc_children_hit: | 310 | inc_children_hit: |
310 | root->children_hit += period; | 311 | root->children_hit += period; |
311 | } | 312 | } |
312 | 313 | ||
313 | static int | 314 | static int |
314 | append_chain(struct callchain_node *root, struct resolved_chain *chain, | 315 | append_chain(struct callchain_node *root, |
315 | unsigned int start, u64 period) | 316 | struct callchain_cursor *cursor, |
317 | u64 period) | ||
316 | { | 318 | { |
319 | struct callchain_cursor_node *curr_snap = cursor->curr; | ||
317 | struct callchain_list *cnode; | 320 | struct callchain_list *cnode; |
318 | unsigned int i = start; | 321 | u64 start = cursor->pos; |
319 | bool found = false; | 322 | bool found = false; |
323 | u64 matches; | ||
320 | 324 | ||
321 | /* | 325 | /* |
322 | * Lookup in the current node | 326 | * Lookup in the current node |
@@ -324,141 +328,134 @@ append_chain(struct callchain_node *root, struct resolved_chain *chain, | |||
324 | * anywhere inside a function. | 328 | * anywhere inside a function. |
325 | */ | 329 | */ |
326 | list_for_each_entry(cnode, &root->val, list) { | 330 | list_for_each_entry(cnode, &root->val, list) { |
331 | struct callchain_cursor_node *node; | ||
327 | struct symbol *sym; | 332 | struct symbol *sym; |
328 | 333 | ||
329 | if (i == chain->nr) | 334 | node = callchain_cursor_current(cursor); |
335 | if (!node) | ||
330 | break; | 336 | break; |
331 | 337 | ||
332 | sym = chain->ips[i].ms.sym; | 338 | sym = node->sym; |
333 | 339 | ||
334 | if (cnode->ms.sym && sym) { | 340 | if (cnode->ms.sym && sym) { |
335 | if (cnode->ms.sym->start != sym->start) | 341 | if (cnode->ms.sym->start != sym->start) |
336 | break; | 342 | break; |
337 | } else if (cnode->ip != chain->ips[i].ip) | 343 | } else if (cnode->ip != node->ip) |
338 | break; | 344 | break; |
339 | 345 | ||
340 | if (!found) | 346 | if (!found) |
341 | found = true; | 347 | found = true; |
342 | i++; | 348 | |
349 | callchain_cursor_advance(cursor); | ||
343 | } | 350 | } |
344 | 351 | ||
345 | /* matches not, relay on the parent */ | 352 | /* matches not, relay on the parent */ |
346 | if (!found) | 353 | if (!found) { |
354 | cursor->curr = curr_snap; | ||
355 | cursor->pos = start; | ||
347 | return -1; | 356 | return -1; |
357 | } | ||
358 | |||
359 | matches = cursor->pos - start; | ||
348 | 360 | ||
349 | /* we match only a part of the node. Split it and add the new chain */ | 361 | /* we match only a part of the node. Split it and add the new chain */ |
350 | if (i - start < root->val_nr) { | 362 | if (matches < root->val_nr) { |
351 | split_add_child(root, chain, cnode, start, i - start, period); | 363 | split_add_child(root, cursor, cnode, start, matches, period); |
352 | return 0; | 364 | return 0; |
353 | } | 365 | } |
354 | 366 | ||
355 | /* we match 100% of the path, increment the hit */ | 367 | /* we match 100% of the path, increment the hit */ |
356 | if (i - start == root->val_nr && i == chain->nr) { | 368 | if (matches == root->val_nr && cursor->pos == cursor->nr) { |
357 | root->hit += period; | 369 | root->hit += period; |
358 | return 0; | 370 | return 0; |
359 | } | 371 | } |
360 | 372 | ||
361 | /* We match the node and still have a part remaining */ | 373 | /* We match the node and still have a part remaining */ |
362 | append_chain_children(root, chain, i, period); | 374 | append_chain_children(root, cursor, period); |
363 | 375 | ||
364 | return 0; | 376 | return 0; |
365 | } | 377 | } |
366 | 378 | ||
367 | static void filter_context(struct ip_callchain *old, struct resolved_chain *new, | 379 | int callchain_append(struct callchain_root *root, |
368 | struct map_symbol *syms) | 380 | struct callchain_cursor *cursor, |
369 | { | 381 | u64 period) |
370 | int i, j = 0; | ||
371 | |||
372 | for (i = 0; i < (int)old->nr; i++) { | ||
373 | if (old->ips[i] >= PERF_CONTEXT_MAX) | ||
374 | continue; | ||
375 | |||
376 | new->ips[j].ip = old->ips[i]; | ||
377 | new->ips[j].ms = syms[i]; | ||
378 | j++; | ||
379 | } | ||
380 | |||
381 | new->nr = j; | ||
382 | } | ||
383 | |||
384 | |||
385 | int callchain_append(struct callchain_root *root, struct ip_callchain *chain, | ||
386 | struct map_symbol *syms, u64 period) | ||
387 | { | 382 | { |
388 | struct resolved_chain *filtered; | 383 | if (!cursor->nr) |
389 | |||
390 | if (!chain->nr) | ||
391 | return 0; | 384 | return 0; |
392 | 385 | ||
393 | filtered = zalloc(sizeof(*filtered) + | 386 | callchain_cursor_commit(cursor); |
394 | chain->nr * sizeof(struct resolved_ip)); | ||
395 | if (!filtered) | ||
396 | return -ENOMEM; | ||
397 | |||
398 | filter_context(chain, filtered, syms); | ||
399 | |||
400 | if (!filtered->nr) | ||
401 | goto end; | ||
402 | 387 | ||
403 | append_chain_children(&root->node, filtered, 0, period); | 388 | append_chain_children(&root->node, cursor, period); |
404 | 389 | ||
405 | if (filtered->nr > root->max_depth) | 390 | if (cursor->nr > root->max_depth) |
406 | root->max_depth = filtered->nr; | 391 | root->max_depth = cursor->nr; |
407 | end: | ||
408 | free(filtered); | ||
409 | 392 | ||
410 | return 0; | 393 | return 0; |
411 | } | 394 | } |
412 | 395 | ||
413 | static int | 396 | static int |
414 | merge_chain_branch(struct callchain_node *dst, struct callchain_node *src, | 397 | merge_chain_branch(struct callchain_cursor *cursor, |
415 | struct resolved_chain *chain) | 398 | struct callchain_node *dst, struct callchain_node *src) |
416 | { | 399 | { |
400 | struct callchain_cursor_node **old_last = cursor->last; | ||
417 | struct callchain_node *child, *next_child; | 401 | struct callchain_node *child, *next_child; |
418 | struct callchain_list *list, *next_list; | 402 | struct callchain_list *list, *next_list; |
419 | int old_pos = chain->nr; | 403 | int old_pos = cursor->nr; |
420 | int err = 0; | 404 | int err = 0; |
421 | 405 | ||
422 | list_for_each_entry_safe(list, next_list, &src->val, list) { | 406 | list_for_each_entry_safe(list, next_list, &src->val, list) { |
423 | chain->ips[chain->nr].ip = list->ip; | 407 | callchain_cursor_append(cursor, list->ip, |
424 | chain->ips[chain->nr].ms = list->ms; | 408 | list->ms.map, list->ms.sym); |
425 | chain->nr++; | ||
426 | list_del(&list->list); | 409 | list_del(&list->list); |
427 | free(list); | 410 | free(list); |
428 | } | 411 | } |
429 | 412 | ||
430 | if (src->hit) | 413 | if (src->hit) { |
431 | append_chain_children(dst, chain, 0, src->hit); | 414 | callchain_cursor_commit(cursor); |
415 | append_chain_children(dst, cursor, src->hit); | ||
416 | } | ||
432 | 417 | ||
433 | chain_for_each_child_safe(child, next_child, src) { | 418 | chain_for_each_child_safe(child, next_child, src) { |
434 | err = merge_chain_branch(dst, child, chain); | 419 | err = merge_chain_branch(cursor, dst, child); |
435 | if (err) | 420 | if (err) |
436 | break; | 421 | break; |
437 | 422 | ||
438 | list_del(&child->brothers); | 423 | list_del(&child->siblings); |
439 | free(child); | 424 | free(child); |
440 | } | 425 | } |
441 | 426 | ||
442 | chain->nr = old_pos; | 427 | cursor->nr = old_pos; |
428 | cursor->last = old_last; | ||
443 | 429 | ||
444 | return err; | 430 | return err; |
445 | } | 431 | } |
446 | 432 | ||
447 | int callchain_merge(struct callchain_root *dst, struct callchain_root *src) | 433 | int callchain_merge(struct callchain_cursor *cursor, |
434 | struct callchain_root *dst, struct callchain_root *src) | ||
435 | { | ||
436 | return merge_chain_branch(cursor, &dst->node, &src->node); | ||
437 | } | ||
438 | |||
439 | int callchain_cursor_append(struct callchain_cursor *cursor, | ||
440 | u64 ip, struct map *map, struct symbol *sym) | ||
448 | { | 441 | { |
449 | struct resolved_chain *chain; | 442 | struct callchain_cursor_node *node = *cursor->last; |
450 | int err; | ||
451 | 443 | ||
452 | chain = malloc(sizeof(*chain) + | 444 | if (!node) { |
453 | src->max_depth * sizeof(struct resolved_ip)); | 445 | node = calloc(sizeof(*node), 1); |
454 | if (!chain) | 446 | if (!node) |
455 | return -ENOMEM; | 447 | return -ENOMEM; |
456 | 448 | ||
457 | chain->nr = 0; | 449 | *cursor->last = node; |
450 | } | ||
458 | 451 | ||
459 | err = merge_chain_branch(&dst->node, &src->node, chain); | 452 | node->ip = ip; |
453 | node->map = map; | ||
454 | node->sym = sym; | ||
460 | 455 | ||
461 | free(chain); | 456 | cursor->nr++; |
462 | 457 | ||
463 | return err; | 458 | cursor->last = &node->next; |
459 | |||
460 | return 0; | ||
464 | } | 461 | } |
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index c15fb8c24ad2..1a79df9f739f 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -16,7 +16,7 @@ enum chain_mode {
16 | 16 | ||
17 | struct callchain_node { | 17 | struct callchain_node { |
18 | struct callchain_node *parent; | 18 | struct callchain_node *parent; |
19 | struct list_head brothers; | 19 | struct list_head siblings; |
20 | struct list_head children; | 20 | struct list_head children; |
21 | struct list_head val; | 21 | struct list_head val; |
22 | struct rb_node rb_node; /* to sort nodes in an rbtree */ | 22 | struct rb_node rb_node; /* to sort nodes in an rbtree */ |
@@ -49,9 +49,30 @@ struct callchain_list { | |||
49 | struct list_head list; | 49 | struct list_head list; |
50 | }; | 50 | }; |
51 | 51 | ||
52 | /* | ||
53 | * A callchain cursor is a singly linked list that | ||
54 | * lets one feed a callchain progressively. | ||
55 | * It keeps persistently allocated entries to minimize | ||
56 | * allocations. | ||
57 | */ | ||
58 | struct callchain_cursor_node { | ||
59 | u64 ip; | ||
60 | struct map *map; | ||
61 | struct symbol *sym; | ||
62 | struct callchain_cursor_node *next; | ||
63 | }; | ||
64 | |||
65 | struct callchain_cursor { | ||
66 | u64 nr; | ||
67 | struct callchain_cursor_node *first; | ||
68 | struct callchain_cursor_node **last; | ||
69 | u64 pos; | ||
70 | struct callchain_cursor_node *curr; | ||
71 | }; | ||
72 | |||
52 | static inline void callchain_init(struct callchain_root *root) | 73 | static inline void callchain_init(struct callchain_root *root) |
53 | { | 74 | { |
54 | INIT_LIST_HEAD(&root->node.brothers); | 75 | INIT_LIST_HEAD(&root->node.siblings); |
55 | INIT_LIST_HEAD(&root->node.children); | 76 | INIT_LIST_HEAD(&root->node.children); |
56 | INIT_LIST_HEAD(&root->node.val); | 77 | INIT_LIST_HEAD(&root->node.val); |
57 | 78 | ||
@@ -61,15 +82,54 @@ static inline void callchain_init(struct callchain_root *root) | |||
61 | root->max_depth = 0; | 82 | root->max_depth = 0; |
62 | } | 83 | } |
63 | 84 | ||
64 | static inline u64 cumul_hits(struct callchain_node *node) | 85 | static inline u64 callchain_cumul_hits(struct callchain_node *node) |
65 | { | 86 | { |
66 | return node->hit + node->children_hit; | 87 | return node->hit + node->children_hit; |
67 | } | 88 | } |
68 | 89 | ||
69 | int register_callchain_param(struct callchain_param *param); | 90 | int callchain_register_param(struct callchain_param *param); |
70 | int callchain_append(struct callchain_root *root, struct ip_callchain *chain, | 91 | int callchain_append(struct callchain_root *root, |
71 | struct map_symbol *syms, u64 period); | 92 | struct callchain_cursor *cursor, |
72 | int callchain_merge(struct callchain_root *dst, struct callchain_root *src); | 93 | u64 period); |
94 | |||
95 | int callchain_merge(struct callchain_cursor *cursor, | ||
96 | struct callchain_root *dst, struct callchain_root *src); | ||
97 | |||
98 | bool ip_callchain__valid(struct ip_callchain *chain, | ||
99 | const union perf_event *event); | ||
100 | /* | ||
101 | * Initialize a cursor before adding entries inside, but keep | ||
102 | * the previously allocated entries as a cache. | ||
103 | */ | ||
104 | static inline void callchain_cursor_reset(struct callchain_cursor *cursor) | ||
105 | { | ||
106 | cursor->nr = 0; | ||
107 | cursor->last = &cursor->first; | ||
108 | } | ||
109 | |||
110 | int callchain_cursor_append(struct callchain_cursor *cursor, u64 ip, | ||
111 | struct map *map, struct symbol *sym); | ||
73 | 112 | ||
74 | bool ip_callchain__valid(struct ip_callchain *chain, const event_t *event); | 113 | /* Close a cursor writing session. Initialize for the reader */ |
114 | static inline void callchain_cursor_commit(struct callchain_cursor *cursor) | ||
115 | { | ||
116 | cursor->curr = cursor->first; | ||
117 | cursor->pos = 0; | ||
118 | } | ||
119 | |||
120 | /* Cursor reading iteration helpers */ | ||
121 | static inline struct callchain_cursor_node * | ||
122 | callchain_cursor_current(struct callchain_cursor *cursor) | ||
123 | { | ||
124 | if (cursor->pos == cursor->nr) | ||
125 | return NULL; | ||
126 | |||
127 | return cursor->curr; | ||
128 | } | ||
129 | |||
130 | static inline void callchain_cursor_advance(struct callchain_cursor *cursor) | ||
131 | { | ||
132 | cursor->curr = cursor->curr->next; | ||
133 | cursor->pos++; | ||
134 | } | ||
75 | #endif /* __PERF_CALLCHAIN_H */ | 135 | #endif /* __PERF_CALLCHAIN_H */ |
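The cursor declared above replaces the old resolved_chain array: callers now reset the cursor, append resolved frames, and hand the cursor to callchain_append(), which commits and walks it. Below is a hedged sketch of that write/read cycle; struct resolved_frame and the feeding loop are hypothetical stand-ins for whatever code resolves the sample's IPs.

#include "callchain.h"

struct resolved_frame {		/* hypothetical: one already-resolved IP */
	u64 ip;
	struct map *map;
	struct symbol *sym;
};

static int add_sample_chain(struct callchain_root *root,
			    struct callchain_cursor *cursor,
			    struct resolved_frame *frames, u64 nr, u64 period)
{
	u64 i;

	/* Start a new write session, reusing previously allocated nodes. */
	callchain_cursor_reset(cursor);

	for (i = 0; i < nr; i++)
		if (callchain_cursor_append(cursor, frames[i].ip,
					    frames[i].map, frames[i].sym) < 0)
			return -1;

	/* callchain_append() commits the cursor and iterates it itself. */
	return callchain_append(root, cursor, period);
}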
diff --git a/tools/perf/util/cgroup.c b/tools/perf/util/cgroup.c
new file mode 100644
index 000000000000..9fea75535221
--- /dev/null
+++ b/tools/perf/util/cgroup.c
@@ -0,0 +1,178 @@
1 | #include "util.h" | ||
2 | #include "../perf.h" | ||
3 | #include "parse-options.h" | ||
4 | #include "evsel.h" | ||
5 | #include "cgroup.h" | ||
6 | #include "debugfs.h" /* MAX_PATH, STR() */ | ||
7 | #include "evlist.h" | ||
8 | |||
9 | int nr_cgroups; | ||
10 | |||
11 | static int | ||
12 | cgroupfs_find_mountpoint(char *buf, size_t maxlen) | ||
13 | { | ||
14 | FILE *fp; | ||
15 | char mountpoint[MAX_PATH+1], tokens[MAX_PATH+1], type[MAX_PATH+1]; | ||
16 | char *token, *saved_ptr; | ||
17 | int found = 0; | ||
18 | |||
19 | fp = fopen("/proc/mounts", "r"); | ||
20 | if (!fp) | ||
21 | return -1; | ||
22 | |||
23 | /* | ||
24 | * in order to handle split hierarchy, we need to scan /proc/mounts | ||
25 | * and inspect every cgroupfs mount point to find one that has | ||
26 | * the perf_event subsystem | ||
27 | */ | ||
28 | while (fscanf(fp, "%*s %"STR(MAX_PATH)"s %"STR(MAX_PATH)"s %" | ||
29 | STR(MAX_PATH)"s %*d %*d\n", | ||
30 | mountpoint, type, tokens) == 3) { | ||
31 | |||
32 | if (!strcmp(type, "cgroup")) { | ||
33 | |||
34 | token = strtok_r(tokens, ",", &saved_ptr); | ||
35 | |||
36 | while (token != NULL) { | ||
37 | if (!strcmp(token, "perf_event")) { | ||
38 | found = 1; | ||
39 | break; | ||
40 | } | ||
41 | token = strtok_r(NULL, ",", &saved_ptr); | ||
42 | } | ||
43 | } | ||
44 | if (found) | ||
45 | break; | ||
46 | } | ||
47 | fclose(fp); | ||
48 | if (!found) | ||
49 | return -1; | ||
50 | |||
51 | if (strlen(mountpoint) < maxlen) { | ||
52 | strcpy(buf, mountpoint); | ||
53 | return 0; | ||
54 | } | ||
55 | return -1; | ||
56 | } | ||
57 | |||
58 | static int open_cgroup(char *name) | ||
59 | { | ||
60 | char path[MAX_PATH+1]; | ||
61 | char mnt[MAX_PATH+1]; | ||
62 | int fd; | ||
63 | |||
64 | |||
65 | if (cgroupfs_find_mountpoint(mnt, MAX_PATH+1)) | ||
66 | return -1; | ||
67 | |||
68 | snprintf(path, MAX_PATH, "%s/%s", mnt, name); | ||
69 | |||
70 | fd = open(path, O_RDONLY); | ||
71 | if (fd == -1) | ||
72 | fprintf(stderr, "no access to cgroup %s\n", path); | ||
73 | |||
74 | return fd; | ||
75 | } | ||
76 | |||
77 | static int add_cgroup(struct perf_evlist *evlist, char *str) | ||
78 | { | ||
79 | struct perf_evsel *counter; | ||
80 | struct cgroup_sel *cgrp = NULL; | ||
81 | int n; | ||
82 | /* | ||
83 | * check if cgrp is already defined, if so we reuse it | ||
84 | */ | ||
85 | list_for_each_entry(counter, &evlist->entries, node) { | ||
86 | cgrp = counter->cgrp; | ||
87 | if (!cgrp) | ||
88 | continue; | ||
89 | if (!strcmp(cgrp->name, str)) | ||
90 | break; | ||
91 | |||
92 | cgrp = NULL; | ||
93 | } | ||
94 | |||
95 | if (!cgrp) { | ||
96 | cgrp = zalloc(sizeof(*cgrp)); | ||
97 | if (!cgrp) | ||
98 | return -1; | ||
99 | |||
100 | cgrp->name = str; | ||
101 | |||
102 | cgrp->fd = open_cgroup(str); | ||
103 | if (cgrp->fd == -1) { | ||
104 | free(cgrp); | ||
105 | return -1; | ||
106 | } | ||
107 | } | ||
108 | |||
109 | /* | ||
110 | * find corresponding event | ||
111 | * if add cgroup N, then need to find event N | ||
112 | */ | ||
113 | n = 0; | ||
114 | list_for_each_entry(counter, &evlist->entries, node) { | ||
115 | if (n == nr_cgroups) | ||
116 | goto found; | ||
117 | n++; | ||
118 | } | ||
119 | if (cgrp->refcnt == 0) | ||
120 | free(cgrp); | ||
121 | |||
122 | return -1; | ||
123 | found: | ||
124 | cgrp->refcnt++; | ||
125 | counter->cgrp = cgrp; | ||
126 | return 0; | ||
127 | } | ||
128 | |||
129 | void close_cgroup(struct cgroup_sel *cgrp) | ||
130 | { | ||
131 | if (!cgrp) | ||
132 | return; | ||
133 | |||
134 | /* XXX: not reentrant */ | ||
135 | if (--cgrp->refcnt == 0) { | ||
136 | close(cgrp->fd); | ||
137 | free(cgrp->name); | ||
138 | free(cgrp); | ||
139 | } | ||
140 | } | ||
141 | |||
142 | int parse_cgroups(const struct option *opt __used, const char *str, | ||
143 | int unset __used) | ||
144 | { | ||
145 | struct perf_evlist *evlist = *(struct perf_evlist **)opt->value; | ||
146 | const char *p, *e, *eos = str + strlen(str); | ||
147 | char *s; | ||
148 | int ret; | ||
149 | |||
150 | if (list_empty(&evlist->entries)) { | ||
151 | fprintf(stderr, "must define events before cgroups\n"); | ||
152 | return -1; | ||
153 | } | ||
154 | |||
155 | for (;;) { | ||
156 | p = strchr(str, ','); | ||
157 | e = p ? p : eos; | ||
158 | |||
159 | /* allow empty cgroups, i.e., skip */ | ||
160 | if (e - str) { | ||
161 | /* termination added */ | ||
162 | s = strndup(str, e - str); | ||
163 | if (!s) | ||
164 | return -1; | ||
165 | ret = add_cgroup(evlist, s); | ||
166 | if (ret) { | ||
167 | free(s); | ||
168 | return -1; | ||
169 | } | ||
170 | } | ||
171 | /* nr_cgroups is increased even for empty cgroups */ | ||
172 | nr_cgroups++; | ||
173 | if (!p) | ||
174 | break; | ||
175 | str = p+1; | ||
176 | } | ||
177 | return 0; | ||
178 | } | ||
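The mount-point scan in cgroupfs_find_mountpoint() above can be tried outside perf with a small standalone program. The sketch below is not part of the patch: it walks /proc/mounts the same way and reports the first cgroup mount whose option string mentions the perf_event controller, using a simpler strstr() match where the real code tokenises the options on ','.

#include <stdio.h>
#include <string.h>

/* Standalone sketch of the cgroupfs_find_mountpoint() walk: scan /proc/mounts
 * and report the first cgroup mount whose options mention perf_event. */
int main(void)
{
	char mnt[4096], type[256], opts[4096];
	FILE *fp = fopen("/proc/mounts", "r");

	if (!fp)
		return 1;

	while (fscanf(fp, "%*s %4095s %255s %4095s %*d %*d\n",
		      mnt, type, opts) == 3) {
		if (strcmp(type, "cgroup") == 0 && strstr(opts, "perf_event")) {
			printf("perf_event cgroup mounted at %s\n", mnt);
			break;
		}
	}
	fclose(fp);
	return 0;
}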
diff --git a/tools/perf/util/cgroup.h b/tools/perf/util/cgroup.h new file mode 100644 index 000000000000..89acd6debdc5 --- /dev/null +++ b/tools/perf/util/cgroup.h | |||
@@ -0,0 +1,17 @@ | |||
1 | #ifndef __CGROUP_H__ | ||
2 | #define __CGROUP_H__ | ||
3 | |||
4 | struct option; | ||
5 | |||
6 | struct cgroup_sel { | ||
7 | char *name; | ||
8 | int fd; | ||
9 | int refcnt; | ||
10 | }; | ||
11 | |||
12 | |||
13 | extern int nr_cgroups; /* number of explicit cgroups defined */ | ||
14 | extern void close_cgroup(struct cgroup_sel *cgrp); | ||
15 | extern int parse_cgroups(const struct option *opt, const char *str, int unset); | ||
16 | |||
17 | #endif /* __CGROUP_H__ */ | ||
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c index 3ccaa1043383..6893eec693ab 100644 --- a/tools/perf/util/cpumap.c +++ b/tools/perf/util/cpumap.c | |||
@@ -177,3 +177,8 @@ struct cpu_map *cpu_map__dummy_new(void) | |||
177 | 177 | ||
178 | return cpus; | 178 | return cpus; |
179 | } | 179 | } |
180 | |||
181 | void cpu_map__delete(struct cpu_map *map) | ||
182 | { | ||
183 | free(map); | ||
184 | } | ||
diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h index f7a4f42f6307..072c0a374794 100644 --- a/tools/perf/util/cpumap.h +++ b/tools/perf/util/cpumap.h | |||
@@ -8,6 +8,6 @@ struct cpu_map { | |||
8 | 8 | ||
9 | struct cpu_map *cpu_map__new(const char *cpu_list); | 9 | struct cpu_map *cpu_map__new(const char *cpu_list); |
10 | struct cpu_map *cpu_map__dummy_new(void); | 10 | struct cpu_map *cpu_map__dummy_new(void); |
11 | void *cpu_map__delete(struct cpu_map *map); | 11 | void cpu_map__delete(struct cpu_map *map); |
12 | 12 | ||
13 | #endif /* __PERF_CPUMAP_H */ | 13 | #endif /* __PERF_CPUMAP_H */ |
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c index 01bbe8ecec3f..d4536a9e0d8c 100644 --- a/tools/perf/util/debug.c +++ b/tools/perf/util/debug.c | |||
@@ -57,7 +57,7 @@ void ui__warning(const char *format, ...) | |||
57 | } | 57 | } |
58 | #endif | 58 | #endif |
59 | 59 | ||
60 | void trace_event(event_t *event) | 60 | void trace_event(union perf_event *event) |
61 | { | 61 | { |
62 | unsigned char *raw_event = (void *)event; | 62 | unsigned char *raw_event = (void *)event; |
63 | const char *color = PERF_COLOR_BLUE; | 63 | const char *color = PERF_COLOR_BLUE; |
diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h index ca35fd66b5df..93516cf4682c 100644 --- a/tools/perf/util/debug.h +++ b/tools/perf/util/debug.h | |||
@@ -9,7 +9,7 @@ extern int verbose; | |||
9 | extern bool quiet, dump_trace; | 9 | extern bool quiet, dump_trace; |
10 | 10 | ||
11 | int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2))); | 11 | int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2))); |
12 | void trace_event(event_t *event); | 12 | void trace_event(union perf_event *event); |
13 | 13 | ||
14 | struct ui_progress; | 14 | struct ui_progress; |
15 | 15 | ||
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index 50d0a931497a..2b15c362ef56 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c | |||
@@ -6,8 +6,9 @@ | |||
6 | #include "string.h" | 6 | #include "string.h" |
7 | #include "strlist.h" | 7 | #include "strlist.h" |
8 | #include "thread.h" | 8 | #include "thread.h" |
9 | #include "thread_map.h" | ||
9 | 10 | ||
10 | static const char *event__name[] = { | 11 | static const char *perf_event__names[] = { |
11 | [0] = "TOTAL", | 12 | [0] = "TOTAL", |
12 | [PERF_RECORD_MMAP] = "MMAP", | 13 | [PERF_RECORD_MMAP] = "MMAP", |
13 | [PERF_RECORD_LOST] = "LOST", | 14 | [PERF_RECORD_LOST] = "LOST", |
@@ -25,16 +26,16 @@ static const char *event__name[] = { | |||
25 | [PERF_RECORD_FINISHED_ROUND] = "FINISHED_ROUND", | 26 | [PERF_RECORD_FINISHED_ROUND] = "FINISHED_ROUND", |
26 | }; | 27 | }; |
27 | 28 | ||
28 | const char *event__get_event_name(unsigned int id) | 29 | const char *perf_event__name(unsigned int id) |
29 | { | 30 | { |
30 | if (id >= ARRAY_SIZE(event__name)) | 31 | if (id >= ARRAY_SIZE(perf_event__names)) |
31 | return "INVALID"; | 32 | return "INVALID"; |
32 | if (!event__name[id]) | 33 | if (!perf_event__names[id]) |
33 | return "UNKNOWN"; | 34 | return "UNKNOWN"; |
34 | return event__name[id]; | 35 | return perf_event__names[id]; |
35 | } | 36 | } |
36 | 37 | ||
37 | static struct sample_data synth_sample = { | 38 | static struct perf_sample synth_sample = { |
38 | .pid = -1, | 39 | .pid = -1, |
39 | .tid = -1, | 40 | .tid = -1, |
40 | .time = -1, | 41 | .time = -1, |
@@ -43,9 +44,9 @@ static struct sample_data synth_sample = { | |||
43 | .period = 1, | 44 | .period = 1, |
44 | }; | 45 | }; |
45 | 46 | ||
46 | static pid_t event__synthesize_comm(event_t *event, pid_t pid, int full, | 47 | static pid_t perf_event__synthesize_comm(union perf_event *event, pid_t pid, |
47 | event__handler_t process, | 48 | int full, perf_event__handler_t process, |
48 | struct perf_session *session) | 49 | struct perf_session *session) |
49 | { | 50 | { |
50 | char filename[PATH_MAX]; | 51 | char filename[PATH_MAX]; |
51 | char bf[BUFSIZ]; | 52 | char bf[BUFSIZ]; |
@@ -126,9 +127,10 @@ out: | |||
126 | return tgid; | 127 | return tgid; |
127 | } | 128 | } |
128 | 129 | ||
129 | static int event__synthesize_mmap_events(event_t *event, pid_t pid, pid_t tgid, | 130 | static int perf_event__synthesize_mmap_events(union perf_event *event, |
130 | event__handler_t process, | 131 | pid_t pid, pid_t tgid, |
131 | struct perf_session *session) | 132 | perf_event__handler_t process, |
133 | struct perf_session *session) | ||
132 | { | 134 | { |
133 | char filename[PATH_MAX]; | 135 | char filename[PATH_MAX]; |
134 | FILE *fp; | 136 | FILE *fp; |
@@ -199,14 +201,14 @@ static int event__synthesize_mmap_events(event_t *event, pid_t pid, pid_t tgid, | |||
199 | return 0; | 201 | return 0; |
200 | } | 202 | } |
201 | 203 | ||
202 | int event__synthesize_modules(event__handler_t process, | 204 | int perf_event__synthesize_modules(perf_event__handler_t process, |
203 | struct perf_session *session, | 205 | struct perf_session *session, |
204 | struct machine *machine) | 206 | struct machine *machine) |
205 | { | 207 | { |
206 | struct rb_node *nd; | 208 | struct rb_node *nd; |
207 | struct map_groups *kmaps = &machine->kmaps; | 209 | struct map_groups *kmaps = &machine->kmaps; |
208 | event_t *event = zalloc(sizeof(event->mmap) + session->id_hdr_size); | 210 | union perf_event *event = zalloc((sizeof(event->mmap) + |
209 | 211 | session->id_hdr_size)); | |
210 | if (event == NULL) { | 212 | if (event == NULL) { |
211 | pr_debug("Not enough memory synthesizing mmap event " | 213 | pr_debug("Not enough memory synthesizing mmap event " |
212 | "for kernel modules\n"); | 214 | "for kernel modules\n"); |
@@ -251,23 +253,24 @@ int event__synthesize_modules(event__handler_t process, | |||
251 | return 0; | 253 | return 0; |
252 | } | 254 | } |
253 | 255 | ||
254 | static int __event__synthesize_thread(event_t *comm_event, event_t *mmap_event, | 256 | static int __event__synthesize_thread(union perf_event *comm_event, |
255 | pid_t pid, event__handler_t process, | 257 | union perf_event *mmap_event, |
258 | pid_t pid, perf_event__handler_t process, | ||
256 | struct perf_session *session) | 259 | struct perf_session *session) |
257 | { | 260 | { |
258 | pid_t tgid = event__synthesize_comm(comm_event, pid, 1, process, | 261 | pid_t tgid = perf_event__synthesize_comm(comm_event, pid, 1, process, |
259 | session); | 262 | session); |
260 | if (tgid == -1) | 263 | if (tgid == -1) |
261 | return -1; | 264 | return -1; |
262 | return event__synthesize_mmap_events(mmap_event, pid, tgid, | 265 | return perf_event__synthesize_mmap_events(mmap_event, pid, tgid, |
263 | process, session); | 266 | process, session); |
264 | } | 267 | } |
265 | 268 | ||
266 | int event__synthesize_thread_map(struct thread_map *threads, | 269 | int perf_event__synthesize_thread_map(struct thread_map *threads, |
267 | event__handler_t process, | 270 | perf_event__handler_t process, |
268 | struct perf_session *session) | 271 | struct perf_session *session) |
269 | { | 272 | { |
270 | event_t *comm_event, *mmap_event; | 273 | union perf_event *comm_event, *mmap_event; |
271 | int err = -1, thread; | 274 | int err = -1, thread; |
272 | 275 | ||
273 | comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size); | 276 | comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size); |
@@ -294,12 +297,12 @@ out: | |||
294 | return err; | 297 | return err; |
295 | } | 298 | } |
296 | 299 | ||
297 | int event__synthesize_threads(event__handler_t process, | 300 | int perf_event__synthesize_threads(perf_event__handler_t process, |
298 | struct perf_session *session) | 301 | struct perf_session *session) |
299 | { | 302 | { |
300 | DIR *proc; | 303 | DIR *proc; |
301 | struct dirent dirent, *next; | 304 | struct dirent dirent, *next; |
302 | event_t *comm_event, *mmap_event; | 305 | union perf_event *comm_event, *mmap_event; |
303 | int err = -1; | 306 | int err = -1; |
304 | 307 | ||
305 | comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size); | 308 | comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size); |
@@ -357,10 +360,10 @@ static int find_symbol_cb(void *arg, const char *name, char type, | |||
357 | return 1; | 360 | return 1; |
358 | } | 361 | } |
359 | 362 | ||
360 | int event__synthesize_kernel_mmap(event__handler_t process, | 363 | int perf_event__synthesize_kernel_mmap(perf_event__handler_t process, |
361 | struct perf_session *session, | 364 | struct perf_session *session, |
362 | struct machine *machine, | 365 | struct machine *machine, |
363 | const char *symbol_name) | 366 | const char *symbol_name) |
364 | { | 367 | { |
365 | size_t size; | 368 | size_t size; |
366 | const char *filename, *mmap_name; | 369 | const char *filename, *mmap_name; |
@@ -374,8 +377,8 @@ int event__synthesize_kernel_mmap(event__handler_t process, | |||
374 | * kernels. | 377 | * kernels. |
375 | */ | 378 | */ |
376 | struct process_symbol_args args = { .name = symbol_name, }; | 379 | struct process_symbol_args args = { .name = symbol_name, }; |
377 | event_t *event = zalloc(sizeof(event->mmap) + session->id_hdr_size); | 380 | union perf_event *event = zalloc((sizeof(event->mmap) + |
378 | 381 | session->id_hdr_size)); | |
379 | if (event == NULL) { | 382 | if (event == NULL) { |
380 | pr_debug("Not enough memory synthesizing mmap event " | 383 | pr_debug("Not enough memory synthesizing mmap event " |
381 | "for kernel modules\n"); | 384 | "for kernel modules\n"); |
@@ -421,42 +424,15 @@ int event__synthesize_kernel_mmap(event__handler_t process, | |||
421 | return err; | 424 | return err; |
422 | } | 425 | } |
423 | 426 | ||
424 | static void thread__comm_adjust(struct thread *self, struct hists *hists) | 427 | int perf_event__process_comm(union perf_event *event, |
425 | { | 428 | struct perf_sample *sample __used, |
426 | char *comm = self->comm; | 429 | struct perf_session *session) |
427 | |||
428 | if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep && | ||
429 | (!symbol_conf.comm_list || | ||
430 | strlist__has_entry(symbol_conf.comm_list, comm))) { | ||
431 | u16 slen = strlen(comm); | ||
432 | |||
433 | if (hists__new_col_len(hists, HISTC_COMM, slen)) | ||
434 | hists__set_col_len(hists, HISTC_THREAD, slen + 6); | ||
435 | } | ||
436 | } | ||
437 | |||
438 | static int thread__set_comm_adjust(struct thread *self, const char *comm, | ||
439 | struct hists *hists) | ||
440 | { | 430 | { |
441 | int ret = thread__set_comm(self, comm); | 431 | struct thread *thread = perf_session__findnew(session, event->comm.tid); |
442 | |||
443 | if (ret) | ||
444 | return ret; | ||
445 | |||
446 | thread__comm_adjust(self, hists); | ||
447 | 432 | ||
448 | return 0; | 433 | dump_printf(": %s:%d\n", event->comm.comm, event->comm.tid); |
449 | } | ||
450 | 434 | ||
451 | int event__process_comm(event_t *self, struct sample_data *sample __used, | 435 | if (thread == NULL || thread__set_comm(thread, event->comm.comm)) { |
452 | struct perf_session *session) | ||
453 | { | ||
454 | struct thread *thread = perf_session__findnew(session, self->comm.tid); | ||
455 | |||
456 | dump_printf(": %s:%d\n", self->comm.comm, self->comm.tid); | ||
457 | |||
458 | if (thread == NULL || thread__set_comm_adjust(thread, self->comm.comm, | ||
459 | &session->hists)) { | ||
460 | dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n"); | 436 | dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n"); |
461 | return -1; | 437 | return -1; |
462 | } | 438 | } |
@@ -464,19 +440,21 @@ int event__process_comm(event_t *self, struct sample_data *sample __used, | |||
464 | return 0; | 440 | return 0; |
465 | } | 441 | } |
466 | 442 | ||
467 | int event__process_lost(event_t *self, struct sample_data *sample __used, | 443 | int perf_event__process_lost(union perf_event *event, |
468 | struct perf_session *session) | 444 | struct perf_sample *sample __used, |
445 | struct perf_session *session) | ||
469 | { | 446 | { |
470 | dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n", | 447 | dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n", |
471 | self->lost.id, self->lost.lost); | 448 | event->lost.id, event->lost.lost); |
472 | session->hists.stats.total_lost += self->lost.lost; | 449 | session->hists.stats.total_lost += event->lost.lost; |
473 | return 0; | 450 | return 0; |
474 | } | 451 | } |
475 | 452 | ||
476 | static void event_set_kernel_mmap_len(struct map **maps, event_t *self) | 453 | static void perf_event__set_kernel_mmap_len(union perf_event *event, |
454 | struct map **maps) | ||
477 | { | 455 | { |
478 | maps[MAP__FUNCTION]->start = self->mmap.start; | 456 | maps[MAP__FUNCTION]->start = event->mmap.start; |
479 | maps[MAP__FUNCTION]->end = self->mmap.start + self->mmap.len; | 457 | maps[MAP__FUNCTION]->end = event->mmap.start + event->mmap.len; |
480 | /* | 458 | /* |
481 | * Be a bit paranoid here, some perf.data file came with | 459 | * Be a bit paranoid here, some perf.data file came with |
482 | * a zero sized synthesized MMAP event for the kernel. | 460 | * a zero sized synthesized MMAP event for the kernel. |
@@ -485,8 +463,8 @@ static void event_set_kernel_mmap_len(struct map **maps, event_t *self) | |||
485 | maps[MAP__FUNCTION]->end = ~0ULL; | 463 | maps[MAP__FUNCTION]->end = ~0ULL; |
486 | } | 464 | } |
487 | 465 | ||
488 | static int event__process_kernel_mmap(event_t *self, | 466 | static int perf_event__process_kernel_mmap(union perf_event *event, |
489 | struct perf_session *session) | 467 | struct perf_session *session) |
490 | { | 468 | { |
491 | struct map *map; | 469 | struct map *map; |
492 | char kmmap_prefix[PATH_MAX]; | 470 | char kmmap_prefix[PATH_MAX]; |
@@ -494,9 +472,9 @@ static int event__process_kernel_mmap(event_t *self, | |||
494 | enum dso_kernel_type kernel_type; | 472 | enum dso_kernel_type kernel_type; |
495 | bool is_kernel_mmap; | 473 | bool is_kernel_mmap; |
496 | 474 | ||
497 | machine = perf_session__findnew_machine(session, self->mmap.pid); | 475 | machine = perf_session__findnew_machine(session, event->mmap.pid); |
498 | if (!machine) { | 476 | if (!machine) { |
499 | pr_err("Can't find id %d's machine\n", self->mmap.pid); | 477 | pr_err("Can't find id %d's machine\n", event->mmap.pid); |
500 | goto out_problem; | 478 | goto out_problem; |
501 | } | 479 | } |
502 | 480 | ||
@@ -506,17 +484,17 @@ static int event__process_kernel_mmap(event_t *self, | |||
506 | else | 484 | else |
507 | kernel_type = DSO_TYPE_GUEST_KERNEL; | 485 | kernel_type = DSO_TYPE_GUEST_KERNEL; |
508 | 486 | ||
509 | is_kernel_mmap = memcmp(self->mmap.filename, | 487 | is_kernel_mmap = memcmp(event->mmap.filename, |
510 | kmmap_prefix, | 488 | kmmap_prefix, |
511 | strlen(kmmap_prefix)) == 0; | 489 | strlen(kmmap_prefix)) == 0; |
512 | if (self->mmap.filename[0] == '/' || | 490 | if (event->mmap.filename[0] == '/' || |
513 | (!is_kernel_mmap && self->mmap.filename[0] == '[')) { | 491 | (!is_kernel_mmap && event->mmap.filename[0] == '[')) { |
514 | 492 | ||
515 | char short_module_name[1024]; | 493 | char short_module_name[1024]; |
516 | char *name, *dot; | 494 | char *name, *dot; |
517 | 495 | ||
518 | if (self->mmap.filename[0] == '/') { | 496 | if (event->mmap.filename[0] == '/') { |
519 | name = strrchr(self->mmap.filename, '/'); | 497 | name = strrchr(event->mmap.filename, '/'); |
520 | if (name == NULL) | 498 | if (name == NULL) |
521 | goto out_problem; | 499 | goto out_problem; |
522 | 500 | ||
@@ -528,10 +506,10 @@ static int event__process_kernel_mmap(event_t *self, | |||
528 | "[%.*s]", (int)(dot - name), name); | 506 | "[%.*s]", (int)(dot - name), name); |
529 | strxfrchar(short_module_name, '-', '_'); | 507 | strxfrchar(short_module_name, '-', '_'); |
530 | } else | 508 | } else |
531 | strcpy(short_module_name, self->mmap.filename); | 509 | strcpy(short_module_name, event->mmap.filename); |
532 | 510 | ||
533 | map = machine__new_module(machine, self->mmap.start, | 511 | map = machine__new_module(machine, event->mmap.start, |
534 | self->mmap.filename); | 512 | event->mmap.filename); |
535 | if (map == NULL) | 513 | if (map == NULL) |
536 | goto out_problem; | 514 | goto out_problem; |
537 | 515 | ||
@@ -541,9 +519,9 @@ static int event__process_kernel_mmap(event_t *self, | |||
541 | 519 | ||
542 | map->dso->short_name = name; | 520 | map->dso->short_name = name; |
543 | map->dso->sname_alloc = 1; | 521 | map->dso->sname_alloc = 1; |
544 | map->end = map->start + self->mmap.len; | 522 | map->end = map->start + event->mmap.len; |
545 | } else if (is_kernel_mmap) { | 523 | } else if (is_kernel_mmap) { |
546 | const char *symbol_name = (self->mmap.filename + | 524 | const char *symbol_name = (event->mmap.filename + |
547 | strlen(kmmap_prefix)); | 525 | strlen(kmmap_prefix)); |
548 | /* | 526 | /* |
549 | * Should be there already, from the build-id table in | 527 | * Should be there already, from the build-id table in |
@@ -558,10 +536,10 @@ static int event__process_kernel_mmap(event_t *self, | |||
558 | if (__machine__create_kernel_maps(machine, kernel) < 0) | 536 | if (__machine__create_kernel_maps(machine, kernel) < 0) |
559 | goto out_problem; | 537 | goto out_problem; |
560 | 538 | ||
561 | event_set_kernel_mmap_len(machine->vmlinux_maps, self); | 539 | perf_event__set_kernel_mmap_len(event, machine->vmlinux_maps); |
562 | perf_session__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, | 540 | perf_session__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, |
563 | symbol_name, | 541 | symbol_name, |
564 | self->mmap.pgoff); | 542 | event->mmap.pgoff); |
565 | if (machine__is_default_guest(machine)) { | 543 | if (machine__is_default_guest(machine)) { |
566 | /* | 544 | /* |
567 | * preload dso of guest kernel and modules | 545 | * preload dso of guest kernel and modules |
@@ -575,22 +553,23 @@ out_problem: | |||
575 | return -1; | 553 | return -1; |
576 | } | 554 | } |
577 | 555 | ||
578 | int event__process_mmap(event_t *self, struct sample_data *sample __used, | 556 | int perf_event__process_mmap(union perf_event *event, |
579 | struct perf_session *session) | 557 | struct perf_sample *sample __used, |
558 | struct perf_session *session) | ||
580 | { | 559 | { |
581 | struct machine *machine; | 560 | struct machine *machine; |
582 | struct thread *thread; | 561 | struct thread *thread; |
583 | struct map *map; | 562 | struct map *map; |
584 | u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; | 563 | u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; |
585 | int ret = 0; | 564 | int ret = 0; |
586 | 565 | ||
587 | dump_printf(" %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %s\n", | 566 | dump_printf(" %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %s\n", |
588 | self->mmap.pid, self->mmap.tid, self->mmap.start, | 567 | event->mmap.pid, event->mmap.tid, event->mmap.start, |
589 | self->mmap.len, self->mmap.pgoff, self->mmap.filename); | 568 | event->mmap.len, event->mmap.pgoff, event->mmap.filename); |
590 | 569 | ||
591 | if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL || | 570 | if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL || |
592 | cpumode == PERF_RECORD_MISC_KERNEL) { | 571 | cpumode == PERF_RECORD_MISC_KERNEL) { |
593 | ret = event__process_kernel_mmap(self, session); | 572 | ret = perf_event__process_kernel_mmap(event, session); |
594 | if (ret < 0) | 573 | if (ret < 0) |
595 | goto out_problem; | 574 | goto out_problem; |
596 | return 0; | 575 | return 0; |
@@ -599,12 +578,12 @@ int event__process_mmap(event_t *self, struct sample_data *sample __used, | |||
599 | machine = perf_session__find_host_machine(session); | 578 | machine = perf_session__find_host_machine(session); |
600 | if (machine == NULL) | 579 | if (machine == NULL) |
601 | goto out_problem; | 580 | goto out_problem; |
602 | thread = perf_session__findnew(session, self->mmap.pid); | 581 | thread = perf_session__findnew(session, event->mmap.pid); |
603 | if (thread == NULL) | 582 | if (thread == NULL) |
604 | goto out_problem; | 583 | goto out_problem; |
605 | map = map__new(&machine->user_dsos, self->mmap.start, | 584 | map = map__new(&machine->user_dsos, event->mmap.start, |
606 | self->mmap.len, self->mmap.pgoff, | 585 | event->mmap.len, event->mmap.pgoff, |
607 | self->mmap.pid, self->mmap.filename, | 586 | event->mmap.pid, event->mmap.filename, |
608 | MAP__FUNCTION); | 587 | MAP__FUNCTION); |
609 | if (map == NULL) | 588 | if (map == NULL) |
610 | goto out_problem; | 589 | goto out_problem; |
@@ -617,16 +596,17 @@ out_problem: | |||
617 | return 0; | 596 | return 0; |
618 | } | 597 | } |
619 | 598 | ||
620 | int event__process_task(event_t *self, struct sample_data *sample __used, | 599 | int perf_event__process_task(union perf_event *event, |
621 | struct perf_session *session) | 600 | struct perf_sample *sample __used, |
601 | struct perf_session *session) | ||
622 | { | 602 | { |
623 | struct thread *thread = perf_session__findnew(session, self->fork.tid); | 603 | struct thread *thread = perf_session__findnew(session, event->fork.tid); |
624 | struct thread *parent = perf_session__findnew(session, self->fork.ptid); | 604 | struct thread *parent = perf_session__findnew(session, event->fork.ptid); |
625 | 605 | ||
626 | dump_printf("(%d:%d):(%d:%d)\n", self->fork.pid, self->fork.tid, | 606 | dump_printf("(%d:%d):(%d:%d)\n", event->fork.pid, event->fork.tid, |
627 | self->fork.ppid, self->fork.ptid); | 607 | event->fork.ppid, event->fork.ptid); |
628 | 608 | ||
629 | if (self->header.type == PERF_RECORD_EXIT) { | 609 | if (event->header.type == PERF_RECORD_EXIT) { |
630 | perf_session__remove_thread(session, thread); | 610 | perf_session__remove_thread(session, thread); |
631 | return 0; | 611 | return 0; |
632 | } | 612 | } |
@@ -640,20 +620,22 @@ int event__process_task(event_t *self, struct sample_data *sample __used, | |||
640 | return 0; | 620 | return 0; |
641 | } | 621 | } |
642 | 622 | ||
643 | int event__process(event_t *event, struct sample_data *sample, | 623 | int perf_event__process(union perf_event *event, struct perf_sample *sample, |
644 | struct perf_session *session) | 624 | struct perf_session *session) |
645 | { | 625 | { |
646 | switch (event->header.type) { | 626 | switch (event->header.type) { |
647 | case PERF_RECORD_COMM: | 627 | case PERF_RECORD_COMM: |
648 | event__process_comm(event, sample, session); | 628 | perf_event__process_comm(event, sample, session); |
649 | break; | 629 | break; |
650 | case PERF_RECORD_MMAP: | 630 | case PERF_RECORD_MMAP: |
651 | event__process_mmap(event, sample, session); | 631 | perf_event__process_mmap(event, sample, session); |
652 | break; | 632 | break; |
653 | case PERF_RECORD_FORK: | 633 | case PERF_RECORD_FORK: |
654 | case PERF_RECORD_EXIT: | 634 | case PERF_RECORD_EXIT: |
655 | event__process_task(event, sample, session); | 635 | perf_event__process_task(event, sample, session); |
656 | break; | 636 | break; |
637 | case PERF_RECORD_LOST: | ||
638 | perf_event__process_lost(event, sample, session); | ||
657 | default: | 639 | default: |
658 | break; | 640 | break; |
659 | } | 641 | } |
@@ -750,24 +732,14 @@ void thread__find_addr_location(struct thread *self, | |||
750 | al->sym = NULL; | 732 | al->sym = NULL; |
751 | } | 733 | } |
752 | 734 | ||
753 | static void dso__calc_col_width(struct dso *self, struct hists *hists) | 735 | int perf_event__preprocess_sample(const union perf_event *event, |
754 | { | 736 | struct perf_session *session, |
755 | if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep && | 737 | struct addr_location *al, |
756 | (!symbol_conf.dso_list || | 738 | struct perf_sample *sample, |
757 | strlist__has_entry(symbol_conf.dso_list, self->name))) { | 739 | symbol_filter_t filter) |
758 | u16 slen = dso__name_len(self); | ||
759 | hists__new_col_len(hists, HISTC_DSO, slen); | ||
760 | } | ||
761 | |||
762 | self->slen_calculated = 1; | ||
763 | } | ||
764 | |||
765 | int event__preprocess_sample(const event_t *self, struct perf_session *session, | ||
766 | struct addr_location *al, struct sample_data *data, | ||
767 | symbol_filter_t filter) | ||
768 | { | 740 | { |
769 | u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; | 741 | u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; |
770 | struct thread *thread = perf_session__findnew(session, self->ip.pid); | 742 | struct thread *thread = perf_session__findnew(session, event->ip.pid); |
771 | 743 | ||
772 | if (thread == NULL) | 744 | if (thread == NULL) |
773 | return -1; | 745 | return -1; |
@@ -789,12 +761,12 @@ int event__preprocess_sample(const event_t *self, struct perf_session *session, | |||
789 | machine__create_kernel_maps(&session->host_machine); | 761 | machine__create_kernel_maps(&session->host_machine); |
790 | 762 | ||
791 | thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION, | 763 | thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION, |
792 | self->ip.pid, self->ip.ip, al); | 764 | event->ip.pid, event->ip.ip, al); |
793 | dump_printf(" ...... dso: %s\n", | 765 | dump_printf(" ...... dso: %s\n", |
794 | al->map ? al->map->dso->long_name : | 766 | al->map ? al->map->dso->long_name : |
795 | al->level == 'H' ? "[hypervisor]" : "<not found>"); | 767 | al->level == 'H' ? "[hypervisor]" : "<not found>"); |
796 | al->sym = NULL; | 768 | al->sym = NULL; |
797 | al->cpu = data->cpu; | 769 | al->cpu = sample->cpu; |
798 | 770 | ||
799 | if (al->map) { | 771 | if (al->map) { |
800 | if (symbol_conf.dso_list && | 772 | if (symbol_conf.dso_list && |
@@ -805,23 +777,8 @@ int event__preprocess_sample(const event_t *self, struct perf_session *session, | |||
805 | strlist__has_entry(symbol_conf.dso_list, | 777 | strlist__has_entry(symbol_conf.dso_list, |
806 | al->map->dso->long_name))))) | 778 | al->map->dso->long_name))))) |
807 | goto out_filtered; | 779 | goto out_filtered; |
808 | /* | ||
809 | * We have to do this here as we may have a dso with no symbol | ||
810 | * hit that has a name longer than the ones with symbols | ||
811 | * sampled. | ||
812 | */ | ||
813 | if (!sort_dso.elide && !al->map->dso->slen_calculated) | ||
814 | dso__calc_col_width(al->map->dso, &session->hists); | ||
815 | 780 | ||
816 | al->sym = map__find_symbol(al->map, al->addr, filter); | 781 | al->sym = map__find_symbol(al->map, al->addr, filter); |
817 | } else { | ||
818 | const unsigned int unresolved_col_width = BITS_PER_LONG / 4; | ||
819 | |||
820 | if (hists__col_len(&session->hists, HISTC_DSO) < unresolved_col_width && | ||
821 | !symbol_conf.col_width_list_str && !symbol_conf.field_sep && | ||
822 | !symbol_conf.dso_list) | ||
823 | hists__set_col_len(&session->hists, HISTC_DSO, | ||
824 | unresolved_col_width); | ||
825 | } | 782 | } |
826 | 783 | ||
827 | if (symbol_conf.sym_list && al->sym && | 784 | if (symbol_conf.sym_list && al->sym && |
@@ -834,128 +791,3 @@ out_filtered: | |||
834 | al->filtered = true; | 791 | al->filtered = true; |
835 | return 0; | 792 | return 0; |
836 | } | 793 | } |
837 | |||
838 | static int event__parse_id_sample(const event_t *event, | ||
839 | struct perf_session *session, | ||
840 | struct sample_data *sample) | ||
841 | { | ||
842 | const u64 *array; | ||
843 | u64 type; | ||
844 | |||
845 | sample->cpu = sample->pid = sample->tid = -1; | ||
846 | sample->stream_id = sample->id = sample->time = -1ULL; | ||
847 | |||
848 | if (!session->sample_id_all) | ||
849 | return 0; | ||
850 | |||
851 | array = event->sample.array; | ||
852 | array += ((event->header.size - | ||
853 | sizeof(event->header)) / sizeof(u64)) - 1; | ||
854 | type = session->sample_type; | ||
855 | |||
856 | if (type & PERF_SAMPLE_CPU) { | ||
857 | u32 *p = (u32 *)array; | ||
858 | sample->cpu = *p; | ||
859 | array--; | ||
860 | } | ||
861 | |||
862 | if (type & PERF_SAMPLE_STREAM_ID) { | ||
863 | sample->stream_id = *array; | ||
864 | array--; | ||
865 | } | ||
866 | |||
867 | if (type & PERF_SAMPLE_ID) { | ||
868 | sample->id = *array; | ||
869 | array--; | ||
870 | } | ||
871 | |||
872 | if (type & PERF_SAMPLE_TIME) { | ||
873 | sample->time = *array; | ||
874 | array--; | ||
875 | } | ||
876 | |||
877 | if (type & PERF_SAMPLE_TID) { | ||
878 | u32 *p = (u32 *)array; | ||
879 | sample->pid = p[0]; | ||
880 | sample->tid = p[1]; | ||
881 | } | ||
882 | |||
883 | return 0; | ||
884 | } | ||
885 | |||
886 | int event__parse_sample(const event_t *event, struct perf_session *session, | ||
887 | struct sample_data *data) | ||
888 | { | ||
889 | const u64 *array; | ||
890 | u64 type; | ||
891 | |||
892 | if (event->header.type != PERF_RECORD_SAMPLE) | ||
893 | return event__parse_id_sample(event, session, data); | ||
894 | |||
895 | array = event->sample.array; | ||
896 | type = session->sample_type; | ||
897 | |||
898 | if (type & PERF_SAMPLE_IP) { | ||
899 | data->ip = event->ip.ip; | ||
900 | array++; | ||
901 | } | ||
902 | |||
903 | if (type & PERF_SAMPLE_TID) { | ||
904 | u32 *p = (u32 *)array; | ||
905 | data->pid = p[0]; | ||
906 | data->tid = p[1]; | ||
907 | array++; | ||
908 | } | ||
909 | |||
910 | if (type & PERF_SAMPLE_TIME) { | ||
911 | data->time = *array; | ||
912 | array++; | ||
913 | } | ||
914 | |||
915 | if (type & PERF_SAMPLE_ADDR) { | ||
916 | data->addr = *array; | ||
917 | array++; | ||
918 | } | ||
919 | |||
920 | data->id = -1ULL; | ||
921 | if (type & PERF_SAMPLE_ID) { | ||
922 | data->id = *array; | ||
923 | array++; | ||
924 | } | ||
925 | |||
926 | if (type & PERF_SAMPLE_STREAM_ID) { | ||
927 | data->stream_id = *array; | ||
928 | array++; | ||
929 | } | ||
930 | |||
931 | if (type & PERF_SAMPLE_CPU) { | ||
932 | u32 *p = (u32 *)array; | ||
933 | data->cpu = *p; | ||
934 | array++; | ||
935 | } else | ||
936 | data->cpu = -1; | ||
937 | |||
938 | if (type & PERF_SAMPLE_PERIOD) { | ||
939 | data->period = *array; | ||
940 | array++; | ||
941 | } | ||
942 | |||
943 | if (type & PERF_SAMPLE_READ) { | ||
944 | pr_debug("PERF_SAMPLE_READ is unsuported for now\n"); | ||
945 | return -1; | ||
946 | } | ||
947 | |||
948 | if (type & PERF_SAMPLE_CALLCHAIN) { | ||
949 | data->callchain = (struct ip_callchain *)array; | ||
950 | array += 1 + data->callchain->nr; | ||
951 | } | ||
952 | |||
953 | if (type & PERF_SAMPLE_RAW) { | ||
954 | u32 *p = (u32 *)array; | ||
955 | data->raw_size = *p; | ||
956 | p++; | ||
957 | data->raw_data = p; | ||
958 | } | ||
959 | |||
960 | return 0; | ||
961 | } | ||
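With the event__* entry points renamed to perf_event__*, a record-style tool would typically seed the session with synthetic events before reading live data. A rough sketch of that startup sequence, using only the prototypes introduced in this patch; the "_text" reference symbol and the process/session/machine arguments are assumptions for illustration, not taken from the diff.

/* Hypothetical startup for a system-wide session: synthesize the kernel map,
 * module maps and already-running threads through the same handler that will
 * later consume live events. Error handling trimmed to the bare minimum. */
static int synthesize_existing_state(perf_event__handler_t process,
				     struct perf_session *session,
				     struct machine *machine)
{
	int err;

	err = perf_event__synthesize_kernel_mmap(process, session, machine,
						 "_text" /* assumed ref symbol */);
	if (err < 0)
		return err;

	err = perf_event__synthesize_modules(process, session, machine);
	if (err < 0)
		return err;

	return perf_event__synthesize_threads(process, session);
}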
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index cc7b52f9b492..9c35170fb379 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h | |||
@@ -61,7 +61,7 @@ struct sample_event { | |||
61 | u64 array[]; | 61 | u64 array[]; |
62 | }; | 62 | }; |
63 | 63 | ||
64 | struct sample_data { | 64 | struct perf_sample { |
65 | u64 ip; | 65 | u64 ip; |
66 | u32 pid, tid; | 66 | u32 pid, tid; |
67 | u64 time; | 67 | u64 time; |
@@ -117,7 +117,7 @@ struct tracing_data_event { | |||
117 | u32 size; | 117 | u32 size; |
118 | }; | 118 | }; |
119 | 119 | ||
120 | typedef union event_union { | 120 | union perf_event { |
121 | struct perf_event_header header; | 121 | struct perf_event_header header; |
122 | struct ip_event ip; | 122 | struct ip_event ip; |
123 | struct mmap_event mmap; | 123 | struct mmap_event mmap; |
@@ -130,50 +130,54 @@ typedef union event_union { | |||
130 | struct event_type_event event_type; | 130 | struct event_type_event event_type; |
131 | struct tracing_data_event tracing_data; | 131 | struct tracing_data_event tracing_data; |
132 | struct build_id_event build_id; | 132 | struct build_id_event build_id; |
133 | } event_t; | 133 | }; |
134 | 134 | ||
135 | void event__print_totals(void); | 135 | void perf_event__print_totals(void); |
136 | 136 | ||
137 | struct perf_session; | 137 | struct perf_session; |
138 | struct thread_map; | 138 | struct thread_map; |
139 | 139 | ||
140 | typedef int (*event__handler_synth_t)(event_t *event, | 140 | typedef int (*perf_event__handler_synth_t)(union perf_event *event, |
141 | struct perf_session *session); | ||
142 | typedef int (*perf_event__handler_t)(union perf_event *event, | ||
143 | struct perf_sample *sample, | ||
141 | struct perf_session *session); | 144 | struct perf_session *session); |
142 | typedef int (*event__handler_t)(event_t *event, struct sample_data *sample, | 145 | |
143 | struct perf_session *session); | 146 | int perf_event__synthesize_thread_map(struct thread_map *threads, |
144 | 147 | perf_event__handler_t process, | |
145 | int event__synthesize_thread_map(struct thread_map *threads, | 148 | struct perf_session *session); |
146 | event__handler_t process, | 149 | int perf_event__synthesize_threads(perf_event__handler_t process, |
147 | struct perf_session *session); | 150 | struct perf_session *session); |
148 | int event__synthesize_threads(event__handler_t process, | 151 | int perf_event__synthesize_kernel_mmap(perf_event__handler_t process, |
149 | struct perf_session *session); | 152 | struct perf_session *session, |
150 | int event__synthesize_kernel_mmap(event__handler_t process, | 153 | struct machine *machine, |
151 | struct perf_session *session, | 154 | const char *symbol_name); |
152 | struct machine *machine, | 155 | |
153 | const char *symbol_name); | 156 | int perf_event__synthesize_modules(perf_event__handler_t process, |
154 | 157 | struct perf_session *session, | |
155 | int event__synthesize_modules(event__handler_t process, | 158 | struct machine *machine); |
156 | struct perf_session *session, | 159 | |
157 | struct machine *machine); | 160 | int perf_event__process_comm(union perf_event *event, struct perf_sample *sample, |
158 | 161 | struct perf_session *session); | |
159 | int event__process_comm(event_t *self, struct sample_data *sample, | 162 | int perf_event__process_lost(union perf_event *event, struct perf_sample *sample, |
160 | struct perf_session *session); | 163 | struct perf_session *session); |
161 | int event__process_lost(event_t *self, struct sample_data *sample, | 164 | int perf_event__process_mmap(union perf_event *event, struct perf_sample *sample, |
162 | struct perf_session *session); | 165 | struct perf_session *session); |
163 | int event__process_mmap(event_t *self, struct sample_data *sample, | 166 | int perf_event__process_task(union perf_event *event, struct perf_sample *sample, |
164 | struct perf_session *session); | 167 | struct perf_session *session); |
165 | int event__process_task(event_t *self, struct sample_data *sample, | 168 | int perf_event__process(union perf_event *event, struct perf_sample *sample, |
166 | struct perf_session *session); | 169 | struct perf_session *session); |
167 | int event__process(event_t *event, struct sample_data *sample, | ||
168 | struct perf_session *session); | ||
169 | 170 | ||
170 | struct addr_location; | 171 | struct addr_location; |
171 | int event__preprocess_sample(const event_t *self, struct perf_session *session, | 172 | int perf_event__preprocess_sample(const union perf_event *self, |
172 | struct addr_location *al, struct sample_data *data, | 173 | struct perf_session *session, |
173 | symbol_filter_t filter); | 174 | struct addr_location *al, |
174 | int event__parse_sample(const event_t *event, struct perf_session *session, | 175 | struct perf_sample *sample, |
175 | struct sample_data *sample); | 176 | symbol_filter_t filter); |
177 | |||
178 | const char *perf_event__name(unsigned int id); | ||
176 | 179 | ||
177 | const char *event__get_event_name(unsigned int id); | 180 | int perf_event__parse_sample(const union perf_event *event, u64 type, |
181 | bool sample_id_all, struct perf_sample *sample); | ||
178 | 182 | ||
179 | #endif /* __PERF_RECORD_H */ | 183 | #endif /* __PERF_RECORD_H */ |
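perf_event__parse_sample() now takes the sample type and the sample_id_all flag explicitly instead of a session pointer. A minimal call-site sketch, assuming the caller still obtains both values from its perf_session as the removed event.c code did; it compiles only against the perf-internal headers, and dump_one() is a made-up name.

/* Sketch of a caller: decode one event into a perf_sample and print the basics. */
static int dump_one(struct perf_session *session, union perf_event *event)
{
	struct perf_sample sample;

	if (perf_event__parse_sample(event, session->sample_type,
				     session->sample_id_all, &sample) < 0)
		return -1;

	printf("%s: pid %u tid %u ip %#" PRIx64 "\n",
	       perf_event__name(event->header.type),
	       sample.pid, sample.tid, sample.ip);
	return 0;
}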
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c new file mode 100644 index 000000000000..d852cefa20de --- /dev/null +++ b/tools/perf/util/evlist.c | |||
@@ -0,0 +1,394 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com> | ||
3 | * | ||
4 | * Parts came from builtin-{top,stat,record}.c, see those files for further | ||
5 | * copyright notes. | ||
6 | * | ||
7 | * Released under the GPL v2. (and only v2, not any later version) | ||
8 | */ | ||
9 | #include <poll.h> | ||
10 | #include "cpumap.h" | ||
11 | #include "thread_map.h" | ||
12 | #include "evlist.h" | ||
13 | #include "evsel.h" | ||
14 | #include "util.h" | ||
15 | |||
16 | #include <sys/mman.h> | ||
17 | |||
18 | #include <linux/bitops.h> | ||
19 | #include <linux/hash.h> | ||
20 | |||
21 | #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y)) | ||
22 | #define SID(e, x, y) xyarray__entry(e->sample_id, x, y) | ||
23 | |||
24 | void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus, | ||
25 | struct thread_map *threads) | ||
26 | { | ||
27 | int i; | ||
28 | |||
29 | for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i) | ||
30 | INIT_HLIST_HEAD(&evlist->heads[i]); | ||
31 | INIT_LIST_HEAD(&evlist->entries); | ||
32 | perf_evlist__set_maps(evlist, cpus, threads); | ||
33 | } | ||
34 | |||
35 | struct perf_evlist *perf_evlist__new(struct cpu_map *cpus, | ||
36 | struct thread_map *threads) | ||
37 | { | ||
38 | struct perf_evlist *evlist = zalloc(sizeof(*evlist)); | ||
39 | |||
40 | if (evlist != NULL) | ||
41 | perf_evlist__init(evlist, cpus, threads); | ||
42 | |||
43 | return evlist; | ||
44 | } | ||
45 | |||
46 | static void perf_evlist__purge(struct perf_evlist *evlist) | ||
47 | { | ||
48 | struct perf_evsel *pos, *n; | ||
49 | |||
50 | list_for_each_entry_safe(pos, n, &evlist->entries, node) { | ||
51 | list_del_init(&pos->node); | ||
52 | perf_evsel__delete(pos); | ||
53 | } | ||
54 | |||
55 | evlist->nr_entries = 0; | ||
56 | } | ||
57 | |||
58 | void perf_evlist__exit(struct perf_evlist *evlist) | ||
59 | { | ||
60 | free(evlist->mmap); | ||
61 | free(evlist->pollfd); | ||
62 | evlist->mmap = NULL; | ||
63 | evlist->pollfd = NULL; | ||
64 | } | ||
65 | |||
66 | void perf_evlist__delete(struct perf_evlist *evlist) | ||
67 | { | ||
68 | perf_evlist__purge(evlist); | ||
69 | perf_evlist__exit(evlist); | ||
70 | free(evlist); | ||
71 | } | ||
72 | |||
73 | void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry) | ||
74 | { | ||
75 | list_add_tail(&entry->node, &evlist->entries); | ||
76 | ++evlist->nr_entries; | ||
77 | } | ||
78 | |||
79 | int perf_evlist__add_default(struct perf_evlist *evlist) | ||
80 | { | ||
81 | struct perf_event_attr attr = { | ||
82 | .type = PERF_TYPE_HARDWARE, | ||
83 | .config = PERF_COUNT_HW_CPU_CYCLES, | ||
84 | }; | ||
85 | struct perf_evsel *evsel = perf_evsel__new(&attr, 0); | ||
86 | |||
87 | if (evsel == NULL) | ||
88 | return -ENOMEM; | ||
89 | |||
90 | perf_evlist__add(evlist, evsel); | ||
91 | return 0; | ||
92 | } | ||
93 | |||
94 | int perf_evlist__alloc_pollfd(struct perf_evlist *evlist) | ||
95 | { | ||
96 | int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries; | ||
97 | evlist->pollfd = malloc(sizeof(struct pollfd) * nfds); | ||
98 | return evlist->pollfd != NULL ? 0 : -ENOMEM; | ||
99 | } | ||
100 | |||
101 | void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd) | ||
102 | { | ||
103 | fcntl(fd, F_SETFL, O_NONBLOCK); | ||
104 | evlist->pollfd[evlist->nr_fds].fd = fd; | ||
105 | evlist->pollfd[evlist->nr_fds].events = POLLIN; | ||
106 | evlist->nr_fds++; | ||
107 | } | ||
108 | |||
109 | static void perf_evlist__id_hash(struct perf_evlist *evlist, | ||
110 | struct perf_evsel *evsel, | ||
111 | int cpu, int thread, u64 id) | ||
112 | { | ||
113 | int hash; | ||
114 | struct perf_sample_id *sid = SID(evsel, cpu, thread); | ||
115 | |||
116 | sid->id = id; | ||
117 | sid->evsel = evsel; | ||
118 | hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS); | ||
119 | hlist_add_head(&sid->node, &evlist->heads[hash]); | ||
120 | } | ||
121 | |||
122 | void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel, | ||
123 | int cpu, int thread, u64 id) | ||
124 | { | ||
125 | perf_evlist__id_hash(evlist, evsel, cpu, thread, id); | ||
126 | evsel->id[evsel->ids++] = id; | ||
127 | } | ||
128 | |||
129 | static int perf_evlist__id_add_fd(struct perf_evlist *evlist, | ||
130 | struct perf_evsel *evsel, | ||
131 | int cpu, int thread, int fd) | ||
132 | { | ||
133 | u64 read_data[4] = { 0, }; | ||
134 | int id_idx = 1; /* The first entry is the counter value */ | ||
135 | |||
136 | if (!(evsel->attr.read_format & PERF_FORMAT_ID) || | ||
137 | read(fd, &read_data, sizeof(read_data)) == -1) | ||
138 | return -1; | ||
139 | |||
140 | if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) | ||
141 | ++id_idx; | ||
142 | if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) | ||
143 | ++id_idx; | ||
144 | |||
145 | perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]); | ||
146 | return 0; | ||
147 | } | ||
148 | |||
149 | struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id) | ||
150 | { | ||
151 | struct hlist_head *head; | ||
152 | struct hlist_node *pos; | ||
153 | struct perf_sample_id *sid; | ||
154 | int hash; | ||
155 | |||
156 | if (evlist->nr_entries == 1) | ||
157 | return list_entry(evlist->entries.next, struct perf_evsel, node); | ||
158 | |||
159 | hash = hash_64(id, PERF_EVLIST__HLIST_BITS); | ||
160 | head = &evlist->heads[hash]; | ||
161 | |||
162 | hlist_for_each_entry(sid, pos, head, node) | ||
163 | if (sid->id == id) | ||
164 | return sid->evsel; | ||
165 | return NULL; | ||
166 | } | ||
167 | |||
168 | union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *evlist, int cpu) | ||
169 | { | ||
170 | /* XXX Move this to perf.c, making it generally available */ | ||
171 | unsigned int page_size = sysconf(_SC_PAGE_SIZE); | ||
172 | struct perf_mmap *md = &evlist->mmap[cpu]; | ||
173 | unsigned int head = perf_mmap__read_head(md); | ||
174 | unsigned int old = md->prev; | ||
175 | unsigned char *data = md->base + page_size; | ||
176 | union perf_event *event = NULL; | ||
177 | |||
178 | if (evlist->overwrite) { | ||
179 | /* | ||
180 | * If we're further behind than half the buffer, there's a chance | ||
181 | * the writer will bite our tail and mess up the samples under us. | ||
182 | * | ||
183 | * If we somehow ended up ahead of the head, we got messed up. | ||
184 | * | ||
185 | * In either case, truncate and restart at head. | ||
186 | */ | ||
187 | int diff = head - old; | ||
188 | if (diff > md->mask / 2 || diff < 0) { | ||
189 | fprintf(stderr, "WARNING: failed to keep up with mmap data.\n"); | ||
190 | |||
191 | /* | ||
192 | * head points to a known good entry, start there. | ||
193 | */ | ||
194 | old = head; | ||
195 | } | ||
196 | } | ||
197 | |||
198 | if (old != head) { | ||
199 | size_t size; | ||
200 | |||
201 | event = (union perf_event *)&data[old & md->mask]; | ||
202 | size = event->header.size; | ||
203 | |||
204 | /* | ||
205 | * Event straddles the mmap boundary -- header should always | ||
206 | * be inside due to u64 alignment of output. | ||
207 | */ | ||
208 | if ((old & md->mask) + size != ((old + size) & md->mask)) { | ||
209 | unsigned int offset = old; | ||
210 | unsigned int len = min(sizeof(*event), size), cpy; | ||
211 | void *dst = &evlist->event_copy; | ||
212 | |||
213 | do { | ||
214 | cpy = min(md->mask + 1 - (offset & md->mask), len); | ||
215 | memcpy(dst, &data[offset & md->mask], cpy); | ||
216 | offset += cpy; | ||
217 | dst += cpy; | ||
218 | len -= cpy; | ||
219 | } while (len); | ||
220 | |||
221 | event = &evlist->event_copy; | ||
222 | } | ||
223 | |||
224 | old += size; | ||
225 | } | ||
226 | |||
227 | md->prev = old; | ||
228 | |||
229 | if (!evlist->overwrite) | ||
230 | perf_mmap__write_tail(md, old); | ||
231 | |||
232 | return event; | ||
233 | } | ||
234 | |||
235 | void perf_evlist__munmap(struct perf_evlist *evlist) | ||
236 | { | ||
237 | int cpu; | ||
238 | |||
239 | for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { | ||
240 | if (evlist->mmap[cpu].base != NULL) { | ||
241 | munmap(evlist->mmap[cpu].base, evlist->mmap_len); | ||
242 | evlist->mmap[cpu].base = NULL; | ||
243 | } | ||
244 | } | ||
245 | } | ||
246 | |||
247 | int perf_evlist__alloc_mmap(struct perf_evlist *evlist) | ||
248 | { | ||
249 | evlist->mmap = zalloc(evlist->cpus->nr * sizeof(struct perf_mmap)); | ||
250 | return evlist->mmap != NULL ? 0 : -ENOMEM; | ||
251 | } | ||
252 | |||
253 | static int __perf_evlist__mmap(struct perf_evlist *evlist, int cpu, int prot, | ||
254 | int mask, int fd) | ||
255 | { | ||
256 | evlist->mmap[cpu].prev = 0; | ||
257 | evlist->mmap[cpu].mask = mask; | ||
258 | evlist->mmap[cpu].base = mmap(NULL, evlist->mmap_len, prot, | ||
259 | MAP_SHARED, fd, 0); | ||
260 | if (evlist->mmap[cpu].base == MAP_FAILED) | ||
261 | return -1; | ||
262 | |||
263 | perf_evlist__add_pollfd(evlist, fd); | ||
264 | return 0; | ||
265 | } | ||
266 | |||
267 | /** perf_evlist__mmap - Create per cpu maps to receive events | ||
268 | * | ||
269 | * @evlist - list of events | ||
270 | * @pages - map length in pages | ||
271 | * @overwrite - overwrite older events? | ||
272 | * | ||
273 | * If overwrite is false the user needs to signal event consumption using: | ||
274 | * | ||
275 | * struct perf_mmap *m = &evlist->mmap[cpu]; | ||
276 | * unsigned int head = perf_mmap__read_head(m); | ||
277 | * | ||
278 | * perf_mmap__write_tail(m, head) | ||
279 | * | ||
280 | * Using perf_evlist__read_on_cpu does this automatically. | ||
281 | */ | ||
282 | int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite) | ||
283 | { | ||
284 | unsigned int page_size = sysconf(_SC_PAGE_SIZE); | ||
285 | int mask = pages * page_size - 1, cpu; | ||
286 | struct perf_evsel *first_evsel, *evsel; | ||
287 | const struct cpu_map *cpus = evlist->cpus; | ||
288 | const struct thread_map *threads = evlist->threads; | ||
289 | int thread, prot = PROT_READ | (overwrite ? 0 : PROT_WRITE); | ||
290 | |||
291 | if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0) | ||
292 | return -ENOMEM; | ||
293 | |||
294 | if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0) | ||
295 | return -ENOMEM; | ||
296 | |||
297 | evlist->overwrite = overwrite; | ||
298 | evlist->mmap_len = (pages + 1) * page_size; | ||
299 | first_evsel = list_entry(evlist->entries.next, struct perf_evsel, node); | ||
300 | |||
301 | list_for_each_entry(evsel, &evlist->entries, node) { | ||
302 | if ((evsel->attr.read_format & PERF_FORMAT_ID) && | ||
303 | evsel->sample_id == NULL && | ||
304 | perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0) | ||
305 | return -ENOMEM; | ||
306 | |||
307 | for (cpu = 0; cpu < cpus->nr; cpu++) { | ||
308 | for (thread = 0; thread < threads->nr; thread++) { | ||
309 | int fd = FD(evsel, cpu, thread); | ||
310 | |||
311 | if (evsel->idx || thread) { | ||
312 | if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, | ||
313 | FD(first_evsel, cpu, 0)) != 0) | ||
314 | goto out_unmap; | ||
315 | } else if (__perf_evlist__mmap(evlist, cpu, prot, mask, fd) < 0) | ||
316 | goto out_unmap; | ||
317 | |||
318 | if ((evsel->attr.read_format & PERF_FORMAT_ID) && | ||
319 | perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0) | ||
320 | goto out_unmap; | ||
321 | } | ||
322 | } | ||
323 | } | ||
324 | |||
325 | return 0; | ||
326 | |||
327 | out_unmap: | ||
328 | for (cpu = 0; cpu < cpus->nr; cpu++) { | ||
329 | if (evlist->mmap[cpu].base != NULL) { | ||
330 | munmap(evlist->mmap[cpu].base, evlist->mmap_len); | ||
331 | evlist->mmap[cpu].base = NULL; | ||
332 | } | ||
333 | } | ||
334 | return -1; | ||
335 | } | ||
336 | |||
337 | int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid, | ||
338 | pid_t target_tid, const char *cpu_list) | ||
339 | { | ||
340 | evlist->threads = thread_map__new(target_pid, target_tid); | ||
341 | |||
342 | if (evlist->threads == NULL) | ||
343 | return -1; | ||
344 | |||
345 | if (target_tid != -1) | ||
346 | evlist->cpus = cpu_map__dummy_new(); | ||
347 | else | ||
348 | evlist->cpus = cpu_map__new(cpu_list); | ||
349 | |||
350 | if (evlist->cpus == NULL) | ||
351 | goto out_delete_threads; | ||
352 | |||
353 | return 0; | ||
354 | |||
355 | out_delete_threads: | ||
356 | thread_map__delete(evlist->threads); | ||
357 | return -1; | ||
358 | } | ||
359 | |||
360 | void perf_evlist__delete_maps(struct perf_evlist *evlist) | ||
361 | { | ||
362 | cpu_map__delete(evlist->cpus); | ||
363 | thread_map__delete(evlist->threads); | ||
364 | evlist->cpus = NULL; | ||
365 | evlist->threads = NULL; | ||
366 | } | ||
367 | |||
368 | int perf_evlist__set_filters(struct perf_evlist *evlist) | ||
369 | { | ||
370 | const struct thread_map *threads = evlist->threads; | ||
371 | const struct cpu_map *cpus = evlist->cpus; | ||
372 | struct perf_evsel *evsel; | ||
373 | char *filter; | ||
374 | int thread; | ||
375 | int cpu; | ||
376 | int err; | ||
377 | int fd; | ||
378 | |||
379 | list_for_each_entry(evsel, &evlist->entries, node) { | ||
380 | filter = evsel->filter; | ||
381 | if (!filter) | ||
382 | continue; | ||
383 | for (cpu = 0; cpu < cpus->nr; cpu++) { | ||
384 | for (thread = 0; thread < threads->nr; thread++) { | ||
385 | fd = FD(evsel, cpu, thread); | ||
386 | err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter); | ||
387 | if (err) | ||
388 | return err; | ||
389 | } | ||
390 | } | ||
391 | } | ||
392 | |||
393 | return 0; | ||
394 | } | ||
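The kernel-doc comment on perf_evlist__mmap() above describes the tail-update contract; perf_evlist__read_on_cpu() takes care of it when overwrite is false. A consumption-loop sketch using only the new evlist API; it assumes the counters have already been opened on each cpu/thread (which happens via evsel.c), and the 128 pages and drain_events() name are arbitrary example choices.

/* Drain whatever is currently queued in the per-cpu ring buffers and print
 * the record type of each event. */
static int drain_events(struct perf_evlist *evlist)
{
	union perf_event *event;
	int cpu;

	if (perf_evlist__mmap(evlist, 128, false) < 0)
		return -1;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		while ((event = perf_evlist__read_on_cpu(evlist, cpu)) != NULL)
			printf("cpu %d: %s\n", cpu,
			       perf_event__name(event->header.type));
	}

	perf_evlist__munmap(evlist);
	return 0;
}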
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h new file mode 100644 index 000000000000..8b1cb7a4c5f1 --- /dev/null +++ b/tools/perf/util/evlist.h | |||
@@ -0,0 +1,68 @@ | |||
1 | #ifndef __PERF_EVLIST_H | ||
2 | #define __PERF_EVLIST_H 1 | ||
3 | |||
4 | #include <linux/list.h> | ||
5 | #include "../perf.h" | ||
6 | #include "event.h" | ||
7 | |||
8 | struct pollfd; | ||
9 | struct thread_map; | ||
10 | struct cpu_map; | ||
11 | |||
12 | #define PERF_EVLIST__HLIST_BITS 8 | ||
13 | #define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS) | ||
14 | |||
15 | struct perf_evlist { | ||
16 | struct list_head entries; | ||
17 | struct hlist_head heads[PERF_EVLIST__HLIST_SIZE]; | ||
18 | int nr_entries; | ||
19 | int nr_fds; | ||
20 | int mmap_len; | ||
21 | bool overwrite; | ||
22 | union perf_event event_copy; | ||
23 | struct perf_mmap *mmap; | ||
24 | struct pollfd *pollfd; | ||
25 | struct thread_map *threads; | ||
26 | struct cpu_map *cpus; | ||
27 | }; | ||
28 | |||
29 | struct perf_evsel; | ||
30 | |||
31 | struct perf_evlist *perf_evlist__new(struct cpu_map *cpus, | ||
32 | struct thread_map *threads); | ||
33 | void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus, | ||
34 | struct thread_map *threads); | ||
35 | void perf_evlist__exit(struct perf_evlist *evlist); | ||
36 | void perf_evlist__delete(struct perf_evlist *evlist); | ||
37 | |||
38 | void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry); | ||
39 | int perf_evlist__add_default(struct perf_evlist *evlist); | ||
40 | |||
41 | void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel, | ||
42 | int cpu, int thread, u64 id); | ||
43 | |||
44 | int perf_evlist__alloc_pollfd(struct perf_evlist *evlist); | ||
45 | void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd); | ||
46 | |||
47 | struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id); | ||
48 | |||
49 | union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *self, int cpu); | ||
50 | |||
51 | int perf_evlist__alloc_mmap(struct perf_evlist *evlist); | ||
52 | int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite); | ||
53 | void perf_evlist__munmap(struct perf_evlist *evlist); | ||
54 | |||
55 | static inline void perf_evlist__set_maps(struct perf_evlist *evlist, | ||
56 | struct cpu_map *cpus, | ||
57 | struct thread_map *threads) | ||
58 | { | ||
59 | evlist->cpus = cpus; | ||
60 | evlist->threads = threads; | ||
61 | } | ||
62 | |||
63 | int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid, | ||
64 | pid_t target_tid, const char *cpu_list); | ||
65 | void perf_evlist__delete_maps(struct perf_evlist *evlist); | ||
66 | int perf_evlist__set_filters(struct perf_evlist *evlist); | ||
67 | |||
68 | #endif /* __PERF_EVLIST_H */ | ||
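Putting the header together, creating and tearing down an evlist for one existing thread might look like the sketch below; the default cycles event and the NULL cpu_list are just example choices, and evlist_for_tid() is a hypothetical helper, not part of the patch.

/* Lifecycle sketch: allocate an evlist, give it the default cycles event,
 * build thread/cpu maps for one existing tid, then hand it back. */
static struct perf_evlist *evlist_for_tid(pid_t tid)
{
	struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);

	if (evlist == NULL)
		return NULL;

	if (perf_evlist__add_default(evlist) < 0 ||
	    perf_evlist__create_maps(evlist, -1, tid, NULL) < 0) {
		perf_evlist__delete(evlist);
		return NULL;
	}

	return evlist;
}

/* ... later: perf_evlist__delete_maps(evlist); perf_evlist__delete(evlist); */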
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index d8575d31ee6c..662596afd7f1 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c | |||
@@ -1,20 +1,34 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com> | ||
3 | * | ||
4 | * Parts came from builtin-{top,stat,record}.c, see those files for further | ||
5 | * copyright notes. | ||
6 | * | ||
7 | * Released under the GPL v2. (and only v2, not any later version) | ||
8 | */ | ||
9 | |||
1 | #include "evsel.h" | 10 | #include "evsel.h" |
2 | #include "../perf.h" | 11 | #include "evlist.h" |
3 | #include "util.h" | 12 | #include "util.h" |
4 | #include "cpumap.h" | 13 | #include "cpumap.h" |
5 | #include "thread.h" | 14 | #include "thread_map.h" |
6 | 15 | ||
7 | #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y)) | 16 | #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y)) |
8 | 17 | ||
18 | void perf_evsel__init(struct perf_evsel *evsel, | ||
19 | struct perf_event_attr *attr, int idx) | ||
20 | { | ||
21 | evsel->idx = idx; | ||
22 | evsel->attr = *attr; | ||
23 | INIT_LIST_HEAD(&evsel->node); | ||
24 | } | ||
25 | |||
9 | struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx) | 26 | struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx) |
10 | { | 27 | { |
11 | struct perf_evsel *evsel = zalloc(sizeof(*evsel)); | 28 | struct perf_evsel *evsel = zalloc(sizeof(*evsel)); |
12 | 29 | ||
13 | if (evsel != NULL) { | 30 | if (evsel != NULL) |
14 | evsel->idx = idx; | 31 | perf_evsel__init(evsel, attr, idx); |
15 | evsel->attr = *attr; | ||
16 | INIT_LIST_HEAD(&evsel->node); | ||
17 | } | ||
18 | 32 | ||
19 | return evsel; | 33 | return evsel; |
20 | } | 34 | } |
@@ -25,6 +39,22 @@ int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads) | |||
25 | return evsel->fd != NULL ? 0 : -ENOMEM; | 39 | return evsel->fd != NULL ? 0 : -ENOMEM; |
26 | } | 40 | } |
27 | 41 | ||
42 | int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads) | ||
43 | { | ||
44 | evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id)); | ||
45 | if (evsel->sample_id == NULL) | ||
46 | return -ENOMEM; | ||
47 | |||
48 | evsel->id = zalloc(ncpus * nthreads * sizeof(u64)); | ||
49 | if (evsel->id == NULL) { | ||
50 | xyarray__delete(evsel->sample_id); | ||
51 | evsel->sample_id = NULL; | ||
52 | return -ENOMEM; | ||
53 | } | ||
54 | |||
55 | return 0; | ||
56 | } | ||
57 | |||
28 | int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus) | 58 | int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus) |
29 | { | 59 | { |
30 | evsel->counts = zalloc((sizeof(*evsel->counts) + | 60 | evsel->counts = zalloc((sizeof(*evsel->counts) + |
@@ -38,6 +68,14 @@ void perf_evsel__free_fd(struct perf_evsel *evsel) | |||
38 | evsel->fd = NULL; | 68 | evsel->fd = NULL; |
39 | } | 69 | } |
40 | 70 | ||
71 | void perf_evsel__free_id(struct perf_evsel *evsel) | ||
72 | { | ||
73 | xyarray__delete(evsel->sample_id); | ||
74 | evsel->sample_id = NULL; | ||
75 | free(evsel->id); | ||
76 | evsel->id = NULL; | ||
77 | } | ||
78 | |||
41 | void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads) | 79 | void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads) |
42 | { | 80 | { |
43 | int cpu, thread; | 81 | int cpu, thread; |
@@ -49,10 +87,19 @@ void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads) | |||
49 | } | 87 | } |
50 | } | 88 | } |
51 | 89 | ||
52 | void perf_evsel__delete(struct perf_evsel *evsel) | 90 | void perf_evsel__exit(struct perf_evsel *evsel) |
53 | { | 91 | { |
54 | assert(list_empty(&evsel->node)); | 92 | assert(list_empty(&evsel->node)); |
55 | xyarray__delete(evsel->fd); | 93 | xyarray__delete(evsel->fd); |
94 | xyarray__delete(evsel->sample_id); | ||
95 | free(evsel->id); | ||
96 | } | ||
97 | |||
98 | void perf_evsel__delete(struct perf_evsel *evsel) | ||
99 | { | ||
100 | perf_evsel__exit(evsel); | ||
101 | close_cgroup(evsel->cgrp); | ||
102 | free(evsel->name); | ||
56 | free(evsel); | 103 | free(evsel); |
57 | } | 104 | } |
58 | 105 | ||
@@ -128,21 +175,51 @@ int __perf_evsel__read(struct perf_evsel *evsel, | |||
128 | } | 175 | } |
129 | 176 | ||
130 | static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, | 177 | static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, |
131 | struct thread_map *threads) | 178 | struct thread_map *threads, bool group, bool inherit) |
132 | { | 179 | { |
133 | int cpu, thread; | 180 | int cpu, thread; |
181 | unsigned long flags = 0; | ||
182 | int pid = -1; | ||
134 | 183 | ||
135 | if (evsel->fd == NULL && | 184 | if (evsel->fd == NULL && |
136 | perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0) | 185 | perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0) |
137 | return -1; | 186 | return -1; |
138 | 187 | ||
188 | if (evsel->cgrp) { | ||
189 | flags = PERF_FLAG_PID_CGROUP; | ||
190 | pid = evsel->cgrp->fd; | ||
191 | } | ||
192 | |||
139 | for (cpu = 0; cpu < cpus->nr; cpu++) { | 193 | for (cpu = 0; cpu < cpus->nr; cpu++) { |
194 | int group_fd = -1; | ||
195 | /* | ||
196 | * Don't allow mmap() of inherited per-task counters. This | ||
197 | * would create a performance issue due to all children writing | ||
198 | * to the same buffer. | ||
199 | * | ||
200 | * FIXME: | ||
201 | * Proper fix is not to pass 'inherit' to perf_evsel__open*, | ||
202 | * but a 'flags' parameter, with 'group' folded there as well, | ||
203 | * then introduce a PERF_O_{MMAP,GROUP,INHERIT} enum, and if | ||
204 | * O_MMAP is set, emit a warning if cpu < 0 and O_INHERIT is | ||
205 | * set. Let's go for the minimal fix first, though. | ||
206 | */ | ||
207 | evsel->attr.inherit = (cpus->map[cpu] >= 0) && inherit; | ||
208 | |||
140 | for (thread = 0; thread < threads->nr; thread++) { | 209 | for (thread = 0; thread < threads->nr; thread++) { |
210 | |||
211 | if (!evsel->cgrp) | ||
212 | pid = threads->map[thread]; | ||
213 | |||
141 | FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr, | 214 | FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr, |
142 | threads->map[thread], | 215 | pid, |
143 | cpus->map[cpu], -1, 0); | 216 | cpus->map[cpu], |
217 | group_fd, flags); | ||
144 | if (FD(evsel, cpu, thread) < 0) | 218 | if (FD(evsel, cpu, thread) < 0) |
145 | goto out_close; | 219 | goto out_close; |
220 | |||
221 | if (group && group_fd == -1) | ||
222 | group_fd = FD(evsel, cpu, thread); | ||
146 | } | 223 | } |
147 | } | 224 | } |
148 | 225 | ||
@@ -175,10 +252,9 @@ static struct { | |||
175 | .threads = { -1, }, | 252 | .threads = { -1, }, |
176 | }; | 253 | }; |
177 | 254 | ||
178 | int perf_evsel__open(struct perf_evsel *evsel, | 255 | int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, |
179 | struct cpu_map *cpus, struct thread_map *threads) | 256 | struct thread_map *threads, bool group, bool inherit) |
180 | { | 257 | { |
181 | |||
182 | if (cpus == NULL) { | 258 | if (cpus == NULL) { |
183 | /* Work around old compiler warnings about strict aliasing */ | 259 | /* Work around old compiler warnings about strict aliasing */ |
184 | cpus = &empty_cpu_map.map; | 260 | cpus = &empty_cpu_map.map; |
@@ -187,15 +263,135 @@ int perf_evsel__open(struct perf_evsel *evsel, | |||
187 | if (threads == NULL) | 263 | if (threads == NULL) |
188 | threads = &empty_thread_map.map; | 264 | threads = &empty_thread_map.map; |
189 | 265 | ||
190 | return __perf_evsel__open(evsel, cpus, threads); | 266 | return __perf_evsel__open(evsel, cpus, threads, group, inherit); |
191 | } | 267 | } |
192 | 268 | ||
193 | int perf_evsel__open_per_cpu(struct perf_evsel *evsel, struct cpu_map *cpus) | 269 | int perf_evsel__open_per_cpu(struct perf_evsel *evsel, |
270 | struct cpu_map *cpus, bool group, bool inherit) | ||
194 | { | 271 | { |
195 | return __perf_evsel__open(evsel, cpus, &empty_thread_map.map); | 272 | return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group, inherit); |
273 | } | ||
274 | |||
275 | int perf_evsel__open_per_thread(struct perf_evsel *evsel, | ||
276 | struct thread_map *threads, bool group, bool inherit) | ||
277 | { | ||
278 | return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group, inherit); | ||
279 | } | ||
280 | |||
281 | static int perf_event__parse_id_sample(const union perf_event *event, u64 type, | ||
282 | struct perf_sample *sample) | ||
283 | { | ||
284 | const u64 *array = event->sample.array; | ||
285 | |||
286 | array += ((event->header.size - | ||
287 | sizeof(event->header)) / sizeof(u64)) - 1; | ||
288 | |||
289 | if (type & PERF_SAMPLE_CPU) { | ||
290 | u32 *p = (u32 *)array; | ||
291 | sample->cpu = *p; | ||
292 | array--; | ||
293 | } | ||
294 | |||
295 | if (type & PERF_SAMPLE_STREAM_ID) { | ||
296 | sample->stream_id = *array; | ||
297 | array--; | ||
298 | } | ||
299 | |||
300 | if (type & PERF_SAMPLE_ID) { | ||
301 | sample->id = *array; | ||
302 | array--; | ||
303 | } | ||
304 | |||
305 | if (type & PERF_SAMPLE_TIME) { | ||
306 | sample->time = *array; | ||
307 | array--; | ||
308 | } | ||
309 | |||
310 | if (type & PERF_SAMPLE_TID) { | ||
311 | u32 *p = (u32 *)array; | ||
312 | sample->pid = p[0]; | ||
313 | sample->tid = p[1]; | ||
314 | } | ||
315 | |||
316 | return 0; | ||
196 | } | 317 | } |
197 | 318 | ||
198 | int perf_evsel__open_per_thread(struct perf_evsel *evsel, struct thread_map *threads) | 319 | int perf_event__parse_sample(const union perf_event *event, u64 type, |
320 | bool sample_id_all, struct perf_sample *data) | ||
199 | { | 321 | { |
200 | return __perf_evsel__open(evsel, &empty_cpu_map.map, threads); | 322 | const u64 *array; |
323 | |||
324 | data->cpu = data->pid = data->tid = -1; | ||
325 | data->stream_id = data->id = data->time = -1ULL; | ||
326 | |||
327 | if (event->header.type != PERF_RECORD_SAMPLE) { | ||
328 | if (!sample_id_all) | ||
329 | return 0; | ||
330 | return perf_event__parse_id_sample(event, type, data); | ||
331 | } | ||
332 | |||
333 | array = event->sample.array; | ||
334 | |||
335 | if (type & PERF_SAMPLE_IP) { | ||
336 | data->ip = event->ip.ip; | ||
337 | array++; | ||
338 | } | ||
339 | |||
340 | if (type & PERF_SAMPLE_TID) { | ||
341 | u32 *p = (u32 *)array; | ||
342 | data->pid = p[0]; | ||
343 | data->tid = p[1]; | ||
344 | array++; | ||
345 | } | ||
346 | |||
347 | if (type & PERF_SAMPLE_TIME) { | ||
348 | data->time = *array; | ||
349 | array++; | ||
350 | } | ||
351 | |||
352 | if (type & PERF_SAMPLE_ADDR) { | ||
353 | data->addr = *array; | ||
354 | array++; | ||
355 | } | ||
356 | |||
357 | data->id = -1ULL; | ||
358 | if (type & PERF_SAMPLE_ID) { | ||
359 | data->id = *array; | ||
360 | array++; | ||
361 | } | ||
362 | |||
363 | if (type & PERF_SAMPLE_STREAM_ID) { | ||
364 | data->stream_id = *array; | ||
365 | array++; | ||
366 | } | ||
367 | |||
368 | if (type & PERF_SAMPLE_CPU) { | ||
369 | u32 *p = (u32 *)array; | ||
370 | data->cpu = *p; | ||
371 | array++; | ||
372 | } | ||
373 | |||
374 | if (type & PERF_SAMPLE_PERIOD) { | ||
375 | data->period = *array; | ||
376 | array++; | ||
377 | } | ||
378 | |||
379 | if (type & PERF_SAMPLE_READ) { | ||
380 | fprintf(stderr, "PERF_SAMPLE_READ is unsupported for now\n"); | ||
381 | return -1; | ||
382 | } | ||
383 | |||
384 | if (type & PERF_SAMPLE_CALLCHAIN) { | ||
385 | data->callchain = (struct ip_callchain *)array; | ||
386 | array += 1 + data->callchain->nr; | ||
387 | } | ||
388 | |||
389 | if (type & PERF_SAMPLE_RAW) { | ||
390 | u32 *p = (u32 *)array; | ||
391 | data->raw_size = *p; | ||
392 | p++; | ||
393 | data->raw_data = p; | ||
394 | } | ||
395 | |||
396 | return 0; | ||
201 | } | 397 | } |
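perf_event__parse_sample() above consumes the record body front to back in the fixed PERF_SAMPLE_* order, whereas perf_event__parse_id_sample() walks a non-sample record back to front, because with attr.sample_id_all the same id fields are appended to every record as a trailer. A small self-contained illustration (not part of the patch, assuming linux/perf_event.h is available) of how many u64 slots the fixed part occupies for a given sample_type, in the same order the parser reads them; CALLCHAIN and RAW are variable-sized and omitted:

#include <stdio.h>
#include <stddef.h>
#include <linux/perf_event.h>

static size_t sample_fixed_slots(unsigned long long sample_type)
{
	size_t n = 0;

	if (sample_type & PERF_SAMPLE_IP)        n++;
	if (sample_type & PERF_SAMPLE_TID)       n++;	/* u32 pid, tid in one slot */
	if (sample_type & PERF_SAMPLE_TIME)      n++;
	if (sample_type & PERF_SAMPLE_ADDR)      n++;
	if (sample_type & PERF_SAMPLE_ID)        n++;
	if (sample_type & PERF_SAMPLE_STREAM_ID) n++;
	if (sample_type & PERF_SAMPLE_CPU)       n++;	/* u32 cpu, res in one slot */
	if (sample_type & PERF_SAMPLE_PERIOD)    n++;

	return n;
}

int main(void)
{
	unsigned long long type = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
				  PERF_SAMPLE_TIME | PERF_SAMPLE_ID |
				  PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD;

	printf("%zu u64 slots before callchain/raw\n", sample_fixed_slots(type));
	return 0;
}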
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index b2d755fe88a5..6710ab538342 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h | |||
@@ -6,6 +6,8 @@ | |||
6 | #include "../../../include/linux/perf_event.h" | 6 | #include "../../../include/linux/perf_event.h" |
7 | #include "types.h" | 7 | #include "types.h" |
8 | #include "xyarray.h" | 8 | #include "xyarray.h" |
9 | #include "cgroup.h" | ||
10 | #include "hist.h" | ||
9 | 11 | ||
10 | struct perf_counts_values { | 12 | struct perf_counts_values { |
11 | union { | 13 | union { |
@@ -24,31 +26,66 @@ struct perf_counts { | |||
24 | struct perf_counts_values cpu[]; | 26 | struct perf_counts_values cpu[]; |
25 | }; | 27 | }; |
26 | 28 | ||
29 | struct perf_evsel; | ||
30 | |||
31 | /* | ||
32 | * Per fd, to map back from PERF_SAMPLE_ID to evsel, only used when there are | ||
33 | * more than one entry in the evlist. | ||
34 | */ | ||
35 | struct perf_sample_id { | ||
36 | struct hlist_node node; | ||
37 | u64 id; | ||
38 | struct perf_evsel *evsel; | ||
39 | }; | ||
40 | |||
41 | /** struct perf_evsel - event selector | ||
42 | * | ||
43 | * @name - Can be set to retain the original event name passed by the user, | ||
44 | * so that when showing results in tools such as 'perf stat', we | ||
45 | * show the name used, not some alias. | ||
46 | */ | ||
27 | struct perf_evsel { | 47 | struct perf_evsel { |
28 | struct list_head node; | 48 | struct list_head node; |
29 | struct perf_event_attr attr; | 49 | struct perf_event_attr attr; |
30 | char *filter; | 50 | char *filter; |
31 | struct xyarray *fd; | 51 | struct xyarray *fd; |
52 | struct xyarray *sample_id; | ||
53 | u64 *id; | ||
32 | struct perf_counts *counts; | 54 | struct perf_counts *counts; |
33 | int idx; | 55 | int idx; |
34 | void *priv; | 56 | int ids; |
57 | struct hists hists; | ||
58 | char *name; | ||
59 | union { | ||
60 | void *priv; | ||
61 | off_t id_offset; | ||
62 | }; | ||
63 | struct cgroup_sel *cgrp; | ||
35 | }; | 64 | }; |
36 | 65 | ||
37 | struct cpu_map; | 66 | struct cpu_map; |
38 | struct thread_map; | 67 | struct thread_map; |
68 | struct perf_evlist; | ||
39 | 69 | ||
40 | struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx); | 70 | struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx); |
71 | void perf_evsel__init(struct perf_evsel *evsel, | ||
72 | struct perf_event_attr *attr, int idx); | ||
73 | void perf_evsel__exit(struct perf_evsel *evsel); | ||
41 | void perf_evsel__delete(struct perf_evsel *evsel); | 74 | void perf_evsel__delete(struct perf_evsel *evsel); |
42 | 75 | ||
43 | int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads); | 76 | int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads); |
77 | int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads); | ||
44 | int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus); | 78 | int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus); |
45 | void perf_evsel__free_fd(struct perf_evsel *evsel); | 79 | void perf_evsel__free_fd(struct perf_evsel *evsel); |
80 | void perf_evsel__free_id(struct perf_evsel *evsel); | ||
46 | void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads); | 81 | void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads); |
47 | 82 | ||
48 | int perf_evsel__open_per_cpu(struct perf_evsel *evsel, struct cpu_map *cpus); | 83 | int perf_evsel__open_per_cpu(struct perf_evsel *evsel, |
49 | int perf_evsel__open_per_thread(struct perf_evsel *evsel, struct thread_map *threads); | 84 | struct cpu_map *cpus, bool group, bool inherit); |
50 | int perf_evsel__open(struct perf_evsel *evsel, | 85 | int perf_evsel__open_per_thread(struct perf_evsel *evsel, |
51 | struct cpu_map *cpus, struct thread_map *threads); | 86 | struct thread_map *threads, bool group, bool inherit); |
87 | int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, | ||
88 | struct thread_map *threads, bool group, bool inherit); | ||
52 | 89 | ||
53 | #define perf_evsel__match(evsel, t, c) \ | 90 | #define perf_evsel__match(evsel, t, c) \ |
54 | (evsel->attr.type == PERF_TYPE_##t && \ | 91 | (evsel->attr.type == PERF_TYPE_##t && \ |
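struct perf_sample_id above is the glue that lets perf_evlist__id2evsel() map a PERF_SAMPLE_ID found in a record back to the evsel that produced it: perf_evlist__id_add() fills one node per fd and hangs it off a hash bucket kept in the evlist. A sketch of the lookup side under those assumptions; the bucket array and bit count used here (buckets, SKETCH_HLIST_BITS) are illustrative names, the real ones live in evlist.h/evlist.c:

#include <linux/hash.h>
#include <linux/list.h>
#include "evsel.h"

#define SKETCH_HLIST_BITS 8

static struct perf_evsel *id2evsel_sketch(struct hlist_head *buckets, u64 id)
{
	struct perf_sample_id *sid;
	struct hlist_node *pos;
	int bucket = hash_64(id, SKETCH_HLIST_BITS);

	hlist_for_each_entry(sid, pos, &buckets[bucket], node)
		if (sid->id == id)
			return sid->evsel;	/* owner of this sample id */

	return NULL;	/* unknown id */
}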
diff --git a/tools/perf/util/exec_cmd.c b/tools/perf/util/exec_cmd.c index 67eeff571568..7adf4ad15d8f 100644 --- a/tools/perf/util/exec_cmd.c +++ b/tools/perf/util/exec_cmd.c | |||
@@ -11,31 +11,12 @@ static const char *argv0_path; | |||
11 | 11 | ||
12 | const char *system_path(const char *path) | 12 | const char *system_path(const char *path) |
13 | { | 13 | { |
14 | #ifdef RUNTIME_PREFIX | ||
15 | static const char *prefix; | ||
16 | #else | ||
17 | static const char *prefix = PREFIX; | 14 | static const char *prefix = PREFIX; |
18 | #endif | ||
19 | struct strbuf d = STRBUF_INIT; | 15 | struct strbuf d = STRBUF_INIT; |
20 | 16 | ||
21 | if (is_absolute_path(path)) | 17 | if (is_absolute_path(path)) |
22 | return path; | 18 | return path; |
23 | 19 | ||
24 | #ifdef RUNTIME_PREFIX | ||
25 | assert(argv0_path); | ||
26 | assert(is_absolute_path(argv0_path)); | ||
27 | |||
28 | if (!prefix && | ||
29 | !(prefix = strip_path_suffix(argv0_path, PERF_EXEC_PATH)) && | ||
30 | !(prefix = strip_path_suffix(argv0_path, BINDIR)) && | ||
31 | !(prefix = strip_path_suffix(argv0_path, "perf"))) { | ||
32 | prefix = PREFIX; | ||
33 | fprintf(stderr, "RUNTIME_PREFIX requested, " | ||
34 | "but prefix computation failed. " | ||
35 | "Using static fallback '%s'.\n", prefix); | ||
36 | } | ||
37 | #endif | ||
38 | |||
39 | strbuf_addf(&d, "%s/%s", prefix, path); | 20 | strbuf_addf(&d, "%s/%s", prefix, path); |
40 | path = strbuf_detach(&d, NULL); | 21 | path = strbuf_detach(&d, NULL); |
41 | return path; | 22 | return path; |
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 0866bcdb5e8e..e5230c0ef95b 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c | |||
@@ -8,6 +8,8 @@ | |||
8 | #include <linux/list.h> | 8 | #include <linux/list.h> |
9 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
10 | 10 | ||
11 | #include "evlist.h" | ||
12 | #include "evsel.h" | ||
11 | #include "util.h" | 13 | #include "util.h" |
12 | #include "header.h" | 14 | #include "header.h" |
13 | #include "../perf.h" | 15 | #include "../perf.h" |
@@ -18,89 +20,6 @@ | |||
18 | 20 | ||
19 | static bool no_buildid_cache = false; | 21 | static bool no_buildid_cache = false; |
20 | 22 | ||
21 | /* | ||
22 | * Create new perf.data header attribute: | ||
23 | */ | ||
24 | struct perf_header_attr *perf_header_attr__new(struct perf_event_attr *attr) | ||
25 | { | ||
26 | struct perf_header_attr *self = malloc(sizeof(*self)); | ||
27 | |||
28 | if (self != NULL) { | ||
29 | self->attr = *attr; | ||
30 | self->ids = 0; | ||
31 | self->size = 1; | ||
32 | self->id = malloc(sizeof(u64)); | ||
33 | if (self->id == NULL) { | ||
34 | free(self); | ||
35 | self = NULL; | ||
36 | } | ||
37 | } | ||
38 | |||
39 | return self; | ||
40 | } | ||
41 | |||
42 | void perf_header_attr__delete(struct perf_header_attr *self) | ||
43 | { | ||
44 | free(self->id); | ||
45 | free(self); | ||
46 | } | ||
47 | |||
48 | int perf_header_attr__add_id(struct perf_header_attr *self, u64 id) | ||
49 | { | ||
50 | int pos = self->ids; | ||
51 | |||
52 | self->ids++; | ||
53 | if (self->ids > self->size) { | ||
54 | int nsize = self->size * 2; | ||
55 | u64 *nid = realloc(self->id, nsize * sizeof(u64)); | ||
56 | |||
57 | if (nid == NULL) | ||
58 | return -1; | ||
59 | |||
60 | self->size = nsize; | ||
61 | self->id = nid; | ||
62 | } | ||
63 | self->id[pos] = id; | ||
64 | return 0; | ||
65 | } | ||
66 | |||
67 | int perf_header__init(struct perf_header *self) | ||
68 | { | ||
69 | self->size = 1; | ||
70 | self->attr = malloc(sizeof(void *)); | ||
71 | return self->attr == NULL ? -ENOMEM : 0; | ||
72 | } | ||
73 | |||
74 | void perf_header__exit(struct perf_header *self) | ||
75 | { | ||
76 | int i; | ||
77 | for (i = 0; i < self->attrs; ++i) | ||
78 | perf_header_attr__delete(self->attr[i]); | ||
79 | free(self->attr); | ||
80 | } | ||
81 | |||
82 | int perf_header__add_attr(struct perf_header *self, | ||
83 | struct perf_header_attr *attr) | ||
84 | { | ||
85 | if (self->frozen) | ||
86 | return -1; | ||
87 | |||
88 | if (self->attrs == self->size) { | ||
89 | int nsize = self->size * 2; | ||
90 | struct perf_header_attr **nattr; | ||
91 | |||
92 | nattr = realloc(self->attr, nsize * sizeof(void *)); | ||
93 | if (nattr == NULL) | ||
94 | return -1; | ||
95 | |||
96 | self->size = nsize; | ||
97 | self->attr = nattr; | ||
98 | } | ||
99 | |||
100 | self->attr[self->attrs++] = attr; | ||
101 | return 0; | ||
102 | } | ||
103 | |||
104 | static int event_count; | 23 | static int event_count; |
105 | static struct perf_trace_event_type *events; | 24 | static struct perf_trace_event_type *events; |
106 | 25 | ||
@@ -147,19 +66,19 @@ struct perf_file_attr { | |||
147 | struct perf_file_section ids; | 66 | struct perf_file_section ids; |
148 | }; | 67 | }; |
149 | 68 | ||
150 | void perf_header__set_feat(struct perf_header *self, int feat) | 69 | void perf_header__set_feat(struct perf_header *header, int feat) |
151 | { | 70 | { |
152 | set_bit(feat, self->adds_features); | 71 | set_bit(feat, header->adds_features); |
153 | } | 72 | } |
154 | 73 | ||
155 | void perf_header__clear_feat(struct perf_header *self, int feat) | 74 | void perf_header__clear_feat(struct perf_header *header, int feat) |
156 | { | 75 | { |
157 | clear_bit(feat, self->adds_features); | 76 | clear_bit(feat, header->adds_features); |
158 | } | 77 | } |
159 | 78 | ||
160 | bool perf_header__has_feat(const struct perf_header *self, int feat) | 79 | bool perf_header__has_feat(const struct perf_header *header, int feat) |
161 | { | 80 | { |
162 | return test_bit(feat, self->adds_features); | 81 | return test_bit(feat, header->adds_features); |
163 | } | 82 | } |
164 | 83 | ||
165 | static int do_write(int fd, const void *buf, size_t size) | 84 | static int do_write(int fd, const void *buf, size_t size) |
@@ -228,22 +147,22 @@ static int __dsos__write_buildid_table(struct list_head *head, pid_t pid, | |||
228 | return 0; | 147 | return 0; |
229 | } | 148 | } |
230 | 149 | ||
231 | static int machine__write_buildid_table(struct machine *self, int fd) | 150 | static int machine__write_buildid_table(struct machine *machine, int fd) |
232 | { | 151 | { |
233 | int err; | 152 | int err; |
234 | u16 kmisc = PERF_RECORD_MISC_KERNEL, | 153 | u16 kmisc = PERF_RECORD_MISC_KERNEL, |
235 | umisc = PERF_RECORD_MISC_USER; | 154 | umisc = PERF_RECORD_MISC_USER; |
236 | 155 | ||
237 | if (!machine__is_host(self)) { | 156 | if (!machine__is_host(machine)) { |
238 | kmisc = PERF_RECORD_MISC_GUEST_KERNEL; | 157 | kmisc = PERF_RECORD_MISC_GUEST_KERNEL; |
239 | umisc = PERF_RECORD_MISC_GUEST_USER; | 158 | umisc = PERF_RECORD_MISC_GUEST_USER; |
240 | } | 159 | } |
241 | 160 | ||
242 | err = __dsos__write_buildid_table(&self->kernel_dsos, self->pid, | 161 | err = __dsos__write_buildid_table(&machine->kernel_dsos, machine->pid, |
243 | kmisc, fd); | 162 | kmisc, fd); |
244 | if (err == 0) | 163 | if (err == 0) |
245 | err = __dsos__write_buildid_table(&self->user_dsos, | 164 | err = __dsos__write_buildid_table(&machine->user_dsos, |
246 | self->pid, umisc, fd); | 165 | machine->pid, umisc, fd); |
247 | return err; | 166 | return err; |
248 | } | 167 | } |
249 | 168 | ||
@@ -366,12 +285,12 @@ out_free: | |||
366 | return err; | 285 | return err; |
367 | } | 286 | } |
368 | 287 | ||
369 | static int dso__cache_build_id(struct dso *self, const char *debugdir) | 288 | static int dso__cache_build_id(struct dso *dso, const char *debugdir) |
370 | { | 289 | { |
371 | bool is_kallsyms = self->kernel && self->long_name[0] != '/'; | 290 | bool is_kallsyms = dso->kernel && dso->long_name[0] != '/'; |
372 | 291 | ||
373 | return build_id_cache__add_b(self->build_id, sizeof(self->build_id), | 292 | return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id), |
374 | self->long_name, debugdir, is_kallsyms); | 293 | dso->long_name, debugdir, is_kallsyms); |
375 | } | 294 | } |
376 | 295 | ||
377 | static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir) | 296 | static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir) |
@@ -386,14 +305,14 @@ static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir) | |||
386 | return err; | 305 | return err; |
387 | } | 306 | } |
388 | 307 | ||
389 | static int machine__cache_build_ids(struct machine *self, const char *debugdir) | 308 | static int machine__cache_build_ids(struct machine *machine, const char *debugdir) |
390 | { | 309 | { |
391 | int ret = __dsos__cache_build_ids(&self->kernel_dsos, debugdir); | 310 | int ret = __dsos__cache_build_ids(&machine->kernel_dsos, debugdir); |
392 | ret |= __dsos__cache_build_ids(&self->user_dsos, debugdir); | 311 | ret |= __dsos__cache_build_ids(&machine->user_dsos, debugdir); |
393 | return ret; | 312 | return ret; |
394 | } | 313 | } |
395 | 314 | ||
396 | static int perf_session__cache_build_ids(struct perf_session *self) | 315 | static int perf_session__cache_build_ids(struct perf_session *session) |
397 | { | 316 | { |
398 | struct rb_node *nd; | 317 | struct rb_node *nd; |
399 | int ret; | 318 | int ret; |
@@ -404,28 +323,28 @@ static int perf_session__cache_build_ids(struct perf_session *self) | |||
404 | if (mkdir(debugdir, 0755) != 0 && errno != EEXIST) | 323 | if (mkdir(debugdir, 0755) != 0 && errno != EEXIST) |
405 | return -1; | 324 | return -1; |
406 | 325 | ||
407 | ret = machine__cache_build_ids(&self->host_machine, debugdir); | 326 | ret = machine__cache_build_ids(&session->host_machine, debugdir); |
408 | 327 | ||
409 | for (nd = rb_first(&self->machines); nd; nd = rb_next(nd)) { | 328 | for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) { |
410 | struct machine *pos = rb_entry(nd, struct machine, rb_node); | 329 | struct machine *pos = rb_entry(nd, struct machine, rb_node); |
411 | ret |= machine__cache_build_ids(pos, debugdir); | 330 | ret |= machine__cache_build_ids(pos, debugdir); |
412 | } | 331 | } |
413 | return ret ? -1 : 0; | 332 | return ret ? -1 : 0; |
414 | } | 333 | } |
415 | 334 | ||
416 | static bool machine__read_build_ids(struct machine *self, bool with_hits) | 335 | static bool machine__read_build_ids(struct machine *machine, bool with_hits) |
417 | { | 336 | { |
418 | bool ret = __dsos__read_build_ids(&self->kernel_dsos, with_hits); | 337 | bool ret = __dsos__read_build_ids(&machine->kernel_dsos, with_hits); |
419 | ret |= __dsos__read_build_ids(&self->user_dsos, with_hits); | 338 | ret |= __dsos__read_build_ids(&machine->user_dsos, with_hits); |
420 | return ret; | 339 | return ret; |
421 | } | 340 | } |
422 | 341 | ||
423 | static bool perf_session__read_build_ids(struct perf_session *self, bool with_hits) | 342 | static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits) |
424 | { | 343 | { |
425 | struct rb_node *nd; | 344 | struct rb_node *nd; |
426 | bool ret = machine__read_build_ids(&self->host_machine, with_hits); | 345 | bool ret = machine__read_build_ids(&session->host_machine, with_hits); |
427 | 346 | ||
428 | for (nd = rb_first(&self->machines); nd; nd = rb_next(nd)) { | 347 | for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) { |
429 | struct machine *pos = rb_entry(nd, struct machine, rb_node); | 348 | struct machine *pos = rb_entry(nd, struct machine, rb_node); |
430 | ret |= machine__read_build_ids(pos, with_hits); | 349 | ret |= machine__read_build_ids(pos, with_hits); |
431 | } | 350 | } |
@@ -433,7 +352,8 @@ static bool perf_session__read_build_ids(struct perf_session *self, bool with_hi | |||
433 | return ret; | 352 | return ret; |
434 | } | 353 | } |
435 | 354 | ||
436 | static int perf_header__adds_write(struct perf_header *self, int fd) | 355 | static int perf_header__adds_write(struct perf_header *header, |
356 | struct perf_evlist *evlist, int fd) | ||
437 | { | 357 | { |
438 | int nr_sections; | 358 | int nr_sections; |
439 | struct perf_session *session; | 359 | struct perf_session *session; |
@@ -442,13 +362,13 @@ static int perf_header__adds_write(struct perf_header *self, int fd) | |||
442 | u64 sec_start; | 362 | u64 sec_start; |
443 | int idx = 0, err; | 363 | int idx = 0, err; |
444 | 364 | ||
445 | session = container_of(self, struct perf_session, header); | 365 | session = container_of(header, struct perf_session, header); |
446 | 366 | ||
447 | if (perf_header__has_feat(self, HEADER_BUILD_ID && | 367 | if (perf_header__has_feat(header, HEADER_BUILD_ID && |
448 | !perf_session__read_build_ids(session, true))) | 368 | !perf_session__read_build_ids(session, true))) |
449 | perf_header__clear_feat(self, HEADER_BUILD_ID); | 369 | perf_header__clear_feat(header, HEADER_BUILD_ID); |
450 | 370 | ||
451 | nr_sections = bitmap_weight(self->adds_features, HEADER_FEAT_BITS); | 371 | nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS); |
452 | if (!nr_sections) | 372 | if (!nr_sections) |
453 | return 0; | 373 | return 0; |
454 | 374 | ||
@@ -458,28 +378,28 @@ static int perf_header__adds_write(struct perf_header *self, int fd) | |||
458 | 378 | ||
459 | sec_size = sizeof(*feat_sec) * nr_sections; | 379 | sec_size = sizeof(*feat_sec) * nr_sections; |
460 | 380 | ||
461 | sec_start = self->data_offset + self->data_size; | 381 | sec_start = header->data_offset + header->data_size; |
462 | lseek(fd, sec_start + sec_size, SEEK_SET); | 382 | lseek(fd, sec_start + sec_size, SEEK_SET); |
463 | 383 | ||
464 | if (perf_header__has_feat(self, HEADER_TRACE_INFO)) { | 384 | if (perf_header__has_feat(header, HEADER_TRACE_INFO)) { |
465 | struct perf_file_section *trace_sec; | 385 | struct perf_file_section *trace_sec; |
466 | 386 | ||
467 | trace_sec = &feat_sec[idx++]; | 387 | trace_sec = &feat_sec[idx++]; |
468 | 388 | ||
469 | /* Write trace info */ | 389 | /* Write trace info */ |
470 | trace_sec->offset = lseek(fd, 0, SEEK_CUR); | 390 | trace_sec->offset = lseek(fd, 0, SEEK_CUR); |
471 | read_tracing_data(fd, &evsel_list); | 391 | read_tracing_data(fd, &evlist->entries); |
472 | trace_sec->size = lseek(fd, 0, SEEK_CUR) - trace_sec->offset; | 392 | trace_sec->size = lseek(fd, 0, SEEK_CUR) - trace_sec->offset; |
473 | } | 393 | } |
474 | 394 | ||
475 | if (perf_header__has_feat(self, HEADER_BUILD_ID)) { | 395 | if (perf_header__has_feat(header, HEADER_BUILD_ID)) { |
476 | struct perf_file_section *buildid_sec; | 396 | struct perf_file_section *buildid_sec; |
477 | 397 | ||
478 | buildid_sec = &feat_sec[idx++]; | 398 | buildid_sec = &feat_sec[idx++]; |
479 | 399 | ||
480 | /* Write build-ids */ | 400 | /* Write build-ids */ |
481 | buildid_sec->offset = lseek(fd, 0, SEEK_CUR); | 401 | buildid_sec->offset = lseek(fd, 0, SEEK_CUR); |
482 | err = dsos__write_buildid_table(self, fd); | 402 | err = dsos__write_buildid_table(header, fd); |
483 | if (err < 0) { | 403 | if (err < 0) { |
484 | pr_debug("failed to write buildid table\n"); | 404 | pr_debug("failed to write buildid table\n"); |
485 | goto out_free; | 405 | goto out_free; |
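Each optional feature written by perf_header__adds_write() above follows the same pattern: note the current file offset, stream the payload (tracing data or the build-id table), then record {offset, size} in the feat_sec[] array that is written out once all payloads are in place. A condensed restatement of that pattern; write_feature() and write_payload are illustrative stand-ins, not helpers from the patch:

#include <unistd.h>
#include "header.h"	/* struct perf_file_section */

static int write_feature(int fd, struct perf_file_section *sec,
			 int (*write_payload)(int fd))
{
	sec->offset = lseek(fd, 0, SEEK_CUR);	/* payload starts here */
	if (write_payload(fd) < 0)
		return -1;
	sec->size = lseek(fd, 0, SEEK_CUR) - sec->offset;
	return 0;
}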
@@ -518,32 +438,41 @@ int perf_header__write_pipe(int fd) | |||
518 | return 0; | 438 | return 0; |
519 | } | 439 | } |
520 | 440 | ||
521 | int perf_header__write(struct perf_header *self, int fd, bool at_exit) | 441 | int perf_session__write_header(struct perf_session *session, |
442 | struct perf_evlist *evlist, | ||
443 | int fd, bool at_exit) | ||
522 | { | 444 | { |
523 | struct perf_file_header f_header; | 445 | struct perf_file_header f_header; |
524 | struct perf_file_attr f_attr; | 446 | struct perf_file_attr f_attr; |
525 | struct perf_header_attr *attr; | 447 | struct perf_header *header = &session->header; |
526 | int i, err; | 448 | struct perf_evsel *attr, *pair = NULL; |
449 | int err; | ||
527 | 450 | ||
528 | lseek(fd, sizeof(f_header), SEEK_SET); | 451 | lseek(fd, sizeof(f_header), SEEK_SET); |
529 | 452 | ||
530 | for (i = 0; i < self->attrs; i++) { | 453 | if (session->evlist != evlist) |
531 | attr = self->attr[i]; | 454 | pair = list_entry(session->evlist->entries.next, struct perf_evsel, node); |
532 | 455 | ||
456 | list_for_each_entry(attr, &evlist->entries, node) { | ||
533 | attr->id_offset = lseek(fd, 0, SEEK_CUR); | 457 | attr->id_offset = lseek(fd, 0, SEEK_CUR); |
534 | err = do_write(fd, attr->id, attr->ids * sizeof(u64)); | 458 | err = do_write(fd, attr->id, attr->ids * sizeof(u64)); |
535 | if (err < 0) { | 459 | if (err < 0) { |
460 | out_err_write: | ||
536 | pr_debug("failed to write perf header\n"); | 461 | pr_debug("failed to write perf header\n"); |
537 | return err; | 462 | return err; |
538 | } | 463 | } |
464 | if (session->evlist != evlist) { | ||
465 | err = do_write(fd, pair->id, pair->ids * sizeof(u64)); | ||
466 | if (err < 0) | ||
467 | goto out_err_write; | ||
468 | attr->ids += pair->ids; | ||
469 | pair = list_entry(pair->node.next, struct perf_evsel, node); | ||
470 | } | ||
539 | } | 471 | } |
540 | 472 | ||
473 | header->attr_offset = lseek(fd, 0, SEEK_CUR); | ||
541 | 474 | ||
542 | self->attr_offset = lseek(fd, 0, SEEK_CUR); | 475 | list_for_each_entry(attr, &evlist->entries, node) { |
543 | |||
544 | for (i = 0; i < self->attrs; i++) { | ||
545 | attr = self->attr[i]; | ||
546 | |||
547 | f_attr = (struct perf_file_attr){ | 476 | f_attr = (struct perf_file_attr){ |
548 | .attr = attr->attr, | 477 | .attr = attr->attr, |
549 | .ids = { | 478 | .ids = { |
@@ -558,20 +487,20 @@ int perf_header__write(struct perf_header *self, int fd, bool at_exit) | |||
558 | } | 487 | } |
559 | } | 488 | } |
560 | 489 | ||
561 | self->event_offset = lseek(fd, 0, SEEK_CUR); | 490 | header->event_offset = lseek(fd, 0, SEEK_CUR); |
562 | self->event_size = event_count * sizeof(struct perf_trace_event_type); | 491 | header->event_size = event_count * sizeof(struct perf_trace_event_type); |
563 | if (events) { | 492 | if (events) { |
564 | err = do_write(fd, events, self->event_size); | 493 | err = do_write(fd, events, header->event_size); |
565 | if (err < 0) { | 494 | if (err < 0) { |
566 | pr_debug("failed to write perf header events\n"); | 495 | pr_debug("failed to write perf header events\n"); |
567 | return err; | 496 | return err; |
568 | } | 497 | } |
569 | } | 498 | } |
570 | 499 | ||
571 | self->data_offset = lseek(fd, 0, SEEK_CUR); | 500 | header->data_offset = lseek(fd, 0, SEEK_CUR); |
572 | 501 | ||
573 | if (at_exit) { | 502 | if (at_exit) { |
574 | err = perf_header__adds_write(self, fd); | 503 | err = perf_header__adds_write(header, evlist, fd); |
575 | if (err < 0) | 504 | if (err < 0) |
576 | return err; | 505 | return err; |
577 | } | 506 | } |
@@ -581,20 +510,20 @@ int perf_header__write(struct perf_header *self, int fd, bool at_exit) | |||
581 | .size = sizeof(f_header), | 510 | .size = sizeof(f_header), |
582 | .attr_size = sizeof(f_attr), | 511 | .attr_size = sizeof(f_attr), |
583 | .attrs = { | 512 | .attrs = { |
584 | .offset = self->attr_offset, | 513 | .offset = header->attr_offset, |
585 | .size = self->attrs * sizeof(f_attr), | 514 | .size = evlist->nr_entries * sizeof(f_attr), |
586 | }, | 515 | }, |
587 | .data = { | 516 | .data = { |
588 | .offset = self->data_offset, | 517 | .offset = header->data_offset, |
589 | .size = self->data_size, | 518 | .size = header->data_size, |
590 | }, | 519 | }, |
591 | .event_types = { | 520 | .event_types = { |
592 | .offset = self->event_offset, | 521 | .offset = header->event_offset, |
593 | .size = self->event_size, | 522 | .size = header->event_size, |
594 | }, | 523 | }, |
595 | }; | 524 | }; |
596 | 525 | ||
597 | memcpy(&f_header.adds_features, &self->adds_features, sizeof(self->adds_features)); | 526 | memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features)); |
598 | 527 | ||
599 | lseek(fd, 0, SEEK_SET); | 528 | lseek(fd, 0, SEEK_SET); |
600 | err = do_write(fd, &f_header, sizeof(f_header)); | 529 | err = do_write(fd, &f_header, sizeof(f_header)); |
@@ -602,26 +531,26 @@ int perf_header__write(struct perf_header *self, int fd, bool at_exit) | |||
602 | pr_debug("failed to write perf header\n"); | 531 | pr_debug("failed to write perf header\n"); |
603 | return err; | 532 | return err; |
604 | } | 533 | } |
605 | lseek(fd, self->data_offset + self->data_size, SEEK_SET); | 534 | lseek(fd, header->data_offset + header->data_size, SEEK_SET); |
606 | 535 | ||
607 | self->frozen = 1; | 536 | header->frozen = 1; |
608 | return 0; | 537 | return 0; |
609 | } | 538 | } |
610 | 539 | ||
611 | static int perf_header__getbuffer64(struct perf_header *self, | 540 | static int perf_header__getbuffer64(struct perf_header *header, |
612 | int fd, void *buf, size_t size) | 541 | int fd, void *buf, size_t size) |
613 | { | 542 | { |
614 | if (readn(fd, buf, size) <= 0) | 543 | if (readn(fd, buf, size) <= 0) |
615 | return -1; | 544 | return -1; |
616 | 545 | ||
617 | if (self->needs_swap) | 546 | if (header->needs_swap) |
618 | mem_bswap_64(buf, size); | 547 | mem_bswap_64(buf, size); |
619 | 548 | ||
620 | return 0; | 549 | return 0; |
621 | } | 550 | } |
622 | 551 | ||
623 | int perf_header__process_sections(struct perf_header *self, int fd, | 552 | int perf_header__process_sections(struct perf_header *header, int fd, |
624 | int (*process)(struct perf_file_section *self, | 553 | int (*process)(struct perf_file_section *section, |
625 | struct perf_header *ph, | 554 | struct perf_header *ph, |
626 | int feat, int fd)) | 555 | int feat, int fd)) |
627 | { | 556 | { |
@@ -631,7 +560,7 @@ int perf_header__process_sections(struct perf_header *self, int fd, | |||
631 | int idx = 0; | 560 | int idx = 0; |
632 | int err = -1, feat = 1; | 561 | int err = -1, feat = 1; |
633 | 562 | ||
634 | nr_sections = bitmap_weight(self->adds_features, HEADER_FEAT_BITS); | 563 | nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS); |
635 | if (!nr_sections) | 564 | if (!nr_sections) |
636 | return 0; | 565 | return 0; |
637 | 566 | ||
@@ -641,17 +570,17 @@ int perf_header__process_sections(struct perf_header *self, int fd, | |||
641 | 570 | ||
642 | sec_size = sizeof(*feat_sec) * nr_sections; | 571 | sec_size = sizeof(*feat_sec) * nr_sections; |
643 | 572 | ||
644 | lseek(fd, self->data_offset + self->data_size, SEEK_SET); | 573 | lseek(fd, header->data_offset + header->data_size, SEEK_SET); |
645 | 574 | ||
646 | if (perf_header__getbuffer64(self, fd, feat_sec, sec_size)) | 575 | if (perf_header__getbuffer64(header, fd, feat_sec, sec_size)) |
647 | goto out_free; | 576 | goto out_free; |
648 | 577 | ||
649 | err = 0; | 578 | err = 0; |
650 | while (idx < nr_sections && feat < HEADER_LAST_FEATURE) { | 579 | while (idx < nr_sections && feat < HEADER_LAST_FEATURE) { |
651 | if (perf_header__has_feat(self, feat)) { | 580 | if (perf_header__has_feat(header, feat)) { |
652 | struct perf_file_section *sec = &feat_sec[idx++]; | 581 | struct perf_file_section *sec = &feat_sec[idx++]; |
653 | 582 | ||
654 | err = process(sec, self, feat, fd); | 583 | err = process(sec, header, feat, fd); |
655 | if (err < 0) | 584 | if (err < 0) |
656 | break; | 585 | break; |
657 | } | 586 | } |
@@ -662,35 +591,35 @@ out_free: | |||
662 | return err; | 591 | return err; |
663 | } | 592 | } |
664 | 593 | ||
665 | int perf_file_header__read(struct perf_file_header *self, | 594 | int perf_file_header__read(struct perf_file_header *header, |
666 | struct perf_header *ph, int fd) | 595 | struct perf_header *ph, int fd) |
667 | { | 596 | { |
668 | lseek(fd, 0, SEEK_SET); | 597 | lseek(fd, 0, SEEK_SET); |
669 | 598 | ||
670 | if (readn(fd, self, sizeof(*self)) <= 0 || | 599 | if (readn(fd, header, sizeof(*header)) <= 0 || |
671 | memcmp(&self->magic, __perf_magic, sizeof(self->magic))) | 600 | memcmp(&header->magic, __perf_magic, sizeof(header->magic))) |
672 | return -1; | 601 | return -1; |
673 | 602 | ||
674 | if (self->attr_size != sizeof(struct perf_file_attr)) { | 603 | if (header->attr_size != sizeof(struct perf_file_attr)) { |
675 | u64 attr_size = bswap_64(self->attr_size); | 604 | u64 attr_size = bswap_64(header->attr_size); |
676 | 605 | ||
677 | if (attr_size != sizeof(struct perf_file_attr)) | 606 | if (attr_size != sizeof(struct perf_file_attr)) |
678 | return -1; | 607 | return -1; |
679 | 608 | ||
680 | mem_bswap_64(self, offsetof(struct perf_file_header, | 609 | mem_bswap_64(header, offsetof(struct perf_file_header, |
681 | adds_features)); | 610 | adds_features)); |
682 | ph->needs_swap = true; | 611 | ph->needs_swap = true; |
683 | } | 612 | } |
684 | 613 | ||
685 | if (self->size != sizeof(*self)) { | 614 | if (header->size != sizeof(*header)) { |
686 | /* Support the previous format */ | 615 | /* Support the previous format */ |
687 | if (self->size == offsetof(typeof(*self), adds_features)) | 616 | if (header->size == offsetof(typeof(*header), adds_features)) |
688 | bitmap_zero(self->adds_features, HEADER_FEAT_BITS); | 617 | bitmap_zero(header->adds_features, HEADER_FEAT_BITS); |
689 | else | 618 | else |
690 | return -1; | 619 | return -1; |
691 | } | 620 | } |
692 | 621 | ||
693 | memcpy(&ph->adds_features, &self->adds_features, | 622 | memcpy(&ph->adds_features, &header->adds_features, |
694 | sizeof(ph->adds_features)); | 623 | sizeof(ph->adds_features)); |
695 | /* | 624 | /* |
696 | * FIXME: hack that assumes that if we need swap the perf.data file | 625 | * FIXME: hack that assumes that if we need swap the perf.data file |
@@ -704,10 +633,10 @@ int perf_file_header__read(struct perf_file_header *self, | |||
704 | perf_header__set_feat(ph, HEADER_BUILD_ID); | 633 | perf_header__set_feat(ph, HEADER_BUILD_ID); |
705 | } | 634 | } |
706 | 635 | ||
707 | ph->event_offset = self->event_types.offset; | 636 | ph->event_offset = header->event_types.offset; |
708 | ph->event_size = self->event_types.size; | 637 | ph->event_size = header->event_types.size; |
709 | ph->data_offset = self->data.offset; | 638 | ph->data_offset = header->data.offset; |
710 | ph->data_size = self->data.size; | 639 | ph->data_size = header->data.size; |
711 | return 0; | 640 | return 0; |
712 | } | 641 | } |
713 | 642 | ||
@@ -766,11 +695,10 @@ out: | |||
766 | return err; | 695 | return err; |
767 | } | 696 | } |
768 | 697 | ||
769 | static int perf_header__read_build_ids(struct perf_header *self, | 698 | static int perf_header__read_build_ids(struct perf_header *header, |
770 | int input, u64 offset, u64 size) | 699 | int input, u64 offset, u64 size) |
771 | { | 700 | { |
772 | struct perf_session *session = container_of(self, | 701 | struct perf_session *session = container_of(header, struct perf_session, header); |
773 | struct perf_session, header); | ||
774 | struct build_id_event bev; | 702 | struct build_id_event bev; |
775 | char filename[PATH_MAX]; | 703 | char filename[PATH_MAX]; |
776 | u64 limit = offset + size; | 704 | u64 limit = offset + size; |
@@ -782,7 +710,7 @@ static int perf_header__read_build_ids(struct perf_header *self, | |||
782 | if (read(input, &bev, sizeof(bev)) != sizeof(bev)) | 710 | if (read(input, &bev, sizeof(bev)) != sizeof(bev)) |
783 | goto out; | 711 | goto out; |
784 | 712 | ||
785 | if (self->needs_swap) | 713 | if (header->needs_swap) |
786 | perf_event_header__bswap(&bev.header); | 714 | perf_event_header__bswap(&bev.header); |
787 | 715 | ||
788 | len = bev.header.size - sizeof(bev); | 716 | len = bev.header.size - sizeof(bev); |
@@ -798,13 +726,13 @@ out: | |||
798 | return err; | 726 | return err; |
799 | } | 727 | } |
800 | 728 | ||
801 | static int perf_file_section__process(struct perf_file_section *self, | 729 | static int perf_file_section__process(struct perf_file_section *section, |
802 | struct perf_header *ph, | 730 | struct perf_header *ph, |
803 | int feat, int fd) | 731 | int feat, int fd) |
804 | { | 732 | { |
805 | if (lseek(fd, self->offset, SEEK_SET) == (off_t)-1) { | 733 | if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) { |
806 | pr_debug("Failed to lseek to %" PRIu64 " offset for feature " | 734 | pr_debug("Failed to lseek to %" PRIu64 " offset for feature " |
807 | "%d, continuing...\n", self->offset, feat); | 735 | "%d, continuing...\n", section->offset, feat); |
808 | return 0; | 736 | return 0; |
809 | } | 737 | } |
810 | 738 | ||
@@ -814,7 +742,7 @@ static int perf_file_section__process(struct perf_file_section *self, | |||
814 | break; | 742 | break; |
815 | 743 | ||
816 | case HEADER_BUILD_ID: | 744 | case HEADER_BUILD_ID: |
817 | if (perf_header__read_build_ids(ph, fd, self->offset, self->size)) | 745 | if (perf_header__read_build_ids(ph, fd, section->offset, section->size)) |
818 | pr_debug("Failed to read buildids, continuing...\n"); | 746 | pr_debug("Failed to read buildids, continuing...\n"); |
819 | break; | 747 | break; |
820 | default: | 748 | default: |
@@ -824,21 +752,21 @@ static int perf_file_section__process(struct perf_file_section *self, | |||
824 | return 0; | 752 | return 0; |
825 | } | 753 | } |
826 | 754 | ||
827 | static int perf_file_header__read_pipe(struct perf_pipe_file_header *self, | 755 | static int perf_file_header__read_pipe(struct perf_pipe_file_header *header, |
828 | struct perf_header *ph, int fd, | 756 | struct perf_header *ph, int fd, |
829 | bool repipe) | 757 | bool repipe) |
830 | { | 758 | { |
831 | if (readn(fd, self, sizeof(*self)) <= 0 || | 759 | if (readn(fd, header, sizeof(*header)) <= 0 || |
832 | memcmp(&self->magic, __perf_magic, sizeof(self->magic))) | 760 | memcmp(&header->magic, __perf_magic, sizeof(header->magic))) |
833 | return -1; | 761 | return -1; |
834 | 762 | ||
835 | if (repipe && do_write(STDOUT_FILENO, self, sizeof(*self)) < 0) | 763 | if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0) |
836 | return -1; | 764 | return -1; |
837 | 765 | ||
838 | if (self->size != sizeof(*self)) { | 766 | if (header->size != sizeof(*header)) { |
839 | u64 size = bswap_64(self->size); | 767 | u64 size = bswap_64(header->size); |
840 | 768 | ||
841 | if (size != sizeof(*self)) | 769 | if (size != sizeof(*header)) |
842 | return -1; | 770 | return -1; |
843 | 771 | ||
844 | ph->needs_swap = true; | 772 | ph->needs_swap = true; |
@@ -849,10 +777,10 @@ static int perf_file_header__read_pipe(struct perf_pipe_file_header *self, | |||
849 | 777 | ||
850 | static int perf_header__read_pipe(struct perf_session *session, int fd) | 778 | static int perf_header__read_pipe(struct perf_session *session, int fd) |
851 | { | 779 | { |
852 | struct perf_header *self = &session->header; | 780 | struct perf_header *header = &session->header; |
853 | struct perf_pipe_file_header f_header; | 781 | struct perf_pipe_file_header f_header; |
854 | 782 | ||
855 | if (perf_file_header__read_pipe(&f_header, self, fd, | 783 | if (perf_file_header__read_pipe(&f_header, header, fd, |
856 | session->repipe) < 0) { | 784 | session->repipe) < 0) { |
857 | pr_debug("incompatible file format\n"); | 785 | pr_debug("incompatible file format\n"); |
858 | return -EINVAL; | 786 | return -EINVAL; |
@@ -863,18 +791,22 @@ static int perf_header__read_pipe(struct perf_session *session, int fd) | |||
863 | return 0; | 791 | return 0; |
864 | } | 792 | } |
865 | 793 | ||
866 | int perf_header__read(struct perf_session *session, int fd) | 794 | int perf_session__read_header(struct perf_session *session, int fd) |
867 | { | 795 | { |
868 | struct perf_header *self = &session->header; | 796 | struct perf_header *header = &session->header; |
869 | struct perf_file_header f_header; | 797 | struct perf_file_header f_header; |
870 | struct perf_file_attr f_attr; | 798 | struct perf_file_attr f_attr; |
871 | u64 f_id; | 799 | u64 f_id; |
872 | int nr_attrs, nr_ids, i, j; | 800 | int nr_attrs, nr_ids, i, j; |
873 | 801 | ||
802 | session->evlist = perf_evlist__new(NULL, NULL); | ||
803 | if (session->evlist == NULL) | ||
804 | return -ENOMEM; | ||
805 | |||
874 | if (session->fd_pipe) | 806 | if (session->fd_pipe) |
875 | return perf_header__read_pipe(session, fd); | 807 | return perf_header__read_pipe(session, fd); |
876 | 808 | ||
877 | if (perf_file_header__read(&f_header, self, fd) < 0) { | 809 | if (perf_file_header__read(&f_header, header, fd) < 0) { |
878 | pr_debug("incompatible file format\n"); | 810 | pr_debug("incompatible file format\n"); |
879 | return -EINVAL; | 811 | return -EINVAL; |
880 | } | 812 | } |
@@ -883,33 +815,39 @@ int perf_header__read(struct perf_session *session, int fd) | |||
883 | lseek(fd, f_header.attrs.offset, SEEK_SET); | 815 | lseek(fd, f_header.attrs.offset, SEEK_SET); |
884 | 816 | ||
885 | for (i = 0; i < nr_attrs; i++) { | 817 | for (i = 0; i < nr_attrs; i++) { |
886 | struct perf_header_attr *attr; | 818 | struct perf_evsel *evsel; |
887 | off_t tmp; | 819 | off_t tmp; |
888 | 820 | ||
889 | if (perf_header__getbuffer64(self, fd, &f_attr, sizeof(f_attr))) | 821 | if (perf_header__getbuffer64(header, fd, &f_attr, sizeof(f_attr))) |
890 | goto out_errno; | 822 | goto out_errno; |
891 | 823 | ||
892 | tmp = lseek(fd, 0, SEEK_CUR); | 824 | tmp = lseek(fd, 0, SEEK_CUR); |
825 | evsel = perf_evsel__new(&f_attr.attr, i); | ||
893 | 826 | ||
894 | attr = perf_header_attr__new(&f_attr.attr); | 827 | if (evsel == NULL) |
895 | if (attr == NULL) | 828 | goto out_delete_evlist; |
896 | return -ENOMEM; | 829 | /* |
830 | * Do it before so that if perf_evsel__alloc_id fails, this | ||
831 | * entry gets purged too at perf_evlist__delete(). | ||
832 | */ | ||
833 | perf_evlist__add(session->evlist, evsel); | ||
897 | 834 | ||
898 | nr_ids = f_attr.ids.size / sizeof(u64); | 835 | nr_ids = f_attr.ids.size / sizeof(u64); |
836 | /* | ||
837 | * We don't have the cpu and thread maps on the header, so | ||
838 | * for allocating the perf_sample_id table we fake 1 cpu and | ||
839 | * hattr->ids threads. | ||
840 | */ | ||
841 | if (perf_evsel__alloc_id(evsel, 1, nr_ids)) | ||
842 | goto out_delete_evlist; | ||
843 | |||
899 | lseek(fd, f_attr.ids.offset, SEEK_SET); | 844 | lseek(fd, f_attr.ids.offset, SEEK_SET); |
900 | 845 | ||
901 | for (j = 0; j < nr_ids; j++) { | 846 | for (j = 0; j < nr_ids; j++) { |
902 | if (perf_header__getbuffer64(self, fd, &f_id, sizeof(f_id))) | 847 | if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id))) |
903 | goto out_errno; | 848 | goto out_errno; |
904 | 849 | ||
905 | if (perf_header_attr__add_id(attr, f_id) < 0) { | 850 | perf_evlist__id_add(session->evlist, evsel, 0, j, f_id); |
906 | perf_header_attr__delete(attr); | ||
907 | return -ENOMEM; | ||
908 | } | ||
909 | } | ||
910 | if (perf_header__add_attr(self, attr) < 0) { | ||
911 | perf_header_attr__delete(attr); | ||
912 | return -ENOMEM; | ||
913 | } | 851 | } |
914 | 852 | ||
915 | lseek(fd, tmp, SEEK_SET); | 853 | lseek(fd, tmp, SEEK_SET); |
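For orientation, these are the perf.data pieces that perf_session__write_header() emits and the loop above reads back: a perf_file_header whose attrs section points at an array of perf_file_attr entries, each of which in turn points, via its ids perf_file_section, at that event's block of u64 sample ids. A sketch of the two on-disk records with stand-in names (the real definitions are in header.h):

#include <stdint.h>
#include <linux/perf_event.h>

struct sketch_file_section {
	uint64_t offset;	/* absolute file offset of the payload */
	uint64_t size;		/* payload size in bytes */
};

struct sketch_file_attr {
	struct perf_event_attr     attr;	/* one event configuration */
	struct sketch_file_section ids;		/* that event's u64 sample ids */
};

/*
 * Overall file layout, in write order:
 *
 *   perf_file_header | id blocks (one per event) | attr table
 *   | event type table | sample data | feature sections (written at exit)
 *
 * The read loop above walks the attr table, creates one perf_evsel per
 * entry and feeds its ids block to perf_evlist__id_add(), faking 1 cpu
 * and nr_ids threads since no cpu/thread maps are stored in the header.
 */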
@@ -920,93 +858,63 @@ int perf_header__read(struct perf_session *session, int fd) | |||
920 | events = malloc(f_header.event_types.size); | 858 | events = malloc(f_header.event_types.size); |
921 | if (events == NULL) | 859 | if (events == NULL) |
922 | return -ENOMEM; | 860 | return -ENOMEM; |
923 | if (perf_header__getbuffer64(self, fd, events, | 861 | if (perf_header__getbuffer64(header, fd, events, |
924 | f_header.event_types.size)) | 862 | f_header.event_types.size)) |
925 | goto out_errno; | 863 | goto out_errno; |
926 | event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type); | 864 | event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type); |
927 | } | 865 | } |
928 | 866 | ||
929 | perf_header__process_sections(self, fd, perf_file_section__process); | 867 | perf_header__process_sections(header, fd, perf_file_section__process); |
930 | 868 | ||
931 | lseek(fd, self->data_offset, SEEK_SET); | 869 | lseek(fd, header->data_offset, SEEK_SET); |
932 | 870 | ||
933 | self->frozen = 1; | 871 | header->frozen = 1; |
934 | return 0; | 872 | return 0; |
935 | out_errno: | 873 | out_errno: |
936 | return -errno; | 874 | return -errno; |
875 | |||
876 | out_delete_evlist: | ||
877 | perf_evlist__delete(session->evlist); | ||
878 | session->evlist = NULL; | ||
879 | return -ENOMEM; | ||
937 | } | 880 | } |
938 | 881 | ||
939 | u64 perf_header__sample_type(struct perf_header *header) | 882 | u64 perf_evlist__sample_type(struct perf_evlist *evlist) |
940 | { | 883 | { |
884 | struct perf_evsel *pos; | ||
941 | u64 type = 0; | 885 | u64 type = 0; |
942 | int i; | ||
943 | |||
944 | for (i = 0; i < header->attrs; i++) { | ||
945 | struct perf_header_attr *attr = header->attr[i]; | ||
946 | 886 | ||
887 | list_for_each_entry(pos, &evlist->entries, node) { | ||
947 | if (!type) | 888 | if (!type) |
948 | type = attr->attr.sample_type; | 889 | type = pos->attr.sample_type; |
949 | else if (type != attr->attr.sample_type) | 890 | else if (type != pos->attr.sample_type) |
950 | die("non matching sample_type"); | 891 | die("non matching sample_type"); |
951 | } | 892 | } |
952 | 893 | ||
953 | return type; | 894 | return type; |
954 | } | 895 | } |
955 | 896 | ||
956 | bool perf_header__sample_id_all(const struct perf_header *header) | 897 | bool perf_evlist__sample_id_all(const struct perf_evlist *evlist) |
957 | { | 898 | { |
958 | bool value = false, first = true; | 899 | bool value = false, first = true; |
959 | int i; | 900 | struct perf_evsel *pos; |
960 | |||
961 | for (i = 0; i < header->attrs; i++) { | ||
962 | struct perf_header_attr *attr = header->attr[i]; | ||
963 | 901 | ||
902 | list_for_each_entry(pos, &evlist->entries, node) { | ||
964 | if (first) { | 903 | if (first) { |
965 | value = attr->attr.sample_id_all; | 904 | value = pos->attr.sample_id_all; |
966 | first = false; | 905 | first = false; |
967 | } else if (value != attr->attr.sample_id_all) | 906 | } else if (value != pos->attr.sample_id_all) |
968 | die("non matching sample_id_all"); | 907 | die("non matching sample_id_all"); |
969 | } | 908 | } |
970 | 909 | ||
971 | return value; | 910 | return value; |
972 | } | 911 | } |
973 | 912 | ||
974 | struct perf_event_attr * | 913 | int perf_event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id, |
975 | perf_header__find_attr(u64 id, struct perf_header *header) | 914 | perf_event__handler_t process, |
976 | { | 915 | struct perf_session *session) |
977 | int i; | ||
978 | |||
979 | /* | ||
980 | * We set id to -1 if the data file doesn't contain sample | ||
981 | * ids. This can happen when the data file contains one type | ||
982 | * of event and in that case, the header can still store the | ||
983 | * event attribute information. Check for this and avoid | ||
984 | * walking through the entire list of ids which may be large. | ||
985 | */ | ||
986 | if (id == -1ULL) { | ||
987 | if (header->attrs > 0) | ||
988 | return &header->attr[0]->attr; | ||
989 | return NULL; | ||
990 | } | ||
991 | |||
992 | for (i = 0; i < header->attrs; i++) { | ||
993 | struct perf_header_attr *attr = header->attr[i]; | ||
994 | int j; | ||
995 | |||
996 | for (j = 0; j < attr->ids; j++) { | ||
997 | if (attr->id[j] == id) | ||
998 | return &attr->attr; | ||
999 | } | ||
1000 | } | ||
1001 | |||
1002 | return NULL; | ||
1003 | } | ||
1004 | |||
1005 | int event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id, | ||
1006 | event__handler_t process, | ||
1007 | struct perf_session *session) | ||
1008 | { | 916 | { |
1009 | event_t *ev; | 917 | union perf_event *ev; |
1010 | size_t size; | 918 | size_t size; |
1011 | int err; | 919 | int err; |
1012 | 920 | ||
@@ -1033,17 +941,15 @@ int event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id, | |||
1033 | return err; | 941 | return err; |
1034 | } | 942 | } |
1035 | 943 | ||
1036 | int event__synthesize_attrs(struct perf_header *self, event__handler_t process, | 944 | int perf_session__synthesize_attrs(struct perf_session *session, |
1037 | struct perf_session *session) | 945 | perf_event__handler_t process) |
1038 | { | 946 | { |
1039 | struct perf_header_attr *attr; | 947 | struct perf_evsel *attr; |
1040 | int i, err = 0; | 948 | int err = 0; |
1041 | |||
1042 | for (i = 0; i < self->attrs; i++) { | ||
1043 | attr = self->attr[i]; | ||
1044 | 949 | ||
1045 | err = event__synthesize_attr(&attr->attr, attr->ids, attr->id, | 950 | list_for_each_entry(attr, &session->evlist->entries, node) { |
1046 | process, session); | 951 | err = perf_event__synthesize_attr(&attr->attr, attr->ids, |
952 | attr->id, process, session); | ||
1047 | if (err) { | 953 | if (err) { |
1048 | pr_debug("failed to create perf header attribute\n"); | 954 | pr_debug("failed to create perf header attribute\n"); |
1049 | return err; | 955 | return err; |
@@ -1053,29 +959,39 @@ int event__synthesize_attrs(struct perf_header *self, event__handler_t process, | |||
1053 | return err; | 959 | return err; |
1054 | } | 960 | } |
1055 | 961 | ||
1056 | int event__process_attr(event_t *self, struct perf_session *session) | 962 | int perf_event__process_attr(union perf_event *event, |
963 | struct perf_session *session) | ||
1057 | { | 964 | { |
1058 | struct perf_header_attr *attr; | ||
1059 | unsigned int i, ids, n_ids; | 965 | unsigned int i, ids, n_ids; |
966 | struct perf_evsel *evsel; | ||
1060 | 967 | ||
1061 | attr = perf_header_attr__new(&self->attr.attr); | 968 | if (session->evlist == NULL) { |
1062 | if (attr == NULL) | 969 | session->evlist = perf_evlist__new(NULL, NULL); |
970 | if (session->evlist == NULL) | ||
971 | return -ENOMEM; | ||
972 | } | ||
973 | |||
974 | evsel = perf_evsel__new(&event->attr.attr, | ||
975 | session->evlist->nr_entries); | ||
976 | if (evsel == NULL) | ||
1063 | return -ENOMEM; | 977 | return -ENOMEM; |
1064 | 978 | ||
1065 | ids = self->header.size; | 979 | perf_evlist__add(session->evlist, evsel); |
1066 | ids -= (void *)&self->attr.id - (void *)self; | 980 | |
981 | ids = event->header.size; | ||
982 | ids -= (void *)&event->attr.id - (void *)event; | ||
1067 | n_ids = ids / sizeof(u64); | 983 | n_ids = ids / sizeof(u64); |
984 | /* | ||
985 | * We don't have the cpu and thread maps on the header, so | ||
986 | * for allocating the perf_sample_id table we fake 1 cpu and | ||
987 | * hattr->ids threads. | ||
988 | */ | ||
989 | if (perf_evsel__alloc_id(evsel, 1, n_ids)) | ||
990 | return -ENOMEM; | ||
1068 | 991 | ||
1069 | for (i = 0; i < n_ids; i++) { | 992 | for (i = 0; i < n_ids; i++) { |
1070 | if (perf_header_attr__add_id(attr, self->attr.id[i]) < 0) { | 993 | perf_evlist__id_add(session->evlist, evsel, 0, i, |
1071 | perf_header_attr__delete(attr); | 994 | event->attr.id[i]); |
1072 | return -ENOMEM; | ||
1073 | } | ||
1074 | } | ||
1075 | |||
1076 | if (perf_header__add_attr(&session->header, attr) < 0) { | ||
1077 | perf_header_attr__delete(attr); | ||
1078 | return -ENOMEM; | ||
1079 | } | 995 | } |
1080 | 996 | ||
1081 | perf_session__update_sample_type(session); | 997 | perf_session__update_sample_type(session); |
@@ -1083,11 +999,11 @@ int event__process_attr(event_t *self, struct perf_session *session) | |||
1083 | return 0; | 999 | return 0; |
1084 | } | 1000 | } |
1085 | 1001 | ||
1086 | int event__synthesize_event_type(u64 event_id, char *name, | 1002 | int perf_event__synthesize_event_type(u64 event_id, char *name, |
1087 | event__handler_t process, | 1003 | perf_event__handler_t process, |
1088 | struct perf_session *session) | 1004 | struct perf_session *session) |
1089 | { | 1005 | { |
1090 | event_t ev; | 1006 | union perf_event ev; |
1091 | size_t size = 0; | 1007 | size_t size = 0; |
1092 | int err = 0; | 1008 | int err = 0; |
1093 | 1009 | ||
@@ -1108,8 +1024,8 @@ int event__synthesize_event_type(u64 event_id, char *name, | |||
1108 | return err; | 1024 | return err; |
1109 | } | 1025 | } |
1110 | 1026 | ||
1111 | int event__synthesize_event_types(event__handler_t process, | 1027 | int perf_event__synthesize_event_types(perf_event__handler_t process, |
1112 | struct perf_session *session) | 1028 | struct perf_session *session) |
1113 | { | 1029 | { |
1114 | struct perf_trace_event_type *type; | 1030 | struct perf_trace_event_type *type; |
1115 | int i, err = 0; | 1031 | int i, err = 0; |
@@ -1117,8 +1033,9 @@ int event__synthesize_event_types(event__handler_t process, | |||
1117 | for (i = 0; i < event_count; i++) { | 1033 | for (i = 0; i < event_count; i++) { |
1118 | type = &events[i]; | 1034 | type = &events[i]; |
1119 | 1035 | ||
1120 | err = event__synthesize_event_type(type->event_id, type->name, | 1036 | err = perf_event__synthesize_event_type(type->event_id, |
1121 | process, session); | 1037 | type->name, process, |
1038 | session); | ||
1122 | if (err) { | 1039 | if (err) { |
1123 | pr_debug("failed to create perf header event type\n"); | 1040 | pr_debug("failed to create perf header event type\n"); |
1124 | return err; | 1041 | return err; |
@@ -1128,28 +1045,28 @@ int event__synthesize_event_types(event__handler_t process, | |||
1128 | return err; | 1045 | return err; |
1129 | } | 1046 | } |
1130 | 1047 | ||
1131 | int event__process_event_type(event_t *self, | 1048 | int perf_event__process_event_type(union perf_event *event, |
1132 | struct perf_session *session __unused) | 1049 | struct perf_session *session __unused) |
1133 | { | 1050 | { |
1134 | if (perf_header__push_event(self->event_type.event_type.event_id, | 1051 | if (perf_header__push_event(event->event_type.event_type.event_id, |
1135 | self->event_type.event_type.name) < 0) | 1052 | event->event_type.event_type.name) < 0) |
1136 | return -ENOMEM; | 1053 | return -ENOMEM; |
1137 | 1054 | ||
1138 | return 0; | 1055 | return 0; |
1139 | } | 1056 | } |
1140 | 1057 | ||
1141 | int event__synthesize_tracing_data(int fd, struct list_head *pattrs, | 1058 | int perf_event__synthesize_tracing_data(int fd, struct perf_evlist *evlist, |
1142 | event__handler_t process, | 1059 | perf_event__handler_t process, |
1143 | struct perf_session *session __unused) | 1060 | struct perf_session *session __unused) |
1144 | { | 1061 | { |
1145 | event_t ev; | 1062 | union perf_event ev; |
1146 | ssize_t size = 0, aligned_size = 0, padding; | 1063 | ssize_t size = 0, aligned_size = 0, padding; |
1147 | int err = 0; | 1064 | int err __used = 0; |
1148 | 1065 | ||
1149 | memset(&ev, 0, sizeof(ev)); | 1066 | memset(&ev, 0, sizeof(ev)); |
1150 | 1067 | ||
1151 | ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA; | 1068 | ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA; |
1152 | size = read_tracing_data_size(fd, pattrs); | 1069 | size = read_tracing_data_size(fd, &evlist->entries); |
1153 | if (size <= 0) | 1070 | if (size <= 0) |
1154 | return size; | 1071 | return size; |
1155 | aligned_size = ALIGN(size, sizeof(u64)); | 1072 | aligned_size = ALIGN(size, sizeof(u64)); |
@@ -1159,16 +1076,16 @@ int event__synthesize_tracing_data(int fd, struct list_head *pattrs, | |||
1159 | 1076 | ||
1160 | process(&ev, NULL, session); | 1077 | process(&ev, NULL, session); |
1161 | 1078 | ||
1162 | err = read_tracing_data(fd, pattrs); | 1079 | err = read_tracing_data(fd, &evlist->entries); |
1163 | write_padded(fd, NULL, 0, padding); | 1080 | write_padded(fd, NULL, 0, padding); |
1164 | 1081 | ||
1165 | return aligned_size; | 1082 | return aligned_size; |
1166 | } | 1083 | } |
1167 | 1084 | ||
1168 | int event__process_tracing_data(event_t *self, | 1085 | int perf_event__process_tracing_data(union perf_event *event, |
1169 | struct perf_session *session) | 1086 | struct perf_session *session) |
1170 | { | 1087 | { |
1171 | ssize_t size_read, padding, size = self->tracing_data.size; | 1088 | ssize_t size_read, padding, size = event->tracing_data.size; |
1172 | off_t offset = lseek(session->fd, 0, SEEK_CUR); | 1089 | off_t offset = lseek(session->fd, 0, SEEK_CUR); |
1173 | char buf[BUFSIZ]; | 1090 | char buf[BUFSIZ]; |
1174 | 1091 | ||
@@ -1194,12 +1111,12 @@ int event__process_tracing_data(event_t *self, | |||
1194 | return size_read + padding; | 1111 | return size_read + padding; |
1195 | } | 1112 | } |
1196 | 1113 | ||
1197 | int event__synthesize_build_id(struct dso *pos, u16 misc, | 1114 | int perf_event__synthesize_build_id(struct dso *pos, u16 misc, |
1198 | event__handler_t process, | 1115 | perf_event__handler_t process, |
1199 | struct machine *machine, | 1116 | struct machine *machine, |
1200 | struct perf_session *session) | 1117 | struct perf_session *session) |
1201 | { | 1118 | { |
1202 | event_t ev; | 1119 | union perf_event ev; |
1203 | size_t len; | 1120 | size_t len; |
1204 | int err = 0; | 1121 | int err = 0; |
1205 | 1122 | ||
@@ -1222,11 +1139,11 @@ int event__synthesize_build_id(struct dso *pos, u16 misc, | |||
1222 | return err; | 1139 | return err; |
1223 | } | 1140 | } |
1224 | 1141 | ||
1225 | int event__process_build_id(event_t *self, | 1142 | int perf_event__process_build_id(union perf_event *event, |
1226 | struct perf_session *session) | 1143 | struct perf_session *session) |
1227 | { | 1144 | { |
1228 | __event_process_build_id(&self->build_id, | 1145 | __event_process_build_id(&event->build_id, |
1229 | self->build_id.filename, | 1146 | event->build_id.filename, |
1230 | session); | 1147 | session); |
1231 | return 0; | 1148 | return 0; |
1232 | } | 1149 | } |
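
Editor's note: the header.c hunks above are the core of this series' attribute rework — the fixed perf_header_attr array owned by struct perf_header goes away, and incoming attribute records are turned into perf_evsel entries on the session's perf_evlist. A condensed sketch of that new path, reusing only the calls visible in the hunk (it builds only inside tools/perf, so treat it as an illustration rather than a standalone program):

/* Illustration of the evsel/evlist path introduced above; perf-internal headers. */
#include <errno.h>
#include "evlist.h"
#include "evsel.h"
#include "session.h"

static int session__add_attr(struct perf_session *session, union perf_event *event)
{
	struct perf_evsel *evsel;

	if (session->evlist == NULL) {
		/* created lazily the first time an attr event shows up */
		session->evlist = perf_evlist__new(NULL, NULL);
		if (session->evlist == NULL)
			return -ENOMEM;
	}

	/* one evsel per attribute, indexed by its position in the list */
	evsel = perf_evsel__new(&event->attr.attr, session->evlist->nr_entries);
	if (evsel == NULL)
		return -ENOMEM;

	perf_evlist__add(session->evlist, evsel);
	return 0;
}
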
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h index 33f16be7b72f..456661d7f10e 100644 --- a/tools/perf/util/header.h +++ b/tools/perf/util/header.h | |||
@@ -9,13 +9,6 @@ | |||
9 | 9 | ||
10 | #include <linux/bitmap.h> | 10 | #include <linux/bitmap.h> |
11 | 11 | ||
12 | struct perf_header_attr { | ||
13 | struct perf_event_attr attr; | ||
14 | int ids, size; | ||
15 | u64 *id; | ||
16 | off_t id_offset; | ||
17 | }; | ||
18 | |||
19 | enum { | 12 | enum { |
20 | HEADER_TRACE_INFO = 1, | 13 | HEADER_TRACE_INFO = 1, |
21 | HEADER_BUILD_ID, | 14 | HEADER_BUILD_ID, |
@@ -46,14 +39,12 @@ struct perf_pipe_file_header { | |||
46 | 39 | ||
47 | struct perf_header; | 40 | struct perf_header; |
48 | 41 | ||
49 | int perf_file_header__read(struct perf_file_header *self, | 42 | int perf_file_header__read(struct perf_file_header *header, |
50 | struct perf_header *ph, int fd); | 43 | struct perf_header *ph, int fd); |
51 | 44 | ||
52 | struct perf_header { | 45 | struct perf_header { |
53 | int frozen; | 46 | int frozen; |
54 | int attrs, size; | ||
55 | bool needs_swap; | 47 | bool needs_swap; |
56 | struct perf_header_attr **attr; | ||
57 | s64 attr_offset; | 48 | s64 attr_offset; |
58 | u64 data_offset; | 49 | u64 data_offset; |
59 | u64 data_size; | 50 | u64 data_size; |
@@ -62,34 +53,25 @@ struct perf_header { | |||
62 | DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS); | 53 | DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS); |
63 | }; | 54 | }; |
64 | 55 | ||
65 | int perf_header__init(struct perf_header *self); | 56 | struct perf_evlist; |
66 | void perf_header__exit(struct perf_header *self); | ||
67 | 57 | ||
68 | int perf_header__read(struct perf_session *session, int fd); | 58 | int perf_session__read_header(struct perf_session *session, int fd); |
69 | int perf_header__write(struct perf_header *self, int fd, bool at_exit); | 59 | int perf_session__write_header(struct perf_session *session, |
60 | struct perf_evlist *evlist, | ||
61 | int fd, bool at_exit); | ||
70 | int perf_header__write_pipe(int fd); | 62 | int perf_header__write_pipe(int fd); |
71 | 63 | ||
72 | int perf_header__add_attr(struct perf_header *self, | ||
73 | struct perf_header_attr *attr); | ||
74 | |||
75 | int perf_header__push_event(u64 id, const char *name); | 64 | int perf_header__push_event(u64 id, const char *name); |
76 | char *perf_header__find_event(u64 id); | 65 | char *perf_header__find_event(u64 id); |
77 | 66 | ||
78 | struct perf_header_attr *perf_header_attr__new(struct perf_event_attr *attr); | 67 | u64 perf_evlist__sample_type(struct perf_evlist *evlist); |
79 | void perf_header_attr__delete(struct perf_header_attr *self); | 68 | bool perf_evlist__sample_id_all(const struct perf_evlist *evlist); |
69 | void perf_header__set_feat(struct perf_header *header, int feat); | ||
70 | void perf_header__clear_feat(struct perf_header *header, int feat); | ||
71 | bool perf_header__has_feat(const struct perf_header *header, int feat); | ||
80 | 72 | ||
81 | int perf_header_attr__add_id(struct perf_header_attr *self, u64 id); | 73 | int perf_header__process_sections(struct perf_header *header, int fd, |
82 | 74 | int (*process)(struct perf_file_section *section, | |
83 | u64 perf_header__sample_type(struct perf_header *header); | ||
84 | bool perf_header__sample_id_all(const struct perf_header *header); | ||
85 | struct perf_event_attr * | ||
86 | perf_header__find_attr(u64 id, struct perf_header *header); | ||
87 | void perf_header__set_feat(struct perf_header *self, int feat); | ||
88 | void perf_header__clear_feat(struct perf_header *self, int feat); | ||
89 | bool perf_header__has_feat(const struct perf_header *self, int feat); | ||
90 | |||
91 | int perf_header__process_sections(struct perf_header *self, int fd, | ||
92 | int (*process)(struct perf_file_section *self, | ||
93 | struct perf_header *ph, | 75 | struct perf_header *ph, |
94 | int feat, int fd)); | 76 | int feat, int fd)); |
95 | 77 | ||
@@ -97,32 +79,31 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir, | |||
97 | const char *name, bool is_kallsyms); | 79 | const char *name, bool is_kallsyms); |
98 | int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir); | 80 | int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir); |
99 | 81 | ||
100 | int event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id, | 82 | int perf_event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id, |
101 | event__handler_t process, | 83 | perf_event__handler_t process, |
102 | struct perf_session *session); | ||
103 | int event__synthesize_attrs(struct perf_header *self, | ||
104 | event__handler_t process, | ||
105 | struct perf_session *session); | ||
106 | int event__process_attr(event_t *self, struct perf_session *session); | ||
107 | |||
108 | int event__synthesize_event_type(u64 event_id, char *name, | ||
109 | event__handler_t process, | ||
110 | struct perf_session *session); | ||
111 | int event__synthesize_event_types(event__handler_t process, | ||
112 | struct perf_session *session); | ||
113 | int event__process_event_type(event_t *self, | ||
114 | struct perf_session *session); | ||
115 | |||
116 | int event__synthesize_tracing_data(int fd, struct list_head *pattrs, | ||
117 | event__handler_t process, | ||
118 | struct perf_session *session); | ||
119 | int event__process_tracing_data(event_t *self, | ||
120 | struct perf_session *session); | 84 | struct perf_session *session); |
85 | int perf_session__synthesize_attrs(struct perf_session *session, | ||
86 | perf_event__handler_t process); | ||
87 | int perf_event__process_attr(union perf_event *event, struct perf_session *session); | ||
88 | |||
89 | int perf_event__synthesize_event_type(u64 event_id, char *name, | ||
90 | perf_event__handler_t process, | ||
91 | struct perf_session *session); | ||
92 | int perf_event__synthesize_event_types(perf_event__handler_t process, | ||
93 | struct perf_session *session); | ||
94 | int perf_event__process_event_type(union perf_event *event, | ||
95 | struct perf_session *session); | ||
121 | 96 | ||
122 | int event__synthesize_build_id(struct dso *pos, u16 misc, | 97 | int perf_event__synthesize_tracing_data(int fd, struct perf_evlist *evlist, |
123 | event__handler_t process, | 98 | perf_event__handler_t process, |
124 | struct machine *machine, | 99 | struct perf_session *session); |
125 | struct perf_session *session); | 100 | int perf_event__process_tracing_data(union perf_event *event, |
126 | int event__process_build_id(event_t *self, struct perf_session *session); | 101 | struct perf_session *session); |
127 | 102 | ||
103 | int perf_event__synthesize_build_id(struct dso *pos, u16 misc, | ||
104 | perf_event__handler_t process, | ||
105 | struct machine *machine, | ||
106 | struct perf_session *session); | ||
107 | int perf_event__process_build_id(union perf_event *event, | ||
108 | struct perf_session *session); | ||
128 | #endif /* __PERF_HEADER_H */ | 109 | #endif /* __PERF_HEADER_H */ |
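
Editor's note: on the interface side, struct perf_header no longer carries attrs/size/attr[], and the old perf_header__sample_type()/perf_header__sample_id_all() queries are replaced by evlist-based ones declared above. A minimal sketch of how a reader would now answer the same questions, assuming a session whose evlist has been populated as in the previous note:

/* Illustrative only: sample-format queries now go through the evlist. */
#include <inttypes.h>
#include "debug.h"
#include "evlist.h"
#include "header.h"
#include "session.h"

static void describe_sample_format(struct perf_session *session)
{
	u64 type = perf_evlist__sample_type(session->evlist);
	bool id_all = perf_evlist__sample_id_all(session->evlist);

	/* id_all tells us whether non-sample records also carry the id fields */
	pr_debug("sample_type=%#" PRIx64 " sample_id_all=%d\n", type, id_all);
}
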
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index df51560f16f7..627a02e03c57 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c | |||
@@ -1,3 +1,4 @@ | |||
1 | #include "annotate.h" | ||
1 | #include "util.h" | 2 | #include "util.h" |
2 | #include "build-id.h" | 3 | #include "build-id.h" |
3 | #include "hist.h" | 4 | #include "hist.h" |
@@ -49,6 +50,15 @@ static void hists__calc_col_len(struct hists *self, struct hist_entry *h) | |||
49 | 50 | ||
50 | if (h->ms.sym) | 51 | if (h->ms.sym) |
51 | hists__new_col_len(self, HISTC_SYMBOL, h->ms.sym->namelen); | 52 | hists__new_col_len(self, HISTC_SYMBOL, h->ms.sym->namelen); |
53 | else { | ||
54 | const unsigned int unresolved_col_width = BITS_PER_LONG / 4; | ||
55 | |||
56 | if (hists__col_len(self, HISTC_DSO) < unresolved_col_width && | ||
57 | !symbol_conf.col_width_list_str && !symbol_conf.field_sep && | ||
58 | !symbol_conf.dso_list) | ||
59 | hists__set_col_len(self, HISTC_DSO, | ||
60 | unresolved_col_width); | ||
61 | } | ||
52 | 62 | ||
53 | len = thread__comm_len(h->thread); | 63 | len = thread__comm_len(h->thread); |
54 | if (hists__new_col_len(self, HISTC_COMM, len)) | 64 | if (hists__new_col_len(self, HISTC_COMM, len)) |
@@ -211,7 +221,9 @@ void hist_entry__free(struct hist_entry *he) | |||
211 | * collapse the histogram | 221 | * collapse the histogram |
212 | */ | 222 | */ |
213 | 223 | ||
214 | static bool collapse__insert_entry(struct rb_root *root, struct hist_entry *he) | 224 | static bool hists__collapse_insert_entry(struct hists *self, |
225 | struct rb_root *root, | ||
226 | struct hist_entry *he) | ||
215 | { | 227 | { |
216 | struct rb_node **p = &root->rb_node; | 228 | struct rb_node **p = &root->rb_node; |
217 | struct rb_node *parent = NULL; | 229 | struct rb_node *parent = NULL; |
@@ -226,8 +238,11 @@ static bool collapse__insert_entry(struct rb_root *root, struct hist_entry *he) | |||
226 | 238 | ||
227 | if (!cmp) { | 239 | if (!cmp) { |
228 | iter->period += he->period; | 240 | iter->period += he->period; |
229 | if (symbol_conf.use_callchain) | 241 | if (symbol_conf.use_callchain) { |
230 | callchain_merge(iter->callchain, he->callchain); | 242 | callchain_cursor_reset(&self->callchain_cursor); |
243 | callchain_merge(&self->callchain_cursor, iter->callchain, | ||
244 | he->callchain); | ||
245 | } | ||
231 | hist_entry__free(he); | 246 | hist_entry__free(he); |
232 | return false; | 247 | return false; |
233 | } | 248 | } |
@@ -262,7 +277,7 @@ void hists__collapse_resort(struct hists *self) | |||
262 | next = rb_next(&n->rb_node); | 277 | next = rb_next(&n->rb_node); |
263 | 278 | ||
264 | rb_erase(&n->rb_node, &self->entries); | 279 | rb_erase(&n->rb_node, &self->entries); |
265 | if (collapse__insert_entry(&tmp, n)) | 280 | if (hists__collapse_insert_entry(self, &tmp, n)) |
266 | hists__inc_nr_entries(self, n); | 281 | hists__inc_nr_entries(self, n); |
267 | } | 282 | } |
268 | 283 | ||
@@ -425,7 +440,7 @@ static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self, | |||
425 | u64 cumul; | 440 | u64 cumul; |
426 | 441 | ||
427 | child = rb_entry(node, struct callchain_node, rb_node); | 442 | child = rb_entry(node, struct callchain_node, rb_node); |
428 | cumul = cumul_hits(child); | 443 | cumul = callchain_cumul_hits(child); |
429 | remaining -= cumul; | 444 | remaining -= cumul; |
430 | 445 | ||
431 | /* | 446 | /* |
@@ -947,225 +962,14 @@ void hists__filter_by_thread(struct hists *self, const struct thread *thread) | |||
947 | } | 962 | } |
948 | } | 963 | } |
949 | 964 | ||
950 | static int symbol__alloc_hist(struct symbol *self) | 965 | int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip) |
951 | { | ||
952 | struct sym_priv *priv = symbol__priv(self); | ||
953 | const int size = (sizeof(*priv->hist) + | ||
954 | (self->end - self->start) * sizeof(u64)); | ||
955 | |||
956 | priv->hist = zalloc(size); | ||
957 | return priv->hist == NULL ? -1 : 0; | ||
958 | } | ||
959 | |||
960 | int hist_entry__inc_addr_samples(struct hist_entry *self, u64 ip) | ||
961 | { | ||
962 | unsigned int sym_size, offset; | ||
963 | struct symbol *sym = self->ms.sym; | ||
964 | struct sym_priv *priv; | ||
965 | struct sym_hist *h; | ||
966 | |||
967 | if (!sym || !self->ms.map) | ||
968 | return 0; | ||
969 | |||
970 | priv = symbol__priv(sym); | ||
971 | if (priv->hist == NULL && symbol__alloc_hist(sym) < 0) | ||
972 | return -ENOMEM; | ||
973 | |||
974 | sym_size = sym->end - sym->start; | ||
975 | offset = ip - sym->start; | ||
976 | |||
977 | pr_debug3("%s: ip=%#" PRIx64 "\n", __func__, self->ms.map->unmap_ip(self->ms.map, ip)); | ||
978 | |||
979 | if (offset >= sym_size) | ||
980 | return 0; | ||
981 | |||
982 | h = priv->hist; | ||
983 | h->sum++; | ||
984 | h->ip[offset]++; | ||
985 | |||
986 | pr_debug3("%#" PRIx64 " %s: period++ [ip: %#" PRIx64 ", %#" PRIx64 | ||
987 | "] => %" PRIu64 "\n", self->ms.sym->start, self->ms.sym->name, | ||
988 | ip, ip - self->ms.sym->start, h->ip[offset]); | ||
989 | return 0; | ||
990 | } | ||
991 | |||
992 | static struct objdump_line *objdump_line__new(s64 offset, char *line, size_t privsize) | ||
993 | { | ||
994 | struct objdump_line *self = malloc(sizeof(*self) + privsize); | ||
995 | |||
996 | if (self != NULL) { | ||
997 | self->offset = offset; | ||
998 | self->line = line; | ||
999 | } | ||
1000 | |||
1001 | return self; | ||
1002 | } | ||
1003 | |||
1004 | void objdump_line__free(struct objdump_line *self) | ||
1005 | { | ||
1006 | free(self->line); | ||
1007 | free(self); | ||
1008 | } | ||
1009 | |||
1010 | static void objdump__add_line(struct list_head *head, struct objdump_line *line) | ||
1011 | { | ||
1012 | list_add_tail(&line->node, head); | ||
1013 | } | ||
1014 | |||
1015 | struct objdump_line *objdump__get_next_ip_line(struct list_head *head, | ||
1016 | struct objdump_line *pos) | ||
1017 | { | ||
1018 | list_for_each_entry_continue(pos, head, node) | ||
1019 | if (pos->offset >= 0) | ||
1020 | return pos; | ||
1021 | |||
1022 | return NULL; | ||
1023 | } | ||
1024 | |||
1025 | static int hist_entry__parse_objdump_line(struct hist_entry *self, FILE *file, | ||
1026 | struct list_head *head, size_t privsize) | ||
1027 | { | 966 | { |
1028 | struct symbol *sym = self->ms.sym; | 967 | return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip); |
1029 | struct objdump_line *objdump_line; | ||
1030 | char *line = NULL, *tmp, *tmp2, *c; | ||
1031 | size_t line_len; | ||
1032 | s64 line_ip, offset = -1; | ||
1033 | |||
1034 | if (getline(&line, &line_len, file) < 0) | ||
1035 | return -1; | ||
1036 | |||
1037 | if (!line) | ||
1038 | return -1; | ||
1039 | |||
1040 | while (line_len != 0 && isspace(line[line_len - 1])) | ||
1041 | line[--line_len] = '\0'; | ||
1042 | |||
1043 | c = strchr(line, '\n'); | ||
1044 | if (c) | ||
1045 | *c = 0; | ||
1046 | |||
1047 | line_ip = -1; | ||
1048 | |||
1049 | /* | ||
1050 | * Strip leading spaces: | ||
1051 | */ | ||
1052 | tmp = line; | ||
1053 | while (*tmp) { | ||
1054 | if (*tmp != ' ') | ||
1055 | break; | ||
1056 | tmp++; | ||
1057 | } | ||
1058 | |||
1059 | if (*tmp) { | ||
1060 | /* | ||
1061 | * Parse hexa addresses followed by ':' | ||
1062 | */ | ||
1063 | line_ip = strtoull(tmp, &tmp2, 16); | ||
1064 | if (*tmp2 != ':' || tmp == tmp2 || tmp2[1] == '\0') | ||
1065 | line_ip = -1; | ||
1066 | } | ||
1067 | |||
1068 | if (line_ip != -1) { | ||
1069 | u64 start = map__rip_2objdump(self->ms.map, sym->start), | ||
1070 | end = map__rip_2objdump(self->ms.map, sym->end); | ||
1071 | |||
1072 | offset = line_ip - start; | ||
1073 | if (offset < 0 || (u64)line_ip > end) | ||
1074 | offset = -1; | ||
1075 | } | ||
1076 | |||
1077 | objdump_line = objdump_line__new(offset, line, privsize); | ||
1078 | if (objdump_line == NULL) { | ||
1079 | free(line); | ||
1080 | return -1; | ||
1081 | } | ||
1082 | objdump__add_line(head, objdump_line); | ||
1083 | |||
1084 | return 0; | ||
1085 | } | 968 | } |
1086 | 969 | ||
1087 | int hist_entry__annotate(struct hist_entry *self, struct list_head *head, | 970 | int hist_entry__annotate(struct hist_entry *he, size_t privsize) |
1088 | size_t privsize) | ||
1089 | { | 971 | { |
1090 | struct symbol *sym = self->ms.sym; | 972 | return symbol__annotate(he->ms.sym, he->ms.map, privsize); |
1091 | struct map *map = self->ms.map; | ||
1092 | struct dso *dso = map->dso; | ||
1093 | char *filename = dso__build_id_filename(dso, NULL, 0); | ||
1094 | bool free_filename = true; | ||
1095 | char command[PATH_MAX * 2]; | ||
1096 | FILE *file; | ||
1097 | int err = 0; | ||
1098 | u64 len; | ||
1099 | char symfs_filename[PATH_MAX]; | ||
1100 | |||
1101 | if (filename) { | ||
1102 | snprintf(symfs_filename, sizeof(symfs_filename), "%s%s", | ||
1103 | symbol_conf.symfs, filename); | ||
1104 | } | ||
1105 | |||
1106 | if (filename == NULL) { | ||
1107 | if (dso->has_build_id) { | ||
1108 | pr_err("Can't annotate %s: not enough memory\n", | ||
1109 | sym->name); | ||
1110 | return -ENOMEM; | ||
1111 | } | ||
1112 | goto fallback; | ||
1113 | } else if (readlink(symfs_filename, command, sizeof(command)) < 0 || | ||
1114 | strstr(command, "[kernel.kallsyms]") || | ||
1115 | access(symfs_filename, R_OK)) { | ||
1116 | free(filename); | ||
1117 | fallback: | ||
1118 | /* | ||
1119 | * If we don't have build-ids or the build-id file isn't in the | ||
1120 | * cache, or is just a kallsyms file, well, lets hope that this | ||
1121 | * DSO is the same as when 'perf record' ran. | ||
1122 | */ | ||
1123 | filename = dso->long_name; | ||
1124 | snprintf(symfs_filename, sizeof(symfs_filename), "%s%s", | ||
1125 | symbol_conf.symfs, filename); | ||
1126 | free_filename = false; | ||
1127 | } | ||
1128 | |||
1129 | if (dso->origin == DSO__ORIG_KERNEL) { | ||
1130 | if (dso->annotate_warned) | ||
1131 | goto out_free_filename; | ||
1132 | err = -ENOENT; | ||
1133 | dso->annotate_warned = 1; | ||
1134 | pr_err("Can't annotate %s: No vmlinux file was found in the " | ||
1135 | "path\n", sym->name); | ||
1136 | goto out_free_filename; | ||
1137 | } | ||
1138 | |||
1139 | pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__, | ||
1140 | filename, sym->name, map->unmap_ip(map, sym->start), | ||
1141 | map->unmap_ip(map, sym->end)); | ||
1142 | |||
1143 | len = sym->end - sym->start; | ||
1144 | |||
1145 | pr_debug("annotating [%p] %30s : [%p] %30s\n", | ||
1146 | dso, dso->long_name, sym, sym->name); | ||
1147 | |||
1148 | snprintf(command, sizeof(command), | ||
1149 | "objdump --start-address=0x%016" PRIx64 " --stop-address=0x%016" PRIx64 " -dS -C %s|grep -v %s|expand", | ||
1150 | map__rip_2objdump(map, sym->start), | ||
1151 | map__rip_2objdump(map, sym->end), | ||
1152 | symfs_filename, filename); | ||
1153 | |||
1154 | pr_debug("Executing: %s\n", command); | ||
1155 | |||
1156 | file = popen(command, "r"); | ||
1157 | if (!file) | ||
1158 | goto out_free_filename; | ||
1159 | |||
1160 | while (!feof(file)) | ||
1161 | if (hist_entry__parse_objdump_line(self, file, head, privsize) < 0) | ||
1162 | break; | ||
1163 | |||
1164 | pclose(file); | ||
1165 | out_free_filename: | ||
1166 | if (free_filename) | ||
1167 | free(filename); | ||
1168 | return err; | ||
1169 | } | 973 | } |
1170 | 974 | ||
1171 | void hists__inc_nr_events(struct hists *self, u32 type) | 975 | void hists__inc_nr_events(struct hists *self, u32 type) |
@@ -1180,8 +984,12 @@ size_t hists__fprintf_nr_events(struct hists *self, FILE *fp) | |||
1180 | size_t ret = 0; | 984 | size_t ret = 0; |
1181 | 985 | ||
1182 | for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) { | 986 | for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) { |
1183 | const char *name = event__get_event_name(i); | 987 | const char *name; |
988 | |||
989 | if (self->stats.nr_events[i] == 0) | ||
990 | continue; | ||
1184 | 991 | ||
992 | name = perf_event__name(i); | ||
1185 | if (!strcmp(name, "UNKNOWN")) | 993 | if (!strcmp(name, "UNKNOWN")) |
1186 | continue; | 994 | continue; |
1187 | 995 | ||
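
Editor's note: the big deletion in hist.c is the objdump-driven annotation machinery, which moves into the new annotate.c (note the added #include "annotate.h" at the top of the file). The two surviving entry points become one-line wrappers that also take an evidx, so each event in the evlist gets its own histogram. A hypothetical caller, just to show the new shape of the calls — the evidx would normally be the event's position in the evlist, i.e. the index passed to perf_evsel__new():

/* Hypothetical call site for the reworked annotation entry points. */
#include "debug.h"
#include "hist.h"

static void annotate_entry(struct hist_entry *he, int evidx, u64 ip)
{
	/* bump the per-event histogram bucket for this address */
	if (hist_entry__inc_addr_samples(he, evidx, ip) < 0)
		pr_debug("out of memory allocating the annotation histogram\n");

	/* parse the disassembly once, with no extra per-line private data */
	if (hist_entry__annotate(he, 0) < 0)
		pr_debug("annotation failed for %s\n",
			 he->ms.sym ? he->ms.sym->name : "?");
}
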
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index ee789856a8c9..cb6858a2f9a3 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h | |||
@@ -9,33 +9,6 @@ extern struct callchain_param callchain_param; | |||
9 | struct hist_entry; | 9 | struct hist_entry; |
10 | struct addr_location; | 10 | struct addr_location; |
11 | struct symbol; | 11 | struct symbol; |
12 | struct rb_root; | ||
13 | |||
14 | struct objdump_line { | ||
15 | struct list_head node; | ||
16 | s64 offset; | ||
17 | char *line; | ||
18 | }; | ||
19 | |||
20 | void objdump_line__free(struct objdump_line *self); | ||
21 | struct objdump_line *objdump__get_next_ip_line(struct list_head *head, | ||
22 | struct objdump_line *pos); | ||
23 | |||
24 | struct sym_hist { | ||
25 | u64 sum; | ||
26 | u64 ip[0]; | ||
27 | }; | ||
28 | |||
29 | struct sym_ext { | ||
30 | struct rb_node node; | ||
31 | double percent; | ||
32 | char *path; | ||
33 | }; | ||
34 | |||
35 | struct sym_priv { | ||
36 | struct sym_hist *hist; | ||
37 | struct sym_ext *ext; | ||
38 | }; | ||
39 | 12 | ||
40 | /* | 13 | /* |
41 | * The kernel collects the number of events it couldn't send in a stretch and | 14 | * The kernel collects the number of events it couldn't send in a stretch and |
@@ -69,14 +42,13 @@ enum hist_column { | |||
69 | }; | 42 | }; |
70 | 43 | ||
71 | struct hists { | 44 | struct hists { |
72 | struct rb_node rb_node; | ||
73 | struct rb_root entries; | 45 | struct rb_root entries; |
74 | u64 nr_entries; | 46 | u64 nr_entries; |
75 | struct events_stats stats; | 47 | struct events_stats stats; |
76 | u64 config; | ||
77 | u64 event_stream; | 48 | u64 event_stream; |
78 | u32 type; | ||
79 | u16 col_len[HISTC_NR_COLS]; | 49 | u16 col_len[HISTC_NR_COLS]; |
50 | /* Best would be to reuse the session callchain cursor */ | ||
51 | struct callchain_cursor callchain_cursor; | ||
80 | }; | 52 | }; |
81 | 53 | ||
82 | struct hist_entry *__hists__add_entry(struct hists *self, | 54 | struct hist_entry *__hists__add_entry(struct hists *self, |
@@ -102,9 +74,8 @@ size_t hists__fprintf_nr_events(struct hists *self, FILE *fp); | |||
102 | size_t hists__fprintf(struct hists *self, struct hists *pair, | 74 | size_t hists__fprintf(struct hists *self, struct hists *pair, |
103 | bool show_displacement, FILE *fp); | 75 | bool show_displacement, FILE *fp); |
104 | 76 | ||
105 | int hist_entry__inc_addr_samples(struct hist_entry *self, u64 ip); | 77 | int hist_entry__inc_addr_samples(struct hist_entry *self, int evidx, u64 addr); |
106 | int hist_entry__annotate(struct hist_entry *self, struct list_head *head, | 78 | int hist_entry__annotate(struct hist_entry *self, size_t privsize); |
107 | size_t privsize); | ||
108 | 79 | ||
109 | void hists__filter_by_dso(struct hists *self, const struct dso *dso); | 80 | void hists__filter_by_dso(struct hists *self, const struct dso *dso); |
110 | void hists__filter_by_thread(struct hists *self, const struct thread *thread); | 81 | void hists__filter_by_thread(struct hists *self, const struct thread *thread); |
@@ -113,21 +84,18 @@ u16 hists__col_len(struct hists *self, enum hist_column col); | |||
113 | void hists__set_col_len(struct hists *self, enum hist_column col, u16 len); | 84 | void hists__set_col_len(struct hists *self, enum hist_column col, u16 len); |
114 | bool hists__new_col_len(struct hists *self, enum hist_column col, u16 len); | 85 | bool hists__new_col_len(struct hists *self, enum hist_column col, u16 len); |
115 | 86 | ||
116 | #ifdef NO_NEWT_SUPPORT | 87 | struct perf_evlist; |
117 | static inline int hists__browse(struct hists *self __used, | ||
118 | const char *helpline __used, | ||
119 | const char *ev_name __used) | ||
120 | { | ||
121 | return 0; | ||
122 | } | ||
123 | 88 | ||
124 | static inline int hists__tui_browse_tree(struct rb_root *self __used, | 89 | #ifdef NO_NEWT_SUPPORT |
125 | const char *help __used) | 90 | static inline |
91 | int perf_evlist__tui_browse_hists(struct perf_evlist *evlist __used, | ||
92 | const char *help __used) | ||
126 | { | 93 | { |
127 | return 0; | 94 | return 0; |
128 | } | 95 | } |
129 | 96 | ||
130 | static inline int hist_entry__tui_annotate(struct hist_entry *self __used) | 97 | static inline int hist_entry__tui_annotate(struct hist_entry *self __used, |
98 | int evidx __used) | ||
131 | { | 99 | { |
132 | return 0; | 100 | return 0; |
133 | } | 101 | } |
@@ -135,14 +103,12 @@ static inline int hist_entry__tui_annotate(struct hist_entry *self __used) | |||
135 | #define KEY_RIGHT -2 | 103 | #define KEY_RIGHT -2 |
136 | #else | 104 | #else |
137 | #include <newt.h> | 105 | #include <newt.h> |
138 | int hists__browse(struct hists *self, const char *helpline, | 106 | int hist_entry__tui_annotate(struct hist_entry *self, int evidx); |
139 | const char *ev_name); | ||
140 | int hist_entry__tui_annotate(struct hist_entry *self); | ||
141 | 107 | ||
142 | #define KEY_LEFT NEWT_KEY_LEFT | 108 | #define KEY_LEFT NEWT_KEY_LEFT |
143 | #define KEY_RIGHT NEWT_KEY_RIGHT | 109 | #define KEY_RIGHT NEWT_KEY_RIGHT |
144 | 110 | ||
145 | int hists__tui_browse_tree(struct rb_root *self, const char *help); | 111 | int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help); |
146 | #endif | 112 | #endif |
147 | 113 | ||
148 | unsigned int hists__sort_list_width(struct hists *self); | 114 | unsigned int hists__sort_list_width(struct hists *self); |
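
Editor's note: hist.h follows suit — the per-hists browser entry points (hists__browse, hists__tui_browse_tree) are folded into a single perf_evlist__tui_browse_hists(), with the NO_NEWT_SUPPORT stubs keeping non-TUI builds compiling. A hedged sketch of a report-style caller under this split; use_browser, help and the stdio fallback wiring are assumptions about the surrounding builtin, not part of this hunk:

/* Illustrative caller: TUI when available, plain stdio listing otherwise. */
#include <stdio.h>
#include "evlist.h"
#include "hist.h"

extern int use_browser;	/* assumed declared by the perf UI code */

static void browse_or_print(struct perf_evlist *evlist, struct hists *hists,
			    const char *help)
{
	if (use_browser > 0)
		perf_evlist__tui_browse_hists(evlist, help);	/* newt browser */
	else
		hists__fprintf(hists, NULL, false, stdout);	/* plain listing */
}
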
diff --git a/tools/perf/util/include/linux/list.h b/tools/perf/util/include/linux/list.h index f5ca26e53fbb..356c7e467b83 100644 --- a/tools/perf/util/include/linux/list.h +++ b/tools/perf/util/include/linux/list.h | |||
@@ -1,3 +1,4 @@ | |||
1 | #include <linux/kernel.h> | ||
1 | #include "../../../../include/linux/list.h" | 2 | #include "../../../../include/linux/list.h" |
2 | 3 | ||
3 | #ifndef PERF_LIST_H | 4 | #ifndef PERF_LIST_H |
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 135f69baf966..54a7e2634d58 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c | |||
@@ -1,6 +1,7 @@ | |||
1 | #include "../../../include/linux/hw_breakpoint.h" | 1 | #include "../../../include/linux/hw_breakpoint.h" |
2 | #include "util.h" | 2 | #include "util.h" |
3 | #include "../perf.h" | 3 | #include "../perf.h" |
4 | #include "evlist.h" | ||
4 | #include "evsel.h" | 5 | #include "evsel.h" |
5 | #include "parse-options.h" | 6 | #include "parse-options.h" |
6 | #include "parse-events.h" | 7 | #include "parse-events.h" |
@@ -11,10 +12,6 @@ | |||
11 | #include "header.h" | 12 | #include "header.h" |
12 | #include "debugfs.h" | 13 | #include "debugfs.h" |
13 | 14 | ||
14 | int nr_counters; | ||
15 | |||
16 | LIST_HEAD(evsel_list); | ||
17 | |||
18 | struct event_symbol { | 15 | struct event_symbol { |
19 | u8 type; | 16 | u8 type; |
20 | u64 config; | 17 | u64 config; |
@@ -271,6 +268,9 @@ const char *event_name(struct perf_evsel *evsel) | |||
271 | u64 config = evsel->attr.config; | 268 | u64 config = evsel->attr.config; |
272 | int type = evsel->attr.type; | 269 | int type = evsel->attr.type; |
273 | 270 | ||
271 | if (evsel->name) | ||
272 | return evsel->name; | ||
273 | |||
274 | return __event_name(type, config); | 274 | return __event_name(type, config); |
275 | } | 275 | } |
276 | 276 | ||
@@ -449,8 +449,8 @@ parse_single_tracepoint_event(char *sys_name, | |||
449 | /* sys + ':' + event + ':' + flags*/ | 449 | /* sys + ':' + event + ':' + flags*/ |
450 | #define MAX_EVOPT_LEN (MAX_EVENT_LENGTH * 2 + 2 + 128) | 450 | #define MAX_EVOPT_LEN (MAX_EVENT_LENGTH * 2 + 2 + 128) |
451 | static enum event_result | 451 | static enum event_result |
452 | parse_multiple_tracepoint_event(char *sys_name, const char *evt_exp, | 452 | parse_multiple_tracepoint_event(const struct option *opt, char *sys_name, |
453 | char *flags) | 453 | const char *evt_exp, char *flags) |
454 | { | 454 | { |
455 | char evt_path[MAXPATHLEN]; | 455 | char evt_path[MAXPATHLEN]; |
456 | struct dirent *evt_ent; | 456 | struct dirent *evt_ent; |
@@ -483,15 +483,16 @@ parse_multiple_tracepoint_event(char *sys_name, const char *evt_exp, | |||
483 | if (len < 0) | 483 | if (len < 0) |
484 | return EVT_FAILED; | 484 | return EVT_FAILED; |
485 | 485 | ||
486 | if (parse_events(NULL, event_opt, 0)) | 486 | if (parse_events(opt, event_opt, 0)) |
487 | return EVT_FAILED; | 487 | return EVT_FAILED; |
488 | } | 488 | } |
489 | 489 | ||
490 | return EVT_HANDLED_ALL; | 490 | return EVT_HANDLED_ALL; |
491 | } | 491 | } |
492 | 492 | ||
493 | static enum event_result parse_tracepoint_event(const char **strp, | 493 | static enum event_result |
494 | struct perf_event_attr *attr) | 494 | parse_tracepoint_event(const struct option *opt, const char **strp, |
495 | struct perf_event_attr *attr) | ||
495 | { | 496 | { |
496 | const char *evt_name; | 497 | const char *evt_name; |
497 | char *flags = NULL, *comma_loc; | 498 | char *flags = NULL, *comma_loc; |
@@ -530,7 +531,7 @@ static enum event_result parse_tracepoint_event(const char **strp, | |||
530 | return EVT_FAILED; | 531 | return EVT_FAILED; |
531 | if (strpbrk(evt_name, "*?")) { | 532 | if (strpbrk(evt_name, "*?")) { |
532 | *strp += strlen(sys_name) + evt_length + 1; /* 1 == the ':' */ | 533 | *strp += strlen(sys_name) + evt_length + 1; /* 1 == the ':' */ |
533 | return parse_multiple_tracepoint_event(sys_name, evt_name, | 534 | return parse_multiple_tracepoint_event(opt, sys_name, evt_name, |
534 | flags); | 535 | flags); |
535 | } else { | 536 | } else { |
536 | return parse_single_tracepoint_event(sys_name, evt_name, | 537 | return parse_single_tracepoint_event(sys_name, evt_name, |
@@ -740,11 +741,12 @@ parse_event_modifier(const char **strp, struct perf_event_attr *attr) | |||
740 | * Symbolic names are (almost) exactly matched. | 741 | * Symbolic names are (almost) exactly matched. |
741 | */ | 742 | */ |
742 | static enum event_result | 743 | static enum event_result |
743 | parse_event_symbols(const char **str, struct perf_event_attr *attr) | 744 | parse_event_symbols(const struct option *opt, const char **str, |
745 | struct perf_event_attr *attr) | ||
744 | { | 746 | { |
745 | enum event_result ret; | 747 | enum event_result ret; |
746 | 748 | ||
747 | ret = parse_tracepoint_event(str, attr); | 749 | ret = parse_tracepoint_event(opt, str, attr); |
748 | if (ret != EVT_FAILED) | 750 | if (ret != EVT_FAILED) |
749 | goto modifier; | 751 | goto modifier; |
750 | 752 | ||
@@ -778,14 +780,17 @@ modifier: | |||
778 | return ret; | 780 | return ret; |
779 | } | 781 | } |
780 | 782 | ||
781 | int parse_events(const struct option *opt __used, const char *str, int unset __used) | 783 | int parse_events(const struct option *opt, const char *str, int unset __used) |
782 | { | 784 | { |
785 | struct perf_evlist *evlist = *(struct perf_evlist **)opt->value; | ||
783 | struct perf_event_attr attr; | 786 | struct perf_event_attr attr; |
784 | enum event_result ret; | 787 | enum event_result ret; |
788 | const char *ostr; | ||
785 | 789 | ||
786 | for (;;) { | 790 | for (;;) { |
791 | ostr = str; | ||
787 | memset(&attr, 0, sizeof(attr)); | 792 | memset(&attr, 0, sizeof(attr)); |
788 | ret = parse_event_symbols(&str, &attr); | 793 | ret = parse_event_symbols(opt, &str, &attr); |
789 | if (ret == EVT_FAILED) | 794 | if (ret == EVT_FAILED) |
790 | return -1; | 795 | return -1; |
791 | 796 | ||
@@ -794,12 +799,15 @@ int parse_events(const struct option *opt __used, const char *str, int unset __u | |||
794 | 799 | ||
795 | if (ret != EVT_HANDLED_ALL) { | 800 | if (ret != EVT_HANDLED_ALL) { |
796 | struct perf_evsel *evsel; | 801 | struct perf_evsel *evsel; |
797 | evsel = perf_evsel__new(&attr, | 802 | evsel = perf_evsel__new(&attr, evlist->nr_entries); |
798 | nr_counters); | ||
799 | if (evsel == NULL) | 803 | if (evsel == NULL) |
800 | return -1; | 804 | return -1; |
801 | list_add_tail(&evsel->node, &evsel_list); | 805 | perf_evlist__add(evlist, evsel); |
802 | ++nr_counters; | 806 | |
807 | evsel->name = calloc(str - ostr + 1, 1); | ||
808 | if (!evsel->name) | ||
809 | return -1; | ||
810 | strncpy(evsel->name, ostr, str - ostr); | ||
803 | } | 811 | } |
804 | 812 | ||
805 | if (*str == 0) | 813 | if (*str == 0) |
@@ -813,13 +821,14 @@ int parse_events(const struct option *opt __used, const char *str, int unset __u | |||
813 | return 0; | 821 | return 0; |
814 | } | 822 | } |
815 | 823 | ||
816 | int parse_filter(const struct option *opt __used, const char *str, | 824 | int parse_filter(const struct option *opt, const char *str, |
817 | int unset __used) | 825 | int unset __used) |
818 | { | 826 | { |
827 | struct perf_evlist *evlist = *(struct perf_evlist **)opt->value; | ||
819 | struct perf_evsel *last = NULL; | 828 | struct perf_evsel *last = NULL; |
820 | 829 | ||
821 | if (!list_empty(&evsel_list)) | 830 | if (evlist->nr_entries > 0) |
822 | last = list_entry(evsel_list.prev, struct perf_evsel, node); | 831 | last = list_entry(evlist->entries.prev, struct perf_evsel, node); |
823 | 832 | ||
824 | if (last == NULL || last->attr.type != PERF_TYPE_TRACEPOINT) { | 833 | if (last == NULL || last->attr.type != PERF_TYPE_TRACEPOINT) { |
825 | fprintf(stderr, | 834 | fprintf(stderr, |
@@ -849,7 +858,7 @@ static const char * const event_type_descriptors[] = { | |||
849 | * Print the events from <debugfs_mount_point>/tracing/events | 858 | * Print the events from <debugfs_mount_point>/tracing/events |
850 | */ | 859 | */ |
851 | 860 | ||
852 | static void print_tracepoint_events(void) | 861 | void print_tracepoint_events(const char *subsys_glob, const char *event_glob) |
853 | { | 862 | { |
854 | DIR *sys_dir, *evt_dir; | 863 | DIR *sys_dir, *evt_dir; |
855 | struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent; | 864 | struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent; |
@@ -864,6 +873,9 @@ static void print_tracepoint_events(void) | |||
864 | return; | 873 | return; |
865 | 874 | ||
866 | for_each_subsystem(sys_dir, sys_dirent, sys_next) { | 875 | for_each_subsystem(sys_dir, sys_dirent, sys_next) { |
876 | if (subsys_glob != NULL && | ||
877 | !strglobmatch(sys_dirent.d_name, subsys_glob)) | ||
878 | continue; | ||
867 | 879 | ||
868 | snprintf(dir_path, MAXPATHLEN, "%s/%s", debugfs_path, | 880 | snprintf(dir_path, MAXPATHLEN, "%s/%s", debugfs_path, |
869 | sys_dirent.d_name); | 881 | sys_dirent.d_name); |
@@ -872,6 +884,10 @@ static void print_tracepoint_events(void) | |||
872 | continue; | 884 | continue; |
873 | 885 | ||
874 | for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) { | 886 | for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) { |
887 | if (event_glob != NULL && | ||
888 | !strglobmatch(evt_dirent.d_name, event_glob)) | ||
889 | continue; | ||
890 | |||
875 | snprintf(evt_path, MAXPATHLEN, "%s:%s", | 891 | snprintf(evt_path, MAXPATHLEN, "%s:%s", |
876 | sys_dirent.d_name, evt_dirent.d_name); | 892 | sys_dirent.d_name, evt_dirent.d_name); |
877 | printf(" %-42s [%s]\n", evt_path, | 893 | printf(" %-42s [%s]\n", evt_path, |
@@ -923,13 +939,61 @@ int is_valid_tracepoint(const char *event_string) | |||
923 | return 0; | 939 | return 0; |
924 | } | 940 | } |
925 | 941 | ||
942 | void print_events_type(u8 type) | ||
943 | { | ||
944 | struct event_symbol *syms = event_symbols; | ||
945 | unsigned int i; | ||
946 | char name[64]; | ||
947 | |||
948 | for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) { | ||
949 | if (type != syms->type) | ||
950 | continue; | ||
951 | |||
952 | if (strlen(syms->alias)) | ||
953 | snprintf(name, sizeof(name), "%s OR %s", | ||
954 | syms->symbol, syms->alias); | ||
955 | else | ||
956 | snprintf(name, sizeof(name), "%s", syms->symbol); | ||
957 | |||
958 | printf(" %-42s [%s]\n", name, | ||
959 | event_type_descriptors[type]); | ||
960 | } | ||
961 | } | ||
962 | |||
963 | int print_hwcache_events(const char *event_glob) | ||
964 | { | ||
965 | unsigned int type, op, i, printed = 0; | ||
966 | |||
967 | for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) { | ||
968 | for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) { | ||
969 | /* skip invalid cache type */ | ||
970 | if (!is_cache_op_valid(type, op)) | ||
971 | continue; | ||
972 | |||
973 | for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { | ||
974 | char *name = event_cache_name(type, op, i); | ||
975 | |||
976 | if (event_glob != NULL && | ||
977 | !strglobmatch(name, event_glob)) | ||
978 | continue; | ||
979 | |||
980 | printf(" %-42s [%s]\n", name, | ||
981 | event_type_descriptors[PERF_TYPE_HW_CACHE]); | ||
982 | ++printed; | ||
983 | } | ||
984 | } | ||
985 | } | ||
986 | |||
987 | return printed; | ||
988 | } | ||
989 | |||
926 | /* | 990 | /* |
927 | * Print the help text for the event symbols: | 991 | * Print the help text for the event symbols: |
928 | */ | 992 | */ |
929 | void print_events(void) | 993 | void print_events(const char *event_glob) |
930 | { | 994 | { |
931 | struct event_symbol *syms = event_symbols; | 995 | struct event_symbol *syms = event_symbols; |
932 | unsigned int i, type, op, prev_type = -1; | 996 | unsigned int i, type, prev_type = -1, printed = 0, ntypes_printed = 0; |
933 | char name[40]; | 997 | char name[40]; |
934 | 998 | ||
935 | printf("\n"); | 999 | printf("\n"); |
@@ -938,8 +1002,16 @@ void print_events(void) | |||
938 | for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) { | 1002 | for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) { |
939 | type = syms->type; | 1003 | type = syms->type; |
940 | 1004 | ||
941 | if (type != prev_type) | 1005 | if (type != prev_type && printed) { |
942 | printf("\n"); | 1006 | printf("\n"); |
1007 | printed = 0; | ||
1008 | ntypes_printed++; | ||
1009 | } | ||
1010 | |||
1011 | if (event_glob != NULL && | ||
1012 | !(strglobmatch(syms->symbol, event_glob) || | ||
1013 | (syms->alias && strglobmatch(syms->alias, event_glob)))) | ||
1014 | continue; | ||
943 | 1015 | ||
944 | if (strlen(syms->alias)) | 1016 | if (strlen(syms->alias)) |
945 | sprintf(name, "%s OR %s", syms->symbol, syms->alias); | 1017 | sprintf(name, "%s OR %s", syms->symbol, syms->alias); |
@@ -949,22 +1021,17 @@ void print_events(void) | |||
949 | event_type_descriptors[type]); | 1021 | event_type_descriptors[type]); |
950 | 1022 | ||
951 | prev_type = type; | 1023 | prev_type = type; |
1024 | ++printed; | ||
952 | } | 1025 | } |
953 | 1026 | ||
954 | printf("\n"); | 1027 | if (ntypes_printed) { |
955 | for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) { | 1028 | printed = 0; |
956 | for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) { | 1029 | printf("\n"); |
957 | /* skip invalid cache type */ | ||
958 | if (!is_cache_op_valid(type, op)) | ||
959 | continue; | ||
960 | |||
961 | for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { | ||
962 | printf(" %-42s [%s]\n", | ||
963 | event_cache_name(type, op, i), | ||
964 | event_type_descriptors[PERF_TYPE_HW_CACHE]); | ||
965 | } | ||
966 | } | ||
967 | } | 1030 | } |
1031 | print_hwcache_events(event_glob); | ||
1032 | |||
1033 | if (event_glob != NULL) | ||
1034 | return; | ||
968 | 1035 | ||
969 | printf("\n"); | 1036 | printf("\n"); |
970 | printf(" %-42s [%s]\n", | 1037 | printf(" %-42s [%s]\n", |
@@ -977,37 +1044,7 @@ void print_events(void) | |||
977 | event_type_descriptors[PERF_TYPE_BREAKPOINT]); | 1044 | event_type_descriptors[PERF_TYPE_BREAKPOINT]); |
978 | printf("\n"); | 1045 | printf("\n"); |
979 | 1046 | ||
980 | print_tracepoint_events(); | 1047 | print_tracepoint_events(NULL, NULL); |
981 | 1048 | ||
982 | exit(129); | 1049 | exit(129); |
983 | } | 1050 | } |
984 | |||
985 | int perf_evsel_list__create_default(void) | ||
986 | { | ||
987 | struct perf_evsel *evsel; | ||
988 | struct perf_event_attr attr; | ||
989 | |||
990 | memset(&attr, 0, sizeof(attr)); | ||
991 | attr.type = PERF_TYPE_HARDWARE; | ||
992 | attr.config = PERF_COUNT_HW_CPU_CYCLES; | ||
993 | |||
994 | evsel = perf_evsel__new(&attr, 0); | ||
995 | |||
996 | if (evsel == NULL) | ||
997 | return -ENOMEM; | ||
998 | |||
999 | list_add(&evsel->node, &evsel_list); | ||
1000 | ++nr_counters; | ||
1001 | return 0; | ||
1002 | } | ||
1003 | |||
1004 | void perf_evsel_list__delete(void) | ||
1005 | { | ||
1006 | struct perf_evsel *pos, *n; | ||
1007 | |||
1008 | list_for_each_entry_safe(pos, n, &evsel_list, node) { | ||
1009 | list_del_init(&pos->node); | ||
1010 | perf_evsel__delete(pos); | ||
1011 | } | ||
1012 | nr_counters = 0; | ||
1013 | } | ||
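
Editor's note: parse-events.c drops the evsel_list/nr_counters globals — parse_events() and parse_filter() now pull the target struct perf_evlist * out of opt->value, and every new evsel is appended with perf_evlist__add() and named from the parsed string. The table below is a guess at how a builtin now passes its evlist through the option parser; the option text is illustrative, only the &evlist value and the two callbacks come from this hunk:

/* Hypothetical builtin option table: the evlist rides in opt->value. */
#include "evlist.h"
#include "parse-events.h"
#include "parse-options.h"

static struct perf_evlist *evlist;	/* created with perf_evlist__new() at startup */

static const struct option options[] = {
	OPT_CALLBACK('e', "event", &evlist, "event",
		     "event selector, see 'perf list'", parse_events),
	OPT_CALLBACK(0, "filter", &evlist, "filter",
		     "event filter for the last specified tracepoint", parse_filter),
	OPT_END()
};
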
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h index 458e3ecf17af..212f88e07a9c 100644 --- a/tools/perf/util/parse-events.h +++ b/tools/perf/util/parse-events.h | |||
@@ -9,11 +9,6 @@ | |||
9 | struct list_head; | 9 | struct list_head; |
10 | struct perf_evsel; | 10 | struct perf_evsel; |
11 | 11 | ||
12 | extern struct list_head evsel_list; | ||
13 | |||
14 | int perf_evsel_list__create_default(void); | ||
15 | void perf_evsel_list__delete(void); | ||
16 | |||
17 | struct option; | 12 | struct option; |
18 | 13 | ||
19 | struct tracepoint_path { | 14 | struct tracepoint_path { |
@@ -25,8 +20,6 @@ struct tracepoint_path { | |||
25 | extern struct tracepoint_path *tracepoint_id_to_path(u64 config); | 20 | extern struct tracepoint_path *tracepoint_id_to_path(u64 config); |
26 | extern bool have_tracepoints(struct list_head *evlist); | 21 | extern bool have_tracepoints(struct list_head *evlist); |
27 | 22 | ||
28 | extern int nr_counters; | ||
29 | |||
30 | const char *event_name(struct perf_evsel *event); | 23 | const char *event_name(struct perf_evsel *event); |
31 | extern const char *__event_name(int type, u64 config); | 24 | extern const char *__event_name(int type, u64 config); |
32 | 25 | ||
@@ -35,7 +28,10 @@ extern int parse_filter(const struct option *opt, const char *str, int unset); | |||
35 | 28 | ||
36 | #define EVENTS_HELP_MAX (128*1024) | 29 | #define EVENTS_HELP_MAX (128*1024) |
37 | 30 | ||
38 | extern void print_events(void); | 31 | void print_events(const char *event_glob); |
32 | void print_events_type(u8 type); | ||
33 | void print_tracepoint_events(const char *subsys_glob, const char *event_glob); | ||
34 | int print_hwcache_events(const char *event_glob); | ||
39 | extern int is_valid_tracepoint(const char *event_string); | 35 | extern int is_valid_tracepoint(const char *event_string); |
40 | 36 | ||
41 | extern char debugfs_path[]; | 37 | extern char debugfs_path[]; |
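
Editor's note: the listing helpers exported here all grow glob arguments, so 'perf list'-style output can be narrowed to matching subsystems or event names. A couple of illustrative calls (the glob strings are made up):

/* Illustrative use of the glob-filtered listing helpers declared above. */
#include "parse-events.h"

static void list_some_events(void)
{
	/* only tracepoints under the "sched" subsystem */
	print_tracepoint_events("sched", NULL);

	/* hardware-cache events whose name matches "L1-dcache*" */
	print_hwcache_events("L1-dcache*");
}
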
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index 6e29d9c9dccc..5ddee66020a7 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <string.h> | 31 | #include <string.h> |
32 | #include <stdarg.h> | 32 | #include <stdarg.h> |
33 | #include <limits.h> | 33 | #include <limits.h> |
34 | #include <elf.h> | ||
34 | 35 | ||
35 | #undef _GNU_SOURCE | 36 | #undef _GNU_SOURCE |
36 | #include "util.h" | 37 | #include "util.h" |
@@ -111,7 +112,25 @@ static struct symbol *__find_kernel_function_by_name(const char *name, | |||
111 | NULL); | 112 | NULL); |
112 | } | 113 | } |
113 | 114 | ||
114 | const char *kernel_get_module_path(const char *module) | 115 | static struct map *kernel_get_module_map(const char *module) |
116 | { | ||
117 | struct rb_node *nd; | ||
118 | struct map_groups *grp = &machine.kmaps; | ||
119 | |||
120 | if (!module) | ||
121 | module = "kernel"; | ||
122 | |||
123 | for (nd = rb_first(&grp->maps[MAP__FUNCTION]); nd; nd = rb_next(nd)) { | ||
124 | struct map *pos = rb_entry(nd, struct map, rb_node); | ||
125 | if (strncmp(pos->dso->short_name + 1, module, | ||
126 | pos->dso->short_name_len - 2) == 0) { | ||
127 | return pos; | ||
128 | } | ||
129 | } | ||
130 | return NULL; | ||
131 | } | ||
132 | |||
133 | static struct dso *kernel_get_module_dso(const char *module) | ||
115 | { | 134 | { |
116 | struct dso *dso; | 135 | struct dso *dso; |
117 | struct map *map; | 136 | struct map *map; |
@@ -141,7 +160,13 @@ const char *kernel_get_module_path(const char *module) | |||
141 | } | 160 | } |
142 | } | 161 | } |
143 | found: | 162 | found: |
144 | return dso->long_name; | 163 | return dso; |
164 | } | ||
165 | |||
166 | const char *kernel_get_module_path(const char *module) | ||
167 | { | ||
168 | struct dso *dso = kernel_get_module_dso(module); | ||
169 | return (dso) ? dso->long_name : NULL; | ||
145 | } | 170 | } |
146 | 171 | ||
147 | #ifdef DWARF_SUPPORT | 172 | #ifdef DWARF_SUPPORT |
@@ -384,7 +409,7 @@ int show_line_range(struct line_range *lr, const char *module) | |||
384 | setup_pager(); | 409 | setup_pager(); |
385 | 410 | ||
386 | if (lr->function) | 411 | if (lr->function) |
387 | fprintf(stdout, "<%s:%d>\n", lr->function, | 412 | fprintf(stdout, "<%s@%s:%d>\n", lr->function, lr->path, |
388 | lr->start - lr->offset); | 413 | lr->start - lr->offset); |
389 | else | 414 | else |
390 | fprintf(stdout, "<%s:%d>\n", lr->path, lr->start); | 415 | fprintf(stdout, "<%s:%d>\n", lr->path, lr->start); |
@@ -426,12 +451,14 @@ end: | |||
426 | } | 451 | } |
427 | 452 | ||
428 | static int show_available_vars_at(int fd, struct perf_probe_event *pev, | 453 | static int show_available_vars_at(int fd, struct perf_probe_event *pev, |
429 | int max_vls, bool externs) | 454 | int max_vls, struct strfilter *_filter, |
455 | bool externs) | ||
430 | { | 456 | { |
431 | char *buf; | 457 | char *buf; |
432 | int ret, i; | 458 | int ret, i, nvars; |
433 | struct str_node *node; | 459 | struct str_node *node; |
434 | struct variable_list *vls = NULL, *vl; | 460 | struct variable_list *vls = NULL, *vl; |
461 | const char *var; | ||
435 | 462 | ||
436 | buf = synthesize_perf_probe_point(&pev->point); | 463 | buf = synthesize_perf_probe_point(&pev->point); |
437 | if (!buf) | 464 | if (!buf) |
@@ -439,36 +466,45 @@ static int show_available_vars_at(int fd, struct perf_probe_event *pev, | |||
439 | pr_debug("Searching variables at %s\n", buf); | 466 | pr_debug("Searching variables at %s\n", buf); |
440 | 467 | ||
441 | ret = find_available_vars_at(fd, pev, &vls, max_vls, externs); | 468 | ret = find_available_vars_at(fd, pev, &vls, max_vls, externs); |
442 | if (ret > 0) { | 469 | if (ret <= 0) { |
443 | /* Some variables were found */ | 470 | pr_err("Failed to find variables at %s (%d)\n", buf, ret); |
444 | fprintf(stdout, "Available variables at %s\n", buf); | 471 | goto end; |
445 | for (i = 0; i < ret; i++) { | 472 | } |
446 | vl = &vls[i]; | 473 | /* Some variables are found */ |
447 | /* | 474 | fprintf(stdout, "Available variables at %s\n", buf); |
448 | * A probe point might be converted to | 475 | for (i = 0; i < ret; i++) { |
449 | * several trace points. | 476 | vl = &vls[i]; |
450 | */ | 477 | /* |
451 | fprintf(stdout, "\t@<%s+%lu>\n", vl->point.symbol, | 478 | * A probe point might be converted to |
452 | vl->point.offset); | 479 | * several trace points. |
453 | free(vl->point.symbol); | 480 | */ |
454 | if (vl->vars) { | 481 | fprintf(stdout, "\t@<%s+%lu>\n", vl->point.symbol, |
455 | strlist__for_each(node, vl->vars) | 482 | vl->point.offset); |
483 | free(vl->point.symbol); | ||
484 | nvars = 0; | ||
485 | if (vl->vars) { | ||
486 | strlist__for_each(node, vl->vars) { | ||
487 | var = strchr(node->s, '\t') + 1; | ||
488 | if (strfilter__compare(_filter, var)) { | ||
456 | fprintf(stdout, "\t\t%s\n", node->s); | 489 | fprintf(stdout, "\t\t%s\n", node->s); |
457 | strlist__delete(vl->vars); | 490 | nvars++; |
458 | } else | 491 | } |
459 | fprintf(stdout, "(No variables)\n"); | 492 | } |
493 | strlist__delete(vl->vars); | ||
460 | } | 494 | } |
461 | free(vls); | 495 | if (nvars == 0) |
462 | } else | 496 | fprintf(stdout, "\t\t(No matched variables)\n"); |
463 | pr_err("Failed to find variables at %s (%d)\n", buf, ret); | 497 | } |
464 | 498 | free(vls); | |
499 | end: | ||
465 | free(buf); | 500 | free(buf); |
466 | return ret; | 501 | return ret; |
467 | } | 502 | } |
468 | 503 | ||
469 | /* Show available variables on given probe point */ | 504 | /* Show available variables on given probe point */ |
470 | int show_available_vars(struct perf_probe_event *pevs, int npevs, | 505 | int show_available_vars(struct perf_probe_event *pevs, int npevs, |
471 | int max_vls, const char *module, bool externs) | 506 | int max_vls, const char *module, |
507 | struct strfilter *_filter, bool externs) | ||
472 | { | 508 | { |
473 | int i, fd, ret = 0; | 509 | int i, fd, ret = 0; |
474 | 510 | ||
@@ -485,7 +521,8 @@ int show_available_vars(struct perf_probe_event *pevs, int npevs, | |||
485 | setup_pager(); | 521 | setup_pager(); |
486 | 522 | ||
487 | for (i = 0; i < npevs && ret >= 0; i++) | 523 | for (i = 0; i < npevs && ret >= 0; i++) |
488 | ret = show_available_vars_at(fd, &pevs[i], max_vls, externs); | 524 | ret = show_available_vars_at(fd, &pevs[i], max_vls, _filter, |
525 | externs); | ||
489 | 526 | ||
490 | close(fd); | 527 | close(fd); |
491 | return ret; | 528 | return ret; |
@@ -531,7 +568,9 @@ int show_line_range(struct line_range *lr __unused, const char *module __unused) | |||
531 | 568 | ||
532 | int show_available_vars(struct perf_probe_event *pevs __unused, | 569 | int show_available_vars(struct perf_probe_event *pevs __unused, |
533 | int npevs __unused, int max_vls __unused, | 570 | int npevs __unused, int max_vls __unused, |
534 | const char *module __unused, bool externs __unused) | 571 | const char *module __unused, |
572 | struct strfilter *filter __unused, | ||
573 | bool externs __unused) | ||
535 | { | 574 | { |
536 | pr_warning("Debuginfo-analysis is not supported.\n"); | 575 | pr_warning("Debuginfo-analysis is not supported.\n"); |
537 | return -ENOSYS; | 576 | return -ENOSYS; |
@@ -556,11 +595,11 @@ static int parse_line_num(char **ptr, int *val, const char *what) | |||
556 | * The line range syntax is described by: | 595 | * The line range syntax is described by: |
557 | * | 596 | * |
558 | * SRC[:SLN[+NUM|-ELN]] | 597 | * SRC[:SLN[+NUM|-ELN]] |
559 | * FNC[:SLN[+NUM|-ELN]] | 598 | * FNC[@SRC][:SLN[+NUM|-ELN]] |
560 | */ | 599 | */ |
561 | int parse_line_range_desc(const char *arg, struct line_range *lr) | 600 | int parse_line_range_desc(const char *arg, struct line_range *lr) |
562 | { | 601 | { |
563 | char *range, *name = strdup(arg); | 602 | char *range, *file, *name = strdup(arg); |
564 | int err; | 603 | int err; |
565 | 604 | ||
566 | if (!name) | 605 | if (!name) |
@@ -610,7 +649,16 @@ int parse_line_range_desc(const char *arg, struct line_range *lr) | |||
610 | } | 649 | } |
611 | } | 650 | } |
612 | 651 | ||
613 | if (strchr(name, '.')) | 652 | file = strchr(name, '@'); |
653 | if (file) { | ||
654 | *file = '\0'; | ||
655 | lr->file = strdup(++file); | ||
656 | if (lr->file == NULL) { | ||
657 | err = -ENOMEM; | ||
658 | goto err; | ||
659 | } | ||
660 | lr->function = name; | ||
661 | } else if (strchr(name, '.')) | ||
614 | lr->file = name; | 662 | lr->file = name; |
615 | else | 663 | else |
616 | lr->function = name; | 664 | lr->function = name; |
@@ -1784,9 +1832,12 @@ int add_perf_probe_events(struct perf_probe_event *pevs, int npevs, | |||
1784 | } | 1832 | } |
1785 | 1833 | ||
1786 | /* Loop 2: add all events */ | 1834 | /* Loop 2: add all events */ |
1787 | for (i = 0; i < npevs && ret >= 0; i++) | 1835 | for (i = 0; i < npevs; i++) { |
1788 | ret = __add_probe_trace_events(pkgs[i].pev, pkgs[i].tevs, | 1836 | ret = __add_probe_trace_events(pkgs[i].pev, pkgs[i].tevs, |
1789 | pkgs[i].ntevs, force_add); | 1837 | pkgs[i].ntevs, force_add); |
1838 | if (ret < 0) | ||
1839 | break; | ||
1840 | } | ||
1790 | end: | 1841 | end: |
1791 | /* Loop 3: cleanup and free trace events */ | 1842 | /* Loop 3: cleanup and free trace events */ |
1792 | for (i = 0; i < npevs; i++) { | 1843 | for (i = 0; i < npevs; i++) { |
@@ -1912,4 +1963,46 @@ int del_perf_probe_events(struct strlist *dellist) | |||
1912 | 1963 | ||
1913 | return ret; | 1964 | return ret; |
1914 | } | 1965 | } |
1966 | /* TODO: don't use a global variable for filter ... */ | ||
1967 | static struct strfilter *available_func_filter; | ||
1968 | |||
1969 | /* | ||
1970 | * If a symbol corresponds to a function with global binding and | ||
1971 | * matches the filter, return 0. For all others, return 1. | ||
1972 | */ | ||
1973 | static int filter_available_functions(struct map *map __unused, | ||
1974 | struct symbol *sym) | ||
1975 | { | ||
1976 | if (sym->binding == STB_GLOBAL && | ||
1977 | strfilter__compare(available_func_filter, sym->name)) | ||
1978 | return 0; | ||
1979 | return 1; | ||
1980 | } | ||
1981 | |||
1982 | int show_available_funcs(const char *module, struct strfilter *_filter) | ||
1983 | { | ||
1984 | struct map *map; | ||
1985 | int ret; | ||
1986 | |||
1987 | setup_pager(); | ||
1988 | |||
1989 | ret = init_vmlinux(); | ||
1990 | if (ret < 0) | ||
1991 | return ret; | ||
1915 | 1992 | ||
1993 | map = kernel_get_module_map(module); | ||
1994 | if (!map) { | ||
1995 | pr_err("Failed to find %s map.\n", (module) ? : "kernel"); | ||
1996 | return -EINVAL; | ||
1997 | } | ||
1998 | available_func_filter = _filter; | ||
1999 | if (map__load(map, filter_available_functions)) { | ||
2000 | pr_err("Failed to load map.\n"); | ||
2001 | return -EINVAL; | ||
2002 | } | ||
2003 | if (!dso__sorted_by_name(map->dso, map->type)) | ||
2004 | dso__sort_by_name(map->dso, map->type); | ||
2005 | |||
2006 | dso__fprintf_symbols_by_name(map->dso, map->type, stdout); | ||
2007 | return 0; | ||
2008 | } | ||
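show_available_funcs() above lists only globally bound symbols that pass the user-supplied strfilter. A minimal sketch of that filtering idea, with fnmatch() standing in for strfilter__compare() and a made-up symbol table (the real code walks the kernel or module map loaded via map__load()):

	#include <elf.h>
	#include <fnmatch.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct fake_symbol {		/* stand-in for struct symbol */
		const char *name;
		unsigned char binding;	/* STB_GLOBAL, STB_LOCAL, ... */
	};

	/* Same decision as filter_available_functions(): keep global symbols
	 * whose names match the pattern, drop everything else. */
	static bool keep_symbol(const struct fake_symbol *sym, const char *pattern)
	{
		return sym->binding == STB_GLOBAL &&
		       fnmatch(pattern, sym->name, 0) == 0;
	}

	int main(void)
	{
		const struct fake_symbol syms[] = {
			{ "vfs_read",	  STB_GLOBAL },
			{ "vfs_write",	  STB_GLOBAL },
			{ "do_fork",	  STB_GLOBAL },
			{ "local_helper", STB_LOCAL  },
		};
		const char *pattern = "vfs_*";
		size_t i;

		for (i = 0; i < sizeof(syms) / sizeof(syms[0]); i++)
			if (keep_symbol(&syms[i], pattern))
				printf("%s\n", syms[i].name);	/* vfs_read, vfs_write */
		return 0;
	}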
diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h index 5accbedfea37..3434fc9d79d5 100644 --- a/tools/perf/util/probe-event.h +++ b/tools/perf/util/probe-event.h | |||
@@ -3,6 +3,7 @@ | |||
3 | 3 | ||
4 | #include <stdbool.h> | 4 | #include <stdbool.h> |
5 | #include "strlist.h" | 5 | #include "strlist.h" |
6 | #include "strfilter.h" | ||
6 | 7 | ||
7 | extern bool probe_event_dry_run; | 8 | extern bool probe_event_dry_run; |
8 | 9 | ||
@@ -126,7 +127,8 @@ extern int show_perf_probe_events(void); | |||
126 | extern int show_line_range(struct line_range *lr, const char *module); | 127 | extern int show_line_range(struct line_range *lr, const char *module); |
127 | extern int show_available_vars(struct perf_probe_event *pevs, int npevs, | 128 | extern int show_available_vars(struct perf_probe_event *pevs, int npevs, |
128 | int max_probe_points, const char *module, | 129 | int max_probe_points, const char *module, |
129 | bool externs); | 130 | struct strfilter *filter, bool externs); |
131 | extern int show_available_funcs(const char *module, struct strfilter *filter); | ||
130 | 132 | ||
131 | 133 | ||
132 | /* Maximum index number of event-name postfix */ | 134 | /* Maximum index number of event-name postfix */ |
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index ab83b6ac5d65..194f9e2a3285 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <ctype.h> | 33 | #include <ctype.h> |
34 | #include <dwarf-regs.h> | 34 | #include <dwarf-regs.h> |
35 | 35 | ||
36 | #include <linux/bitops.h> | ||
36 | #include "event.h" | 37 | #include "event.h" |
37 | #include "debug.h" | 38 | #include "debug.h" |
38 | #include "util.h" | 39 | #include "util.h" |
@@ -280,6 +281,19 @@ static bool die_compare_name(Dwarf_Die *dw_die, const char *tname) | |||
280 | return name ? (strcmp(tname, name) == 0) : false; | 281 | return name ? (strcmp(tname, name) == 0) : false; |
281 | } | 282 | } |
282 | 283 | ||
284 | /* Get callsite line number of inline-function instance */ | ||
285 | static int die_get_call_lineno(Dwarf_Die *in_die) | ||
286 | { | ||
287 | Dwarf_Attribute attr; | ||
288 | Dwarf_Word ret; | ||
289 | |||
290 | if (!dwarf_attr(in_die, DW_AT_call_line, &attr)) | ||
291 | return -ENOENT; | ||
292 | |||
293 | dwarf_formudata(&attr, &ret); | ||
294 | return (int)ret; | ||
295 | } | ||
296 | |||
283 | /* Get type die */ | 297 | /* Get type die */ |
284 | static Dwarf_Die *die_get_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem) | 298 | static Dwarf_Die *die_get_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem) |
285 | { | 299 | { |
@@ -320,13 +334,23 @@ static Dwarf_Die *die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem) | |||
320 | return vr_die; | 334 | return vr_die; |
321 | } | 335 | } |
322 | 336 | ||
323 | static bool die_is_signed_type(Dwarf_Die *tp_die) | 337 | static int die_get_attr_udata(Dwarf_Die *tp_die, unsigned int attr_name, |
338 | Dwarf_Word *result) | ||
324 | { | 339 | { |
325 | Dwarf_Attribute attr; | 340 | Dwarf_Attribute attr; |
341 | |||
342 | if (dwarf_attr(tp_die, attr_name, &attr) == NULL || | ||
343 | dwarf_formudata(&attr, result) != 0) | ||
344 | return -ENOENT; | ||
345 | |||
346 | return 0; | ||
347 | } | ||
348 | |||
349 | static bool die_is_signed_type(Dwarf_Die *tp_die) | ||
350 | { | ||
326 | Dwarf_Word ret; | 351 | Dwarf_Word ret; |
327 | 352 | ||
328 | if (dwarf_attr(tp_die, DW_AT_encoding, &attr) == NULL || | 353 | if (die_get_attr_udata(tp_die, DW_AT_encoding, &ret)) |
329 | dwarf_formudata(&attr, &ret) != 0) | ||
330 | return false; | 354 | return false; |
331 | 355 | ||
332 | return (ret == DW_ATE_signed_char || ret == DW_ATE_signed || | 356 | return (ret == DW_ATE_signed_char || ret == DW_ATE_signed || |
@@ -335,11 +359,29 @@ static bool die_is_signed_type(Dwarf_Die *tp_die) | |||
335 | 359 | ||
336 | static int die_get_byte_size(Dwarf_Die *tp_die) | 360 | static int die_get_byte_size(Dwarf_Die *tp_die) |
337 | { | 361 | { |
338 | Dwarf_Attribute attr; | ||
339 | Dwarf_Word ret; | 362 | Dwarf_Word ret; |
340 | 363 | ||
341 | if (dwarf_attr(tp_die, DW_AT_byte_size, &attr) == NULL || | 364 | if (die_get_attr_udata(tp_die, DW_AT_byte_size, &ret)) |
342 | dwarf_formudata(&attr, &ret) != 0) | 365 | return 0; |
366 | |||
367 | return (int)ret; | ||
368 | } | ||
369 | |||
370 | static int die_get_bit_size(Dwarf_Die *tp_die) | ||
371 | { | ||
372 | Dwarf_Word ret; | ||
373 | |||
374 | if (die_get_attr_udata(tp_die, DW_AT_bit_size, &ret)) | ||
375 | return 0; | ||
376 | |||
377 | return (int)ret; | ||
378 | } | ||
379 | |||
380 | static int die_get_bit_offset(Dwarf_Die *tp_die) | ||
381 | { | ||
382 | Dwarf_Word ret; | ||
383 | |||
384 | if (die_get_attr_udata(tp_die, DW_AT_bit_offset, &ret)) | ||
343 | return 0; | 385 | return 0; |
344 | 386 | ||
345 | return (int)ret; | 387 | return (int)ret; |
@@ -458,6 +500,151 @@ static Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr, | |||
458 | return die_find_child(sp_die, __die_find_inline_cb, &addr, die_mem); | 500 | return die_find_child(sp_die, __die_find_inline_cb, &addr, die_mem); |
459 | } | 501 | } |
460 | 502 | ||
503 | /* Walker on lines (Note: line numbers will not be sorted) */ | ||
504 | typedef int (* line_walk_handler_t) (const char *fname, int lineno, | ||
505 | Dwarf_Addr addr, void *data); | ||
506 | |||
507 | struct __line_walk_param { | ||
508 | const char *fname; | ||
509 | line_walk_handler_t handler; | ||
510 | void *data; | ||
511 | int retval; | ||
512 | }; | ||
513 | |||
514 | static int __die_walk_funclines_cb(Dwarf_Die *in_die, void *data) | ||
515 | { | ||
516 | struct __line_walk_param *lw = data; | ||
517 | Dwarf_Addr addr; | ||
518 | int lineno; | ||
519 | |||
520 | if (dwarf_tag(in_die) == DW_TAG_inlined_subroutine) { | ||
521 | lineno = die_get_call_lineno(in_die); | ||
522 | if (lineno > 0 && dwarf_entrypc(in_die, &addr) == 0) { | ||
523 | lw->retval = lw->handler(lw->fname, lineno, addr, | ||
524 | lw->data); | ||
525 | if (lw->retval != 0) | ||
526 | return DIE_FIND_CB_FOUND; | ||
527 | } | ||
528 | } | ||
529 | return DIE_FIND_CB_SIBLING; | ||
530 | } | ||
531 | |||
532 | /* Walk on lines of blocks included in given DIE */ | ||
533 | static int __die_walk_funclines(Dwarf_Die *sp_die, | ||
534 | line_walk_handler_t handler, void *data) | ||
535 | { | ||
536 | struct __line_walk_param lw = { | ||
537 | .handler = handler, | ||
538 | .data = data, | ||
539 | .retval = 0, | ||
540 | }; | ||
541 | Dwarf_Die die_mem; | ||
542 | Dwarf_Addr addr; | ||
543 | int lineno; | ||
544 | |||
545 | /* Handle function declaration line */ | ||
546 | lw.fname = dwarf_decl_file(sp_die); | ||
547 | if (lw.fname && dwarf_decl_line(sp_die, &lineno) == 0 && | ||
548 | dwarf_entrypc(sp_die, &addr) == 0) { | ||
549 | lw.retval = handler(lw.fname, lineno, addr, data); | ||
550 | if (lw.retval != 0) | ||
551 | goto done; | ||
552 | } | ||
553 | die_find_child(sp_die, __die_walk_funclines_cb, &lw, &die_mem); | ||
554 | done: | ||
555 | return lw.retval; | ||
556 | } | ||
557 | |||
558 | static int __die_walk_culines_cb(Dwarf_Die *sp_die, void *data) | ||
559 | { | ||
560 | struct __line_walk_param *lw = data; | ||
561 | |||
562 | lw->retval = __die_walk_funclines(sp_die, lw->handler, lw->data); | ||
563 | if (lw->retval != 0) | ||
564 | return DWARF_CB_ABORT; | ||
565 | |||
566 | return DWARF_CB_OK; | ||
567 | } | ||
568 | |||
569 | /* | ||
570 | * Walk on lines inside the given PDIE. If the PDIE is a subprogram, walk | ||
571 | * only on the lines inside the subprogram; otherwise PDIE must be a CU DIE. | ||
572 | */ | ||
573 | static int die_walk_lines(Dwarf_Die *pdie, line_walk_handler_t handler, | ||
574 | void *data) | ||
575 | { | ||
576 | Dwarf_Lines *lines; | ||
577 | Dwarf_Line *line; | ||
578 | Dwarf_Addr addr; | ||
579 | const char *fname; | ||
580 | int lineno, ret = 0; | ||
581 | Dwarf_Die die_mem, *cu_die; | ||
582 | size_t nlines, i; | ||
583 | |||
584 | /* Get the CU die */ | ||
585 | if (dwarf_tag(pdie) == DW_TAG_subprogram) | ||
586 | cu_die = dwarf_diecu(pdie, &die_mem, NULL, NULL); | ||
587 | else | ||
588 | cu_die = pdie; | ||
589 | if (!cu_die) { | ||
590 | pr_debug2("Failed to get CU from subprogram\n"); | ||
591 | return -EINVAL; | ||
592 | } | ||
593 | |||
594 | /* Get lines list in the CU */ | ||
595 | if (dwarf_getsrclines(cu_die, &lines, &nlines) != 0) { | ||
596 | pr_debug2("Failed to get source lines on this CU.\n"); | ||
597 | return -ENOENT; | ||
598 | } | ||
599 | pr_debug2("Get %zd lines from this CU\n", nlines); | ||
600 | |||
601 | /* Walk on the lines on lines list */ | ||
602 | for (i = 0; i < nlines; i++) { | ||
603 | line = dwarf_onesrcline(lines, i); | ||
604 | if (line == NULL || | ||
605 | dwarf_lineno(line, &lineno) != 0 || | ||
606 | dwarf_lineaddr(line, &addr) != 0) { | ||
607 | pr_debug2("Failed to get line info. " | ||
608 | "Possible error in debuginfo.\n"); | ||
609 | continue; | ||
610 | } | ||
611 | /* Filter lines based on address */ | ||
612 | if (pdie != cu_die) | ||
613 | /* | ||
614 | * Address filtering | ||
615 | * The line is included in given function, and | ||
616 | * no inline block includes it. | ||
617 | */ | ||
618 | if (!dwarf_haspc(pdie, addr) || | ||
619 | die_find_inlinefunc(pdie, addr, &die_mem)) | ||
620 | continue; | ||
621 | /* Get source line */ | ||
622 | fname = dwarf_linesrc(line, NULL, NULL); | ||
623 | |||
624 | ret = handler(fname, lineno, addr, data); | ||
625 | if (ret != 0) | ||
626 | return ret; | ||
627 | } | ||
628 | |||
629 | /* | ||
630 | * DWARF lines don't include function declarations and inlined | ||
631 | * subroutines. We have to check the function list or the given function. | ||
632 | */ | ||
633 | if (pdie != cu_die) | ||
634 | ret = __die_walk_funclines(pdie, handler, data); | ||
635 | else { | ||
636 | struct __line_walk_param param = { | ||
637 | .handler = handler, | ||
638 | .data = data, | ||
639 | .retval = 0, | ||
640 | }; | ||
641 | dwarf_getfuncs(cu_die, __die_walk_culines_cb, ¶m, 0); | ||
642 | ret = param.retval; | ||
643 | } | ||
644 | |||
645 | return ret; | ||
646 | } | ||
647 | |||
461 | struct __find_variable_param { | 648 | struct __find_variable_param { |
462 | const char *name; | 649 | const char *name; |
463 | Dwarf_Addr addr; | 650 | Dwarf_Addr addr; |
@@ -669,6 +856,8 @@ static_var: | |||
669 | return 0; | 856 | return 0; |
670 | } | 857 | } |
671 | 858 | ||
859 | #define BYTES_TO_BITS(nb) ((nb) * BITS_PER_LONG / sizeof(long)) | ||
860 | |||
672 | static int convert_variable_type(Dwarf_Die *vr_die, | 861 | static int convert_variable_type(Dwarf_Die *vr_die, |
673 | struct probe_trace_arg *tvar, | 862 | struct probe_trace_arg *tvar, |
674 | const char *cast) | 863 | const char *cast) |
@@ -685,6 +874,14 @@ static int convert_variable_type(Dwarf_Die *vr_die, | |||
685 | return (tvar->type == NULL) ? -ENOMEM : 0; | 874 | return (tvar->type == NULL) ? -ENOMEM : 0; |
686 | } | 875 | } |
687 | 876 | ||
877 | if (die_get_bit_size(vr_die) != 0) { | ||
878 | /* This is a bitfield */ | ||
879 | ret = snprintf(buf, 16, "b%d@%d/%zd", die_get_bit_size(vr_die), | ||
880 | die_get_bit_offset(vr_die), | ||
881 | BYTES_TO_BITS(die_get_byte_size(vr_die))); | ||
882 | goto formatted; | ||
883 | } | ||
884 | |||
688 | if (die_get_real_type(vr_die, &type) == NULL) { | 885 | if (die_get_real_type(vr_die, &type) == NULL) { |
689 | pr_warning("Failed to get a type information of %s.\n", | 886 | pr_warning("Failed to get a type information of %s.\n", |
690 | dwarf_diename(vr_die)); | 887 | dwarf_diename(vr_die)); |
@@ -729,29 +926,31 @@ static int convert_variable_type(Dwarf_Die *vr_die, | |||
729 | return (tvar->type == NULL) ? -ENOMEM : 0; | 926 | return (tvar->type == NULL) ? -ENOMEM : 0; |
730 | } | 927 | } |
731 | 928 | ||
732 | ret = die_get_byte_size(&type) * 8; | 929 | ret = BYTES_TO_BITS(die_get_byte_size(&type)); |
733 | if (ret) { | 930 | if (!ret) |
734 | /* Check the bitwidth */ | 931 | /* No size ... try to use default type */ |
735 | if (ret > MAX_BASIC_TYPE_BITS) { | 932 | return 0; |
736 | pr_info("%s exceeds max-bitwidth." | ||
737 | " Cut down to %d bits.\n", | ||
738 | dwarf_diename(&type), MAX_BASIC_TYPE_BITS); | ||
739 | ret = MAX_BASIC_TYPE_BITS; | ||
740 | } | ||
741 | 933 | ||
742 | ret = snprintf(buf, 16, "%c%d", | 934 | /* Check the bitwidth */ |
743 | die_is_signed_type(&type) ? 's' : 'u', ret); | 935 | if (ret > MAX_BASIC_TYPE_BITS) { |
744 | if (ret < 0 || ret >= 16) { | 936 | pr_info("%s exceeds max-bitwidth. Cut down to %d bits.\n", |
745 | if (ret >= 16) | 937 | dwarf_diename(&type), MAX_BASIC_TYPE_BITS); |
746 | ret = -E2BIG; | 938 | ret = MAX_BASIC_TYPE_BITS; |
747 | pr_warning("Failed to convert variable type: %s\n", | 939 | } |
748 | strerror(-ret)); | 940 | ret = snprintf(buf, 16, "%c%d", |
749 | return ret; | 941 | die_is_signed_type(&type) ? 's' : 'u', ret); |
750 | } | 942 | |
751 | tvar->type = strdup(buf); | 943 | formatted: |
752 | if (tvar->type == NULL) | 944 | if (ret < 0 || ret >= 16) { |
753 | return -ENOMEM; | 945 | if (ret >= 16) |
946 | ret = -E2BIG; | ||
947 | pr_warning("Failed to convert variable type: %s\n", | ||
948 | strerror(-ret)); | ||
949 | return ret; | ||
754 | } | 950 | } |
951 | tvar->type = strdup(buf); | ||
952 | if (tvar->type == NULL) | ||
953 | return -ENOMEM; | ||
755 | return 0; | 954 | return 0; |
756 | } | 955 | } |
757 | 956 | ||
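The bitfield branch added to convert_variable_type() above emits the kprobe-tracer fetch-arg format b<bit-width>@<bit-offset>/<container-size>. A small worked example with made-up DWARF values (a 3-bit field at bit offset 5 in a 4-byte container), using a simplified BYTES_TO_BITS that matches the macro above on typical systems where BITS_PER_LONG equals 8 * sizeof(long):

	#include <stdio.h>

	#define BYTES_TO_BITS(nb) ((nb) * 8)	/* simplified stand-in for the macro above */

	int main(void)
	{
		char buf[16];
		int bit_size = 3, bit_offset = 5, byte_size = 4;	/* made-up values */

		snprintf(buf, sizeof(buf), "b%d@%d/%d",
			 bit_size, bit_offset, BYTES_TO_BITS(byte_size));
		printf("%s\n", buf);	/* prints "b3@5/32" */
		return 0;
	}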
@@ -1050,157 +1249,102 @@ static int call_probe_finder(Dwarf_Die *sp_die, struct probe_finder *pf) | |||
1050 | return ret; | 1249 | return ret; |
1051 | } | 1250 | } |
1052 | 1251 | ||
1053 | /* Find probe point from its line number */ | 1252 | static int probe_point_line_walker(const char *fname, int lineno, |
1054 | static int find_probe_point_by_line(struct probe_finder *pf) | 1253 | Dwarf_Addr addr, void *data) |
1055 | { | 1254 | { |
1056 | Dwarf_Lines *lines; | 1255 | struct probe_finder *pf = data; |
1057 | Dwarf_Line *line; | 1256 | int ret; |
1058 | size_t nlines, i; | ||
1059 | Dwarf_Addr addr; | ||
1060 | int lineno; | ||
1061 | int ret = 0; | ||
1062 | |||
1063 | if (dwarf_getsrclines(&pf->cu_die, &lines, &nlines) != 0) { | ||
1064 | pr_warning("No source lines found.\n"); | ||
1065 | return -ENOENT; | ||
1066 | } | ||
1067 | 1257 | ||
1068 | for (i = 0; i < nlines && ret == 0; i++) { | 1258 | if (lineno != pf->lno || strtailcmp(fname, pf->fname) != 0) |
1069 | line = dwarf_onesrcline(lines, i); | 1259 | return 0; |
1070 | if (dwarf_lineno(line, &lineno) != 0 || | ||
1071 | lineno != pf->lno) | ||
1072 | continue; | ||
1073 | 1260 | ||
1074 | /* TODO: Get fileno from line, but how? */ | 1261 | pf->addr = addr; |
1075 | if (strtailcmp(dwarf_linesrc(line, NULL, NULL), pf->fname) != 0) | 1262 | ret = call_probe_finder(NULL, pf); |
1076 | continue; | ||
1077 | 1263 | ||
1078 | if (dwarf_lineaddr(line, &addr) != 0) { | 1264 | /* Continue if no error, because the line will be in inline function */ |
1079 | pr_warning("Failed to get the address of the line.\n"); | 1265 | return ret < 0 ? ret : 0; |
1080 | return -ENOENT; | 1266 | } |
1081 | } | ||
1082 | pr_debug("Probe line found: line[%d]:%d addr:0x%jx\n", | ||
1083 | (int)i, lineno, (uintmax_t)addr); | ||
1084 | pf->addr = addr; | ||
1085 | 1267 | ||
1086 | ret = call_probe_finder(NULL, pf); | 1268 | /* Find probe point from its line number */ |
1087 | /* Continuing, because target line might be inlined. */ | 1269 | static int find_probe_point_by_line(struct probe_finder *pf) |
1088 | } | 1270 | { |
1089 | return ret; | 1271 | return die_walk_lines(&pf->cu_die, probe_point_line_walker, pf); |
1090 | } | 1272 | } |
1091 | 1273 | ||
1092 | /* Find lines which match lazy pattern */ | 1274 | /* Find lines which match lazy pattern */ |
1093 | static int find_lazy_match_lines(struct list_head *head, | 1275 | static int find_lazy_match_lines(struct list_head *head, |
1094 | const char *fname, const char *pat) | 1276 | const char *fname, const char *pat) |
1095 | { | 1277 | { |
1096 | char *fbuf, *p1, *p2; | 1278 | FILE *fp; |
1097 | int fd, line, nlines = -1; | 1279 | char *line = NULL; |
1098 | struct stat st; | 1280 | size_t line_len; |
1099 | 1281 | ssize_t len; | |
1100 | fd = open(fname, O_RDONLY); | 1282 | int count = 0, linenum = 1; |
1101 | if (fd < 0) { | 1283 | |
1102 | pr_warning("Failed to open %s: %s\n", fname, strerror(-fd)); | 1284 | fp = fopen(fname, "r"); |
1285 | if (!fp) { | ||
1286 | pr_warning("Failed to open %s: %s\n", fname, strerror(errno)); | ||
1103 | return -errno; | 1287 | return -errno; |
1104 | } | 1288 | } |
1105 | 1289 | ||
1106 | if (fstat(fd, &st) < 0) { | 1290 | while ((len = getline(&line, &line_len, fp)) > 0) { |
1107 | pr_warning("Failed to get the size of %s: %s\n", | 1291 | |
1108 | fname, strerror(errno)); | 1292 | if (line[len - 1] == '\n') |
1109 | nlines = -errno; | 1293 | line[len - 1] = '\0'; |
1110 | goto out_close; | 1294 | |
1111 | } | 1295 | if (strlazymatch(line, pat)) { |
1112 | 1296 | line_list__add_line(head, linenum); | |
1113 | nlines = -ENOMEM; | 1297 | count++; |
1114 | fbuf = malloc(st.st_size + 2); | ||
1115 | if (fbuf == NULL) | ||
1116 | goto out_close; | ||
1117 | if (read(fd, fbuf, st.st_size) < 0) { | ||
1118 | pr_warning("Failed to read %s: %s\n", fname, strerror(errno)); | ||
1119 | nlines = -errno; | ||
1120 | goto out_free_fbuf; | ||
1121 | } | ||
1122 | fbuf[st.st_size] = '\n'; /* Dummy line */ | ||
1123 | fbuf[st.st_size + 1] = '\0'; | ||
1124 | p1 = fbuf; | ||
1125 | line = 1; | ||
1126 | nlines = 0; | ||
1127 | while ((p2 = strchr(p1, '\n')) != NULL) { | ||
1128 | *p2 = '\0'; | ||
1129 | if (strlazymatch(p1, pat)) { | ||
1130 | line_list__add_line(head, line); | ||
1131 | nlines++; | ||
1132 | } | 1298 | } |
1133 | line++; | 1299 | linenum++; |
1134 | p1 = p2 + 1; | ||
1135 | } | 1300 | } |
1136 | out_free_fbuf: | 1301 | |
1137 | free(fbuf); | 1302 | if (ferror(fp)) |
1138 | out_close: | 1303 | count = -errno; |
1139 | close(fd); | 1304 | free(line); |
1140 | return nlines; | 1305 | fclose(fp); |
1306 | |||
1307 | if (count == 0) | ||
1308 | pr_debug("No matched lines found in %s.\n", fname); | ||
1309 | return count; | ||
1310 | } | ||
1311 | |||
1312 | static int probe_point_lazy_walker(const char *fname, int lineno, | ||
1313 | Dwarf_Addr addr, void *data) | ||
1314 | { | ||
1315 | struct probe_finder *pf = data; | ||
1316 | int ret; | ||
1317 | |||
1318 | if (!line_list__has_line(&pf->lcache, lineno) || | ||
1319 | strtailcmp(fname, pf->fname) != 0) | ||
1320 | return 0; | ||
1321 | |||
1322 | pr_debug("Probe line found: line:%d addr:0x%llx\n", | ||
1323 | lineno, (unsigned long long)addr); | ||
1324 | pf->addr = addr; | ||
1325 | ret = call_probe_finder(NULL, pf); | ||
1326 | |||
1327 | /* | ||
1328 | * Continue if no error, because the lazy pattern will match | ||
1329 | * to other lines | ||
1330 | */ | ||
1331 | return ret < 0 ? ret : 0; | ||
1141 | } | 1332 | } |
1142 | 1333 | ||
1143 | /* Find probe points from lazy pattern */ | 1334 | /* Find probe points from lazy pattern */ |
1144 | static int find_probe_point_lazy(Dwarf_Die *sp_die, struct probe_finder *pf) | 1335 | static int find_probe_point_lazy(Dwarf_Die *sp_die, struct probe_finder *pf) |
1145 | { | 1336 | { |
1146 | Dwarf_Lines *lines; | ||
1147 | Dwarf_Line *line; | ||
1148 | size_t nlines, i; | ||
1149 | Dwarf_Addr addr; | ||
1150 | Dwarf_Die die_mem; | ||
1151 | int lineno; | ||
1152 | int ret = 0; | 1337 | int ret = 0; |
1153 | 1338 | ||
1154 | if (list_empty(&pf->lcache)) { | 1339 | if (list_empty(&pf->lcache)) { |
1155 | /* Matching lazy line pattern */ | 1340 | /* Matching lazy line pattern */ |
1156 | ret = find_lazy_match_lines(&pf->lcache, pf->fname, | 1341 | ret = find_lazy_match_lines(&pf->lcache, pf->fname, |
1157 | pf->pev->point.lazy_line); | 1342 | pf->pev->point.lazy_line); |
1158 | if (ret == 0) { | 1343 | if (ret <= 0) |
1159 | pr_debug("No matched lines found in %s.\n", pf->fname); | ||
1160 | return 0; | ||
1161 | } else if (ret < 0) | ||
1162 | return ret; | 1344 | return ret; |
1163 | } | 1345 | } |
1164 | 1346 | ||
1165 | if (dwarf_getsrclines(&pf->cu_die, &lines, &nlines) != 0) { | 1347 | return die_walk_lines(sp_die, probe_point_lazy_walker, pf); |
1166 | pr_warning("No source lines found.\n"); | ||
1167 | return -ENOENT; | ||
1168 | } | ||
1169 | |||
1170 | for (i = 0; i < nlines && ret >= 0; i++) { | ||
1171 | line = dwarf_onesrcline(lines, i); | ||
1172 | |||
1173 | if (dwarf_lineno(line, &lineno) != 0 || | ||
1174 | !line_list__has_line(&pf->lcache, lineno)) | ||
1175 | continue; | ||
1176 | |||
1177 | /* TODO: Get fileno from line, but how? */ | ||
1178 | if (strtailcmp(dwarf_linesrc(line, NULL, NULL), pf->fname) != 0) | ||
1179 | continue; | ||
1180 | |||
1181 | if (dwarf_lineaddr(line, &addr) != 0) { | ||
1182 | pr_debug("Failed to get the address of line %d.\n", | ||
1183 | lineno); | ||
1184 | continue; | ||
1185 | } | ||
1186 | if (sp_die) { | ||
1187 | /* Address filtering 1: does sp_die include addr? */ | ||
1188 | if (!dwarf_haspc(sp_die, addr)) | ||
1189 | continue; | ||
1190 | /* Address filtering 2: No child include addr? */ | ||
1191 | if (die_find_inlinefunc(sp_die, addr, &die_mem)) | ||
1192 | continue; | ||
1193 | } | ||
1194 | |||
1195 | pr_debug("Probe line found: line[%d]:%d addr:0x%llx\n", | ||
1196 | (int)i, lineno, (unsigned long long)addr); | ||
1197 | pf->addr = addr; | ||
1198 | |||
1199 | ret = call_probe_finder(sp_die, pf); | ||
1200 | /* Continuing, because target line might be inlined. */ | ||
1201 | } | ||
1202 | /* TODO: deallocate lines, but how? */ | ||
1203 | return ret; | ||
1204 | } | 1348 | } |
1205 | 1349 | ||
1206 | /* Callback parameter with return value */ | 1350 | /* Callback parameter with return value */ |
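find_lazy_match_lines() above now reads the source with getline() instead of slurping the whole file with read() and splitting on '\n' by hand: getline() grows its buffer as needed and returns the line length, so only the trailing newline has to be stripped. A minimal sketch of that loop, with a plain substring test standing in for strlazymatch() and an arbitrary default file and pattern:

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/types.h>

	/* Stand-in for strlazymatch(): report lines containing the pattern. */
	static int match_line(const char *line, const char *pat)
	{
		return strstr(line, pat) != NULL;
	}

	int main(int argc, char **argv)
	{
		const char *fname = argc > 1 ? argv[1] : "/etc/hostname";
		const char *pat = argc > 2 ? argv[2] : "";
		char *line = NULL;
		size_t buflen = 0;
		ssize_t len;
		int lineno = 1, count = 0;
		FILE *fp = fopen(fname, "r");

		if (!fp)
			return 1;
		while ((len = getline(&line, &buflen, fp)) > 0) {
			if (line[len - 1] == '\n')
				line[len - 1] = '\0';	/* strip newline before matching */
			if (match_line(line, pat)) {
				printf("%d: %s\n", lineno, line);
				count++;
			}
			lineno++;
		}
		free(line);
		fclose(fp);
		return count ? 0 : 1;
	}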
@@ -1318,8 +1462,7 @@ static int find_probes(int fd, struct probe_finder *pf) | |||
1318 | off = 0; | 1462 | off = 0; |
1319 | line_list__init(&pf->lcache); | 1463 | line_list__init(&pf->lcache); |
1320 | /* Loop on CUs (Compilation Unit) */ | 1464 | /* Loop on CUs (Compilation Unit) */ |
1321 | while (!dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL) && | 1465 | while (!dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL)) { |
1322 | ret >= 0) { | ||
1323 | /* Get the DIE(Debugging Information Entry) of this CU */ | 1466 | /* Get the DIE(Debugging Information Entry) of this CU */ |
1324 | diep = dwarf_offdie(dbg, off + cuhl, &pf->cu_die); | 1467 | diep = dwarf_offdie(dbg, off + cuhl, &pf->cu_die); |
1325 | if (!diep) | 1468 | if (!diep) |
@@ -1340,6 +1483,8 @@ static int find_probes(int fd, struct probe_finder *pf) | |||
1340 | pf->lno = pp->line; | 1483 | pf->lno = pp->line; |
1341 | ret = find_probe_point_by_line(pf); | 1484 | ret = find_probe_point_by_line(pf); |
1342 | } | 1485 | } |
1486 | if (ret < 0) | ||
1487 | break; | ||
1343 | } | 1488 | } |
1344 | off = noff; | 1489 | off = noff; |
1345 | } | 1490 | } |
@@ -1644,91 +1789,28 @@ static int line_range_add_line(const char *src, unsigned int lineno, | |||
1644 | return line_list__add_line(&lr->line_list, lineno); | 1789 | return line_list__add_line(&lr->line_list, lineno); |
1645 | } | 1790 | } |
1646 | 1791 | ||
1647 | /* Search function declaration lines */ | 1792 | static int line_range_walk_cb(const char *fname, int lineno, |
1648 | static int line_range_funcdecl_cb(Dwarf_Die *sp_die, void *data) | 1793 | Dwarf_Addr addr __used, |
1794 | void *data) | ||
1649 | { | 1795 | { |
1650 | struct dwarf_callback_param *param = data; | 1796 | struct line_finder *lf = data; |
1651 | struct line_finder *lf = param->data; | ||
1652 | const char *src; | ||
1653 | int lineno; | ||
1654 | 1797 | ||
1655 | src = dwarf_decl_file(sp_die); | 1798 | if ((strtailcmp(fname, lf->fname) != 0) || |
1656 | if (src && strtailcmp(src, lf->fname) != 0) | ||
1657 | return DWARF_CB_OK; | ||
1658 | |||
1659 | if (dwarf_decl_line(sp_die, &lineno) != 0 || | ||
1660 | (lf->lno_s > lineno || lf->lno_e < lineno)) | 1799 | (lf->lno_s > lineno || lf->lno_e < lineno)) |
1661 | return DWARF_CB_OK; | 1800 | return 0; |
1662 | 1801 | ||
1663 | param->retval = line_range_add_line(src, lineno, lf->lr); | 1802 | if (line_range_add_line(fname, lineno, lf->lr) < 0) |
1664 | if (param->retval < 0) | 1803 | return -EINVAL; |
1665 | return DWARF_CB_ABORT; | ||
1666 | return DWARF_CB_OK; | ||
1667 | } | ||
1668 | 1804 | ||
1669 | static int find_line_range_func_decl_lines(struct line_finder *lf) | 1805 | return 0; |
1670 | { | ||
1671 | struct dwarf_callback_param param = {.data = (void *)lf, .retval = 0}; | ||
1672 | dwarf_getfuncs(&lf->cu_die, line_range_funcdecl_cb, ¶m, 0); | ||
1673 | return param.retval; | ||
1674 | } | 1806 | } |
1675 | 1807 | ||
1676 | /* Find line range from its line number */ | 1808 | /* Find line range from its line number */ |
1677 | static int find_line_range_by_line(Dwarf_Die *sp_die, struct line_finder *lf) | 1809 | static int find_line_range_by_line(Dwarf_Die *sp_die, struct line_finder *lf) |
1678 | { | 1810 | { |
1679 | Dwarf_Lines *lines; | 1811 | int ret; |
1680 | Dwarf_Line *line; | ||
1681 | size_t nlines, i; | ||
1682 | Dwarf_Addr addr; | ||
1683 | int lineno, ret = 0; | ||
1684 | const char *src; | ||
1685 | Dwarf_Die die_mem; | ||
1686 | |||
1687 | line_list__init(&lf->lr->line_list); | ||
1688 | if (dwarf_getsrclines(&lf->cu_die, &lines, &nlines) != 0) { | ||
1689 | pr_warning("No source lines found.\n"); | ||
1690 | return -ENOENT; | ||
1691 | } | ||
1692 | |||
1693 | /* Search probable lines on lines list */ | ||
1694 | for (i = 0; i < nlines; i++) { | ||
1695 | line = dwarf_onesrcline(lines, i); | ||
1696 | if (dwarf_lineno(line, &lineno) != 0 || | ||
1697 | (lf->lno_s > lineno || lf->lno_e < lineno)) | ||
1698 | continue; | ||
1699 | |||
1700 | if (sp_die) { | ||
1701 | /* Address filtering 1: does sp_die include addr? */ | ||
1702 | if (dwarf_lineaddr(line, &addr) != 0 || | ||
1703 | !dwarf_haspc(sp_die, addr)) | ||
1704 | continue; | ||
1705 | |||
1706 | /* Address filtering 2: No child include addr? */ | ||
1707 | if (die_find_inlinefunc(sp_die, addr, &die_mem)) | ||
1708 | continue; | ||
1709 | } | ||
1710 | |||
1711 | /* TODO: Get fileno from line, but how? */ | ||
1712 | src = dwarf_linesrc(line, NULL, NULL); | ||
1713 | if (strtailcmp(src, lf->fname) != 0) | ||
1714 | continue; | ||
1715 | |||
1716 | ret = line_range_add_line(src, lineno, lf->lr); | ||
1717 | if (ret < 0) | ||
1718 | return ret; | ||
1719 | } | ||
1720 | 1812 | ||
1721 | /* | 1813 | ret = die_walk_lines(sp_die ?: &lf->cu_die, line_range_walk_cb, lf); |
1722 | * Dwarf lines doesn't include function declarations. We have to | ||
1723 | * check functions list or given function. | ||
1724 | */ | ||
1725 | if (sp_die) { | ||
1726 | src = dwarf_decl_file(sp_die); | ||
1727 | if (src && dwarf_decl_line(sp_die, &lineno) == 0 && | ||
1728 | (lf->lno_s <= lineno && lf->lno_e >= lineno)) | ||
1729 | ret = line_range_add_line(src, lineno, lf->lr); | ||
1730 | } else | ||
1731 | ret = find_line_range_func_decl_lines(lf); | ||
1732 | 1814 | ||
1733 | /* Update status */ | 1815 | /* Update status */ |
1734 | if (ret >= 0) | 1816 | if (ret >= 0) |
@@ -1758,9 +1840,6 @@ static int line_range_search_cb(Dwarf_Die *sp_die, void *data) | |||
1758 | struct line_finder *lf = param->data; | 1840 | struct line_finder *lf = param->data; |
1759 | struct line_range *lr = lf->lr; | 1841 | struct line_range *lr = lf->lr; |
1760 | 1842 | ||
1761 | pr_debug("find (%llx) %s\n", | ||
1762 | (unsigned long long)dwarf_dieoffset(sp_die), | ||
1763 | dwarf_diename(sp_die)); | ||
1764 | if (dwarf_tag(sp_die) == DW_TAG_subprogram && | 1843 | if (dwarf_tag(sp_die) == DW_TAG_subprogram && |
1765 | die_compare_name(sp_die, lr->function)) { | 1844 | die_compare_name(sp_die, lr->function)) { |
1766 | lf->fname = dwarf_decl_file(sp_die); | 1845 | lf->fname = dwarf_decl_file(sp_die); |
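Most of the open-coded dwarf_getsrclines() loops in this file are replaced by the single die_walk_lines() walker plus small line_walk_handler_t callbacks (probe_point_line_walker, probe_point_lazy_walker, line_range_walk_cb); the walker also visits function declaration lines and inline call sites, which the raw DWARF line table does not carry. A self-contained sketch of the pattern, with a made-up line table in place of libdw:

	#include <stdio.h>
	#include <string.h>

	typedef int (*line_walk_handler_t)(const char *fname, int lineno,
					   unsigned long addr, void *data);

	struct src_line { const char *fname; int lineno; unsigned long addr; };

	/* The walker owns the iteration; a non-zero handler return stops it. */
	static int walk_lines(const struct src_line *tbl, int n,
			      line_walk_handler_t handler, void *data)
	{
		int i, ret = 0;

		for (i = 0; i < n; i++) {
			ret = handler(tbl[i].fname, tbl[i].lineno, tbl[i].addr, data);
			if (ret != 0)
				break;
		}
		return ret;
	}

	/* Handler in the spirit of probe_point_line_walker(): record the address
	 * of the requested file:line and stop the walk. */
	struct match { const char *fname; int lineno; unsigned long addr; };

	static int find_line_cb(const char *fname, int lineno,
				unsigned long addr, void *data)
	{
		struct match *m = data;

		if (lineno != m->lineno || strcmp(fname, m->fname) != 0)
			return 0;	/* keep walking */
		m->addr = addr;
		return 1;		/* found */
	}

	int main(void)
	{
		const struct src_line tbl[] = {
			{ "sched.c", 100, 0x1000 },	/* made-up line table */
			{ "sched.c", 103, 0x1010 },
			{ "sched.c", 110, 0x1040 },
		};
		struct match m = { .fname = "sched.c", .lineno = 103 };

		if (walk_lines(tbl, 3, find_line_cb, &m))
			printf("%s:%d is at 0x%lx\n", m.fname, m.lineno, m.addr);
		return 0;
	}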
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c new file mode 100644 index 000000000000..a9f2d7e1204d --- /dev/null +++ b/tools/perf/util/python.c | |||
@@ -0,0 +1,896 @@ | |||
1 | #include <Python.h> | ||
2 | #include <structmember.h> | ||
3 | #include <inttypes.h> | ||
4 | #include <poll.h> | ||
5 | #include "evlist.h" | ||
6 | #include "evsel.h" | ||
7 | #include "event.h" | ||
8 | #include "cpumap.h" | ||
9 | #include "thread_map.h" | ||
10 | |||
11 | /* Define PyVarObject_HEAD_INIT for python 2.5 */ | ||
12 | #ifndef PyVarObject_HEAD_INIT | ||
13 | # define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size, | ||
14 | #endif | ||
15 | |||
16 | struct throttle_event { | ||
17 | struct perf_event_header header; | ||
18 | u64 time; | ||
19 | u64 id; | ||
20 | u64 stream_id; | ||
21 | }; | ||
22 | |||
23 | PyMODINIT_FUNC initperf(void); | ||
24 | |||
25 | #define member_def(type, member, ptype, help) \ | ||
26 | { #member, ptype, \ | ||
27 | offsetof(struct pyrf_event, event) + offsetof(struct type, member), \ | ||
28 | 0, help } | ||
29 | |||
30 | #define sample_member_def(name, member, ptype, help) \ | ||
31 | { #name, ptype, \ | ||
32 | offsetof(struct pyrf_event, sample) + offsetof(struct perf_sample, member), \ | ||
33 | 0, help } | ||
34 | |||
35 | struct pyrf_event { | ||
36 | PyObject_HEAD | ||
37 | struct perf_sample sample; | ||
38 | union perf_event event; | ||
39 | }; | ||
40 | |||
41 | #define sample_members \ | ||
42 | sample_member_def(sample_ip, ip, T_ULONGLONG, "event type"), \ | ||
43 | sample_member_def(sample_pid, pid, T_INT, "event pid"), \ | ||
44 | sample_member_def(sample_tid, tid, T_INT, "event tid"), \ | ||
45 | sample_member_def(sample_time, time, T_ULONGLONG, "event timestamp"), \ | ||
46 | sample_member_def(sample_addr, addr, T_ULONGLONG, "event addr"), \ | ||
47 | sample_member_def(sample_id, id, T_ULONGLONG, "event id"), \ | ||
48 | sample_member_def(sample_stream_id, stream_id, T_ULONGLONG, "event stream id"), \ | ||
49 | sample_member_def(sample_period, period, T_ULONGLONG, "event period"), \ | ||
50 | sample_member_def(sample_cpu, cpu, T_UINT, "event cpu"), | ||
51 | |||
52 | static char pyrf_mmap_event__doc[] = PyDoc_STR("perf mmap event object."); | ||
53 | |||
54 | static PyMemberDef pyrf_mmap_event__members[] = { | ||
55 | sample_members | ||
56 | member_def(perf_event_header, type, T_UINT, "event type"), | ||
57 | member_def(mmap_event, pid, T_UINT, "event pid"), | ||
58 | member_def(mmap_event, tid, T_UINT, "event tid"), | ||
59 | member_def(mmap_event, start, T_ULONGLONG, "start of the map"), | ||
60 | member_def(mmap_event, len, T_ULONGLONG, "map length"), | ||
61 | member_def(mmap_event, pgoff, T_ULONGLONG, "page offset"), | ||
62 | member_def(mmap_event, filename, T_STRING_INPLACE, "backing store"), | ||
63 | { .name = NULL, }, | ||
64 | }; | ||
65 | |||
66 | static PyObject *pyrf_mmap_event__repr(struct pyrf_event *pevent) | ||
67 | { | ||
68 | PyObject *ret; | ||
69 | char *s; | ||
70 | |||
71 | if (asprintf(&s, "{ type: mmap, pid: %u, tid: %u, start: %#" PRIx64 ", " | ||
72 | "length: %#" PRIx64 ", offset: %#" PRIx64 ", " | ||
73 | "filename: %s }", | ||
74 | pevent->event.mmap.pid, pevent->event.mmap.tid, | ||
75 | pevent->event.mmap.start, pevent->event.mmap.len, | ||
76 | pevent->event.mmap.pgoff, pevent->event.mmap.filename) < 0) { | ||
77 | ret = PyErr_NoMemory(); | ||
78 | } else { | ||
79 | ret = PyString_FromString(s); | ||
80 | free(s); | ||
81 | } | ||
82 | return ret; | ||
83 | } | ||
84 | |||
85 | static PyTypeObject pyrf_mmap_event__type = { | ||
86 | PyVarObject_HEAD_INIT(NULL, 0) | ||
87 | .tp_name = "perf.mmap_event", | ||
88 | .tp_basicsize = sizeof(struct pyrf_event), | ||
89 | .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, | ||
90 | .tp_doc = pyrf_mmap_event__doc, | ||
91 | .tp_members = pyrf_mmap_event__members, | ||
92 | .tp_repr = (reprfunc)pyrf_mmap_event__repr, | ||
93 | }; | ||
94 | |||
95 | static char pyrf_task_event__doc[] = PyDoc_STR("perf task (fork/exit) event object."); | ||
96 | |||
97 | static PyMemberDef pyrf_task_event__members[] = { | ||
98 | sample_members | ||
99 | member_def(perf_event_header, type, T_UINT, "event type"), | ||
100 | member_def(fork_event, pid, T_UINT, "event pid"), | ||
101 | member_def(fork_event, ppid, T_UINT, "event ppid"), | ||
102 | member_def(fork_event, tid, T_UINT, "event tid"), | ||
103 | member_def(fork_event, ptid, T_UINT, "event ptid"), | ||
104 | member_def(fork_event, time, T_ULONGLONG, "timestamp"), | ||
105 | { .name = NULL, }, | ||
106 | }; | ||
107 | |||
108 | static PyObject *pyrf_task_event__repr(struct pyrf_event *pevent) | ||
109 | { | ||
110 | return PyString_FromFormat("{ type: %s, pid: %u, ppid: %u, tid: %u, " | ||
111 | "ptid: %u, time: %" PRIu64 "}", | ||
112 | pevent->event.header.type == PERF_RECORD_FORK ? "fork" : "exit", | ||
113 | pevent->event.fork.pid, | ||
114 | pevent->event.fork.ppid, | ||
115 | pevent->event.fork.tid, | ||
116 | pevent->event.fork.ptid, | ||
117 | pevent->event.fork.time); | ||
118 | } | ||
119 | |||
120 | static PyTypeObject pyrf_task_event__type = { | ||
121 | PyVarObject_HEAD_INIT(NULL, 0) | ||
122 | .tp_name = "perf.task_event", | ||
123 | .tp_basicsize = sizeof(struct pyrf_event), | ||
124 | .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, | ||
125 | .tp_doc = pyrf_task_event__doc, | ||
126 | .tp_members = pyrf_task_event__members, | ||
127 | .tp_repr = (reprfunc)pyrf_task_event__repr, | ||
128 | }; | ||
129 | |||
130 | static char pyrf_comm_event__doc[] = PyDoc_STR("perf comm event object."); | ||
131 | |||
132 | static PyMemberDef pyrf_comm_event__members[] = { | ||
133 | sample_members | ||
134 | member_def(perf_event_header, type, T_UINT, "event type"), | ||
135 | member_def(comm_event, pid, T_UINT, "event pid"), | ||
136 | member_def(comm_event, tid, T_UINT, "event tid"), | ||
137 | member_def(comm_event, comm, T_STRING_INPLACE, "process name"), | ||
138 | { .name = NULL, }, | ||
139 | }; | ||
140 | |||
141 | static PyObject *pyrf_comm_event__repr(struct pyrf_event *pevent) | ||
142 | { | ||
143 | return PyString_FromFormat("{ type: comm, pid: %u, tid: %u, comm: %s }", | ||
144 | pevent->event.comm.pid, | ||
145 | pevent->event.comm.tid, | ||
146 | pevent->event.comm.comm); | ||
147 | } | ||
148 | |||
149 | static PyTypeObject pyrf_comm_event__type = { | ||
150 | PyVarObject_HEAD_INIT(NULL, 0) | ||
151 | .tp_name = "perf.comm_event", | ||
152 | .tp_basicsize = sizeof(struct pyrf_event), | ||
153 | .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, | ||
154 | .tp_doc = pyrf_comm_event__doc, | ||
155 | .tp_members = pyrf_comm_event__members, | ||
156 | .tp_repr = (reprfunc)pyrf_comm_event__repr, | ||
157 | }; | ||
158 | |||
159 | static char pyrf_throttle_event__doc[] = PyDoc_STR("perf throttle event object."); | ||
160 | |||
161 | static PyMemberDef pyrf_throttle_event__members[] = { | ||
162 | sample_members | ||
163 | member_def(perf_event_header, type, T_UINT, "event type"), | ||
164 | member_def(throttle_event, time, T_ULONGLONG, "timestamp"), | ||
165 | member_def(throttle_event, id, T_ULONGLONG, "event id"), | ||
166 | member_def(throttle_event, stream_id, T_ULONGLONG, "event stream id"), | ||
167 | { .name = NULL, }, | ||
168 | }; | ||
169 | |||
170 | static PyObject *pyrf_throttle_event__repr(struct pyrf_event *pevent) | ||
171 | { | ||
172 | struct throttle_event *te = (struct throttle_event *)(&pevent->event.header + 1); | ||
173 | |||
174 | return PyString_FromFormat("{ type: %sthrottle, time: %" PRIu64 ", id: %" PRIu64 | ||
175 | ", stream_id: %" PRIu64 " }", | ||
176 | pevent->event.header.type == PERF_RECORD_THROTTLE ? "" : "un", | ||
177 | te->time, te->id, te->stream_id); | ||
178 | } | ||
179 | |||
180 | static PyTypeObject pyrf_throttle_event__type = { | ||
181 | PyVarObject_HEAD_INIT(NULL, 0) | ||
182 | .tp_name = "perf.throttle_event", | ||
183 | .tp_basicsize = sizeof(struct pyrf_event), | ||
184 | .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, | ||
185 | .tp_doc = pyrf_throttle_event__doc, | ||
186 | .tp_members = pyrf_throttle_event__members, | ||
187 | .tp_repr = (reprfunc)pyrf_throttle_event__repr, | ||
188 | }; | ||
189 | |||
190 | static int pyrf_event__setup_types(void) | ||
191 | { | ||
192 | int err; | ||
193 | pyrf_mmap_event__type.tp_new = | ||
194 | pyrf_task_event__type.tp_new = | ||
195 | pyrf_comm_event__type.tp_new = | ||
196 | pyrf_throttle_event__type.tp_new = PyType_GenericNew; | ||
197 | err = PyType_Ready(&pyrf_mmap_event__type); | ||
198 | if (err < 0) | ||
199 | goto out; | ||
200 | err = PyType_Ready(&pyrf_task_event__type); | ||
201 | if (err < 0) | ||
202 | goto out; | ||
203 | err = PyType_Ready(&pyrf_comm_event__type); | ||
204 | if (err < 0) | ||
205 | goto out; | ||
206 | err = PyType_Ready(&pyrf_throttle_event__type); | ||
207 | if (err < 0) | ||
208 | goto out; | ||
209 | out: | ||
210 | return err; | ||
211 | } | ||
212 | |||
213 | static PyTypeObject *pyrf_event__type[] = { | ||
214 | [PERF_RECORD_MMAP] = &pyrf_mmap_event__type, | ||
215 | [PERF_RECORD_LOST] = &pyrf_mmap_event__type, | ||
216 | [PERF_RECORD_COMM] = &pyrf_comm_event__type, | ||
217 | [PERF_RECORD_EXIT] = &pyrf_task_event__type, | ||
218 | [PERF_RECORD_THROTTLE] = &pyrf_throttle_event__type, | ||
219 | [PERF_RECORD_UNTHROTTLE] = &pyrf_throttle_event__type, | ||
220 | [PERF_RECORD_FORK] = &pyrf_task_event__type, | ||
221 | [PERF_RECORD_READ] = &pyrf_mmap_event__type, | ||
222 | [PERF_RECORD_SAMPLE] = &pyrf_mmap_event__type, | ||
223 | }; | ||
224 | |||
225 | static PyObject *pyrf_event__new(union perf_event *event) | ||
226 | { | ||
227 | struct pyrf_event *pevent; | ||
228 | PyTypeObject *ptype; | ||
229 | |||
230 | if (event->header.type < PERF_RECORD_MMAP || | ||
231 | event->header.type > PERF_RECORD_SAMPLE) | ||
232 | return NULL; | ||
233 | |||
234 | ptype = pyrf_event__type[event->header.type]; | ||
235 | pevent = PyObject_New(struct pyrf_event, ptype); | ||
236 | if (pevent != NULL) | ||
237 | memcpy(&pevent->event, event, event->header.size); | ||
238 | return (PyObject *)pevent; | ||
239 | } | ||
240 | |||
241 | struct pyrf_cpu_map { | ||
242 | PyObject_HEAD | ||
243 | |||
244 | struct cpu_map *cpus; | ||
245 | }; | ||
246 | |||
247 | static int pyrf_cpu_map__init(struct pyrf_cpu_map *pcpus, | ||
248 | PyObject *args, PyObject *kwargs) | ||
249 | { | ||
250 | static char *kwlist[] = { "cpustr", NULL, NULL, }; | ||
251 | char *cpustr = NULL; | ||
252 | |||
253 | if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|s", | ||
254 | kwlist, &cpustr)) | ||
255 | return -1; | ||
256 | |||
257 | pcpus->cpus = cpu_map__new(cpustr); | ||
258 | if (pcpus->cpus == NULL) | ||
259 | return -1; | ||
260 | return 0; | ||
261 | } | ||
262 | |||
263 | static void pyrf_cpu_map__delete(struct pyrf_cpu_map *pcpus) | ||
264 | { | ||
265 | cpu_map__delete(pcpus->cpus); | ||
266 | pcpus->ob_type->tp_free((PyObject*)pcpus); | ||
267 | } | ||
268 | |||
269 | static Py_ssize_t pyrf_cpu_map__length(PyObject *obj) | ||
270 | { | ||
271 | struct pyrf_cpu_map *pcpus = (void *)obj; | ||
272 | |||
273 | return pcpus->cpus->nr; | ||
274 | } | ||
275 | |||
276 | static PyObject *pyrf_cpu_map__item(PyObject *obj, Py_ssize_t i) | ||
277 | { | ||
278 | struct pyrf_cpu_map *pcpus = (void *)obj; | ||
279 | |||
280 | if (i >= pcpus->cpus->nr) | ||
281 | return NULL; | ||
282 | |||
283 | return Py_BuildValue("i", pcpus->cpus->map[i]); | ||
284 | } | ||
285 | |||
286 | static PySequenceMethods pyrf_cpu_map__sequence_methods = { | ||
287 | .sq_length = pyrf_cpu_map__length, | ||
288 | .sq_item = pyrf_cpu_map__item, | ||
289 | }; | ||
290 | |||
291 | static char pyrf_cpu_map__doc[] = PyDoc_STR("cpu map object."); | ||
292 | |||
293 | static PyTypeObject pyrf_cpu_map__type = { | ||
294 | PyVarObject_HEAD_INIT(NULL, 0) | ||
295 | .tp_name = "perf.cpu_map", | ||
296 | .tp_basicsize = sizeof(struct pyrf_cpu_map), | ||
297 | .tp_dealloc = (destructor)pyrf_cpu_map__delete, | ||
298 | .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, | ||
299 | .tp_doc = pyrf_cpu_map__doc, | ||
300 | .tp_as_sequence = &pyrf_cpu_map__sequence_methods, | ||
301 | .tp_init = (initproc)pyrf_cpu_map__init, | ||
302 | }; | ||
303 | |||
304 | static int pyrf_cpu_map__setup_types(void) | ||
305 | { | ||
306 | pyrf_cpu_map__type.tp_new = PyType_GenericNew; | ||
307 | return PyType_Ready(&pyrf_cpu_map__type); | ||
308 | } | ||
309 | |||
310 | struct pyrf_thread_map { | ||
311 | PyObject_HEAD | ||
312 | |||
313 | struct thread_map *threads; | ||
314 | }; | ||
315 | |||
316 | static int pyrf_thread_map__init(struct pyrf_thread_map *pthreads, | ||
317 | PyObject *args, PyObject *kwargs) | ||
318 | { | ||
319 | static char *kwlist[] = { "pid", "tid", NULL, NULL, }; | ||
320 | int pid = -1, tid = -1; | ||
321 | |||
322 | if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii", | ||
323 | kwlist, &pid, &tid)) | ||
324 | return -1; | ||
325 | |||
326 | pthreads->threads = thread_map__new(pid, tid); | ||
327 | if (pthreads->threads == NULL) | ||
328 | return -1; | ||
329 | return 0; | ||
330 | } | ||
331 | |||
332 | static void pyrf_thread_map__delete(struct pyrf_thread_map *pthreads) | ||
333 | { | ||
334 | thread_map__delete(pthreads->threads); | ||
335 | pthreads->ob_type->tp_free((PyObject*)pthreads); | ||
336 | } | ||
337 | |||
338 | static Py_ssize_t pyrf_thread_map__length(PyObject *obj) | ||
339 | { | ||
340 | struct pyrf_thread_map *pthreads = (void *)obj; | ||
341 | |||
342 | return pthreads->threads->nr; | ||
343 | } | ||
344 | |||
345 | static PyObject *pyrf_thread_map__item(PyObject *obj, Py_ssize_t i) | ||
346 | { | ||
347 | struct pyrf_thread_map *pthreads = (void *)obj; | ||
348 | |||
349 | if (i >= pthreads->threads->nr) | ||
350 | return NULL; | ||
351 | |||
352 | return Py_BuildValue("i", pthreads->threads->map[i]); | ||
353 | } | ||
354 | |||
355 | static PySequenceMethods pyrf_thread_map__sequence_methods = { | ||
356 | .sq_length = pyrf_thread_map__length, | ||
357 | .sq_item = pyrf_thread_map__item, | ||
358 | }; | ||
359 | |||
360 | static char pyrf_thread_map__doc[] = PyDoc_STR("thread map object."); | ||
361 | |||
362 | static PyTypeObject pyrf_thread_map__type = { | ||
363 | PyVarObject_HEAD_INIT(NULL, 0) | ||
364 | .tp_name = "perf.thread_map", | ||
365 | .tp_basicsize = sizeof(struct pyrf_thread_map), | ||
366 | .tp_dealloc = (destructor)pyrf_thread_map__delete, | ||
367 | .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, | ||
368 | .tp_doc = pyrf_thread_map__doc, | ||
369 | .tp_as_sequence = &pyrf_thread_map__sequence_methods, | ||
370 | .tp_init = (initproc)pyrf_thread_map__init, | ||
371 | }; | ||
372 | |||
373 | static int pyrf_thread_map__setup_types(void) | ||
374 | { | ||
375 | pyrf_thread_map__type.tp_new = PyType_GenericNew; | ||
376 | return PyType_Ready(&pyrf_thread_map__type); | ||
377 | } | ||
378 | |||
379 | struct pyrf_evsel { | ||
380 | PyObject_HEAD | ||
381 | |||
382 | struct perf_evsel evsel; | ||
383 | }; | ||
384 | |||
385 | static int pyrf_evsel__init(struct pyrf_evsel *pevsel, | ||
386 | PyObject *args, PyObject *kwargs) | ||
387 | { | ||
388 | struct perf_event_attr attr = { | ||
389 | .type = PERF_TYPE_HARDWARE, | ||
390 | .config = PERF_COUNT_HW_CPU_CYCLES, | ||
391 | .sample_type = PERF_SAMPLE_PERIOD | PERF_SAMPLE_TID, | ||
392 | }; | ||
393 | static char *kwlist[] = { | ||
394 | "type", | ||
395 | "config", | ||
396 | "sample_freq", | ||
397 | "sample_period", | ||
398 | "sample_type", | ||
399 | "read_format", | ||
400 | "disabled", | ||
401 | "inherit", | ||
402 | "pinned", | ||
403 | "exclusive", | ||
404 | "exclude_user", | ||
405 | "exclude_kernel", | ||
406 | "exclude_hv", | ||
407 | "exclude_idle", | ||
408 | "mmap", | ||
409 | "comm", | ||
410 | "freq", | ||
411 | "inherit_stat", | ||
412 | "enable_on_exec", | ||
413 | "task", | ||
414 | "watermark", | ||
415 | "precise_ip", | ||
416 | "mmap_data", | ||
417 | "sample_id_all", | ||
418 | "wakeup_events", | ||
419 | "bp_type", | ||
420 | "bp_addr", | ||
421 | "bp_len", NULL, NULL, }; | ||
422 | u64 sample_period = 0; | ||
423 | u32 disabled = 0, | ||
424 | inherit = 0, | ||
425 | pinned = 0, | ||
426 | exclusive = 0, | ||
427 | exclude_user = 0, | ||
428 | exclude_kernel = 0, | ||
429 | exclude_hv = 0, | ||
430 | exclude_idle = 0, | ||
431 | mmap = 0, | ||
432 | comm = 0, | ||
433 | freq = 1, | ||
434 | inherit_stat = 0, | ||
435 | enable_on_exec = 0, | ||
436 | task = 0, | ||
437 | watermark = 0, | ||
438 | precise_ip = 0, | ||
439 | mmap_data = 0, | ||
440 | sample_id_all = 1; | ||
441 | int idx = 0; | ||
442 | |||
443 | if (!PyArg_ParseTupleAndKeywords(args, kwargs, | ||
444 | "|iKiKKiiiiiiiiiiiiiiiiiiiiiKK", kwlist, | ||
445 | &attr.type, &attr.config, &attr.sample_freq, | ||
446 | &sample_period, &attr.sample_type, | ||
447 | &attr.read_format, &disabled, &inherit, | ||
448 | &pinned, &exclusive, &exclude_user, | ||
449 | &exclude_kernel, &exclude_hv, &exclude_idle, | ||
450 | &mmap, &comm, &freq, &inherit_stat, | ||
451 | &enable_on_exec, &task, &watermark, | ||
452 | &precise_ip, &mmap_data, &sample_id_all, | ||
453 | &attr.wakeup_events, &attr.bp_type, | ||
454 | &attr.bp_addr, &attr.bp_len, &idx)) | ||
455 | return -1; | ||
456 | |||
457 | /* union... */ | ||
458 | if (sample_period != 0) { | ||
459 | if (attr.sample_freq != 0) | ||
460 | return -1; /* FIXME: throw right exception */ | ||
461 | attr.sample_period = sample_period; | ||
462 | } | ||
463 | |||
464 | /* Bitfields */ | ||
465 | attr.disabled = disabled; | ||
466 | attr.inherit = inherit; | ||
467 | attr.pinned = pinned; | ||
468 | attr.exclusive = exclusive; | ||
469 | attr.exclude_user = exclude_user; | ||
470 | attr.exclude_kernel = exclude_kernel; | ||
471 | attr.exclude_hv = exclude_hv; | ||
472 | attr.exclude_idle = exclude_idle; | ||
473 | attr.mmap = mmap; | ||
474 | attr.comm = comm; | ||
475 | attr.freq = freq; | ||
476 | attr.inherit_stat = inherit_stat; | ||
477 | attr.enable_on_exec = enable_on_exec; | ||
478 | attr.task = task; | ||
479 | attr.watermark = watermark; | ||
480 | attr.precise_ip = precise_ip; | ||
481 | attr.mmap_data = mmap_data; | ||
482 | attr.sample_id_all = sample_id_all; | ||
483 | |||
484 | perf_evsel__init(&pevsel->evsel, &attr, idx); | ||
485 | return 0; | ||
486 | } | ||
487 | |||
488 | static void pyrf_evsel__delete(struct pyrf_evsel *pevsel) | ||
489 | { | ||
490 | perf_evsel__exit(&pevsel->evsel); | ||
491 | pevsel->ob_type->tp_free((PyObject*)pevsel); | ||
492 | } | ||
493 | |||
494 | static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel, | ||
495 | PyObject *args, PyObject *kwargs) | ||
496 | { | ||
497 | struct perf_evsel *evsel = &pevsel->evsel; | ||
498 | struct cpu_map *cpus = NULL; | ||
499 | struct thread_map *threads = NULL; | ||
500 | PyObject *pcpus = NULL, *pthreads = NULL; | ||
501 | int group = 0, overwrite = 0; | ||
502 | static char *kwlist[] = {"cpus", "threads", "group", "overwrite", NULL, NULL}; | ||
503 | |||
504 | if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist, | ||
505 | &pcpus, &pthreads, &group, &overwrite)) | ||
506 | return NULL; | ||
507 | |||
508 | if (pthreads != NULL) | ||
509 | threads = ((struct pyrf_thread_map *)pthreads)->threads; | ||
510 | |||
511 | if (pcpus != NULL) | ||
512 | cpus = ((struct pyrf_cpu_map *)pcpus)->cpus; | ||
513 | |||
514 | if (perf_evsel__open(evsel, cpus, threads, group, overwrite) < 0) { | ||
515 | PyErr_SetFromErrno(PyExc_OSError); | ||
516 | return NULL; | ||
517 | } | ||
518 | |||
519 | Py_INCREF(Py_None); | ||
520 | return Py_None; | ||
521 | } | ||
522 | |||
523 | static PyMethodDef pyrf_evsel__methods[] = { | ||
524 | { | ||
525 | .ml_name = "open", | ||
526 | .ml_meth = (PyCFunction)pyrf_evsel__open, | ||
527 | .ml_flags = METH_VARARGS | METH_KEYWORDS, | ||
528 | .ml_doc = PyDoc_STR("open the event selector file descriptor table.") | ||
529 | }, | ||
530 | { .ml_name = NULL, } | ||
531 | }; | ||
532 | |||
533 | static char pyrf_evsel__doc[] = PyDoc_STR("perf event selector list object."); | ||
534 | |||
535 | static PyTypeObject pyrf_evsel__type = { | ||
536 | PyVarObject_HEAD_INIT(NULL, 0) | ||
537 | .tp_name = "perf.evsel", | ||
538 | .tp_basicsize = sizeof(struct pyrf_evsel), | ||
539 | .tp_dealloc = (destructor)pyrf_evsel__delete, | ||
540 | .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, | ||
541 | .tp_doc = pyrf_evsel__doc, | ||
542 | .tp_methods = pyrf_evsel__methods, | ||
543 | .tp_init = (initproc)pyrf_evsel__init, | ||
544 | }; | ||
545 | |||
546 | static int pyrf_evsel__setup_types(void) | ||
547 | { | ||
548 | pyrf_evsel__type.tp_new = PyType_GenericNew; | ||
549 | return PyType_Ready(&pyrf_evsel__type); | ||
550 | } | ||
551 | |||
552 | struct pyrf_evlist { | ||
553 | PyObject_HEAD | ||
554 | |||
555 | struct perf_evlist evlist; | ||
556 | }; | ||
557 | |||
558 | static int pyrf_evlist__init(struct pyrf_evlist *pevlist, | ||
559 | PyObject *args, PyObject *kwargs __used) | ||
560 | { | ||
561 | PyObject *pcpus = NULL, *pthreads = NULL; | ||
562 | struct cpu_map *cpus; | ||
563 | struct thread_map *threads; | ||
564 | |||
565 | if (!PyArg_ParseTuple(args, "OO", &pcpus, &pthreads)) | ||
566 | return -1; | ||
567 | |||
568 | threads = ((struct pyrf_thread_map *)pthreads)->threads; | ||
569 | cpus = ((struct pyrf_cpu_map *)pcpus)->cpus; | ||
570 | perf_evlist__init(&pevlist->evlist, cpus, threads); | ||
571 | return 0; | ||
572 | } | ||
573 | |||
574 | static void pyrf_evlist__delete(struct pyrf_evlist *pevlist) | ||
575 | { | ||
576 | perf_evlist__exit(&pevlist->evlist); | ||
577 | pevlist->ob_type->tp_free((PyObject*)pevlist); | ||
578 | } | ||
579 | |||
580 | static PyObject *pyrf_evlist__mmap(struct pyrf_evlist *pevlist, | ||
581 | PyObject *args, PyObject *kwargs) | ||
582 | { | ||
583 | struct perf_evlist *evlist = &pevlist->evlist; | ||
584 | static char *kwlist[] = {"pages", "overwrite", | ||
585 | NULL, NULL}; | ||
586 | int pages = 128, overwrite = false; | ||
587 | |||
588 | if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii", kwlist, | ||
589 | &pages, &overwrite)) | ||
590 | return NULL; | ||
591 | |||
592 | if (perf_evlist__mmap(evlist, pages, overwrite) < 0) { | ||
593 | PyErr_SetFromErrno(PyExc_OSError); | ||
594 | return NULL; | ||
595 | } | ||
596 | |||
597 | Py_INCREF(Py_None); | ||
598 | return Py_None; | ||
599 | } | ||
600 | |||
601 | static PyObject *pyrf_evlist__poll(struct pyrf_evlist *pevlist, | ||
602 | PyObject *args, PyObject *kwargs) | ||
603 | { | ||
604 | struct perf_evlist *evlist = &pevlist->evlist; | ||
605 | static char *kwlist[] = {"timeout", NULL, NULL}; | ||
606 | int timeout = -1, n; | ||
607 | |||
608 | if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, &timeout)) | ||
609 | return NULL; | ||
610 | |||
611 | n = poll(evlist->pollfd, evlist->nr_fds, timeout); | ||
612 | if (n < 0) { | ||
613 | PyErr_SetFromErrno(PyExc_OSError); | ||
614 | return NULL; | ||
615 | } | ||
616 | |||
617 | return Py_BuildValue("i", n); | ||
618 | } | ||
619 | |||
620 | static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist, | ||
621 | PyObject *args __used, PyObject *kwargs __used) | ||
622 | { | ||
623 | struct perf_evlist *evlist = &pevlist->evlist; | ||
624 | PyObject *list = PyList_New(0); | ||
625 | int i; | ||
626 | |||
627 | for (i = 0; i < evlist->nr_fds; ++i) { | ||
628 | PyObject *file; | ||
629 | FILE *fp = fdopen(evlist->pollfd[i].fd, "r"); | ||
630 | |||
631 | if (fp == NULL) | ||
632 | goto free_list; | ||
633 | |||
634 | file = PyFile_FromFile(fp, "perf", "r", NULL); | ||
635 | if (file == NULL) | ||
636 | goto free_list; | ||
637 | |||
638 | if (PyList_Append(list, file) != 0) { | ||
639 | Py_DECREF(file); | ||
640 | goto free_list; | ||
641 | } | ||
642 | |||
643 | Py_DECREF(file); | ||
644 | } | ||
645 | |||
646 | return list; | ||
647 | free_list: | ||
648 | return PyErr_NoMemory(); | ||
649 | } | ||
650 | |||
651 | |||
652 | static PyObject *pyrf_evlist__add(struct pyrf_evlist *pevlist, | ||
653 | PyObject *args, PyObject *kwargs __used) | ||
654 | { | ||
655 | struct perf_evlist *evlist = &pevlist->evlist; | ||
656 | PyObject *pevsel; | ||
657 | struct perf_evsel *evsel; | ||
658 | |||
659 | if (!PyArg_ParseTuple(args, "O", &pevsel)) | ||
660 | return NULL; | ||
661 | |||
662 | Py_INCREF(pevsel); | ||
663 | evsel = &((struct pyrf_evsel *)pevsel)->evsel; | ||
664 | evsel->idx = evlist->nr_entries; | ||
665 | perf_evlist__add(evlist, evsel); | ||
666 | |||
667 | return Py_BuildValue("i", evlist->nr_entries); | ||
668 | } | ||
669 | |||
670 | static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist, | ||
671 | PyObject *args, PyObject *kwargs) | ||
672 | { | ||
673 | struct perf_evlist *evlist = &pevlist->evlist; | ||
674 | union perf_event *event; | ||
675 | int sample_id_all = 1, cpu; | ||
676 | static char *kwlist[] = {"sample_id_all", NULL, NULL}; | ||
677 | |||
678 | if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|i", kwlist, | ||
679 | &cpu, &sample_id_all)) | ||
680 | return NULL; | ||
681 | |||
682 | event = perf_evlist__read_on_cpu(evlist, cpu); | ||
683 | if (event != NULL) { | ||
684 | struct perf_evsel *first; | ||
685 | PyObject *pyevent = pyrf_event__new(event); | ||
686 | struct pyrf_event *pevent = (struct pyrf_event *)pyevent; | ||
687 | |||
688 | if (pyevent == NULL) | ||
689 | return PyErr_NoMemory(); | ||
690 | |||
691 | first = list_entry(evlist->entries.next, struct perf_evsel, node); | ||
692 | perf_event__parse_sample(event, first->attr.sample_type, sample_id_all, | ||
693 | &pevent->sample); | ||
694 | return pyevent; | ||
695 | } | ||
696 | |||
697 | Py_INCREF(Py_None); | ||
698 | return Py_None; | ||
699 | } | ||
700 | |||
701 | static PyMethodDef pyrf_evlist__methods[] = { | ||
702 | { | ||
703 | .ml_name = "mmap", | ||
704 | .ml_meth = (PyCFunction)pyrf_evlist__mmap, | ||
705 | .ml_flags = METH_VARARGS | METH_KEYWORDS, | ||
706 | .ml_doc = PyDoc_STR("mmap the file descriptor table.") | ||
707 | }, | ||
708 | { | ||
709 | .ml_name = "poll", | ||
710 | .ml_meth = (PyCFunction)pyrf_evlist__poll, | ||
711 | .ml_flags = METH_VARARGS | METH_KEYWORDS, | ||
712 | .ml_doc = PyDoc_STR("poll the file descriptor table.") | ||
713 | }, | ||
714 | { | ||
715 | .ml_name = "get_pollfd", | ||
716 | .ml_meth = (PyCFunction)pyrf_evlist__get_pollfd, | ||
717 | .ml_flags = METH_VARARGS | METH_KEYWORDS, | ||
718 | .ml_doc = PyDoc_STR("get the poll file descriptor table.") | ||
719 | }, | ||
720 | { | ||
721 | .ml_name = "add", | ||
722 | .ml_meth = (PyCFunction)pyrf_evlist__add, | ||
723 | .ml_flags = METH_VARARGS | METH_KEYWORDS, | ||
724 | .ml_doc = PyDoc_STR("adds an event selector to the list.") | ||
725 | }, | ||
726 | { | ||
727 | .ml_name = "read_on_cpu", | ||
728 | .ml_meth = (PyCFunction)pyrf_evlist__read_on_cpu, | ||
729 | .ml_flags = METH_VARARGS | METH_KEYWORDS, | ||
730 | .ml_doc = PyDoc_STR("reads an event.") | ||
731 | }, | ||
732 | { .ml_name = NULL, } | ||
733 | }; | ||
734 | |||
735 | static Py_ssize_t pyrf_evlist__length(PyObject *obj) | ||
736 | { | ||
737 | struct pyrf_evlist *pevlist = (void *)obj; | ||
738 | |||
739 | return pevlist->evlist.nr_entries; | ||
740 | } | ||
741 | |||
742 | static PyObject *pyrf_evlist__item(PyObject *obj, Py_ssize_t i) | ||
743 | { | ||
744 | struct pyrf_evlist *pevlist = (void *)obj; | ||
745 | struct perf_evsel *pos; | ||
746 | |||
747 | if (i >= pevlist->evlist.nr_entries) | ||
748 | return NULL; | ||
749 | |||
750 | list_for_each_entry(pos, &pevlist->evlist.entries, node) | ||
751 | if (i-- == 0) | ||
752 | break; | ||
753 | |||
754 | return Py_BuildValue("O", container_of(pos, struct pyrf_evsel, evsel)); | ||
755 | } | ||
756 | |||
757 | static PySequenceMethods pyrf_evlist__sequence_methods = { | ||
758 | .sq_length = pyrf_evlist__length, | ||
759 | .sq_item = pyrf_evlist__item, | ||
760 | }; | ||
761 | |||
762 | static char pyrf_evlist__doc[] = PyDoc_STR("perf event selector list object."); | ||
763 | |||
764 | static PyTypeObject pyrf_evlist__type = { | ||
765 | PyVarObject_HEAD_INIT(NULL, 0) | ||
766 | .tp_name = "perf.evlist", | ||
767 | .tp_basicsize = sizeof(struct pyrf_evlist), | ||
768 | .tp_dealloc = (destructor)pyrf_evlist__delete, | ||
769 | .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, | ||
770 | .tp_as_sequence = &pyrf_evlist__sequence_methods, | ||
771 | .tp_doc = pyrf_evlist__doc, | ||
772 | .tp_methods = pyrf_evlist__methods, | ||
773 | .tp_init = (initproc)pyrf_evlist__init, | ||
774 | }; | ||
775 | |||
776 | static int pyrf_evlist__setup_types(void) | ||
777 | { | ||
778 | pyrf_evlist__type.tp_new = PyType_GenericNew; | ||
779 | return PyType_Ready(&pyrf_evlist__type); | ||
780 | } | ||
781 | |||
782 | static struct { | ||
783 | const char *name; | ||
784 | int value; | ||
785 | } perf__constants[] = { | ||
786 | { "TYPE_HARDWARE", PERF_TYPE_HARDWARE }, | ||
787 | { "TYPE_SOFTWARE", PERF_TYPE_SOFTWARE }, | ||
788 | { "TYPE_TRACEPOINT", PERF_TYPE_TRACEPOINT }, | ||
789 | { "TYPE_HW_CACHE", PERF_TYPE_HW_CACHE }, | ||
790 | { "TYPE_RAW", PERF_TYPE_RAW }, | ||
791 | { "TYPE_BREAKPOINT", PERF_TYPE_BREAKPOINT }, | ||
792 | |||
793 | { "COUNT_HW_CPU_CYCLES", PERF_COUNT_HW_CPU_CYCLES }, | ||
794 | { "COUNT_HW_INSTRUCTIONS", PERF_COUNT_HW_INSTRUCTIONS }, | ||
795 | { "COUNT_HW_CACHE_REFERENCES", PERF_COUNT_HW_CACHE_REFERENCES }, | ||
796 | { "COUNT_HW_CACHE_MISSES", PERF_COUNT_HW_CACHE_MISSES }, | ||
797 | { "COUNT_HW_BRANCH_INSTRUCTIONS", PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, | ||
798 | { "COUNT_HW_BRANCH_MISSES", PERF_COUNT_HW_BRANCH_MISSES }, | ||
799 | { "COUNT_HW_BUS_CYCLES", PERF_COUNT_HW_BUS_CYCLES }, | ||
800 | { "COUNT_HW_CACHE_L1D", PERF_COUNT_HW_CACHE_L1D }, | ||
801 | { "COUNT_HW_CACHE_L1I", PERF_COUNT_HW_CACHE_L1I }, | ||
802 | { "COUNT_HW_CACHE_LL", PERF_COUNT_HW_CACHE_LL }, | ||
803 | { "COUNT_HW_CACHE_DTLB", PERF_COUNT_HW_CACHE_DTLB }, | ||
804 | { "COUNT_HW_CACHE_ITLB", PERF_COUNT_HW_CACHE_ITLB }, | ||
805 | { "COUNT_HW_CACHE_BPU", PERF_COUNT_HW_CACHE_BPU }, | ||
806 | { "COUNT_HW_CACHE_OP_READ", PERF_COUNT_HW_CACHE_OP_READ }, | ||
807 | { "COUNT_HW_CACHE_OP_WRITE", PERF_COUNT_HW_CACHE_OP_WRITE }, | ||
808 | { "COUNT_HW_CACHE_OP_PREFETCH", PERF_COUNT_HW_CACHE_OP_PREFETCH }, | ||
809 | { "COUNT_HW_CACHE_RESULT_ACCESS", PERF_COUNT_HW_CACHE_RESULT_ACCESS }, | ||
810 | { "COUNT_HW_CACHE_RESULT_MISS", PERF_COUNT_HW_CACHE_RESULT_MISS }, | ||
811 | |||
812 | { "COUNT_SW_CPU_CLOCK", PERF_COUNT_SW_CPU_CLOCK }, | ||
813 | { "COUNT_SW_TASK_CLOCK", PERF_COUNT_SW_TASK_CLOCK }, | ||
814 | { "COUNT_SW_PAGE_FAULTS", PERF_COUNT_SW_PAGE_FAULTS }, | ||
815 | { "COUNT_SW_CONTEXT_SWITCHES", PERF_COUNT_SW_CONTEXT_SWITCHES }, | ||
816 | { "COUNT_SW_CPU_MIGRATIONS", PERF_COUNT_SW_CPU_MIGRATIONS }, | ||
817 | { "COUNT_SW_PAGE_FAULTS_MIN", PERF_COUNT_SW_PAGE_FAULTS_MIN }, | ||
818 | { "COUNT_SW_PAGE_FAULTS_MAJ", PERF_COUNT_SW_PAGE_FAULTS_MAJ }, | ||
819 | { "COUNT_SW_ALIGNMENT_FAULTS", PERF_COUNT_SW_ALIGNMENT_FAULTS }, | ||
820 | { "COUNT_SW_EMULATION_FAULTS", PERF_COUNT_SW_EMULATION_FAULTS }, | ||
821 | |||
822 | { "SAMPLE_IP", PERF_SAMPLE_IP }, | ||
823 | { "SAMPLE_TID", PERF_SAMPLE_TID }, | ||
824 | { "SAMPLE_TIME", PERF_SAMPLE_TIME }, | ||
825 | { "SAMPLE_ADDR", PERF_SAMPLE_ADDR }, | ||
826 | { "SAMPLE_READ", PERF_SAMPLE_READ }, | ||
827 | { "SAMPLE_CALLCHAIN", PERF_SAMPLE_CALLCHAIN }, | ||
828 | { "SAMPLE_ID", PERF_SAMPLE_ID }, | ||
829 | { "SAMPLE_CPU", PERF_SAMPLE_CPU }, | ||
830 | { "SAMPLE_PERIOD", PERF_SAMPLE_PERIOD }, | ||
831 | { "SAMPLE_STREAM_ID", PERF_SAMPLE_STREAM_ID }, | ||
832 | { "SAMPLE_RAW", PERF_SAMPLE_RAW }, | ||
833 | |||
834 | { "FORMAT_TOTAL_TIME_ENABLED", PERF_FORMAT_TOTAL_TIME_ENABLED }, | ||
835 | { "FORMAT_TOTAL_TIME_RUNNING", PERF_FORMAT_TOTAL_TIME_RUNNING }, | ||
836 | { "FORMAT_ID", PERF_FORMAT_ID }, | ||
837 | { "FORMAT_GROUP", PERF_FORMAT_GROUP }, | ||
838 | |||
839 | { "RECORD_MMAP", PERF_RECORD_MMAP }, | ||
840 | { "RECORD_LOST", PERF_RECORD_LOST }, | ||
841 | { "RECORD_COMM", PERF_RECORD_COMM }, | ||
842 | { "RECORD_EXIT", PERF_RECORD_EXIT }, | ||
843 | { "RECORD_THROTTLE", PERF_RECORD_THROTTLE }, | ||
844 | { "RECORD_UNTHROTTLE", PERF_RECORD_UNTHROTTLE }, | ||
845 | { "RECORD_FORK", PERF_RECORD_FORK }, | ||
846 | { "RECORD_READ", PERF_RECORD_READ }, | ||
847 | { "RECORD_SAMPLE", PERF_RECORD_SAMPLE }, | ||
848 | { .name = NULL, }, | ||
849 | }; | ||
850 | |||
851 | static PyMethodDef perf__methods[] = { | ||
852 | { .ml_name = NULL, } | ||
853 | }; | ||
854 | |||
855 | PyMODINIT_FUNC initperf(void) | ||
856 | { | ||
857 | PyObject *obj; | ||
858 | int i; | ||
859 | PyObject *dict, *module = Py_InitModule("perf", perf__methods); | ||
860 | |||
861 | if (module == NULL || | ||
862 | pyrf_event__setup_types() < 0 || | ||
863 | pyrf_evlist__setup_types() < 0 || | ||
864 | pyrf_evsel__setup_types() < 0 || | ||
865 | pyrf_thread_map__setup_types() < 0 || | ||
866 | pyrf_cpu_map__setup_types() < 0) | ||
867 | return; | ||
868 | |||
869 | Py_INCREF(&pyrf_evlist__type); | ||
870 | PyModule_AddObject(module, "evlist", (PyObject*)&pyrf_evlist__type); | ||
871 | |||
872 | Py_INCREF(&pyrf_evsel__type); | ||
873 | PyModule_AddObject(module, "evsel", (PyObject*)&pyrf_evsel__type); | ||
874 | |||
875 | Py_INCREF(&pyrf_thread_map__type); | ||
876 | PyModule_AddObject(module, "thread_map", (PyObject*)&pyrf_thread_map__type); | ||
877 | |||
878 | Py_INCREF(&pyrf_cpu_map__type); | ||
879 | PyModule_AddObject(module, "cpu_map", (PyObject*)&pyrf_cpu_map__type); | ||
880 | |||
881 | dict = PyModule_GetDict(module); | ||
882 | if (dict == NULL) | ||
883 | goto error; | ||
884 | |||
885 | for (i = 0; perf__constants[i].name != NULL; i++) { | ||
886 | obj = PyInt_FromLong(perf__constants[i].value); | ||
887 | if (obj == NULL) | ||
888 | goto error; | ||
889 | PyDict_SetItemString(dict, perf__constants[i].name, obj); | ||
890 | Py_DECREF(obj); | ||
891 | } | ||
892 | |||
893 | error: | ||
894 | if (PyErr_Occurred()) | ||
895 | PyErr_SetString(PyExc_ImportError, "perf: Init failed!"); | ||
896 | } | ||
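
The code above registers the evlist, evsel, thread_map and cpu_map types plus the PERF_* constants in a "perf" extension module. A minimal usage sketch follows; the evsel constructor keywords, evsel.open() and the evlist(cpus, threads) constructor arguments are assumptions taken from parts of python.c not shown in this hunk (cf. the twatch.py example in the perf tree), not from the lines above.

```python
#!/usr/bin/python2
# Sketch only: exercises the bindings defined in util/python.c above.
# Assumed (not visible in this hunk): perf.evsel() keyword arguments,
# evsel.open(), and the perf.evlist(cpus, threads) constructor.
import perf

def main():
    cpus = perf.cpu_map()        # all online CPUs
    threads = perf.thread_map()  # current thread only
    evsel = perf.evsel(type = perf.TYPE_SOFTWARE,
                       config = perf.COUNT_SW_TASK_CLOCK,
                       sample_period = 1, sample_id_all = 1,
                       sample_type = perf.SAMPLE_CPU | perf.SAMPLE_TID)
    evsel.open(cpus = cpus, threads = threads)

    evlist = perf.evlist(cpus, threads)
    evlist.add(evsel)            # pyrf_evlist__add() above
    evlist.mmap()                # pyrf_evlist__mmap()
    while True:
        evlist.poll(timeout = -1)
        for cpu in cpus:
            event = evlist.read_on_cpu(cpu)  # pyrf_evlist__read_on_cpu()
            if event:
                print event

if __name__ == '__main__':
    main()
```
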
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c index c6d99334bdfa..2040b8538527 100644 --- a/tools/perf/util/scripting-engines/trace-event-python.c +++ b/tools/perf/util/scripting-engines/trace-event-python.c | |||
@@ -248,8 +248,7 @@ static void python_process_event(int cpu, void *data, | |||
248 | context = PyCObject_FromVoidPtr(scripting_context, NULL); | 248 | context = PyCObject_FromVoidPtr(scripting_context, NULL); |
249 | 249 | ||
250 | PyTuple_SetItem(t, n++, PyString_FromString(handler_name)); | 250 | PyTuple_SetItem(t, n++, PyString_FromString(handler_name)); |
251 | PyTuple_SetItem(t, n++, | 251 | PyTuple_SetItem(t, n++, context); |
252 | PyCObject_FromVoidPtr(scripting_context, NULL)); | ||
253 | 252 | ||
254 | if (handler) { | 253 | if (handler) { |
255 | PyTuple_SetItem(t, n++, PyInt_FromLong(cpu)); | 254 | PyTuple_SetItem(t, n++, PyInt_FromLong(cpu)); |
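
For context, the tuple assembled here (handler_name, context, then cpu/secs/nsecs/pid/comm and the tracepoint's own fields) is what a perf-script Python handler receives; the fix above simply reuses the already-created context object instead of building a second one. A hedged sketch of such a handler, with the argument order taken from the perf-script-python documentation rather than from this hunk:

```python
# Sketch of a perf-script handler fed by python_process_event() above.
# The sched_switch field list mirrors that tracepoint's format and is an
# assumption here; other events pass their own fields after common_comm.
def sched__sched_switch(event_name, context, common_cpu, common_secs,
                        common_nsecs, common_pid, common_comm,
                        prev_comm, prev_pid, prev_prio, prev_state,
                        next_comm, next_pid, next_prio):
    print "%s: %s -> %s [cpu %d]" % (event_name, prev_comm, next_comm, common_cpu)
```
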
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 105f00bfd555..f26639fa0fb3 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c | |||
@@ -7,6 +7,8 @@ | |||
7 | #include <sys/types.h> | 7 | #include <sys/types.h> |
8 | #include <sys/mman.h> | 8 | #include <sys/mman.h> |
9 | 9 | ||
10 | #include "evlist.h" | ||
11 | #include "evsel.h" | ||
10 | #include "session.h" | 12 | #include "session.h" |
11 | #include "sort.h" | 13 | #include "sort.h" |
12 | #include "util.h" | 14 | #include "util.h" |
@@ -19,7 +21,7 @@ static int perf_session__open(struct perf_session *self, bool force) | |||
19 | self->fd_pipe = true; | 21 | self->fd_pipe = true; |
20 | self->fd = STDIN_FILENO; | 22 | self->fd = STDIN_FILENO; |
21 | 23 | ||
22 | if (perf_header__read(self, self->fd) < 0) | 24 | if (perf_session__read_header(self, self->fd) < 0) |
23 | pr_err("incompatible file format"); | 25 | pr_err("incompatible file format"); |
24 | 26 | ||
25 | return 0; | 27 | return 0; |
@@ -51,7 +53,7 @@ static int perf_session__open(struct perf_session *self, bool force) | |||
51 | goto out_close; | 53 | goto out_close; |
52 | } | 54 | } |
53 | 55 | ||
54 | if (perf_header__read(self, self->fd) < 0) { | 56 | if (perf_session__read_header(self, self->fd) < 0) { |
55 | pr_err("incompatible file format"); | 57 | pr_err("incompatible file format"); |
56 | goto out_close; | 58 | goto out_close; |
57 | } | 59 | } |
@@ -67,7 +69,7 @@ out_close: | |||
67 | 69 | ||
68 | static void perf_session__id_header_size(struct perf_session *session) | 70 | static void perf_session__id_header_size(struct perf_session *session) |
69 | { | 71 | { |
70 | struct sample_data *data; | 72 | struct perf_sample *data; |
71 | u64 sample_type = session->sample_type; | 73 | u64 sample_type = session->sample_type; |
72 | u16 size = 0; | 74 | u16 size = 0; |
73 | 75 | ||
@@ -92,21 +94,10 @@ out: | |||
92 | session->id_hdr_size = size; | 94 | session->id_hdr_size = size; |
93 | } | 95 | } |
94 | 96 | ||
95 | void perf_session__set_sample_id_all(struct perf_session *session, bool value) | ||
96 | { | ||
97 | session->sample_id_all = value; | ||
98 | perf_session__id_header_size(session); | ||
99 | } | ||
100 | |||
101 | void perf_session__set_sample_type(struct perf_session *session, u64 type) | ||
102 | { | ||
103 | session->sample_type = type; | ||
104 | } | ||
105 | |||
106 | void perf_session__update_sample_type(struct perf_session *self) | 97 | void perf_session__update_sample_type(struct perf_session *self) |
107 | { | 98 | { |
108 | self->sample_type = perf_header__sample_type(&self->header); | 99 | self->sample_type = perf_evlist__sample_type(self->evlist); |
109 | self->sample_id_all = perf_header__sample_id_all(&self->header); | 100 | self->sample_id_all = perf_evlist__sample_id_all(self->evlist); |
110 | perf_session__id_header_size(self); | 101 | perf_session__id_header_size(self); |
111 | } | 102 | } |
112 | 103 | ||
@@ -135,13 +126,9 @@ struct perf_session *perf_session__new(const char *filename, int mode, | |||
135 | if (self == NULL) | 126 | if (self == NULL) |
136 | goto out; | 127 | goto out; |
137 | 128 | ||
138 | if (perf_header__init(&self->header) < 0) | ||
139 | goto out_free; | ||
140 | |||
141 | memcpy(self->filename, filename, len); | 129 | memcpy(self->filename, filename, len); |
142 | self->threads = RB_ROOT; | 130 | self->threads = RB_ROOT; |
143 | INIT_LIST_HEAD(&self->dead_threads); | 131 | INIT_LIST_HEAD(&self->dead_threads); |
144 | self->hists_tree = RB_ROOT; | ||
145 | self->last_match = NULL; | 132 | self->last_match = NULL; |
146 | /* | 133 | /* |
147 | * On 64bit we can mmap the data file in one go. No need for tiny mmap | 134 | * On 64bit we can mmap the data file in one go. No need for tiny mmap |
@@ -162,17 +149,16 @@ struct perf_session *perf_session__new(const char *filename, int mode, | |||
162 | if (mode == O_RDONLY) { | 149 | if (mode == O_RDONLY) { |
163 | if (perf_session__open(self, force) < 0) | 150 | if (perf_session__open(self, force) < 0) |
164 | goto out_delete; | 151 | goto out_delete; |
152 | perf_session__update_sample_type(self); | ||
165 | } else if (mode == O_WRONLY) { | 153 | } else if (mode == O_WRONLY) { |
166 | /* | 154 | /* |
167 | * In O_RDONLY mode this will be performed when reading the | 155 | * In O_RDONLY mode this will be performed when reading the |
168 | * kernel MMAP event, in event__process_mmap(). | 156 | * kernel MMAP event, in perf_event__process_mmap(). |
169 | */ | 157 | */ |
170 | if (perf_session__create_kernel_maps(self) < 0) | 158 | if (perf_session__create_kernel_maps(self) < 0) |
171 | goto out_delete; | 159 | goto out_delete; |
172 | } | 160 | } |
173 | 161 | ||
174 | perf_session__update_sample_type(self); | ||
175 | |||
176 | if (ops && ops->ordering_requires_timestamps && | 162 | if (ops && ops->ordering_requires_timestamps && |
177 | ops->ordered_samples && !self->sample_id_all) { | 163 | ops->ordered_samples && !self->sample_id_all) { |
178 | dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n"); | 164 | dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n"); |
@@ -181,9 +167,6 @@ struct perf_session *perf_session__new(const char *filename, int mode, | |||
181 | 167 | ||
182 | out: | 168 | out: |
183 | return self; | 169 | return self; |
184 | out_free: | ||
185 | free(self); | ||
186 | return NULL; | ||
187 | out_delete: | 170 | out_delete: |
188 | perf_session__delete(self); | 171 | perf_session__delete(self); |
189 | return NULL; | 172 | return NULL; |
@@ -214,7 +197,6 @@ static void perf_session__delete_threads(struct perf_session *self) | |||
214 | 197 | ||
215 | void perf_session__delete(struct perf_session *self) | 198 | void perf_session__delete(struct perf_session *self) |
216 | { | 199 | { |
217 | perf_header__exit(&self->header); | ||
218 | perf_session__destroy_kernel_maps(self); | 200 | perf_session__destroy_kernel_maps(self); |
219 | perf_session__delete_dead_threads(self); | 201 | perf_session__delete_dead_threads(self); |
220 | perf_session__delete_threads(self); | 202 | perf_session__delete_threads(self); |
@@ -242,17 +224,16 @@ static bool symbol__match_parent_regex(struct symbol *sym) | |||
242 | return 0; | 224 | return 0; |
243 | } | 225 | } |
244 | 226 | ||
245 | struct map_symbol *perf_session__resolve_callchain(struct perf_session *self, | 227 | int perf_session__resolve_callchain(struct perf_session *self, |
246 | struct thread *thread, | 228 | struct thread *thread, |
247 | struct ip_callchain *chain, | 229 | struct ip_callchain *chain, |
248 | struct symbol **parent) | 230 | struct symbol **parent) |
249 | { | 231 | { |
250 | u8 cpumode = PERF_RECORD_MISC_USER; | 232 | u8 cpumode = PERF_RECORD_MISC_USER; |
251 | unsigned int i; | 233 | unsigned int i; |
252 | struct map_symbol *syms = calloc(chain->nr, sizeof(*syms)); | 234 | int err; |
253 | 235 | ||
254 | if (!syms) | 236 | callchain_cursor_reset(&self->callchain_cursor); |
255 | return NULL; | ||
256 | 237 | ||
257 | for (i = 0; i < chain->nr; i++) { | 238 | for (i = 0; i < chain->nr; i++) { |
258 | u64 ip = chain->ips[i]; | 239 | u64 ip = chain->ips[i]; |
@@ -281,30 +262,33 @@ struct map_symbol *perf_session__resolve_callchain(struct perf_session *self, | |||
281 | *parent = al.sym; | 262 | *parent = al.sym; |
282 | if (!symbol_conf.use_callchain) | 263 | if (!symbol_conf.use_callchain) |
283 | break; | 264 | break; |
284 | syms[i].map = al.map; | ||
285 | syms[i].sym = al.sym; | ||
286 | } | 265 | } |
266 | |||
267 | err = callchain_cursor_append(&self->callchain_cursor, | ||
268 | ip, al.map, al.sym); | ||
269 | if (err) | ||
270 | return err; | ||
287 | } | 271 | } |
288 | 272 | ||
289 | return syms; | 273 | return 0; |
290 | } | 274 | } |
291 | 275 | ||
292 | static int process_event_synth_stub(event_t *event __used, | 276 | static int process_event_synth_stub(union perf_event *event __used, |
293 | struct perf_session *session __used) | 277 | struct perf_session *session __used) |
294 | { | 278 | { |
295 | dump_printf(": unhandled!\n"); | 279 | dump_printf(": unhandled!\n"); |
296 | return 0; | 280 | return 0; |
297 | } | 281 | } |
298 | 282 | ||
299 | static int process_event_stub(event_t *event __used, | 283 | static int process_event_stub(union perf_event *event __used, |
300 | struct sample_data *sample __used, | 284 | struct perf_sample *sample __used, |
301 | struct perf_session *session __used) | 285 | struct perf_session *session __used) |
302 | { | 286 | { |
303 | dump_printf(": unhandled!\n"); | 287 | dump_printf(": unhandled!\n"); |
304 | return 0; | 288 | return 0; |
305 | } | 289 | } |
306 | 290 | ||
307 | static int process_finished_round_stub(event_t *event __used, | 291 | static int process_finished_round_stub(union perf_event *event __used, |
308 | struct perf_session *session __used, | 292 | struct perf_session *session __used, |
309 | struct perf_event_ops *ops __used) | 293 | struct perf_event_ops *ops __used) |
310 | { | 294 | { |
@@ -312,7 +296,7 @@ static int process_finished_round_stub(event_t *event __used, | |||
312 | return 0; | 296 | return 0; |
313 | } | 297 | } |
314 | 298 | ||
315 | static int process_finished_round(event_t *event, | 299 | static int process_finished_round(union perf_event *event, |
316 | struct perf_session *session, | 300 | struct perf_session *session, |
317 | struct perf_event_ops *ops); | 301 | struct perf_event_ops *ops); |
318 | 302 | ||
@@ -329,7 +313,7 @@ static void perf_event_ops__fill_defaults(struct perf_event_ops *handler) | |||
329 | if (handler->exit == NULL) | 313 | if (handler->exit == NULL) |
330 | handler->exit = process_event_stub; | 314 | handler->exit = process_event_stub; |
331 | if (handler->lost == NULL) | 315 | if (handler->lost == NULL) |
332 | handler->lost = event__process_lost; | 316 | handler->lost = perf_event__process_lost; |
333 | if (handler->read == NULL) | 317 | if (handler->read == NULL) |
334 | handler->read = process_event_stub; | 318 | handler->read = process_event_stub; |
335 | if (handler->throttle == NULL) | 319 | if (handler->throttle == NULL) |
@@ -363,98 +347,98 @@ void mem_bswap_64(void *src, int byte_size) | |||
363 | } | 347 | } |
364 | } | 348 | } |
365 | 349 | ||
366 | static void event__all64_swap(event_t *self) | 350 | static void perf_event__all64_swap(union perf_event *event) |
367 | { | 351 | { |
368 | struct perf_event_header *hdr = &self->header; | 352 | struct perf_event_header *hdr = &event->header; |
369 | mem_bswap_64(hdr + 1, self->header.size - sizeof(*hdr)); | 353 | mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr)); |
370 | } | 354 | } |
371 | 355 | ||
372 | static void event__comm_swap(event_t *self) | 356 | static void perf_event__comm_swap(union perf_event *event) |
373 | { | 357 | { |
374 | self->comm.pid = bswap_32(self->comm.pid); | 358 | event->comm.pid = bswap_32(event->comm.pid); |
375 | self->comm.tid = bswap_32(self->comm.tid); | 359 | event->comm.tid = bswap_32(event->comm.tid); |
376 | } | 360 | } |
377 | 361 | ||
378 | static void event__mmap_swap(event_t *self) | 362 | static void perf_event__mmap_swap(union perf_event *event) |
379 | { | 363 | { |
380 | self->mmap.pid = bswap_32(self->mmap.pid); | 364 | event->mmap.pid = bswap_32(event->mmap.pid); |
381 | self->mmap.tid = bswap_32(self->mmap.tid); | 365 | event->mmap.tid = bswap_32(event->mmap.tid); |
382 | self->mmap.start = bswap_64(self->mmap.start); | 366 | event->mmap.start = bswap_64(event->mmap.start); |
383 | self->mmap.len = bswap_64(self->mmap.len); | 367 | event->mmap.len = bswap_64(event->mmap.len); |
384 | self->mmap.pgoff = bswap_64(self->mmap.pgoff); | 368 | event->mmap.pgoff = bswap_64(event->mmap.pgoff); |
385 | } | 369 | } |
386 | 370 | ||
387 | static void event__task_swap(event_t *self) | 371 | static void perf_event__task_swap(union perf_event *event) |
388 | { | 372 | { |
389 | self->fork.pid = bswap_32(self->fork.pid); | 373 | event->fork.pid = bswap_32(event->fork.pid); |
390 | self->fork.tid = bswap_32(self->fork.tid); | 374 | event->fork.tid = bswap_32(event->fork.tid); |
391 | self->fork.ppid = bswap_32(self->fork.ppid); | 375 | event->fork.ppid = bswap_32(event->fork.ppid); |
392 | self->fork.ptid = bswap_32(self->fork.ptid); | 376 | event->fork.ptid = bswap_32(event->fork.ptid); |
393 | self->fork.time = bswap_64(self->fork.time); | 377 | event->fork.time = bswap_64(event->fork.time); |
394 | } | 378 | } |
395 | 379 | ||
396 | static void event__read_swap(event_t *self) | 380 | static void perf_event__read_swap(union perf_event *event) |
397 | { | 381 | { |
398 | self->read.pid = bswap_32(self->read.pid); | 382 | event->read.pid = bswap_32(event->read.pid); |
399 | self->read.tid = bswap_32(self->read.tid); | 383 | event->read.tid = bswap_32(event->read.tid); |
400 | self->read.value = bswap_64(self->read.value); | 384 | event->read.value = bswap_64(event->read.value); |
401 | self->read.time_enabled = bswap_64(self->read.time_enabled); | 385 | event->read.time_enabled = bswap_64(event->read.time_enabled); |
402 | self->read.time_running = bswap_64(self->read.time_running); | 386 | event->read.time_running = bswap_64(event->read.time_running); |
403 | self->read.id = bswap_64(self->read.id); | 387 | event->read.id = bswap_64(event->read.id); |
404 | } | 388 | } |
405 | 389 | ||
406 | static void event__attr_swap(event_t *self) | 390 | static void perf_event__attr_swap(union perf_event *event) |
407 | { | 391 | { |
408 | size_t size; | 392 | size_t size; |
409 | 393 | ||
410 | self->attr.attr.type = bswap_32(self->attr.attr.type); | 394 | event->attr.attr.type = bswap_32(event->attr.attr.type); |
411 | self->attr.attr.size = bswap_32(self->attr.attr.size); | 395 | event->attr.attr.size = bswap_32(event->attr.attr.size); |
412 | self->attr.attr.config = bswap_64(self->attr.attr.config); | 396 | event->attr.attr.config = bswap_64(event->attr.attr.config); |
413 | self->attr.attr.sample_period = bswap_64(self->attr.attr.sample_period); | 397 | event->attr.attr.sample_period = bswap_64(event->attr.attr.sample_period); |
414 | self->attr.attr.sample_type = bswap_64(self->attr.attr.sample_type); | 398 | event->attr.attr.sample_type = bswap_64(event->attr.attr.sample_type); |
415 | self->attr.attr.read_format = bswap_64(self->attr.attr.read_format); | 399 | event->attr.attr.read_format = bswap_64(event->attr.attr.read_format); |
416 | self->attr.attr.wakeup_events = bswap_32(self->attr.attr.wakeup_events); | 400 | event->attr.attr.wakeup_events = bswap_32(event->attr.attr.wakeup_events); |
417 | self->attr.attr.bp_type = bswap_32(self->attr.attr.bp_type); | 401 | event->attr.attr.bp_type = bswap_32(event->attr.attr.bp_type); |
418 | self->attr.attr.bp_addr = bswap_64(self->attr.attr.bp_addr); | 402 | event->attr.attr.bp_addr = bswap_64(event->attr.attr.bp_addr); |
419 | self->attr.attr.bp_len = bswap_64(self->attr.attr.bp_len); | 403 | event->attr.attr.bp_len = bswap_64(event->attr.attr.bp_len); |
420 | 404 | ||
421 | size = self->header.size; | 405 | size = event->header.size; |
422 | size -= (void *)&self->attr.id - (void *)self; | 406 | size -= (void *)&event->attr.id - (void *)event; |
423 | mem_bswap_64(self->attr.id, size); | 407 | mem_bswap_64(event->attr.id, size); |
424 | } | 408 | } |
425 | 409 | ||
426 | static void event__event_type_swap(event_t *self) | 410 | static void perf_event__event_type_swap(union perf_event *event) |
427 | { | 411 | { |
428 | self->event_type.event_type.event_id = | 412 | event->event_type.event_type.event_id = |
429 | bswap_64(self->event_type.event_type.event_id); | 413 | bswap_64(event->event_type.event_type.event_id); |
430 | } | 414 | } |
431 | 415 | ||
432 | static void event__tracing_data_swap(event_t *self) | 416 | static void perf_event__tracing_data_swap(union perf_event *event) |
433 | { | 417 | { |
434 | self->tracing_data.size = bswap_32(self->tracing_data.size); | 418 | event->tracing_data.size = bswap_32(event->tracing_data.size); |
435 | } | 419 | } |
436 | 420 | ||
437 | typedef void (*event__swap_op)(event_t *self); | 421 | typedef void (*perf_event__swap_op)(union perf_event *event); |
438 | 422 | ||
439 | static event__swap_op event__swap_ops[] = { | 423 | static perf_event__swap_op perf_event__swap_ops[] = { |
440 | [PERF_RECORD_MMAP] = event__mmap_swap, | 424 | [PERF_RECORD_MMAP] = perf_event__mmap_swap, |
441 | [PERF_RECORD_COMM] = event__comm_swap, | 425 | [PERF_RECORD_COMM] = perf_event__comm_swap, |
442 | [PERF_RECORD_FORK] = event__task_swap, | 426 | [PERF_RECORD_FORK] = perf_event__task_swap, |
443 | [PERF_RECORD_EXIT] = event__task_swap, | 427 | [PERF_RECORD_EXIT] = perf_event__task_swap, |
444 | [PERF_RECORD_LOST] = event__all64_swap, | 428 | [PERF_RECORD_LOST] = perf_event__all64_swap, |
445 | [PERF_RECORD_READ] = event__read_swap, | 429 | [PERF_RECORD_READ] = perf_event__read_swap, |
446 | [PERF_RECORD_SAMPLE] = event__all64_swap, | 430 | [PERF_RECORD_SAMPLE] = perf_event__all64_swap, |
447 | [PERF_RECORD_HEADER_ATTR] = event__attr_swap, | 431 | [PERF_RECORD_HEADER_ATTR] = perf_event__attr_swap, |
448 | [PERF_RECORD_HEADER_EVENT_TYPE] = event__event_type_swap, | 432 | [PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap, |
449 | [PERF_RECORD_HEADER_TRACING_DATA] = event__tracing_data_swap, | 433 | [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap, |
450 | [PERF_RECORD_HEADER_BUILD_ID] = NULL, | 434 | [PERF_RECORD_HEADER_BUILD_ID] = NULL, |
451 | [PERF_RECORD_HEADER_MAX] = NULL, | 435 | [PERF_RECORD_HEADER_MAX] = NULL, |
452 | }; | 436 | }; |
453 | 437 | ||
454 | struct sample_queue { | 438 | struct sample_queue { |
455 | u64 timestamp; | 439 | u64 timestamp; |
456 | u64 file_offset; | 440 | u64 file_offset; |
457 | event_t *event; | 441 | union perf_event *event; |
458 | struct list_head list; | 442 | struct list_head list; |
459 | }; | 443 | }; |
460 | 444 | ||
@@ -472,8 +456,8 @@ static void perf_session_free_sample_buffers(struct perf_session *session) | |||
472 | } | 456 | } |
473 | 457 | ||
474 | static int perf_session_deliver_event(struct perf_session *session, | 458 | static int perf_session_deliver_event(struct perf_session *session, |
475 | event_t *event, | 459 | union perf_event *event, |
476 | struct sample_data *sample, | 460 | struct perf_sample *sample, |
477 | struct perf_event_ops *ops, | 461 | struct perf_event_ops *ops, |
478 | u64 file_offset); | 462 | u64 file_offset); |
479 | 463 | ||
@@ -483,7 +467,7 @@ static void flush_sample_queue(struct perf_session *s, | |||
483 | struct ordered_samples *os = &s->ordered_samples; | 467 | struct ordered_samples *os = &s->ordered_samples; |
484 | struct list_head *head = &os->samples; | 468 | struct list_head *head = &os->samples; |
485 | struct sample_queue *tmp, *iter; | 469 | struct sample_queue *tmp, *iter; |
486 | struct sample_data sample; | 470 | struct perf_sample sample; |
487 | u64 limit = os->next_flush; | 471 | u64 limit = os->next_flush; |
488 | u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL; | 472 | u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL; |
489 | 473 | ||
@@ -494,7 +478,7 @@ static void flush_sample_queue(struct perf_session *s, | |||
494 | if (iter->timestamp > limit) | 478 | if (iter->timestamp > limit) |
495 | break; | 479 | break; |
496 | 480 | ||
497 | event__parse_sample(iter->event, s, &sample); | 481 | perf_session__parse_sample(s, iter->event, &sample); |
498 | perf_session_deliver_event(s, iter->event, &sample, ops, | 482 | perf_session_deliver_event(s, iter->event, &sample, ops, |
499 | iter->file_offset); | 483 | iter->file_offset); |
500 | 484 | ||
@@ -550,7 +534,7 @@ static void flush_sample_queue(struct perf_session *s, | |||
550 | * Flush every events below timestamp 7 | 534 | * Flush every events below timestamp 7 |
551 | * etc... | 535 | * etc... |
552 | */ | 536 | */ |
553 | static int process_finished_round(event_t *event __used, | 537 | static int process_finished_round(union perf_event *event __used, |
554 | struct perf_session *session, | 538 | struct perf_session *session, |
555 | struct perf_event_ops *ops) | 539 | struct perf_event_ops *ops) |
556 | { | 540 | { |
@@ -607,12 +591,12 @@ static void __queue_event(struct sample_queue *new, struct perf_session *s) | |||
607 | 591 | ||
608 | #define MAX_SAMPLE_BUFFER (64 * 1024 / sizeof(struct sample_queue)) | 592 | #define MAX_SAMPLE_BUFFER (64 * 1024 / sizeof(struct sample_queue)) |
609 | 593 | ||
610 | static int perf_session_queue_event(struct perf_session *s, event_t *event, | 594 | static int perf_session_queue_event(struct perf_session *s, union perf_event *event, |
611 | struct sample_data *data, u64 file_offset) | 595 | struct perf_sample *sample, u64 file_offset) |
612 | { | 596 | { |
613 | struct ordered_samples *os = &s->ordered_samples; | 597 | struct ordered_samples *os = &s->ordered_samples; |
614 | struct list_head *sc = &os->sample_cache; | 598 | struct list_head *sc = &os->sample_cache; |
615 | u64 timestamp = data->time; | 599 | u64 timestamp = sample->time; |
616 | struct sample_queue *new; | 600 | struct sample_queue *new; |
617 | 601 | ||
618 | if (!timestamp || timestamp == ~0ULL) | 602 | if (!timestamp || timestamp == ~0ULL) |
@@ -648,7 +632,7 @@ static int perf_session_queue_event(struct perf_session *s, event_t *event, | |||
648 | return 0; | 632 | return 0; |
649 | } | 633 | } |
650 | 634 | ||
651 | static void callchain__printf(struct sample_data *sample) | 635 | static void callchain__printf(struct perf_sample *sample) |
652 | { | 636 | { |
653 | unsigned int i; | 637 | unsigned int i; |
654 | 638 | ||
@@ -660,8 +644,8 @@ static void callchain__printf(struct sample_data *sample) | |||
660 | } | 644 | } |
661 | 645 | ||
662 | static void perf_session__print_tstamp(struct perf_session *session, | 646 | static void perf_session__print_tstamp(struct perf_session *session, |
663 | event_t *event, | 647 | union perf_event *event, |
664 | struct sample_data *sample) | 648 | struct perf_sample *sample) |
665 | { | 649 | { |
666 | if (event->header.type != PERF_RECORD_SAMPLE && | 650 | if (event->header.type != PERF_RECORD_SAMPLE && |
667 | !session->sample_id_all) { | 651 | !session->sample_id_all) { |
@@ -676,8 +660,8 @@ static void perf_session__print_tstamp(struct perf_session *session, | |||
676 | printf("%" PRIu64 " ", sample->time); | 660 | printf("%" PRIu64 " ", sample->time); |
677 | } | 661 | } |
678 | 662 | ||
679 | static void dump_event(struct perf_session *session, event_t *event, | 663 | static void dump_event(struct perf_session *session, union perf_event *event, |
680 | u64 file_offset, struct sample_data *sample) | 664 | u64 file_offset, struct perf_sample *sample) |
681 | { | 665 | { |
682 | if (!dump_trace) | 666 | if (!dump_trace) |
683 | return; | 667 | return; |
@@ -691,11 +675,11 @@ static void dump_event(struct perf_session *session, event_t *event, | |||
691 | perf_session__print_tstamp(session, event, sample); | 675 | perf_session__print_tstamp(session, event, sample); |
692 | 676 | ||
693 | printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset, | 677 | printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset, |
694 | event->header.size, event__get_event_name(event->header.type)); | 678 | event->header.size, perf_event__name(event->header.type)); |
695 | } | 679 | } |
696 | 680 | ||
697 | static void dump_sample(struct perf_session *session, event_t *event, | 681 | static void dump_sample(struct perf_session *session, union perf_event *event, |
698 | struct sample_data *sample) | 682 | struct perf_sample *sample) |
699 | { | 683 | { |
700 | if (!dump_trace) | 684 | if (!dump_trace) |
701 | return; | 685 | return; |
@@ -709,8 +693,8 @@ static void dump_sample(struct perf_session *session, event_t *event, | |||
709 | } | 693 | } |
710 | 694 | ||
711 | static int perf_session_deliver_event(struct perf_session *session, | 695 | static int perf_session_deliver_event(struct perf_session *session, |
712 | event_t *event, | 696 | union perf_event *event, |
713 | struct sample_data *sample, | 697 | struct perf_sample *sample, |
714 | struct perf_event_ops *ops, | 698 | struct perf_event_ops *ops, |
715 | u64 file_offset) | 699 | u64 file_offset) |
716 | { | 700 | { |
@@ -743,7 +727,7 @@ static int perf_session_deliver_event(struct perf_session *session, | |||
743 | } | 727 | } |
744 | 728 | ||
745 | static int perf_session__preprocess_sample(struct perf_session *session, | 729 | static int perf_session__preprocess_sample(struct perf_session *session, |
746 | event_t *event, struct sample_data *sample) | 730 | union perf_event *event, struct perf_sample *sample) |
747 | { | 731 | { |
748 | if (event->header.type != PERF_RECORD_SAMPLE || | 732 | if (event->header.type != PERF_RECORD_SAMPLE || |
749 | !(session->sample_type & PERF_SAMPLE_CALLCHAIN)) | 733 | !(session->sample_type & PERF_SAMPLE_CALLCHAIN)) |
@@ -758,7 +742,7 @@ static int perf_session__preprocess_sample(struct perf_session *session, | |||
758 | return 0; | 742 | return 0; |
759 | } | 743 | } |
760 | 744 | ||
761 | static int perf_session__process_user_event(struct perf_session *session, event_t *event, | 745 | static int perf_session__process_user_event(struct perf_session *session, union perf_event *event, |
762 | struct perf_event_ops *ops, u64 file_offset) | 746 | struct perf_event_ops *ops, u64 file_offset) |
763 | { | 747 | { |
764 | dump_event(session, event, file_offset, NULL); | 748 | dump_event(session, event, file_offset, NULL); |
@@ -783,15 +767,16 @@ static int perf_session__process_user_event(struct perf_session *session, event_ | |||
783 | } | 767 | } |
784 | 768 | ||
785 | static int perf_session__process_event(struct perf_session *session, | 769 | static int perf_session__process_event(struct perf_session *session, |
786 | event_t *event, | 770 | union perf_event *event, |
787 | struct perf_event_ops *ops, | 771 | struct perf_event_ops *ops, |
788 | u64 file_offset) | 772 | u64 file_offset) |
789 | { | 773 | { |
790 | struct sample_data sample; | 774 | struct perf_sample sample; |
791 | int ret; | 775 | int ret; |
792 | 776 | ||
793 | if (session->header.needs_swap && event__swap_ops[event->header.type]) | 777 | if (session->header.needs_swap && |
794 | event__swap_ops[event->header.type](event); | 778 | perf_event__swap_ops[event->header.type]) |
779 | perf_event__swap_ops[event->header.type](event); | ||
795 | 780 | ||
796 | if (event->header.type >= PERF_RECORD_HEADER_MAX) | 781 | if (event->header.type >= PERF_RECORD_HEADER_MAX) |
797 | return -EINVAL; | 782 | return -EINVAL; |
@@ -804,7 +789,7 @@ static int perf_session__process_event(struct perf_session *session, | |||
804 | /* | 789 | /* |
805 | * For all kernel events we get the sample data | 790 | * For all kernel events we get the sample data |
806 | */ | 791 | */ |
807 | event__parse_sample(event, session, &sample); | 792 | perf_session__parse_sample(session, event, &sample); |
808 | 793 | ||
809 | /* Preprocess sample records - precheck callchains */ | 794 | /* Preprocess sample records - precheck callchains */ |
810 | if (perf_session__preprocess_sample(session, event, &sample)) | 795 | if (perf_session__preprocess_sample(session, event, &sample)) |
@@ -843,7 +828,7 @@ static struct thread *perf_session__register_idle_thread(struct perf_session *se | |||
843 | static void perf_session__warn_about_errors(const struct perf_session *session, | 828 | static void perf_session__warn_about_errors(const struct perf_session *session, |
844 | const struct perf_event_ops *ops) | 829 | const struct perf_event_ops *ops) |
845 | { | 830 | { |
846 | if (ops->lost == event__process_lost && | 831 | if (ops->lost == perf_event__process_lost && |
847 | session->hists.stats.total_lost != 0) { | 832 | session->hists.stats.total_lost != 0) { |
848 | ui__warning("Processed %" PRIu64 " events and LOST %" PRIu64 | 833 | ui__warning("Processed %" PRIu64 " events and LOST %" PRIu64 |
849 | "!\n\nCheck IO/CPU overload!\n\n", | 834 | "!\n\nCheck IO/CPU overload!\n\n", |
@@ -875,7 +860,7 @@ volatile int session_done; | |||
875 | static int __perf_session__process_pipe_events(struct perf_session *self, | 860 | static int __perf_session__process_pipe_events(struct perf_session *self, |
876 | struct perf_event_ops *ops) | 861 | struct perf_event_ops *ops) |
877 | { | 862 | { |
878 | event_t event; | 863 | union perf_event event; |
879 | uint32_t size; | 864 | uint32_t size; |
880 | int skip = 0; | 865 | int skip = 0; |
881 | u64 head; | 866 | u64 head; |
@@ -956,7 +941,7 @@ int __perf_session__process_events(struct perf_session *session, | |||
956 | struct ui_progress *progress; | 941 | struct ui_progress *progress; |
957 | size_t page_size, mmap_size; | 942 | size_t page_size, mmap_size; |
958 | char *buf, *mmaps[8]; | 943 | char *buf, *mmaps[8]; |
959 | event_t *event; | 944 | union perf_event *event; |
960 | uint32_t size; | 945 | uint32_t size; |
961 | 946 | ||
962 | perf_event_ops__fill_defaults(ops); | 947 | perf_event_ops__fill_defaults(ops); |
@@ -1001,7 +986,7 @@ remap: | |||
1001 | file_pos = file_offset + head; | 986 | file_pos = file_offset + head; |
1002 | 987 | ||
1003 | more: | 988 | more: |
1004 | event = (event_t *)(buf + head); | 989 | event = (union perf_event *)(buf + head); |
1005 | 990 | ||
1006 | if (session->header.needs_swap) | 991 | if (session->header.needs_swap) |
1007 | perf_event_header__bswap(&event->header); | 992 | perf_event_header__bswap(&event->header); |
@@ -1134,3 +1119,18 @@ size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp, | |||
1134 | size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits); | 1119 | size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits); |
1135 | return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits); | 1120 | return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits); |
1136 | } | 1121 | } |
1122 | |||
1123 | size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp) | ||
1124 | { | ||
1125 | struct perf_evsel *pos; | ||
1126 | size_t ret = fprintf(fp, "Aggregated stats:\n"); | ||
1127 | |||
1128 | ret += hists__fprintf_nr_events(&session->hists, fp); | ||
1129 | |||
1130 | list_for_each_entry(pos, &session->evlist->entries, node) { | ||
1131 | ret += fprintf(fp, "%s stats:\n", event_name(pos)); | ||
1132 | ret += hists__fprintf_nr_events(&pos->hists, fp); | ||
1133 | } | ||
1134 | |||
1135 | return ret; | ||
1136 | } | ||
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h index decd83f274fd..b5b148b0aaca 100644 --- a/tools/perf/util/session.h +++ b/tools/perf/util/session.h | |||
@@ -34,12 +34,12 @@ struct perf_session { | |||
34 | struct thread *last_match; | 34 | struct thread *last_match; |
35 | struct machine host_machine; | 35 | struct machine host_machine; |
36 | struct rb_root machines; | 36 | struct rb_root machines; |
37 | struct rb_root hists_tree; | 37 | struct perf_evlist *evlist; |
38 | /* | 38 | /* |
39 | * FIXME: should point to the first entry in hists_tree and | 39 | * FIXME: Need to split this up further, we need global |
40 | * be a hists instance. Right now its only 'report' | 40 | * stats + per event stats. 'perf diff' also needs |
41 | * that is using ->hists_tree while all the rest use | 41 | * to properly support multiple events in a single |
42 | * ->hists. | 42 | * perf.data file. |
43 | */ | 43 | */ |
44 | struct hists hists; | 44 | struct hists hists; |
45 | u64 sample_type; | 45 | u64 sample_type; |
@@ -51,15 +51,17 @@ struct perf_session { | |||
51 | int cwdlen; | 51 | int cwdlen; |
52 | char *cwd; | 52 | char *cwd; |
53 | struct ordered_samples ordered_samples; | 53 | struct ordered_samples ordered_samples; |
54 | char filename[0]; | 54 | struct callchain_cursor callchain_cursor; |
55 | char filename[0]; | ||
55 | }; | 56 | }; |
56 | 57 | ||
57 | struct perf_event_ops; | 58 | struct perf_event_ops; |
58 | 59 | ||
59 | typedef int (*event_op)(event_t *self, struct sample_data *sample, | 60 | typedef int (*event_op)(union perf_event *self, struct perf_sample *sample, |
60 | struct perf_session *session); | 61 | struct perf_session *session); |
61 | typedef int (*event_synth_op)(event_t *self, struct perf_session *session); | 62 | typedef int (*event_synth_op)(union perf_event *self, |
62 | typedef int (*event_op2)(event_t *self, struct perf_session *session, | 63 | struct perf_session *session); |
64 | typedef int (*event_op2)(union perf_event *self, struct perf_session *session, | ||
63 | struct perf_event_ops *ops); | 65 | struct perf_event_ops *ops); |
64 | 66 | ||
65 | struct perf_event_ops { | 67 | struct perf_event_ops { |
@@ -94,10 +96,10 @@ int __perf_session__process_events(struct perf_session *self, | |||
94 | int perf_session__process_events(struct perf_session *self, | 96 | int perf_session__process_events(struct perf_session *self, |
95 | struct perf_event_ops *event_ops); | 97 | struct perf_event_ops *event_ops); |
96 | 98 | ||
97 | struct map_symbol *perf_session__resolve_callchain(struct perf_session *self, | 99 | int perf_session__resolve_callchain(struct perf_session *self, |
98 | struct thread *thread, | 100 | struct thread *thread, |
99 | struct ip_callchain *chain, | 101 | struct ip_callchain *chain, |
100 | struct symbol **parent); | 102 | struct symbol **parent); |
101 | 103 | ||
102 | bool perf_session__has_traces(struct perf_session *self, const char *msg); | 104 | bool perf_session__has_traces(struct perf_session *self, const char *msg); |
103 | 105 | ||
@@ -110,8 +112,6 @@ void mem_bswap_64(void *src, int byte_size); | |||
110 | int perf_session__create_kernel_maps(struct perf_session *self); | 112 | int perf_session__create_kernel_maps(struct perf_session *self); |
111 | 113 | ||
112 | void perf_session__update_sample_type(struct perf_session *self); | 114 | void perf_session__update_sample_type(struct perf_session *self); |
113 | void perf_session__set_sample_id_all(struct perf_session *session, bool value); | ||
114 | void perf_session__set_sample_type(struct perf_session *session, u64 type); | ||
115 | void perf_session__remove_thread(struct perf_session *self, struct thread *th); | 115 | void perf_session__remove_thread(struct perf_session *self, struct thread *th); |
116 | 116 | ||
117 | static inline | 117 | static inline |
@@ -149,9 +149,14 @@ size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp); | |||
149 | size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, | 149 | size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, |
150 | FILE *fp, bool with_hits); | 150 | FILE *fp, bool with_hits); |
151 | 151 | ||
152 | static inline | 152 | size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp); |
153 | size_t perf_session__fprintf_nr_events(struct perf_session *self, FILE *fp) | 153 | |
154 | static inline int perf_session__parse_sample(struct perf_session *session, | ||
155 | const union perf_event *event, | ||
156 | struct perf_sample *sample) | ||
154 | { | 157 | { |
155 | return hists__fprintf_nr_events(&self->hists, fp); | 158 | return perf_event__parse_sample(event, session->sample_type, |
159 | session->sample_id_all, sample); | ||
156 | } | 160 | } |
161 | |||
157 | #endif /* __PERF_SESSION_H */ | 162 | #endif /* __PERF_SESSION_H */ |
diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py new file mode 100644 index 000000000000..e24ffadb20b2 --- /dev/null +++ b/tools/perf/util/setup.py | |||
@@ -0,0 +1,19 @@ | |||
1 | #!/usr/bin/python2 | ||
2 | |||
3 | from distutils.core import setup, Extension | ||
4 | |||
5 | perf = Extension('perf', | ||
6 | sources = ['util/python.c', 'util/ctype.c', 'util/evlist.c', | ||
7 | 'util/evsel.c', 'util/cpumap.c', 'util/thread_map.c', | ||
8 | 'util/util.c', 'util/xyarray.c', 'util/cgroup.c'], | ||
9 | include_dirs = ['util/include'], | ||
10 | extra_compile_args = ['-fno-strict-aliasing', '-Wno-write-strings']) | ||
11 | |||
12 | setup(name='perf', | ||
13 | version='0.1', | ||
14 | description='Interface with the Linux profiling infrastructure', | ||
15 | author='Arnaldo Carvalho de Melo', | ||
16 | author_email='acme@redhat.com', | ||
17 | license='GPLv2', | ||
18 | url='http://perf.wiki.kernel.org', | ||
19 | ext_modules=[perf]) | ||
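
This setup.py is what turns util/python.c (above) into the importable "perf" module used in the earlier sketch. A hedged sketch of building it by hand; the perf Makefile presumably drives distutils with its own --build-lib/--build-temp paths, so the exact invocation below is illustrative only.

```python
#!/usr/bin/python2
# Build the 'perf' extension so that 'import perf' works from the build
# directory. Run from the tools/perf/ directory; paths/options are
# illustrative, not the Makefile's exact invocation.
import subprocess

subprocess.check_call(["python2", "util/setup.py",
                       "build_ext", "--build-lib", "python"])
```
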
diff --git a/tools/perf/util/strfilter.c b/tools/perf/util/strfilter.c new file mode 100644 index 000000000000..834c8ebfe38e --- /dev/null +++ b/tools/perf/util/strfilter.c | |||
@@ -0,0 +1,199 @@ | |||
1 | #include "util.h" | ||
2 | #include "string.h" | ||
3 | #include "strfilter.h" | ||
4 | |||
5 | /* Operators */ | ||
6 | static const char *OP_and = "&"; /* Logical AND */ | ||
7 | static const char *OP_or = "|"; /* Logical OR */ | ||
8 | static const char *OP_not = "!"; /* Logical NOT */ | ||
9 | |||
10 | #define is_operator(c) ((c) == '|' || (c) == '&' || (c) == '!') | ||
11 | #define is_separator(c) (is_operator(c) || (c) == '(' || (c) == ')') | ||
12 | |||
13 | static void strfilter_node__delete(struct strfilter_node *self) | ||
14 | { | ||
15 | if (self) { | ||
16 | if (self->p && !is_operator(*self->p)) | ||
17 | free((char *)self->p); | ||
18 | strfilter_node__delete(self->l); | ||
19 | strfilter_node__delete(self->r); | ||
20 | free(self); | ||
21 | } | ||
22 | } | ||
23 | |||
24 | void strfilter__delete(struct strfilter *self) | ||
25 | { | ||
26 | if (self) { | ||
27 | strfilter_node__delete(self->root); | ||
28 | free(self); | ||
29 | } | ||
30 | } | ||
31 | |||
32 | static const char *get_token(const char *s, const char **e) | ||
33 | { | ||
34 | const char *p; | ||
35 | |||
36 | while (isspace(*s)) /* Skip spaces */ | ||
37 | s++; | ||
38 | |||
39 | if (*s == '\0') { | ||
40 | p = s; | ||
41 | goto end; | ||
42 | } | ||
43 | |||
44 | p = s + 1; | ||
45 | if (!is_separator(*s)) { | ||
46 | /* End search */ | ||
47 | retry: | ||
48 | while (*p && !is_separator(*p) && !isspace(*p)) | ||
49 | p++; | ||
50 | /* Escape and special case: '!' is also used in glob pattern */ | ||
51 | if (*(p - 1) == '\\' || (*p == '!' && *(p - 1) == '[')) { | ||
52 | p++; | ||
53 | goto retry; | ||
54 | } | ||
55 | } | ||
56 | end: | ||
57 | *e = p; | ||
58 | return s; | ||
59 | } | ||
60 | |||
61 | static struct strfilter_node *strfilter_node__alloc(const char *op, | ||
62 | struct strfilter_node *l, | ||
63 | struct strfilter_node *r) | ||
64 | { | ||
65 | struct strfilter_node *ret = zalloc(sizeof(struct strfilter_node)); | ||
66 | |||
67 | if (ret) { | ||
68 | ret->p = op; | ||
69 | ret->l = l; | ||
70 | ret->r = r; | ||
71 | } | ||
72 | |||
73 | return ret; | ||
74 | } | ||
75 | |||
76 | static struct strfilter_node *strfilter_node__new(const char *s, | ||
77 | const char **ep) | ||
78 | { | ||
79 | struct strfilter_node root, *cur, *last_op; | ||
80 | const char *e; | ||
81 | |||
82 | if (!s) | ||
83 | return NULL; | ||
84 | |||
85 | memset(&root, 0, sizeof(root)); | ||
86 | last_op = cur = &root; | ||
87 | |||
88 | s = get_token(s, &e); | ||
89 | while (*s != '\0' && *s != ')') { | ||
90 | switch (*s) { | ||
91 | case '&': /* Exchg last OP->r with AND */ | ||
92 | if (!cur->r || !last_op->r) | ||
93 | goto error; | ||
94 | cur = strfilter_node__alloc(OP_and, last_op->r, NULL); | ||
95 | if (!cur) | ||
96 | goto nomem; | ||
97 | last_op->r = cur; | ||
98 | last_op = cur; | ||
99 | break; | ||
100 | case '|': /* Exchg the root with OR */ | ||
101 | if (!cur->r || !root.r) | ||
102 | goto error; | ||
103 | cur = strfilter_node__alloc(OP_or, root.r, NULL); | ||
104 | if (!cur) | ||
105 | goto nomem; | ||
106 | root.r = cur; | ||
107 | last_op = cur; | ||
108 | break; | ||
109 | case '!': /* Add NOT as a leaf node */ | ||
110 | if (cur->r) | ||
111 | goto error; | ||
112 | cur->r = strfilter_node__alloc(OP_not, NULL, NULL); | ||
113 | if (!cur->r) | ||
114 | goto nomem; | ||
115 | cur = cur->r; | ||
116 | break; | ||
117 | case '(': /* Recursively parses inside the parenthesis */ | ||
118 | if (cur->r) | ||
119 | goto error; | ||
120 | cur->r = strfilter_node__new(s + 1, &s); | ||
121 | if (!s) | ||
122 | goto nomem; | ||
123 | if (!cur->r || *s != ')') | ||
124 | goto error; | ||
125 | e = s + 1; | ||
126 | break; | ||
127 | default: | ||
128 | if (cur->r) | ||
129 | goto error; | ||
130 | cur->r = strfilter_node__alloc(NULL, NULL, NULL); | ||
131 | if (!cur->r) | ||
132 | goto nomem; | ||
133 | cur->r->p = strndup(s, e - s); | ||
134 | if (!cur->r->p) | ||
135 | goto nomem; | ||
136 | } | ||
137 | s = get_token(e, &e); | ||
138 | } | ||
139 | if (!cur->r) | ||
140 | goto error; | ||
141 | *ep = s; | ||
142 | return root.r; | ||
143 | nomem: | ||
144 | s = NULL; | ||
145 | error: | ||
146 | *ep = s; | ||
147 | strfilter_node__delete(root.r); | ||
148 | return NULL; | ||
149 | } | ||
150 | |||
151 | /* | ||
152 | * Parse a filter rule and return a new strfilter. | ||
153 | * Return NULL on failure; *ep == NULL if memory allocation failed. | ||
154 | */ | ||
155 | struct strfilter *strfilter__new(const char *rules, const char **err) | ||
156 | { | ||
157 | struct strfilter *ret = zalloc(sizeof(struct strfilter)); | ||
158 | const char *ep = NULL; | ||
159 | |||
160 | if (ret) | ||
161 | ret->root = strfilter_node__new(rules, &ep); | ||
162 | |||
163 | if (!ret || !ret->root || *ep != '\0') { | ||
164 | if (err) | ||
165 | *err = ep; | ||
166 | strfilter__delete(ret); | ||
167 | ret = NULL; | ||
168 | } | ||
169 | |||
170 | return ret; | ||
171 | } | ||
172 | |||
173 | static bool strfilter_node__compare(struct strfilter_node *self, | ||
174 | const char *str) | ||
175 | { | ||
176 | if (!self || !self->p) | ||
177 | return false; | ||
178 | |||
179 | switch (*self->p) { | ||
180 | case '|': /* OR */ | ||
181 | return strfilter_node__compare(self->l, str) || | ||
182 | strfilter_node__compare(self->r, str); | ||
183 | case '&': /* AND */ | ||
184 | return strfilter_node__compare(self->l, str) && | ||
185 | strfilter_node__compare(self->r, str); | ||
186 | case '!': /* NOT */ | ||
187 | return !strfilter_node__compare(self->r, str); | ||
188 | default: | ||
189 | return strglobmatch(str, self->p); | ||
190 | } | ||
191 | } | ||
192 | |||
193 | /* Return true if STR matches the filter rules */ | ||
194 | bool strfilter__compare(struct strfilter *self, const char *str) | ||
195 | { | ||
196 | if (!self) | ||
197 | return false; | ||
198 | return strfilter_node__compare(self->root, str); | ||
199 | } | ||
diff --git a/tools/perf/util/strfilter.h b/tools/perf/util/strfilter.h new file mode 100644 index 000000000000..00f58a7506de --- /dev/null +++ b/tools/perf/util/strfilter.h | |||
@@ -0,0 +1,48 @@ | |||
1 | #ifndef __PERF_STRFILTER_H | ||
2 | #define __PERF_STRFILTER_H | ||
3 | /* General purpose glob matching filter */ | ||
4 | |||
5 | #include <linux/list.h> | ||
6 | #include <stdbool.h> | ||
7 | |||
8 | /* A node of string filter */ | ||
9 | struct strfilter_node { | ||
10 | struct strfilter_node *l; /* Tree left branch (for &,|) */ | ||
11 | struct strfilter_node *r; /* Tree right branch (for !,&,|) */ | ||
12 | const char *p; /* Operator or rule */ | ||
13 | }; | ||
14 | |||
15 | /* String filter */ | ||
16 | struct strfilter { | ||
17 | struct strfilter_node *root; | ||
18 | }; | ||
19 | |||
20 | /** | ||
21 | * strfilter__new - Create a new string filter | ||
22 | * @rules: Filter rule, which is a combination of glob expressions. | ||
23 | * @err: Pointer which points to the error detected in @rules | ||
24 | * | ||
25 | * Parse @rules and return a new strfilter. Return NULL if an error is detected. | ||
26 | * In that case, *@err indicates where the error was detected, and *@err is NULL | ||
27 | * if a memory allocation failed. | ||
28 | */ | ||
29 | struct strfilter *strfilter__new(const char *rules, const char **err); | ||
30 | |||
31 | /** | ||
32 | * strfilter__compare - compare given string and a string filter | ||
33 | * @self: String filter | ||
34 | * @str: target string | ||
35 | * | ||
36 | * Compare @str against @self. Return true if @str matches the rules. | ||
37 | */ | ||
38 | bool strfilter__compare(struct strfilter *self, const char *str); | ||
39 | |||
40 | /** | ||
41 | * strfilter__delete - delete a string filter | ||
42 | * @self: String filter to delete | ||
43 | * | ||
44 | * Delete @self. | ||
45 | */ | ||
46 | void strfilter__delete(struct strfilter *self); | ||
47 | |||
48 | #endif | ||
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index b1bf490aff88..00014e32c288 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c | |||
@@ -207,7 +207,6 @@ struct dso *dso__new(const char *name) | |||
207 | dso__set_short_name(self, self->name); | 207 | dso__set_short_name(self, self->name); |
208 | for (i = 0; i < MAP__NR_TYPES; ++i) | 208 | for (i = 0; i < MAP__NR_TYPES; ++i) |
209 | self->symbols[i] = self->symbol_names[i] = RB_ROOT; | 209 | self->symbols[i] = self->symbol_names[i] = RB_ROOT; |
210 | self->slen_calculated = 0; | ||
211 | self->origin = DSO__ORIG_NOT_FOUND; | 210 | self->origin = DSO__ORIG_NOT_FOUND; |
212 | self->loaded = 0; | 211 | self->loaded = 0; |
213 | self->sorted_by_name = 0; | 212 | self->sorted_by_name = 0; |
@@ -1525,8 +1524,8 @@ int dso__load(struct dso *self, struct map *map, symbol_filter_t filter) | |||
1525 | symbol_conf.symfs, self->long_name); | 1524 | symbol_conf.symfs, self->long_name); |
1526 | break; | 1525 | break; |
1527 | case DSO__ORIG_GUEST_KMODULE: | 1526 | case DSO__ORIG_GUEST_KMODULE: |
1528 | if (map->groups && map->groups->machine) | 1527 | if (map->groups && machine) |
1529 | root_dir = map->groups->machine->root_dir; | 1528 | root_dir = machine->root_dir; |
1530 | else | 1529 | else |
1531 | root_dir = ""; | 1530 | root_dir = ""; |
1532 | snprintf(name, size, "%s%s%s", symbol_conf.symfs, | 1531 | snprintf(name, size, "%s%s%s", symbol_conf.symfs, |
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 670cd1c88f54..4d7ed09fe332 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h | |||
@@ -132,7 +132,6 @@ struct dso { | |||
132 | struct rb_root symbol_names[MAP__NR_TYPES]; | 132 | struct rb_root symbol_names[MAP__NR_TYPES]; |
133 | enum dso_kernel_type kernel; | 133 | enum dso_kernel_type kernel; |
134 | u8 adjust_symbols:1; | 134 | u8 adjust_symbols:1; |
135 | u8 slen_calculated:1; | ||
136 | u8 has_build_id:1; | 135 | u8 has_build_id:1; |
137 | u8 hit:1; | 136 | u8 hit:1; |
138 | u8 annotate_warned:1; | 137 | u8 annotate_warned:1; |
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index 00f4eade2e3e..d5d3b22250f3 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c | |||
@@ -7,61 +7,6 @@ | |||
7 | #include "util.h" | 7 | #include "util.h" |
8 | #include "debug.h" | 8 | #include "debug.h" |
9 | 9 | ||
10 | /* Skip "." and ".." directories */ | ||
11 | static int filter(const struct dirent *dir) | ||
12 | { | ||
13 | if (dir->d_name[0] == '.') | ||
14 | return 0; | ||
15 | else | ||
16 | return 1; | ||
17 | } | ||
18 | |||
19 | struct thread_map *thread_map__new_by_pid(pid_t pid) | ||
20 | { | ||
21 | struct thread_map *threads; | ||
22 | char name[256]; | ||
23 | int items; | ||
24 | struct dirent **namelist = NULL; | ||
25 | int i; | ||
26 | |||
27 | sprintf(name, "/proc/%d/task", pid); | ||
28 | items = scandir(name, &namelist, filter, NULL); | ||
29 | if (items <= 0) | ||
30 | return NULL; | ||
31 | |||
32 | threads = malloc(sizeof(*threads) + sizeof(pid_t) * items); | ||
33 | if (threads != NULL) { | ||
34 | for (i = 0; i < items; i++) | ||
35 | threads->map[i] = atoi(namelist[i]->d_name); | ||
36 | threads->nr = items; | ||
37 | } | ||
38 | |||
39 | for (i=0; i<items; i++) | ||
40 | free(namelist[i]); | ||
41 | free(namelist); | ||
42 | |||
43 | return threads; | ||
44 | } | ||
45 | |||
46 | struct thread_map *thread_map__new_by_tid(pid_t tid) | ||
47 | { | ||
48 | struct thread_map *threads = malloc(sizeof(*threads) + sizeof(pid_t)); | ||
49 | |||
50 | if (threads != NULL) { | ||
51 | threads->map[0] = tid; | ||
52 | threads->nr = 1; | ||
53 | } | ||
54 | |||
55 | return threads; | ||
56 | } | ||
57 | |||
58 | struct thread_map *thread_map__new(pid_t pid, pid_t tid) | ||
59 | { | ||
60 | if (pid != -1) | ||
61 | return thread_map__new_by_pid(pid); | ||
62 | return thread_map__new_by_tid(tid); | ||
63 | } | ||
64 | |||
65 | static struct thread *thread__new(pid_t pid) | 10 | static struct thread *thread__new(pid_t pid) |
66 | { | 11 | { |
67 | struct thread *self = zalloc(sizeof(*self)); | 12 | struct thread *self = zalloc(sizeof(*self)); |
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h index d7574101054a..e5f2401c1b5e 100644 --- a/tools/perf/util/thread.h +++ b/tools/perf/util/thread.h | |||
@@ -18,24 +18,10 @@ struct thread { | |||
18 | int comm_len; | 18 | int comm_len; |
19 | }; | 19 | }; |
20 | 20 | ||
21 | struct thread_map { | ||
22 | int nr; | ||
23 | int map[]; | ||
24 | }; | ||
25 | |||
26 | struct perf_session; | 21 | struct perf_session; |
27 | 22 | ||
28 | void thread__delete(struct thread *self); | 23 | void thread__delete(struct thread *self); |
29 | 24 | ||
30 | struct thread_map *thread_map__new_by_pid(pid_t pid); | ||
31 | struct thread_map *thread_map__new_by_tid(pid_t tid); | ||
32 | struct thread_map *thread_map__new(pid_t pid, pid_t tid); | ||
33 | |||
34 | static inline void thread_map__delete(struct thread_map *threads) | ||
35 | { | ||
36 | free(threads); | ||
37 | } | ||
38 | |||
39 | int thread__set_comm(struct thread *self, const char *comm); | 25 | int thread__set_comm(struct thread *self, const char *comm); |
40 | int thread__comm_len(struct thread *self); | 26 | int thread__comm_len(struct thread *self); |
41 | struct thread *perf_session__findnew(struct perf_session *self, pid_t pid); | 27 | struct thread *perf_session__findnew(struct perf_session *self, pid_t pid); |
diff --git a/tools/perf/util/thread_map.c b/tools/perf/util/thread_map.c new file mode 100644 index 000000000000..a5df131b77c3 --- /dev/null +++ b/tools/perf/util/thread_map.c | |||
@@ -0,0 +1,64 @@ | |||
1 | #include <dirent.h> | ||
2 | #include <stdlib.h> | ||
3 | #include <stdio.h> | ||
4 | #include "thread_map.h" | ||
5 | |||
6 | /* Skip "." and ".." directories */ | ||
7 | static int filter(const struct dirent *dir) | ||
8 | { | ||
9 | if (dir->d_name[0] == '.') | ||
10 | return 0; | ||
11 | else | ||
12 | return 1; | ||
13 | } | ||
14 | |||
15 | struct thread_map *thread_map__new_by_pid(pid_t pid) | ||
16 | { | ||
17 | struct thread_map *threads; | ||
18 | char name[256]; | ||
19 | int items; | ||
20 | struct dirent **namelist = NULL; | ||
21 | int i; | ||
22 | |||
23 | sprintf(name, "/proc/%d/task", pid); | ||
24 | items = scandir(name, &namelist, filter, NULL); | ||
25 | if (items <= 0) | ||
26 | return NULL; | ||
27 | |||
28 | threads = malloc(sizeof(*threads) + sizeof(pid_t) * items); | ||
29 | if (threads != NULL) { | ||
30 | for (i = 0; i < items; i++) | ||
31 | threads->map[i] = atoi(namelist[i]->d_name); | ||
32 | threads->nr = items; | ||
33 | } | ||
34 | |||
35 | for (i=0; i<items; i++) | ||
36 | free(namelist[i]); | ||
37 | free(namelist); | ||
38 | |||
39 | return threads; | ||
40 | } | ||
41 | |||
42 | struct thread_map *thread_map__new_by_tid(pid_t tid) | ||
43 | { | ||
44 | struct thread_map *threads = malloc(sizeof(*threads) + sizeof(pid_t)); | ||
45 | |||
46 | if (threads != NULL) { | ||
47 | threads->map[0] = tid; | ||
48 | threads->nr = 1; | ||
49 | } | ||
50 | |||
51 | return threads; | ||
52 | } | ||
53 | |||
54 | struct thread_map *thread_map__new(pid_t pid, pid_t tid) | ||
55 | { | ||
56 | if (pid != -1) | ||
57 | return thread_map__new_by_pid(pid); | ||
58 | return thread_map__new_by_tid(tid); | ||
59 | } | ||
60 | |||
61 | void thread_map__delete(struct thread_map *threads) | ||
62 | { | ||
63 | free(threads); | ||
64 | } | ||
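The new thread_map.c keeps the single-allocation layout the old thread.c code used: a C99 flexible array member at the end of the struct, sized at malloc() time. A minimal standalone sketch of the idiom (names here are illustrative, not the perf API):

    #include <stdlib.h>

    /* One allocation covers the fixed header plus the variable-length
     * array of ids, the same shape as struct thread_map above. */
    struct id_map {
            int nr;
            int ids[];              /* C99 flexible array member */
    };

    static struct id_map *id_map__new(int nr)
    {
            struct id_map *m = malloc(sizeof(*m) + nr * sizeof(int));

            if (m != NULL)
                    m->nr = nr;
            return m;
    }

    /* a single free(m) releases header and array together */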
diff --git a/tools/perf/util/thread_map.h b/tools/perf/util/thread_map.h new file mode 100644 index 000000000000..3cb907311409 --- /dev/null +++ b/tools/perf/util/thread_map.h | |||
@@ -0,0 +1,15 @@ | |||
1 | #ifndef __PERF_THREAD_MAP_H | ||
2 | #define __PERF_THREAD_MAP_H | ||
3 | |||
4 | #include <sys/types.h> | ||
5 | |||
6 | struct thread_map { | ||
7 | int nr; | ||
8 | int map[]; | ||
9 | }; | ||
10 | |||
11 | struct thread_map *thread_map__new_by_pid(pid_t pid); | ||
12 | struct thread_map *thread_map__new_by_tid(pid_t tid); | ||
13 | struct thread_map *thread_map__new(pid_t pid, pid_t tid); | ||
14 | void thread_map__delete(struct thread_map *threads); | ||
15 | #endif /* __PERF_THREAD_MAP_H */ | ||
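With the declarations now public, a caller is expected to go through thread_map__new(), which prefers the pid path and falls back to a single-entry tid map when pid is -1. A hedged usage sketch (the dump_threads() helper is hypothetical):

    #include <stdio.h>
    #include "thread_map.h"

    static void dump_threads(pid_t pid, pid_t tid)
    {
            struct thread_map *threads = thread_map__new(pid, tid);
            int i;

            if (threads == NULL)
                    return;
            for (i = 0; i < threads->nr; i++)
                    printf("thread %d\n", threads->map[i]);
            thread_map__delete(threads);
    }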
diff --git a/tools/perf/util/top.c b/tools/perf/util/top.c new file mode 100644 index 000000000000..75cfe4d45119 --- /dev/null +++ b/tools/perf/util/top.c | |||
@@ -0,0 +1,238 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com> | ||
3 | * | ||
4 | * Refactored from builtin-top.c, see that file for further copyright notes. | ||
5 | * | ||
6 | * Released under the GPL v2. (and only v2, not any later version) | ||
7 | */ | ||
8 | |||
9 | #include "cpumap.h" | ||
10 | #include "event.h" | ||
11 | #include "evlist.h" | ||
12 | #include "evsel.h" | ||
13 | #include "parse-events.h" | ||
14 | #include "symbol.h" | ||
15 | #include "top.h" | ||
16 | #include <inttypes.h> | ||
17 | |||
18 | /* | ||
19 | * Ordering weight: count-1 * count-2 * ... / count-n | ||
20 | */ | ||
21 | static double sym_weight(const struct sym_entry *sym, struct perf_top *top) | ||
22 | { | ||
23 | double weight = sym->snap_count; | ||
24 | int counter; | ||
25 | |||
26 | if (!top->display_weighted) | ||
27 | return weight; | ||
28 | |||
29 | for (counter = 1; counter < top->evlist->nr_entries - 1; counter++) | ||
30 | weight *= sym->count[counter]; | ||
31 | |||
32 | weight /= (sym->count[counter] + 1); | ||
33 | |||
34 | return weight; | ||
35 | } | ||
36 | |||
37 | static void perf_top__remove_active_sym(struct perf_top *top, struct sym_entry *syme) | ||
38 | { | ||
39 | pthread_mutex_lock(&top->active_symbols_lock); | ||
40 | list_del_init(&syme->node); | ||
41 | pthread_mutex_unlock(&top->active_symbols_lock); | ||
42 | } | ||
43 | |||
44 | static void rb_insert_active_sym(struct rb_root *tree, struct sym_entry *se) | ||
45 | { | ||
46 | struct rb_node **p = &tree->rb_node; | ||
47 | struct rb_node *parent = NULL; | ||
48 | struct sym_entry *iter; | ||
49 | |||
50 | while (*p != NULL) { | ||
51 | parent = *p; | ||
52 | iter = rb_entry(parent, struct sym_entry, rb_node); | ||
53 | |||
54 | if (se->weight > iter->weight) | ||
55 | p = &(*p)->rb_left; | ||
56 | else | ||
57 | p = &(*p)->rb_right; | ||
58 | } | ||
59 | |||
60 | rb_link_node(&se->rb_node, parent, p); | ||
61 | rb_insert_color(&se->rb_node, tree); | ||
62 | } | ||
63 | |||
64 | #define SNPRINTF(buf, size, fmt, args...) \ | ||
65 | ({ \ | ||
66 | size_t r = snprintf(buf, size, fmt, ## args); \ | ||
67 | r > size ? size : r; \ | ||
68 | }) | ||
69 | |||
70 | size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size) | ||
71 | { | ||
72 | struct perf_evsel *counter; | ||
73 | float samples_per_sec = top->samples / top->delay_secs; | ||
74 | float ksamples_per_sec = top->kernel_samples / top->delay_secs; | ||
75 | float esamples_percent = (100.0 * top->exact_samples) / top->samples; | ||
76 | size_t ret = 0; | ||
77 | |||
78 | if (!perf_guest) { | ||
79 | ret = SNPRINTF(bf, size, | ||
80 | " PerfTop:%8.0f irqs/sec kernel:%4.1f%%" | ||
81 | " exact: %4.1f%% [", samples_per_sec, | ||
82 | 100.0 - (100.0 * ((samples_per_sec - ksamples_per_sec) / | ||
83 | samples_per_sec)), | ||
84 | esamples_percent); | ||
85 | } else { | ||
86 | float us_samples_per_sec = top->us_samples / top->delay_secs; | ||
87 | float guest_kernel_samples_per_sec = top->guest_kernel_samples / top->delay_secs; | ||
88 | float guest_us_samples_per_sec = top->guest_us_samples / top->delay_secs; | ||
89 | |||
90 | ret = SNPRINTF(bf, size, | ||
91 | " PerfTop:%8.0f irqs/sec kernel:%4.1f%% us:%4.1f%%" | ||
92 | " guest kernel:%4.1f%% guest us:%4.1f%%" | ||
93 | " exact: %4.1f%% [", samples_per_sec, | ||
94 | 100.0 - (100.0 * ((samples_per_sec - ksamples_per_sec) / | ||
95 | samples_per_sec)), | ||
96 | 100.0 - (100.0 * ((samples_per_sec - us_samples_per_sec) / | ||
97 | samples_per_sec)), | ||
98 | 100.0 - (100.0 * ((samples_per_sec - | ||
99 | guest_kernel_samples_per_sec) / | ||
100 | samples_per_sec)), | ||
101 | 100.0 - (100.0 * ((samples_per_sec - | ||
102 | guest_us_samples_per_sec) / | ||
103 | samples_per_sec)), | ||
104 | esamples_percent); | ||
105 | } | ||
106 | |||
107 | if (top->evlist->nr_entries == 1 || !top->display_weighted) { | ||
108 | struct perf_evsel *first; | ||
109 | first = list_entry(top->evlist->entries.next, struct perf_evsel, node); | ||
110 | ret += SNPRINTF(bf + ret, size - ret, "%" PRIu64 "%s ", | ||
111 | (uint64_t)first->attr.sample_period, | ||
112 | top->freq ? "Hz" : ""); | ||
113 | } | ||
114 | |||
115 | if (!top->display_weighted) { | ||
116 | ret += SNPRINTF(bf + ret, size - ret, "%s", | ||
117 | event_name(top->sym_evsel)); | ||
118 | } else { | ||
119 | /* | ||
120 | * Don't let events eat all the space. Leaving 30 bytes | ||
121 | * for the rest should be enough. | ||
122 | */ | ||
123 | size_t last_pos = size - 30; | ||
124 | |||
125 | list_for_each_entry(counter, &top->evlist->entries, node) { | ||
126 | ret += SNPRINTF(bf + ret, size - ret, "%s%s", | ||
127 | counter->idx ? "/" : "", | ||
128 | event_name(counter)); | ||
129 | if (ret > last_pos) { | ||
130 | sprintf(bf + last_pos - 3, ".."); | ||
131 | ret = last_pos - 1; | ||
132 | break; | ||
133 | } | ||
134 | } | ||
135 | } | ||
136 | |||
137 | ret += SNPRINTF(bf + ret, size - ret, "], "); | ||
138 | |||
139 | if (top->target_pid != -1) | ||
140 | ret += SNPRINTF(bf + ret, size - ret, " (target_pid: %d", | ||
141 | top->target_pid); | ||
142 | else if (top->target_tid != -1) | ||
143 | ret += SNPRINTF(bf + ret, size - ret, " (target_tid: %d", | ||
144 | top->target_tid); | ||
145 | else | ||
146 | ret += SNPRINTF(bf + ret, size - ret, " (all"); | ||
147 | |||
148 | if (top->cpu_list) | ||
149 | ret += SNPRINTF(bf + ret, size - ret, ", CPU%s: %s)", | ||
150 | top->evlist->cpus->nr > 1 ? "s" : "", top->cpu_list); | ||
151 | else { | ||
152 | if (top->target_tid != -1) | ||
153 | ret += SNPRINTF(bf + ret, size - ret, ")"); | ||
154 | else | ||
155 | ret += SNPRINTF(bf + ret, size - ret, ", %d CPU%s)", | ||
156 | top->evlist->cpus->nr, | ||
157 | top->evlist->cpus->nr > 1 ? "s" : ""); | ||
158 | } | ||
159 | |||
160 | return ret; | ||
161 | } | ||
162 | |||
163 | void perf_top__reset_sample_counters(struct perf_top *top) | ||
164 | { | ||
165 | top->samples = top->us_samples = top->kernel_samples = | ||
166 | top->exact_samples = top->guest_kernel_samples = | ||
167 | top->guest_us_samples = 0; | ||
168 | } | ||
169 | |||
170 | float perf_top__decay_samples(struct perf_top *top, struct rb_root *root) | ||
171 | { | ||
172 | struct sym_entry *syme, *n; | ||
173 | float sum_ksamples = 0.0; | ||
174 | int snap = !top->display_weighted ? top->sym_counter : 0, j; | ||
175 | |||
176 | /* Sort the active symbols */ | ||
177 | pthread_mutex_lock(&top->active_symbols_lock); | ||
178 | syme = list_entry(top->active_symbols.next, struct sym_entry, node); | ||
179 | pthread_mutex_unlock(&top->active_symbols_lock); | ||
180 | |||
181 | top->rb_entries = 0; | ||
182 | list_for_each_entry_safe_from(syme, n, &top->active_symbols, node) { | ||
183 | syme->snap_count = syme->count[snap]; | ||
184 | if (syme->snap_count != 0) { | ||
185 | |||
186 | if ((top->hide_user_symbols && | ||
187 | syme->origin == PERF_RECORD_MISC_USER) || | ||
188 | (top->hide_kernel_symbols && | ||
189 | syme->origin == PERF_RECORD_MISC_KERNEL)) { | ||
190 | perf_top__remove_active_sym(top, syme); | ||
191 | continue; | ||
192 | } | ||
193 | syme->weight = sym_weight(syme, top); | ||
194 | |||
195 | if ((int)syme->snap_count >= top->count_filter) { | ||
196 | rb_insert_active_sym(root, syme); | ||
197 | ++top->rb_entries; | ||
198 | } | ||
199 | sum_ksamples += syme->snap_count; | ||
200 | |||
201 | for (j = 0; j < top->evlist->nr_entries; j++) | ||
202 | syme->count[j] = top->zero ? 0 : syme->count[j] * 7 / 8; | ||
203 | } else | ||
204 | perf_top__remove_active_sym(top, syme); | ||
205 | } | ||
206 | |||
207 | return sum_ksamples; | ||
208 | } | ||
209 | |||
210 | /* | ||
211 | * Find the longest symbol name that will be displayed | ||
212 | */ | ||
213 | void perf_top__find_widths(struct perf_top *top, struct rb_root *root, | ||
214 | int *dso_width, int *dso_short_width, int *sym_width) | ||
215 | { | ||
216 | struct rb_node *nd; | ||
217 | int printed = 0; | ||
218 | |||
219 | *sym_width = *dso_width = *dso_short_width = 0; | ||
220 | |||
221 | for (nd = rb_first(root); nd; nd = rb_next(nd)) { | ||
222 | struct sym_entry *syme = rb_entry(nd, struct sym_entry, rb_node); | ||
223 | struct symbol *sym = sym_entry__symbol(syme); | ||
224 | |||
225 | if (++printed > top->print_entries || | ||
226 | (int)syme->snap_count < top->count_filter) | ||
227 | continue; | ||
228 | |||
229 | if (syme->map->dso->long_name_len > *dso_width) | ||
230 | *dso_width = syme->map->dso->long_name_len; | ||
231 | |||
232 | if (syme->map->dso->short_name_len > *dso_short_width) | ||
233 | *dso_short_width = syme->map->dso->short_name_len; | ||
234 | |||
235 | if (sym->namelen > *sym_width) | ||
236 | *sym_width = sym->namelen; | ||
237 | } | ||
238 | } | ||
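The SNPRINTF wrapper above clamps its result because snprintf() reports the length it would have produced, not what actually fit; without the clamp, the running ret offset fed back into bf + ret could step past the buffer. A small self-contained sketch of the same clamp (snprintf_clamped is hypothetical):

    #include <stdio.h>

    static size_t snprintf_clamped(char *buf, size_t size, const char *s)
    {
            size_t r = snprintf(buf, size, "%s", s);

            return r > size ? size : r;     /* same clamp as SNPRINTF above */
    }

    int main(void)
    {
            char bf[8];
            size_t ret = snprintf_clamped(bf, sizeof(bf), "0123456789");

            /* plain snprintf() would have returned 10 here */
            printf("ret=%zu str=%s\n", ret, bf);
            return 0;
    }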
diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h new file mode 100644 index 000000000000..96d1cb78af01 --- /dev/null +++ b/tools/perf/util/top.h | |||
@@ -0,0 +1,66 @@ | |||
1 | #ifndef __PERF_TOP_H | ||
2 | #define __PERF_TOP_H 1 | ||
3 | |||
4 | #include "types.h" | ||
5 | #include "../perf.h" | ||
6 | #include <stddef.h> | ||
7 | #include <pthread.h> | ||
8 | #include <linux/list.h> | ||
9 | #include <linux/rbtree.h> | ||
10 | |||
11 | struct perf_evlist; | ||
12 | struct perf_evsel; | ||
13 | |||
14 | struct sym_entry { | ||
15 | struct rb_node rb_node; | ||
16 | struct list_head node; | ||
17 | unsigned long snap_count; | ||
18 | double weight; | ||
19 | int skip; | ||
20 | u8 origin; | ||
21 | struct map *map; | ||
22 | unsigned long count[0]; | ||
23 | }; | ||
24 | |||
25 | static inline struct symbol *sym_entry__symbol(struct sym_entry *self) | ||
26 | { | ||
27 | return ((void *)self) + symbol_conf.priv_size; | ||
28 | } | ||
29 | |||
30 | struct perf_top { | ||
31 | struct perf_evlist *evlist; | ||
32 | /* | ||
33 | * Symbols will be added here in perf_event__process_sample and will | ||
34 | * get out after decayed. | ||
35 | */ | ||
36 | struct list_head active_symbols; | ||
37 | pthread_mutex_t active_symbols_lock; | ||
38 | pthread_cond_t active_symbols_cond; | ||
39 | u64 samples; | ||
40 | u64 kernel_samples, us_samples; | ||
41 | u64 exact_samples; | ||
42 | u64 guest_us_samples, guest_kernel_samples; | ||
43 | int print_entries, count_filter, delay_secs; | ||
44 | int display_weighted, freq, rb_entries, sym_counter; | ||
45 | pid_t target_pid, target_tid; | ||
46 | bool hide_kernel_symbols, hide_user_symbols, zero; | ||
47 | const char *cpu_list; | ||
48 | struct sym_entry *sym_filter_entry; | ||
49 | struct perf_evsel *sym_evsel; | ||
50 | }; | ||
51 | |||
52 | size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size); | ||
53 | void perf_top__reset_sample_counters(struct perf_top *top); | ||
54 | float perf_top__decay_samples(struct perf_top *top, struct rb_root *root); | ||
55 | void perf_top__find_widths(struct perf_top *top, struct rb_root *root, | ||
56 | int *dso_width, int *dso_short_width, int *sym_width); | ||
57 | |||
58 | #ifdef NO_NEWT_SUPPORT | ||
59 | static inline int perf_top__tui_browser(struct perf_top *top __used) | ||
60 | { | ||
61 | return 0; | ||
62 | } | ||
63 | #else | ||
64 | int perf_top__tui_browser(struct perf_top *top); | ||
65 | #endif | ||
66 | #endif /* __PERF_TOP_H */ | ||
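sym_entry__symbol() works because the symbol allocator reserves symbol_conf.priv_size bytes of private data directly in front of each struct symbol, so stepping forward by priv_size lands on the symbol. A hypothetical sketch of that layout, not the perf allocator itself:

    #include <stdlib.h>
    #include <string.h>

    struct object {
            char name[32];
    };

    static size_t priv_size = 16;   /* stand-in for symbol_conf.priv_size */

    static struct object *object__new(const char *name)
    {
            char *p = calloc(1, priv_size + sizeof(struct object));
            struct object *obj;

            if (p == NULL)
                    return NULL;
            /* the object sits right after the private area */
            obj = (struct object *)(p + priv_size);
            strncpy(obj->name, name, sizeof(obj->name) - 1);
            return obj;
    }

    static void *object__priv(struct object *obj)
    {
            /* inverse of sym_entry__symbol(): walk back to the private area */
            return (char *)obj - priv_size;
    }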
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index 73a02223c629..d8e622dd738a 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c | |||
@@ -153,7 +153,7 @@ void parse_proc_kallsyms(char *file, unsigned int size __unused) | |||
153 | char *next = NULL; | 153 | char *next = NULL; |
154 | char *addr_str; | 154 | char *addr_str; |
155 | char ch; | 155 | char ch; |
156 | int ret; | 156 | int ret __used; |
157 | int i; | 157 | int i; |
158 | 158 | ||
159 | line = strtok_r(file, "\n", &next); | 159 | line = strtok_r(file, "\n", &next); |
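Annotating ret with __used presumably silences a set-but-never-read warning while keeping the assignment; perf defines its annotation macros elsewhere in the tree, typically on top of a GCC attribute like the hypothetical one below:

    #include <stdio.h>

    /* Hypothetical equivalent of such an annotation macro. */
    #define maybe_unused __attribute__((unused))

    static void parse_one(const char *line)
    {
            int ret maybe_unused;

            ret = sscanf(line, "%*s");      /* return value deliberately ignored */
    }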
diff --git a/tools/perf/util/ui/browser.c b/tools/perf/util/ui/browser.c index 8bc010edca25..611219f80680 100644 --- a/tools/perf/util/ui/browser.c +++ b/tools/perf/util/ui/browser.c | |||
@@ -1,4 +1,5 @@ | |||
1 | #include "libslang.h" | 1 | #include "libslang.h" |
2 | #include "ui.h" | ||
2 | #include <linux/compiler.h> | 3 | #include <linux/compiler.h> |
3 | #include <linux/list.h> | 4 | #include <linux/list.h> |
4 | #include <linux/rbtree.h> | 5 | #include <linux/rbtree.h> |
@@ -156,6 +157,20 @@ void ui_browser__add_exit_keys(struct ui_browser *self, int keys[]) | |||
156 | } | 157 | } |
157 | } | 158 | } |
158 | 159 | ||
160 | void __ui_browser__show_title(struct ui_browser *browser, const char *title) | ||
161 | { | ||
162 | SLsmg_gotorc(0, 0); | ||
163 | ui_browser__set_color(browser, NEWT_COLORSET_ROOT); | ||
164 | slsmg_write_nstring(title, browser->width); | ||
165 | } | ||
166 | |||
167 | void ui_browser__show_title(struct ui_browser *browser, const char *title) | ||
168 | { | ||
169 | pthread_mutex_lock(&ui__lock); | ||
170 | __ui_browser__show_title(browser, title); | ||
171 | pthread_mutex_unlock(&ui__lock); | ||
172 | } | ||
173 | |||
159 | int ui_browser__show(struct ui_browser *self, const char *title, | 174 | int ui_browser__show(struct ui_browser *self, const char *title, |
160 | const char *helpline, ...) | 175 | const char *helpline, ...) |
161 | { | 176 | { |
@@ -178,9 +193,8 @@ int ui_browser__show(struct ui_browser *self, const char *title, | |||
178 | if (self->sb == NULL) | 193 | if (self->sb == NULL) |
179 | return -1; | 194 | return -1; |
180 | 195 | ||
181 | SLsmg_gotorc(0, 0); | 196 | pthread_mutex_lock(&ui__lock); |
182 | ui_browser__set_color(self, NEWT_COLORSET_ROOT); | 197 | __ui_browser__show_title(self, title); |
183 | slsmg_write_nstring(title, self->width); | ||
184 | 198 | ||
185 | ui_browser__add_exit_keys(self, keys); | 199 | ui_browser__add_exit_keys(self, keys); |
186 | newtFormAddComponent(self->form, self->sb); | 200 | newtFormAddComponent(self->form, self->sb); |
@@ -188,25 +202,30 @@ int ui_browser__show(struct ui_browser *self, const char *title, | |||
188 | va_start(ap, helpline); | 202 | va_start(ap, helpline); |
189 | ui_helpline__vpush(helpline, ap); | 203 | ui_helpline__vpush(helpline, ap); |
190 | va_end(ap); | 204 | va_end(ap); |
205 | pthread_mutex_unlock(&ui__lock); | ||
191 | return 0; | 206 | return 0; |
192 | } | 207 | } |
193 | 208 | ||
194 | void ui_browser__hide(struct ui_browser *self) | 209 | void ui_browser__hide(struct ui_browser *self) |
195 | { | 210 | { |
211 | pthread_mutex_lock(&ui__lock); | ||
196 | newtFormDestroy(self->form); | 212 | newtFormDestroy(self->form); |
197 | self->form = NULL; | 213 | self->form = NULL; |
198 | ui_helpline__pop(); | 214 | ui_helpline__pop(); |
215 | pthread_mutex_unlock(&ui__lock); | ||
199 | } | 216 | } |
200 | 217 | ||
201 | int ui_browser__refresh(struct ui_browser *self) | 218 | int ui_browser__refresh(struct ui_browser *self) |
202 | { | 219 | { |
203 | int row; | 220 | int row; |
204 | 221 | ||
222 | pthread_mutex_lock(&ui__lock); | ||
205 | newtScrollbarSet(self->sb, self->index, self->nr_entries - 1); | 223 | newtScrollbarSet(self->sb, self->index, self->nr_entries - 1); |
206 | row = self->refresh(self); | 224 | row = self->refresh(self); |
207 | ui_browser__set_color(self, HE_COLORSET_NORMAL); | 225 | ui_browser__set_color(self, HE_COLORSET_NORMAL); |
208 | SLsmg_fill_region(self->y + row, self->x, | 226 | SLsmg_fill_region(self->y + row, self->x, |
209 | self->height - row, self->width, ' '); | 227 | self->height - row, self->width, ' '); |
228 | pthread_mutex_unlock(&ui__lock); | ||
210 | 229 | ||
211 | return 0; | 230 | return 0; |
212 | } | 231 | } |
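The new ui__lock routes every newt/slang screen update through one mutex, with a double-underscore variant for callers that already hold it (as ui_browser__show() now does around the title and helpline). The pattern, reduced to a hedged sketch:

    #include <pthread.h>

    static pthread_mutex_t ui__lock = PTHREAD_MUTEX_INITIALIZER;

    /* Unlocked worker: for callers that already hold ui__lock. */
    static void __screen_update(const char *title)
    {
            (void)title;    /* SLsmg/newt drawing calls would go here */
    }

    /* Locked wrapper: the entry point used from other threads. */
    static void screen_update(const char *title)
    {
            pthread_mutex_lock(&ui__lock);
            __screen_update(title);
            pthread_mutex_unlock(&ui__lock);
    }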
diff --git a/tools/perf/util/ui/browser.h b/tools/perf/util/ui/browser.h index 0dc7e4da36f5..fc63dda10910 100644 --- a/tools/perf/util/ui/browser.h +++ b/tools/perf/util/ui/browser.h | |||
@@ -24,7 +24,6 @@ struct ui_browser { | |||
24 | u32 nr_entries; | 24 | u32 nr_entries; |
25 | }; | 25 | }; |
26 | 26 | ||
27 | |||
28 | void ui_browser__set_color(struct ui_browser *self, int color); | 27 | void ui_browser__set_color(struct ui_browser *self, int color); |
29 | void ui_browser__set_percent_color(struct ui_browser *self, | 28 | void ui_browser__set_percent_color(struct ui_browser *self, |
30 | double percent, bool current); | 29 | double percent, bool current); |
@@ -35,6 +34,8 @@ void ui_browser__reset_index(struct ui_browser *self); | |||
35 | void ui_browser__gotorc(struct ui_browser *self, int y, int x); | 34 | void ui_browser__gotorc(struct ui_browser *self, int y, int x); |
36 | void ui_browser__add_exit_key(struct ui_browser *self, int key); | 35 | void ui_browser__add_exit_key(struct ui_browser *self, int key); |
37 | void ui_browser__add_exit_keys(struct ui_browser *self, int keys[]); | 36 | void ui_browser__add_exit_keys(struct ui_browser *self, int keys[]); |
37 | void __ui_browser__show_title(struct ui_browser *browser, const char *title); | ||
38 | void ui_browser__show_title(struct ui_browser *browser, const char *title); | ||
38 | int ui_browser__show(struct ui_browser *self, const char *title, | 39 | int ui_browser__show(struct ui_browser *self, const char *title, |
39 | const char *helpline, ...); | 40 | const char *helpline, ...); |
40 | void ui_browser__hide(struct ui_browser *self); | 41 | void ui_browser__hide(struct ui_browser *self); |
diff --git a/tools/perf/util/ui/browsers/annotate.c b/tools/perf/util/ui/browsers/annotate.c index 82b78f99251b..8c17a8730e4a 100644 --- a/tools/perf/util/ui/browsers/annotate.c +++ b/tools/perf/util/ui/browsers/annotate.c | |||
@@ -1,9 +1,12 @@ | |||
1 | #include "../browser.h" | 1 | #include "../browser.h" |
2 | #include "../helpline.h" | 2 | #include "../helpline.h" |
3 | #include "../libslang.h" | 3 | #include "../libslang.h" |
4 | #include "../../annotate.h" | ||
4 | #include "../../hist.h" | 5 | #include "../../hist.h" |
5 | #include "../../sort.h" | 6 | #include "../../sort.h" |
6 | #include "../../symbol.h" | 7 | #include "../../symbol.h" |
8 | #include "../../annotate.h" | ||
9 | #include <pthread.h> | ||
7 | 10 | ||
8 | static void ui__error_window(const char *fmt, ...) | 11 | static void ui__error_window(const char *fmt, ...) |
9 | { | 12 | { |
@@ -42,8 +45,6 @@ static void annotate_browser__write(struct ui_browser *self, void *entry, int ro | |||
42 | struct objdump_line_rb_node *olrb = objdump_line__rb(ol); | 45 | struct objdump_line_rb_node *olrb = objdump_line__rb(ol); |
43 | ui_browser__set_percent_color(self, olrb->percent, current_entry); | 46 | ui_browser__set_percent_color(self, olrb->percent, current_entry); |
44 | slsmg_printf(" %7.2f ", olrb->percent); | 47 | slsmg_printf(" %7.2f ", olrb->percent); |
45 | if (!current_entry) | ||
46 | ui_browser__set_color(self, HE_COLORSET_CODE); | ||
47 | } else { | 48 | } else { |
48 | ui_browser__set_percent_color(self, 0, current_entry); | 49 | ui_browser__set_percent_color(self, 0, current_entry); |
49 | slsmg_write_nstring(" ", 9); | 50 | slsmg_write_nstring(" ", 9); |
@@ -55,35 +56,40 @@ static void annotate_browser__write(struct ui_browser *self, void *entry, int ro | |||
55 | slsmg_write_nstring(" ", width - 18); | 56 | slsmg_write_nstring(" ", width - 18); |
56 | else | 57 | else |
57 | slsmg_write_nstring(ol->line, width - 18); | 58 | slsmg_write_nstring(ol->line, width - 18); |
59 | |||
60 | if (!current_entry) | ||
61 | ui_browser__set_color(self, HE_COLORSET_CODE); | ||
58 | } | 62 | } |
59 | 63 | ||
60 | static double objdump_line__calc_percent(struct objdump_line *self, | 64 | static double objdump_line__calc_percent(struct objdump_line *self, |
61 | struct list_head *head, | 65 | struct symbol *sym, int evidx) |
62 | struct symbol *sym) | ||
63 | { | 66 | { |
64 | double percent = 0.0; | 67 | double percent = 0.0; |
65 | 68 | ||
66 | if (self->offset != -1) { | 69 | if (self->offset != -1) { |
67 | int len = sym->end - sym->start; | 70 | int len = sym->end - sym->start; |
68 | unsigned int hits = 0; | 71 | unsigned int hits = 0; |
69 | struct sym_priv *priv = symbol__priv(sym); | 72 | struct annotation *notes = symbol__annotation(sym); |
70 | struct sym_ext *sym_ext = priv->ext; | 73 | struct source_line *src_line = notes->src->lines; |
71 | struct sym_hist *h = priv->hist; | 74 | struct sym_hist *h = annotation__histogram(notes, evidx); |
72 | s64 offset = self->offset; | 75 | s64 offset = self->offset; |
73 | struct objdump_line *next = objdump__get_next_ip_line(head, self); | 76 | struct objdump_line *next; |
74 | |||
75 | 77 | ||
78 | next = objdump__get_next_ip_line(¬es->src->source, self); | ||
76 | while (offset < (s64)len && | 79 | while (offset < (s64)len && |
77 | (next == NULL || offset < next->offset)) { | 80 | (next == NULL || offset < next->offset)) { |
78 | if (sym_ext) { | 81 | if (src_line) { |
79 | percent += sym_ext[offset].percent; | 82 | percent += src_line[offset].percent; |
80 | } else | 83 | } else |
81 | hits += h->ip[offset]; | 84 | hits += h->addr[offset]; |
82 | 85 | ||
83 | ++offset; | 86 | ++offset; |
84 | } | 87 | } |
85 | 88 | /* | |
86 | if (sym_ext == NULL && h->sum) | 89 | * If the percentage wasn't already calculated in |
90 | * symbol__get_source_line, do it now: | ||
91 | */ | ||
92 | if (src_line == NULL && h->sum) | ||
87 | percent = 100.0 * hits / h->sum; | 93 | percent = 100.0 * hits / h->sum; |
88 | } | 94 | } |
89 | 95 | ||
@@ -133,103 +139,161 @@ static void annotate_browser__set_top(struct annotate_browser *self, | |||
133 | self->curr_hot = nd; | 139 | self->curr_hot = nd; |
134 | } | 140 | } |
135 | 141 | ||
136 | static int annotate_browser__run(struct annotate_browser *self) | 142 | static void annotate_browser__calc_percent(struct annotate_browser *browser, |
143 | int evidx) | ||
137 | { | 144 | { |
138 | struct rb_node *nd; | 145 | struct symbol *sym = browser->b.priv; |
139 | struct hist_entry *he = self->b.priv; | 146 | struct annotation *notes = symbol__annotation(sym); |
140 | int key; | 147 | struct objdump_line *pos; |
141 | 148 | ||
142 | if (ui_browser__show(&self->b, he->ms.sym->name, | 149 | browser->entries = RB_ROOT; |
143 | "<-, -> or ESC: exit, TAB/shift+TAB: cycle thru samples") < 0) | 150 | |
144 | return -1; | 151 | pthread_mutex_lock(¬es->lock); |
152 | |||
153 | list_for_each_entry(pos, ¬es->src->source, node) { | ||
154 | struct objdump_line_rb_node *rbpos = objdump_line__rb(pos); | ||
155 | rbpos->percent = objdump_line__calc_percent(pos, sym, evidx); | ||
156 | if (rbpos->percent < 0.01) { | ||
157 | RB_CLEAR_NODE(&rbpos->rb_node); | ||
158 | continue; | ||
159 | } | ||
160 | objdump__insert_line(&browser->entries, rbpos); | ||
161 | } | ||
162 | pthread_mutex_unlock(¬es->lock); | ||
163 | |||
164 | browser->curr_hot = rb_last(&browser->entries); | ||
165 | } | ||
166 | |||
167 | static int annotate_browser__run(struct annotate_browser *self, int evidx, | ||
168 | int refresh) | ||
169 | { | ||
170 | struct rb_node *nd = NULL; | ||
171 | struct symbol *sym = self->b.priv; | ||
145 | /* | 172 | /* |
146 | * To allow builtin-annotate to cycle thru multiple symbols by | 173 | * RIGHT To allow builtin-annotate to cycle thru multiple symbols by |
147 | * examining the exit key for this function. | 174 | * examining the exit key for this function. |
148 | */ | 175 | */ |
149 | ui_browser__add_exit_key(&self->b, NEWT_KEY_RIGHT); | 176 | int exit_keys[] = { 'H', NEWT_KEY_TAB, NEWT_KEY_UNTAB, |
177 | NEWT_KEY_RIGHT, 0 }; | ||
178 | int key; | ||
179 | |||
180 | if (ui_browser__show(&self->b, sym->name, | ||
181 | "<-, -> or ESC: exit, TAB/shift+TAB: " | ||
182 | "cycle hottest lines, H: Hottest") < 0) | ||
183 | return -1; | ||
184 | |||
185 | ui_browser__add_exit_keys(&self->b, exit_keys); | ||
186 | annotate_browser__calc_percent(self, evidx); | ||
187 | |||
188 | if (self->curr_hot) | ||
189 | annotate_browser__set_top(self, self->curr_hot); | ||
150 | 190 | ||
151 | nd = self->curr_hot; | 191 | nd = self->curr_hot; |
152 | if (nd) { | 192 | |
153 | int tabs[] = { NEWT_KEY_TAB, NEWT_KEY_UNTAB, 0 }; | 193 | if (refresh != 0) |
154 | ui_browser__add_exit_keys(&self->b, tabs); | 194 | newtFormSetTimer(self->b.form, refresh); |
155 | } | ||
156 | 195 | ||
157 | while (1) { | 196 | while (1) { |
158 | key = ui_browser__run(&self->b); | 197 | key = ui_browser__run(&self->b); |
159 | 198 | ||
199 | if (refresh != 0) { | ||
200 | annotate_browser__calc_percent(self, evidx); | ||
201 | /* | ||
202 | * Current line focus got out of the list of most active | ||
203 | * lines, NULL it so that if TAB|UNTAB is pressed, we | ||
204 | * move to curr_hot (current hottest line). | ||
205 | */ | ||
206 | if (nd != NULL && RB_EMPTY_NODE(nd)) | ||
207 | nd = NULL; | ||
208 | } | ||
209 | |||
160 | switch (key) { | 210 | switch (key) { |
211 | case -1: | ||
212 | /* | ||
213 | * FIXME we need to check if it was | ||
214 | * es.reason == NEWT_EXIT_TIMER | ||
215 | */ | ||
216 | if (refresh != 0) | ||
217 | symbol__annotate_decay_histogram(sym, evidx); | ||
218 | continue; | ||
161 | case NEWT_KEY_TAB: | 219 | case NEWT_KEY_TAB: |
162 | nd = rb_prev(nd); | 220 | if (nd != NULL) { |
163 | if (nd == NULL) | 221 | nd = rb_prev(nd); |
164 | nd = rb_last(&self->entries); | 222 | if (nd == NULL) |
165 | annotate_browser__set_top(self, nd); | 223 | nd = rb_last(&self->entries); |
224 | } else | ||
225 | nd = self->curr_hot; | ||
166 | break; | 226 | break; |
167 | case NEWT_KEY_UNTAB: | 227 | case NEWT_KEY_UNTAB: |
168 | nd = rb_next(nd); | 228 | if (nd != NULL) |
169 | if (nd == NULL) | 229 | nd = rb_next(nd); |
170 | nd = rb_first(&self->entries); | 230 | if (nd == NULL) |
171 | annotate_browser__set_top(self, nd); | 231 | nd = rb_first(&self->entries); |
232 | else | ||
233 | nd = self->curr_hot; | ||
234 | break; | ||
235 | case 'H': | ||
236 | nd = self->curr_hot; | ||
172 | break; | 237 | break; |
173 | default: | 238 | default: |
174 | goto out; | 239 | goto out; |
175 | } | 240 | } |
241 | |||
242 | if (nd != NULL) | ||
243 | annotate_browser__set_top(self, nd); | ||
176 | } | 244 | } |
177 | out: | 245 | out: |
178 | ui_browser__hide(&self->b); | 246 | ui_browser__hide(&self->b); |
179 | return key; | 247 | return key; |
180 | } | 248 | } |
181 | 249 | ||
182 | int hist_entry__tui_annotate(struct hist_entry *self) | 250 | int hist_entry__tui_annotate(struct hist_entry *he, int evidx) |
251 | { | ||
252 | return symbol__tui_annotate(he->ms.sym, he->ms.map, evidx, 0); | ||
253 | } | ||
254 | |||
255 | int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx, | ||
256 | int refresh) | ||
183 | { | 257 | { |
184 | struct objdump_line *pos, *n; | 258 | struct objdump_line *pos, *n; |
185 | struct objdump_line_rb_node *rbpos; | 259 | struct annotation *notes = symbol__annotation(sym); |
186 | LIST_HEAD(head); | ||
187 | struct annotate_browser browser = { | 260 | struct annotate_browser browser = { |
188 | .b = { | 261 | .b = { |
189 | .entries = &head, | 262 | .entries = ¬es->src->source, |
190 | .refresh = ui_browser__list_head_refresh, | 263 | .refresh = ui_browser__list_head_refresh, |
191 | .seek = ui_browser__list_head_seek, | 264 | .seek = ui_browser__list_head_seek, |
192 | .write = annotate_browser__write, | 265 | .write = annotate_browser__write, |
193 | .priv = self, | 266 | .priv = sym, |
194 | }, | 267 | }, |
195 | }; | 268 | }; |
196 | int ret; | 269 | int ret; |
197 | 270 | ||
198 | if (self->ms.sym == NULL) | 271 | if (sym == NULL) |
199 | return -1; | 272 | return -1; |
200 | 273 | ||
201 | if (self->ms.map->dso->annotate_warned) | 274 | if (map->dso->annotate_warned) |
202 | return -1; | 275 | return -1; |
203 | 276 | ||
204 | if (hist_entry__annotate(self, &head, sizeof(*rbpos)) < 0) { | 277 | if (symbol__annotate(sym, map, sizeof(struct objdump_line_rb_node)) < 0) { |
205 | ui__error_window(ui_helpline__last_msg); | 278 | ui__error_window(ui_helpline__last_msg); |
206 | return -1; | 279 | return -1; |
207 | } | 280 | } |
208 | 281 | ||
209 | ui_helpline__push("Press <- or ESC to exit"); | 282 | ui_helpline__push("Press <- or ESC to exit"); |
210 | 283 | ||
211 | list_for_each_entry(pos, &head, node) { | 284 | list_for_each_entry(pos, ¬es->src->source, node) { |
285 | struct objdump_line_rb_node *rbpos; | ||
212 | size_t line_len = strlen(pos->line); | 286 | size_t line_len = strlen(pos->line); |
287 | |||
213 | if (browser.b.width < line_len) | 288 | if (browser.b.width < line_len) |
214 | browser.b.width = line_len; | 289 | browser.b.width = line_len; |
215 | rbpos = objdump_line__rb(pos); | 290 | rbpos = objdump_line__rb(pos); |
216 | rbpos->idx = browser.b.nr_entries++; | 291 | rbpos->idx = browser.b.nr_entries++; |
217 | rbpos->percent = objdump_line__calc_percent(pos, &head, self->ms.sym); | ||
218 | if (rbpos->percent < 0.01) | ||
219 | continue; | ||
220 | objdump__insert_line(&browser.entries, rbpos); | ||
221 | } | 292 | } |
222 | 293 | ||
223 | /* | ||
224 | * Position the browser at the hottest line. | ||
225 | */ | ||
226 | browser.curr_hot = rb_last(&browser.entries); | ||
227 | if (browser.curr_hot) | ||
228 | annotate_browser__set_top(&browser, browser.curr_hot); | ||
229 | |||
230 | browser.b.width += 18; /* Percentage */ | 294 | browser.b.width += 18; /* Percentage */ |
231 | ret = annotate_browser__run(&browser); | 295 | ret = annotate_browser__run(&browser, evidx, refresh); |
232 | list_for_each_entry_safe(pos, n, &head, node) { | 296 | list_for_each_entry_safe(pos, n, ¬es->src->source, node) { |
233 | list_del(&pos->node); | 297 | list_del(&pos->node); |
234 | objdump_line__free(pos); | 298 | objdump_line__free(pos); |
235 | } | 299 | } |
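annotate_browser__calc_percent() marks lines under 0.01% with RB_CLEAR_NODE() instead of inserting them, and the run loop later uses RB_EMPTY_NODE() to notice that the focused line fell out of the rebuilt tree. A hedged sketch of that cleared-node-as-membership-flag idiom, using the rbtree helpers the perf tree already carries:

    #include <stdbool.h>
    #include <linux/rbtree.h>

    struct line {
            struct rb_node rb_node;
            double percent;
    };

    /* Cold lines are never linked; clearing the node lets a later
     * RB_EMPTY_NODE() check report "not in the tree". */
    static void line__mark_cold(struct line *l)
    {
            RB_CLEAR_NODE(&l->rb_node);
    }

    static bool line__in_tree(struct line *l)
    {
            return !RB_EMPTY_NODE(&l->rb_node);
    }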
diff --git a/tools/perf/util/ui/browsers/hists.c b/tools/perf/util/ui/browsers/hists.c index 60c463c16028..798efdca3ead 100644 --- a/tools/perf/util/ui/browsers/hists.c +++ b/tools/perf/util/ui/browsers/hists.c | |||
@@ -7,6 +7,8 @@ | |||
7 | #include <newt.h> | 7 | #include <newt.h> |
8 | #include <linux/rbtree.h> | 8 | #include <linux/rbtree.h> |
9 | 9 | ||
10 | #include "../../evsel.h" | ||
11 | #include "../../evlist.h" | ||
10 | #include "../../hist.h" | 12 | #include "../../hist.h" |
11 | #include "../../pstack.h" | 13 | #include "../../pstack.h" |
12 | #include "../../sort.h" | 14 | #include "../../sort.h" |
@@ -292,7 +294,8 @@ static int hist_browser__run(struct hist_browser *self, const char *title) | |||
292 | { | 294 | { |
293 | int key; | 295 | int key; |
294 | int exit_keys[] = { 'a', '?', 'h', 'C', 'd', 'D', 'E', 't', | 296 | int exit_keys[] = { 'a', '?', 'h', 'C', 'd', 'D', 'E', 't', |
295 | NEWT_KEY_ENTER, NEWT_KEY_RIGHT, NEWT_KEY_LEFT, 0, }; | 297 | NEWT_KEY_ENTER, NEWT_KEY_RIGHT, NEWT_KEY_LEFT, |
298 | NEWT_KEY_TAB, NEWT_KEY_UNTAB, 0, }; | ||
296 | 299 | ||
297 | self->b.entries = &self->hists->entries; | 300 | self->b.entries = &self->hists->entries; |
298 | self->b.nr_entries = self->hists->nr_entries; | 301 | self->b.nr_entries = self->hists->nr_entries; |
@@ -377,7 +380,7 @@ static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *self, | |||
377 | while (node) { | 380 | while (node) { |
378 | struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node); | 381 | struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node); |
379 | struct rb_node *next = rb_next(node); | 382 | struct rb_node *next = rb_next(node); |
380 | u64 cumul = cumul_hits(child); | 383 | u64 cumul = callchain_cumul_hits(child); |
381 | struct callchain_list *chain; | 384 | struct callchain_list *chain; |
382 | char folded_sign = ' '; | 385 | char folded_sign = ' '; |
383 | int first = true; | 386 | int first = true; |
@@ -638,6 +641,9 @@ static void ui_browser__hists_seek(struct ui_browser *self, | |||
638 | struct rb_node *nd; | 641 | struct rb_node *nd; |
639 | bool first = true; | 642 | bool first = true; |
640 | 643 | ||
644 | if (self->nr_entries == 0) | ||
645 | return; | ||
646 | |||
641 | switch (whence) { | 647 | switch (whence) { |
642 | case SEEK_SET: | 648 | case SEEK_SET: |
643 | nd = hists__filter_entries(rb_first(self->entries)); | 649 | nd = hists__filter_entries(rb_first(self->entries)); |
@@ -797,8 +803,11 @@ static int hists__browser_title(struct hists *self, char *bf, size_t size, | |||
797 | return printed; | 803 | return printed; |
798 | } | 804 | } |
799 | 805 | ||
800 | int hists__browse(struct hists *self, const char *helpline, const char *ev_name) | 806 | static int perf_evsel__hists_browse(struct perf_evsel *evsel, |
807 | const char *helpline, const char *ev_name, | ||
808 | bool left_exits) | ||
801 | { | 809 | { |
810 | struct hists *self = &evsel->hists; | ||
802 | struct hist_browser *browser = hist_browser__new(self); | 811 | struct hist_browser *browser = hist_browser__new(self); |
803 | struct pstack *fstack; | 812 | struct pstack *fstack; |
804 | const struct thread *thread_filter = NULL; | 813 | const struct thread *thread_filter = NULL; |
@@ -818,8 +827,8 @@ int hists__browse(struct hists *self, const char *helpline, const char *ev_name) | |||
818 | hists__browser_title(self, msg, sizeof(msg), ev_name, | 827 | hists__browser_title(self, msg, sizeof(msg), ev_name, |
819 | dso_filter, thread_filter); | 828 | dso_filter, thread_filter); |
820 | while (1) { | 829 | while (1) { |
821 | const struct thread *thread; | 830 | const struct thread *thread = NULL; |
822 | const struct dso *dso; | 831 | const struct dso *dso = NULL; |
823 | char *options[16]; | 832 | char *options[16]; |
824 | int nr_options = 0, choice = 0, i, | 833 | int nr_options = 0, choice = 0, i, |
825 | annotate = -2, zoom_dso = -2, zoom_thread = -2, | 834 | annotate = -2, zoom_dso = -2, zoom_thread = -2, |
@@ -827,8 +836,10 @@ int hists__browse(struct hists *self, const char *helpline, const char *ev_name) | |||
827 | 836 | ||
828 | key = hist_browser__run(browser, msg); | 837 | key = hist_browser__run(browser, msg); |
829 | 838 | ||
830 | thread = hist_browser__selected_thread(browser); | 839 | if (browser->he_selection != NULL) { |
831 | dso = browser->selection->map ? browser->selection->map->dso : NULL; | 840 | thread = hist_browser__selected_thread(browser); |
841 | dso = browser->selection->map ? browser->selection->map->dso : NULL; | ||
842 | } | ||
832 | 843 | ||
833 | switch (key) { | 844 | switch (key) { |
834 | case NEWT_KEY_TAB: | 845 | case NEWT_KEY_TAB: |
@@ -839,7 +850,8 @@ int hists__browse(struct hists *self, const char *helpline, const char *ev_name) | |||
839 | */ | 850 | */ |
840 | goto out_free_stack; | 851 | goto out_free_stack; |
841 | case 'a': | 852 | case 'a': |
842 | if (browser->selection->map == NULL && | 853 | if (browser->selection == NULL || |
854 | browser->selection->map == NULL || | ||
843 | browser->selection->map->dso->annotate_warned) | 855 | browser->selection->map->dso->annotate_warned) |
844 | continue; | 856 | continue; |
845 | goto do_annotate; | 857 | goto do_annotate; |
@@ -858,6 +870,7 @@ int hists__browse(struct hists *self, const char *helpline, const char *ev_name) | |||
858 | "E Expand all callchains\n" | 870 | "E Expand all callchains\n" |
859 | "d Zoom into current DSO\n" | 871 | "d Zoom into current DSO\n" |
860 | "t Zoom into current Thread\n" | 872 | "t Zoom into current Thread\n" |
873 | "TAB/UNTAB Switch events\n" | ||
861 | "q/CTRL+C Exit browser"); | 874 | "q/CTRL+C Exit browser"); |
862 | continue; | 875 | continue; |
863 | case NEWT_KEY_ENTER: | 876 | case NEWT_KEY_ENTER: |
@@ -867,8 +880,14 @@ int hists__browse(struct hists *self, const char *helpline, const char *ev_name) | |||
867 | case NEWT_KEY_LEFT: { | 880 | case NEWT_KEY_LEFT: { |
868 | const void *top; | 881 | const void *top; |
869 | 882 | ||
870 | if (pstack__empty(fstack)) | 883 | if (pstack__empty(fstack)) { |
884 | /* | ||
885 | * Go back to the perf_evsel_menu__run or other user | ||
886 | */ | ||
887 | if (left_exits) | ||
888 | goto out_free_stack; | ||
871 | continue; | 889 | continue; |
890 | } | ||
872 | top = pstack__pop(fstack); | 891 | top = pstack__pop(fstack); |
873 | if (top == &dso_filter) | 892 | if (top == &dso_filter) |
874 | goto zoom_out_dso; | 893 | goto zoom_out_dso; |
@@ -877,14 +896,16 @@ int hists__browse(struct hists *self, const char *helpline, const char *ev_name) | |||
877 | continue; | 896 | continue; |
878 | } | 897 | } |
879 | case NEWT_KEY_ESCAPE: | 898 | case NEWT_KEY_ESCAPE: |
880 | if (!ui__dialog_yesno("Do you really want to exit?")) | 899 | if (!left_exits && |
900 | !ui__dialog_yesno("Do you really want to exit?")) | ||
881 | continue; | 901 | continue; |
882 | /* Fall thru */ | 902 | /* Fall thru */ |
883 | default: | 903 | default: |
884 | goto out_free_stack; | 904 | goto out_free_stack; |
885 | } | 905 | } |
886 | 906 | ||
887 | if (browser->selection->sym != NULL && | 907 | if (browser->selection != NULL && |
908 | browser->selection->sym != NULL && | ||
888 | !browser->selection->map->dso->annotate_warned && | 909 | !browser->selection->map->dso->annotate_warned && |
889 | asprintf(&options[nr_options], "Annotate %s", | 910 | asprintf(&options[nr_options], "Annotate %s", |
890 | browser->selection->sym->name) > 0) | 911 | browser->selection->sym->name) > 0) |
@@ -903,7 +924,8 @@ int hists__browse(struct hists *self, const char *helpline, const char *ev_name) | |||
903 | (dso->kernel ? "the Kernel" : dso->short_name)) > 0) | 924 | (dso->kernel ? "the Kernel" : dso->short_name)) > 0) |
904 | zoom_dso = nr_options++; | 925 | zoom_dso = nr_options++; |
905 | 926 | ||
906 | if (browser->selection->map != NULL && | 927 | if (browser->selection != NULL && |
928 | browser->selection->map != NULL && | ||
907 | asprintf(&options[nr_options], "Browse map details") > 0) | 929 | asprintf(&options[nr_options], "Browse map details") > 0) |
908 | browse_map = nr_options++; | 930 | browse_map = nr_options++; |
909 | 931 | ||
@@ -923,19 +945,11 @@ int hists__browse(struct hists *self, const char *helpline, const char *ev_name) | |||
923 | if (choice == annotate) { | 945 | if (choice == annotate) { |
924 | struct hist_entry *he; | 946 | struct hist_entry *he; |
925 | do_annotate: | 947 | do_annotate: |
926 | if (browser->selection->map->dso->origin == DSO__ORIG_KERNEL) { | ||
927 | browser->selection->map->dso->annotate_warned = 1; | ||
928 | ui_helpline__puts("No vmlinux file found, can't " | ||
929 | "annotate with just a " | ||
930 | "kallsyms file"); | ||
931 | continue; | ||
932 | } | ||
933 | |||
934 | he = hist_browser__selected_entry(browser); | 948 | he = hist_browser__selected_entry(browser); |
935 | if (he == NULL) | 949 | if (he == NULL) |
936 | continue; | 950 | continue; |
937 | 951 | ||
938 | hist_entry__tui_annotate(he); | 952 | hist_entry__tui_annotate(he, evsel->idx); |
939 | } else if (choice == browse_map) | 953 | } else if (choice == browse_map) |
940 | map__browse(browser->selection->map); | 954 | map__browse(browser->selection->map); |
941 | else if (choice == zoom_dso) { | 955 | else if (choice == zoom_dso) { |
@@ -984,30 +998,141 @@ out: | |||
984 | return key; | 998 | return key; |
985 | } | 999 | } |
986 | 1000 | ||
987 | int hists__tui_browse_tree(struct rb_root *self, const char *help) | 1001 | struct perf_evsel_menu { |
1002 | struct ui_browser b; | ||
1003 | struct perf_evsel *selection; | ||
1004 | }; | ||
1005 | |||
1006 | static void perf_evsel_menu__write(struct ui_browser *browser, | ||
1007 | void *entry, int row) | ||
1008 | { | ||
1009 | struct perf_evsel_menu *menu = container_of(browser, | ||
1010 | struct perf_evsel_menu, b); | ||
1011 | struct perf_evsel *evsel = list_entry(entry, struct perf_evsel, node); | ||
1012 | bool current_entry = ui_browser__is_current_entry(browser, row); | ||
1013 | unsigned long nr_events = evsel->hists.stats.nr_events[PERF_RECORD_SAMPLE]; | ||
1014 | const char *ev_name = event_name(evsel); | ||
1015 | char bf[256], unit; | ||
1016 | |||
1017 | ui_browser__set_color(browser, current_entry ? HE_COLORSET_SELECTED : | ||
1018 | HE_COLORSET_NORMAL); | ||
1019 | |||
1020 | nr_events = convert_unit(nr_events, &unit); | ||
1021 | snprintf(bf, sizeof(bf), "%lu%c%s%s", nr_events, | ||
1022 | unit, unit == ' ' ? "" : " ", ev_name); | ||
1023 | slsmg_write_nstring(bf, browser->width); | ||
1024 | |||
1025 | if (current_entry) | ||
1026 | menu->selection = evsel; | ||
1027 | } | ||
1028 | |||
1029 | static int perf_evsel_menu__run(struct perf_evsel_menu *menu, const char *help) | ||
988 | { | 1030 | { |
989 | struct rb_node *first = rb_first(self), *nd = first, *next; | 1031 | int exit_keys[] = { NEWT_KEY_ENTER, NEWT_KEY_RIGHT, 0, }; |
990 | int key = 0; | 1032 | struct perf_evlist *evlist = menu->b.priv; |
1033 | struct perf_evsel *pos; | ||
1034 | const char *ev_name, *title = "Available samples"; | ||
1035 | int key; | ||
1036 | |||
1037 | if (ui_browser__show(&menu->b, title, | ||
1038 | "ESC: exit, ENTER|->: Browse histograms") < 0) | ||
1039 | return -1; | ||
1040 | |||
1041 | ui_browser__add_exit_keys(&menu->b, exit_keys); | ||
991 | 1042 | ||
992 | while (nd) { | 1043 | while (1) { |
993 | struct hists *hists = rb_entry(nd, struct hists, rb_node); | 1044 | key = ui_browser__run(&menu->b); |
994 | const char *ev_name = __event_name(hists->type, hists->config); | ||
995 | 1045 | ||
996 | key = hists__browse(hists, help, ev_name); | ||
997 | switch (key) { | 1046 | switch (key) { |
998 | case NEWT_KEY_TAB: | 1047 | case NEWT_KEY_RIGHT: |
999 | next = rb_next(nd); | 1048 | case NEWT_KEY_ENTER: |
1000 | if (next) | 1049 | if (!menu->selection) |
1001 | nd = next; | 1050 | continue; |
1051 | pos = menu->selection; | ||
1052 | browse_hists: | ||
1053 | ev_name = event_name(pos); | ||
1054 | key = perf_evsel__hists_browse(pos, help, ev_name, true); | ||
1055 | ui_browser__show_title(&menu->b, title); | ||
1002 | break; | 1056 | break; |
1003 | case NEWT_KEY_UNTAB: | 1057 | case NEWT_KEY_LEFT: |
1004 | if (nd == first) | 1058 | continue; |
1059 | case NEWT_KEY_ESCAPE: | ||
1060 | if (!ui__dialog_yesno("Do you really want to exit?")) | ||
1005 | continue; | 1061 | continue; |
1006 | nd = rb_prev(nd); | 1062 | /* Fall thru */ |
1063 | default: | ||
1064 | goto out; | ||
1065 | } | ||
1066 | |||
1067 | switch (key) { | ||
1068 | case NEWT_KEY_TAB: | ||
1069 | if (pos->node.next == &evlist->entries) | ||
1070 | pos = list_entry(evlist->entries.next, struct perf_evsel, node); | ||
1071 | else | ||
1072 | pos = list_entry(pos->node.next, struct perf_evsel, node); | ||
1073 | goto browse_hists; | ||
1074 | case NEWT_KEY_UNTAB: | ||
1075 | if (pos->node.prev == &evlist->entries) | ||
1076 | pos = list_entry(evlist->entries.prev, struct perf_evsel, node); | ||
1077 | else | ||
1078 | pos = list_entry(pos->node.prev, struct perf_evsel, node); | ||
1079 | goto browse_hists; | ||
1080 | case 'q': | ||
1081 | case CTRL('c'): | ||
1082 | goto out; | ||
1007 | default: | 1083 | default: |
1008 | return key; | 1084 | break; |
1009 | } | 1085 | } |
1010 | } | 1086 | } |
1011 | 1087 | ||
1088 | out: | ||
1089 | ui_browser__hide(&menu->b); | ||
1012 | return key; | 1090 | return key; |
1013 | } | 1091 | } |
1092 | |||
1093 | static int __perf_evlist__tui_browse_hists(struct perf_evlist *evlist, | ||
1094 | const char *help) | ||
1095 | { | ||
1096 | struct perf_evsel *pos; | ||
1097 | struct perf_evsel_menu menu = { | ||
1098 | .b = { | ||
1099 | .entries = &evlist->entries, | ||
1100 | .refresh = ui_browser__list_head_refresh, | ||
1101 | .seek = ui_browser__list_head_seek, | ||
1102 | .write = perf_evsel_menu__write, | ||
1103 | .nr_entries = evlist->nr_entries, | ||
1104 | .priv = evlist, | ||
1105 | }, | ||
1106 | }; | ||
1107 | |||
1108 | ui_helpline__push("Press ESC to exit"); | ||
1109 | |||
1110 | list_for_each_entry(pos, &evlist->entries, node) { | ||
1111 | const char *ev_name = event_name(pos); | ||
1112 | size_t line_len = strlen(ev_name) + 7; | ||
1113 | |||
1114 | if (menu.b.width < line_len) | ||
1115 | menu.b.width = line_len; | ||
1116 | /* | ||
1117 | * Cache the evsel name, tracepoints have a _high_ cost per | ||
1118 | * event_name() call. | ||
1119 | */ | ||
1120 | if (pos->name == NULL) | ||
1121 | pos->name = strdup(ev_name); | ||
1122 | } | ||
1123 | |||
1124 | return perf_evsel_menu__run(&menu, help); | ||
1125 | } | ||
1126 | |||
1127 | int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help) | ||
1128 | { | ||
1129 | |||
1130 | if (evlist->nr_entries == 1) { | ||
1131 | struct perf_evsel *first = list_entry(evlist->entries.next, | ||
1132 | struct perf_evsel, node); | ||
1133 | const char *ev_name = event_name(first); | ||
1134 | return perf_evsel__hists_browse(first, help, ev_name, false); | ||
1135 | } | ||
1136 | |||
1137 | return __perf_evlist__tui_browse_hists(evlist, help); | ||
1138 | } | ||
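TAB and UNTAB in perf_evsel_menu__run() treat the evlist as a circular list: when node.next (or .prev) is the list head itself, the walk wraps around. The same wrap-around step, reduced to a generic sketch over a kernel-style list_head:

    #include <linux/list.h>

    struct item {
            struct list_head node;
    };

    /* Advance to the next entry, wrapping past the list head. */
    static struct item *item__next(struct list_head *head, struct item *pos)
    {
            if (pos->node.next == head)
                    return list_entry(head->next, struct item, node);
            return list_entry(pos->node.next, struct item, node);
    }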
diff --git a/tools/perf/util/ui/browsers/map.c b/tools/perf/util/ui/browsers/map.c index e5158369106e..8462bffe20bc 100644 --- a/tools/perf/util/ui/browsers/map.c +++ b/tools/perf/util/ui/browsers/map.c | |||
@@ -41,7 +41,7 @@ static int ui_entry__read(const char *title, char *bf, size_t size, int width) | |||
41 | out_free_form: | 41 | out_free_form: |
42 | newtPopWindow(); | 42 | newtPopWindow(); |
43 | newtFormDestroy(form); | 43 | newtFormDestroy(form); |
44 | return 0; | 44 | return err; |
45 | } | 45 | } |
46 | 46 | ||
47 | struct map_browser { | 47 | struct map_browser { |
diff --git a/tools/perf/util/ui/browsers/top.c b/tools/perf/util/ui/browsers/top.c new file mode 100644 index 000000000000..5a06538532af --- /dev/null +++ b/tools/perf/util/ui/browsers/top.c | |||
@@ -0,0 +1,213 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com> | ||
3 | * | ||
4 | * Parts came from builtin-{top,stat,record}.c, see those files for further | ||
5 | * copyright notes. | ||
6 | * | ||
7 | * Released under the GPL v2. (and only v2, not any later version) | ||
8 | */ | ||
9 | #include "../browser.h" | ||
10 | #include "../../annotate.h" | ||
11 | #include "../helpline.h" | ||
12 | #include "../libslang.h" | ||
13 | #include "../util.h" | ||
14 | #include "../../evlist.h" | ||
15 | #include "../../hist.h" | ||
16 | #include "../../sort.h" | ||
17 | #include "../../symbol.h" | ||
18 | #include "../../top.h" | ||
19 | |||
20 | struct perf_top_browser { | ||
21 | struct ui_browser b; | ||
22 | struct rb_root root; | ||
23 | struct sym_entry *selection; | ||
24 | float sum_ksamples; | ||
25 | int dso_width; | ||
26 | int dso_short_width; | ||
27 | int sym_width; | ||
28 | }; | ||
29 | |||
30 | static void perf_top_browser__write(struct ui_browser *browser, void *entry, int row) | ||
31 | { | ||
32 | struct perf_top_browser *top_browser = container_of(browser, struct perf_top_browser, b); | ||
33 | struct sym_entry *syme = rb_entry(entry, struct sym_entry, rb_node); | ||
34 | bool current_entry = ui_browser__is_current_entry(browser, row); | ||
35 | struct symbol *symbol = sym_entry__symbol(syme); | ||
36 | struct perf_top *top = browser->priv; | ||
37 | int width = browser->width; | ||
38 | double pcnt; | ||
39 | |||
40 | pcnt = 100.0 - (100.0 * ((top_browser->sum_ksamples - syme->snap_count) / | ||
41 | top_browser->sum_ksamples)); | ||
42 | ui_browser__set_percent_color(browser, pcnt, current_entry); | ||
43 | |||
44 | if (top->evlist->nr_entries == 1 || !top->display_weighted) { | ||
45 | slsmg_printf("%20.2f ", syme->weight); | ||
46 | width -= 24; | ||
47 | } else { | ||
48 | slsmg_printf("%9.1f %10ld ", syme->weight, syme->snap_count); | ||
49 | width -= 23; | ||
50 | } | ||
51 | |||
52 | slsmg_printf("%4.1f%%", pcnt); | ||
53 | width -= 7; | ||
54 | |||
55 | if (verbose) { | ||
56 | slsmg_printf(" %016" PRIx64, symbol->start); | ||
57 | width -= 17; | ||
58 | } | ||
59 | |||
60 | slsmg_printf(" %-*.*s ", top_browser->sym_width, top_browser->sym_width, | ||
61 | symbol->name); | ||
62 | width -= top_browser->sym_width; | ||
63 | slsmg_write_nstring(width >= syme->map->dso->long_name_len ? | ||
64 | syme->map->dso->long_name : | ||
65 | syme->map->dso->short_name, width); | ||
66 | |||
67 | if (current_entry) | ||
68 | top_browser->selection = syme; | ||
69 | } | ||
70 | |||
71 | static void perf_top_browser__update_rb_tree(struct perf_top_browser *browser) | ||
72 | { | ||
73 | struct perf_top *top = browser->b.priv; | ||
74 | u64 top_idx = browser->b.top_idx; | ||
75 | |||
76 | browser->root = RB_ROOT; | ||
77 | browser->b.top = NULL; | ||
78 | browser->sum_ksamples = perf_top__decay_samples(top, &browser->root); | ||
79 | /* | ||
80 | * No active symbols | ||
81 | */ | ||
82 | if (top->rb_entries == 0) | ||
83 | return; | ||
84 | |||
85 | perf_top__find_widths(top, &browser->root, &browser->dso_width, | ||
86 | &browser->dso_short_width, | ||
87 | &browser->sym_width); | ||
88 | if (browser->sym_width + browser->dso_width > browser->b.width - 29) { | ||
89 | browser->dso_width = browser->dso_short_width; | ||
90 | if (browser->sym_width + browser->dso_width > browser->b.width - 29) | ||
91 | browser->sym_width = browser->b.width - browser->dso_width - 29; | ||
92 | } | ||
93 | |||
94 | /* | ||
95 | * Adjust the ui_browser indexes since the entries in the browser->root | ||
96 | * rb_tree may have changed, then seek it from start, so that we get a | ||
97 | * possible new top of the screen. | ||
98 | */ | ||
99 | browser->b.nr_entries = top->rb_entries; | ||
100 | |||
101 | if (top_idx >= browser->b.nr_entries) { | ||
102 | if (browser->b.height >= browser->b.nr_entries) | ||
103 | top_idx = browser->b.nr_entries - browser->b.height; | ||
104 | else | ||
105 | top_idx = 0; | ||
106 | } | ||
107 | |||
108 | if (browser->b.index >= top_idx + browser->b.height) | ||
109 | browser->b.index = top_idx + browser->b.index - browser->b.top_idx; | ||
110 | |||
111 | if (browser->b.index >= browser->b.nr_entries) | ||
112 | browser->b.index = browser->b.nr_entries - 1; | ||
113 | |||
114 | browser->b.top_idx = top_idx; | ||
115 | browser->b.seek(&browser->b, top_idx, SEEK_SET); | ||
116 | } | ||
117 | |||
118 | static void perf_top_browser__annotate(struct perf_top_browser *browser) | ||
119 | { | ||
120 | struct sym_entry *syme = browser->selection; | ||
121 | struct symbol *sym = sym_entry__symbol(syme); | ||
122 | struct annotation *notes = symbol__annotation(sym); | ||
123 | struct perf_top *top = browser->b.priv; | ||
124 | |||
125 | if (notes->src != NULL) | ||
126 | goto do_annotation; | ||
127 | |||
128 | pthread_mutex_lock(¬es->lock); | ||
129 | |||
130 | top->sym_filter_entry = NULL; | ||
131 | |||
132 | if (symbol__alloc_hist(sym, top->evlist->nr_entries) < 0) { | ||
133 | pr_err("Not enough memory for annotating '%s' symbol!\n", | ||
134 | sym->name); | ||
135 | pthread_mutex_unlock(¬es->lock); | ||
136 | return; | ||
137 | } | ||
138 | |||
139 | top->sym_filter_entry = syme; | ||
140 | |||
141 | pthread_mutex_unlock(¬es->lock); | ||
142 | do_annotation: | ||
143 | symbol__tui_annotate(sym, syme->map, 0, top->delay_secs * 1000); | ||
144 | } | ||
145 | |||
146 | static int perf_top_browser__run(struct perf_top_browser *browser) | ||
147 | { | ||
148 | int key; | ||
149 | char title[160]; | ||
150 | struct perf_top *top = browser->b.priv; | ||
151 | int delay_msecs = top->delay_secs * 1000; | ||
152 | int exit_keys[] = { 'a', NEWT_KEY_ENTER, NEWT_KEY_RIGHT, 0, }; | ||
153 | |||
154 | perf_top_browser__update_rb_tree(browser); | ||
155 | perf_top__header_snprintf(top, title, sizeof(title)); | ||
156 | perf_top__reset_sample_counters(top); | ||
157 | |||
158 | if (ui_browser__show(&browser->b, title, | ||
159 | "ESC: exit, ENTER|->|a: Live Annotate") < 0) | ||
160 | return -1; | ||
161 | |||
162 | newtFormSetTimer(browser->b.form, delay_msecs); | ||
163 | ui_browser__add_exit_keys(&browser->b, exit_keys); | ||
164 | |||
165 | while (1) { | ||
166 | key = ui_browser__run(&browser->b); | ||
167 | |||
168 | switch (key) { | ||
169 | case -1: | ||
170 | /* FIXME we need to check if it was es.reason == NEWT_EXIT_TIMER */ | ||
171 | perf_top_browser__update_rb_tree(browser); | ||
172 | perf_top__header_snprintf(top, title, sizeof(title)); | ||
173 | perf_top__reset_sample_counters(top); | ||
174 | ui_browser__set_color(&browser->b, NEWT_COLORSET_ROOT); | ||
175 | SLsmg_gotorc(0, 0); | ||
176 | slsmg_write_nstring(title, browser->b.width); | ||
177 | break; | ||
178 | case 'a': | ||
179 | case NEWT_KEY_RIGHT: | ||
180 | case NEWT_KEY_ENTER: | ||
181 | if (browser->selection) | ||
182 | perf_top_browser__annotate(browser); | ||
183 | break; | ||
184 | case NEWT_KEY_LEFT: | ||
185 | continue; | ||
186 | case NEWT_KEY_ESCAPE: | ||
187 | if (!ui__dialog_yesno("Do you really want to exit?")) | ||
188 | continue; | ||
189 | /* Fall thru */ | ||
190 | default: | ||
191 | goto out; | ||
192 | } | ||
193 | } | ||
194 | out: | ||
195 | ui_browser__hide(&browser->b); | ||
196 | return key; | ||
197 | } | ||
198 | |||
199 | int perf_top__tui_browser(struct perf_top *top) | ||
200 | { | ||
201 | struct perf_top_browser browser = { | ||
202 | .b = { | ||
203 | .entries = &browser.root, | ||
204 | .refresh = ui_browser__rb_tree_refresh, | ||
205 | .seek = ui_browser__rb_tree_seek, | ||
206 | .write = perf_top_browser__write, | ||
207 | .priv = top, | ||
208 | }, | ||
209 | }; | ||
210 | |||
211 | ui_helpline__push("Press <- or ESC to exit"); | ||
212 | return perf_top_browser__run(&browser); | ||
213 | } | ||
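Because the rb-tree is rebuilt on every timer tick, perf_top_browser__update_rb_tree() has to pull top_idx and index back inside the possibly smaller entry count before seeking. A simplified, hypothetical version of that clamping (not the exact arithmetic above):

    #include <stdint.h>

    static void view__clamp(uint64_t *top_idx, uint64_t *index,
                            uint64_t nr_entries, uint64_t height)
    {
            if (nr_entries == 0) {
                    *top_idx = *index = 0;
                    return;
            }
            if (*top_idx >= nr_entries)
                    *top_idx = nr_entries > height ? nr_entries - height : 0;
            if (*index >= nr_entries)
                    *index = nr_entries - 1;
            if (*index < *top_idx)
                    *index = *top_idx;
    }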
diff --git a/tools/perf/util/ui/helpline.c b/tools/perf/util/ui/helpline.c index 8d79daa4458a..f36d2ff509ed 100644 --- a/tools/perf/util/ui/helpline.c +++ b/tools/perf/util/ui/helpline.c | |||
@@ -5,6 +5,7 @@ | |||
5 | 5 | ||
6 | #include "../debug.h" | 6 | #include "../debug.h" |
7 | #include "helpline.h" | 7 | #include "helpline.h" |
8 | #include "ui.h" | ||
8 | 9 | ||
9 | void ui_helpline__pop(void) | 10 | void ui_helpline__pop(void) |
10 | { | 11 | { |
@@ -55,7 +56,8 @@ int ui_helpline__show_help(const char *format, va_list ap) | |||
55 | int ret; | 56 | int ret; |
56 | static int backlog; | 57 | static int backlog; |
57 | 58 | ||
58 | ret = vsnprintf(ui_helpline__last_msg + backlog, | 59 | pthread_mutex_lock(&ui__lock); |
60 | ret = vsnprintf(ui_helpline__last_msg + backlog, | ||
59 | sizeof(ui_helpline__last_msg) - backlog, format, ap); | 61 | sizeof(ui_helpline__last_msg) - backlog, format, ap); |
60 | backlog += ret; | 62 | backlog += ret; |
61 | 63 | ||
@@ -64,6 +66,7 @@ int ui_helpline__show_help(const char *format, va_list ap) | |||
64 | newtRefresh(); | 66 | newtRefresh(); |
65 | backlog = 0; | 67 | backlog = 0; |
66 | } | 68 | } |
69 | pthread_mutex_unlock(&ui__lock); | ||
67 | 70 | ||
68 | return ret; | 71 | return ret; |
69 | } | 72 | } |
diff --git a/tools/perf/util/ui/libslang.h b/tools/perf/util/ui/libslang.h index 5623da8e8080..2b63e1c9b181 100644 --- a/tools/perf/util/ui/libslang.h +++ b/tools/perf/util/ui/libslang.h | |||
@@ -13,11 +13,11 @@ | |||
13 | 13 | ||
14 | #if SLANG_VERSION < 20104 | 14 | #if SLANG_VERSION < 20104 |
15 | #define slsmg_printf(msg, args...) \ | 15 | #define slsmg_printf(msg, args...) \ |
16 | SLsmg_printf((char *)msg, ##args) | 16 | SLsmg_printf((char *)(msg), ##args) |
17 | #define slsmg_write_nstring(msg, len) \ | 17 | #define slsmg_write_nstring(msg, len) \ |
18 | SLsmg_write_nstring((char *)msg, len) | 18 | SLsmg_write_nstring((char *)(msg), len) |
19 | #define sltt_set_color(obj, name, fg, bg) \ | 19 | #define sltt_set_color(obj, name, fg, bg) \ |
20 | SLtt_set_color(obj,(char *)name, (char *)fg, (char *)bg) | 20 | SLtt_set_color(obj,(char *)(name), (char *)(fg), (char *)(bg)) |
21 | #else | 21 | #else |
22 | #define slsmg_printf SLsmg_printf | 22 | #define slsmg_printf SLsmg_printf |
23 | #define slsmg_write_nstring SLsmg_write_nstring | 23 | #define slsmg_write_nstring SLsmg_write_nstring |
diff --git a/tools/perf/util/ui/setup.c b/tools/perf/util/ui/setup.c index 662085032eb7..ee46d671db59 100644 --- a/tools/perf/util/ui/setup.c +++ b/tools/perf/util/ui/setup.c | |||
@@ -6,6 +6,9 @@ | |||
6 | #include "../debug.h" | 6 | #include "../debug.h" |
7 | #include "browser.h" | 7 | #include "browser.h" |
8 | #include "helpline.h" | 8 | #include "helpline.h" |
9 | #include "ui.h" | ||
10 | |||
11 | pthread_mutex_t ui__lock = PTHREAD_MUTEX_INITIALIZER; | ||
9 | 12 | ||
10 | static void newt_suspend(void *d __used) | 13 | static void newt_suspend(void *d __used) |
11 | { | 14 | { |
@@ -14,11 +17,12 @@ static void newt_suspend(void *d __used) | |||
14 | newtResume(); | 17 | newtResume(); |
15 | } | 18 | } |
16 | 19 | ||
17 | void setup_browser(void) | 20 | void setup_browser(bool fallback_to_pager) |
18 | { | 21 | { |
19 | if (!isatty(1) || !use_browser || dump_trace) { | 22 | if (!isatty(1) || !use_browser || dump_trace) { |
20 | use_browser = 0; | 23 | use_browser = 0; |
21 | setup_pager(); | 24 | if (fallback_to_pager) |
25 | setup_pager(); | ||
22 | return; | 26 | return; |
23 | } | 27 | } |
24 | 28 | ||
diff --git a/tools/perf/util/ui/ui.h b/tools/perf/util/ui/ui.h new file mode 100644 index 000000000000..d264e059c829 --- /dev/null +++ b/tools/perf/util/ui/ui.h | |||
@@ -0,0 +1,8 @@ | |||
1 | #ifndef _PERF_UI_H_ | ||
2 | #define _PERF_UI_H_ 1 | ||
3 | |||
4 | #include <pthread.h> | ||
5 | |||
6 | extern pthread_mutex_t ui__lock; | ||
7 | |||
8 | #endif /* _PERF_UI_H_ */ | ||
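Aside: the new ui.h exposes ui__lock (defined in setup.c above) so that every path touching the shared newt/SLang screen state (the helpline, ui__warning() dialogs, the top browser's refresh) serializes on the same mutex. A minimal sketch of that convention follows; draw_status() and status_line are made-up stand-ins for the real call sites, and the actual SLang/newt drawing calls are only indicated in a comment.

    /* Sketch of the ui__lock convention (draw_status() is a hypothetical example). */
    #include <pthread.h>
    #include <string.h>
    #include <stdio.h>

    pthread_mutex_t ui__lock = PTHREAD_MUTEX_INITIALIZER;

    static char status_line[160];

    /* Any thread that writes to the shared screen state takes ui__lock first. */
    static void draw_status(const char *msg)
    {
    	pthread_mutex_lock(&ui__lock);
    	strncpy(status_line, msg, sizeof(status_line) - 1);
    	status_line[sizeof(status_line) - 1] = '\0';
    	/* ... SLsmg_gotorc()/slsmg_write_nstring()/newtRefresh() would go here ... */
    	pthread_mutex_unlock(&ui__lock);
    }

    int main(void)
    {
    	draw_status("updating the header from a refresh thread");
    	printf("%s\n", status_line);
    	return 0;
    }

This mirrors what the helpline.c and util.c hunks below do: lock, draw, unlock, so concurrent helpline updates and warning pop-ups cannot interleave their screen writes.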
diff --git a/tools/perf/util/ui/util.c b/tools/perf/util/ui/util.c index 7b5a8926624e..fdf1fc8f08bc 100644 --- a/tools/perf/util/ui/util.c +++ b/tools/perf/util/ui/util.c | |||
@@ -9,6 +9,7 @@ | |||
9 | #include "../debug.h" | 9 | #include "../debug.h" |
10 | #include "browser.h" | 10 | #include "browser.h" |
11 | #include "helpline.h" | 11 | #include "helpline.h" |
12 | #include "ui.h" | ||
12 | #include "util.h" | 13 | #include "util.h" |
13 | 14 | ||
14 | static void newt_form__set_exit_keys(newtComponent self) | 15 | static void newt_form__set_exit_keys(newtComponent self) |
@@ -118,10 +119,12 @@ void ui__warning(const char *format, ...) | |||
118 | va_list args; | 119 | va_list args; |
119 | 120 | ||
120 | va_start(args, format); | 121 | va_start(args, format); |
121 | if (use_browser > 0) | 122 | if (use_browser > 0) { |
123 | pthread_mutex_lock(&ui__lock); | ||
122 | newtWinMessagev((char *)warning_str, (char *)ok, | 124 | newtWinMessagev((char *)warning_str, (char *)ok, |
123 | (char *)format, args); | 125 | (char *)format, args); |
124 | else | 126 | pthread_mutex_unlock(&ui__lock); |
127 | } else | ||
125 | vfprintf(stderr, format, args); | 128 | vfprintf(stderr, format, args); |
126 | va_end(args); | 129 | va_end(args); |
127 | } | 130 | } |
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h index e833f26f3bfc..fc784284ac8b 100644 --- a/tools/perf/util/util.h +++ b/tools/perf/util/util.h | |||
@@ -70,9 +70,7 @@ | |||
70 | #include <sys/poll.h> | 70 | #include <sys/poll.h> |
71 | #include <sys/socket.h> | 71 | #include <sys/socket.h> |
72 | #include <sys/ioctl.h> | 72 | #include <sys/ioctl.h> |
73 | #ifndef NO_SYS_SELECT_H | ||
74 | #include <sys/select.h> | 73 | #include <sys/select.h> |
75 | #endif | ||
76 | #include <netinet/in.h> | 74 | #include <netinet/in.h> |
77 | #include <netinet/tcp.h> | 75 | #include <netinet/tcp.h> |
78 | #include <arpa/inet.h> | 76 | #include <arpa/inet.h> |
@@ -83,10 +81,6 @@ | |||
83 | #include "types.h" | 81 | #include "types.h" |
84 | #include <sys/ttydefaults.h> | 82 | #include <sys/ttydefaults.h> |
85 | 83 | ||
86 | #ifndef NO_ICONV | ||
87 | #include <iconv.h> | ||
88 | #endif | ||
89 | |||
90 | extern const char *graph_line; | 84 | extern const char *graph_line; |
91 | extern const char *graph_dotted_line; | 85 | extern const char *graph_dotted_line; |
92 | extern char buildid_dir[]; | 86 | extern char buildid_dir[]; |
@@ -236,26 +230,6 @@ static inline int sane_case(int x, int high) | |||
236 | return x; | 230 | return x; |
237 | } | 231 | } |
238 | 232 | ||
239 | #ifndef DIR_HAS_BSD_GROUP_SEMANTICS | ||
240 | # define FORCE_DIR_SET_GID S_ISGID | ||
241 | #else | ||
242 | # define FORCE_DIR_SET_GID 0 | ||
243 | #endif | ||
244 | |||
245 | #ifdef NO_NSEC | ||
246 | #undef USE_NSEC | ||
247 | #define ST_CTIME_NSEC(st) 0 | ||
248 | #define ST_MTIME_NSEC(st) 0 | ||
249 | #else | ||
250 | #ifdef USE_ST_TIMESPEC | ||
251 | #define ST_CTIME_NSEC(st) ((unsigned int)((st).st_ctimespec.tv_nsec)) | ||
252 | #define ST_MTIME_NSEC(st) ((unsigned int)((st).st_mtimespec.tv_nsec)) | ||
253 | #else | ||
254 | #define ST_CTIME_NSEC(st) ((unsigned int)((st).st_ctim.tv_nsec)) | ||
255 | #define ST_MTIME_NSEC(st) ((unsigned int)((st).st_mtim.tv_nsec)) | ||
256 | #endif | ||
257 | #endif | ||
258 | |||
259 | int mkdir_p(char *path, mode_t mode); | 233 | int mkdir_p(char *path, mode_t mode); |
260 | int copyfile(const char *from, const char *to); | 234 | int copyfile(const char *from, const char *to); |
261 | 235 | ||